From noreply at buildbot.pypy.org Sat Jun 1 01:05:29 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 01:05:29 +0200 (CEST) Subject: [pypy-commit] cffi default: One more test Message-ID: <20130531230529.D836F1C153B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1262:720cdf84b1e3 Date: 2013-06-01 01:03 +0200 http://bitbucket.org/cffi/cffi/changeset/720cdf84b1e3/ Log: One more test diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -138,6 +138,7 @@ self.check("char x; long long :0; char y;", L, 1, L + 1) self.check("short x, y; int :0; int :0;", 2, 2, 4) self.check("char x; int :0; short b:1; char y;", 5, 2, 6) + self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) def test_error_cases(self): ffi = FFI() From noreply at buildbot.pypy.org Sat Jun 1 01:05:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 01:05:30 +0200 (CEST) Subject: [pypy-commit] cffi default: Always run the C compiler, even if it's MSVC. Found out that the Message-ID: <20130531230530.F256C1C3007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1263:cc890bf469a7 Date: 2013-06-01 01:05 +0200 http://bitbucket.org/cffi/cffi/changeset/cc890bf469a7/ Log: Always run the C compiler, even if it's MSVC. Found out that the logic to pack bitfields is, of course, very different. diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -44,36 +44,31 @@ ffi.cdef("struct s1 { %s };" % source) ctype = ffi.typeof("struct s1") # verify the information with gcc - if sys.platform != "win32": - ffi1 = FFI() - ffi1.cdef(""" - static const int Gofs_y, Galign, Gsize; - struct s1 *try_with_value(int fieldnum, long long value); - """) - fnames = [name for name, cfield in ctype.fields - if name and cfield.bitsize > 0] - setters = ['case %d: s.%s = value; break;' % iname - for iname in enumerate(fnames)] - lib = ffi1.verify(""" - struct s1 { %s }; - struct sa { char a; struct s1 b; }; - #define Gofs_y offsetof(struct s1, y) - #define Galign offsetof(struct sa, b) - #define Gsize sizeof(struct s1) - struct s1 *try_with_value(int fieldnum, long long value) - { - static struct s1 s; - memset(&s, 0, sizeof(s)); - switch (fieldnum) { %s } - return &s; - } - """ % (source, ' '.join(setters))) - assert lib.Gofs_y == expected_ofs_y - assert lib.Galign == expected_align - assert lib.Gsize == expected_size - else: - lib = None - fnames = None + ffi1 = FFI() + ffi1.cdef(""" + static const int Gofs_y, Galign, Gsize; + struct s1 *try_with_value(int fieldnum, long long value); + """) + fnames = [name for name, cfield in ctype.fields + if name and cfield.bitsize > 0] + setters = ['case %d: s.%s = value; break;' % iname + for iname in enumerate(fnames)] + lib = ffi1.verify(""" + struct s1 { %s }; + struct sa { char a; struct s1 b; }; + #define Gofs_y offsetof(struct s1, y) + #define Galign offsetof(struct sa, b) + #define Gsize sizeof(struct s1) + struct s1 *try_with_value(int fieldnum, long long value) + { + static struct s1 s; + memset(&s, 0, sizeof(s)); + switch (fieldnum) { %s } + return &s; + } + """ % (source, ' '.join(setters))) + assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( + expected_ofs_y, expected_align, expected_size) # the real test follows assert ffi.offsetof("struct s1", "y") == expected_ofs_y assert ffi.alignof("struct s1") == expected_align @@ -98,10 +93,9 @@ setattr(s, name, value) assert 
getattr(s, name) == value raw1 = ffi.buffer(s)[:] - if lib is not None: - t = lib.try_with_value(fnames.index(name), value) - raw2 = ffi.buffer(t, len(raw1))[:] - assert raw1 == raw2 + t = lib.try_with_value(fnames.index(name), value) + raw2 = ffi.buffer(t, len(raw1))[:] + assert raw1 == raw2 def test_bitfield_basic(self): self.check("int a; int b:9; int c:20; int y;", 8, 4, 12) From noreply at buildbot.pypy.org Sat Jun 1 11:48:17 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 1 Jun 2013 11:48:17 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: (tfel, lwassermann): first iteration of working, exposed functions Message-ID: <20130601094817.391951C0196@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r430:554141f002ff Date: 2013-05-31 17:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/554141f002ff/ Log: (tfel, lwassermann): first iteration of working, exposed functions diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py new file mode 100644 --- /dev/null +++ b/spyvm/interpreter_proxy.py @@ -0,0 +1,54 @@ +# struct VirtualMachine* sqGetInterpreterProxy(void); + +# typedef struct VirtualMachine { +# sqInt (*minorVersion)(void); +# } VirtualMachine; + +# Loading a Plugin: +# plugin setInterpreter: proxy. +# (plugin respondsTo: #initialiseModule) ifTrue:[plugin initialiseModule]. +# plugin perform: primitiveName asSymbol. +from spyvm import error + +from rpython.rlib.entrypoint import entrypoint +from rpython.rtyper.annlowlevel import llhelper +from rpython.rlib.exports import export_struct +from rpython.rtyper.lltypesystem.lltype import FuncType, Struct, Ptr +from rpython.rtyper.lltypesystem import lltype + +sqInt = lltype.Signed +sqLong = lltype.SignedLongLong + +minorVFTP = Ptr(FuncType([], sqInt)) + +VirtualMachine = Struct("VirtualMachine", + ("minorVersion", minorVFTP) + ) +VMPtr = Ptr(VirtualMachine) +# export_struct("VirtualMachine", VirtualMachine) + + at entrypoint('main', [], c_name='sqGetInterpreterProxy') +def sqGetInterpreterProxy(): + if not InterpreterProxy.vm_initialized: + vm_proxy = lltype.malloc(VirtualMachine, flavor='raw') + vm_proxy.minorVersion = llhelper(minorVFTP, minorVersion) + InterpreterProxy.vm_proxy = vm_proxy + InterpreterProxy.vm_initialized = True + return InterpreterProxy.vm_proxy + +def minorVersion(): + return 1 + + +class _InterpreterProxy(object): + _immutable_fields_ = ['vm_initialized?'] + + def __init__(self): + self.vm_proxy = lltype.nullptr(VMPtr.TO) + self.vm_initialized = False + + def call(self, signature, interp, s_frame, argcount, s_method): + print "Hello World..." 
+ raise error.Exit("External Call") + +InterpreterProxy = _InterpreterProxy() From noreply at buildbot.pypy.org Sat Jun 1 11:48:18 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 1 Jun 2013 11:48:18 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added decorator for interpreterProxy functions Message-ID: <20130601094818.68E881C0690@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r431:63aa3c3de920 Date: 2013-05-31 19:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63aa3c3de920/ Log: added decorator for interpreterProxy functions diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -1,54 +1,74 @@ # struct VirtualMachine* sqGetInterpreterProxy(void); # typedef struct VirtualMachine { -# sqInt (*minorVersion)(void); +# sqInt (*minorVersion)(void); # } VirtualMachine; # Loading a Plugin: -# plugin setInterpreter: proxy. -# (plugin respondsTo: #initialiseModule) ifTrue:[plugin initialiseModule]. -# plugin perform: primitiveName asSymbol. +# plugin setInterpreter: proxy. +# (plugin respondsTo: #initialiseModule) ifTrue:[plugin initialiseModule]. +# plugin perform: primitiveName asSymbol. from spyvm import error from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.exports import export_struct -from rpython.rtyper.lltypesystem.lltype import FuncType, Struct, Ptr +from rpython.rtyper.lltypesystem.lltype import FuncType, Ptr from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.unroll import unrolling_iterable sqInt = lltype.Signed sqLong = lltype.SignedLongLong -minorVFTP = Ptr(FuncType([], sqInt)) +major = minor = 0 +functions = [] -VirtualMachine = Struct("VirtualMachine", - ("minorVersion", minorVFTP) - ) +def expose_on_virtual_machine_proxy(signature, minor=0, major=1): + f_ptr = Ptr(signature) + if minor < minor: + minor = minor + if major < major: + major = major + def decorator(func): + functions.append((func.func_name, f_ptr, func)) + return func + return decorator + + at expose_on_virtual_machine_proxy(FuncType([], sqInt)) +def minorVersion(): + return minor + + at expose_on_virtual_machine_proxy(FuncType([], sqInt)) +def majorVersion(): + return major + +VirtualMachine = lltype.Struct("VirtualMachine", + *map(lambda x: (x[0], x[1]), functions)) VMPtr = Ptr(VirtualMachine) -# export_struct("VirtualMachine", VirtualMachine) + +proxy_functions = unrolling_iterable(functions) @entrypoint('main', [], c_name='sqGetInterpreterProxy') def sqGetInterpreterProxy(): - if not InterpreterProxy.vm_initialized: - vm_proxy = lltype.malloc(VirtualMachine, flavor='raw') - vm_proxy.minorVersion = llhelper(minorVFTP, minorVersion) - InterpreterProxy.vm_proxy = vm_proxy - InterpreterProxy.vm_initialized = True - return InterpreterProxy.vm_proxy + if not InterpreterProxy.vm_initialized: + vm_proxy = lltype.malloc(VirtualMachine, flavor='raw') + for func_name, signature, func in proxy_functions: + setattr(vm_proxy, func_name, llhelper(signature, func)) + InterpreterProxy.vm_proxy = vm_proxy + InterpreterProxy.vm_initialized = True + return InterpreterProxy.vm_proxy -def minorVersion(): - return 1 - +# export_struct("VirtualMachine", VirtualMachine) class _InterpreterProxy(object): - _immutable_fields_ = ['vm_initialized?'] + _immutable_fields_ = ['vm_initialized?'] - def __init__(self): - self.vm_proxy = lltype.nullptr(VMPtr.TO) - self.vm_initialized = False + def __init__(self): + 
self.vm_proxy = lltype.nullptr(VMPtr.TO) + self.vm_initialized = False - def call(self, signature, interp, s_frame, argcount, s_method): - print "Hello World..." - raise error.Exit("External Call") + def call(self, signature, interp, s_frame, argcount, s_method): + print "Hello World..." + raise error.Exit("External Call") InterpreterProxy = _InterpreterProxy() diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -850,6 +850,9 @@ elif signature[0] == "VMDebugging": from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) + else: + from spyvm.interpreter_proxy import InterpreterProxy + return InterpreterProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError # ___________________________________________________________________________ diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -7,6 +7,7 @@ from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ error, shadow from spyvm.tool.analyseimage import create_image +from spyvm.interpreter_proxy import VirtualMachine def _run_benchmark(interp, number, benchmark): From noreply at buildbot.pypy.org Sat Jun 1 11:48:19 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 1 Jun 2013 11:48:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added hint to the VirtualMachine, to have it's field names preserved Message-ID: <20130601094819.8DA871C0196@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r432:19f938127015 Date: 2013-05-31 20:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/19f938127015/ Log: added hint to the VirtualMachine, to have it's field names preserved diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -14,11 +14,11 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.exports import export_struct from rpython.rtyper.lltypesystem.lltype import FuncType, Ptr -from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -sqInt = lltype.Signed -sqLong = lltype.SignedLongLong +sqInt = rffi.INT +sqLong = rffi.LONG major = minor = 0 functions = [] @@ -30,7 +30,7 @@ if major < major: major = major def decorator(func): - functions.append((func.func_name, f_ptr, func)) + functions.append(("c_" + func.func_name, f_ptr, func)) return func return decorator @@ -43,7 +43,8 @@ return major VirtualMachine = lltype.Struct("VirtualMachine", - *map(lambda x: (x[0], x[1]), functions)) + *map(lambda x: (x[0], x[1]), functions), + hints={'c_name': 'VirtualMachine'}) VMPtr = Ptr(VirtualMachine) proxy_functions = unrolling_iterable(functions) @@ -58,7 +59,7 @@ InterpreterProxy.vm_initialized = True return InterpreterProxy.vm_proxy -# export_struct("VirtualMachine", VirtualMachine) +# rffi.llexternal is supposed to represent c-functions. 
class _InterpreterProxy(object): _immutable_fields_ = ['vm_initialized?'] From noreply at buildbot.pypy.org Sat Jun 1 11:48:20 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 1 Jun 2013 11:48:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: renamed interpreter_proxy.InterpreterProxy to IProxy Message-ID: <20130601094820.A0FC61C0196@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r433:03dc3c9bbf83 Date: 2013-05-31 22:40 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/03dc3c9bbf83/ Log: renamed interpreter_proxy.InterpreterProxy to IProxy added 13 interpreterProxy functions, oop & Array-result handling is only stubed diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -8,8 +8,6 @@ # plugin setInterpreter: proxy. # (plugin respondsTo: #initialiseModule) ifTrue:[plugin initialiseModule]. # plugin perform: primitiveName asSymbol. -from spyvm import error - from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.exports import export_struct @@ -17,8 +15,11 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable +from spyvm import error, model + sqInt = rffi.INT sqLong = rffi.LONG +sqDouble = rffi.DOUBLE major = minor = 0 functions = [] @@ -42,6 +43,109 @@ def majorVersion(): return major + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def pop(nItems): + IProxy.s_frame.pop_n(nItems) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt, sqInt], sqInt)) +def popthenPush(nItems, oop): + s_frame = IProxy.s_frame + s_frame.pop_n(nItems) + s_frame.push(IProxy.oop_to_object(oop)) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def push(oop): + s_frame = IProxy.s_frame + s_frame.push(IProxy.oop_to_object(oop)) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def pushBool(trueOrFalse): + s_frame = IProxy.s_frame + if trueOrFalse: + s_frame.push(IProxy.interp.space.w_true) + else: + s_frame.push(IProxy.interp.space.w_false) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqDouble], sqInt)) +def pushFloat(f): + s_frame = IProxy.s_frame + s_frame.push(IProxy.space.wrap_float(f)) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def pushInteger(n): + s_frame = IProxy.s_frame + s_frame.push(IProxy.space.wrap_int(n)) + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqDouble)) +def stackFloatValue(offset): + s_frame = IProxy.s_frame + f = s_frame.peek(offset) + if isinstance(f, model.W_Float): + return f.value + else: + IProxy.successFlag = False + return 0.0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def stackIntegerValue(offset): + s_frame = IProxy.s_frame + n = s_frame.peek(offset) + try: + return IProxy.space.unwrap_int(n) + except error.PrimitiveFailedError: + IProxy.successFlag = False + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def stackObjectValue(offset): + s_frame = IProxy.s_frame + w_object = s_frame.peek(offset) + if not isinstance(w_object, model.W_SmallInteger): + return IProxy.object_to_oop(w_object) + IProxy.successFlag = False + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def stackValue(offset): + s_frame = IProxy.s_frame + return IProxy.object_to_oop(s_frame.peek(offset)) + + at 
expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def argumentCountOf(methodOOP): + w_method = IProxy.oop_to_object(methodOOP) + if isinstance(w_method, model.W_CompiledMethod): + return w_method.argsize + IProxy.successFlag = False + return 0 + + at expose_on_virtual_machine_proxy(FuncType([sqInt], Ptr(lltype.Array(sqInt)))) +def arrayValueOf(oop): + w_array = IProxy.oop_to_object(oop) + if isinstance(w_array, model.W_WordsObject) or isinstance(w_array, model.W_BytesObject): + raise NotImplementedError + IProxy.successFlag = False + return [] + + at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) +def byteSizeOf(oop): + w_object = IProxy.oop_to_object(oop) + s_class = w_object.shadow_of_my_class(IProxy.space) + size = s_class.instsize() + if s_class.isvariable(): + size += w_object.primsize(IProxy.space) + if isinstance(w_object, model.W_BytesObject): + size *= size * 4 + return IProxy.space.wrap_int(size) + + +# ############################################################################## + VirtualMachine = lltype.Struct("VirtualMachine", *map(lambda x: (x[0], x[1]), functions), hints={'c_name': 'VirtualMachine'}) @@ -51,13 +155,13 @@ @entrypoint('main', [], c_name='sqGetInterpreterProxy') def sqGetInterpreterProxy(): - if not InterpreterProxy.vm_initialized: + if not IProxy.vm_initialized: vm_proxy = lltype.malloc(VirtualMachine, flavor='raw') for func_name, signature, func in proxy_functions: setattr(vm_proxy, func_name, llhelper(signature, func)) - InterpreterProxy.vm_proxy = vm_proxy - InterpreterProxy.vm_initialized = True - return InterpreterProxy.vm_proxy + IProxy.vm_proxy = vm_proxy + IProxy.vm_initialized = True + return IProxy.vm_proxy # rffi.llexternal is supposed to represent c-functions. @@ -67,9 +171,31 @@ def __init__(self): self.vm_proxy = lltype.nullptr(VMPtr.TO) self.vm_initialized = False + self.reset() + + def reset(self): + self.interp = None + self.s_frame = None + self.argcount = 0 + self.s_method = None + self.successFlag = True def call(self, signature, interp, s_frame, argcount, s_method): - print "Hello World..." - raise error.Exit("External Call") + self.interp = interp + self.s_frame = s_frame + self.argcount = argcount + self.s_method = s_method + self.space = interp.space + try: + print "Hello World..." 
+ raise error.Exit("External Call") + finally: + self.reset() -InterpreterProxy = _InterpreterProxy() + def oop_to_object(self, oop): + return self.interp.space.w_nil + + def object_to_oop(self, oop): + return 0 + +IProxy = _InterpreterProxy() diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -851,8 +851,8 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: - from spyvm.interpreter_proxy import InterpreterProxy - return InterpreterProxy.call(signature, interp, s_frame, argcount, s_method) + from spyvm.interpreter_proxy import IProxy + return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError # ___________________________________________________________________________ From noreply at buildbot.pypy.org Sat Jun 1 11:48:21 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Sat, 1 Jun 2013 11:48:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed two translation caveats Message-ID: <20130601094821.AC5201C0196@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r434:85ca24ecf5fb Date: 2013-06-01 11:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/85ca24ecf5fb/ Log: fixed two translation caveats diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -20,6 +20,7 @@ sqInt = rffi.INT sqLong = rffi.LONG sqDouble = rffi.DOUBLE +sqIntArray = rffi.CArray(sqInt) major = minor = 0 functions = [] @@ -124,13 +125,13 @@ IProxy.successFlag = False return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt], Ptr(lltype.Array(sqInt)))) + at expose_on_virtual_machine_proxy(FuncType([sqInt], Ptr(sqIntArray))) def arrayValueOf(oop): w_array = IProxy.oop_to_object(oop) if isinstance(w_array, model.W_WordsObject) or isinstance(w_array, model.W_BytesObject): raise NotImplementedError IProxy.successFlag = False - return [] + return rffi.cast(Ptr(sqIntArray), 0) @expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) def byteSizeOf(oop): @@ -140,8 +141,8 @@ if s_class.isvariable(): size += w_object.primsize(IProxy.space) if isinstance(w_object, model.W_BytesObject): - size *= size * 4 - return IProxy.space.wrap_int(size) + size *= 4 + return size # ############################################################################## From noreply at buildbot.pypy.org Sat Jun 1 11:51:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 11:51:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Attempt to fix issue #1506: change back the config var 'SO' to be the regular Message-ID: <20130601095132.C4D1E1C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64698:722471a15693 Date: 2013-06-01 11:50 +0200 http://bitbucket.org/pypy/pypy/changeset/722471a15693/ Log: Attempt to fix issue #1506: change back the config var 'SO' to be the regular so extension for that platforms (e.g. ".so"). Instead, use the extended naming ".pypy-20.so" only in build_ext. 
diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars From noreply at buildbot.pypy.org Sat Jun 1 12:10:53 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 12:10:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Yay, found the source of one occasional crash we get on buildbot. Message-ID: <20130601101053.C7C0D1C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64699:fe0866846c4b Date: 2013-06-01 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/fe0866846c4b/ Log: Yay, found the source of one occasional crash we get on buildbot. 
diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -37,7 +37,7 @@ if isinstance(v, float): from rpython.rlib.rfloat import formatd, DTSF_ADD_DOT_0 return formatd(v, 'r', 0, DTSF_ADD_DOT_0) - return v + return str(v) # always return a string, to get consistent types def parse_longlong(a): p0, p1 = a.split(":") @@ -205,6 +205,28 @@ py.test.raises(Exception, f1, "world") # check that it's really typed +def test_int_becomes_float(): + # used to crash "very often": the long chain of mangle() calls end + # up converting the return value of f() from an int to a float, but + # if blocks are followed in random order by the annotator, it will + # very likely first follow the call to llrepr_out() done after the + # call to f(), getting an int first (and a float only later). + @specialize.arg(1) + def mangle(x, chain): + if chain: + return mangle(x, chain[1:]) + return x - 0.5 + def f(x): + if x > 10: + x = mangle(x, (1,1,1,1,1,1,1,1,1,1)) + return x + 1 + + f1 = compile(f, [int]) + + assert f1(5) == 6 + assert f1(12) == 12.5 + + def test_string_arg(): def f(s): total = 0 From noreply at buildbot.pypy.org Sat Jun 1 12:13:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 12:13:00 +0200 (CEST) Subject: [pypy-commit] pypy default: ah, it doesn't fail on Linux64. that's why it was happily merged in Message-ID: <20130601101300.B50F01C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64700:aae73212c3d2 Date: 2013-06-01 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/aae73212c3d2/ Log: ah, it doesn't fail on Linux64. that's why it was happily merged in diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -24,4 +24,4 @@ def setup_class(cls): import py - py.test.xfail("FIXME: dtype('int32') == dtype('int32') fails") + py.test.xfail("FIXME: dtype('int32') == dtype('int32') fails (but only on 32-bit?)") From noreply at buildbot.pypy.org Sat Jun 1 13:07:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 13:07:50 +0200 (CEST) Subject: [pypy-commit] cffi default: MSVC-style bitfields, first attempt Message-ID: <20130601110750.A092D1C1128@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1264:4e06afeabe1c Date: 2013-06-01 13:04 +0200 http://bitbucket.org/cffi/cffi/changeset/4e06afeabe1c/ Log: MSVC-style bitfields, first attempt diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3424,6 +3424,8 @@ return cf; /* borrowed reference */ } +#define SF_MSVC_BITFIELDS 1 + static PyObject *b_complete_struct_or_union(PyObject *self, PyObject *args) { CTypeDescrObject *ct; @@ -3433,11 +3435,17 @@ Py_ssize_t totalsize = -1; int totalalignment = -1; CFieldObject **previous; - - if (!PyArg_ParseTuple(args, "O!O!|Oni:complete_struct_or_union", + int prev_bitfield_size, prev_bitfield_free; +#ifdef MS_WIN32 + int sflags = SF_MSVC_BITFIELDS; +#else + int sflags = 0; +#endif + + if (!PyArg_ParseTuple(args, "O!O!|Onii:complete_struct_or_union", &CTypeDescr_Type, &ct, &PyList_Type, &fields, - &ignored, &totalsize, &totalalignment)) + &ignored, &totalsize, &totalalignment, &sflags)) return NULL; if ((ct->ct_flags & (CT_STRUCT|CT_IS_OPAQUE)) == @@ -3457,6 +3465,8 @@ alignment = 1; boffset = 0; /* this number is in 
*bits*, not bytes! */ boffsetmax = 0; /* the maximum value of boffset, in bits too */ + prev_bitfield_size = 0; + prev_bitfield_free = 0; nb_fields = PyList_GET_SIZE(fields); interned_fields = PyDict_New(); if (interned_fields == NULL) @@ -3543,6 +3553,7 @@ previous = &(*previous)->cf_next; } boffset += ftype->ct_size * 8; + prev_bitfield_size = 0; } else { /* this is the case of a bitfield */ @@ -3592,29 +3603,59 @@ assert(boffset < field_offset_bytes * 8); } boffset = field_offset_bytes * 8; /* the only effect */ + prev_bitfield_size = 0; } else { - /* Can the field start at the offset given by 'boffset'? It - can if it would entirely fit into an aligned ftype field. */ - bits_already_occupied = boffset - (field_offset_bytes * 8); - - if (bits_already_occupied + fbitsize > 8 * ftype->ct_size) { - /* it would not fit, we need to start at the next - allowed position */ - field_offset_bytes += falign; - assert(boffset < field_offset_bytes * 8); - boffset = field_offset_bytes * 8; - bitshift = 0; + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC's algorithm */ + + /* Can the field start at the offset given by 'boffset'? It + can if it would entirely fit into an aligned ftype field. */ + bits_already_occupied = boffset - (field_offset_bytes * 8); + + if (bits_already_occupied + fbitsize > 8 * ftype->ct_size) { + /* it would not fit, we need to start at the next + allowed position */ + field_offset_bytes += falign; + assert(boffset < field_offset_bytes * 8); + boffset = field_offset_bytes * 8; + bitshift = 0; + } + else { + bitshift = bits_already_occupied; + assert(bitshift >= 0); + } + boffset += fbitsize; } - else - bitshift = bits_already_occupied; + else { + /* MSVC's algorithm */ + + /* A bitfield is considered as taking the full width + of their declared type. It can share some bits + with the previous field only if it was also a + bitfield and used a type of the same size. 
*/ + if (prev_bitfield_size == ftype->ct_size && + prev_bitfield_free >= fbitsize) { + /* yes: reuse */ + bitshift = 8 * prev_bitfield_size - prev_bitfield_free; + } + else { + /* no: start a new full field */ + boffset = (boffset + falign*8-1) & ~(falign*8-1); /*align*/ + boffset += ftype->ct_size * 8; + bitshift = 0; + prev_bitfield_size = ftype->ct_size; + prev_bitfield_free = 8 * prev_bitfield_size; + } + prev_bitfield_free -= fbitsize; + field_offset_bytes = boffset / 8 - ftype->ct_size; + } *previous = _add_field(interned_fields, fname, ftype, field_offset_bytes, bitshift, fbitsize); if (*previous == NULL) goto error; previous = &(*previous)->cf_next; - boffset += fbitsize; } } diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2768,22 +2768,28 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) -def test_bitfield_as_gcc(): +def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") BInt = new_primitive_type("int") + BUInt = new_primitive_type("unsigned int") BStruct = new_struct_type("foo1") complete_struct_or_union(BStruct, [('a', BChar, -1), - ('b', BInt, 9), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 3) - assert sizeof(BStruct) == 4 + ('b1', BInt, 9), + ('b2', BUInt, 7), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 3) + assert sizeof(BStruct) == 4 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 8) + assert sizeof(BStruct) == 12 assert alignof(BStruct) == 4 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BShort, 9), - ('c', BChar, -1)]) + ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 @@ -2792,12 +2798,19 @@ complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BInt, 0), ('', BInt, 0), - ('c', BChar, -1)]) + ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) assert sizeof(BStruct) == 5 assert alignof(BStruct) == 1 +def test_bitfield_as_gcc(): + _test_bitfield_details(flag=0) + +def test_bitfield_as_msvc(): + _test_bitfield_details(flag=1) + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" From noreply at buildbot.pypy.org Sat Jun 1 13:07:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 13:07:51 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix this test for MSVC Message-ID: <20130601110751.B70161C1128@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1265:110b6b1931df Date: 2013-06-01 13:07 +0200 http://bitbucket.org/cffi/cffi/changeset/110b6b1931df/ Log: Fix this test for MSVC diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -40,6 +40,9 @@ class TestBitfield: def check(self, source, expected_ofs_y, expected_align, expected_size): + # NOTE: 'expected_*' is the numbers expected from GCC. + # The numbers expected from MSVC are not explicitly written + # in this file, and will just be taken from the compiler. 
ffi = FFI() ffi.cdef("struct s1 { %s };" % source) ctype = ffi.typeof("struct s1") @@ -67,8 +70,13 @@ return &s; } """ % (source, ' '.join(setters))) - assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( - expected_ofs_y, expected_align, expected_size) + if sys.platform == 'win32': + expected_ofs_y = lib.Gofs_y + expected_align = lib.Galign + expected_size = lib.Gsize + else: + assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( + expected_ofs_y, expected_align, expected_size) # the real test follows assert ffi.offsetof("struct s1", "y") == expected_ofs_y assert ffi.alignof("struct s1") == expected_align From noreply at buildbot.pypy.org Sat Jun 1 13:34:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 13:34:45 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix MSVC bitfields in all tested cases. Message-ID: <20130601113445.4EBCB1C0690@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1266:263caa88878e Date: 2013-06-01 13:34 +0200 http://bitbucket.org/cffi/cffi/changeset/263caa88878e/ Log: Fix MSVC bitfields in all tested cases. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3477,7 +3477,7 @@ for (i=0; i 0)) + + do_align = 1; + if (fbitsize >= 0) { + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC: anonymous bitfields (of any size) don't cause alignment */ + do_align = PyText_GetSize(fname) > 0; + } + else { + /* MSVC: zero-sized bitfields don't cause alignment */ + do_align = fbitsize > 0; + } + } + if (alignment < falign && do_align) alignment = falign; if (fbitsize < 0) { @@ -3598,11 +3610,23 @@ "field '%s.%s' is declared with :0", ct->ct_name, PyText_AS_UTF8(fname)); } - if (boffset > field_offset_bytes * 8) { - field_offset_bytes += falign; - assert(boffset < field_offset_bytes * 8); + if (!(sflags & SF_MSVC_BITFIELDS)) { + /* GCC's notion of "ftype :0;" */ + + /* pad boffset to a value aligned for "ftype" */ + if (boffset > field_offset_bytes * 8) { + field_offset_bytes += falign; + assert(boffset < field_offset_bytes * 8); + } + boffset = field_offset_bytes * 8; } - boffset = field_offset_bytes * 8; /* the only effect */ + else { + /* MSVC's notion of "ftype :0;" */ + + /* Mostly ignored. It seems they only serve as + separator between other bitfields, to force them + into separate words. 
*/ + } prev_bitfield_size = 0; } else { diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2791,16 +2791,24 @@ ('', BShort, 9), ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 + if flag == 0: # gcc + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + else: # msvc + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BInt, 0), ('', BInt, 0), ('c', BChar, -1)], -1, -1, -1, flag) - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 5 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 assert alignof(BStruct) == 1 diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -137,6 +137,7 @@ L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) self.check("char x; int :0; char y;", 4, 1, 5) + self.check("char x; int :0; int :0; char y;", 4, 1, 5) self.check("char x; long long :0; char y;", L, 1, L + 1) self.check("short x, y; int :0; int :0;", 2, 2, 4) self.check("char x; int :0; short b:1; char y;", 5, 2, 6) From noreply at buildbot.pypy.org Sat Jun 1 13:59:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 13:59:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Update to cffi/263caa88878e Message-ID: <20130601115904.1FF901C0690@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64701:7b2e8cc68c61 Date: 2013-06-01 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/7b2e8cc68c61/ Log: Update to cffi/263caa88878e diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec @@ -113,6 +114,14 @@ # ____________________________________________________________ +SF_MSVC_BITFIELDS = 1 + +if sys.platform == 'win32': + DEFAULT_SFLAGS = SF_MSVC_BITFIELDS +else: + DEFAULT_SFLAGS = 0 + + @unwrap_spec(name=str) def new_struct_type(space, name): return ctypestruct.W_CTypeStruct(space, name) @@ -121,9 +130,11 @@ def new_union_type(space, name): return ctypestruct.W_CTypeUnion(space, name) - at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int) + at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int, + sflags=int) def complete_struct_or_union(space, w_ctype, w_fields, w_ignored=None, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, + sflags=DEFAULT_SFLAGS): if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) or w_ctype.size >= 0): raise OperationError(space.w_TypeError, @@ -134,6 +145,8 @@ alignment = 1 boffset = 0 # this number is in *bits*, not bytes! 
boffsetmax = 0 # the maximum value of boffset, in bits too + prev_bitfield_size = 0 + prev_bitfield_free = 0 fields_w = space.listview(w_fields) fields_list = [] fields_dict = {} @@ -166,7 +179,15 @@ # update the total alignment requirement, but skip it if the # field is an anonymous bitfield falign = ftype.alignof() - if alignment < falign and (fbitsize < 0 or fname != ''): + do_align = True + if fbitsize >= 0: + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC: anonymous bitfields (of any size) don't cause alignment + do_align = (fname != '') + else: + # MSVC: zero-sized bitfields don't cause alignment + do_align = (fbitsize > 0) + if alignment < falign and do_align: alignment = falign # if fbitsize < 0: @@ -208,6 +229,7 @@ fields_dict[fname] = fld boffset += ftype.size * 8 + prev_bitfield_size = 0 else: # this is the case of a bitfield @@ -243,31 +265,67 @@ raise operationerrfmt(space.w_TypeError, "field '%s.%s' is declared with :0", w_ctype.name, fname) - if boffset > field_offset_bytes * 8: - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's notion of "ftype :0;" + # pad boffset to a value aligned for "ftype" + if boffset > field_offset_bytes * 8: + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + else: + # MSVC's notion of "ftype :0; + # Mostly ignored. It seems they only serve as + # separator between other bitfields, to force them + # into separate words. + pass + prev_bitfield_size = 0 + else: - # Can the field start at the offset given by 'boffset'? It - # can if it would entirely fit into an aligned ftype field. - bits_already_occupied = boffset - (field_offset_bytes * 8) + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's algorithm - if bits_already_occupied + fbitsize > 8 * ftype.size: - # it would not fit, we need to start at the next - # allowed position - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 - bitshift = 0 + # Can the field start at the offset given by 'boffset'? It + # can if it would entirely fit into an aligned ftype field. + bits_already_occupied = boffset - (field_offset_bytes * 8) + + if bits_already_occupied + fbitsize > 8 * ftype.size: + # it would not fit, we need to start at the next + # allowed position + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + bitshift = 0 + else: + bitshift = bits_already_occupied + assert bitshift >= 0 + boffset += fbitsize + else: - bitshift = bits_already_occupied + # MSVC's algorithm + + # A bitfield is considered as taking the full width + # of their declared type. It can share some bits + # with the previous field only if it was also a + # bitfield and used a type of the same size. 
+ if (prev_bitfield_size == ftype.size and + prev_bitfield_free >= fbitsize): + # yes: reuse + bitshift = 8 * prev_bitfield_size - prev_bitfield_free + else: + # no: start a new full field + boffset = (boffset + falign*8-1) & ~(falign*8-1) + boffset += ftype.size * 8 + bitshift = 0 + prev_bitfield_size = ftype.size + prev_bitfield_free = 8 * prev_bitfield_size + # + prev_bitfield_free -= fbitsize + field_offset_bytes = boffset / 8 - ftype.size fld = ctypestruct.W_CField(ftype, field_offset_bytes, bitshift, fbitsize) fields_list.append(fld) fields_dict[fname] = fld - - boffset += fbitsize if boffset > boffsetmax: boffsetmax = boffset diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2757,36 +2757,57 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) -def test_bitfield_as_gcc(): +def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") BInt = new_primitive_type("int") + BUInt = new_primitive_type("unsigned int") BStruct = new_struct_type("foo1") complete_struct_or_union(BStruct, [('a', BChar, -1), - ('b', BInt, 9), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 3) - assert sizeof(BStruct) == 4 + ('b1', BInt, 9), + ('b2', BUInt, 7), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 3) + assert sizeof(BStruct) == 4 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 8) + assert sizeof(BStruct) == 12 assert alignof(BStruct) == 4 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BShort, 9), - ('c', BChar, -1)]) + ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 + if flag == 0: # gcc + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + else: # msvc + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BInt, 0), ('', BInt, 0), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 5 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 assert alignof(BStruct) == 1 +def test_bitfield_as_gcc(): + _test_bitfield_details(flag=0) + +def test_bitfield_as_msvc(): + _test_bitfield_details(flag=1) + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" From noreply at buildbot.pypy.org Sat Jun 1 17:51:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 17:51:19 +0200 (CEST) Subject: [pypy-commit] stmgc default: Continue writing and finding new ideas. Kill the previous text for now. Message-ID: <20130601155119.17D781C0196@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r54:4a7fb6ee72c1 Date: 2013-06-01 17:50 +0200 http://bitbucket.org/pypy/stmgc/changeset/4a7fb6ee72c1/ Log: Continue writing and finding new ideas. Kill the previous text for now. 
diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -4,13 +4,13 @@ In this document we say "high-level object" to mean an object from the point of the user of the library, as opposed to an "object copy", which -occupies the space of one allocated piece of memory. One high-level -object can exist in several copies simultaneously. This concept of -"copy" should not be confused with a "revision", which stands for a -globally consistent copy of all objects. One revision is the result of -one transaction. A program usually has one revision per thread in -progress, plus any number of older committed revisions. The committed -revisions are globally ordered. +is what occupies the space of one allocated piece of memory. One +high-level object can exist in several copies simultaneously. This +concept of "copy" should not be confused with a "revision", which stands +for a globally consistent copy of all objects. One revision is the +result of one transaction. A program usually has one revision per +thread in progress, plus any number of older committed revisions. The +committed revisions are globally ordered. The object copies exist in one of three main states: they can be "private", "protected" or "public". A copy is private when it belongs @@ -37,14 +37,13 @@ revision; and the other, private, is the current copy. If the transaction aborts we can forget the private copy and reuse the previous protected copy. If the transaction commits we forget the previous -protected copy instead; then at this point all private objects become -protected. If the object is modified again in the near future, we reuse -the memory that was occupied by the previous copy to store the next -private copy. As a result, each of these two spaces in memory can be -young or old. When the GC runs, if any one of these two copies is -young, only the other copy is kept. Similarly during major collections, -only one copy is kept. So objects no longer modified will eventually -consume only one space. +protected copy instead; then the private copy becomes protected, like +*all* private copies at this point. If the object is modified again in +the near future, we reuse the memory that was occupied by the previous +copy to store the next private copy. As a result, each of these two +spaces in memory can be young or old. If an object is no longer +modified for long enough, the next (minor or major) GC will free one of +the two spaces it uses. The way to share data between threads goes via prebuilt objects, which are always public: it is their existence that gives the starting point @@ -60,19 +59,21 @@ copy. Any access from a different thread will trigger "stealing", as explained next. -2. A thread tries to access a public object but finds that another -thread has committed changes to it (hereafter called the "foreign -thread"). Then we "steal" the object. It is a read-only operation -performed by peeking on the foreign thread's data. The operation -involves making a duplicate of the original copy if it was in the -foreign thread's nursery (so that no thread ever reads another thread's -nursery, outside of "stealing"). The stolen copy, or the original -protected copy if it was not in the nursery, is then marked as public. -From now on nobody is allowed to change the content of this copy, and it -becomes the current public copy. 
These public copies accumulate: every -time the same object is stolen by a different thread, a new public copy -is made (so that unrelated threads don't have to worry about existing -public copies being written to). +2. When we are running a thread, it may try to access a public object +but find that another thread (the "foreign thread") has committed +changes to it . Then we "steal" the object. It is a read-only +operation performed by peeking on the foreign thread's data. The +operation involves making a duplicate of the original copy, if it was in +the foreign thread's nursery (so that no thread ever reads another +thread's nursery, outside of "stealing"). The stolen copy, or the +original protected copy if it was not in the nursery, is then marked as +public. From now on nobody is allowed to change (or free) the content +of this copy, and it becomes the current public copy. These public +copies accumulate: every time the same object is stolen by a different +thread, a new public copy is made (so that unrelated threads don't have +to worry about existing public copies being updated). (This chain of +objects is freed at the next major GC, which is a stop-the-world +operation.) 3. A subtle but important point about making a public copy is about all the references stored in the object: if they point to other protected @@ -83,16 +84,16 @@ "older" public copy of a protected object (although it is not actually older of course). If "we", the thread that just stole the object, then try to follow one of the references, we will access one of these stubs, -and go back to point 1: we will need to steal the target object's -protected copy. +and go back to point 2: stealing the target object's protected copy. -Implementation --------------- +Read/Write Barriers +------------------- This design is made to optimize the hopefully common case: objects we -handle are mostly private or protected. We can design in consequence -the following three points: +handle are mostly private or protected, or if they are public, they are +mostly read-only. We can design in consequence the following three +points: 1. the extra data stored in the objects (GC flags, and one extra word called `h_revision`). @@ -102,433 +103,94 @@ 3. the read/write barriers. Point 3 is essential for performance: we want most importantly a read -barrier that doesn't trigger for the case of reading protected and -private objects, which is supposed to be the most common case. -Moreover, it should also not trigger in the basic case of reading a -never-modified public object. There are basically three cases: +barrier that doesn't trigger for the cases described above. There are +basically three cases: 1. read_barrier(P) = P [if P is directly a non-modified copy] -2. read_barrier(P) = P->h_revision [a protected copy with a private one] +2. read_barrier(P) = P->h_revision [protected copy -> private copy] 3. all other more complex cases, handled by a call. It is possible to compress the first two cases into C code that GCC -compiles into two or three assembler instructions, using a conditional -move. (Still, it is unclear so far if case 2 is worth inlining at every -read barrier site, or should be left to the call.) +compiles into a total of two or three assembler instructions, using a +conditional move. (Still, it is unclear so far if case 2 is worth +inlining at every read barrier site, or should be left to the call.) -The case of the write barrier is similar, but differs in the exact -checks: basically case 1 is only for private objects. 
This could be +The case of the write barrier is similar, but differs in the check for +case 1: this case is reduced to only private objects. This could be done simply by checking a different GC flags. +Note that this design relies on the following property: in a given copy +of an object which was committed at revision N, all pointers points to +copies of objects which were committed at or before revision N. This +property is true by construction, but we must be careful not to break it +by "optimizing" the content of a copy. In particular the GC, during +both minor and major collections, has to preserve this property. - - - ------------- - - - - - - -Independently, each object can be private or non-private (we used to say -local or global). The private objects are the ones belonging to the -transaction currently in progress. The non-private objects belong to -already-committed transactions. - -The concepts of age (old or young) and privacy are kept separated, and -all four combinations can exist: it allows good performance both in case -of a lot of very short transactions and in the case of one very long -transaction. In the first case, we leave most young objects behind in -the nursery, as non-private objects, without moving them (they can still -be freed later by the next minor collection). In the other case, over -the course of several minor collections, we have the same behavior as a -regular two-generational GC system which copies the surviving nursery -objects into old ones; but these objects remain private as long as the -transaction isn't finished. - -We'll also divide the non-private objects in two subcategories: "public" -for the old non-private objects, and "protected" for the young -non-private objects. Protected objects, although not private, are still -protected from direct access from other thread as long as they are in -the nursery. - - - non-private | private - +------------------------------------------------------------ - | - old | public objects | old private objects - ---------| - | - young | [ protected objects | private objects (--> grows) ] - (nursery)| - - -Because in principle no object can be modified in-place once it has been -committed, this limits what pointers it can contain: they must directly -reference other objects that are not more recent than the container. -(But an object may contain a reference to some outdated version of an -object; then we use the normal h_revision link to find the latest -version of the latter.) So there are no regular pointers from a -non-private object to a private object (which are always the most recent -ones from the point of view of their thread). We add another -constraint: we don't want any public object to contain a direct pointer -to a protected object. - -The only way to have a link in the "less-public" direction is via the -h_revision field. The multiple versions of the same higher level object -look like this: - - - [=the static object, if it was prebuilt=] - | - | h_revision - | - `------> [=public1=] - | - | h_revision - | - `------> [=public2=] - | - | h_revision with bit 2 set - | - `------> [=protected3=] - | - | h_revision - | - `------> [=protected4=] - - -The h_revision is normally a regular pointer to the next version. But -one of these links may go from public to protected, which is marked by -having the bit 2 set in h_revision. And the latest object in the chain -has h_revision set to an odd value (bit 1 set), which gives the revision -in which that object was committed. 
- -In addition to the diagram above, a thread may also have one private -version (originally based on the head of the chain, but it might get -out-of-date). This link may be recorded "on-line" [O1] or "off-line" -[O2], see below. When the transaction commits, no object is copied in -memory, but any private object becomes non-private (i.e. protected or -public). If the object has any previous revision, then what is so far -the head of the chain sees its h_revision replaced with a pointer to the -new object. If this creates a public-to-protected link, then its bit 2 -is set. - -Minor collections are mostly regular first-generation collections from -the point of view of the GC: they move all surviving nursery objects -outside. A minor collection always turns all protected objects into -public ones. (In particular, if a transaction started after the most -recent minor collection, then there are no old private objects; -conversely, if it started before the most recent minor collection, then -there are no protected objects.) - -However another process can force a public copy of a protected object: -"stealing". Stealing is triggered when thread A attempts to follow the -h_revision, finds its bit 2 set, and finds that it points outside thread -A's own nursery --- and so it is a protected object from a foreign -thread B. In that case, thread A will "steal" the target object's -latest version, making it public. (This is done instead of simply -waiting for thread B's next minor collection, because it can occur in an -arbitrary amount of time: for all we know thread B may be blocked in a -`sleep(100)`.) - -The diagram above becomes like this, after thread A starts with a -pointer to "public1" and attempts to find its later version: - - - [=public1=] - | - | h_revision - | - `------> [=public2=] - | - | h_revision - | - | - | [=protected3=] - | | - | | h_revision - | | - | `------> [=protected4=] - | . - | . h_revision - | . - `----------------------------> [=public5=] - - -with a new copy "public5" of the object being created outside the -nursery. Afterwards, the h_revision of "public2" points directly to -"public5", and so any future access to the object from "public1" or -"public2" will work directly. (The dotted h_revision link from -"protected4" is not actually written by the stealing thread; it is only -recorded for the original thread to do later [O5].) - -The nursery objects are not written at all by the stealing process. No -write to nursery objects ever occurs from other threads, and the only -reads occur during stealing. But say we have protected objects in a -thread A, and want to steal one of them from a thread B. This operation -still requires care: mostly, we have to be sure that the objects won't -be freed concurrently by thread A while we read them in thread B. This -is done by acquiring thread A's collection lock from thread B. This -lock is also acquired by thread A itself during its own minor -collections. So stealing cannot occur in parallel with thread A running -a minor collection (or thread C stealing from the same thread A), but it -can occur in parallel with thread A's normal execution. This compromize -should be fine: if thread A is currently doing a minor collection, then -thread B can as well wait a bit until it is finished and then try again: -afterwards, all of thread A's protected objects will have become public -anyway. - - -+- - -No public object may contain a reference to a protected object. This -adds a problem that we ignored so far. 
Fresh public objects are created -by three processes: 1. when we commit; 2. when stealing; 3. when we do -a minor collection. The 3rd case is ignored here because there are no -protected objects after a minor collection. - -1. When we commit, all our private objects become non-private. In particular, -if there are old private objects, they become public. Of course this -problem doesn't exist if there are no old private objects, which is the -common case if we're doing a lot of small transactions: no object can be -old and private if the current transaction started after the most recent -minor collection. The case that pauses problem here is the opposite: a -minor collection occurred during the present transaction. In this case -there are no protected object so far: the nursery contains only private -objects. - -So committing creates newly protected and public objects; any of these -can contain references to any other, including "newly public -> newly -protected" --- i.e. old -> young. Fortunately, we keep track of such -references anyway [O3] for the purpose of the generational garbage -collection (by having our STM-specific write barrier work in this case -like a traditional generational write barrier). - -The 2nd case is stealing: it produces a new, public revision (this is -"public5" in the example above). If we simply copy its content from -"protected4" then it will likely contain references to other protected -objects. - -In both cases we know exactly which objects must be fixed. There are -two different behaviors that can be implemented (or maybe some -combination of both is best). The first option is that for any -reference that still goes to a protected object, we allocate a new -public revision of that object, and make the reference go to that. We -end up with repeating the same process recursively --- this makes a new -public copy of the protected object, which may contain more references -to protected objects, so we make public copies of these ones too, and so -on. The second option is to stop this recursion: instead of allocating -a full public revision, we allocate a public stub that contains only an -h_revision pointing to the protected version, with the bit 2 set. This -works because no pointer to the stub ever escapes the STM subsystem. - - -+- - -The age of an object is implicit. Each thread has enough data to know -if an object is young for this thread or not: whether its address falls -inside our nursery or not. (In practice we also need to check a small -set of extra young objects, the ones that have been allocated outside -the nursery.) - -Private objects are fully read-write but only visible from one thread. -Public objects are visible by any thread, and are theoretically -read-only. Protected objects are intermediate. In more details: - -~~ Private objects ~~ - -Private objects cannot be accessed from other threads. From their own -thread, they are distinguished by having a particular value in -h_revision. We use a negative odd number. When the transaction -commits, we change the number that we use. All previous private -objects' h_revision field is no longer equal to this number, so they -automatically become non-private (without needing to enumerate them -all). - -The number is negative, which is already correct at commit time for all -objects that are new during this transaction: a negative odd number is -"infinitely old", i.e. older than any real revision number (a positive -odd number). This is fine for the first revision of any object. 
For -the objects that are not first revision (i.e. that have not been created -during this transaction but merely marked as modified during this -transaction), we have to replace h_revision with the real revision -number obtained during commit. - -~~ Public objects ~~ - -In public objects, h_revision is initially set as described above, to -the revision in which the object was created (an odd number). When one -thread is about to commit a new revision of this public object, it -changes atomically the h_revision field of the public object to a -thread-specific "locked" value. After checks to ensure that the whole -commit is consistent, h_revision is again replaced with a pointer to the -new revision. If this new revision is protected, then we set the bit 2 -of the pointer, as described above. - -The GC flags in the header of a public object are never modified, expect -as follows: the flag GCFLAG_PUBLIC_TO_PRIVATE is added when one thread -makes a private copy of this public object. As it is the only change -that can occur --- writing a value equal to the old flags combination -plus this particular flag --- it doesn't need special protection. It is -used as an optimization in the read barrier: if we have an object -without this flag, then we don't have to look in off-line dictionaries -[O2] to find if we made a private copy. - -~~ Protected objects ~~ - -In protected objects, access from other thread is more restricted. This -means that we can directly use h_revision to point to a private copy as -soon as there is one. We need to keep somewhere else (see [O1]) the -overwritten value of h_revision, which gives the revision number at -which the protected object was created; it needs to be written back in -case of a transaction abort. - -The GC flags in the header of a protected object may only be modified by -the thread that protects it (not by a stealing thread). (In fact it's -not clear so far that they ever need to be modified at all.) - - -+- - -Stealing: whenever we encounter an h_revision with the bit 2 set, we -have to check the target object *without reading it*. This is essential -because the target object may belong to a different thread, and anything -can occur concurrently (e.g. the nursery where it lives may be cleared). -This is done by checking in which nursery the address falls: first we -check our own nursery, and then if not, each other thread's nursery. - -In the latter case we enter stealing mode by acquiring the collection -lock of the target thread. With this lock we can then read some of the -off-line data of the target thread. This requires the target thread to -guarantee that it does not change this data concurrently, but only -when it has itself acquired the collection lock. - - -+- - -Details of the data structures +The extendable timestamp model ------------------------------ -Each thread has its own `struct tx_descriptor` structure to store its -STM- and GC-related thread-local metadata off-line. In particular, it -stores all data about the STM status of public objects (e.g. whether -they currently have a private copy). +A public object copy is either up-to-date (no more recent committed +copy) or outdated. The public copies need to have globally consistent +revision numbers (produced at commit time). If there are several copies +of the same object, we only need to store the revision number of the +most recent one. The previous copies are simply outdated and need +instead to store a pointer to a more recent copy. 
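Continuing the same toy model, the commit-time handling of a public object's h_revision described above (temporarily replacing the odd revision number with a thread-specific "locked" value, then installing the pointer to the new revision) can be sketched as follows; compare_and_swap here is only a stand-in for the atomic instruction used by the real C code:

    import threading

    _cas_lock = threading.Lock()      # stand-in for a hardware compare-and-swap

    def compare_and_swap(obj, field, old, new):
        with _cas_lock:
            if getattr(obj, field) == old:
                setattr(obj, field, new)
                return True
            return False

    LOCKED = ('locked-by-me',)        # the thread-specific "locked" marker

    def lock_for_commit(public_obj, expected_revision):
        # atomically replace the odd revision number with the locked marker;
        # failure means another thread is committing (or has committed) first
        return compare_and_swap(public_obj, 'h_revision', expected_revision, LOCKED)

    def publish_new_revision(public_obj, new_copy, new_copy_is_protected):
        # after the whole commit has been validated, install the pointer;
        # bit 2 ('ptr2') is set when the new revision is still protected
        public_obj.h_revision = ('ptr2', new_copy) if new_copy_is_protected else ('ptr', new_copy)

    def abort_commit(public_obj, old_revision):
        public_obj.h_revision = old_revision       # restore the previous value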
-The object themselves have two words each: a GC header, and the -h_revision number/pointer. The GC header stores the type id of the -object, and additional GC flags. +The important property is that committed transactions must be +"linearized": when we look at them a posteriori, it must be as if they +ran serially in some order. This includes the reads done during the +transaction: they must return data from the most recently committed copy +of the objects (in the same order). This is done with a shared global +variable, the "global time", that gives the most recently committed +revision number. Each transaction in progress stores a temporary +"starting time". It is initially set to the current global time. If, +at the end of the transaction, all objects read during the transaction +have a revision not greater than this starting time, then we have no +"read-write" conflict (i.e. reads of an object that another thread has +modified; there are also "write-write" conflicts). An improvement over +this basic model is that if, during the transaction, we are about to +read a new object and detect a read-write conflict, we can try to +"extend" the starting time to the value that is now stored in the global +time variable. If none of the objects that we have read previously have +been modified in the interval, then the transaction would have given the +same results if it had started at the new time. -The rest is data structures in the `struct tx_descriptor`: +The model described above is known in the literature as the "extendable +timestamp" model. We apply it for public object. It can however be +tweaked for protected objects. -- [O1] protected_with_private_copy: this is a list of all protected - objects with a private copy. In addition, we eagerly replace the - h_revision of the protected object with a pointer to the private - object. It speeds up the read barrier in the common case where the - chain of objects ends in a protected object. But we need to store the - original value of this h_revision field somewhere else; this is needed - to be able to restore it in case of abort, as well as when stealing, - when we need to know what revision number the protected object had. - To store this, private copies of protected objects are allocated with - an extra word after them. (As the majority of the private objects - are expected to be fresh, rather than the copy of some older version - object, we only need to allocate this extra word in the minority of - cases.) +Pointers and revision numbers on protected objects +-------------------------------------------------- - (Note that the private copies are necessarily young: it's not possible - that protected objects exist at the same time as old private objects.) +In traditional transactional systems, we have a special case to speed up +transactions that don't do any write; but that seems too restricted to +be useful in PyPy. Instead, we can have a special case for transactions +that don't write to any *public* object. Our assumption is that +transactions can be anywhere from very small to very large; the small +ones are unlikely to change any object that has been seen by another +thread. Moreover the previous transaction in the same thread is +unlikely to have got one of its objects stolen. - Access pattern: [O1] is only ever accessed by the local thread. The - extra word after the private copy is written once, _before_ we change - h_revision in the protected object; it may subsequently be read freely - by that thread _and_ when stealing is in progress. 
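A compact way to see the extendable-timestamp rule is the following sketch; global_time, the starting time and the read set are named after the description above, while committed_revision is an invented attribute standing for the revision of an object's most recent committed copy:

    class Transaction(object):
        def __init__(self, global_time):
            self.start_time = global_time   # snapshot of the global time at start
            self.read_set = []              # public objects read so far

        def about_to_read(self, obj, global_time):
            if obj.committed_revision <= self.start_time:
                self.read_set.append(obj)
                return True                 # no read-write conflict
            # conflict: try to extend the starting time instead of aborting
            new_start = global_time
            if all(o.committed_revision <= self.start_time for o in self.read_set):
                # nothing already read has been modified since start_time, so the
                # transaction would have given the same results had it started now
                self.start_time = new_start
                if obj.committed_revision <= new_start:
                    self.read_set.append(obj)
                    return True
            return False                    # genuine conflict: abort and retry

The same validation is repeated once more over the whole read set at commit time, as described above.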
+To cover this case more efficiently, we assign in theory to each +committed transaction a pair of numbers. The first one is the regular +global time at which the transaction was committed. The second one is a +thread-local number (never actually made explicit in the code). The +global order of the committed transactions is given by the ordering of +these 2-tuples. +A commit with writes to public objects works as described above; it +gives the transaction the number `(global_time, 0)`, and atomically +increments `global_time`. -- [O2] public_to_private: a dictionary that maps public objects to their - private version. Public objects that may be keys in such a dictionary - have GCFLAG_PUBLIC_TO_PRIVATE. This dict [O2] has the same purpose as - the list [O1], but it differs in the representation: the public - objects are seen by all threads, and so we cannot store anything - globally on them. - - Access pattern: [O2] is only ever accessed by the local thread. - - -- [O3] private_old_pointing_to_young: a list of old private objects that - may contain pointers to young private objects. Used for minor - collections. - - Access pattern: [O3] is only ever accessed by the local thread. - - -- [O4] public_to_young: a list of all public objects with a - corresponding young object. This list starts with the public objects - whose h_revision references a protected object (with bit 2 set), and - continues with the public objects that have a corresponding _young_ - private object (a subset of the keys of `public_to_private`). This is - useful information when doing a minor collection. The point of - storing both kinds of objects in the same list is that after each - commit, all objects of the 2nd kind become objects of the 1st kind --- - which is implemented simply by moving the boundary to the end of the - list. - - Access pattern: [O4] is only ever accessed by the local thread. - - -- [O5] stolen_objects: a list of protected objects that have been - stolen, together with the new public copy. This list is written by - the stealing thread, and it's up to the local thread to notice that it - is not empty and to fix the situation. It should be checked at the - latest at commit time, but also when building a private copy of an - existing object. These two cases require acquiring the local - collection lock. - - (Additionally, there is the risk of not noticing a stolen object in - that list, and continuing to run a transaction for a long time after - some other thread committed a new revision of the public object. To - avoid this, we start the read/write barriers by checking if this list - is non-empty. This can be done without acquiring the lock, as it is - not needed for correctness; so it is extremely cheap.) - - Access pattern: only threads that have acquired the local collection - lock can access [O5] (apart from the check for non-emptiness described - in the previous paragraph). - - -Major and minor collections ---------------------------- - -Major collections occur to free the memory used by old objects. For now -we assume that major collections are rare enough, and simply synchronize -all threads. The thread that initiates a major collection first checks -that each thread ran a minor collection just before (and does it on its -behalf if not, e.g. if it is blocked in a system call). Afterwards, -major collection only has to deal with old objects (public or -private-to-some-thread). - -As a first approximation major collection is a regular, non-concurrent, -non-parallel GC. 
(It could be made parallel relatively easily, by using -the other waiting threads to help, rather than just have them wait. -More subtly, in a few cases we could interrupt other threads' system -calls to let them help, too.) - -The unusual characteristic of our GC is that in our case we can compress -the h_revision chains, thus freeing more objects. This is true for both -minor and major collections. During major collections, we need to keep -alive the latest public version of a surviving object, as well as the -private versions, if any. The older versions can be freed. We need a -bit of care to adjust pointers everywhere. Additionally, this process -might occasionally figure out that a transaction in progress is actually -going to abort in the future (because e.g. it has got an object in its -read set that has a more recent committed revision). Such transactions -can be aborted now. - -Similarly, during minor collections we only need to keep the most recent -protected revision of an object, as well as the young private version, -if any. An important point about minor collections is that they only -look at their own thread's nursery, and so can occur without any -cross-thread synchronization. (If it is useful, we could also figure -out a case where the current transaction is going to abort in the -future: when objects listed as keys in `public_to_private` have become -supersceded by a more recent commit.) +A commit with no write to any public object produces the number +`(start_time, N+1)`, provided that we didn't have any object stolen +since `start_time`. This condition is enough to guarantee that it is ok +to linearize the transaction at `(start_time, N+1)` even though other +threads might already have produced transactions linearized at some +greater time. Indeed, the fact that no object of ours was stolen +means that no other thread's transaction depends on any object we're +about to commit. From noreply at buildbot.pypy.org Sat Jun 1 20:24:41 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 1 Jun 2013 20:24:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Add an explicit error message when trying to build on Win64. Message-ID: <20130601182441.4F6441C30D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64702:b0201e19a481 Date: 2013-06-01 20:24 +0200 http://bitbucket.org/pypy/pypy/changeset/b0201e19a481/ Log: Add an explicit error message when trying to build on Win64. diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -28,6 +28,8 @@ return _get_compiler_type(cc, False) def Windows_x64(cc=None): + raise Exception("Win64 is not supported. 
You must either build for Win32" + " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) def _get_msvc_env(vsver, x64flag): From noreply at buildbot.pypy.org Sat Jun 1 21:11:02 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Jun 2013 21:11:02 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes3: merge default into branch Message-ID: <20130601191102.C75761C1120@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes3 Changeset: r64703:416013f3e0de Date: 2013-06-01 22:10 +0300 http://bitbucket.org/pypy/pypy/changeset/416013f3e0de/ Log: merge default into branch diff too long, truncating to 2000 out of 7875 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. 
+ so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,20 +134,22 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', +} +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, } def getLevelName(level): @@ -164,7 +166,7 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + return _levelToName.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ @@ -174,8 +176,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
- _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +185,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +279,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,16 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - try: - self._sock._decref_socketios() - except AttributeError: - pass # bah, someone built a _fileobject manually - # with some unexpected replacement of the - # _socketobject class + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -156,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -701,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -74,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -168,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -390,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -437,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -450,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -475,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
@@ -486,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -32,11 +32,10 @@ "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "array", + "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv"] # "cpyext", "cppyy"] -# disabled until problems are fixed + "_continuation", "_cffi_backend", "_csv", "cppyy"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -5,7 +5,7 @@ Purpose ------- -This document describes an FFI for RPython language, concentrating +This document describes an FFI for the RPython language, concentrating on low-level backends like C. It describes how to declare and call low-level (C) functions from RPython level. @@ -50,7 +50,7 @@ ------ In rffi_ there are various declared types for C-structures, like CCHARP -(char*), SIZE_T (size_t) and others. refer to file for details. +(char*), SIZE_T (size_t) and others. Refer to file for details. Instances of non-primitive types must be alloced by hand, with call to lltype.malloc, and freed by lltype.free both with keyword argument flavor='raw'. There are several helpers like string -> char* diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -34,3 +34,15 @@ .. branch: remove-iter-smm Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. 
branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -86,12 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - w_type = space.type(w_stararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after * must be " - "a sequence, not %s" % (typename,))) + "argument after * must be a sequence, not %T", w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -116,12 +113,10 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - w_type = space.type(w_starstararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after ** must be " - "a mapping, not %s" % (typename,))) + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2793,8 +2793,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2835,8 +2834,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2881,8 +2879,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2925,8 +2922,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2971,8 +2967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2993,8 +2988,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - 
typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3024,8 +3018,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3046,8 +3039,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3064,8 +3056,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3081,8 +3072,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3131,8 +3121,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3149,8 +3138,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3166,8 +3154,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3183,8 +3170,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3234,8 +3220,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3278,8 +3263,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3320,8 +3304,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3341,8 +3324,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3391,8 +3373,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3415,8 +3396,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3439,8 +3419,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3489,8 +3468,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'dest') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3509,8 +3487,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3530,8 +3507,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3579,8 +3555,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3603,8 +3578,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3623,8 +3597,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3640,8 +3613,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3690,8 +3662,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3710,8 +3681,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: 
list_w = [] @@ -3727,8 +3697,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3776,8 +3745,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3796,8 +3764,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3813,8 +3780,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3862,8 +3828,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3886,8 +3851,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3906,8 +3870,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3954,8 +3917,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3978,8 +3940,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = 
space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -4002,8 +3963,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4048,8 +4008,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4065,8 +4024,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4082,8 +4040,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4128,8 +4085,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4145,8 +4101,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4193,8 +4148,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4217,8 +4171,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') + 
raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4262,8 +4215,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4308,8 +4260,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4329,8 +4280,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4350,8 +4300,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4399,8 +4348,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4423,8 +4371,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4447,8 +4394,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4493,8 +4439,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if 
w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4539,8 +4484,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4638,8 +4582,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4660,8 +4603,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4691,8 +4633,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4711,8 +4652,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4758,8 +4698,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4782,8 +4721,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4806,8 +4744,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4856,8 +4793,7 @@ if w_obj is 
not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4880,8 +4816,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4929,8 +4864,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4951,8 +4885,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -5000,8 +4933,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -5024,8 +4956,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -5048,8 +4979,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5094,8 +5024,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5111,8 +5040,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5155,8 +5083,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5201,8 +5128,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5221,8 +5147,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5268,8 +5193,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5288,8 +5212,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5335,8 +5258,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5359,8 +5281,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5379,8 +5300,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5427,8 +5347,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5447,8 +5366,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5494,8 +5412,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5542,8 +5459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5562,8 +5478,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5579,8 +5494,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5628,8 +5542,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5648,8 +5561,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') if w_self.w_args 
is None: if w_self.args is None: list_w = [] @@ -5665,8 +5577,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5686,8 +5597,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5710,8 +5620,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5764,8 +5673,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5812,8 +5720,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5858,8 +5765,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5904,8 +5810,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5928,8 +5833,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5950,8 +5854,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - 
typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -6000,8 +5903,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -6024,8 +5926,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -6048,8 +5949,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -6098,8 +5998,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6120,8 +6019,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6165,8 +6063,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6186,8 +6083,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6232,8 +6128,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6253,8 +6148,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6303,8 +6197,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6422,8 +6315,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lower') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6446,8 +6338,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'upper') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6470,8 +6361,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'step') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6516,8 +6406,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dims') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6562,8 +6451,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6834,8 +6722,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' 
object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6858,8 +6745,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6878,8 +6764,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ifs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6926,8 +6811,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6948,8 +6832,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6979,8 +6862,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -7003,8 +6885,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -7023,8 +6904,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -7067,8 +6947,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') if w_self.w_args is 
None: if w_self.args is None: list_w = [] @@ -7088,8 +6967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'vararg') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -7113,8 +6991,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwarg') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7134,8 +7011,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'defaults') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7184,8 +7060,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'arg') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7206,8 +7081,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7255,8 +7129,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7277,8 +7150,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'asname') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -11,7 +11,6 @@ from pypy.interpreter.astcompiler import optimize # For side effects from pypy.interpreter.pyparser.error import SyntaxError from pypy.tool import stdlib_opcode as ops -from pypy.interpreter.error import OperationError def compile_ast(space, module, info): @@ -21,91 +20,91 @@ 
name_ops_default = misc.dict_to_switch({ - ast.Load : ops.LOAD_NAME, - ast.Store : ops.STORE_NAME, - ast.Del : ops.DELETE_NAME + ast.Load: ops.LOAD_NAME, + ast.Store: ops.STORE_NAME, + ast.Del: ops.DELETE_NAME }) name_ops_fast = misc.dict_to_switch({ - ast.Load : ops.LOAD_FAST, - ast.Store : ops.STORE_FAST, - ast.Del : ops.DELETE_FAST + ast.Load: ops.LOAD_FAST, + ast.Store: ops.STORE_FAST, + ast.Del: ops.DELETE_FAST }) name_ops_deref = misc.dict_to_switch({ - ast.Load : ops.LOAD_DEREF, - ast.Store : ops.STORE_DEREF, + ast.Load: ops.LOAD_DEREF, + ast.Store: ops.STORE_DEREF, }) name_ops_global = misc.dict_to_switch({ - ast.Load : ops.LOAD_GLOBAL, - ast.Store : ops.STORE_GLOBAL, - ast.Del : ops.DELETE_GLOBAL + ast.Load: ops.LOAD_GLOBAL, + ast.Store: ops.STORE_GLOBAL, + ast.Del: ops.DELETE_GLOBAL }) unary_operations = misc.dict_to_switch({ - ast.Invert : ops.UNARY_INVERT, - ast.Not : ops.UNARY_NOT, - ast.UAdd : ops.UNARY_POSITIVE, - ast.USub : ops.UNARY_NEGATIVE + ast.Invert: ops.UNARY_INVERT, + ast.Not: ops.UNARY_NOT, + ast.UAdd: ops.UNARY_POSITIVE, + ast.USub: ops.UNARY_NEGATIVE }) binary_operations = misc.dict_to_switch({ - ast.Add : ops.BINARY_ADD, - ast.Sub : ops.BINARY_SUBTRACT, - ast.Mult : ops.BINARY_MULTIPLY, - ast.Mod : ops.BINARY_MODULO, - ast.Pow : ops.BINARY_POWER, - ast.LShift : ops.BINARY_LSHIFT, - ast.RShift : ops.BINARY_RSHIFT, - ast.BitOr : ops.BINARY_OR, - ast.BitAnd : ops.BINARY_AND, - ast.BitXor : ops.BINARY_XOR, - ast.FloorDiv : ops.BINARY_FLOOR_DIVIDE + ast.Add: ops.BINARY_ADD, + ast.Sub: ops.BINARY_SUBTRACT, + ast.Mult: ops.BINARY_MULTIPLY, + ast.Mod: ops.BINARY_MODULO, + ast.Pow: ops.BINARY_POWER, + ast.LShift: ops.BINARY_LSHIFT, + ast.RShift: ops.BINARY_RSHIFT, + ast.BitOr: ops.BINARY_OR, + ast.BitAnd: ops.BINARY_AND, + ast.BitXor: ops.BINARY_XOR, + ast.FloorDiv: ops.BINARY_FLOOR_DIVIDE }) inplace_operations = misc.dict_to_switch({ - ast.Add : ops.INPLACE_ADD, - ast.Sub : ops.INPLACE_SUBTRACT, - ast.Mult : ops.INPLACE_MULTIPLY, - ast.Mod : ops.INPLACE_MODULO, - ast.Pow : ops.INPLACE_POWER, - ast.LShift : ops.INPLACE_LSHIFT, - ast.RShift : ops.INPLACE_RSHIFT, - ast.BitOr : ops.INPLACE_OR, - ast.BitAnd : ops.INPLACE_AND, - ast.BitXor : ops.INPLACE_XOR, - ast.FloorDiv : ops.INPLACE_FLOOR_DIVIDE + ast.Add: ops.INPLACE_ADD, + ast.Sub: ops.INPLACE_SUBTRACT, + ast.Mult: ops.INPLACE_MULTIPLY, + ast.Mod: ops.INPLACE_MODULO, + ast.Pow: ops.INPLACE_POWER, + ast.LShift: ops.INPLACE_LSHIFT, + ast.RShift: ops.INPLACE_RSHIFT, + ast.BitOr: ops.INPLACE_OR, + ast.BitAnd: ops.INPLACE_AND, + ast.BitXor: ops.INPLACE_XOR, + ast.FloorDiv: ops.INPLACE_FLOOR_DIVIDE }) compare_operations = misc.dict_to_switch({ - ast.Eq : 2, - ast.NotEq : 3, - ast.Lt : 0, - ast.LtE : 1, - ast.Gt : 4, - ast.GtE : 5, - ast.In : 6, - ast.NotIn : 7, - ast.Is : 8, - ast.IsNot : 9 + ast.Eq: 2, + ast.NotEq: 3, + ast.Lt: 0, + ast.LtE: 1, + ast.Gt: 4, + ast.GtE: 5, + ast.In: 6, + ast.NotIn: 7, + ast.Is: 8, + ast.IsNot: 9 }) subscr_operations = misc.dict_to_switch({ - ast.AugLoad : ops.BINARY_SUBSCR, - ast.Load : ops.BINARY_SUBSCR, - ast.AugStore : ops.STORE_SUBSCR, - ast.Store : ops.STORE_SUBSCR, - ast.Del : ops.DELETE_SUBSCR + ast.AugLoad: ops.BINARY_SUBSCR, + ast.Load: ops.BINARY_SUBSCR, + ast.AugStore: ops.STORE_SUBSCR, + ast.Store: ops.STORE_SUBSCR, + ast.Del: ops.DELETE_SUBSCR }) slice_operations = misc.dict_to_switch({ - ast.AugLoad : ops.SLICE, - ast.Load : ops.SLICE, From noreply at buildbot.pypy.org Sat Jun 1 21:45:06 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Jun 2013 21:45:06 +0200 (CEST) 
Subject: [pypy-commit] pypy win32-fixes3: document branch Message-ID: <20130601194506.B8DF11C0196@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes3 Changeset: r64704:6f7a4a142276 Date: 2013-06-01 22:42 +0300 http://bitbucket.org/pypy/pypy/changeset/6f7a4a142276/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -46,3 +46,7 @@ .. branch: operrfmt-NT Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + From noreply at buildbot.pypy.org Sat Jun 1 21:45:07 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Jun 2013 21:45:07 +0200 (CEST) Subject: [pypy-commit] pypy win32-fixes3: close branch to be merged Message-ID: <20130601194507.E7F531C0690@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: win32-fixes3 Changeset: r64705:8ea86639090a Date: 2013-06-01 22:43 +0300 http://bitbucket.org/pypy/pypy/changeset/8ea86639090a/ Log: close branch to be merged From noreply at buildbot.pypy.org Sat Jun 1 21:45:09 2013 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Jun 2013 21:45:09 +0200 (CEST) Subject: [pypy-commit] pypy default: merge win32-fixes3 into default Message-ID: <20130601194509.704241C1107@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r64706:216ba1e7cb65 Date: 2013-06-01 22:44 +0300 http://bitbucket.org/pypy/pypy/changeset/216ba1e7cb65/ Log: merge win32-fixes3 into default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -46,3 +46,7 @@ .. branch: operrfmt-NT Adds a couple convenient format specifiers to operationerrfmt + +.. 
branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -578,6 +578,11 @@ class TestNonInteractive: def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) print 'POPEN:', cmdline @@ -706,6 +711,11 @@ assert 'copyright' not in data def test_non_interactive_stdout_fully_buffered(self): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers @@ -726,6 +736,11 @@ def test_non_interactive_stdout_unbuffered(self, monkeypatch): monkeypatch.setenv('PYTHONUNBUFFERED', '1') + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -41,6 +41,10 @@ assert 'LOG_NOTICE' in d def test_resource(): + try: + import lib_pypy.resource + except ImportError: + py.test.skip('no syslog on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -9,7 +9,7 @@ link_files = [] include_dirs = [] if sys.platform == 'win32' and platform.name != 'mingw32': - libraries = ['libeay32', 'ssleay32', + libraries = ['libeay32', 'ssleay32', 'zlib1', 'user32', 'advapi32', 'gdi32', 'msvcrt', 'ws2_32'] includes = [ # ssl.h includes winsock.h, which will conflict with our own diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -103,11 +103,10 @@ _set_errno(rffi.cast(INT, errno)) if os.name == 'nt': - is_valid_fd = rffi.llexternal( + is_valid_fd = jit.dont_look_inside(rffi.llexternal( "_PyVerify_fd", [rffi.INT], rffi.INT, compilation_info=errno_eci, - ) - @jit.dont_look_inside + )) def validate_fd(fd): if not is_valid_fd(fd): raise OSError(get_errno(), 'Bad file descriptor') diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -10,7 +10,7 @@ if compiler.name == "msvc": - libname = 'zlib' + libname = 'zlib1' # since version 1.1.4 and later, see http://www.zlib.net/DLL_FAQ.txt else: libname = 'z' eci = ExternalCompilationInfo( diff --git a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -288,9 +288,6 @@ assert a % struct.calcsize("P") == 0 def test_external_lib(): - # XXX this one seems to be a bit too platform-specific. Check - # how to test it on windows correctly (using so_prefix?) 
- # and what are alternatives to LD_LIBRARY_PATH eci = ExternalCompilationInfo() c_source = """ int f(int a, int b) @@ -298,12 +295,17 @@ return (a + b); } """ + if platform.name == 'mscv': + c_source = '__declspec(dllexport) ' + c_source + libname = 'libc_lib' + else: + libname = 'c_lib' tmpdir = udir.join('external_lib').ensure(dir=1) c_file = tmpdir.join('libc_lib.c') c_file.write(c_source) l = platform.compile([c_file], eci, standalone=False) eci = ExternalCompilationInfo( - libraries = ['c_lib'], + libraries = [libname], library_dirs = [str(tmpdir)] ) rffi_platform.verify_eci(eci) diff --git a/rpython/translator/c/gcc/test/test_asmgcroot.py b/rpython/translator/c/gcc/test/test_asmgcroot.py --- a/rpython/translator/c/gcc/test/test_asmgcroot.py +++ b/rpython/translator/c/gcc/test/test_asmgcroot.py @@ -25,8 +25,8 @@ @classmethod def make_config(cls): - if _MSVC and _WIN64: - py.test.skip("all asmgcroot tests disabled for MSVC X64") + if _MSVC: + py.test.skip("all asmgcroot tests disabled for MSVC") from rpython.config.translationoption import get_combined_translation_config config = get_combined_translation_config(translating=True) config.translation.gc = cls.gcpolicy From noreply at buildbot.pypy.org Sun Jun 2 10:04:17 2013 From: noreply at buildbot.pypy.org (juanfra684) Date: Sun, 2 Jun 2013 10:04:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixes to OpenBSD. Message-ID: <20130602080417.757971C01E5@cobra.cs.uni-duesseldorf.de> Author: Juan Francisco Cantero Hurtado Branch: Changeset: r64707:e92ef8459498 Date: 2013-06-02 05:40 +0200 http://bitbucket.org/pypy/pypy/changeset/e92ef8459498/ Log: Fixes to OpenBSD. - Fixes the order of CFLAGS and LDFLAGS. - Uses the compiler within $CC with a fallback to cc. Both changes make the work of the packagers easier. diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -5,11 +5,16 @@ from rpython.translator.platform.bsd import BSD class OpenBSD(BSD): - DEFAULT_CC = "cc" + if os.environ.get("CC") is None: + DEFAULT_CC = "cc" + else: + DEFAULT_CC = os.environ.get("CC") + name = "openbsd" - link_flags = os.environ.get("LDFLAGS", '-pthread').split() - cflags = os.environ.get("CFLAGS", "-O3 -pthread -fomit-frame-pointer -D_BSD_SOURCE").split() + link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] + cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-D_BSD_SOURCE' + ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): libraries=set(libraries + ("intl", "iconv", "compat")) From noreply at buildbot.pypy.org Sun Jun 2 10:25:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 10:25:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix _testcapi and _test_ctypes after 722471a15693, sorry. Message-ID: <20130602082500.039631C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64708:a674c4a68284 Date: 2013-06-02 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/a674c4a68284/ Log: Fix _testcapi and _test_ctypes after 722471a15693, sorry. 
diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,6 +1,12 @@ -import os, sys +import os, sys, imp import tempfile +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + def compile_shared(): """Compile '_ctypes_test.c' into an extension module, and import it """ @@ -8,7 +14,6 @@ output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -25,7 +30,7 @@ object_filename = res[0] # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') + output_filename = '_ctypes_test' + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,6 +1,12 @@ -import os, sys +import os, sys, imp import tempfile +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + def compile_shared(): """Compile '_testcapi.c' into an extension module, and import it """ @@ -8,7 +14,6 @@ output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -25,7 +30,7 @@ object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = '_testcapi' + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') From noreply at buildbot.pypy.org Sun Jun 2 10:25:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 10:25:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill code duplication. Message-ID: <20130602082501.5EDF71C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64709:18c04dc498b8 Date: 2013-06-02 10:28 +0200 http://bitbucket.org/pypy/pypy/changeset/18c04dc498b8/ Log: Kill code duplication. 
diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,65 +1,7 @@ -import os, sys, imp -import tempfile - -def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + _get_c_extension_suffix() - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -67,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,8 +7,9 @@ return ext -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. 
""" thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() @@ -24,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + _get_c_extension_suffix() + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -42,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -54,14 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -try: - import cpyext -except ImportError: - raise ImportError("No module named '_testcapi'") -else: - compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,67 +1,7 @@ -import os, sys, imp -import tempfile - -def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + _get_c_extension_suffix() - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - 
imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') From noreply at buildbot.pypy.org Sun Jun 2 12:16:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 12:16:47 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1507: silence "not holding the import lock" after a fork(). Message-ID: <20130602101647.573911C0FFF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64710:51f304544a4b Date: 2013-06-02 12:15 +0200 http://bitbucket.org/pypy/pypy/changeset/51f304544a4b/ Log: issue #1507: silence "not holding the import lock" after a fork(). diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -791,6 +791,10 @@ def release_lock(self): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: + if self.lockowner is None: + # Too bad. This situation can occur if a fork() occurred + # with the import lock held, and we're the child. + return if not self._can_have_lock(): return space = self.space From noreply at buildbot.pypy.org Sun Jun 2 13:31:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 13:31:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Backward compatibility Message-ID: <20130602113156.0FF361C0FFF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64711:3cdac1ac25eb Date: 2013-06-02 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/3cdac1ac25eb/ Log: Backward compatibility diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -151,6 +151,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ From noreply at buildbot.pypy.org Sun Jun 2 18:32:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 18:32:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for x86/test/test_fficall on Windows: use a value for "abi" that Message-ID: <20130602163250.972DF1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64712:663474f13069 Date: 2013-06-02 18:32 +0200 http://bitbucket.org/pypy/pypy/changeset/663474f13069/ Log: Fix for x86/test/test_fficall on Windows: use a value for "abi" that makes sense for the backend in this test. 
diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -17,7 +17,7 @@ def get_description(atypes, rtype): p = lltype.malloc(CIF_DESCRIPTION, len(atypes), flavor='raw', immortal=True) - p.abi = 42 + p.abi = 1 # default p.nargs = len(atypes) p.rtype = rtype p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes), From noreply at buildbot.pypy.org Sun Jun 2 19:22:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 19:22:56 +0200 (CEST) Subject: [pypy-commit] stmgc default: Updates Message-ID: <20130602172256.A6CFC1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r55:7076fbc8975d Date: 2013-06-02 19:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/7076fbc8975d/ Log: Updates diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -2,6 +2,55 @@ Details of the interactions between STM and the GC ================================================== + +-------------------------- +Introduction (hand-waving) +-------------------------- + +When we run multiple threads, the common case is to access objects that +have only been seen by the current thread. Accessing the same object +from multiple threads is possible, and handled correctly (that's the +whole point), but a relatively rare case. + +So each object is classified as "public" or "protected". New objects +are protected until they are read by a different thread. The point is +to use very different mechanisms for public and for protected objects. +Public objects are visible by all threads, but read-only in memory; to +change them, a copy must be made, and the changes written to the copy +(the "redolog" approach to STM). Protected objects, on the other hand, +are modified in-place, with (if necessary) a copy of them being made +only for the purpose of a possible abort of the transaction (the +"undolog" approach). + +This is combined with a generational GC similar to PyPy's --- but here, +each thread gets its own nursery and does its own "minor collections", +independently of the others. + +Objects start as protected, and when another thread tries to follow a +pointer to them, then it is that other thread's job to carefully "steal" +the object and turn it public (possibly making a copy of it if needed, +e.g. if it was still a young object living in the original nursery). + +The same object can exist temporarily in multiple versions: any number +of public copies; at most one active protected copy; and optionally one +private copy per thread (this is the copy as currently seen by the +transaction in progress on that thread). The GC cleans up the +unnecessary copies. + +These ideas are basically variants and extensions of the same basic idea +of keeping multiple copies with revision numbers to track them. +Moreover, "read barriers" and "write barriers" are used by the C program +calling into this library in order to be sure that it is accessing the +right version of the object. In the current variant we can have +extremely cheap read barriers, which are definitely a major speed +improvement over the previous variants (and, as far as I know, over most +of the other existing STMs). 
+ + +---------------------- +Details (more precise) +---------------------- + In this document we say "high-level object" to mean an object from the point of the user of the library, as opposed to an "object copy", which is what occupies the space of one allocated piece of memory. One @@ -10,40 +59,41 @@ for a globally consistent copy of all objects. One revision is the result of one transaction. A program usually has one revision per thread in progress, plus any number of older committed revisions. The -committed revisions are globally ordered. +committed revisions are globally ordered. This is the order that the +multithreaded program appears to have followed serially. -The object copies exist in one of three main states: they can be -"private", "protected" or "public". A copy is private when it belongs -to the transaction in progress. When that transaction commits, it -becomes protected, and remains so as long as it is accessed only by the -same thread. A copy becomes public only when another thread requests -access to it (or, more precisely, "steals" access to it). Once public, -a copy is immutable in memory. +The object copies exist in one of two main states: they can be +"protected" or "public". A copy is also called "private" when it was +modified by the transaction in progress; this copy is always protected +and invisible to other threads. When that transaction commits, all +private copies become protected, and remain so as long as it is accessed +only by the same thread. A copy becomes public only when another thread +requests access to it (or, more precisely, "steals" access to it). Once +public, a copy is immutable in memory. From the point of view of the generational GC, each copy is either young or old. All new copies are allocated young. They become old at the next minor collection. In the common case, copies are allocated in the nursery, and during the next minor collection, if they survive, they are moved outside. The nursery contains only young copies, but a few copies -outside might be young too (e.g. copies of objects too large for the +outside might be young too (e.g. object copies too large for the nursery). (In fact we found out in PyPy that it's a good idea to create objects young even if they are outside the nursery; otherwise, a program that creates a lot of medium-sized objects will quickly exhaust the memory and trigger a lot of major collections.) For the rest of this document we'll ignore young copies outside the nursery. -An object that was never seen by a different thread has got at most two -copies: the first, protected, is the copy at the latest committed -revision; and the other, private, is the current copy. If the -transaction aborts we can forget the private copy and reuse the previous -protected copy. If the transaction commits we forget the previous -protected copy instead; then the private copy becomes protected, like -*all* private copies at this point. If the object is modified again in -the near future, we reuse the memory that was occupied by the previous -copy to store the next private copy. As a result, each of these two -spaces in memory can be young or old. If an object is no longer -modified for long enough, the next (minor or major) GC will free one of -the two spaces it uses. 
+An object that was never seen by a different thread has got either one +of two copies, both protected: the "main" one, used by the thread, which +may be private or not depending on whether the object was modified in +the current transaction; and, if the object is private but older than +the current transaction, then it has got a secondary copy whose purpose +is to record the state that the object had at the start of the current +transaction. + +If an object is committed and then no longer modified for long enough, +the next (minor or major) GC will free the space that was used by the +secondary copy. The way to share data between threads goes via prebuilt objects, which are always public: it is their existence that gives the starting point @@ -53,15 +103,15 @@ 1. A thread tries to write to a public object. This is done by allocating a fresh private copy of the public object. Then writes go to the private copy. If the transaction commits, the private copy becomes -protected, and the public object is made to point to it (with +simply protected, and the public object is made to point to it (with multithread care). From now on, any access to the public object from -the same thread will work on the protected object or its future private -copy. Any access from a different thread will trigger "stealing", as -explained next. +the same thread will work on the protected copy. Any access from a +different thread will trigger "stealing", as explained next. 2. When we are running a thread, it may try to access a public object but find that another thread (the "foreign thread") has committed -changes to it . Then we "steal" the object. It is a read-only +changes to it; i.e. the object has a protected copy, but belonging to a +foreign thread. Then we "steal" the object. It is a read-only operation performed by peeking on the foreign thread's data. The operation involves making a duplicate of the original copy, if it was in the foreign thread's nursery (so that no thread ever reads another @@ -69,11 +119,11 @@ original protected copy if it was not in the nursery, is then marked as public. From now on nobody is allowed to change (or free) the content of this copy, and it becomes the current public copy. These public -copies accumulate: every time the same object is stolen by a different -thread, a new public copy is made (so that unrelated threads don't have -to worry about existing public copies being updated). (This chain of -objects is freed at the next major GC, which is a stop-the-world -operation.) +copies accumulate in case the same object is successively stolen by +different threads. A new public copy is made every time, so that +unrelated threads don't have to worry about existing public copies being +updated. (This chain of objects is freed at the next major GC, which is +a stop-the-world operation.) 3. A subtle but important point about making a public copy is about all the references stored in the object: if they point to other protected @@ -103,23 +153,23 @@ 3. the read/write barriers. Point 3 is essential for performance: we want most importantly a read -barrier that doesn't trigger for the cases described above. There are -basically three cases: +barrier that doesn't trigger for the cases described above. The read +barrier needs to check if a pointer P references a public copy that +was outdated by a future revision. This is an easy check, which can +be implemented by checking a flag in the header of the copy. 
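A minimal Python sketch of that check, with an invented flag name (in the real
library this is meant to compile down to a couple of inline instructions rather
than a function call):

    GCFLAG_OUTDATED = 0x01          # hypothetical flag bit in the header

    def read_barrier(p):
        if p.h_flags & GCFLAG_OUTDATED:     # rare case
            p = read_barrier_call(p)        # out-of-line: find the newer copy
        return p

    def read_barrier_call(p):
        raise NotImplementedError("follow p to its most recent copy")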
In all +the common cases, this flag is not set, and no actual call needs to +be done. -1. read_barrier(P) = P [if P is directly a non-modified copy] - -2. read_barrier(P) = P->h_revision [protected copy -> private copy] - -3. all other more complex cases, handled by a call. - -It is possible to compress the first two cases into C code that GCC -compiles into a total of two or three assembler instructions, using a -conditional move. (Still, it is unclear so far if case 2 is worth -inlining at every read barrier site, or should be left to the call.) - -The case of the write barrier is similar, but differs in the check for -case 1: this case is reduced to only private objects. This could be -done simply by checking a different GC flags. +The case of the write barrier is similar, but differs in the check we +need to do. We need to do a call if the object is not already private. +For performance reasons, "being private" is not directly a flag in the +object, because when a transaction commits, we don't want to have to +walk all private objects to change this flag. Instead, private objects +have a precise negative odd number in their `h_revision` field, called +the "local revision number". When a transaction commits, we change the +value of the local revision number, and all previously-private objects +become automatically protected. So the write barrier fast-path checks +if the `h_revision` is equal from the local revision number. Note that this design relies on the following property: in a given copy of an object which was committed at revision N, all pointers points to @@ -134,10 +184,12 @@ A public object copy is either up-to-date (no more recent committed copy) or outdated. The public copies need to have globally consistent -revision numbers (produced at commit time). If there are several copies -of the same object, we only need to store the revision number of the -most recent one. The previous copies are simply outdated and need -instead to store a pointer to a more recent copy. +revision numbers (produced at commit time). If there are several public +copies of the same object, we only need to store the revision number of +the most recent one. The previous copies are simply outdated and need +instead to store a pointer to a more recent copy. We use the same field +`h_revision` to store either the revision number or the pointer to the +more recent copy. The important property is that committed transactions must be "linearized": when we look at them a posteriori, it must be as if they @@ -150,17 +202,20 @@ at the end of the transaction, all objects read during the transaction have a revision not greater than this starting time, then we have no "read-write" conflict (i.e. reads of an object that another thread has -modified; there are also "write-write" conflicts). An improvement over -this basic model is that if, during the transaction, we are about to -read a new object and detect a read-write conflict, we can try to -"extend" the starting time to the value that is now stored in the global -time variable. If none of the objects that we have read previously have -been modified in the interval, then the transaction would have given the -same results if it had started at the new time. +modified; there are also in theory "write-write" conflicts, but this +case can be reduced to read-write conflicts if we consider that all +writes are also reads). 
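A hedged Python sketch of this basic commit-time validation (the names are
invented; the real check works on the object headers, not on Python objects):

    def no_read_write_conflict(start_time, read_set):
        # every object read by the transaction must still be at a revision
        # not greater than our start time, otherwise somebody committed a
        # change to it while we were running
        for obj in read_set:
            if obj.committed_revision > start_time:
                return False        # conflict: the transaction must abort
        return True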
+ +An improvement over this basic model is that if, during the transaction, +we are about to read a new object and detect a read-write conflict, we +can try to "extend" the starting time to the value that is now stored in +the global time variable. We need to check that none of the objects +that we have read previously have been modified in the interval. If +that is the case, then the transaction would have given the same results +if it had started at the new time. The model described above is known in the literature as the "extendable -timestamp" model. We apply it for public object. It can however be -tweaked for protected objects. +timestamp" model. We apply it for public object. Pointers and revision numbers on protected objects @@ -169,7 +224,7 @@ In traditional transactional systems, we have a special case to speed up transactions that don't do any write; but that seems too restricted to be useful in PyPy. Instead, we can have a special case for transactions -that don't write to any *public* object. Our assumption is that +that didn't try to write to any *public* object. Our assumption is that transactions can be anywhere from very small to very large; the small ones are unlikely to change any object that has been seen by another thread. Moreover the previous transaction in the same thread is @@ -182,15 +237,20 @@ global order of the committed transactions is given by the ordering of these 2-tuples. -A commit with writes to public objects works as described above; it +A commit which writes to public objects works as described above; it gives the transaction the number `(global_time, 0)`, and atomically increments `global_time`. A commit with no write to any public object produces the number -`(start_time, N+1)`, provided that we didn't have any object stolen -since `start_time`. This condition is enough to guarantee that it is ok -to linearize the transaction at `(start_time, N+1)` even though other -threads might already have produced transactions linearized at some -greater time. Indeed, the fact that no object of ours was stolen +`(start_time, N+1)`, provided that we didn't have any of our objects +stolen since `start_time`. This condition is enough to guarantee that +it is ok to linearize the transaction at `(start_time, N+1)` even though +other threads might already have produced transactions linearized at +some later time. Indeed, the fact that no object of ours was stolen means that no other thread's transaction depends on any object we're -about to commit. +about to commit. In other words, the absence of both public writes and +stealing is a cheap way to determine that this transaction "commutes" +with other transactions already committed. (My current guess is that we +can in this way reduce the pressure over the word of memory that +contains the shared "global time" variable, and make many very short +transactions efficient.) From noreply at buildbot.pypy.org Sun Jun 2 19:22:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 19:22:57 +0200 (CEST) Subject: [pypy-commit] stmgc default: Update README.txt Message-ID: <20130602172257.D064D1C0FFF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r56:113d9570c3ad Date: 2013-06-02 19:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/113d9570c3ad/ Log: Update README.txt diff --git a/README.txt b/README.txt --- a/README.txt +++ b/README.txt @@ -9,7 +9,11 @@ The library interface is in "c3/stmgc.h". -A demo program can be found in "c3/demo1.c". 
+The file "c3/doc-stmgc.txt" contains a high-level overview followed by +more detailled explanations. + +A demo program can be found in "c3/demo1.c", but the code so far is +outdated (it doesn't follow what c3/doc-stmgc describes). It can be built with "make debug-demo1" or "make build-demo1". The plan is to use this C code directly with PyPy, and not write From noreply at buildbot.pypy.org Sun Jun 2 19:34:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 19:34:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: More text. Message-ID: <20130602173447.574D71C0FFF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r57:b522cf8f5f8a Date: 2013-06-02 19:34 +0200 http://bitbucket.org/pypy/stmgc/changeset/b522cf8f5f8a/ Log: More text. diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -87,13 +87,13 @@ of two copies, both protected: the "main" one, used by the thread, which may be private or not depending on whether the object was modified in the current transaction; and, if the object is private but older than -the current transaction, then it has got a secondary copy whose purpose -is to record the state that the object had at the start of the current +the current transaction, then it has got a backup copy whose purpose is +to record the state that the object had at the start of the current transaction. If an object is committed and then no longer modified for long enough, the next (minor or major) GC will free the space that was used by the -secondary copy. +backup copy. The way to share data between threads goes via prebuilt objects, which are always public: it is their existence that gives the starting point @@ -166,10 +166,10 @@ object, because when a transaction commits, we don't want to have to walk all private objects to change this flag. Instead, private objects have a precise negative odd number in their `h_revision` field, called -the "local revision number". When a transaction commits, we change the -value of the local revision number, and all previously-private objects -become automatically protected. So the write barrier fast-path checks -if the `h_revision` is equal from the local revision number. +the "local revision identifier". When a transaction commits, we change +the value of the local revision identifier, and all previously-private +objects become automatically protected. So the write barrier fast-path +checks if the `h_revision` is equal from the local revision identifier. Note that this design relies on the following property: in a given copy of an object which was committed at revision N, all pointers points to @@ -218,8 +218,8 @@ timestamp" model. We apply it for public object. -Pointers and revision numbers on protected objects --------------------------------------------------- +Commits on protected objects +---------------------------- In traditional transactional systems, we have a special case to speed up transactions that don't do any write; but that seems too restricted to @@ -254,3 +254,28 @@ can in this way reduce the pressure over the word of memory that contains the shared "global time" variable, and make many very short transactions efficient.) + + +Details of protected objects +---------------------------- + +As described above, each thread has a "local revision identifier". It +is a negative odd number that changes whenever it commits a transaction. 
+The important point for the write barrier is that on any object copy, +`h_revision` must be equal to the local revision identifier if and only +if the copy is private. A newly allocated object is always private. +Once the transaction commits it becomes merely protected. Its +`h_revision` field doesn't change (but the thread's local revision +identifier does). If later the write barrier triggers on it, we make a +backup copy of the object and copy the content of the primary copy to +it. We also set `h_revision` in the primary copy to point to the +backup copy: as long as `h_revision` is different from the local +revision identifier, its exact value is otherwise not used. In this way +we can keep using the same backup copy in each future transaction +that needs to write to the object. + +The backup copy is used in two cases. One is if the transaction aborts; +then we copy the content back over the regular protected copy. The +other case is if the object is stolen. In that case, if the object has +an active backup copy, we must steal this one, because the regular +protected copy is actually private at that point in time. From noreply at buildbot.pypy.org Sun Jun 2 20:27:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 20:27:00 +0200 (CEST) Subject: [pypy-commit] stmgc default: Desccribe minor and major collections. Hand-waving for now. Message-ID: <20130602182700.33A6E1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r58:3b7bd55eed2f Date: 2013-06-02 20:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/3b7bd55eed2f/ Log: Desccribe minor and major collections. Hand-waving for now. diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -171,13 +171,6 @@ objects become automatically protected. So the write barrier fast-path checks if the `h_revision` is equal from the local revision identifier. -Note that this design relies on the following property: in a given copy -of an object which was committed at revision N, all pointers points to -copies of objects which were committed at or before revision N. This -property is true by construction, but we must be careful not to break it -by "optimizing" the content of a copy. In particular the GC, during -both minor and major collections, has to preserve this property. - The extendable timestamp model ------------------------------ @@ -279,3 +272,36 @@ other case is if the object is stolen. In that case, if the object has an active backup copy, we must steal this one, because the regular protected copy is actually private at that point in time. + + +Minor and major collections +--------------------------- + +The GC needs to interact with objects being copied outside the nursery: +we need to detect if, later, they are modified to contain a pointer to a +nursery object. This is the classical purpose of a write barrier in GC +terms. In our case, we need the write barrier's call to occur even on a +private object freshly copied out of the nursery, the first time it is +written to. This is easily combined with the write barrier described +above: when a minor collection copies objects out of the nursery, +private objects' `h_revision` field is temporarily replaced with a +different value. + +Major (global) collections are stop-the-world: when they need to occur, +the threads are all stopped at the next safe point. Then the problem +is simplified to a regular complete garbage collection. Additionally, +as hinted above, we can compact chains of public object copies. 
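Pulling the last few paragraphs together, a simplified Python model of the
private/backup bookkeeping could look as follows. All names are invented; in
particular the real library reuses the single h_revision word to hold either the
local revision identifier or the pointer to the backup copy, while the model below
keeps them apart for readability.

    class ObjCopy(object):
        def __init__(self, data, h_revision):
            self.data = dict(data)
            self.h_revision = h_revision
            self.backup = None          # stands in for the reused field

    class ThreadState(object):
        def __init__(self):
            self.local_rev_id = -1      # negative odd number
            self.private_copies = []

        def write_barrier(self, obj):
            if obj.h_revision != self.local_rev_id:     # not private yet
                # snapshot the old state (the real design can keep reusing
                # one backup copy across transactions)
                obj.backup = ObjCopy(obj.data, obj.h_revision)
                obj.h_revision = self.local_rev_id
                self.private_copies.append(obj)
            return obj

        def abort(self):
            for obj in self.private_copies:
                obj.data = dict(obj.backup.data)        # roll back in place
                obj.h_revision = obj.backup.h_revision
            self.private_copies = []

        def commit(self):
            # changing the identifier turns every private copy into a
            # protected one without walking the copies themselves
            self.local_rev_id -= 2
            self.private_copies = []

Stealing works on the same data: if the stolen object currently has a backup copy,
it is the backup that gets stolen, because the in-place copy is private at that
point in time.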
+ +Note that our design relies on the following property: in a given copy +of an object which was committed at revision N, all pointers points to +copies of objects which were committed at or before revision N. This +property is true by construction, but we must be careful not to break it +by "optimizing" the content of a copy. In particular, major collections +have to preserve this property. It means that the best (but still safe) +thing to do during major collection is to compress chains of public +objects down to one copy (the most recent one) and one stub. We fix the +references in existing objects to point to either the real copy or the +stub. This is probably a bit involved: we might have to get the current +revision numbers of all threads, and theoretically compact each interval +of number down to only one number, but still keep one active revision +number per thread. From noreply at buildbot.pypy.org Sun Jun 2 20:48:24 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 20:48:24 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1508: fix these last two usages of a fixed-ascii string-to-unicode Message-ID: <20130602184824.982921C101E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64713:64c70e584a54 Date: 2013-06-02 20:47 +0200 http://bitbucket.org/pypy/pypy/changeset/64c70e584a54/ Log: issue #1508: fix these last two usages of a fixed-ascii string-to- unicode convertion, which CPython doesn't do anyway. diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -352,9 +352,8 @@ def std_wp(self, r): length = len(r) if do_unicode and isinstance(r, str): - # convert string to unicode explicitely here - from pypy.objspace.std.unicodetype import plain_str2unicode - r = plain_str2unicode(self.space, r) + # convert string to unicode using the default encoding + r = self.space.unicode_w(self.space.wrap(r)) prec = self.prec if prec == -1 and self.width == 0: # fast path @@ -509,12 +508,10 @@ result = formatter.format() except NeedUnicodeFormattingError: # fall through to the unicode case - from pypy.objspace.std.unicodetype import plain_str2unicode - fmt = plain_str2unicode(space, fmt) + pass else: return space.wrap(result) - else: - fmt = space.unicode_w(w_fmt) + fmt = space.unicode_w(w_fmt) formatter = UnicodeFormatter(space, fmt, values_w, w_valuedict) result = formatter.format() return space.wrap(result) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -530,6 +530,12 @@ del sys.modules[module_name] temp_sys.setdefaultencoding('utf-8') assert u''.join(['\xc3\xa1']) == u'\xe1' + # + assert ('\xc3\xa1:%s' % u'\xe2') == u'\xe1:\xe2' + class Foo(object): + def __repr__(self): + return '\xc3\xa2' + assert u'\xe1:%r' % Foo() == u'\xe1:\xe2' finally: temp_sys.setdefaultencoding(old_encoding) sys.modules.update(self.original_modules) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -13,22 +13,6 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject return W_UnicodeObject(uni) -def plain_str2unicode(space, s): - try: - return unicode(s) - except UnicodeDecodeError: - for i in range(len(s)): - if ord(s[i]) > 127: - raise OperationError( - space.w_UnicodeDecodeError, - 
space.newtuple([ - space.wrap('ascii'), - space.wrap(s), - space.wrap(i), - space.wrap(i+1), - space.wrap("ordinal not in range(128)")])) - assert False, "unreachable" - unicode_capitalize = SMM('capitalize', 1, doc='S.capitalize() -> unicode\n\nReturn a' From noreply at buildbot.pypy.org Sun Jun 2 21:22:16 2013 From: noreply at buildbot.pypy.org (juanfra684) Date: Sun, 2 Jun 2013 21:22:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Reverts the last change to CC in openbsd.py. PyPy reads correctly $CC. Message-ID: <20130602192216.8FF311C01E5@cobra.cs.uni-duesseldorf.de> Author: Juan Francisco Cantero Hurtado Branch: Changeset: r64714:b56af2127b16 Date: 2013-06-02 20:46 +0200 http://bitbucket.org/pypy/pypy/changeset/b56af2127b16/ Log: Reverts the last change to CC in openbsd.py. PyPy reads correctly $CC. Reported by Laurence Tratt. diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -5,11 +5,7 @@ from rpython.translator.platform.bsd import BSD class OpenBSD(BSD): - if os.environ.get("CC") is None: - DEFAULT_CC = "cc" - else: - DEFAULT_CC = os.environ.get("CC") - + DEFAULT_CC = "cc" name = "openbsd" link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] From noreply at buildbot.pypy.org Sun Jun 2 21:22:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 21:22:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in juanfra684/pypy (pull request #154) Message-ID: <20130602192217.F03191C01E5@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r64715:a4a78a85ceaf Date: 2013-06-02 21:21 +0200 http://bitbucket.org/pypy/pypy/changeset/a4a78a85ceaf/ Log: Merged in juanfra684/pypy (pull request #154) Reverts the last change to CC in openbsd.py. PyPy reads correctly $CC. diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -5,11 +5,7 @@ from rpython.translator.platform.bsd import BSD class OpenBSD(BSD): - if os.environ.get("CC") is None: - DEFAULT_CC = "cc" - else: - DEFAULT_CC = os.environ.get("CC") - + DEFAULT_CC = "cc" name = "openbsd" link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] From noreply at buildbot.pypy.org Sun Jun 2 22:21:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 22:21:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Hack more, trying to work around both CPython's test_imp.py and Message-ID: <20130602202149.20B231C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64716:6c22c424ddb8 Date: 2013-06-02 22:21 +0200 http://bitbucket.org/pypy/pypy/changeset/6c22c424ddb8/ Log: Hack more, trying to work around both CPython's test_imp.py and CPython's buggy corresponding behavior (http://bugs.python.org/issue18122). 
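For readers following along, here is a much-simplified, hypothetical Python model of
the lock logic that this changeset and 51f304544a4b above end up with; it is not the
actual PyPy code and leaves out the real thread lock and re-entrancy handling, it
only shows the shape of the check:

    class ImportLockModel(object):
        def __init__(self):
            self.lockowner = None       # "thread identity" of the holder
            self.lockcounter = 0

        def acquire_lock(self, me):
            self.lockowner = me
            self.lockcounter += 1

        def reinit_lock(self):
            # called in the child process right after a fork()
            self.lockowner = None
            self.lockcounter = 0

        def release_lock(self, me, silent_after_fork):
            if self.lockowner is not me:
                if self.lockowner is None and silent_after_fork:
                    # fork() happened while an import held the lock: the
                    # child unwinds the importer without complaining
                    return
                raise RuntimeError("not holding the import lock")
            self.lockcounter -= 1
            if self.lockcounter == 0:
                self.lockowner = None

The internal import machinery releases with silent_after_fork=True, while the
application-level imp.release_lock() keeps passing False and therefore still raises,
which appears to be what CPython's test_imp.py checks for.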
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -303,7 +303,7 @@ return _absolute_import(space, modulename, baselevel, fromlist_w, tentative) finally: - lock.release_lock() + lock.release_lock(silent_after_fork=True) @jit.unroll_safe def absolute_import_try(space, modulename, baselevel, fromlist_w): @@ -788,10 +788,10 @@ self.lockowner = me self.lockcounter += 1 - def release_lock(self): + def release_lock(self, silent_after_fork): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: - if self.lockowner is None: + if self.lockowner is None and silent_after_fork: # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -177,7 +177,7 @@ def release_lock(space): if space.config.objspace.usemodules.thread: - importing.getimportlock(space).release_lock() + importing.getimportlock(space).release_lock(silent_after_fork=False) def reinit_lock(space): if space.config.objspace.usemodules.thread: From noreply at buildbot.pypy.org Sun Jun 2 22:23:20 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 2 Jun 2013 22:23:20 +0200 (CEST) Subject: [pypy-commit] pypy shared-by-default: a branch to measure if --shared can be enabled by default Message-ID: <20130602202320.B835C1C01E5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: shared-by-default Changeset: r64717:858bc45b1b06 Date: 2013-06-03 04:22 +0800 http://bitbucket.org/pypy/pypy/changeset/858bc45b1b06/ Log: a branch to measure if --shared can be enabled by default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -93,8 +93,7 @@ # itself needs the interp-level struct module # because 'P' is missing from the app-level one "_rawffi": [("objspace.usemodules.struct", True)], - "cpyext": [("translation.secondaryentrypoints", "cpyext,main"), - ("translation.shared", sys.platform == "win32")], + "cpyext": [("translation.secondaryentrypoints", "cpyext,main")] } module_import_dependencies = { @@ -309,6 +308,7 @@ # unspecified and we get None. It shouldn't occur in translate.py though. 
type_system = config.translation.type_system backend = config.translation.backend + config.translation.suggest(shared=True) # all the good optimizations for PyPy should be listed here if level in ['2', '3', 'jit']: From noreply at buildbot.pypy.org Sun Jun 2 22:30:59 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sun, 2 Jun 2013 22:30:59 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid 0 sized stack modifications if we are not pushing any args Message-ID: <20130602203059.DF25E1C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64718:1d6e2058a146 Date: 2013-06-02 09:44 -0500 http://bitbucket.org/pypy/pypy/changeset/1d6e2058a146/ Log: avoid 0 sized stack modifications if we are not pushing any args diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -52,6 +52,8 @@ def _push_stack_args(self, stack_args, on_stack): assert on_stack % 8 == 0 + if on_stack == 0: + return self._adjust_sp(-on_stack) self.current_sp = on_stack ofs = 0 @@ -71,7 +73,7 @@ else: self.mc.gen_load_int(r.ip.value, n) self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value) - else: + elif n < 0: n = abs(n) if check_imm_arg(n): self.mc.SUB_ri(r.sp.value, r.sp.value, n) From noreply at buildbot.pypy.org Sun Jun 2 22:31:01 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sun, 2 Jun 2013 22:31:01 +0200 (CEST) Subject: [pypy-commit] pypy default: use larger allowed imm size for VLDR and VSTR Message-ID: <20130602203101.423781C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64719:c970352ba2f5 Date: 2013-06-02 15:19 -0500 http://bitbucket.org/pypy/pypy/changeset/c970352ba2f5/ Log: use larger allowed imm size for VLDR and VSTR diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -8,6 +8,7 @@ JITFRAME_FIXED_SIZE) from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.locations import imm, StackLocation +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.opassembler import ResOpAssembler from rpython.jit.backend.arm.regalloc import (Regalloc, CoreRegisterManager, check_imm_arg, VFPRegisterManager, @@ -961,7 +962,7 @@ return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VLDR(target.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) @@ -982,7 +983,7 @@ return self._store_core_reg(mc, source, base, ofs, cond, helper) def _store_vfp_reg(self, mc, source, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VSTR(source.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) diff --git a/rpython/jit/backend/arm/helper/regalloc.py b/rpython/jit/backend/arm/helper/regalloc.py --- a/rpython/jit/backend/arm/helper/regalloc.py +++ b/rpython/jit/backend/arm/helper/regalloc.py @@ -4,7 +4,10 @@ from rpython.jit.metainterp.history import ConstInt from rpython.rlib.objectmodel import we_are_translated -def check_imm_arg(arg, size=0xFF, allow_zero=True): +VMEM_imm_size=0x3FC +default_imm_size=0xFF + +def check_imm_arg(arg, size=default_imm_size, 
allow_zero=True): assert not isinstance(arg, ConstInt) if not we_are_translated(): if not isinstance(arg, int): From noreply at buildbot.pypy.org Sun Jun 2 22:31:02 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sun, 2 Jun 2013 22:31:02 +0200 (CEST) Subject: [pypy-commit] pypy default: unify code that generates memory read instructions Message-ID: <20130602203102.8849D1C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64720:641e439a5353 Date: 2013-06-02 15:22 -0500 http://bitbucket.org/pypy/pypy/changeset/641e439a5353/ Log: unify code that generates memory read instructions diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -15,6 +15,7 @@ gen_emit_unary_float_op, saved_registers) from rpython.jit.backend.arm.helper.regalloc import check_imm_arg +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.jump import remap_frame_layout from rpython.jit.backend.arm.regalloc import TempBox @@ -23,6 +24,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (Box, AbstractFailDescr, INT, FLOAT, REF) from rpython.jit.metainterp.history import TargetToken @@ -559,47 +561,8 @@ def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs signed = op.getdescr().is_field_signed() - if size.value == 8: - assert res.is_vfp_reg() - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VLDR(res.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDR_rr(res.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - if signed: - self.mc.LDRSH_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSH_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_rr(res.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - if signed: - self.mc.LDRSB_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSB_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_rr(res.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) return fcond emit_op_getfield_raw = emit_op_getfield_gc @@ -609,39 +572,22 @@ def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + 
self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) descr = op.getdescr() assert isinstance(descr, InteriorFieldDescr) signed = descr.fielddescr.is_field_signed() if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - - if fieldsize.value == 8: - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert res_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VLDR(res_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.LDR_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - assert 0 - + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + ofs_loc = tmploc + self._load_from_mem(res_loc, base_loc, ofs_loc, + imm(scale), signed, fcond) return fcond def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): @@ -731,33 +677,73 @@ self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) ofs_loc = r.ip # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): if scale.value == 3: assert res_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VLDR(res_loc.value, r.ip.value, cond=fcond) + # vldr only supports imm offsets + # if the offset is in a register we add it to the base and use a + # tmp reg + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + # if the ofset is too large for an imm we add it to the base and use an + # offset of 0 + if check_imm_arg(ofs_loc.value, VMEM_imm_size): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, ofs_loc.value) + self.mc.ADD_rr(tmploc.value, base_loc.value, tmploc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: # sanity check + assert ofs_loc.value % 4 == 0 + self.mc.VLDR(res_loc.value, base_loc.value, ofs_loc.value, cond=fcond) elif scale.value == 2: - self.mc.LDR_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + self.mc.LDR_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDR_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_rr(res_loc.value, base_loc.value, + 
ofs_loc.value, cond=fcond) elif scale.value == 0: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -770,7 +756,7 @@ # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def emit_op_strlen(self, op, arglocs, regalloc, fcond): @@ -993,7 +979,7 @@ assert result_loc.is_vfp_reg() # we always have a register here, since we have to sync them # before call_assembler - self.mc.VLDR(result_loc.value, r.r0.value, imm=ofs) + self.load_reg(self.mc, result_loc, r.r0, ofs=ofs) else: assert result_loc is r.r0 ofs = self.cpu.unpack_arraydescr(descr) From noreply at buildbot.pypy.org Sun Jun 2 22:31:03 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sun, 2 Jun 2013 22:31:03 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130602203103.D6EBA1C01E5@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64721:d3b498962380 Date: 2013-06-02 15:23 -0500 http://bitbucket.org/pypy/pypy/changeset/d3b498962380/ Log: merge heads diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -303,7 +303,7 @@ return _absolute_import(space, modulename, baselevel, fromlist_w, tentative) finally: - lock.release_lock() + lock.release_lock(silent_after_fork=True) @jit.unroll_safe def absolute_import_try(space, modulename, baselevel, fromlist_w): @@ -788,10 +788,10 @@ self.lockowner = me self.lockcounter += 1 - def release_lock(self): + def release_lock(self, silent_after_fork): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: - if self.lockowner is None: + if self.lockowner is None and silent_after_fork: # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. 
return diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -177,7 +177,7 @@ def release_lock(space): if space.config.objspace.usemodules.thread: - importing.getimportlock(space).release_lock() + importing.getimportlock(space).release_lock(silent_after_fork=False) def reinit_lock(space): if space.config.objspace.usemodules.thread: diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -352,9 +352,8 @@ def std_wp(self, r): length = len(r) if do_unicode and isinstance(r, str): - # convert string to unicode explicitely here - from pypy.objspace.std.unicodetype import plain_str2unicode - r = plain_str2unicode(self.space, r) + # convert string to unicode using the default encoding + r = self.space.unicode_w(self.space.wrap(r)) prec = self.prec if prec == -1 and self.width == 0: # fast path @@ -509,12 +508,10 @@ result = formatter.format() except NeedUnicodeFormattingError: # fall through to the unicode case - from pypy.objspace.std.unicodetype import plain_str2unicode - fmt = plain_str2unicode(space, fmt) + pass else: return space.wrap(result) - else: - fmt = space.unicode_w(w_fmt) + fmt = space.unicode_w(w_fmt) formatter = UnicodeFormatter(space, fmt, values_w, w_valuedict) result = formatter.format() return space.wrap(result) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ -530,6 +530,12 @@ del sys.modules[module_name] temp_sys.setdefaultencoding('utf-8') assert u''.join(['\xc3\xa1']) == u'\xe1' + # + assert ('\xc3\xa1:%s' % u'\xe2') == u'\xe1:\xe2' + class Foo(object): + def __repr__(self): + return '\xc3\xa2' + assert u'\xe1:%r' % Foo() == u'\xe1:\xe2' finally: temp_sys.setdefaultencoding(old_encoding) sys.modules.update(self.original_modules) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -13,22 +13,6 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject return W_UnicodeObject(uni) -def plain_str2unicode(space, s): - try: - return unicode(s) - except UnicodeDecodeError: - for i in range(len(s)): - if ord(s[i]) > 127: - raise OperationError( - space.w_UnicodeDecodeError, - space.newtuple([ - space.wrap('ascii'), - space.wrap(s), - space.wrap(i), - space.wrap(i+1), - space.wrap("ordinal not in range(128)")])) - assert False, "unreachable" - unicode_capitalize = SMM('capitalize', 1, doc='S.capitalize() -> unicode\n\nReturn a' diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -17,7 +17,7 @@ def get_description(atypes, rtype): p = lltype.malloc(CIF_DESCRIPTION, len(atypes), flavor='raw', immortal=True) - p.abi = 42 + p.abi = 1 # default p.nargs = len(atypes) p.rtype = rtype p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes), diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -5,11 +5,7 @@ from rpython.translator.platform.bsd import BSD class OpenBSD(BSD): - if os.environ.get("CC") is None: - DEFAULT_CC = "cc" - else: - DEFAULT_CC = 
os.environ.get("CC") - + DEFAULT_CC = "cc" name = "openbsd" link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] From noreply at buildbot.pypy.org Sun Jun 2 22:56:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 22:56:08 +0200 (CEST) Subject: [pypy-commit] buildbot default: Call the handlers in a parallel thread, allowing the http request from Message-ID: <20130602205608.D324A1C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r824:861d54ea624e Date: 2013-06-02 22:55 +0200 http://bitbucket.org/pypy/buildbot/changeset/861d54ea624e/ Log: Call the handlers in a parallel thread, allowing the http request from bitbucket to get immediately an answer. (For tests, we don't use this parallel thread.) diff --git a/bbhook/hook.py b/bbhook/hook.py --- a/bbhook/hook.py +++ b/bbhook/hook.py @@ -3,6 +3,8 @@ import subprocess import sys import time +import thread, Queue +import traceback from .main import app from . import scm @@ -39,7 +41,24 @@ yield commit -def handle(payload, test=False): + +def _handle_thread(): + while True: + local_repo = payload = None + try: + local_repo, payload = queue.get() + _do_handle(local_repo, payload) + except: + traceback.print_exc() + print >> sys.stderr, 'payload:' + pprint.pprint(payload, sys.stderr) + print >> sys.stderr + +queue = Queue.Queue() +thread.start_new_thread(_handle_thread, ()) + + +def handle(payload, test=True): path = payload['repository']['absolute_url'] owner = payload['repository']['owner'] local_repo = app.config['LOCAL_REPOS'].join(path) @@ -47,6 +66,12 @@ if not check_for_local_repo(local_repo, remote_repo, owner): print >> sys.stderr, 'Ignoring unknown repo', path return + if test: + _do_handle(local_repo, payload, test) + else: + queue.put((local_repo, payload)) + +def _do_handle(local_repo, payload, test=False): scm.hg('pull', '-R', local_repo) for commit in get_commits(payload): for handler in HANDLERS: From noreply at buildbot.pypy.org Sun Jun 2 22:59:30 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 2 Jun 2013 22:59:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Typo Message-ID: <20130602205930.58C831C01E5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64722:a5b60f700b88 Date: 2013-06-02 22:58 +0200 http://bitbucket.org/pypy/pypy/changeset/a5b60f700b88/ Log: Typo diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. From noreply at buildbot.pypy.org Sun Jun 2 23:11:15 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 2 Jun 2013 23:11:15 +0200 (CEST) Subject: [pypy-commit] pypy default: add a comment Message-ID: <20130602211115.6897B1C01E5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r64723:f4dc731c4c03 Date: 2013-06-03 05:10 +0800 http://bitbucket.org/pypy/pypy/changeset/f4dc731c4c03/ Log: add a comment diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,6 @@ + +.. comment: this document is very incomplete, should we generate it automatically? 
+ ======================= The ``__pypy__`` module ======================= From noreply at buildbot.pypy.org Sun Jun 2 23:11:16 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 2 Jun 2013 23:11:16 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20130602211116.EC3671C01E5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r64724:b93ade5abad3 Date: 2013-06-03 05:10 +0800 http://bitbucket.org/pypy/pypy/changeset/b93ade5abad3/ Log: merge diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -303,7 +303,7 @@ return _absolute_import(space, modulename, baselevel, fromlist_w, tentative) finally: - lock.release_lock() + lock.release_lock(silent_after_fork=True) @jit.unroll_safe def absolute_import_try(space, modulename, baselevel, fromlist_w): @@ -788,10 +788,10 @@ self.lockowner = me self.lockcounter += 1 - def release_lock(self): + def release_lock(self, silent_after_fork): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: - if self.lockowner is None: + if self.lockowner is None and silent_after_fork: # Too bad. This situation can occur if a fork() occurred # with the import lock held, and we're the child. return diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -177,7 +177,7 @@ def release_lock(space): if space.config.objspace.usemodules.thread: - importing.getimportlock(space).release_lock() + importing.getimportlock(space).release_lock(silent_after_fork=False) def reinit_lock(space): if space.config.objspace.usemodules.thread: diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -8,6 +8,7 @@ JITFRAME_FIXED_SIZE) from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.locations import imm, StackLocation +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.opassembler import ResOpAssembler from rpython.jit.backend.arm.regalloc import (Regalloc, CoreRegisterManager, check_imm_arg, VFPRegisterManager, @@ -961,7 +962,7 @@ return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VLDR(target.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) @@ -982,7 +983,7 @@ return self._store_core_reg(mc, source, base, ofs, cond, helper) def _store_vfp_reg(self, mc, source, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VSTR(source.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ 
-52,6 +52,8 @@ def _push_stack_args(self, stack_args, on_stack): assert on_stack % 8 == 0 + if on_stack == 0: + return self._adjust_sp(-on_stack) self.current_sp = on_stack ofs = 0 @@ -71,7 +73,7 @@ else: self.mc.gen_load_int(r.ip.value, n) self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value) - else: + elif n < 0: n = abs(n) if check_imm_arg(n): self.mc.SUB_ri(r.sp.value, r.sp.value, n) diff --git a/rpython/jit/backend/arm/helper/regalloc.py b/rpython/jit/backend/arm/helper/regalloc.py --- a/rpython/jit/backend/arm/helper/regalloc.py +++ b/rpython/jit/backend/arm/helper/regalloc.py @@ -4,7 +4,10 @@ from rpython.jit.metainterp.history import ConstInt from rpython.rlib.objectmodel import we_are_translated -def check_imm_arg(arg, size=0xFF, allow_zero=True): +VMEM_imm_size=0x3FC +default_imm_size=0xFF + +def check_imm_arg(arg, size=default_imm_size, allow_zero=True): assert not isinstance(arg, ConstInt) if not we_are_translated(): if not isinstance(arg, int): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -15,6 +15,7 @@ gen_emit_unary_float_op, saved_registers) from rpython.jit.backend.arm.helper.regalloc import check_imm_arg +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.jump import remap_frame_layout from rpython.jit.backend.arm.regalloc import TempBox @@ -23,6 +24,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (Box, AbstractFailDescr, INT, FLOAT, REF) from rpython.jit.metainterp.history import TargetToken @@ -559,47 +561,8 @@ def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs signed = op.getdescr().is_field_signed() - if size.value == 8: - assert res.is_vfp_reg() - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VLDR(res.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDR_rr(res.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - if signed: - self.mc.LDRSH_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSH_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_rr(res.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - if signed: - self.mc.LDRSB_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSB_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_rr(res.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) return fcond emit_op_getfield_raw = emit_op_getfield_gc @@ -609,39 +572,22 @@ def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): 
(base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) descr = op.getdescr() assert isinstance(descr, InteriorFieldDescr) signed = descr.fielddescr.is_field_signed() if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - - if fieldsize.value == 8: - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert res_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VLDR(res_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.LDR_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - assert 0 - + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + ofs_loc = tmploc + self._load_from_mem(res_loc, base_loc, ofs_loc, + imm(scale), signed, fcond) return fcond def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): @@ -731,33 +677,73 @@ self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) ofs_loc = r.ip # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): if scale.value == 3: assert res_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VLDR(res_loc.value, r.ip.value, cond=fcond) + # vldr only supports imm offsets + # if the offset is in a register we add it to the base and use a + # tmp reg + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + # if the ofset is too large for an imm we add it to the base and use an + # offset of 0 + if check_imm_arg(ofs_loc.value, VMEM_imm_size): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, ofs_loc.value) + self.mc.ADD_rr(tmploc.value, base_loc.value, tmploc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: # sanity check + assert ofs_loc.value % 4 == 0 + self.mc.VLDR(res_loc.value, base_loc.value, ofs_loc.value, cond=fcond) elif scale.value == 2: - self.mc.LDR_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + self.mc.LDR_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDR_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSH_ri(res_loc.value, base_loc.value, + 
ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 0: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -770,7 +756,7 @@ # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def emit_op_strlen(self, op, arglocs, regalloc, fcond): @@ -993,7 +979,7 @@ assert result_loc.is_vfp_reg() # we always have a register here, since we have to sync them # before call_assembler - self.mc.VLDR(result_loc.value, r.r0.value, imm=ofs) + self.load_reg(self.mc, result_loc, r.r0, ofs=ofs) else: assert result_loc is r.r0 ofs = self.cpu.unpack_arraydescr(descr) From noreply at buildbot.pypy.org Mon Jun 3 09:05:26 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Mon, 3 Jun 2013 09:05:26 +0200 (CEST) Subject: [pypy-commit] lang-js default: fixed translation. Message-ID: <20130603070526.0BCD91C0619@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: Changeset: r396:e2275b282ab9 Date: 2013-05-29 00:04 -0300 http://bitbucket.org/pypy/lang-js/changeset/e2275b282ab9/ Log: fixed translation. diff --git a/js/builtins/array.py b/js/builtins/array.py --- a/js/builtins/array.py +++ b/js/builtins/array.py @@ -56,10 +56,14 @@ o = this.ToObject() from_index = get_arg(args, 0).ToUInt32() to_index = get_arg(args, 1).ToUInt32() - n = [] - for k in xrange(from_index, to_index): - n.append(o.get(unicode(str(k)))) - return _w(n) + from js.object_space import object_space + n = object_space.new_array(length=_w(to_index-from_index)) + from js.jsobj import put_property + index = 0 + for item in xrange(from_index, to_index): + put_property(n, unicode(str(index)), o.get(unicode(str(item)))) + index += 1 + return n # 15.4.4.7 From noreply at buildbot.pypy.org Mon Jun 3 10:45:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Jun 2013 10:45:19 +0200 (CEST) Subject: [pypy-commit] stmgc default: Forgot about one aspect of read barriers. Now they are no longer Message-ID: <20130603084519.1335A1C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r59:14020650fc09 Date: 2013-06-03 10:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/14020650fc09/ Log: Forgot about one aspect of read barriers. Now they are no longer "extremely cheap". I hope they can still be "very cheap". diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -41,10 +41,10 @@ of keeping multiple copies with revision numbers to track them. 
Moreover, "read barriers" and "write barriers" are used by the C program calling into this library in order to be sure that it is accessing the -right version of the object. In the current variant we can have -extremely cheap read barriers, which are definitely a major speed -improvement over the previous variants (and, as far as I know, over most -of the other existing STMs). +right version of the object. In the current variant we can have very +cheap read barriers, which are definitely a major speed improvement over +the previous variants (and, as far as I know, over most of the other +existing STMs). ---------------------- @@ -154,22 +154,34 @@ Point 3 is essential for performance: we want most importantly a read barrier that doesn't trigger for the cases described above. The read -barrier needs to check if a pointer P references a public copy that -was outdated by a future revision. This is an easy check, which can -be implemented by checking a flag in the header of the copy. In all -the common cases, this flag is not set, and no actual call needs to -be done. +barrier has two purposes: it needs to check that a given pointer P +references an object that is not outdated already; and it needs to +record the pointer in the "read set" of the current transaction. -The case of the write barrier is similar, but differs in the check we -need to do. We need to do a call if the object is not already private. -For performance reasons, "being private" is not directly a flag in the -object, because when a transaction commits, we don't want to have to -walk all private objects to change this flag. Instead, private objects -have a precise negative odd number in their `h_revision` field, called -the "local revision identifier". When a transaction commits, we change -the value of the local revision identifier, and all previously-private -objects become automatically protected. So the write barrier fast-path -checks if the `h_revision` is equal from the local revision identifier. +The first check is easy, and can be implemented by checking a flag in +the header of the copy. In all the common cases, this flag is not set, +and no actual call needs to be done. + +The recording in the read set is a bit more annoying. We need to +maintain a thread-local *set* of all accessed objects, but we don't care +about the order or recording the occasional duplicate. Moreover we +don't need to record the private objects; but we do need all other +protected objects, as well as public objects. The best approach is +probably to have a quick check "is it definitely recorded already?" +inline, and do the call if the check fails. It needs careful design to +be done in only a few CPU instructions, but it should be possible. + +The case of the write barrier is similar to the first half of the read +barrier, but differs in the check we need to do. We need to do a call +if the object is not already private. For performance reasons, "being +private" is not directly a flag in the object, because when a +transaction commits, we don't want to have to walk all private objects +to change this flag. Instead, private objects have a precise negative +odd number in their `h_revision` field, called the "local revision +identifier". When a transaction commits, we change the value of the +local revision identifier, and all previously-private objects become +automatically protected. So the write barrier fast-path checks if the +`h_revision` is equal from the local revision identifier. 
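Concretely, the write-barrier fast path described in the paragraph above amounts to a single comparison before falling back to a call. A rough Python rendering follows; every name in it is invented for illustration and is not the actual stmgc API:

    # Illustrative sketch only -- invented names, not the stmgc code.
    class ObjectCopy(object):
        def __init__(self, h_revision):
            self.h_revision = h_revision        # negative odd number => private copy

    local_revision_identifier = -3              # changed at every commit

    def write_barrier(obj):
        if obj.h_revision == local_revision_identifier:
            return obj                          # already private: write in place
        return write_barrier_slow_path(obj)     # make a private copy or a backup

    def write_barrier_slow_path(obj):
        raise NotImplementedError("covered in the rest of this document")

Because the identifier changes at commit time, a commit demotes every private copy to protected without walking them, which is exactly the property the text above relies on.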
The extendable timestamp model From noreply at buildbot.pypy.org Mon Jun 3 11:00:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Jun 2013 11:00:17 +0200 (CEST) Subject: [pypy-commit] stmgc default: Saved the day by moving the cache check inline in 4 cpu instructions. Message-ID: <20130603090017.8AD381C0619@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r60:39d1f410f3ce Date: 2013-06-03 10:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/39d1f410f3ce/ Log: Saved the day by moving the cache check inline in 4 cpu instructions. diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -158,18 +158,25 @@ references an object that is not outdated already; and it needs to record the pointer in the "read set" of the current transaction. -The first check is easy, and can be implemented by checking a flag in -the header of the copy. In all the common cases, this flag is not set, -and no actual call needs to be done. +The first check is easy, and could be implemented by checking a flag in +the header of the copy. But the recording in the read set is a bit more +annoying. We need to maintain a thread-local *set* of all accessed +objects, but we don't care about the order or recording the occasional +duplicate. Moreover we don't need to record the private objects; but we +do need all other protected objects, as well as public objects. The +best approach is probably to have a quick check "is it definitely +recorded already?" inline, and do the call if the check fails. It needs +careful design to be done in only a few CPU instructions, but it should +be possible. -The recording in the read set is a bit more annoying. We need to -maintain a thread-local *set* of all accessed objects, but we don't care -about the order or recording the occasional duplicate. Moreover we -don't need to record the private objects; but we do need all other -protected objects, as well as public objects. The best approach is -probably to have a quick check "is it definitely recorded already?" -inline, and do the call if the check fails. It needs careful design to -be done in only a few CPU instructions, but it should be possible. +(Code like this compiles to 4 instructions in the fast path: + + __thread char *read_barrier_cache; /* thread-local cache of 64KB */ + + if (__builtin_expect(*(gcptr *)(read_barrier_cache + (((long)x) & 65535)) + != x, 0)) + x = call_read_barrier(x); +) The case of the write barrier is similar to the first half of the read barrier, but differs in the check we need to do. We need to do a call From noreply at buildbot.pypy.org Mon Jun 3 11:33:44 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 3 Jun 2013 11:33:44 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: changed wrapping mechanism of interpreterProxy functions(ipfs): Message-ID: <20130603093344.93DBA1C1527@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r435:9471f0ad6d08 Date: 2013-06-03 11:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9471f0ad6d08/ Log: changed wrapping mechanism of interpreterProxy functions(ipfs): signature is now given from (int, float, oop, ...) 
and unwrapping/wrapping/default return values are now supplied by the wrapping method failure state is set by the wrapper, not each function added list of all missing ipfs diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -8,6 +8,8 @@ # plugin setInterpreter: proxy. # (plugin respondsTo: #initialiseModule) ifTrue:[plugin initialiseModule]. # plugin perform: primitiveName asSymbol. +import inspect + from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.exports import export_struct @@ -20,49 +22,92 @@ sqInt = rffi.INT sqLong = rffi.LONG sqDouble = rffi.DOUBLE -sqIntArray = rffi.CArray(sqInt) +sqIntArrayPtr = Ptr(rffi.CArray(sqInt)) major = minor = 0 functions = [] -def expose_on_virtual_machine_proxy(signature, minor=0, major=1): - f_ptr = Ptr(signature) +oop = object() + +class ProxyFunctionFailed(error.PrimitiveFailedError): + pass + +def expose_on_virtual_machine_proxy(unwrap_spec, result_type, minor=0, major=1): + mapping = {oop: sqInt, int: sqInt, list: sqIntArrayPtr, bool: sqInt, float: sqDouble} + f_ptr = Ptr(FuncType([mapping[spec] for spec in unwrap_spec], mapping[result_type])) if minor < minor: minor = minor if major < major: major = major def decorator(func): - functions.append(("c_" + func.func_name, f_ptr, func)) - return func + len_unwrap_spec = len(unwrap_spec) + assert (len_unwrap_spec == len(inspect.getargspec(func)[0]) + 1, + "wrong number of arguments") + unrolling_unwrap_spec = unrolling_iterable(enumerate(unwrap_spec)) + def wrapped(*c_arguments): + assert len_unwrap_spec == len(c_arguments) + args = () + try: + for i, spec in unrolling_unwrap_spec: + c_arg = c_arguments[i] + if spec is oop: + args += (IProxy.oop_to_object(c_arg), ) + else: + args += (c_arg, ) + result = func(*args) + if result_type is oop: + assert isinstance(result, model.W_Object) + return IProxy.object_to_oop(result) + elif result_type is list: + assert isinstance(result, list) + return IProxy.list_to_carray(result) + elif result_type in (int, float, bool): + assert isinstance(result, result_type) + return result + else: + return result + except error.PrimitiveFailedError: + IProxy.success_flag = False + if mapping[result_type] is sqInt: + return 0 + elif mapping[result_type] is sqDouble: + return 0.0 + elif mapping[result_type] is sqIntArrayPtr: + return rffi.cast(sqIntArrayPtr, 0) + else: + raise NotImplementedError( + "InterpreterProxy: unknown result_type %s" % (result_type, )) + functions.append(("c_" + func.func_name, f_ptr, wrapped)) + return wrapped return decorator - at expose_on_virtual_machine_proxy(FuncType([], sqInt)) + at expose_on_virtual_machine_proxy([], int) def minorVersion(): return minor - at expose_on_virtual_machine_proxy(FuncType([], sqInt)) + at expose_on_virtual_machine_proxy([], int) def majorVersion(): return major - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([int], int) def pop(nItems): IProxy.s_frame.pop_n(nItems) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt, sqInt], sqInt)) -def popthenPush(nItems, oop): + at expose_on_virtual_machine_proxy([int, oop], int) +def popthenPush(nItems, w_object): s_frame = IProxy.s_frame s_frame.pop_n(nItems) - s_frame.push(IProxy.oop_to_object(oop)) + s_frame.push(w_object) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) -def push(oop): + at expose_on_virtual_machine_proxy([oop], int) +def 
push(w_object): s_frame = IProxy.s_frame - s_frame.push(IProxy.oop_to_object(oop)) + s_frame.push(w_object) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([bool], int) def pushBool(trueOrFalse): s_frame = IProxy.s_frame if trueOrFalse: @@ -71,71 +116,60 @@ s_frame.push(IProxy.interp.space.w_false) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqDouble], sqInt)) + at expose_on_virtual_machine_proxy([float], int) def pushFloat(f): s_frame = IProxy.s_frame s_frame.push(IProxy.space.wrap_float(f)) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([int], int) def pushInteger(n): s_frame = IProxy.s_frame s_frame.push(IProxy.space.wrap_int(n)) return 0 - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqDouble)) + at expose_on_virtual_machine_proxy([int], float) def stackFloatValue(offset): s_frame = IProxy.s_frame f = s_frame.peek(offset) if isinstance(f, model.W_Float): return f.value else: - IProxy.successFlag = False - return 0.0 + raise ProxyFunctionFailed - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([int], int) def stackIntegerValue(offset): s_frame = IProxy.s_frame n = s_frame.peek(offset) - try: - return IProxy.space.unwrap_int(n) - except error.PrimitiveFailedError: - IProxy.successFlag = False - return 0 + return IProxy.space.unwrap_int(n) - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([int], oop) def stackObjectValue(offset): s_frame = IProxy.s_frame w_object = s_frame.peek(offset) if not isinstance(w_object, model.W_SmallInteger): - return IProxy.object_to_oop(w_object) - IProxy.successFlag = False - return 0 + return w_object + raise ProxyFunctionFailed - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) + at expose_on_virtual_machine_proxy([int], oop) def stackValue(offset): s_frame = IProxy.s_frame - return IProxy.object_to_oop(s_frame.peek(offset)) + return s_frame.peek(offset) - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) -def argumentCountOf(methodOOP): - w_method = IProxy.oop_to_object(methodOOP) + at expose_on_virtual_machine_proxy([oop], int) +def argumentCountOf(w_method): if isinstance(w_method, model.W_CompiledMethod): return w_method.argsize - IProxy.successFlag = False - return 0 + raise ProxyFunctionFailed - at expose_on_virtual_machine_proxy(FuncType([sqInt], Ptr(sqIntArray))) -def arrayValueOf(oop): - w_array = IProxy.oop_to_object(oop) - if isinstance(w_array, model.W_WordsObject) or isinstance(w_array, model.W_BytesObject): + at expose_on_virtual_machine_proxy([oop], list) +def arrayValueOf(w_array): + if w_array.is_array_object(): raise NotImplementedError - IProxy.successFlag = False - return rffi.cast(Ptr(sqIntArray), 0) + raise ProxyFunctionFailed - at expose_on_virtual_machine_proxy(FuncType([sqInt], sqInt)) -def byteSizeOf(oop): - w_object = IProxy.oop_to_object(oop) + at expose_on_virtual_machine_proxy([oop], int) +def byteSizeOf(w_object): s_class = w_object.shadow_of_my_class(IProxy.space) size = s_class.instsize() if s_class.isvariable(): @@ -144,6 +178,263 @@ size *= 4 return size + at expose_on_virtual_machine_proxy([int, oop], list) +def fetchArrayofObject(fieldIndex, w_object): + # arrayOop := self fetchPointer: fieldIndex ofObject: objectPointer. 
+ # ^ self arrayValueOf: arrayOop + w_array = w_object.fetch(IProxy.space, fieldIndex) + if w_array.is_array_object(): + raise NotImplementedError + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([oop], oop) +def fetchClassOf(w_object): + w_class = w_object.getclass(IProxy.space) + return w_class +# sqInt (*fetchClassOf)(sqInt oop); +# double (*fetchFloatofObject)(sqInt fieldIndex, sqInt objectPointer); +# sqInt (*fetchIntegerofObject)(sqInt fieldIndex, sqInt objectPointer); +# sqInt (*fetchPointerofObject)(sqInt fieldIndex, sqInt oop); +# sqInt (*obsoleteDontUseThisFetchWordofObject)(sqInt fieldFieldIndex, sqInt oop); +# void *(*firstFixedField)(sqInt oop); +# void *(*firstIndexableField)(sqInt oop); +# sqInt (*literalofMethod)(sqInt offset, sqInt methodPointer); +# sqInt (*literalCountOf)(sqInt methodPointer); +# sqInt (*methodArgumentCount)(void); +# sqInt (*methodPrimitiveIndex)(void); +# sqInt (*primitiveIndexOf)(sqInt methodPointer); +# sqInt (*sizeOfSTArrayFromCPrimitive)(void *cPtr); +# sqInt (*slotSizeOf)(sqInt oop); +# sqInt (*stObjectat)(sqInt array, sqInt fieldIndex); +# sqInt (*stObjectatput)(sqInt array, sqInt fieldIndex, sqInt value); +# sqInt (*stSizeOf)(sqInt oop); +# sqInt (*storeIntegerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt integer); +# sqInt (*storePointerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt valuePointer); + +# /* InterpreterProxy methodsFor: 'testing' */ + +# sqInt (*isKindOf)(sqInt oop, char *aString); +# sqInt (*isMemberOf)(sqInt oop, char *aString); +# sqInt (*isBytes)(sqInt oop); +# sqInt (*isFloatObject)(sqInt oop); +# sqInt (*isIndexable)(sqInt oop); +# sqInt (*isIntegerObject)(sqInt objectPointer); +# sqInt (*isIntegerValue)(sqInt intValue); +# sqInt (*isPointers)(sqInt oop); +# sqInt (*isWeak)(sqInt oop); +# sqInt (*isWords)(sqInt oop); +# sqInt (*isWordsOrBytes)(sqInt oop); + +# /* InterpreterProxy methodsFor: 'converting' */ + +# sqInt (*booleanValueOf)(sqInt obj); +# sqInt (*checkedIntegerValueOf)(sqInt intOop); +# sqInt (*floatObjectOf)(double aFloat); +# double (*floatValueOf)(sqInt oop); +# sqInt (*integerObjectOf)(sqInt value); +# sqInt (*integerValueOf)(sqInt oop); +# sqInt (*positive32BitIntegerFor)(sqInt integerValue); +# sqInt (*positive32BitValueOf)(sqInt oop); + +# /* InterpreterProxy methodsFor: 'special objects' */ + +# sqInt (*characterTable)(void); +# sqInt (*displayObject)(void); +# sqInt (*falseObject)(void); +# sqInt (*nilObject)(void); +# sqInt (*trueObject)(void); + +# /* InterpreterProxy methodsFor: 'special classes' */ + +# sqInt (*classArray)(void); +# sqInt (*classBitmap)(void); +# sqInt (*classByteArray)(void); +# sqInt (*classCharacter)(void); +# sqInt (*classFloat)(void); +# sqInt (*classLargePositiveInteger)(void); +# sqInt (*classPoint)(void); +# sqInt (*classSemaphore)(void); +# sqInt (*classSmallInteger)(void); +# sqInt (*classString)(void); + +# /* InterpreterProxy methodsFor: 'instance creation' */ + +# sqInt (*clone)(sqInt oop); +# sqInt (*instantiateClassindexableSize)(sqInt classPointer, sqInt size); +# sqInt (*makePointwithxValueyValue)(sqInt xValue, sqInt yValue); +# sqInt (*popRemappableOop)(void); +# sqInt (*pushRemappableOop)(sqInt oop); + +# /* InterpreterProxy methodsFor: 'other' */ + +# sqInt (*becomewith)(sqInt array1, sqInt array2); +# sqInt (*byteSwapped)(sqInt w); +# sqInt (*failed)(void); +# sqInt (*fullDisplayUpdate)(void); +# sqInt (*fullGC)(void); +# sqInt (*incrementalGC)(void); +# sqInt (*primitiveFail)(void); +# sqInt 
(*showDisplayBitsLeftTopRightBottom)(sqInt aForm, sqInt l, sqInt t, sqInt r, sqInt b); +# sqInt (*signalSemaphoreWithIndex)(sqInt semaIndex); +# sqInt (*success)(sqInt aBoolean); +# sqInt (*superclassOf)(sqInt classPointer); + +# /* InterpreterProxy methodsFor: 'compiler' */ + +# CompilerHook *(*compilerHookVector)(void); +# sqInt (*setCompilerInitialized)(sqInt initFlag); + +# #if VM_PROXY_MINOR > 1 + +# /* InterpreterProxy methodsFor: 'BitBlt support' */ + +# sqInt (*loadBitBltFrom)(sqInt bbOop); +# sqInt (*copyBits)(void); +# sqInt (*copyBitsFromtoat)(sqInt leftX, sqInt rightX, sqInt yValue); + +# #endif + +# #if VM_PROXY_MINOR > 2 + +# sqInt (*classLargeNegativeInteger)(void); +# sqInt (*signed32BitIntegerFor)(sqInt integerValue); +# sqInt (*signed32BitValueOf)(sqInt oop); +# sqInt (*includesBehaviorThatOf)(sqInt aClass, sqInt aSuperClass); +# sqInt (*primitiveMethod)(void); + +# /* InterpreterProxy methodsFor: 'FFI support' */ + +# sqInt (*classExternalAddress)(void); +# sqInt (*classExternalData)(void); +# sqInt (*classExternalFunction)(void); +# sqInt (*classExternalLibrary)(void); +# sqInt (*classExternalStructure)(void); +# sqInt (*ioLoadModuleOfLength)(sqInt modIndex, sqInt modLength); +# sqInt (*ioLoadSymbolOfLengthFromModule)(sqInt fnIndex, sqInt fnLength, sqInt handle); +# sqInt (*isInMemory)(sqInt address); + +# #endif + +# #if VM_PROXY_MINOR > 3 + +# void *(*ioLoadFunctionFrom)(char *fnName, char *modName); +# sqInt (*ioMicroMSecs)(void); + +# #endif + +# #if VM_PROXY_MINOR > 4 + +# # if !defined(sqLong) +# # if _MSC_VER +# # define sqLong __int64 +# # define usqLong unsigned __int64 +# # else +# # define sqLong long long +# # define usqLong unsigned long long +# # endif +# # endif + +# sqInt (*positive64BitIntegerFor)(sqLong integerValue); +# sqLong (*positive64BitValueOf)(sqInt oop); +# sqInt (*signed64BitIntegerFor)(sqLong integerValue); +# sqLong (*signed64BitValueOf)(sqInt oop); + +# #endif + +# #if VM_PROXY_MINOR > 5 +# sqInt (*isArray)(sqInt oop); +# sqInt (*forceInterruptCheck)(void); +# #endif + +# #if VM_PROXY_MINOR > 6 +# sqInt (*fetchLong32ofObject)(sqInt fieldFieldIndex, sqInt oop); +# sqInt (*getThisSessionID)(void); +# sqInt (*ioFilenamefromStringofLengthresolveAliases)(char* aCharBuffer, char* filenameIndex, sqInt filenameLength, sqInt resolveFlag); +# sqInt (*vmEndianness)(void); +# #endif + +# #if VM_PROXY_MINOR > 7 +# /* New methods for proxy version 1.8 */ + +# /* callbackEnter: Re-enter the interpreter loop for a callback. +# Arguments: +# callbackID: Pointer to a location receiving the callback ID +# used in callbackLeave +# Returns: True if successful, false otherwise */ +# sqInt (*callbackEnter)(sqInt *callbackID); + +# /* callbackLeave: Leave the interpreter from a previous callback +# Arguments: +# callbackID: The ID of the callback received from callbackEnter() +# Returns: True if succcessful, false otherwise. */ +# sqInt (*callbackLeave)(sqInt callbackID); + +# /* addGCRoot: Add a variable location to the garbage collector. +# The contents of the variable location will be updated accordingly. +# Arguments: +# varLoc: Pointer to the variable location +# Returns: True if successful, false otherwise. */ +# sqInt (*addGCRoot)(sqInt *varLoc); + +# /* removeGCRoot: Remove a variable location from the garbage collector. +# Arguments: +# varLoc: Pointer to the variable location +# Returns: True if successful, false otherwise. 
+# */ +# sqInt (*removeGCRoot)(sqInt *varLoc); +# #endif + +# #if VM_PROXY_MINOR > 8 +# /* See interp.h and above for standard error codes. */ +# sqInt (*primitiveFailFor)(sqInt code); +# void (*(*setInterruptCheckChain)(void (*aFunction)(void)))(); +# sqInt (*classAlien)(void); +# sqInt (*classUnsafeAlien)(void); +# sqInt (*sendInvokeCallbackStackRegistersJmpbuf)(sqInt thunkPtrAsInt, sqInt stackPtrAsInt, sqInt regsPtrAsInt, sqInt jmpBufPtrAsInt); +# sqInt (*reestablishContextPriorToCallback)(sqInt callbackContext); +# sqInt *(*getStackPointer)(void); +# sqInt (*isOopImmutable)(sqInt oop); +# sqInt (*isOopMutable)(sqInt oop); +# #endif + +# #if VM_PROXY_MINOR > 9 +# sqInt (*methodArg) (sqInt index); +# sqInt (*objectArg) (sqInt index); +# sqInt (*integerArg) (sqInt index); +# double (*floatArg) (sqInt index); +# sqInt (*methodReturnValue) (sqInt oop); +# sqInt (*topRemappableOop) (void); +# #endif + +# #if VM_PROXY_MINOR > 10 +# # define DisownVMLockOutFullGC 1 +# sqInt (*disownVM)(sqInt flags); +# sqInt (*ownVM) (sqInt threadIdAndFlags); +# void (*addHighPriorityTickee)(void (*ticker)(void), unsigned periodms); +# void (*addSynchronousTickee)(void (*ticker)(void), unsigned periodms, unsigned roundms); +# usqLong (*utcMicroseconds)(void); +# sqInt (*tenuringIncrementalGC)(void); +# sqInt (*isYoung) (sqInt anOop); +# sqInt (*isKindOfClass)(sqInt oop, sqInt aClass); +# sqInt (*primitiveErrorTable)(void); +# sqInt (*primitiveFailureCode)(void); +# sqInt (*instanceSizeOf)(sqInt aClass); +# #endif + +# #if VM_PROXY_MINOR > 11 +# /* VMCallbackContext opaque type avoids all including setjmp.h & vmCallback.h */ +# sqInt (*sendInvokeCallbackContext)(vmccp); +# sqInt (*returnAsThroughCallbackContext)(int, vmccp, sqInt); +# long (*signedMachineIntegerValueOf)(sqInt); +# long (*stackSignedMachineIntegerValue)(sqInt); +# unsigned long (*positiveMachineIntegerValueOf)(sqInt); +# unsigned long (*stackPositiveMachineIntegerValue)(sqInt); +# sqInt (*getInterruptPending)(void); +# char *(*cStringOrNullFor)(sqInt); +# void *(*startOfAlienData)(sqInt); +# usqInt (*sizeOfAlienData)(sqInt); +# sqInt (*signalNoResume)(sqInt); +# #endif # ############################################################################## @@ -179,7 +470,7 @@ self.s_frame = None self.argcount = 0 self.s_method = None - self.successFlag = True + self.success_flag = True def call(self, signature, interp, s_frame, argcount, s_method): self.interp = interp diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -139,6 +139,9 @@ from spyvm.fieldtypes import obj return obj + def is_array_object(self): + return False + class W_SmallInteger(W_Object): """Boxed integer value""" # TODO can we tell pypy that its never larger then 31-bit? 
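Stripped of the RPython and oop details, the wrapping mechanism that this changeset introduces in interpreter_proxy.py boils down to a signature-driven decorator: it converts the raw arguments according to the given spec, re-wraps the result, and turns a primitive failure into a default return value. A minimal plain-Python sketch, with stand-in names and conversions rather than the real interpreter-proxy machinery:

    # Stand-alone sketch of the pattern; not the actual spyvm code.
    class PrimitiveFailed(Exception):
        pass

    functions = []                     # mirrors the module-level list of exposed functions

    def expose(unwrap_spec, result_type):
        defaults = {int: 0, float: 0.0, bool: False}
        def decorator(func):
            def wrapped(*raw_args):
                try:
                    args = [conv(a) for conv, a in zip(unwrap_spec, raw_args)]
                    return result_type(func(*args))
                except PrimitiveFailed:
                    return defaults[result_type]   # failure handled centrally
            functions.append(wrapped)
            return wrapped
        return decorator

    @expose([int, int], int)
    def add(x, y):
        return x + y

    assert add("3", 4.0) == 7          # raw values are converted on the way in

The real decorator does the same job with oop-to-object conversion and with the failure flag stored on IProxy, but the division of labour -- conversion and error handling in the wrapper, plain logic in the exposed function -- is the same.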
@@ -319,6 +322,9 @@ from spyvm.fieldtypes import LPI return LPI + def is_array_object(self): + return True + class W_Float(W_AbstractObjectWithIdentityHash): """Boxed float value.""" _attrs_ = ['value'] @@ -789,6 +795,9 @@ word += r_uint(ord(self.getchar(i))) << 8*i return word + def is_array_object(self): + return True + class W_WordsObject(W_AbstractObjectWithClassReference): _attrs_ = ['words'] @@ -856,6 +865,9 @@ return W_AbstractObjectWithClassReference.as_embellished_string(self, className='W_WordsObject', additionalInformation=('len=%d' % self.size())) + def is_array_object(self): + return True + NATIVE_DEPTH = 32 class W_DisplayBitmap(W_AbstractObjectWithClassReference): @@ -911,6 +923,8 @@ def setword(self, n, word): raise NotImplementedError("subclass responsibility") + def is_array_object(self): + return True class W_DisplayBitmap1Bit(W_DisplayBitmap): def getword(self, n): @@ -1162,6 +1176,9 @@ def has_shadow(self): return self._shadow is not None + def is_array_object(self): + return True + class DetachingShadowError(Exception): def __init__(self, old_shadow, new_shadow_class): self.old_shadow = old_shadow From noreply at buildbot.pypy.org Mon Jun 3 15:57:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Jun 2013 15:57:37 +0200 (CEST) Subject: [pypy-commit] stmgc default: In-progress Message-ID: <20130603135737.750031C0F88@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r61:82b9992a758c Date: 2013-06-03 15:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/82b9992a758c/ Log: In-progress diff --git a/c3/doc-objects.txt b/c3/doc-objects.txt new file mode 100644 --- /dev/null +++ b/c3/doc-objects.txt @@ -0,0 +1,76 @@ + + + +Object copies state transitions (changes of state of the *same* copy) +--------------------------------------------------------------------- + + + + Private freshly created + \ Private, with backup + \ ^ \ ^ + \ / \ | + commit \ modify / commit | | + \ / | | modify + V / | | + Protected, no backup V | + ^ ^ Protected, with backup + / | gc | + commit / `----------------' + / + / + Private copy of + a public obj + + + + Protected backup copy + \ + \ + stealing \ commit of newer version + \ ,-----------------. 
+ V | V + Up-to-date public copy Outdated public copy + + + +Kind of object copy h_revision +------------------------------------------------------------------- + +Private objects: +- freshly created PRN +- converted from a protected obj PRN +- private copy of a public obj PRN + +Protected objects: +- converted from fresh private obj (old PRN) +- converted from a private obj with backup ptr to backup +- backup copy of a private obj original h_revision +- backup copy still attached to a protected GT +- original obj after GC killed the backup GT + +Public objects: +- prebuilt object, never modified -1 +- other public object, never modified GT +- outdated, has a protected copy ptr to prot/priv | 2 +- outdated, target stolen ptr to next public copy + +Public stubs: +- from stealing: like outdated public objects +- from major GC: like outdated public objects with target stolen + + +PRN = Private revision number (negative odd number) +GT = Global time (positive odd number) + + + +Off-line data stored in the thread-local structure +-------------------------------------------------- + +- the PRN (private revision number): odd, negative, changes for every + transaction that commits + +- list of pairs (private converted from protected, backup copy) + +- dict {public obj: private copy} diff --git a/c3/doc-stmgc.txt b/c3/doc-stmgc.txt --- a/c3/doc-stmgc.txt +++ b/c3/doc-stmgc.txt @@ -12,24 +12,26 @@ from multiple threads is possible, and handled correctly (that's the whole point), but a relatively rare case. -So each object is classified as "public" or "protected". New objects -are protected until they are read by a different thread. The point is -to use very different mechanisms for public and for protected objects. -Public objects are visible by all threads, but read-only in memory; to -change them, a copy must be made, and the changes written to the copy -(the "redolog" approach to STM). Protected objects, on the other hand, -are modified in-place, with (if necessary) a copy of them being made -only for the purpose of a possible abort of the transaction (the -"undolog" approach). +So each object is classified as "public", "protected", or "private". +Objects are created private, and later become protected, and stay so as +long as they are not read by a different thread. The point is to use +very different mechanisms for public and for non-public objects. Public +objects are visible by all threads, but read-only in memory; to change +them, a copy must be made, and the changes written to the copy (the +"redolog" approach to STM). Non-public objects, on the other hand, are +modified in-place, with (if necessary) a copy of them being made only +for the purpose of a possible abort of the transaction (the "undolog" +approach). This is combined with a generational GC similar to PyPy's --- but here, each thread gets its own nursery and does its own "minor collections", independently of the others. -Objects start as protected, and when another thread tries to follow a -pointer to them, then it is that other thread's job to carefully "steal" -the object and turn it public (possibly making a copy of it if needed, -e.g. if it was still a young object living in the original nursery). +The idea of protected objects is that when another thread tries to +follow a pointer to them, then it is that other thread's job to +carefully "steal" the object and turn it public (possibly making a copy +of it if needed, e.g. if it was still a young object living in the +original nursery). 
The same object can exist temporarily in multiple versions: any number of public copies; at most one active protected copy; and optionally one @@ -62,14 +64,14 @@ committed revisions are globally ordered. This is the order that the multithreaded program appears to have followed serially. -The object copies exist in one of two main states: they can be -"protected" or "public". A copy is also called "private" when it was -modified by the transaction in progress; this copy is always protected -and invisible to other threads. When that transaction commits, all -private copies become protected, and remain so as long as it is accessed -only by the same thread. A copy becomes public only when another thread -requests access to it (or, more precisely, "steals" access to it). Once -public, a copy is immutable in memory. +The object copies exist in one of three main states: they can be +"public", "protected" or "private". A private copy is a copy that was +created or modified by the transaction in progress; this copy is always +invisible to other threads. When that transaction commits, all private +copies become protected. They remain protected as long as they are only +accessed by the same thread. A copy becomes public only when another +thread requests access to it (or, more precisely, "steals" access to +it). Once public, a copy is immutable in memory. From the point of view of the generational GC, each copy is either young or old. All new copies are allocated young. They become old at the @@ -83,17 +85,15 @@ memory and trigger a lot of major collections.) For the rest of this document we'll ignore young copies outside the nursery. -An object that was never seen by a different thread has got either one -of two copies, both protected: the "main" one, used by the thread, which -may be private or not depending on whether the object was modified in -the current transaction; and, if the object is private but older than -the current transaction, then it has got a backup copy whose purpose is -to record the state that the object had at the start of the current -transaction. +An object that was never seen by a different thread has got at most one +private copy (if it was created or modified by the current transaction) +and one protected copy (if it is older than the current transaction). +If it has two copies, then the private one is the regular copy, and the +other copy works as a backup copy that remembers the state that the +object had at the start of the current transaction. -If an object is committed and then no longer modified for long enough, -the next (minor or major) GC will free the space that was used by the -backup copy. +If an object has got a backup copy but isn't modified any more, the next +(minor or major) GC collection will free the backup copy. The way to share data between threads goes via prebuilt objects, which are always public: it is their existence that gives the starting point @@ -103,7 +103,7 @@ 1. A thread tries to write to a public object. This is done by allocating a fresh private copy of the public object. Then writes go to the private copy. If the transaction commits, the private copy becomes -simply protected, and the public object is made to point to it (with +protected, and the public object is made to point to it (with multithread care). From now on, any access to the public object from the same thread will work on the protected copy. Any access from a different thread will trigger "stealing", as explained next. 
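Case 1 above, a write to a public object, can be modelled in a few lines; the dictionary below plays the role of the `public_to_private` mapping listed in doc-objects.txt, and everything else is a deliberately simplified stand-in for the real C implementation:

    # Simplified model, for illustration only.
    import copy

    class Node(object):
        def __init__(self, value):
            self.value = value
            self.h_revision = 7                        # stands for some global-time revision

    class ThreadLocalState(object):
        def __init__(self, private_revision):
            self.private_revision = private_revision   # the PRN, a negative odd number
            self.public_to_private = {}                # {public obj: private copy}

    def write_to_public(state, public_obj):
        private = state.public_to_private.get(public_obj)
        if private is None:
            private = copy.copy(public_obj)            # fresh private copy
            private.h_revision = state.private_revision
            state.public_to_private[public_obj] = private
        return private                                 # all writes go to the copy

    state = ThreadLocalState(private_revision=-3)
    n = Node(41)
    priv = write_to_public(state, n)
    priv.value = 42
    assert n.value == 41 and priv.value == 42          # the public copy stays untouched

What the sketch leaves out is the commit step described above: turning the private copy into a protected one and making the public object point to it, with the necessary multithread care.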
@@ -141,9 +141,8 @@ ------------------- This design is made to optimize the hopefully common case: objects we -handle are mostly private or protected, or if they are public, they are -mostly read-only. We can design in consequence the following three -points: +handle are mostly protected, or if they are public, they are mostly +read-only. We can design in consequence the following three points: 1. the extra data stored in the objects (GC flags, and one extra word called `h_revision`). @@ -162,12 +161,12 @@ the header of the copy. But the recording in the read set is a bit more annoying. We need to maintain a thread-local *set* of all accessed objects, but we don't care about the order or recording the occasional -duplicate. Moreover we don't need to record the private objects; but we -do need all other protected objects, as well as public objects. The -best approach is probably to have a quick check "is it definitely -recorded already?" inline, and do the call if the check fails. It needs -careful design to be done in only a few CPU instructions, but it should -be possible. +duplicate. Moreover we don't need to record the private copies of +objects; but we do need all the protected and public objects. The best +approach is probably to have a quick check "is it definitely recorded +already?" inline, and do the call if the check fails. It needs careful +design to be done in only a few CPU instructions, but it should be +possible. (Code like this compiles to 4 instructions in the fast path: @@ -179,16 +178,16 @@ ) The case of the write barrier is similar to the first half of the read -barrier, but differs in the check we need to do. We need to do a call -if the object is not already private. For performance reasons, "being -private" is not directly a flag in the object, because when a -transaction commits, we don't want to have to walk all private objects -to change this flag. Instead, private objects have a precise negative -odd number in their `h_revision` field, called the "local revision -identifier". When a transaction commits, we change the value of the -local revision identifier, and all previously-private objects become -automatically protected. So the write barrier fast-path checks if the -`h_revision` is equal from the local revision identifier. +barrier. We need to do a call to the slow path if the object is not +already private. For performance reasons, "being private" is not +directly a flag in the object, because when a transaction commits, we +don't want to have to walk all private copies to change this flag. +Instead, private copies have a precise negative odd number in their +`h_revision` field, called the "private revision identifier". When a +transaction commits, we change the value of the private revision +identifier, and all previously-private objects become automatically +protected. So the write barrier fast-path checks if the `h_revision` is +equal to the private revision identifier. The extendable timestamp model @@ -268,29 +267,10 @@ transactions efficient.) -Details of protected objects ----------------------------- +Object copies in detail +----------------------- -As described above, each thread has a "local revision identifier". It -is a negative odd number that changes whenever it commits a transaction. -The important point for the write barrier is that on any object copy, -`h_revision` must be equal to the local revision identifier if and only -if the copy is private. A newly allocated object is always private. 
-Once the transaction commits it becomes merely protected. Its -`h_revision` field doesn't change (but the thread's local revision -identifier does). If later the write barrier triggers on it, we make a -backup copy of the object and copy the content of the primary copy to -it. We also set `h_revision` in the primary copy to point to the -backup copy: as long as `h_revision` is different from the local -revision identifier, its exact value is otherwise not used. In this way -we can keep using the same backup copy in each future transaction -that needs to write to the object. - -The backup copy is used in two cases. One is if the transaction aborts; -then we copy the content back over the regular protected copy. The -other case is if the object is stolen. In that case, if the object has -an active backup copy, we must steal this one, because the regular -protected copy is actually private at that point in time. +See doc-objects.txt. Minor and major collections @@ -322,5 +302,5 @@ references in existing objects to point to either the real copy or the stub. This is probably a bit involved: we might have to get the current revision numbers of all threads, and theoretically compact each interval -of number down to only one number, but still keep one active revision +of numbers down to only one number, but still keep one active revision number per thread. From noreply at buildbot.pypy.org Mon Jun 3 17:26:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Jun 2013 17:26:10 +0200 (CEST) Subject: [pypy-commit] pypy default: typo Message-ID: <20130603152610.4464D1C010B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64737:699be6433207 Date: 2013-06-03 17:25 +0200 http://bitbucket.org/pypy/pypy/changeset/699be6433207/ Log: typo diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -201,7 +201,7 @@ hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. From noreply at buildbot.pypy.org Mon Jun 3 18:38:33 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 3 Jun 2013 18:38:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge heads Message-ID: <20130603163833.46C741C145C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64739:f556942951f9 Date: 2013-06-03 16:40 +0000 http://bitbucket.org/pypy/pypy/changeset/f556942951f9/ Log: Merge heads diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. 
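The workaround described at the end of windows.rst (pointing the test machinery at mingw through the CC environment variable) would be used roughly as follows; the compiler path and the test directory are placeholders, not part of the changeset:

    # Hypothetical invocation -- paths are examples only.
    import os
    import subprocess
    import sys

    env = dict(os.environ, CC=r"C:\MinGW\bin\gcc.exe")     # example compiler path
    subprocess.check_call(
        [sys.executable, "pytest.py", "path/to/some/tests"],
        env=env)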
From noreply at buildbot.pypy.org Mon Jun 3 18:38:31 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 3 Jun 2013 18:38:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Change how boxes do cache lookup Message-ID: <20130603163831.A477B1C1401@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64738:e69d28ee34e0 Date: 2013-06-03 16:40 +0000 http://bitbucket.org/pypy/pypy/changeset/e69d28ee34e0/ Log: Change how boxes do cache lookup diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -30,7 +30,7 @@ def new_dtype_getter(name): def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return getattr(get_dtype_cache(space), "w_%sdtype" % name) + return get_dtype_cache(space).dtypes_by_name[name] def new(space, w_subtype, w_value): dtype = _get_dtype(space) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -273,7 +273,6 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) - print self.itemtype class W_ComplexDtype(W_Dtype): def __init__(self, itemtype, num, kind, name, char, w_box_type, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -21,7 +21,3 @@ a = zeros(3) assert loads(dumps(sum(a))) == sum(a) - - def setup_class(cls): - import py - py.test.xfail("FIXME: dtype('int32') == dtype('int32') fails (but only on 32-bit?)") From noreply at buildbot.pypy.org Mon Jun 3 19:24:46 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Jun 2013 19:24:46 +0200 (CEST) Subject: [pypy-commit] stmgc default: More preparation. Message-ID: <20130603172446.CA8CD1C0619@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r62:4456f79692e9 Date: 2013-06-03 19:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/4456f79692e9/ Log: More preparation. diff --git a/c3/doc-objects.txt b/c3/doc-objects.txt --- a/c3/doc-objects.txt +++ b/c3/doc-objects.txt @@ -8,19 +8,19 @@ Private freshly created \ Private, with backup - \ ^ \ ^ - \ / \ | - commit \ modify / commit | | - \ / | | modify - V / | | - Protected, no backup V | + \ ^ . | ^ + \ / . commit | | + commit \ modify / . | | + \ / . 
commit | | modify + V / V | | + Protected, no backup V | ^ ^ Protected, with backup / | gc | commit / `----------------' / / - Private copy of - a public obj + Private copy of (the dotted arrow is followed if the + a public obj protected backup copy was stolen) @@ -52,8 +52,8 @@ Public objects: - prebuilt object, never modified -1 - other public object, never modified GT -- outdated, has a protected copy ptr to prot/priv | 2 -- outdated, target stolen ptr to next public copy +- outdated, has a protected copy HANDLE to prot/priv copy +- outdated, target stolen ptr to a more recent public copy Public stubs: - from stealing: like outdated public objects @@ -62,6 +62,8 @@ PRN = Private revision number (negative odd number) GT = Global time (positive odd number) +HANDLE = Reference to a prot/priv copy and its thread + (positive even number, such that: handle % 4 == 2) @@ -71,6 +73,120 @@ - the PRN (private revision number): odd, negative, changes for every transaction that commits -- list of pairs (private converted from protected, backup copy) +- dict active_backup_copies = {private converted from protected: backup copy} -- dict {public obj: private copy} +- dict public_to_private = {public obj: private copy} + +- list read_set containing the objects in the read set, with possibly + some duplicates (but hopefully not too many) + + + +Kind of object copy distinguishing feature +------------------------------------------------------------------- + +Any private object h_revision == PRN +Private with a backup in active_backup_copies +Backup copy GCFLAG_BACKUP_COPY +Any public object GCFLAG_PUBLIC +Any protected object h_revision != PRN && !GCFLAG_PUBLIC +Stubs GCFLAG_STUB + +A public object that might \ +be key in public_to_private has additionally GCFLAG_PUBLIC_TO_PRIVATE + + + +Read barrier +----------------------------------------- + +Inline check: if P in read_barrier_cache, we don't call the slow path. +Slow path: + + if h_revision == PRN, just add P to read_barrier_cache and return + + if GCFLAG_PUBLIC: + + follow the chained list of h_revision's as long as they are + regular pointers + + if it ends with an odd revision number, check that it's older + than start_time; extend the start timestamp if not + + if it ends with a handle (L, Thread): + + if Thread is the current thread: set P = L + + else: do stealing and restart the read barrier + + if we land on a P in read_barrier_cache: return + + add P to 'read_set' + + add P to 'read_barrier_cache' and return + + +Handles are stored for example in a global list, and the actual handle +encodes an index in the list. Every entry in the list is a pointer to a +prot/priv object --- excepted once every N positions, where it is a +thread descriptor giving the thread to which all N-1 following pointers +belong. The pair (L, Thread) is thus `(list[H], list[H rounded down to +a multiple of N])`. + +Stealing of an object copy L is done with the "collection lock" of +the target Thread. The target would also acquire its own lock in +when doing some operations, like a minor collection, which can't +occur in parallel with stealing. + +Once we have the lock, stealing is: + + if the situation changed while we were waiting for the lock, return + + if L has got a backup copy, turn it public; + else L must be protected, and we make a public copy of it + + update the original P->h_revision to point directly to the new + public copy + + + +Write barrier +----------------------------------------- + +The write barrier works for both STM purposes and for GC purposes. 
+ +Inline check: if h_revision == PRN && !GCFLAG_WRITE_BARRIER, we're done. +Slow path: + + R = read_barrier(P) # always do a full read_barrier first + + if h_revision == PRN: + + GC only: remove GCFLAG_WRITE_BARRIER, add R to the GC list of + modified old objects to trace at the next minor collection, + and return R + + elif GCFLAG_PUBLIC: + + add the flag GCFLAG_PUBLIC_TO_PRIVATE to R, if needed + + make a fresh private copy L of R, with h_revision == PRN + + add {R: L} in 'public_to_private' + + return L + + else: # protected object + + if h_revision is not a pointer: + + allocate a backup copy, and attach it to h_revision + + copy the object into the backup copy + + change h_revision to be PRN (i.e. turn private) + + if GCFLAG_WRITE_BARRIER: remove it, add R to the GC list of + modified old objects to trace at the next minor collection + + return R From noreply at buildbot.pypy.org Tue Jun 4 01:35:24 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Jun 2013 01:35:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: apply 722471a15693 from default Message-ID: <20130603233524.0DF731C1007@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64741:43b233e93eed Date: 2013-06-03 16:34 -0700 http://bitbucket.org/pypy/pypy/changeset/43b233e93eed/ Log: apply 722471a15693 from default diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,7 +4,7 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re +import sys, os, re, imp from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version @@ -35,6 +35,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext(Command): @@ -671,10 +676,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols(self, ext): diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -1,9 +1,17 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). 
Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -49,18 +57,14 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') + g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check global _config_vars _config_vars = g @@ -70,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars From noreply at buildbot.pypy.org Tue Jun 4 01:35:22 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Jun 2013 01:35:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130603233522.C28211C0619@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64740:f088ba1adc09 Date: 2013-06-03 16:30 -0700 http://bitbucket.org/pypy/pypy/changeset/f088ba1adc09/ Log: merge default diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. 
+ so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -151,6 +151,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python3') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = 
imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__testcapi'] + '/EXPORT:PyInit_' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,14 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -try: - import cpyext -except ImportError: - raise ImportError("No module named '_testcapi'") -else: - compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,62 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = 
compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python3') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? + ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -46,3 +46,7 @@ .. branch: operrfmt-NT Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. 
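A rough sketch of such a run, driven from Python (the compiler name and the test path below are only placeholders, not taken from the docs):

    import os, subprocess, sys
    env = dict(os.environ)
    env['CC'] = 'gcc'    # or the full path to the mingw compiler executable
    subprocess.check_call(
        [sys.executable, 'pytest.py',
         'rpython/translator/c/test/test_genc.py'],
        env=env)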
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -607,6 +607,11 @@ class TestNonInteractive: def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (python3, python_flags, app_main, cmdline) print 'POPEN:', cmdline @@ -768,6 +773,11 @@ assert 'copyright' not in data def test_non_interactive_stdout_fully_buffered(self): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers @@ -789,6 +799,11 @@ def test_non_interactive_stdout_unbuffered(self, monkeypatch): monkeypatch.setenv('PYTHONUNBUFFERED', '1') + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec @@ -113,6 +114,14 @@ # ____________________________________________________________ +SF_MSVC_BITFIELDS = 1 + +if sys.platform == 'win32': + DEFAULT_SFLAGS = SF_MSVC_BITFIELDS +else: + DEFAULT_SFLAGS = 0 + + @unwrap_spec(name=str) def new_struct_type(space, name): return ctypestruct.W_CTypeStruct(space, name) @@ -121,9 +130,11 @@ def new_union_type(space, name): return ctypestruct.W_CTypeUnion(space, name) - at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int) + at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int, + sflags=int) def complete_struct_or_union(space, w_ctype, w_fields, w_ignored=None, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, + sflags=DEFAULT_SFLAGS): if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) or w_ctype.size >= 0): raise OperationError(space.w_TypeError, @@ -134,6 +145,8 @@ alignment = 1 boffset = 0 # this number is in *bits*, not bytes! 
boffsetmax = 0 # the maximum value of boffset, in bits too + prev_bitfield_size = 0 + prev_bitfield_free = 0 fields_w = space.listview(w_fields) fields_list = [] fields_dict = {} @@ -166,7 +179,15 @@ # update the total alignment requirement, but skip it if the # field is an anonymous bitfield falign = ftype.alignof() - if alignment < falign and (fbitsize < 0 or fname != ''): + do_align = True + if fbitsize >= 0: + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC: anonymous bitfields (of any size) don't cause alignment + do_align = (fname != '') + else: + # MSVC: zero-sized bitfields don't cause alignment + do_align = (fbitsize > 0) + if alignment < falign and do_align: alignment = falign # if fbitsize < 0: @@ -208,6 +229,7 @@ fields_dict[fname] = fld boffset += ftype.size * 8 + prev_bitfield_size = 0 else: # this is the case of a bitfield @@ -243,31 +265,67 @@ raise operationerrfmt(space.w_TypeError, "field '%s.%s' is declared with :0", w_ctype.name, fname) - if boffset > field_offset_bytes * 8: - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's notion of "ftype :0;" + # pad boffset to a value aligned for "ftype" + if boffset > field_offset_bytes * 8: + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + else: + # MSVC's notion of "ftype :0; + # Mostly ignored. It seems they only serve as + # separator between other bitfields, to force them + # into separate words. + pass + prev_bitfield_size = 0 + else: - # Can the field start at the offset given by 'boffset'? It - # can if it would entirely fit into an aligned ftype field. - bits_already_occupied = boffset - (field_offset_bytes * 8) + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's algorithm - if bits_already_occupied + fbitsize > 8 * ftype.size: - # it would not fit, we need to start at the next - # allowed position - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 - bitshift = 0 + # Can the field start at the offset given by 'boffset'? It + # can if it would entirely fit into an aligned ftype field. + bits_already_occupied = boffset - (field_offset_bytes * 8) + + if bits_already_occupied + fbitsize > 8 * ftype.size: + # it would not fit, we need to start at the next + # allowed position + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + bitshift = 0 + else: + bitshift = bits_already_occupied + assert bitshift >= 0 + boffset += fbitsize + else: - bitshift = bits_already_occupied + # MSVC's algorithm + + # A bitfield is considered as taking the full width + # of their declared type. It can share some bits + # with the previous field only if it was also a + # bitfield and used a type of the same size. 
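                    # A cffi-level sketch of the two layouts (the expected
                    # numbers follow the test_bitfield tests further below;
                    # the default rule set matches the host platform):
                    #
                    #     from cffi import FFI
                    #     ffi = FFI()
                    #     ffi.cdef("struct foo1 { char a; int b1:9;"
                    #              " unsigned int b2:7; char c; };")
                    #     ffi.alignof("struct foo1")        # 4 with either rule set
                    #     ffi.offsetof("struct foo1", "c")  # GCC rules: 3   MSVC rules: 8
                    #     ffi.sizeof("struct foo1")         # GCC rules: 4   MSVC rules: 12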
+ if (prev_bitfield_size == ftype.size and + prev_bitfield_free >= fbitsize): + # yes: reuse + bitshift = 8 * prev_bitfield_size - prev_bitfield_free + else: + # no: start a new full field + boffset = (boffset + falign*8-1) & ~(falign*8-1) + boffset += ftype.size * 8 + bitshift = 0 + prev_bitfield_size = ftype.size + prev_bitfield_free = 8 * prev_bitfield_size + # + prev_bitfield_free -= fbitsize + field_offset_bytes = boffset / 8 - ftype.size fld = ctypestruct.W_CField(ftype, field_offset_bytes, bitshift, fbitsize) fields_list.append(fld) fields_dict[fname] = fld - - boffset += fbitsize if boffset > boffsetmax: boffsetmax = boffset diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2757,36 +2757,57 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) -def test_bitfield_as_gcc(): +def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") BInt = new_primitive_type("int") + BUInt = new_primitive_type("unsigned int") BStruct = new_struct_type("foo1") complete_struct_or_union(BStruct, [('a', BChar, -1), - ('b', BInt, 9), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 3) - assert sizeof(BStruct) == 4 + ('b1', BInt, 9), + ('b2', BUInt, 7), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 3) + assert sizeof(BStruct) == 4 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 8) + assert sizeof(BStruct) == 12 assert alignof(BStruct) == 4 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BShort, 9), - ('c', BChar, -1)]) + ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 + if flag == 0: # gcc + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + else: # msvc + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BInt, 0), ('', BInt, 0), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 5 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 assert alignof(BStruct) == 1 +def test_bitfield_as_gcc(): + _test_bitfield_details(flag=0) + +def test_bitfield_as_msvc(): + _test_bitfield_details(flag=1) + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -280,7 +280,7 @@ return _absolute_import(space, modulename, baselevel, fromlist_w, tentative) finally: - lock.release_lock() + lock.release_lock(silent_after_fork=True) @jit.unroll_safe def absolute_import_try(space, modulename, baselevel, fromlist_w): @@ -775,9 +775,13 @@ self.lockowner = me self.lockcounter += 1 - def release_lock(self): + def release_lock(self, silent_after_fork): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: + if self.lockowner is None and silent_after_fork: + # Too bad. 
This situation can occur if a fork() occurred + # with the import lock held, and we're the child. + return if not self._can_have_lock(): return space = self.space diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -209,7 +209,7 @@ def release_lock(space): if space.config.objspace.usemodules.thread: - importing.getimportlock(space).release_lock() + importing.getimportlock(space).release_lock(silent_after_fork=False) def reinit_lock(space): if space.config.objspace.usemodules.thread: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -30,7 +30,7 @@ def new_dtype_getter(name): def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return getattr(get_dtype_cache(space), "w_%sdtype" % name) + return get_dtype_cache(space).dtypes_by_name[name] def new(space, w_subtype, w_value): dtype = _get_dtype(space) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -273,7 +273,6 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) - print self.itemtype class W_ComplexDtype(W_Dtype): def __init__(self, itemtype, num, kind, name, char, w_box_type, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -21,7 +21,3 @@ a = zeros(3) assert loads(dumps(sum(a))) == sum(a) - - def setup_class(cls): - import py - py.test.xfail("FIXME: dtype('int32') == dtype('int32') fails") diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -109,7 +109,7 @@ 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', 'mmap', 'marshal', '_codecs', 'rctime', 'cppyy', - '_cffi_backend', 'pyexpat', '_continuation']: + '_cffi_backend', 'pyexpat', '_continuation', '_io']: if modname == 'pypyjit' and 'interp_resop' in rest: return False return True diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -36,6 +36,10 @@ from pypy.module.rctime.interp_time import time assert pypypolicy.look_inside_function(time) +def test_io(): + from pypy.module._io.interp_bytesio import W_BytesIO + assert pypypolicy.look_inside_function(W_BytesIO.seek_w.im_func) + def test_pypy_module(): from pypy.module._collections.interp_deque import W_Deque from pypy.module._random.interp_random import W_Random diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -41,6 +41,10 @@ assert 'LOG_NOTICE' in d def test_resource(): + try: + import lib_pypy.resource + except ImportError: + py.test.skip('no syslog on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ 
b/pypy/objspace/std/formatting.py @@ -347,9 +347,8 @@ def std_wp(self, r): length = len(r) if do_unicode and isinstance(r, str): - # convert string to unicode explicitely here - from pypy.objspace.std.unicodetype import plain_str2unicode - r = plain_str2unicode(self.space, r) + # convert string to unicode using the default encoding + r = self.space.unicode_w(self.space.wrap(r)) prec = self.prec if prec == -1 and self.width == 0: # fast path @@ -508,12 +507,10 @@ result = formatter.format() except NeedUnicodeFormattingError: # fall through to the unicode case - from pypy.objspace.std.unicodetype import plain_str2unicode - fmt = plain_str2unicode(space, fmt) + pass else: return space.wrap(result) - else: - fmt = space.unicode_w(w_fmt) + fmt = space.unicode_w(w_fmt) formatter = UnicodeFormatter(space, fmt, values_w, w_valuedict) result = formatter.format() return space.wrap(result) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -12,22 +12,6 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject return W_UnicodeObject(uni) -def plain_str2unicode(space, s): - try: - return unicode(s) - except UnicodeDecodeError: - for i in range(len(s)): - if ord(s[i]) > 127: - raise OperationError( - space.w_UnicodeDecodeError, - space.newtuple([ - space.wrap('ascii'), - space.wrapbytes(s), - space.wrap(i), - space.wrap(i+1), - space.wrap("ordinal not in range(128)")])) - assert False, "unreachable" - unicode_capitalize = SMM('capitalize', 1, doc='S.capitalize() -> unicode\n\nReturn a' diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -8,6 +8,7 @@ JITFRAME_FIXED_SIZE) from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.locations import imm, StackLocation +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.opassembler import ResOpAssembler from rpython.jit.backend.arm.regalloc import (Regalloc, CoreRegisterManager, check_imm_arg, VFPRegisterManager, @@ -961,7 +962,7 @@ return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VLDR(target.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) @@ -982,7 +983,7 @@ return self._store_core_reg(mc, source, base, ofs, cond, helper) def _store_vfp_reg(self, mc, source, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VSTR(source.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -52,6 +52,8 @@ def _push_stack_args(self, stack_args, on_stack): assert on_stack % 8 == 0 + if on_stack == 0: + return self._adjust_sp(-on_stack) self.current_sp = on_stack ofs = 0 @@ -71,7 +73,7 @@ else: self.mc.gen_load_int(r.ip.value, n) self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value) - else: + elif n < 0: n = abs(n) if check_imm_arg(n): self.mc.SUB_ri(r.sp.value, r.sp.value, n) diff --git a/rpython/jit/backend/arm/helper/regalloc.py b/rpython/jit/backend/arm/helper/regalloc.py --- 
a/rpython/jit/backend/arm/helper/regalloc.py +++ b/rpython/jit/backend/arm/helper/regalloc.py @@ -4,7 +4,10 @@ from rpython.jit.metainterp.history import ConstInt from rpython.rlib.objectmodel import we_are_translated -def check_imm_arg(arg, size=0xFF, allow_zero=True): +VMEM_imm_size=0x3FC +default_imm_size=0xFF + +def check_imm_arg(arg, size=default_imm_size, allow_zero=True): assert not isinstance(arg, ConstInt) if not we_are_translated(): if not isinstance(arg, int): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -15,6 +15,7 @@ gen_emit_unary_float_op, saved_registers) from rpython.jit.backend.arm.helper.regalloc import check_imm_arg +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.jump import remap_frame_layout from rpython.jit.backend.arm.regalloc import TempBox @@ -23,6 +24,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (Box, AbstractFailDescr, INT, FLOAT, REF) from rpython.jit.metainterp.history import TargetToken @@ -523,35 +525,9 @@ def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs, size = arglocs - if size.value == 8: - assert value_loc.is_vfp_reg() - # vstr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VSTR(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.STR_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STR_rr(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - self.mc.STRH_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STRH_rr(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - self.mc.STRB_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STRB_rr(value_loc.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + self._write_to_mem(value_loc, base_loc, + ofs, imm(scale), fcond) return fcond emit_op_setfield_raw = emit_op_setfield_gc @@ -559,47 +535,8 @@ def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs signed = op.getdescr().is_field_signed() - if size.value == 8: - assert res.is_vfp_reg() - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VLDR(res.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDR_rr(res.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - if signed: - self.mc.LDRSH_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - 
self.mc.LDRSH_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_rr(res.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - if signed: - self.mc.LDRSB_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSB_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_rr(res.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) return fcond emit_op_getfield_raw = emit_op_getfield_gc @@ -609,72 +546,44 @@ def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) descr = op.getdescr() assert isinstance(descr, InteriorFieldDescr) signed = descr.fielddescr.is_field_signed() if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - - if fieldsize.value == 8: - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert res_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VLDR(res_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.LDR_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - assert 0 - + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + ofs_loc = tmploc + self._load_from_mem(res_loc, base_loc, ofs_loc, + imm(scale), signed, fcond) return fcond def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, value_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - if fieldsize.value == 8: - # vstr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert value_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VSTR(value_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.STR_rr(value_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - self.mc.STRH_rr(value_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - 
self.mc.STRB_rr(value_loc.value, base_loc.value, r.ip.value) - else: - assert 0 + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) return fcond emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): res, base_loc, ofs = arglocs - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) + self.load_reg(self.mc, res, base_loc, ofs.value) return fcond def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): @@ -694,18 +603,40 @@ def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): if scale.value == 3: assert value_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VSTR(value_loc.value, r.ip.value, cond=fcond) + # vstr only supports imm offsets + # so if the ofset is too large we add it to the base and use an + # offset of 0 + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([value_loc, base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + assert ofs_loc.value % 4 == 0 + self.mc.VSTR(value_loc.value, base_loc.value, ofs_loc.value) elif scale.value == 2: - self.mc.STR_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STR_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STR_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - self.mc.STRH_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STRH_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STRH_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 0: - self.mc.STRB_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STRB_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STRB_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -731,33 +662,63 @@ self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) ofs_loc = r.ip # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): if scale.value == 3: assert res_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VLDR(res_loc.value, r.ip.value, cond=fcond) + # vldr only supports imm offsets + # if the offset is in a register we add it to the base and use a + # tmp reg + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + assert ofs_loc.value % 4 == 0 + self.mc.VLDR(res_loc.value, base_loc.value, ofs_loc.value, cond=fcond) elif scale.value == 2: - self.mc.LDR_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + self.mc.LDR_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDR_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, - ofs_loc.value, 
cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 0: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -770,7 +731,7 @@ # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def emit_op_strlen(self, op, arglocs, regalloc, fcond): @@ -993,7 +954,7 @@ assert result_loc.is_vfp_reg() # we always have a register here, since we have to sync them # before call_assembler - self.mc.VLDR(result_loc.value, r.r0.value, imm=ofs) + self.load_reg(self.mc, result_loc, r.r0, ofs=ofs) else: assert result_loc is r.r0 ofs = self.cpu.unpack_arraydescr(descr) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -13,7 +13,9 @@ prepare_cmp_op, prepare_float_op, check_imm_arg, - check_imm_box + check_imm_box, + VMEM_imm_size, + default_imm_size, ) from rpython.jit.backend.arm.jump import remap_frame_layout_mixed from rpython.jit.backend.arm.arch import WORD, JITFRAME_FIXED_SIZE @@ -811,7 +813,8 @@ ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.make_sure_var_in_reg(a0, boxes) value_loc = self.make_sure_var_in_reg(a1, boxes) - if check_imm_arg(ofs): + ofs_size = default_imm_size if size < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) else: ofs_loc = self.get_scratch_reg(INT, boxes) @@ -825,7 +828,8 @@ ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if size < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, [a0]) @@ -846,7 +850,8 @@ base_loc = self.make_sure_var_in_reg(op.getarg(0), args) index_loc = self.make_sure_var_in_reg(op.getarg(1), args) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, args) @@ -865,7 +870,8 @@ index_loc = self.make_sure_var_in_reg(op.getarg(1), args) value_loc = self.make_sure_var_in_reg(op.getarg(2), args) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, args) @@ -890,8 +896,8 @@ scale = 
get_scale(size) args = op.getarglist() base_loc = self.make_sure_var_in_reg(args[0], args) + value_loc = self.make_sure_var_in_reg(args[2], args) ofs_loc = self.make_sure_var_in_reg(args[1], args) - value_loc = self.make_sure_var_in_reg(args[2], args) assert check_imm_arg(ofs) return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc diff --git a/rpython/jit/backend/arm/test/test_regalloc_mov.py b/rpython/jit/backend/arm/test/test_regalloc_mov.py --- a/rpython/jit/backend/arm/test/test_regalloc_mov.py +++ b/rpython/jit/backend/arm/test/test_regalloc_mov.py @@ -436,7 +436,7 @@ self.push(sf, e) def test_push_large_stackfloat(self): - sf = stack_float(100) + sf = stack_float(1000) e = [ mi('gen_load_int', ip.value, sf.value, cond=AL), mi('ADD_rr', ip.value, fp.value, ip.value, cond=AL), diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -17,7 +17,7 @@ def get_description(atypes, rtype): p = lltype.malloc(CIF_DESCRIPTION, len(atypes), flavor='raw', immortal=True) - p.abi = 42 + p.abi = 1 # default p.nargs = len(atypes) p.rtype = rtype p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes), diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -9,7 +9,7 @@ link_files = [] include_dirs = [] if sys.platform == 'win32' and platform.name != 'mingw32': - libraries = ['libeay32', 'ssleay32', 'zlib', + libraries = ['libeay32', 'ssleay32', 'zlib1', 'user32', 'advapi32', 'gdi32', 'msvcrt', 'ws2_32'] includes = [ # ssl.h includes winsock.h, which will conflict with our own diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -107,7 +107,6 @@ "_PyVerify_fd", [rffi.INT], rffi.INT, compilation_info=errno_eci, )) - @jit.dont_look_inside def validate_fd(fd): if not is_valid_fd(fd): raise OSError(get_errno(), 'Bad file descriptor') diff --git a/rpython/rlib/rstruct/runpack.py b/rpython/rlib/rstruct/runpack.py --- a/rpython/rlib/rstruct/runpack.py +++ b/rpython/rlib/rstruct/runpack.py @@ -7,7 +7,6 @@ from struct import unpack from rpython.rlib.rstruct.formatiterator import FormatIterator from rpython.rlib.rstruct.error import StructError -from rpython.rlib.rstruct.nativefmttable import native_is_bigendian class MasterReader(object): def __init__(self, s): @@ -30,9 +29,9 @@ def reader_for_pos(pos): class ReaderForPos(AbstractReader): - def __init__(self, mr): + def __init__(self, mr, bigendian): self.mr = mr - self.bigendian = native_is_bigendian + self.bigendian = bigendian def read(self, count): return self.mr.read(count) @@ -64,6 +63,7 @@ perform_lst = [] miniglobals = {} miniglobals.update(globals()) + miniglobals['bigendian'] = self.bigendian for i in rg: fmtdesc, rep, mask = self.formats[i] miniglobals['unpacker%d' % i] = fmtdesc.unpack @@ -74,8 +74,8 @@ else: perform_lst.append('unpacker%d(reader%d, %d)' % (i, i, rep)) miniglobals['reader_cls%d' % i] = reader_for_pos(i) - readers = ";".join(["reader%d = reader_cls%d(master_reader)" % (i, i) - for i in rg]) + readers = ";".join(["reader%d = reader_cls%d(master_reader, bigendian)" + % (i, i) for i in rg]) perform = ";".join(perform_lst) unpackers = ','.join(['reader%d.value' % i for i in rg]) source = py.code.Source(""" diff --git a/rpython/rlib/rstruct/test/test_runpack.py 
b/rpython/rlib/rstruct/test/test_runpack.py --- a/rpython/rlib/rstruct/test/test_runpack.py +++ b/rpython/rlib/rstruct/test/test_runpack.py @@ -26,6 +26,18 @@ assert fn() == 123 assert self.interpret(fn, []) == 123 + def test_unpack_big_endian(self): + def fn(): + return runpack(">i", "\x01\x02\x03\x04") + assert fn() == 0x01020304 + assert self.interpret(fn, []) == 0x01020304 + + def test_unpack_double_big_endian(self): + def fn(): + return runpack(">d", "testtest") + assert fn() == struct.unpack(">d", "testtest")[0] + assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0] + class TestLLType(BaseTestRStruct, LLRtypeMixin): pass diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -10,7 +10,7 @@ if compiler.name == "msvc": - libname = 'zlib' + libname = 'zlib1' # since version 1.1.4 and later, see http://www.zlib.net/DLL_FAQ.txt else: libname = 'z' eci = ExternalCompilationInfo( diff --git a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -288,9 +288,6 @@ assert a % struct.calcsize("P") == 0 def test_external_lib(): - # XXX this one seems to be a bit too platform-specific. Check - # how to test it on windows correctly (using so_prefix?) - # and what are alternatives to LD_LIBRARY_PATH eci = ExternalCompilationInfo() c_source = """ int f(int a, int b) @@ -298,12 +295,17 @@ return (a + b); } """ + if platform.name == 'mscv': + c_source = '__declspec(dllexport) ' + c_source + libname = 'libc_lib' + else: + libname = 'c_lib' tmpdir = udir.join('external_lib').ensure(dir=1) c_file = tmpdir.join('libc_lib.c') c_file.write(c_source) l = platform.compile([c_file], eci, standalone=False) eci = ExternalCompilationInfo( - libraries = ['c_lib'], + libraries = [libname], library_dirs = [str(tmpdir)] ) rffi_platform.verify_eci(eci) diff --git a/rpython/translator/c/gcc/test/test_asmgcroot.py b/rpython/translator/c/gcc/test/test_asmgcroot.py --- a/rpython/translator/c/gcc/test/test_asmgcroot.py +++ b/rpython/translator/c/gcc/test/test_asmgcroot.py @@ -25,8 +25,8 @@ @classmethod def make_config(cls): - if _MSVC and _WIN64: - py.test.skip("all asmgcroot tests disabled for MSVC X64") + if _MSVC: + py.test.skip("all asmgcroot tests disabled for MSVC") from rpython.config.translationoption import get_combined_translation_config config = get_combined_translation_config(translating=True) config.translation.gc = cls.gcpolicy diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -37,7 +37,7 @@ if isinstance(v, float): from rpython.rlib.rfloat import formatd, DTSF_ADD_DOT_0 return formatd(v, 'r', 0, DTSF_ADD_DOT_0) - return v + return str(v) # always return a string, to get consistent types def parse_longlong(a): p0, p1 = a.split(":") @@ -205,6 +205,28 @@ py.test.raises(Exception, f1, "world") # check that it's really typed +def test_int_becomes_float(): + # used to crash "very often": the long chain of mangle() calls end + # up converting the return value of f() from an int to a float, but + # if blocks are followed in random order by the annotator, it will + # very likely first follow the call to llrepr_out() done after the + # call to f(), getting an int first (and a float only later). 
+ @specialize.arg(1) + def mangle(x, chain): + if chain: + return mangle(x, chain[1:]) + return x - 0.5 + def f(x): + if x > 10: + x = mangle(x, (1,1,1,1,1,1,1,1,1,1)) + return x + 1 + + f1 = compile(f, [int]) + + assert f1(5) == 6 + assert f1(12) == 12.5 + + def test_string_arg(): def f(s): total = 0 diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -8,8 +8,9 @@ DEFAULT_CC = "cc" name = "openbsd" - link_flags = os.environ.get("LDFLAGS", '-pthread').split() - cflags = os.environ.get("CFLAGS", "-O3 -pthread -fomit-frame-pointer -D_BSD_SOURCE").split() + link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] + cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-D_BSD_SOURCE' + ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): libraries=set(libraries + ("intl", "iconv", "compat")) diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -28,6 +28,8 @@ return _get_compiler_type(cc, False) def Windows_x64(cc=None): + raise Exception("Win64 is not supported. You must either build for Win32" + " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) def _get_msvc_env(vsver, x64flag): From noreply at buildbot.pypy.org Tue Jun 4 02:32:15 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 4 Jun 2013 02:32:15 +0200 (CEST) Subject: [pypy-commit] pypy py3k: o fix identifier handling around space.getname (use unicode throughout) Message-ID: <20130604003215.54E021C0F88@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64742:4042cddab652 Date: 2013-06-03 17:29 -0700 http://bitbucket.org/pypy/pypy/changeset/4042cddab652/ Log: o fix identifier handling around space.getname (use unicode throughout) o space.getname\W_TypeObject.get_module_type_name now return unicode o space.getrepr now accepts unicode diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -68,7 +68,7 @@ ## con.interact() except OperationError, e: debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return 1 finally: @@ -76,7 +76,7 @@ space.call_function(w_run_toplevel, w_call_finish_gateway) except OperationError, e: debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return 1 return exitcode @@ -117,7 +117,7 @@ except OperationError, e: if verbose: debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return 1 @@ -155,7 +155,7 @@ stmt.exec_code(space, w_globals, w_globals) except OperationError, e: debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return 1 return 0 diff --git a/pypy/interpreter/baseobjspace.py 
b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -77,11 +77,12 @@ raise NotImplementedError("only for interp-level user subclasses " "from typedef.py") - def getname(self, space, default='?'): + def getname(self, space, default=u'?'): try: - return space.str_w(space.getattr(self, space.wrap('__name__'))) + return space.unicode_w(space.getattr(self, space.wrap('__name__'))) except OperationError, e: - if e.match(space, space.w_TypeError) or e.match(space, space.w_AttributeError): + if (e.match(space, space.w_TypeError) or + e.match(space, space.w_AttributeError)): return default raise @@ -104,10 +105,9 @@ w_id = space.rshift(w_id, w_4) return ''.join(addrstring) - def getrepr(self, space, info, moreinfo=''): - addrstring = self.getaddrstring(space) - return space.wrap("<%s at 0x%s%s>" % (info, addrstring, - moreinfo)) + def getrepr(self, space, info, moreinfo=u''): + addrstring = unicode(self.getaddrstring(space)) + return space.wrap(u"<%s at 0x%s%s>" % (info, addrstring, moreinfo)) def getslotvalue(self, index): raise NotImplementedError diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -414,15 +414,7 @@ elif fmt in 'NT': if fmt == 'T': value = space.type(value) - try: - w_name = space.getattr(value, space.wrap('__name__')) - except OperationError as e: - if not (e.match(space, space.w_TypeError) or - e.match(space, space.w_AttributeError)): - raise - result = u'?' - else: - result = space.unicode_w(w_name) + result = value.getname(space) else: result = unicode(value) lst[i + i + 1] = result @@ -450,7 +442,7 @@ %8 - The result of arg.decode('utf-8', 'strict') %N - The result of w_arg.getname(space) - %R - The result of space.str_w(space.repr(w_arg)) + %R - The result of space.unicode_w(space.repr(w_arg)) %T - The result of space.type(w_arg).getname(space) """ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -245,7 +245,8 @@ return self.call_args(__args__) def descr_function_repr(self): - return self.getrepr(self.space, 'function %s' % (self.name,)) + return self.getrepr(self.space, u'function %s' % + (self.name.decode('utf-8'),)) # delicate _all = {'': None} @@ -390,7 +391,7 @@ self.w_doc = w_doc def fget_func_name(self, space): - return space.wrap(self.name) + return space.wrap(self.name.decode('utf-8')) def fset_func_name(self, space, w_name): try: @@ -492,7 +493,7 @@ return space.wrap(method) def __repr__(self): - return "bound method %s" % (self.w_function.getname(self.space),) + return u"bound method %s" % (self.w_function.getname(self.space),) def call_args(self, args): space = self.space @@ -509,8 +510,8 @@ name = self.w_function.getname(self.space) w_class = space.type(self.w_instance) typename = w_class.getname(self.space) - objrepr = space.str_w(space.repr(self.w_instance)) - s = '' % (typename, name, objrepr) + objrepr = space.unicode_w(space.repr(self.w_instance)) + s = u'' % (typename, name, objrepr) return space.wrap(s) def descr_method_getattribute(self, w_attr): diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -135,8 +135,8 @@ def type(self, obj): class Type: - def getname(self, space, default='?'): - return type(obj).__name__ + def getname(self, space, default=u'?'): + return 
unicode(type(obj).__name__) return Type() diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -844,6 +844,14 @@ c = compile('from os import 日本', '', 'exec') assert ('日本',) in c.co_consts + def test_class_nonascii(self): + """ + class 日本: + pass + assert 日本.__name__ == '日本' + assert '日本' in repr(日本) + """ + def test_cpython_issue2301(self): skip('XXX') try: diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -126,7 +126,7 @@ def test_new_exception(space): w_error = new_exception_class(space, '_socket.error') - assert w_error.getname(space) == 'error' + assert w_error.getname(space) == u'error' assert space.str_w(space.repr(w_error)) == "" operr = OperationError(w_error, space.wrap("message")) assert operr.match(space, w_error) diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -105,7 +105,7 @@ w_class = space.type(seen[0].w_instance) found = 'method %s of %s' % ( seen[0].w_function.name, - w_class.getname(space)) + w_class.getname(space).encode('utf-8')) else: assert isinstance(seen[0], Function) found = 'builtin %s' % seen[0].name diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -138,6 +138,14 @@ __name__ = "bar" assert f.__module__ == "foo"''') + def test_func_nonascii(self): + """ + def 日本(): + pass + assert repr(日本).startswith('" % (module, typename,)) + return space.wrap(u"<%s.%s>" % (module, typename,)) else: - name_repr = space.str_w(space.repr(w_name)) - return space.wrap("<%s.%s name=%s>" % (module, typename, name_repr)) + name_repr = space.unicode_w(space.repr(w_name)) + return space.wrap(u"<%s.%s name=%s>" % (module, typename, name_repr)) # ______________________________________________ diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -201,27 +201,28 @@ w_realclass, _ = space.lookup_in_type_where(w_type, name) if isinstance(w_realclass, W_TypeObject): class_name = w_realclass.get_module_type_name() - return "{method '%s' of '%s' objects}" % (name, class_name) + return u"{method '%s' of '%s' objects}" % (name.decode('utf-8'), + class_name) @jit.elidable_promote() def create_spec_for_function(space, w_func): if w_func.w_module is None: - module = '' + module = u'' else: - module = space.str_w(w_func.w_module) - if module == 'builtins': - module = '' + module = space.unicode_w(w_func.w_module) + if module == u'builtins': + module = u'' else: - module += '.' - pre = 'built-in function ' if isinstance(w_func, BuiltinFunction) else '' - return '{%s%s%s}' % (pre, module, w_func.name) + module += u'.' 
+ pre = u'built-in function ' if isinstance(w_func, BuiltinFunction) else u'' + return u'{%s%s%s}' % (pre, module, w_func.getname(space)) @jit.elidable_promote() def create_spec_for_object(space, w_obj): class_name = space.type(w_obj).getname(space) - return "{'%s' object}" % (class_name,) + return u"{'%s' object}" % (class_name,) def create_spec(space, w_arg): diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -64,8 +64,9 @@ return space.newbool(bool(self.flags & WRITABLE)) def _repr(self, space, handle): - conn_type = ["read-only", "write-only", "read-write"][self.flags - 1] - return space.wrap("<%s %s, handle %d>" % ( + index = self.flags - 1 + conn_type = [u"read-only", u"write-only", u"read-write"][index] + return space.wrap(u"<%s %s, handle %d>" % ( conn_type, space.type(self).getname(space), handle)) def descr_repr(self, space): diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -172,15 +172,15 @@ def descr__repr__(self, space): w_obj = self.dereference() if w_obj is None: - state = '; dead' + state = u'; dead' else: typename = space.type(w_obj).getname(space) - objname = w_obj.getname(space, '') + objname = w_obj.getname(space, u'') if objname: - state = "; to '%s' (%s)" % (typename, objname) + state = u"; to '%s' (%s)" % (typename, objname) else: - state = "; to '%s'" % (typename,) - return self.getrepr(space, self.typedef.name, state) + state = u"; to '%s'" % (typename,) + return self.getrepr(space, unicode(self.typedef.name), state) class W_Weakref(W_WeakrefBase): diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -61,7 +61,7 @@ self.name = name self.__name__ = name def getname(self, space, name): - return self.name + return unicode(self.name) class FakeBuffer(FakeBase): typedname = "buffer" def __init__(self, val): diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -28,9 +28,8 @@ return space.call_args(self.w_function, __args__) def descr_repr(self, space): - name = space.str_w( - space.getattr(self.w_function, space.wrap('__name__'))) - return self.getrepr(space, '' % (name,)) + return self.getrepr(space, u'' % + (space.getname(self.w_function),)) InstanceMethod.typedef = TypeDef("instancemethod", __new__ = interp2app(InstanceMethod.descr_new), diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -124,9 +124,9 @@ return self.space.unwrap(self.descr_method_repr()) def descr_method_repr(self): - return self.getrepr(self.space, - "built-in method '%s' of '%s' object" % - (self.name, self.w_objclass.getname(self.space))) + return self.getrepr( + self.space, u"built-in method '%s' of '%s' object" % + (self.name.decode('utf-8'), self.w_objclass.getname(self.space))) PyCFunction_Check, PyCFunction_CheckExact = build_type_checkers( "CFunction", W_PyCFunctionObject) @@ -143,9 +143,9 @@ return self.space.unwrap(self.descr_method_repr()) def descr_method_repr(self): - return self.getrepr(self.space, - "built-in method '%s' of 
'%s' object" % - (self.name, self.w_objclass.getname(self.space))) + return self.getrepr( + self.space, u"built-in method '%s' of '%s' object" % + (self.name.decode('utf-8'), self.w_objclass.getname(self.space))) class W_PyCWrapperObject(W_Root): @@ -173,8 +173,8 @@ return self.wrapper_func(space, w_self, w_args, self.func) def descr_method_repr(self): - return self.space.wrap("" % - (self.method_name, + return self.space.wrap(u"" % + (self.method_name.decode('utf-8'), self.w_objclass.getname(self.space))) def cwrapper_descr_call(space, w_self, __args__): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -471,7 +471,7 @@ from pypy.module.cpyext.unicodeobject import _PyUnicode_AsString pto.c_tp_name = _PyUnicode_AsString(space, heaptype.c_ht_name) else: - pto.c_tp_name = rffi.str2charp(w_type.getname(space)) + pto.c_tp_name = rffi.str2charp(w_type.getname(space).encode('utf-8')) pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out pto.c_tp_itemsize = 0 # uninitialized fields: diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -131,9 +131,10 @@ def descr_repr(self, space): if self.args_w: - args_repr = space.str_w(space.repr(space.newtuple(self.args_w))) + args_repr = space.unicode_w( + space.repr(space.newtuple(self.args_w))) else: - args_repr = "()" + args_repr = u"()" clsname = self.getclass(space).getname(space) return space.wrap(clsname + args_repr) @@ -556,7 +557,7 @@ values_w = space.fixedview(self.args_w[1]) w_tuple = space.newtuple(values_w + [self.w_lastlineno]) args_w = [self.args_w[0], w_tuple] - args_repr = space.str_w(space.repr(space.newtuple(args_w))) + args_repr = space.unicode_w(space.repr(space.newtuple(args_w))) clsname = self.getclass(space).getname(space) return space.wrap(clsname + args_repr) else: diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -261,3 +261,13 @@ assert e.errno == errno.ENOTDIR else: assert False, "Expected OSError" + + def test_nonascii_name(self): + """ + class 日本(Exception): + pass + assert '日本' in repr(日本) + class 日本2(SyntaxError): + pass + assert '日本2' in repr(日本2) + """ diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -172,7 +172,7 @@ def descr__repr__(self): typename = space.type(self).getname(space) - return space.wrap("<%s owner=%d count=%d>" % ( + return space.wrap(u"<%s owner=%d count=%d>" % ( typename, self.rlock_owner, self.rlock_count)) @unwrap_spec(blocking=bool, timeout=float) diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1151,7 +1151,7 @@ w_self.w_dict = w_dict def descr_repr(self, space): - typename = space.type(self).getname(space).decode('utf-8') + typename = space.type(self).getname(space) w_seq = space.call_function(space.w_list, self) seq_repr = space.unicode_w(space.repr(w_seq)) return space.wrap(u"%s(%s)" % (typename, seq_repr)) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ 
-106,7 +106,7 @@ def _get_printable_location(w_type): return ('list__do_extend_from_iterable [w_type=%s]' % - w_type.getname(w_type.space)) + w_type.getname(w_type.space).encode('utf-8')) _do_extend_jitdriver = jit.JitDriver( diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -10,13 +10,13 @@ w_module = w_type.lookup("__module__") if w_module is not None: try: - modulename = space.str_w(w_module) + modulename = space.unicode_w(w_module) except OperationError, e: if not e.match(space, space.w_TypeError): raise else: - classname = '%s.%s' % (modulename, classname) - return w_obj.getrepr(space, '%s object' % (classname,)) + classname = u'%s.%s' % (modulename, classname) + return w_obj.getrepr(space, u'%s object' % (classname,)) def descr__str__(space, w_obj): w_type = space.type(w_obj) diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -683,9 +683,24 @@ assert d['A'].__module__ == 'builtins' # obscure, follows CPython assert repr(d['A']) == "" - def test_repr_unicode(self): + def test_repr_nonascii(self): assert repr(type('日本', (), {})) == "" % __name__ + def test_name_nonascii(self): + assert type('日本', (), {}).__name__ == '日本' + + def test_errors_nonascii(self): + # Check some arbitrary error messages + Japan = type('日本', (), {}) + obj = Japan() + for f in hex, int, len, next, open, set, 'foo'.startswith: + try: + f(obj) + except TypeError as e: + assert '日本' in str(e) + else: + assert False, 'Expected TypeError' + def test_invalid_mro(self): class A(object): pass diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py from pypy.interpreter import gateway from pypy.objspace.test import test_descriptor @@ -214,6 +215,11 @@ s = repr(Foo()) assert s.startswith(' Author: Philip Jenvey Branch: py3k Changeset: r64743:a554ac771ba3 Date: 2013-06-03 18:34 -0700 http://bitbucket.org/pypy/pypy/changeset/a554ac771ba3/ Log: fix translation diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -29,7 +29,7 @@ def descr_repr(self, space): return self.getrepr(space, u'' % - (space.getname(self.w_function),)) + (self.w_function.getname(space),)) InstanceMethod.typedef = TypeDef("instancemethod", __new__ = interp2app(InstanceMethod.descr_new), From noreply at buildbot.pypy.org Tue Jun 4 06:42:55 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 4 Jun 2013 06:42:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Inline into rbigint so that comparisons can be constant folded. Message-ID: <20130604044255.DD11E1C010B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r64744:661d7f7624dc Date: 2013-06-03 20:41 -0700 http://bitbucket.org/pypy/pypy/changeset/661d7f7624dc/ Log: Inline into rbigint so that comparisons can be constant folded. diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -118,7 +118,7 @@ def look_inside_function(self, func): mod = func.__module__ or '?' 
- if mod == 'rpython.rlib.rbigint' or mod == 'rpython.rlib.rlocale' or mod == 'rpython.rlib.rsocket': + if mod == 'rpython.rlib.rlocale' or mod == 'rpython.rlib.rsocket': return False if mod.startswith('pypy.interpreter.astcompiler.'): return False diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -8,7 +8,7 @@ def test_bigint(): from rpython.rlib.rbigint import rbigint - assert not pypypolicy.look_inside_function(rbigint.lt.im_func) + assert pypypolicy.look_inside_function(rbigint.lt.im_func) def test_rlocale(): from rpython.rlib.rlocale import setlocale diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -74,7 +74,6 @@ jump(..., descr=...) """) - def test_mixed_type_loop(self): def main(n): i = 0.0 @@ -94,7 +93,6 @@ jump(..., descr=...) """) - def test_cached_pure_func_of_equal_fields(self): def main(n): class A(object): @@ -196,7 +194,6 @@ jump(..., descr=...) """) - def test_chain_of_guards(self): src = """ class A(object): @@ -220,7 +217,6 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 - def test_unpack_iterable_non_list_tuple(self): def main(n): import array @@ -258,7 +254,6 @@ jump(..., descr=...) """) - def test_dont_trace_every_iteration(self): def main(a, b): i = sa = 0 @@ -289,7 +284,6 @@ assert log.result == 300 * (-10 % -20) assert log.jit_summary.tracing_no == 1 - def test_overflow_checking(self): """ This test only checks that we get the expected result, not that any @@ -298,7 +292,8 @@ def main(): import sys def f(a,b): - if a < 0: return -1 + if a < 0: + return -1 return a-b # total = sys.maxint - 2147483647 @@ -309,7 +304,6 @@ # self.run_and_check(main, []) - def test_global(self): log = self.run(""" i = 0 @@ -404,3 +398,14 @@ # the following assertion fails if the loop was cancelled due # to "abort: vable escape" assert len(log.loops_by_id("exc_info")) == 1 + + def test_long_comparison(self): + def main(n): + while n: + 12345L > 123L # ID: long_op + n -= 1 + + log = self.run(main, [300]) + loop, = log.loops_by_id("long_op") + assert log.match(""" + """) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -332,7 +332,7 @@ newstr = s2.malloc(len1 + len2) newstr.copy_contents_from_str(s1, newstr, 0, 0, len1) else: - newstr = s1.malloc(len1 + len2) + newstr = s1.malloc(len1 + len2) newstr.copy_contents(s1, newstr, 0, 0, len1) if typeOf(s2) == Ptr(STR): newstr.copy_contents_from_str(s2, newstr, 0, len1, len2) From noreply at buildbot.pypy.org Tue Jun 4 06:42:57 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 4 Jun 2013 06:42:57 +0200 (CEST) Subject: [pypy-commit] pypy default: trailing whitespace Message-ID: <20130604044257.217031C0619@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r64745:4830df760669 Date: 2013-06-03 20:44 -0700 http://bitbucket.org/pypy/pypy/changeset/4830df760669/ Log: trailing whitespace diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py --- a/rpython/jit/metainterp/optimizeopt/intutils.py +++ b/rpython/jit/metainterp/optimizeopt/intutils.py @@ -2,12 +2,14 @@ from rpython.rlib.objectmodel import we_are_translated from 
rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import BoxInt, ConstInt + MAXINT = maxint MININT = -maxint - 1 + class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') - + def __init__(self, lower, upper): self.has_upper = True self.has_lower = True @@ -29,7 +31,7 @@ def make_lt(self, other): return self.make_le(other.add(-1)) - + def make_ge(self, other): if other.has_lower: if not self.has_lower or other.lower > self.lower: @@ -86,7 +88,7 @@ r = True return r - + def add(self, offset): res = self.clone() try: @@ -101,7 +103,7 @@ def mul(self, value): return self.mul_bound(IntBound(value, value)) - + def add_bound(self, other): res = self.clone() if other.has_upper: @@ -115,7 +117,7 @@ try: res.lower = ovfcheck(res.lower + other.lower) except OverflowError: - res.has_lower = False + res.has_lower = False else: res.has_lower = False return res @@ -133,7 +135,7 @@ try: res.lower = ovfcheck(res.lower - other.upper) except OverflowError: - res.has_lower = False + res.has_lower = False else: res.has_lower = False return res @@ -196,7 +198,6 @@ else: return IntUnbounded() - def contains(self, val): if self.has_lower and val < self.lower: return False @@ -216,7 +217,7 @@ elif self.has_upper: return False return True - + def __repr__(self): if self.has_lower: l = '%d' % self.lower @@ -249,7 +250,7 @@ guards.append(op) op = ResOperation(rop.GUARD_TRUE, [res], None) guards.append(op) - + class IntUpperBound(IntBound): def __init__(self, upper): @@ -285,7 +286,7 @@ self._raise() def make_constant(self, value): self._raise() - def intersect(self, other): + def intersect(self, other): self._raise() def min4(t): From noreply at buildbot.pypy.org Tue Jun 4 06:42:59 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 4 Jun 2013 06:42:59 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20130604044259.6C9841C0F88@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r64746:dd7556ff00b9 Date: 2013-06-03 23:42 -0500 http://bitbucket.org/pypy/pypy/changeset/dd7556ff00b9/ Log: merged upstream diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. 
+ so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -151,6 +151,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = 
imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_testcapi.py @@ -0,0 +1,61 @@ +import os, sys, imp +import tempfile + +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. + """ + thisdir = os.path.dirname(__file__) + output_dir = tempfile.mkdtemp() + + from distutils.ccompiler import new_compiler + + compiler = new_compiler() + compiler.output_dir = output_dir + + # Compile .c file + include_dir = os.path.join(thisdir, '..', 'include') + if sys.platform == 'win32': + ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] + else: + ccflags = ['-fPIC', '-Wimplicit-function-declaration'] + res = compiler.compile([os.path.join(thisdir, csource)], + include_dirs=[include_dir], + extra_preargs=ccflags) + object_filename = res[0] + + # set link options + output_filename = modulename + _get_c_extension_suffix() + if sys.platform == 'win32': + # XXX libpypy-c.lib is currently not installed automatically + library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + if not os.path.exists(library + '.lib'): + #For a nightly build + library = os.path.join(thisdir, '..', 'include', 'python27') + if not os.path.exists(library + '.lib'): + # For a local translation + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + libraries = [library, 'oleaut32'] + extra_ldargs = ['/MANIFEST', # needed for VC10 + '/EXPORT:init' + modulename] + else: + libraries = [] + extra_ldargs = [] + + # link the dynamic library + compiler.link_shared_object( + [object_filename], + output_filename, + libraries=libraries, + extra_preargs=extra_ldargs) + + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,62 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed 
automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? + ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -46,3 +46,7 @@ .. branch: operrfmt-NT Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. 
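For illustration, the C-extension-suffix lookup added above (the _get_c_extension_suffix() helpers in distutils/command/build_ext.py and lib_pypy/_pypy_testcapi.py) boils down to scanning imp.get_suffixes() for the C_EXTENSION entry. A minimal sketch of the same lookup follows; the concrete values are only indicative: CPython 2.7 usually reports '.so' (or '.pyd' on Windows), while on PyPy with cpyext it is the versioned name that build_ext wants, the '.pypy-VERSION.so' form mentioned in the comment above, which is why it is preferred over the 'SO' config variable.

import imp

def c_extension_suffix():
    # same lookup as the _get_c_extension_suffix() helpers in the diffs above
    for ext, mode, typ in imp.get_suffixes():
        if typ == imp.C_EXTENSION:
            return ext
    return None   # caller falls back to sysconfig's 'SO' in that case

print(c_extension_suffix())   # e.g. '.so' on CPython 2.7, a versioned suffix on PyPy
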
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -578,6 +578,11 @@ class TestNonInteractive: def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) print 'POPEN:', cmdline @@ -706,6 +711,11 @@ assert 'copyright' not in data def test_non_interactive_stdout_fully_buffered(self): + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers @@ -726,6 +736,11 @@ def test_non_interactive_stdout_unbuffered(self, monkeypatch): monkeypatch.setenv('PYTHONUNBUFFERED', '1') + if os.name == 'nt': + try: + import __pypy__ + except: + py.test.skip('app_main cannot run on non-pypy for windows') path = getscript(r""" import sys, time sys.stdout.write('\x00(STDOUT)\n\x00') diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec @@ -113,6 +114,14 @@ # ____________________________________________________________ +SF_MSVC_BITFIELDS = 1 + +if sys.platform == 'win32': + DEFAULT_SFLAGS = SF_MSVC_BITFIELDS +else: + DEFAULT_SFLAGS = 0 + + @unwrap_spec(name=str) def new_struct_type(space, name): return ctypestruct.W_CTypeStruct(space, name) @@ -121,9 +130,11 @@ def new_union_type(space, name): return ctypestruct.W_CTypeUnion(space, name) - at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int) + at unwrap_spec(w_ctype=ctypeobj.W_CType, totalsize=int, totalalignment=int, + sflags=int) def complete_struct_or_union(space, w_ctype, w_fields, w_ignored=None, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, + sflags=DEFAULT_SFLAGS): if (not isinstance(w_ctype, ctypestruct.W_CTypeStructOrUnion) or w_ctype.size >= 0): raise OperationError(space.w_TypeError, @@ -134,6 +145,8 @@ alignment = 1 boffset = 0 # this number is in *bits*, not bytes! 
boffsetmax = 0 # the maximum value of boffset, in bits too + prev_bitfield_size = 0 + prev_bitfield_free = 0 fields_w = space.listview(w_fields) fields_list = [] fields_dict = {} @@ -166,7 +179,15 @@ # update the total alignment requirement, but skip it if the # field is an anonymous bitfield falign = ftype.alignof() - if alignment < falign and (fbitsize < 0 or fname != ''): + do_align = True + if fbitsize >= 0: + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC: anonymous bitfields (of any size) don't cause alignment + do_align = (fname != '') + else: + # MSVC: zero-sized bitfields don't cause alignment + do_align = (fbitsize > 0) + if alignment < falign and do_align: alignment = falign # if fbitsize < 0: @@ -208,6 +229,7 @@ fields_dict[fname] = fld boffset += ftype.size * 8 + prev_bitfield_size = 0 else: # this is the case of a bitfield @@ -243,31 +265,67 @@ raise operationerrfmt(space.w_TypeError, "field '%s.%s' is declared with :0", w_ctype.name, fname) - if boffset > field_offset_bytes * 8: - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's notion of "ftype :0;" + # pad boffset to a value aligned for "ftype" + if boffset > field_offset_bytes * 8: + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + else: + # MSVC's notion of "ftype :0; + # Mostly ignored. It seems they only serve as + # separator between other bitfields, to force them + # into separate words. + pass + prev_bitfield_size = 0 + else: - # Can the field start at the offset given by 'boffset'? It - # can if it would entirely fit into an aligned ftype field. - bits_already_occupied = boffset - (field_offset_bytes * 8) + if (sflags & SF_MSVC_BITFIELDS) == 0: + # GCC's algorithm - if bits_already_occupied + fbitsize > 8 * ftype.size: - # it would not fit, we need to start at the next - # allowed position - field_offset_bytes += falign - assert boffset < field_offset_bytes * 8 - boffset = field_offset_bytes * 8 - bitshift = 0 + # Can the field start at the offset given by 'boffset'? It + # can if it would entirely fit into an aligned ftype field. + bits_already_occupied = boffset - (field_offset_bytes * 8) + + if bits_already_occupied + fbitsize > 8 * ftype.size: + # it would not fit, we need to start at the next + # allowed position + field_offset_bytes += falign + assert boffset < field_offset_bytes * 8 + boffset = field_offset_bytes * 8 + bitshift = 0 + else: + bitshift = bits_already_occupied + assert bitshift >= 0 + boffset += fbitsize + else: - bitshift = bits_already_occupied + # MSVC's algorithm + + # A bitfield is considered as taking the full width + # of their declared type. It can share some bits + # with the previous field only if it was also a + # bitfield and used a type of the same size. 
+ if (prev_bitfield_size == ftype.size and + prev_bitfield_free >= fbitsize): + # yes: reuse + bitshift = 8 * prev_bitfield_size - prev_bitfield_free + else: + # no: start a new full field + boffset = (boffset + falign*8-1) & ~(falign*8-1) + boffset += ftype.size * 8 + bitshift = 0 + prev_bitfield_size = ftype.size + prev_bitfield_free = 8 * prev_bitfield_size + # + prev_bitfield_free -= fbitsize + field_offset_bytes = boffset / 8 - ftype.size fld = ctypestruct.W_CField(ftype, field_offset_bytes, bitshift, fbitsize) fields_list.append(fld) fields_dict[fname] = fld - - boffset += fbitsize if boffset > boffsetmax: boffsetmax = boffset diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2757,36 +2757,57 @@ assert wr() is None py.test.raises(RuntimeError, from_handle, cast(BCharP, 0)) -def test_bitfield_as_gcc(): +def _test_bitfield_details(flag): BChar = new_primitive_type("char") BShort = new_primitive_type("short") BInt = new_primitive_type("int") + BUInt = new_primitive_type("unsigned int") BStruct = new_struct_type("foo1") complete_struct_or_union(BStruct, [('a', BChar, -1), - ('b', BInt, 9), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 3) - assert sizeof(BStruct) == 4 + ('b1', BInt, 9), + ('b2', BUInt, 7), + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 3) + assert sizeof(BStruct) == 4 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 8) + assert sizeof(BStruct) == 12 assert alignof(BStruct) == 4 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BShort, 9), - ('c', BChar, -1)]) + ('c', BChar, -1)], -1, -1, -1, flag) assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 - assert alignof(BStruct) == 1 + if flag == 0: # gcc + assert sizeof(BStruct) == 5 + assert alignof(BStruct) == 1 + else: # msvc + assert sizeof(BStruct) == 6 + assert alignof(BStruct) == 2 # BStruct = new_struct_type("foo2") complete_struct_or_union(BStruct, [('a', BChar, -1), ('', BInt, 0), ('', BInt, 0), - ('c', BChar, -1)]) - assert typeoffsetof(BStruct, 'c') == (BChar, 4) - assert sizeof(BStruct) == 5 + ('c', BChar, -1)], -1, -1, -1, flag) + if flag == 0: # gcc + assert typeoffsetof(BStruct, 'c') == (BChar, 4) + assert sizeof(BStruct) == 5 + else: # msvc + assert typeoffsetof(BStruct, 'c') == (BChar, 1) + assert sizeof(BStruct) == 2 assert alignof(BStruct) == 1 +def test_bitfield_as_gcc(): + _test_bitfield_details(flag=0) + +def test_bitfield_as_msvc(): + _test_bitfield_details(flag=1) + + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.7" diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -303,7 +303,7 @@ return _absolute_import(space, modulename, baselevel, fromlist_w, tentative) finally: - lock.release_lock() + lock.release_lock(silent_after_fork=True) @jit.unroll_safe def absolute_import_try(space, modulename, baselevel, fromlist_w): @@ -788,9 +788,13 @@ self.lockowner = me self.lockcounter += 1 - def release_lock(self): + def release_lock(self, silent_after_fork): me = self.space.getexecutioncontext() # used as thread ident if self.lockowner is not me: + if self.lockowner is None and silent_after_fork: + # Too bad. 
This situation can occur if a fork() occurred + # with the import lock held, and we're the child. + return if not self._can_have_lock(): return space = self.space diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -177,7 +177,7 @@ def release_lock(space): if space.config.objspace.usemodules.thread: - importing.getimportlock(space).release_lock() + importing.getimportlock(space).release_lock(silent_after_fork=False) def reinit_lock(space): if space.config.objspace.usemodules.thread: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -30,7 +30,7 @@ def new_dtype_getter(name): def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return getattr(get_dtype_cache(space), "w_%sdtype" % name) + return get_dtype_cache(space).dtypes_by_name[name] def new(space, w_subtype, w_value): dtype = _get_dtype(space) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -273,7 +273,6 @@ fields = space.getitem(w_data, space.wrap(4)) self.set_fields(space, fields) - print self.itemtype class W_ComplexDtype(W_Dtype): def __init__(self, itemtype, num, kind, name, char, w_box_type, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -21,7 +21,3 @@ a = zeros(3) assert loads(dumps(sum(a))) == sum(a) - - def setup_class(cls): - import py - py.test.xfail("FIXME: dtype('int32') == dtype('int32') fails") diff --git a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py --- a/pypy/module/test_lib_pypy/test_ctypes_config_cache.py +++ b/pypy/module/test_lib_pypy/test_ctypes_config_cache.py @@ -41,6 +41,10 @@ assert 'LOG_NOTICE' in d def test_resource(): + try: + import lib_pypy.resource + except ImportError: + py.test.skip('no syslog on this platform') d = run('resource.ctc.py', '_resource_cache.py') assert 'RLIM_NLIMITS' in d diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -352,9 +352,8 @@ def std_wp(self, r): length = len(r) if do_unicode and isinstance(r, str): - # convert string to unicode explicitely here - from pypy.objspace.std.unicodetype import plain_str2unicode - r = plain_str2unicode(self.space, r) + # convert string to unicode using the default encoding + r = self.space.unicode_w(self.space.wrap(r)) prec = self.prec if prec == -1 and self.width == 0: # fast path @@ -509,12 +508,10 @@ result = formatter.format() except NeedUnicodeFormattingError: # fall through to the unicode case - from pypy.objspace.std.unicodetype import plain_str2unicode - fmt = plain_str2unicode(space, fmt) + pass else: return space.wrap(result) - else: - fmt = space.unicode_w(w_fmt) + fmt = space.unicode_w(w_fmt) formatter = UnicodeFormatter(space, fmt, values_w, w_valuedict) result = formatter.format() return space.wrap(result) diff --git a/pypy/objspace/std/test/test_stringobject.py b/pypy/objspace/std/test/test_stringobject.py --- a/pypy/objspace/std/test/test_stringobject.py +++ b/pypy/objspace/std/test/test_stringobject.py @@ 
-530,6 +530,12 @@ del sys.modules[module_name] temp_sys.setdefaultencoding('utf-8') assert u''.join(['\xc3\xa1']) == u'\xe1' + # + assert ('\xc3\xa1:%s' % u'\xe2') == u'\xe1:\xe2' + class Foo(object): + def __repr__(self): + return '\xc3\xa2' + assert u'\xe1:%r' % Foo() == u'\xe1:\xe2' finally: temp_sys.setdefaultencoding(old_encoding) sys.modules.update(self.original_modules) diff --git a/pypy/objspace/std/unicodetype.py b/pypy/objspace/std/unicodetype.py --- a/pypy/objspace/std/unicodetype.py +++ b/pypy/objspace/std/unicodetype.py @@ -13,22 +13,6 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject return W_UnicodeObject(uni) -def plain_str2unicode(space, s): - try: - return unicode(s) - except UnicodeDecodeError: - for i in range(len(s)): - if ord(s[i]) > 127: - raise OperationError( - space.w_UnicodeDecodeError, - space.newtuple([ - space.wrap('ascii'), - space.wrap(s), - space.wrap(i), - space.wrap(i+1), - space.wrap("ordinal not in range(128)")])) - assert False, "unreachable" - unicode_capitalize = SMM('capitalize', 1, doc='S.capitalize() -> unicode\n\nReturn a' diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -8,6 +8,7 @@ JITFRAME_FIXED_SIZE) from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.locations import imm, StackLocation +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.opassembler import ResOpAssembler from rpython.jit.backend.arm.regalloc import (Regalloc, CoreRegisterManager, check_imm_arg, VFPRegisterManager, @@ -961,7 +962,7 @@ return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VLDR(target.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) @@ -982,7 +983,7 @@ return self._store_core_reg(mc, source, base, ofs, cond, helper) def _store_vfp_reg(self, mc, source, base, ofs, cond=c.AL, helper=r.ip): - if check_imm_arg(ofs): + if check_imm_arg(ofs, VMEM_imm_size): mc.VSTR(source.value, base.value, imm=ofs, cond=cond) else: mc.gen_load_int(helper.value, ofs, cond=cond) diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -52,6 +52,8 @@ def _push_stack_args(self, stack_args, on_stack): assert on_stack % 8 == 0 + if on_stack == 0: + return self._adjust_sp(-on_stack) self.current_sp = on_stack ofs = 0 @@ -71,7 +73,7 @@ else: self.mc.gen_load_int(r.ip.value, n) self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value) - else: + elif n < 0: n = abs(n) if check_imm_arg(n): self.mc.SUB_ri(r.sp.value, r.sp.value, n) diff --git a/rpython/jit/backend/arm/helper/regalloc.py b/rpython/jit/backend/arm/helper/regalloc.py --- a/rpython/jit/backend/arm/helper/regalloc.py +++ b/rpython/jit/backend/arm/helper/regalloc.py @@ -4,7 +4,10 @@ from rpython.jit.metainterp.history import ConstInt from rpython.rlib.objectmodel import we_are_translated -def check_imm_arg(arg, size=0xFF, allow_zero=True): +VMEM_imm_size=0x3FC +default_imm_size=0xFF + +def check_imm_arg(arg, size=default_imm_size, allow_zero=True): assert not isinstance(arg, ConstInt) if not we_are_translated(): if not isinstance(arg, int): diff --git 
a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -15,6 +15,7 @@ gen_emit_unary_float_op, saved_registers) from rpython.jit.backend.arm.helper.regalloc import check_imm_arg +from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder from rpython.jit.backend.arm.jump import remap_frame_layout from rpython.jit.backend.arm.regalloc import TempBox @@ -23,6 +24,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (Box, AbstractFailDescr, INT, FLOAT, REF) from rpython.jit.metainterp.history import TargetToken @@ -523,35 +525,9 @@ def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs, size = arglocs - if size.value == 8: - assert value_loc.is_vfp_reg() - # vstr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VSTR(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.STR_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STR_rr(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - self.mc.STRH_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STRH_rr(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - self.mc.STRB_ri(value_loc.value, base_loc.value, ofs.value) - else: - self.mc.STRB_rr(value_loc.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + self._write_to_mem(value_loc, base_loc, + ofs, imm(scale), fcond) return fcond emit_op_setfield_raw = emit_op_setfield_gc @@ -559,47 +535,8 @@ def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond): base_loc, ofs, res, size = arglocs signed = op.getdescr().is_field_signed() - if size.value == 8: - assert res.is_vfp_reg() - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - if ofs.is_reg(): - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value) - base_loc = r.ip - ofs = imm(0) - else: - assert ofs.value % 4 == 0 - self.mc.VLDR(res.value, base_loc.value, ofs.value) - elif size.value == 4: - if ofs.is_imm(): - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDR_rr(res.value, base_loc.value, ofs.value) - elif size.value == 2: - if ofs.is_imm(): - if signed: - self.mc.LDRSH_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSH_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRH_rr(res.value, base_loc.value, ofs.value) - elif size.value == 1: - if ofs.is_imm(): - if signed: - self.mc.LDRSB_ri(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_ri(res.value, base_loc.value, ofs.value) - else: - if signed: - self.mc.LDRSB_rr(res.value, base_loc.value, ofs.value) - else: - self.mc.LDRB_rr(res.value, base_loc.value, ofs.value) - else: - assert 0 + scale = get_scale(size.value) + 
self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) return fcond emit_op_getfield_raw = emit_op_getfield_gc @@ -609,72 +546,44 @@ def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) descr = op.getdescr() assert isinstance(descr, InteriorFieldDescr) signed = descr.fielddescr.is_field_signed() if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - - if fieldsize.value == 8: - # vldr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert res_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VLDR(res_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.LDR_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, r.ip.value) - else: - assert 0 - + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + ofs_loc = tmploc + self._load_from_mem(res_loc, base_loc, ofs_loc, + imm(scale), signed, fcond) return fcond def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): (base_loc, index_loc, value_loc, ofs_loc, ofs, itemsize, fieldsize) = arglocs - self.mc.gen_load_int(r.ip.value, itemsize.value) - self.mc.MUL(r.ip.value, index_loc.value, r.ip.value) + scale = get_scale(fieldsize.value) + tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) + assert not save + self.mc.gen_load_int(tmploc.value, itemsize.value) + self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) if ofs.value > 0: if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value) + self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) else: - self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value) - if fieldsize.value == 8: - # vstr only supports imm offsets - # so if the ofset is too large we add it to the base and use an - # offset of 0 - assert value_loc.is_vfp_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value) - self.mc.VSTR(value_loc.value, r.ip.value, 0) - elif fieldsize.value == 4: - self.mc.STR_rr(value_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 2: - self.mc.STRH_rr(value_loc.value, base_loc.value, r.ip.value) - elif fieldsize.value == 1: - self.mc.STRB_rr(value_loc.value, base_loc.value, r.ip.value) - else: - assert 0 + self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) + self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) return fcond emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): res, base_loc, ofs = arglocs - self.mc.LDR_ri(res.value, base_loc.value, ofs.value) + self.load_reg(self.mc, res, base_loc, ofs.value) return fcond def 
emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): @@ -694,18 +603,40 @@ def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): if scale.value == 3: assert value_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VSTR(value_loc.value, r.ip.value, cond=fcond) + # vstr only supports imm offsets + # so if the ofset is too large we add it to the base and use an + # offset of 0 + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([value_loc, base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + assert ofs_loc.value % 4 == 0 + self.mc.VSTR(value_loc.value, base_loc.value, ofs_loc.value) elif scale.value == 2: - self.mc.STR_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STR_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STR_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - self.mc.STRH_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STRH_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STRH_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 0: - self.mc.STRB_rr(value_loc.value, base_loc.value, ofs_loc.value, - cond=fcond) + if ofs_loc.is_imm(): + self.mc.STRB_ri(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.STRB_rr(value_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -731,33 +662,63 @@ self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) ofs_loc = r.ip # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): if scale.value == 3: assert res_loc.is_vfp_reg() - assert ofs_loc.is_reg() - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value) - self.mc.VLDR(res_loc.value, r.ip.value, cond=fcond) + # vldr only supports imm offsets + # if the offset is in a register we add it to the base and use a + # tmp reg + if ofs_loc.is_reg(): + tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) + assert not save + self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) + base_loc = tmploc + ofs_loc = imm(0) + else: + assert ofs_loc.is_imm() + assert ofs_loc.value % 4 == 0 + self.mc.VLDR(res_loc.value, base_loc.value, ofs_loc.value, cond=fcond) elif scale.value == 2: - self.mc.LDR_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + self.mc.LDR_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDR_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 1: - if signed: - self.mc.LDRSH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRH_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRH_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) elif scale.value == 
0: - if signed: - self.mc.LDRSB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if ofs_loc.is_imm(): + if signed: + self.mc.LDRSB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_ri(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: - self.mc.LDRB_rr(res_loc.value, base_loc.value, - ofs_loc.value, cond=fcond) + if signed: + self.mc.LDRSB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) + else: + self.mc.LDRB_rr(res_loc.value, base_loc.value, + ofs_loc.value, cond=fcond) else: assert 0 @@ -770,7 +731,7 @@ # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed) + self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) return fcond def emit_op_strlen(self, op, arglocs, regalloc, fcond): @@ -993,7 +954,7 @@ assert result_loc.is_vfp_reg() # we always have a register here, since we have to sync them # before call_assembler - self.mc.VLDR(result_loc.value, r.r0.value, imm=ofs) + self.load_reg(self.mc, result_loc, r.r0, ofs=ofs) else: assert result_loc is r.r0 ofs = self.cpu.unpack_arraydescr(descr) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -13,7 +13,9 @@ prepare_cmp_op, prepare_float_op, check_imm_arg, - check_imm_box + check_imm_box, + VMEM_imm_size, + default_imm_size, ) from rpython.jit.backend.arm.jump import remap_frame_layout_mixed from rpython.jit.backend.arm.arch import WORD, JITFRAME_FIXED_SIZE @@ -811,7 +813,8 @@ ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.make_sure_var_in_reg(a0, boxes) value_loc = self.make_sure_var_in_reg(a1, boxes) - if check_imm_arg(ofs): + ofs_size = default_imm_size if size < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) else: ofs_loc = self.get_scratch_reg(INT, boxes) @@ -825,7 +828,8 @@ ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if size < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, [a0]) @@ -846,7 +850,8 @@ base_loc = self.make_sure_var_in_reg(op.getarg(0), args) index_loc = self.make_sure_var_in_reg(op.getarg(1), args) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, args) @@ -865,7 +870,8 @@ index_loc = self.make_sure_var_in_reg(op.getarg(1), args) value_loc = self.make_sure_var_in_reg(op.getarg(2), args) immofs = imm(ofs) - if check_imm_arg(ofs): + ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size + if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: ofs_loc = self.get_scratch_reg(INT, args) @@ -890,8 +896,8 @@ scale = get_scale(size) args = op.getarglist() base_loc = self.make_sure_var_in_reg(args[0], args) + value_loc = self.make_sure_var_in_reg(args[2], args) ofs_loc = self.make_sure_var_in_reg(args[1], args) - value_loc = self.make_sure_var_in_reg(args[2], args) assert check_imm_arg(ofs) return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc diff --git a/rpython/jit/backend/arm/test/test_regalloc_mov.py b/rpython/jit/backend/arm/test/test_regalloc_mov.py 
--- a/rpython/jit/backend/arm/test/test_regalloc_mov.py +++ b/rpython/jit/backend/arm/test/test_regalloc_mov.py @@ -436,7 +436,7 @@ self.push(sf, e) def test_push_large_stackfloat(self): - sf = stack_float(100) + sf = stack_float(1000) e = [ mi('gen_load_int', ip.value, sf.value, cond=AL), mi('ADD_rr', ip.value, fp.value, ip.value, cond=AL), diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -17,7 +17,7 @@ def get_description(atypes, rtype): p = lltype.malloc(CIF_DESCRIPTION, len(atypes), flavor='raw', immortal=True) - p.abi = 42 + p.abi = 1 # default p.nargs = len(atypes) p.rtype = rtype p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes), diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -9,7 +9,7 @@ link_files = [] include_dirs = [] if sys.platform == 'win32' and platform.name != 'mingw32': - libraries = ['libeay32', 'ssleay32', + libraries = ['libeay32', 'ssleay32', 'zlib1', 'user32', 'advapi32', 'gdi32', 'msvcrt', 'ws2_32'] includes = [ # ssl.h includes winsock.h, which will conflict with our own diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -103,11 +103,10 @@ _set_errno(rffi.cast(INT, errno)) if os.name == 'nt': - is_valid_fd = rffi.llexternal( + is_valid_fd = jit.dont_look_inside(rffi.llexternal( "_PyVerify_fd", [rffi.INT], rffi.INT, compilation_info=errno_eci, - ) - @jit.dont_look_inside + )) def validate_fd(fd): if not is_valid_fd(fd): raise OSError(get_errno(), 'Bad file descriptor') diff --git a/rpython/rlib/rstruct/runpack.py b/rpython/rlib/rstruct/runpack.py --- a/rpython/rlib/rstruct/runpack.py +++ b/rpython/rlib/rstruct/runpack.py @@ -7,7 +7,6 @@ from struct import unpack from rpython.rlib.rstruct.formatiterator import FormatIterator from rpython.rlib.rstruct.error import StructError -from rpython.rlib.rstruct.nativefmttable import native_is_bigendian class MasterReader(object): def __init__(self, s): @@ -30,9 +29,9 @@ def reader_for_pos(pos): class ReaderForPos(AbstractReader): - def __init__(self, mr): + def __init__(self, mr, bigendian): self.mr = mr - self.bigendian = native_is_bigendian + self.bigendian = bigendian def read(self, count): return self.mr.read(count) @@ -64,6 +63,7 @@ perform_lst = [] miniglobals = {} miniglobals.update(globals()) + miniglobals['bigendian'] = self.bigendian for i in rg: fmtdesc, rep, mask = self.formats[i] miniglobals['unpacker%d' % i] = fmtdesc.unpack @@ -74,8 +74,8 @@ else: perform_lst.append('unpacker%d(reader%d, %d)' % (i, i, rep)) miniglobals['reader_cls%d' % i] = reader_for_pos(i) - readers = ";".join(["reader%d = reader_cls%d(master_reader)" % (i, i) - for i in rg]) + readers = ";".join(["reader%d = reader_cls%d(master_reader, bigendian)" + % (i, i) for i in rg]) perform = ";".join(perform_lst) unpackers = ','.join(['reader%d.value' % i for i in rg]) source = py.code.Source(""" diff --git a/rpython/rlib/rstruct/test/test_runpack.py b/rpython/rlib/rstruct/test/test_runpack.py --- a/rpython/rlib/rstruct/test/test_runpack.py +++ b/rpython/rlib/rstruct/test/test_runpack.py @@ -26,6 +26,18 @@ assert fn() == 123 assert self.interpret(fn, []) == 123 + def test_unpack_big_endian(self): + def fn(): + return runpack(">i", "\x01\x02\x03\x04") + assert fn() == 0x01020304 + assert self.interpret(fn, []) == 0x01020304 + + def 
test_unpack_double_big_endian(self): + def fn(): + return runpack(">d", "testtest") + assert fn() == struct.unpack(">d", "testtest")[0] + assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0] + class TestLLType(BaseTestRStruct, LLRtypeMixin): pass diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -10,7 +10,7 @@ if compiler.name == "msvc": - libname = 'zlib' + libname = 'zlib1' # since version 1.1.4 and later, see http://www.zlib.net/DLL_FAQ.txt else: libname = 'z' eci = ExternalCompilationInfo( diff --git a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -288,9 +288,6 @@ assert a % struct.calcsize("P") == 0 def test_external_lib(): - # XXX this one seems to be a bit too platform-specific. Check - # how to test it on windows correctly (using so_prefix?) - # and what are alternatives to LD_LIBRARY_PATH eci = ExternalCompilationInfo() c_source = """ int f(int a, int b) @@ -298,12 +295,17 @@ return (a + b); } """ + if platform.name == 'mscv': + c_source = '__declspec(dllexport) ' + c_source + libname = 'libc_lib' + else: + libname = 'c_lib' tmpdir = udir.join('external_lib').ensure(dir=1) c_file = tmpdir.join('libc_lib.c') c_file.write(c_source) l = platform.compile([c_file], eci, standalone=False) eci = ExternalCompilationInfo( - libraries = ['c_lib'], + libraries = [libname], library_dirs = [str(tmpdir)] ) rffi_platform.verify_eci(eci) diff --git a/rpython/translator/c/gcc/test/test_asmgcroot.py b/rpython/translator/c/gcc/test/test_asmgcroot.py --- a/rpython/translator/c/gcc/test/test_asmgcroot.py +++ b/rpython/translator/c/gcc/test/test_asmgcroot.py @@ -25,8 +25,8 @@ @classmethod def make_config(cls): - if _MSVC and _WIN64: - py.test.skip("all asmgcroot tests disabled for MSVC X64") + if _MSVC: + py.test.skip("all asmgcroot tests disabled for MSVC") from rpython.config.translationoption import get_combined_translation_config config = get_combined_translation_config(translating=True) config.translation.gc = cls.gcpolicy diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -37,7 +37,7 @@ if isinstance(v, float): from rpython.rlib.rfloat import formatd, DTSF_ADD_DOT_0 return formatd(v, 'r', 0, DTSF_ADD_DOT_0) - return v + return str(v) # always return a string, to get consistent types def parse_longlong(a): p0, p1 = a.split(":") @@ -205,6 +205,28 @@ py.test.raises(Exception, f1, "world") # check that it's really typed +def test_int_becomes_float(): + # used to crash "very often": the long chain of mangle() calls end + # up converting the return value of f() from an int to a float, but + # if blocks are followed in random order by the annotator, it will + # very likely first follow the call to llrepr_out() done after the + # call to f(), getting an int first (and a float only later). 
+ @specialize.arg(1) + def mangle(x, chain): + if chain: + return mangle(x, chain[1:]) + return x - 0.5 + def f(x): + if x > 10: + x = mangle(x, (1,1,1,1,1,1,1,1,1,1)) + return x + 1 + + f1 = compile(f, [int]) + + assert f1(5) == 6 + assert f1(12) == 12.5 + + def test_string_arg(): def f(s): total = 0 diff --git a/rpython/translator/platform/openbsd.py b/rpython/translator/platform/openbsd.py --- a/rpython/translator/platform/openbsd.py +++ b/rpython/translator/platform/openbsd.py @@ -8,8 +8,9 @@ DEFAULT_CC = "cc" name = "openbsd" - link_flags = os.environ.get("LDFLAGS", '-pthread').split() - cflags = os.environ.get("CFLAGS", "-O3 -pthread -fomit-frame-pointer -D_BSD_SOURCE").split() + link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread'] + cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-D_BSD_SOURCE' + ] + os.environ.get("CFLAGS", "").split() def _libs(self, libraries): libraries=set(libraries + ("intl", "iconv", "compat")) diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -28,6 +28,8 @@ return _get_compiler_type(cc, False) def Windows_x64(cc=None): + raise Exception("Win64 is not supported. You must either build for Win32" + " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) def _get_msvc_env(vsver, x64flag): From noreply at buildbot.pypy.org Tue Jun 4 10:37:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 10:37:55 +0200 (CEST) Subject: [pypy-commit] stmgc default: Mention a trade-off to explore Message-ID: <20130604083755.BA70A1C1527@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r63:20644865dbf8 Date: 2013-06-04 10:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/20644865dbf8/ Log: Mention a trade-off to explore diff --git a/c3/doc-objects.txt b/c3/doc-objects.txt --- a/c3/doc-objects.txt +++ b/c3/doc-objects.txt @@ -126,6 +126,12 @@ add P to 'read_barrier_cache' and return +It is unclear if it's better to stick all private objects into the +read_barrier_cache, or to extend the read barrier's inline code to also +check if h_revision == PRN (trade-off of polluting the cache with +private objects which might be a majority, vs. making the inline check +larger). + Handles are stored for example in a global list, and the actual handle encodes an index in the list. 
Every entry in the list is a pointer to a prot/priv object --- excepted once every N positions, where it is a From noreply at buildbot.pypy.org Tue Jun 4 11:19:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 11:19:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in kostialopuhin/pypy/ctypes-byref (pull request #152) Message-ID: <20130604091923.E8ADF1C12FE@cobra.cs.uni-duesseldorf.de> Author: arigo Branch: Changeset: r64748:aae5e8afa929 Date: 2013-06-04 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/aae5e8afa929/ Log: Merged in kostialopuhin/pypy/ctypes-byref (pull request #152) Fix for getting ctypes.byref contents diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py @@ -108,6 +108,13 @@ py.test.raises(TypeError, delitem, p, 0) + def test_byref(self): + for ct, pt in zip(ctype_types, python_types): + i = ct(42) + p = byref(i) + assert type(p._obj) is ct + assert p._obj.value == 42 + def test_pointer_to_pointer(self): x = c_int(32) y = c_int(42) From noreply at buildbot.pypy.org Tue Jun 4 11:19:22 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 4 Jun 2013 11:19:22 +0200 (CEST) Subject: [pypy-commit] pypy ctypes-byref: test and fix for getting ctypes.byref contents Message-ID: <20130604091922.9B7071C0651@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: ctypes-byref Changeset: r64747:3404eb318128 Date: 2013-05-20 00:23 +0400 http://bitbucket.org/pypy/pypy/changeset/3404eb318128/ Log: test and fix for getting ctypes.byref contents diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py @@ -108,6 +108,13 @@ py.test.raises(TypeError, delitem, p, 0) + def test_byref(self): + for ct, pt in zip(ctype_types, python_types): + i = ct(42) + p = byref(i) + assert type(p._obj) is ct + assert p._obj.value == 42 + def test_pointer_to_pointer(self): x = c_int(32) y = c_int(42) From noreply at buildbot.pypy.org Tue Jun 4 11:58:32 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Implement skipping asserts when -O option is set Message-ID: <20130604095832.82A6C1C1527@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64750:5e679b2d667e Date: 2013-04-28 04:23 -0500 http://bitbucket.org/pypy/pypy/changeset/5e679b2d667e/ Log: Implement skipping asserts when -O option is set diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- 
a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jabs_op('JUMP_IF_NOT_DEBUG', 204) # Target address del def_op, name_op, jrel_op, jabs_op diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,7 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "remove docstrings when importing modules (like CPython -OO)", + "skip assert statements and remove docstrings when importing modules", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -91,12 +91,13 @@ space = option.make_objspace(config) if interactiveconfig.optimize: - flags = space.sys.get('flags').getitems_copy() + flags_w = space.sys.get('flags').getitems_copy() #change optimize flag's value - flags[6] = space.wrap(2) - flags = type(space.sys.get('flags'))(flags) - flags.user_setup(space, space.sys.get('flags').w__class__) - space.sys.w_dict.setitem(space.wrap('flags'), flags) + import pdb; pdb.set_trace() + flags_w[6] = space.wrap(2) + w_flags = type(space.sys.get('flags'))(flags_w) + w_flags.user_setup(space, space.sys.get('flags').w__class__) + space.sys.w_dict.setitem(space.wrap('flags'), w_flags) space._starttime = starttime space.setitem(space.sys.w_dict, space.wrap('executable'), diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,10 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. -OO - Remove docstrings when importing modules (like CPython -OO). + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). 
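Illustration, not part of the changeset: the new JUMP_IF_NOT_DEBUG opcode is emitted in front of every assert, so an interpreter started with -O (sys.flags.optimize >= 1) skips the whole statement, matching CPython. A rough pure-Python sketch of that behaviour follows; the helper name checked_sqrt is invented for the example:

    import sys

    def checked_sqrt(x):
        # roughly what "assert x >= 0, 'negative input'" now compiles to:
        # JUMP_IF_NOT_DEBUG jumps past both the test and the raise
        # whenever the interpreter was started with -O.
        if not sys.flags.optimize:
            if not (x >= 0):
                raise AssertionError('negative input')
        return x ** 0.5

    print checked_sqrt(9.0)   # 3.0; under -O the range check is never evaluated
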
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -4,8 +4,8 @@ """ options: -i inspect interactively after running script - -O dummy optimization flag for compatibility with C Python - -OO remove docstrings when importing modules (like CPython -OO) + -O skip assert statements + -OO remove docstrings when importing modules in addition to -O -c cmd program passed in as CMD (terminates option list) -S do not 'import site' on initialization -u unbuffered binary stdout and stderr diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -634,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -373,6 +373,7 @@ def visit_Assert(self, asrt): self.update_position(asrt.lineno) end = self.new_block() + self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end, True) asrt.test.accept_jump_if(self, True, end) self.emit_op_name(ops.LOAD_GLOBAL, self.names, "AssertionError") if asrt.msg: diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -844,6 +844,23 @@ yield self.check, dict_w, "C4.__doc__", 'docstring' yield self.check, dict_w, "__doc__", None + def test_assert_skipping(self): + space = self.space + source = """if 1: + assert False + """ + w_saved_flags = space.sys.get('flags') + flags_w = space.sys.get('flags').getitems_copy() + flags_w[6] = space.wrap(1) + w_flags = type(space.sys.get('flags'))(flags_w) + w_flags.user_setup(space, w_saved_flags.w__class__) + space.sys.w_dict.setitem(space.wrap('flags'), w_flags) + try: + self.run(source) + finally: + space.sys.w_dict.setitem(space.wrap('flags'), w_saved_flags) + + class AppTestCompiler: def test_docstring_not_loaded(self): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -876,6 +876,15 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + try: + optimize = self.space.sys.get_flag('optimize') + except: + optimize = 0 + if optimize >= 1: + return target + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -800,6 +800,9 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) From noreply at buildbot.pypy.org Tue Jun 4 11:58:31 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Add support for -OO flag Message-ID: <20130604095831.0F9FB1C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64749:711d2a861680 Date: 2013-04-26 17:31 -0500 
http://bitbucket.org/pypy/pypy/changeset/711d2a861680/ Log: Add support for -OO flag diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,7 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "remove docstrings when importing modules (like CPython -OO)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -90,6 +90,14 @@ space = option.make_objspace(config) + if interactiveconfig.optimize: + flags = space.sys.get('flags').getitems_copy() + #change optimize flag's value + flags[6] = space.wrap(2) + flags = type(space.sys.get('flags'))(flags) + flags.user_setup(space, space.sys.get('flags').w__class__) + space.sys.w_dict.setitem(space.wrap('flags'), flags) + space._starttime = starttime space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -18,6 +18,9 @@ -O Dummy optimization flag for compatibility with C Python. +-OO + Remove docstrings when importing modules (like CPython -OO). + -c *cmd* Program passed in as CMD (terminates option list). 
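Illustration, not from the diff: the new CO_KILL_DOCSTRING flag marks code objects whose slot-0 constant is a plain docstring, and importing under -OO then replaces that constant with None. The function below is invented for the example:

    def greet(name):
        "Return a friendly greeting."   # dropped when the module is imported under -OO
        return "hello, %s" % (name,)

    # imported normally:       greet.__doc__ == "Return a friendly greeting."
    # imported under pypy -OO: greet.__doc__ is None; greet("world") still works

Explicit assignments such as __doc__ = 'docstring' (class C2 in the test above) are left untouched, since in that case the compiler never sets the flag.
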
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -5,6 +5,7 @@ options: -i inspect interactively after running script -O dummy optimization flag for compatibility with C Python + -OO remove docstrings when importing modules (like CPython -OO) -c cmd program passed in as CMD (terminates option list) -S do not 'import site' on initialization -u unbuffered binary stdout and stderr diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -255,6 +255,7 @@ start = 1 doc_expr.walkabout(self) self.name_op("__doc__", ast.Store) + self.scope.doc_removable = True for i in range(start, len(body)): body[i].walkabout(self) return True @@ -1189,7 +1190,10 @@ tree.walkabout(self) def _get_code_flags(self): - return 0 + flags = 0 + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING + return flags class AbstractFunctionCodeGenerator(PythonCodeGenerator): @@ -1216,6 +1220,8 @@ flags |= consts.CO_VARARGS if scope.has_keywords_arg: flags |= consts.CO_VARKEYWORDS + if scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING if not self.cell_vars and not self.free_vars: flags |= consts.CO_NOFREE return PythonCodeGenerator._get_code_flags(self) | flags @@ -1232,6 +1238,7 @@ doc_expr = None if doc_expr is not None: self.add_const(doc_expr.s) + self.scope.doc_removable = True start = 1 else: self.add_const(self.space.w_None) @@ -1294,3 +1301,9 @@ self._handle_body(cls.body) self.emit_op(ops.LOAD_LOCALS) self.emit_op(ops.RETURN_VALUE) + + def _get_code_flags(self): + flags = 0 + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING + return PythonCodeGenerator._get_code_flags(self) | flags diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -15,6 +15,8 @@ CO_FUTURE_WITH_STATEMENT = 0x8000 CO_FUTURE_PRINT_FUNCTION = 0x10000 CO_FUTURE_UNICODE_LITERALS = 0x20000 +#pypy specific: +CO_KILL_DOCSTRING = 0x100000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -42,6 +42,7 @@ self.has_free = False self.child_has_free = False self.nested = False + self.doc_removable = False def lookup(self, name): """Find the scope of identifier 'name'.""" diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -812,6 +812,37 @@ """ self.simple_test(source, 'ok', 1) + def test_remove_docstring(self): + source = '"module_docstring"\n' + """if 1: + def f1(): + 'docstring' + def f2(): + 'docstring' + return 'docstring' + class C1(): + 'docstring' + class C2(): + __doc__ = 
'docstring' + class C3(): + field = 'not docstring' + class C4(): + 'docstring' + field = 'docstring' + """ + code_w = compile_with_astcompiler(source, 'exec', self.space) + code_w.remove_docstrings(self.space) + dict_w = self.space.newdict(); + code_w.exec_code(self.space, dict_w, dict_w) + + yield self.check, dict_w, "f1.__doc__", None + yield self.check, dict_w, "f2.__doc__", 'docstring' + yield self.check, dict_w, "C1.__doc__", None + yield self.check, dict_w, "C2.__doc__", 'docstring' + yield self.check, dict_w, "C3.field", 'not docstring' + yield self.check, dict_w, "C4.field", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "__doc__", None class AppTestCompiler: diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR) + CO_GENERATOR, CO_KILL_DOCSTRING) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -218,6 +218,13 @@ return w_first return space.w_None + def remove_docstrings(self, space): + if self.co_flags & CO_KILL_DOCSTRING: + self.co_consts_w[0] = space.w_None + for co_w in self.co_consts_w: + if isinstance(co_w, PyCode): + co_w.remove_docstrings(space) + def _to_code(self): """For debugging only.""" consts = [None] * len(self.co_consts_w) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -913,6 +913,13 @@ if not space.is_true(space.sys.get('dont_write_bytecode')): write_compiled_module(space, code_w, cpathname, mode, mtime) + try: + optimize = space.sys.get_flag('optimize') + except: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + update_code_filenames(space, code_w, pathname) exec_code_module(space, w_mod, code_w) @@ -1007,6 +1014,13 @@ "Bad magic number in %s", cpathname) #print "loading pyc file:", cpathname code_w = read_compiled_module(space, cpathname, source) + try: + optimize = space.sys.get_flag('optimize') + except: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + exec_code_module(space, w_mod, code_w) return w_mod From noreply at buildbot.pypy.org Tue Jun 4 11:58:33 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Make changing sys.flag.optimize in pyinteractive.py cleaner Message-ID: <20130604095833.CB7351C1528@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64751:bcac3e99fa6f Date: 2013-04-28 04:55 -0500 http://bitbucket.org/pypy/pypy/changeset/bcac3e99fa6f/ Log: Make changing sys.flag.optimize in pyinteractive.py cleaner diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -90,19 +90,19 @@ space = option.make_objspace(config) - if interactiveconfig.optimize: - flags_w = space.sys.get('flags').getitems_copy() - #change optimize flag's value - import pdb; pdb.set_trace() - flags_w[6] = space.wrap(2) - w_flags = type(space.sys.get('flags'))(flags_w) - w_flags.user_setup(space, space.sys.get('flags').w__class__) - space.sys.w_dict.setitem(space.wrap('flags'), w_flags) - 
space._starttime = starttime space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + flags_w = space.sys.get('flags').getitems_copy() + #change the optimize flag's value + flags_w[6] = space.wrap(2) + space.appexec([space.wrap(flags_w)], """(flags): + import sys + sys.flags = type(sys.flags)(flags) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -852,9 +852,11 @@ w_saved_flags = space.sys.get('flags') flags_w = space.sys.get('flags').getitems_copy() flags_w[6] = space.wrap(1) - w_flags = type(space.sys.get('flags'))(flags_w) - w_flags.user_setup(space, w_saved_flags.w__class__) - space.sys.w_dict.setitem(space.wrap('flags'), w_flags) + space.appexec([space.wrap(flags_w)], """(flags): + import sys + sys.flags = type(sys.flags)(flags) + """) + try: self.run(source) finally: From noreply at buildbot.pypy.org Tue Jun 4 11:58:35 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Move sys.flags changing to applevel code Message-ID: <20130604095835.1E2601C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64752:6deea9fd59b6 Date: 2013-04-28 15:21 -0500 http://bitbucket.org/pypy/pypy/changeset/6deea9fd59b6/ Log: Move sys.flags changing to applevel code diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -95,11 +95,11 @@ space.wrap(argv[0])) if interactiveconfig.optimize: - flags_w = space.sys.get('flags').getitems_copy() #change the optimize flag's value - flags_w[6] = space.wrap(2) - space.appexec([space.wrap(flags_w)], """(flags): + space.appexec([], """(): import sys + flags = list(sys.flags) + flags[6] = 2 sys.flags = type(sys.flags)(flags) """) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -850,10 +850,10 @@ assert False """ w_saved_flags = space.sys.get('flags') - flags_w = space.sys.get('flags').getitems_copy() - flags_w[6] = space.wrap(1) - space.appexec([space.wrap(flags_w)], """(flags): + space.appexec([], """(): import sys + flags = list(sys.flags) + flags[6] = 2 sys.flags = type(sys.flags)(flags) """) From noreply at buildbot.pypy.org Tue Jun 4 11:58:37 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Make -O set __debug__ to False and change JUMP_IF_NOT_DEBUG to a relative jump Message-ID: <20130604095837.016B11C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64753:a000ef4ae952 Date: 2013-04-29 00:57 -0500 http://bitbucket.org/pypy/pypy/changeset/a000ef4ae952/ Log: Make -O set __debug__ to False and change JUMP_IF_NOT_DEBUG to a relative jump diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,6 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) -jabs_op('JUMP_IF_NOT_DEBUG', 204) # Target address 
+jrel_op('JUMP_IF_NOT_DEBUG', 204) # Target address del def_op, name_op, jrel_op, jabs_op diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -95,12 +95,14 @@ space.wrap(argv[0])) if interactiveconfig.optimize: - #change the optimize flag's value + #change the optimize flag's value and set __debug__ to False space.appexec([], """(): import sys flags = list(sys.flags) flags[6] = 2 sys.flags = type(sys.flags)(flags) + import __builtin__ + setattr(__builtin__, '__debug__', False) """) # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -450,6 +450,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __builtin__ + setattr(__builtin__, '__debug__', False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") From noreply at buildbot.pypy.org Tue Jun 4 11:58:38 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Add caching for the debug flag for skipping asserts Message-ID: <20130604095838.2CB8A1C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64754:c8d597cf7e85 Date: 2013-04-29 04:29 -0500 http://bitbucket.org/pypy/pypy/changeset/c8d597cf7e85/ Log: Add caching for the debug flag for skipping asserts diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -101,8 +101,8 @@ flags = list(sys.flags) flags[6] = 2 sys.flags = type(sys.flags)(flags) - import __builtin__ - setattr(__builtin__, '__debug__', False) + import __pypy__ + __pypy__.set_debug(False) """) # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -451,8 +451,8 @@ sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) if sys.flags.optimize >= 1: - import __builtin__ - setattr(__builtin__, '__debug__', False) + import __pypy__ + __pypy__.set_debug(False) if sys.py3kwarning: print >> sys.stderr, ( diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -846,21 +846,17 @@ def test_assert_skipping(self): space = self.space + mod = space.getbuiltinmodule('__pypy__') + w_set_debug = space.getattr(mod, space.wrap('set_debug')) + space.call_function(w_set_debug, space.w_False) + source = """if 1: assert False """ - w_saved_flags = space.sys.get('flags') - space.appexec([], """(): - import sys - flags = list(sys.flags) - flags[6] = 2 - sys.flags = type(sys.flags)(flags) - """) - try: self.run(source) finally: - space.sys.w_dict.setitem(space.wrap('flags'), w_saved_flags) + space.call_function(w_set_debug, space.w_True) class AppTestCompiler: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -877,11 +877,7 @@ return next_instr def JUMP_IF_NOT_DEBUG(self, target, next_instr): - try: - optimize = self.space.sys.get_flag('optimize') - except: - 
optimize = 0 - if optimize >= 1: + if not self.space.sys.debug: return target return next_instr diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -57,6 +57,7 @@ 'newlist_hint' : 'interp_magic.newlist_hint', 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', + 'set_debug' : 'interp_magic.set_debug', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -101,3 +101,11 @@ @unwrap_spec(sizehint=int) def newlist_hint(space, sizehint): return space.newlist_hint(sizehint) + + at unwrap_spec(debug=bool) +def set_debug(space, debug): + print debug + space.sys.debug = debug + space.setitem(space.builtin.w_dict, + space.wrap('__debug__'), + space.wrap(debug)) diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -7,7 +7,7 @@ class Module(MixedModule): """Sys Builtin Module. """ - _immutable_fields_ = ["defaultencoding?"] + _immutable_fields_ = ["defaultencoding?", "debug?"] def __init__(self, space, w_name): """NOT_RPYTHON""" # because parent __init__ isn't @@ -18,6 +18,7 @@ self.w_default_encoder = None self.defaultencoding = "ascii" self.filesystemencoding = None + self.debug = True interpleveldefs = { '__name__' : '(space.wrap("sys"))', From noreply at buildbot.pypy.org Tue Jun 4 11:58:39 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Code cleanup (stray print, bad variable name, and bare except) Message-ID: <20130604095839.748BC1C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64755:83e557521804 Date: 2013-04-29 12:20 -0500 http://bitbucket.org/pypy/pypy/changeset/83e557521804/ Log: Code cleanup (stray print, bad variable name, and bare except) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -221,9 +221,9 @@ def remove_docstrings(self, space): if self.co_flags & CO_KILL_DOCSTRING: self.co_consts_w[0] = space.w_None - for co_w in self.co_consts_w: + for w_co in self.co_consts_w: if isinstance(co_w, PyCode): - co_w.remove_docstrings(space) + w_co.remove_docstrings(space) def _to_code(self): """For debugging only.""" diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -104,7 +104,6 @@ @unwrap_spec(debug=bool) def set_debug(space, debug): - print debug space.sys.debug = debug space.setitem(space.builtin.w_dict, space.wrap('__debug__'), diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -915,7 +915,7 @@ try: optimize = space.sys.get_flag('optimize') - except: + except Exception: optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) @@ -1016,7 +1016,7 @@ code_w = read_compiled_module(space, cpathname, source) try: optimize = space.sys.get_flag('optimize') - except: + except Exception: optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) From noreply at buildbot.pypy.org Tue Jun 4 11:58:40 2013 From: noreply at buildbot.pypy.org 
(waedt) Date: Tue, 4 Jun 2013 11:58:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Code cleanup (stray print, bad variable name, and bare except) Message-ID: <20130604095840.B65341C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64756:773b31fdbf56 Date: 2013-04-29 12:39 -0500 http://bitbucket.org/pypy/pypy/changeset/773b31fdbf56/ Log: Code cleanup (stray print, bad variable name, and bare except) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -222,7 +222,7 @@ if self.co_flags & CO_KILL_DOCSTRING: self.co_consts_w[0] = space.w_None for w_co in self.co_consts_w: - if isinstance(co_w, PyCode): + if isinstance(w_co, PyCode): w_co.remove_docstrings(space) def _to_code(self): From noreply at buildbot.pypy.org Tue Jun 4 11:58:41 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Add to the remove docstring tests and fix a comment Message-ID: <20130604095841.E84C51C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64757:235a08c4b2de Date: 2013-05-09 21:50 -0500 http://bitbucket.org/pypy/pypy/changeset/235a08c4b2de/ Log: Add to the remove docstring tests and fix a comment diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,6 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) -jrel_op('JUMP_IF_NOT_DEBUG', 204) # Target address +jrel_op('JUMP_IF_NOT_DEBUG', 204) # Distance to target address del def_op, name_op, jrel_op, jabs_op diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -819,6 +819,9 @@ def f2(): 'docstring' return 'docstring' + def f3(): + 'foo' + return 'bar' class C1(): 'docstring' class C2(): @@ -836,6 +839,9 @@ yield self.check, dict_w, "f1.__doc__", None yield self.check, dict_w, "f2.__doc__", 'docstring' + yield self.check, dict_w, "f2()", 'docstring' + yield self.check, dict_w, "f3.__doc__", None + yield self.check, dict_w, "f3()", 'bar' yield self.check, dict_w, "C1.__doc__", None yield self.check, dict_w, "C2.__doc__", 'docstring' yield self.check, dict_w, "C3.field", 'not docstring' From noreply at buildbot.pypy.org Tue Jun 4 11:58:45 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge pypy/pypy default Message-ID: <20130604095845.EFBF11C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64758:3dda6aa42f0c Date: 2013-05-10 17:23 +0400 http://bitbucket.org/pypy/pypy/changeset/3dda6aa42f0c/ Log: Merge pypy/pypy default diff too long, truncating to 2000 out of 6102 lines diff --git a/lib-python/2.7/distutils/command/install.py b/lib-python/2.7/distutils/command/install.py --- a/lib-python/2.7/distutils/command/install.py +++ b/lib-python/2.7/distutils/command/install.py @@ -474,8 +474,8 @@ def select_scheme (self, name): # it's the caller's problem if they supply a bad name! 
- if hasattr(sys, 'pypy_version_info') and not ( - name.endswith('_user') or name.endswith('_home')): + if (hasattr(sys, 'pypy_version_info') and + not name.endswith(('_user', '_home'))): name = 'pypy' scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -122,10 +122,10 @@ compiler.compiler_so.extend(['-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CFLAGS" in os.environ: - cflags = os.environ["CFLAGS"] - compiler.compiler.append(cflags) - compiler.compiler_so.append(cflags) - compiler.linker_so.append(cflags) + cflags = os.environ["CFLAGS"].split() + compiler.compiler.extend(cflags) + compiler.compiler_so.extend(cflags) + compiler.linker_so.extend(cflags) from sysconfig_cpython import ( diff --git a/lib-python/2.7/pydoc.py b/lib-python/2.7/pydoc.py --- a/lib-python/2.7/pydoc.py +++ b/lib-python/2.7/pydoc.py @@ -1953,7 +1953,11 @@ if key is None: callback(None, modname, '') else: - desc = split(__import__(modname).__doc__ or '', '\n')[0] + try: + module_doc = __import__(modname).__doc__ + except ImportError: + module_doc = None + desc = split(module_doc or '', '\n')[0] if find(lower(modname + ' - ' + desc), key) >= 0: callback(None, modname, desc) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -167,7 +167,6 @@ return if '_fields_' not in self.__dict__: self._fields_ = [] - self._names = [] _set_shape(self, [], self._is_union) __setattr__ = struct_setattr diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -370,7 +370,10 @@ if key in ffi._parser._declarations: tp = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) - value = backendlib.load_function(BType, name) + try: + value = backendlib.load_function(BType, name) + except KeyError: + raise AttributeError(name) library.__dict__[name] = value return # diff --git a/lib_pypy/ctypes_config_cache/dumpcache.py b/lib_pypy/ctypes_config_cache/dumpcache.py --- a/lib_pypy/ctypes_config_cache/dumpcache.py +++ b/lib_pypy/ctypes_config_cache/dumpcache.py @@ -1,25 +1,21 @@ -import os +import sys, os from ctypes_configure import dumpcache -from rpython.jit.backend import detect_cpu def dumpcache2(basename, config): - model = detect_cpu.autodetect_main_model_and_size() - filename = '_%s_%s_.py' % (basename, model) + size = 32 if sys.maxint <= 2**32 else 64 + filename = '_%s_%s_.py' % (basename, size) dumpcache.dumpcache(__file__, filename, config) # filename = os.path.join(os.path.dirname(__file__), '_%s_cache.py' % (basename,)) g = open(filename, 'w') print >> g, '''\ -try: - from __pypy__ import cpumodel -except ImportError: - from rpython.jit.backend import detect_cpu - cpumodel = detect_cpu.autodetect_main_model_and_size() +import sys +_size = 32 if sys.maxint <= 2**32 else 64 # XXX relative import, should be removed together with # XXX the relative imports done e.g. 
by lib_pypy/pypy_test/test_hashlib -mod = __import__("_%s_%%s_" %% (cpumodel,), - globals(), locals(), ["*"]) -globals().update(mod.__dict__)\ +_mod = __import__("_%s_%%s_" %% (_size,), + globals(), locals(), ["*"]) +globals().update(_mod.__dict__)\ ''' % (basename,) g.close() diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -25,13 +25,12 @@ sys.path[:] = path def try_rebuild(): - from rpython.jit.backend import detect_cpu - model = detect_cpu.autodetect_main_model_and_size() - # remove the files '_*_model_.py' + size = 32 if sys.maxint <= 2**32 else 64 + # remove the files '_*_size_.py' left = {} for p in os.listdir(_dirpath): - if p.startswith('_') and (p.endswith('_%s_.py' % model) or - p.endswith('_%s_.pyc' % model)): + if p.startswith('_') and (p.endswith('_%s_.py' % size) or + p.endswith('_%s_.pyc' % size)): os.unlink(os.path.join(_dirpath, p)) elif p.startswith('_') and (p.endswith('_.py') or p.endswith('_.pyc')): diff --git a/py/_path/local.py b/py/_path/local.py --- a/py/_path/local.py +++ b/py/_path/local.py @@ -655,7 +655,8 @@ mkdtemp = classmethod(mkdtemp) def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, - lock_timeout = 172800): # two days + lock_timeout = 172800, # two days + min_timeout = 300): # five minutes """ return unique directory with a number greater than the current maximum one. The number is assumed to start directly after prefix. if keep is true directories with a number less than (maxnum-keep) @@ -723,6 +724,20 @@ for path in rootdir.listdir(): num = parse_num(path) if num is not None and num <= (maxnum - keep): + if min_timeout: + # NB: doing this is needed to prevent (or reduce + # a lot the chance of) the following situation: + # 'keep+1' processes call make_numbered_dir() at + # the same time, they create dirs, but then the + # last process notices the first dir doesn't have + # (yet) a .lock in it and kills it. + try: + t1 = path.lstat().mtime + t2 = lockfile.lstat().mtime + if abs(t2-t1) < min_timeout: + continue # skip directories too recent + except py.error.Error: + continue # failure to get a time, better skip lf = path.join('.lock') try: t1 = lf.lstat().mtime diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -84,13 +84,14 @@ '_multiprocessing': [('objspace.usemodules.rctime', True), ('objspace.usemodules.thread', True)], 'cpyext': [('objspace.usemodules.array', True)], + 'cppyy': [('objspace.usemodules.cpyext', True)], } module_suggests = { # the reason you want _rawffi is for ctypes, which # itself needs the interp-level struct module # because 'P' is missing from the app-level one "_rawffi": [("objspace.usemodules.struct", True)], - "cpyext": [("translation.secondaryentrypoints", "cpyext"), + "cpyext": [("translation.secondaryentrypoints", "cpyext,main"), ("translation.shared", sys.platform == "win32")], } diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.0-beta1' +release = '2.0.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -46,7 +46,7 @@ 2. Install build-time dependencies. On a Debian box these are:: [user at debian-box ~]$ sudo apt-get install \ - gcc make python-dev libffi-dev lib-sqlite3-dev pkg-config \ + gcc make python-dev libffi-dev libsqlite3-dev pkg-config \ libz-dev libbz2-dev libncurses-dev libexpat1-dev \ libssl-dev libgc-dev python-sphinx python-greenlet @@ -105,7 +105,7 @@ $ ./pypy-c Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0-beta1 with GCC 4.7.1] on linux2 + [PyPy 2.0.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``RPython magically makes you rich and famous (says so on the tin)'' @@ -235,7 +235,7 @@ the ``bin/pypy`` executable. To install PyPy system wide on unix-like systems, it is recommended to put the -whole hierarchy alone (e.g. in ``/opt/pypy2.0-beta1``) and put a symlink to the +whole hierarchy alone (e.g. in ``/opt/pypy2.0``) and put a symlink to the ``pypy`` executable into ``/usr/bin`` or ``/usr/local/bin`` If the executable fails to find suitable libraries, it will report diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -53,10 +53,10 @@ PyPy is ready to be executed as soon as you unpack the tarball or the zip file, with no need to install it in any specific location:: - $ tar xf pypy-2.0-beta1-linux.tar.bz2 - $ ./pypy-2.0-beta1/bin/pypy + $ tar xf pypy-2.0.tar.bz2 + $ ./pypy-2.0/bin/pypy Python 2.7.3 (7e4f0faa3d51, Nov 22 2012, 10:35:18) - [PyPy 2.0.0-beta1 with GCC 4.7.1] on linux2 + [PyPy 2.0.0 with GCC 4.7.1] on linux2 Type "help", "copyright", "credits" or "license" for more information. And now for something completely different: ``PyPy is an exciting technology that lets you to write fast, portable, multi-platform interpreters with less @@ -75,14 +75,14 @@ $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py - $ ./pypy-2.0-beta1/bin/pypy distribute_setup.py + $ ./pypy-2.0/bin/pypy distribute_setup.py - $ ./pypy-2.0-beta1/bin/pypy get-pip.py + $ ./pypy-2.0/bin/pypy get-pip.py - $ ./pypy-2.0-beta1/bin/pip install pygments # for example + $ ./pypy-2.0/bin/pip install pygments # for example -3rd party libraries will be installed in ``pypy-2.0-beta1/site-packages``, and -the scripts in ``pypy-2.0-beta1/bin``. +3rd party libraries will be installed in ``pypy-2.0/site-packages``, and +the scripts in ``pypy-2.0/bin``. Installing using virtualenv --------------------------- diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0 beta 2`_: the latest official release +* `Release 2.0`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0 beta 2`: http://pypy.org/download.html +.. _`Release 2.0`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. 
_`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.0.0.rst b/pypy/doc/release-2.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.0.rst @@ -0,0 +1,71 @@ +============================ +PyPy 2.0 - Einstein Sandwich +============================ + +We're pleased to announce PyPy 2.0. This is a stable release that brings +a swath of bugfixes, small performance improvements and compatibility fixes. +PyPy 2.0 is a big step for us and we hope in the future we'll be able to +provide stable releases more often. + +You can download the PyPy 2.0 release here: + + http://pypy.org/download.html + +The two biggest changes since PyPy 1.9 are: + +* stackless is now supported including greenlets, which means eventlet + and gevent should work (but read below about gevent) + +* PyPy now contains release 0.6 of `cffi`_ as a builtin module, which + is preferred way of calling C from Python that works well on PyPy + +.. _`cffi`: http://cffi.readthedocs.org + +If you're using PyPy for anything, it would help us immensely if you fill out +the following survey: http://bit.ly/pypysurvey This is for the developers +eyes and we will not make any information public without your agreement. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Windows 64 work is still stalling, we would welcome a volunteer +to handle that. ARM support is on the way, as you can see from the recently +released alpha for ARM. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +* Stackless including greenlets should work. For gevent, you need to check + out `pypycore`_ and use the `pypy-hacks`_ branch of gevent. + +* cffi is now a module included with PyPy. (`cffi`_ also exists for + CPython; the two versions should be fully compatible.) It is the + preferred way of calling C from Python that works on PyPy. + +* Callbacks from C are now JITted, which means XML parsing is much faster. + +* A lot of speed improvements in various language corners, most of them small, + but speeding up some particular corners a lot. + +* The JIT was refactored to emit machine code which manipulates a "frame" + that lives on the heap rather than on the stack. This is what makes + Stackless work, and it could bring another future speed-up (not done yet). + +* A lot of stability issues fixed. + +* Refactoring much of the numpypy array classes, which resulted in removal of + lazy expression evaluation. On the other hand, we now have more complete + dtype support and support more array attributes. + +.. _`pypycore`: https://github.com/gevent-on-pypy/pypycore/ +.. _`pypy-hacks`: https://github.com/schmir/gevent/tree/pypy-hacks + +Cheers, +fijal, arigo and the PyPy team diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.0.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,140 +1,9 @@ ====================== -What's new in PyPy 2.0 +What's new in PyPy 2.1 ====================== -.. this is a revision shortly after release-2.0-beta1 -.. startrev: 0e6161a009c6 +.. this is a revision shortly after release-2.0 +.. startrev: a13c07067613 -.. 
branch: split-rpython -Split rpython and pypy into seperate directories - -.. branch: callback-jit -Callbacks from C are now better JITted - -.. branch: fix-jit-logs - -.. branch: remove-globals-in-jit - -.. branch: length-hint -Implement __lenght_hint__ according to PEP 424 - -.. branch: numpypy-longdouble -Long double support for numpypy - -.. branch: numpypy-disable-longdouble -Since r_longdouble support is missing, disable all longdouble and derivative -dtypes using ENABLED_LONG_DOUBLE = False - -.. branch: numpypy-real-as-view -Convert real, imag from ufuncs to views. This involves the beginning of -view() functionality - -.. branch: indexing-by-array -Adds indexing by scalar, adds int conversion from scalar and single element array, -fixes compress, indexing by an array with a smaller shape and the indexed object. - -.. branch: str-dtype-improvement -Allow concatenation of str and numeric arrays - -.. branch: signatures -Improved RPython typing - -.. branch: rpython-bytearray -Rudimentary support for bytearray in RPython - -.. branch: refactor-call_release_gil -Fix a bug which caused cffi to return the wrong result when calling a C -function which calls a Python callback which forces the frames - -.. branch: virtual-raw-mallocs -JIT optimizations which make cffi calls even faster, by removing the need to -allocate a temporary buffer where to store the arguments. - -.. branch: improve-docs-2 -Improve documents and straighten out links - -.. branch: fast-newarray -Inline the fast path of newarray in the assembler. -Disabled on ARM until we fix issues. - -.. branch: reflex-support -Allow dynamic loading of a (Reflex) backend that implements the C-API needed -to provide reflection information - -.. branches we don't care about -.. branch: autoreds -.. branch: kill-faking -.. branch: improved_ebnfparse_error -.. branch: task-decorator -.. branch: fix-e4fa0b2 -.. branch: win32-fixes -.. branch: numpy-unify-methods -.. branch: fix-version-tool -.. branch: popen2-removal -.. branch: pickle-dumps -.. branch: scalar_get_set - -.. branch: release-2.0-beta1 - -.. branch: remove-PYPY_NOT_MAIN_FILE - -.. branch: missing-jit-operations - -.. branch: fix-lookinside-iff-oopspec -Fixed the interaction between two internal tools for controlling the JIT. - -.. branch: inline-virtualref-2 -Better optimized certain types of frame accesses in the JIT, particularly -around exceptions that escape the function they were raised in. - -.. branch: missing-ndarray-attributes -Some missing attributes from ndarrays - -.. branch: cleanup-tests -Consolidated the lib_pypy/pypy_test and pypy/module/test_lib_pypy tests into -one directory for reduced confusion and so they all run nightly. - -.. branch: unquote-faster -.. branch: urlparse-unquote-faster - -.. branch: signal-and-thread -Add "__pypy__.thread.signals_enabled", a context manager. Can be used in a -non-main thread to enable the processing of signal handlers in that thread. - -.. branch: coding-guide-update-rlib-refs -.. branch: rlib-doc-rpython-refs -.. branch: clean-up-remaining-pypy-rlib-refs - -.. branch: enumerate-rstr -Support enumerate() over rstr types. - -.. branch: cleanup-numpypy-namespace -Cleanup _numpypy and numpypy namespaces to more closely resemble numpy. - -.. branch: kill-flowobjspace -Random cleanups to hide FlowObjSpace from public view. - -.. branch: vendor-rename - -.. branch: jitframe-on-heap -Moves optimized JIT frames from stack to heap. As a side effect it enables -stackless to work well with the JIT on PyPy. 
Also removes a bunch of code from -the GC which fixes cannot find gc roots. - -.. branch: pycon2013-doc-fixes -Documentation fixes after going through the docs at PyCon 2013 sprint. - -.. branch: extregistry-refactor - -.. branch: remove-list-smm -.. branch: bridge-logging -.. branch: curses_cffi -cffi implementation of _curses - -.. branch: sqlite-cffi -cffi implementation of sqlite3 - -.. branch: release-2.0-beta2 -.. branch: unbreak-freebsd - -.. branch: virtualref-virtualizable +.. branch: numpy-pickle +Pickling of numpy arrays and dtypes (including record dtypes) diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -6,8 +6,14 @@ if sys.platform.startswith('linux'): arch = 'linux' + cmd = 'wget "%s"' + tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" +if sys.platform.startswith('darwin'): + arch = 'osx' + cmd = 'curl -O "%s"' + tar = "tar -x -v --strip-components=2 -f %s '*/bin/pypy'" else: - print 'Cannot determine the platform, please update this scrip' + print 'Cannot determine the platform, please update this script' sys.exit(1) if sys.maxint == 2**63 - 1: @@ -23,10 +29,9 @@ tmp = py.path.local.mkdtemp() mydir = tmp.chdir() print 'Downloading pypy to', tmp -if os.system('wget "%s"' % url) != 0: +if os.system(cmd % url) != 0: sys.exit(1) print 'Extracting pypy binary' mydir.chdir() -os.system("tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" % tmp.join(filename)) - +os.system(tar % tmp.join(filename)) diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -22,20 +22,22 @@ # __________ Entry point __________ + def create_entry_point(space, w_dict): - w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) - w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) - withjit = space.config.objspace.usemodules.pypyjit + if w_dict is not None: # for tests + w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) + w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) + w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) + w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) + withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): if withjit: from rpython.jit.backend.hlinfo import highleveljitinfo highleveljitinfo.sys_executable = argv[0] - #debug("entry point starting") - #for arg in argv: + #debug("entry point starting") + #for arg in argv: # debug(" argv -> " + arg) if len(argv) > 2 and argv[1] == '--heapsize': # Undocumented option, handled at interp-level. @@ -71,7 +73,35 @@ debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return 1 return exitcode - return entry_point + + # register the minimal equivalent of running a small piece of code. 
This + # should be used as sparsely as possible, just to register callbacks + + from rpython.rlib.entrypoint import entrypoint + from rpython.rtyper.lltypesystem import rffi + + @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') + def pypy_execute_source(ll_source): + source = rffi.charp2str(ll_source) + return _pypy_execute_source(source) + + w_globals = space.newdict() + space.setitem(w_globals, space.wrap('__builtins__'), + space.builtin_modules['__builtin__']) + + def _pypy_execute_source(source): + try: + compiler = space.createcompiler() + stmt = compiler.compile(source, 'c callback', 'exec', 0) + stmt.exec_code(space, w_globals, w_globals) + except OperationError, e: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return 1 + return 0 + + return entry_point, _pypy_execute_source # for tests def call_finish(space): space.finish() @@ -219,7 +249,7 @@ def jitpolicy(self, driver): from pypy.module.pypyjit.policy import PyPyJitPolicy, pypy_hooks return PyPyJitPolicy(pypy_hooks) - + def get_entry_point(self, config): from pypy.tool.lib_pypy import import_from_lib_pypy rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') @@ -232,7 +262,7 @@ app = gateway.applevel(open(filename).read(), 'app_main.py', 'app_main') app.hidden_applevel = False w_dict = app.getwdict(space) - entry_point = create_entry_point(space, w_dict) + entry_point, _ = create_entry_point(space, w_dict) return entry_point, None, PyPyAnnotatorPolicy(single_space = space) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -1,21 +1,41 @@ #! /usr/bin/env python # App-level version of py.py. # See test/test_app_main. 
+ +# Missing vs CPython: -d, -OO, -t, -v, -x, -3 +"""\ +Options and arguments (and corresponding environment variables): +-B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x +-c cmd : program passed in as string (terminates option list) +-E : ignore PYTHON* environment variables (such as PYTHONPATH) +-h : print this help message and exit (also --help) +-i : inspect interactively after running script; forces a prompt even + if stdin does not appear to be a terminal; also PYTHONINSPECT=x +-m mod : run library module as a script (terminates option list) +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O +-R : ignored (see http://bugs.python.org/issue14621) +-Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew +-s : don't add user site directory to sys.path; also PYTHONNOUSERSITE +-S : don't imply 'import site' on initialization +-u : unbuffered binary stdout and stderr; also PYTHONUNBUFFERED=x +-V : print the Python version number and exit (also --version) +-W arg : warning control; arg is action:message:category:module:lineno + also PYTHONWARNINGS=arg +file : program read from script file +- : program read from stdin (default; interactive mode if a tty) +arg ...: arguments passed to program in sys.argv[1:] +PyPy options and arguments: +--info : print translation information about this PyPy executable """ -options: - -i inspect interactively after running script - -O skip assert statements - -OO remove docstrings when importing modules in addition to -O - -c cmd program passed in as CMD (terminates option list) - -S do not 'import site' on initialization - -u unbuffered binary stdout and stderr - -h, --help show this help message and exit - -m mod library module to be run as a script (terminates option list) - -W arg warning control (arg is action:message:category:module:lineno) - -E ignore environment variables (such as PYTHONPATH) - -R ignored (see http://bugs.python.org/issue14621) - --version print the PyPy version - --info print translation information about this PyPy executable +USAGE1 = __doc__ +# Missing vs CPython: PYTHONHOME, PYTHONCASEOK +USAGE2 = """ +Other environment variables: +PYTHONSTARTUP: file executed on interactive startup (no default) +PYTHONPATH : %r-separated list of directories prefixed to the + default module search path. The result is sys.path. +PYTHONIOENCODING: Encoding[:errors] used for stdin/stdout/stderr. 
""" import sys diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -27,7 +27,7 @@ new_inst = mod.get('generator_new') w = space.wrap if self.frame: - w_frame = w(self.frame) + w_frame = self.frame._reduce_state(space) else: w_frame = space.w_None @@ -36,7 +36,20 @@ w(self.running), ] - return space.newtuple([new_inst, space.newtuple(tup)]) + return space.newtuple([new_inst, space.newtuple([]), + space.newtuple(tup)]) + + def descr__setstate__(self, space, w_args): + from rpython.rlib.objectmodel import instantiate + args_w = space.unpackiterable(w_args) + w_framestate, w_running = args_w + if space.is_w(w_framestate, space.w_None): + self.frame = None + else: + frame = instantiate(space.FrameClass) # XXX fish + frame.descr__setstate__(space, w_framestate) + GeneratorIterator.__init__(self, frame) + self.running = self.space.is_true(w_running) def descr__iter__(self): """x.__iter__() <==> iter(x)""" diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -13,6 +13,7 @@ # imported yet, and when it has been, it is mod.__dict__.items() just # after startup(). w_initialdict = None + lazy = False def __init__(self, space, w_name): """ NOT_RPYTHON """ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -16,10 +16,9 @@ from rpython.tool.stdlib_opcode import host_bytecode_spec # Define some opcodes used -g = globals() for op in '''DUP_TOP POP_TOP SETUP_LOOP SETUP_EXCEPT SETUP_FINALLY POP_BLOCK END_FINALLY'''.split(): - g[op] = stdlib_opcode.opmap[op] + globals()[op] = stdlib_opcode.opmap[op] HAVE_ARGUMENT = stdlib_opcode.HAVE_ARGUMENT class PyFrame(eval.Frame): @@ -304,11 +303,17 @@ @jit.dont_look_inside def descr__reduce__(self, space): from pypy.interpreter.mixedmodule import MixedModule - from pypy.module._pickle_support import maker # helper fns w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) new_inst = mod.get('frame_new') - w = space.wrap + w_tup_state = self._reduce_state(space) + nt = space.newtuple + return nt([new_inst, nt([]), w_tup_state]) + + @jit.dont_look_inside + def _reduce_state(self, space): + from pypy.module._pickle_support import maker # helper fns + w = space.wrap nt = space.newtuple cells = self._getcells() @@ -359,8 +364,7 @@ w(self.instr_prev_plus_one), w_cells, ] - - return nt([new_inst, nt([]), nt(tup_state)]) + return nt(tup_state) @jit.dont_look_inside def descr__setstate__(self, space, w_args): diff --git a/pypy/interpreter/test2/mymodule.py b/pypy/interpreter/test/mymodule.py rename from pypy/interpreter/test2/mymodule.py rename to pypy/interpreter/test/mymodule.py diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_app_main.py @@ -0,0 +1,966 @@ +""" +Tests for the entry point of pypy-c, app_main.py. 
+""" +from __future__ import with_statement +import py +import sys, os, re, runpy, subprocess +from rpython.tool.udir import udir +from contextlib import contextmanager +from pypy.conftest import pypydir + +banner = sys.version.splitlines()[0] + +app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') +app_main = os.path.abspath(app_main) + +_counter = 0 +def _get_next_path(ext='.py'): + global _counter + p = udir.join('demo_test_app_main_%d%s' % (_counter, ext)) + _counter += 1 + return p + +def getscript(source): + p = _get_next_path() + p.write(str(py.code.Source(source))) + return str(p) + +def getscript_pyc(space, source): + p = _get_next_path() + p.write(str(py.code.Source(source))) + w_dir = space.wrap(str(p.dirpath())) + w_modname = space.wrap(p.purebasename) + space.appexec([w_dir, w_modname], """(dir, modname): + import sys + d = sys.modules.copy() + sys.path.insert(0, dir) + __import__(modname) + sys.path.pop(0) + for key in sys.modules.keys(): + if key not in d: + del sys.modules[key] + """) + p = str(p) + 'c' + assert os.path.isfile(p) # the .pyc file should have been created above + return p + +def getscript_in_dir(source): + pdir = _get_next_path(ext='') + p = pdir.ensure(dir=1).join('__main__.py') + p.write(str(py.code.Source(source))) + # return relative path for testing purposes + return py.path.local().bestrelpath(pdir) + +demo_script = getscript(""" + print 'hello' + print 'Name:', __name__ + print 'File:', __file__ + import sys + print 'Exec:', sys.executable + print 'Argv:', sys.argv + print 'goodbye' + myvalue = 6*7 + """) + +crashing_demo_script = getscript(""" + print 'Hello2' + myvalue2 = 11 + ooups + myvalue2 = 22 + print 'Goodbye2' # should not be reached + """) + + +class TestParseCommandLine: + def check_options(self, options, sys_argv, **expected): + assert sys.argv == sys_argv + for key, value in expected.items(): + assert options[key] == value + for key, value in options.items(): + if key not in expected: + assert not value, ( + "option %r has unexpectedly the value %r" % (key, value)) + + def check(self, argv, env, **expected): + import StringIO + from pypy.interpreter import app_main + saved_env = os.environ.copy() + saved_sys_argv = sys.argv[:] + saved_sys_stdout = sys.stdout + saved_sys_stderr = sys.stdout + app_main.os = os + try: + os.environ.update(env) + sys.stdout = sys.stderr = StringIO.StringIO() + try: + options = app_main.parse_command_line(argv) + except SystemExit: + output = expected['output_contains'] + assert output in sys.stdout.getvalue() + else: + self.check_options(options, **expected) + finally: + os.environ.clear() + os.environ.update(saved_env) + sys.argv[:] = saved_sys_argv + sys.stdout = saved_sys_stdout + sys.stderr = saved_sys_stderr + + def test_all_combinations_I_can_think_of(self): + self.check([], {}, sys_argv=[''], run_stdin=True) + self.check(['-'], {}, sys_argv=['-'], run_stdin=True) + self.check(['-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) + self.check(['-OO'], {}, sys_argv=[''], run_stdin=True, optimize=2) + self.check(['-O', '-O'], {}, sys_argv=[''], run_stdin=True, optimize=2) + self.check(['-Qnew'], {}, sys_argv=[''], run_stdin=True, division_new=1) + self.check(['-Qold'], {}, sys_argv=[''], run_stdin=True, division_new=0) + self.check(['-Qwarn'], {}, sys_argv=[''], run_stdin=True, division_warning=1) + self.check(['-Qwarnall'], {}, sys_argv=[''], run_stdin=True, + division_warning=2) + self.check(['-Q', 'new'], {}, sys_argv=[''], run_stdin=True, division_new=1) + 
self.check(['-SOQnew'], {}, sys_argv=[''], run_stdin=True, + no_site=1, optimize=1, division_new=1) + self.check(['-SOQ', 'new'], {}, sys_argv=[''], run_stdin=True, + no_site=1, optimize=1, division_new=1) + self.check(['-i'], {}, sys_argv=[''], run_stdin=True, + interactive=1, inspect=1) + self.check(['-?'], {}, output_contains='usage:') + self.check(['-h'], {}, output_contains='usage:') + self.check(['-S', '-tO', '-h'], {}, output_contains='usage:') + self.check(['-S', '-thO'], {}, output_contains='usage:') + self.check(['-S', '-tO', '--help'], {}, output_contains='usage:') + self.check(['-S', '-tO', '--info'], {}, output_contains='translation') + self.check(['-S', '-tO', '--version'], {}, output_contains='Python') + self.check(['-S', '-tOV'], {}, output_contains='Python') + self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + run_stdin=True, no_site=1) + self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') + self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') + self.check(['-cpass','x'], {}, sys_argv=['-c','x'], run_command='pass') + self.check(['-Sc', 'pass'], {}, sys_argv=['-c'], run_command='pass', + no_site=1) + self.check(['-Scpass'], {}, sys_argv=['-c'], run_command='pass', no_site=1) + self.check(['-c', '', ''], {}, sys_argv=['-c', ''], run_command='') + self.check(['-mfoo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'], + run_module=True) + self.check(['-m', 'foo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'], + run_module=True) + self.check(['-Smfoo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'], + run_module=True, no_site=1) + self.check(['-Sm', 'foo', 'bar', 'baz'], {}, sys_argv=['foo', 'bar', 'baz'], + run_module=True, no_site=1) + self.check(['-', 'foo', 'bar'], {}, sys_argv=['-', 'foo', 'bar'], + run_stdin=True) + self.check(['foo', 'bar'], {}, sys_argv=['foo', 'bar']) + self.check(['foo', '-i'], {}, sys_argv=['foo', '-i']) + self.check(['-i', 'foo'], {}, sys_argv=['foo'], interactive=1, inspect=1) + self.check(['--', 'foo'], {}, sys_argv=['foo']) + self.check(['--', '-i', 'foo'], {}, sys_argv=['-i', 'foo']) + self.check(['--', '-', 'foo'], {}, sys_argv=['-', 'foo'], run_stdin=True) + self.check(['-Wbog'], {}, sys_argv=[''], warnoptions=['bog'], run_stdin=True) + self.check(['-W', 'ab', '-SWc'], {}, sys_argv=[''], warnoptions=['ab', 'c'], + run_stdin=True, no_site=1) + + self.check([], {'PYTHONDEBUG': '1'}, sys_argv=[''], run_stdin=True, debug=1) + self.check([], {'PYTHONDONTWRITEBYTECODE': '1'}, sys_argv=[''], run_stdin=True, dont_write_bytecode=1) + self.check([], {'PYTHONNOUSERSITE': '1'}, sys_argv=[''], run_stdin=True, no_user_site=1) + self.check([], {'PYTHONUNBUFFERED': '1'}, sys_argv=[''], run_stdin=True, unbuffered=1) + self.check([], {'PYTHONVERBOSE': '1'}, sys_argv=[''], run_stdin=True, verbose=1) + + def test_sysflags(self): + flags = ( + ("debug", "-d", "1"), + ("py3k_warning", "-3", "1"), + ("division_warning", "-Qwarn", "1"), + ("division_warning", "-Qwarnall", "2"), + ("division_new", "-Qnew", "1"), + (["inspect", "interactive"], "-i", "1"), + ("optimize", "-O", "1"), + ("optimize", "-OO", "2"), + ("dont_write_bytecode", "-B", "1"), + ("no_user_site", "-s", "1"), + ("no_site", "-S", "1"), + ("ignore_environment", "-E", "1"), + ("tabcheck", "-t", "1"), + ("tabcheck", "-tt", "2"), + ("verbose", "-v", "1"), + ("unicode", "-U", "1"), + ("bytes_warning", "-b", "1"), + ) + for flag, opt, value in flags: + if isinstance(flag, list): # this is for inspect&interactive + expected = {} + for flag1 in flag: + 
expected[flag1] = int(value) + else: + expected = {flag: int(value)} + self.check([opt, '-c', 'pass'], {}, sys_argv=['-c'], + run_command='pass', **expected) + + def test_sysflags_envvar(self, monkeypatch): + monkeypatch.setenv('PYTHONNOUSERSITE', '1') + expected = {"no_user_site": True} + self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass', **expected) + + +class TestInteraction: + """ + These tests require pexpect (UNIX-only). + http://pexpect.sourceforge.net/ + """ + def _spawn(self, *args, **kwds): + try: + import pexpect + except ImportError, e: + py.test.skip(str(e)) + else: + # Version is of the style "0.999" or "2.1". Older versions of + # pexpect try to get the fileno of stdin, which generally won't + # work with py.test (due to sys.stdin being a DontReadFromInput + # instance). + version = map(int, pexpect.__version__.split('.')) + + # I only tested 0.999 and 2.1. The former does not work, the + # latter does. Feel free to refine this measurement. + # -exarkun, 17/12/2007 + if version < [2, 1]: + py.test.skip( + "pexpect version too old, requires 2.1 or newer: %r" % ( + pexpect.__version__,)) + + kwds.setdefault('timeout', 10) + print 'SPAWN:', ' '.join([args[0]] + args[1]), kwds + child = pexpect.spawn(*args, **kwds) + child.logfile = sys.stdout + return child + + def spawn(self, argv): + return self._spawn(sys.executable, [app_main] + argv) + + def test_interactive(self): + child = self.spawn([]) + child.expect('Python ') # banner + child.expect('>>> ') # prompt + child.sendline('[6*7]') + child.expect(re.escape('[42]')) + child.sendline('def f(x):') + child.expect(re.escape('... ')) + child.sendline(' return x + 100') + child.expect(re.escape('... ')) + child.sendline('') + child.expect('>>> ') + child.sendline('f(98)') + child.expect('198') + child.expect('>>> ') + child.sendline('__name__') + child.expect("'__main__'") + child.expect('>>> ') + child.sendline('import sys') + child.expect('>>> ') + child.sendline("'' in sys.path") + child.expect("True") + + def test_help(self): + # test that -h prints the usage, including the name of the executable + # which should be /full/path/to/app_main.py in this case + child = self.spawn(['-h']) + child.expect(r'usage: .*app_main.py \[option\]') + child.expect('PyPy options and arguments:') + + def test_run_script(self): + child = self.spawn([demo_script]) + idx = child.expect(['hello', 'Python ', '>>> ']) + assert idx == 0 # no banner or prompt + child.expect(re.escape("Name: __main__")) + child.expect(re.escape('File: ' + demo_script)) + child.expect(re.escape('Exec: ' + app_main)) + child.expect(re.escape('Argv: ' + repr([demo_script]))) + child.expect('goodbye') + + def test_run_script_with_args(self): + argv = [demo_script, 'hello', 'world'] + child = self.spawn(argv) + child.expect(re.escape('Argv: ' + repr(argv))) + child.expect('goodbye') + + def test_no_such_script(self): + import errno + msg = os.strerror(errno.ENOENT) # 'No such file or directory' + child = self.spawn(['xxx-no-such-file-xxx']) + child.expect(re.escape(msg)) + + def test_option_i(self): + argv = [demo_script, 'foo', 'bar'] + child = self.spawn(['-i'] + argv) + idx = child.expect(['hello', re.escape(banner)]) + assert idx == 0 # no banner + child.expect(re.escape('File: ' + demo_script)) + child.expect(re.escape('Argv: ' + repr(argv))) + child.expect('goodbye') + idx = child.expect(['>>> ', re.escape(banner)]) + assert idx == 0 # prompt, but still no banner + child.sendline('myvalue * 102') + child.expect('4284') + child.sendline('__name__') + 
child.expect('__main__') + + def test_option_i_crashing(self): + argv = [crashing_demo_script, 'foo', 'bar'] + child = self.spawn(['-i'] + argv) + idx = child.expect(['Hello2', re.escape(banner)]) + assert idx == 0 # no banner + child.expect('NameError') + child.sendline('myvalue2 * 1001') + child.expect('11011') + child.sendline('import sys; sys.argv') + child.expect(re.escape(repr(argv))) + child.sendline('sys.last_type.__name__') + child.expect(re.escape(repr('NameError'))) + + def test_options_i_c(self): + child = self.spawn(['-i', '-c', 'x=555']) + idx = child.expect(['>>> ', re.escape(banner)]) + assert idx == 0 # prompt, but no banner + child.sendline('x') + child.expect('555') + child.sendline('__name__') + child.expect('__main__') + child.sendline('import sys; sys.argv') + child.expect(re.escape("['-c']")) + + def test_options_i_c_crashing(self, monkeypatch): + monkeypatch.setenv('PYTHONPATH', None) + child = self.spawn(['-i', '-c', 'x=666;foobar']) + child.expect('NameError') + idx = child.expect(['>>> ', re.escape(banner)]) + assert idx == 0 # prompt, but no banner + child.sendline('x') + child.expect('666') + child.sendline('__name__') + child.expect('__main__') + child.sendline('import sys; sys.argv') + child.expect(re.escape("['-c']")) + child.sendline('sys.last_type.__name__') + child.expect(re.escape(repr('NameError'))) + + def test_atexit(self): + child = self.spawn([]) + child.expect('>>> ') + child.sendline('def f(): print "foobye"') + child.sendline('') + child.sendline('import atexit; atexit.register(f)') + child.sendline('6*7') + child.expect('42') + # pexpect's sendeof() is confused by py.test capturing, though + # I think that it is a bug of sendeof() + old = sys.stdin + try: + sys.stdin = child + child.sendeof() + finally: + sys.stdin = old + child.expect('foobye') + + def test_pythonstartup(self, monkeypatch): + monkeypatch.setenv('PYTHONPATH', None) + monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script) + child = self.spawn([]) + child.expect(re.escape(banner)) + child.expect('Traceback') + child.expect('NameError') + child.expect('>>> ') + child.sendline('[myvalue2]') + child.expect(re.escape('[11]')) + child.expect('>>> ') + + child = self.spawn(['-i', demo_script]) + for line in ['hello', 'goodbye', '>>> ']: + idx = child.expect([line, 'Hello2']) + assert idx == 0 # no PYTHONSTARTUP run here + child.sendline('myvalue2') + child.expect('Traceback') + child.expect('NameError') + + def test_pythonstartup_file1(self, monkeypatch): + monkeypatch.setenv('PYTHONPATH', None) + monkeypatch.setenv('PYTHONSTARTUP', demo_script) + child = self.spawn([]) + child.expect('File: [^\n]+\.py') + child.expect('goodbye') + child.expect('>>> ') + child.sendline('[myvalue]') + child.expect(re.escape('[42]')) + child.expect('>>> ') + child.sendline('__file__') + child.expect('Traceback') + child.expect('NameError') + + def test_pythonstartup_file2(self, monkeypatch): + monkeypatch.setenv('PYTHONPATH', None) + monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script) + child = self.spawn([]) + child.expect('Traceback') + child.expect('>>> ') + child.sendline('__file__') + child.expect('Traceback') + child.expect('NameError') + + def test_ignore_python_startup(self): + old = os.environ.get('PYTHONSTARTUP', '') + try: + os.environ['PYTHONSTARTUP'] = crashing_demo_script + child = self.spawn(['-E']) + child.expect(re.escape(banner)) + index = child.expect(['Traceback', '>>> ']) + assert index == 1 # no traceback + finally: + os.environ['PYTHONSTARTUP'] = old + + def 
test_ignore_python_inspect(self): + os.environ['PYTHONINSPECT_'] = '1' + try: + child = self.spawn(['-E', '-c', 'pass']) + from pexpect import EOF + index = child.expect(['>>> ', EOF]) + assert index == 1 # no prompt + finally: + del os.environ['PYTHONINSPECT_'] + + def test_python_path_keeps_duplicates(self): + old = os.environ.get('PYTHONPATH', '') + try: + os.environ['PYTHONPATH'] = 'foobarbaz:foobarbaz' + child = self.spawn(['-c', 'import sys; print sys.path']) + child.expect(r"\['', 'foobarbaz', 'foobarbaz', ") + finally: + os.environ['PYTHONPATH'] = old + + def test_ignore_python_path(self): + old = os.environ.get('PYTHONPATH', '') + try: + os.environ['PYTHONPATH'] = 'foobarbaz' + child = self.spawn(['-E', '-c', 'import sys; print sys.path']) + from pexpect import EOF + index = child.expect(['foobarbaz', EOF]) + assert index == 1 # no foobarbaz + finally: + os.environ['PYTHONPATH'] = old + + def test_unbuffered(self): + line = 'import os,sys;sys.stdout.write(str(789));os.read(0,1)' + child = self.spawn(['-u', '-c', line]) + child.expect('789') # expect to see it before the timeout hits + child.sendline('X') + + def test_options_i_m(self, monkeypatch): + if sys.platform == "win32": + skip("close_fds is not supported on Windows platforms") + if not hasattr(runpy, '_run_module_as_main'): + skip("requires CPython >= 2.6") + p = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'mymodule.py') + p = os.path.abspath(p) + monkeypatch.chdir(os.path.dirname(app_main)) + child = self.spawn(['-i', + '-m', 'test.mymodule', + 'extra']) + child.expect('mymodule running') + child.expect('Name: __main__') + child.expect(re.escape('File: ' + p)) + child.expect(re.escape('Argv: ' + repr([p, 'extra']))) + child.expect('>>> ') + child.sendline('somevalue') + child.expect(re.escape(repr("foobar"))) + child.expect('>>> ') + child.sendline('import sys') + child.sendline('"test" in sys.modules') + child.expect('True') + child.sendline('"test.mymodule" in sys.modules') + child.expect('False') + child.sendline('sys.path[0]') + child.expect("''") + + def test_option_i_noexit(self): + child = self.spawn(['-i', '-c', 'import sys; sys.exit(1)']) + child.expect('Traceback') + child.expect('SystemExit: 1') + + def test_options_u_i(self): + if sys.platform == "win32": + skip("close_fds is not supported on Windows platforms") + import subprocess, select, os + python = sys.executable + pipe = subprocess.Popen([python, app_main, "-u", "-i"], + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT, + bufsize=0, close_fds=True) + iwtd, owtd, ewtd = select.select([pipe.stdout], [], [], 5) + assert iwtd # else we timed out + data = os.read(pipe.stdout.fileno(), 1024) + assert data.startswith('Python') + + def test_paste_several_lines_doesnt_mess_prompt(self): + py.test.skip("this can only work if readline is enabled") + child = self.spawn([]) + child.expect('>>> ') + child.sendline('if 1:\n print 42\n') + child.expect('... print 42') + child.expect('... 
') + child.expect('42') + child.expect('>>> ') + + def test_pythoninspect(self): + os.environ['PYTHONINSPECT_'] = '1' + try: + path = getscript(""" + print 6*7 + """) + child = self.spawn([path]) + child.expect('42') + child.expect('>>> ') + finally: + del os.environ['PYTHONINSPECT_'] + + def test_set_pythoninspect(self): + path = getscript(""" + import os + os.environ['PYTHONINSPECT'] = '1' + print 6*7 + """) + child = self.spawn([path]) + child.expect('42') + child.expect('>>> ') + + def test_clear_pythoninspect(self): + os.environ['PYTHONINSPECT_'] = '1' + try: + path = getscript(""" + import os + del os.environ['PYTHONINSPECT'] + """) + child = self.spawn([path]) + child.expect('>>> ') + finally: + del os.environ['PYTHONINSPECT_'] + + def test_stdout_flushes_before_stdin_blocks(self): + # This doesn't really test app_main.py, but a behavior that + # can only be checked on top of py.py with pexpect. + path = getscript(""" + import sys + sys.stdout.write('Are you suggesting coconuts migrate? ') + line = sys.stdin.readline() + assert line.rstrip() == 'Not at all. They could be carried.' + print 'A five ounce bird could not carry a one pound coconut.' + """) + py_py = os.path.join(pypydir, 'bin', 'pyinteractive.py') + child = self._spawn(sys.executable, [py_py, '-S', path]) + child.expect('Are you suggesting coconuts migrate?', timeout=120) + child.sendline('Not at all. They could be carried.') + child.expect('A five ounce bird could not carry a one pound coconut.') + + def test_no_space_before_argument(self, monkeypatch): + if not hasattr(runpy, '_run_module_as_main'): + skip("requires CPython >= 2.6") + child = self.spawn(['-cprint "hel" + "lo"']) + child.expect('hello') + + monkeypatch.chdir(os.path.dirname(app_main)) + child = self.spawn(['-mtest.mymodule']) + child.expect('mymodule running') + + def test_ps1_only_if_interactive(self): + argv = ['-c', 'import sys; print hasattr(sys, "ps1")'] + child = self.spawn(argv) + child.expect('False') + + +class TestNonInteractive: + def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, + expect_banner=False, python_flags='', env=None): + cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, + app_main, cmdline) + print 'POPEN:', cmdline + process = subprocess.Popen( + cmdline, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + shell=True, env=env, + universal_newlines=True + ) + child_in, child_out_err = process.stdin, process.stdout + child_in.write(senddata) + child_in.close() + data = child_out_err.read() + child_out_err.close() + process.wait() + assert (banner in data) == expect_banner # no banner unless expected + assert ('>>> ' in data) == expect_prompt # no prompt unless expected + return data, process.returncode + + def run(self, *args, **kwargs): + data, status = self.run_with_status_code(*args, **kwargs) + return data + + def test_script_on_stdin(self): + for extraargs, expected_argv in [ + ('', ['']), + ('-', ['-']), + ('- hello world', ['-', 'hello', 'world']), + ]: + data = self.run('%s < "%s"' % (extraargs, demo_script)) + assert "hello" in data + assert "Name: __main__" in data + assert "File: " in data + assert ("Exec: " + app_main) in data + assert ("Argv: " + repr(expected_argv)) in data + assert "goodbye" in data + + def test_run_crashing_script(self): + data = self.run('"%s"' % (crashing_demo_script,)) + assert 'Hello2' in data + assert 'NameError' in data + assert 'Goodbye2' not in data + + def test_crashing_script_on_stdin(self): + data = self.run(' < "%s"' % 
(crashing_demo_script,)) + assert 'Hello2' in data + assert 'NameError' in data + assert 'Goodbye2' not in data + + def test_option_W(self): + data = self.run('-W d -c "print 42"') + assert '42' in data + data = self.run('-Wd -c "print 42"') + assert '42' in data + + def test_option_W_crashing(self): + data = self.run('-W') + assert "Argument expected for the '-W' option" in data + + def test_option_W_arg_ignored(self): + data = self.run('-Wc') + assert "Invalid -W option ignored: invalid action: 'c'" in data + + def test_option_W_arg_ignored2(self): + data = self.run('-W-W') + assert "Invalid -W option ignored: invalid action:" in data + + def test_option_c(self): + data = self.run('-c "print 6**5"') + assert '7776' in data + + def test_no_pythonstartup(self, monkeypatch): + monkeypatch.setenv('PYTHONSTARTUP', crashing_demo_script) + data = self.run('"%s"' % (demo_script,)) + assert 'Hello2' not in data + data = self.run('-c pass') + assert 'Hello2' not in data + + def test_pythonwarnings(self, monkeypatch): + # PYTHONWARNINGS_ is special cased by app_main: we cannot directly set + # PYTHONWARNINGS because else the warnings raised from within pypy are + # turned in errors. + monkeypatch.setenv('PYTHONWARNINGS_', "once,error") + data = self.run('-W ignore -W default ' + '-c "import sys; print sys.warnoptions"') + assert "['ignore', 'default', 'once', 'error']" in data + + def test_option_m(self, monkeypatch): + if not hasattr(runpy, '_run_module_as_main'): + skip("requires CPython >= 2.6") + p = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'mymodule.py') + p = os.path.abspath(p) + monkeypatch.chdir(os.path.dirname(app_main)) + data = self.run('-m test.mymodule extra') + assert 'mymodule running' in data + assert 'Name: __main__' in data + # ignoring case for windows. abspath behaves different from autopath + # concerning drive letters right now. 
+ assert ('File: ' + p) in data + assert ('Argv: ' + repr([p, 'extra'])) in data + + def test_pythoninspect_doesnt_override_isatty(self): + os.environ['PYTHONINSPECT_'] = '1' + try: + data = self.run('', senddata='6*7\nprint 2+3\n') + assert data == '5\n' + finally: + del os.environ['PYTHONINSPECT_'] + + def test_i_flag_overrides_isatty(self): + data = self.run('-i', senddata='6*7\nraise SystemExit\n', + expect_prompt=True, expect_banner=True) + assert '42\n' in data + # if a file name is passed, the banner is never printed but + # we get a prompt anyway + cmdline = '-i %s' % getscript(""" + print 'hello world' + """) + data = self.run(cmdline, senddata='6*7\nraise SystemExit\n', + expect_prompt=True, expect_banner=False) + assert 'hello world\n' in data + assert '42\n' in data + + def test_option_S_copyright(self): + data = self.run('-S -i', expect_prompt=True, expect_banner=True) + assert 'copyright' not in data + + def test_non_interactive_stdout_fully_buffered(self): + path = getscript(r""" + import sys, time + sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers + time.sleep(1) + sys.stderr.write('\x00[STDERR]\n\x00') + time.sleep(1) + # stdout flushed automatically here + """) + cmdline = '%s -u "%s" %s' % (sys.executable, app_main, path) + print 'POPEN:', cmdline + child_in, child_out_err = os.popen4(cmdline) + data = child_out_err.read(11) + assert data == '\x00[STDERR]\n\x00' # from stderr + child_in.close() + data = child_out_err.read(11) + assert data == '\x00(STDOUT)\n\x00' # from stdout + child_out_err.close() + + def test_non_interactive_stdout_unbuffered(self, monkeypatch): + monkeypatch.setenv('PYTHONUNBUFFERED', '1') + path = getscript(r""" + import sys, time + sys.stdout.write('\x00(STDOUT)\n\x00') + time.sleep(1) + sys.stderr.write('\x00[STDERR]\n\x00') + time.sleep(1) + # stdout flushed automatically here + """) + cmdline = '%s -E "%s" %s' % (sys.executable, app_main, path) + print 'POPEN:', cmdline + child_in, child_out_err = os.popen4(cmdline) + data = child_out_err.read(11) + assert data == '\x00(STDOUT)\n\x00' # from stderr + data = child_out_err.read(11) + assert data == '\x00[STDERR]\n\x00' # from stdout + child_out_err.close() + child_in.close() + + def test_proper_sys_path(self, tmpdir): + data = self.run('-c "import _ctypes"', python_flags='-S') + if data.startswith('Traceback'): + py.test.skip("'python -S' cannot import extension modules: " + "see probably http://bugs.python.org/issue586680") + + @contextmanager + def chdir_and_unset_pythonpath(new_cwd): + old_cwd = new_cwd.chdir() + old_pythonpath = os.getenv('PYTHONPATH') + os.unsetenv('PYTHONPATH') + try: + yield + finally: + old_cwd.chdir() + # Can't call putenv with a None argument. 
+ if old_pythonpath is not None: + os.putenv('PYTHONPATH', old_pythonpath) + + tmpdir.join('site.py').write('print "SHOULD NOT RUN"') + runme_py = tmpdir.join('runme.py') + runme_py.write('print "some text"') + + cmdline = str(runme_py) + + with chdir_and_unset_pythonpath(tmpdir): + data = self.run(cmdline, python_flags='-S') + + assert data == "some text\n" + + runme2_py = tmpdir.mkdir('otherpath').join('runme2.py') + runme2_py.write('print "some new text"\n' + 'import sys\n' + 'print sys.path\n') + + cmdline2 = str(runme2_py) + + with chdir_and_unset_pythonpath(tmpdir): + data = self.run(cmdline2, python_flags='-S') + assert data.startswith("some new text\n") + assert repr(str(tmpdir.join('otherpath'))) in data + assert "''" not in data + + data = self.run('-c "import sys; print sys.path"') + assert data.startswith("[''") + + def test_pyc_commandline_argument(self): + p = getscript_pyc(self.space, "print 6*7\n") + assert os.path.isfile(p) and p.endswith('.pyc') + data = self.run(p) + assert data == 'in _run_compiled_module\n' + + def test_main_in_dir_commandline_argument(self): + if not hasattr(runpy, '_run_module_as_main'): + skip("requires CPython >= 2.6") + p = getscript_in_dir('import sys; print sys.argv[0]\n') + data = self.run(p) + assert data == p + '\n' + data = self.run(p + os.sep) + assert data == p + os.sep + '\n' + + def test_getfilesystemencoding(self): + py.test.skip("encoding is only set if stdout.isatty(), test is flawed") + if sys.version_info < (2, 7): + skip("test requires Python >= 2.7") + p = getscript_in_dir(""" + import sys + sys.stdout.write(u'15\u20ac') + sys.stdout.flush() + """) + env = os.environ.copy() + env["LC_CTYPE"] = 'en_US.UTF-8' + data = self.run(p, env=env) + assert data == '15\xe2\x82\xac' + + def test_pythonioencoding(self): + if sys.version_info < (2, 7): + skip("test requires Python >= 2.7") + for encoding, expected in [ + ("iso-8859-15", "15\xa4"), + ("utf-8", '15\xe2\x82\xac'), + ("utf-16-le", '1\x005\x00\xac\x20'), + ("iso-8859-1:ignore", "15"), + ("iso-8859-1:replace", "15?"), + ("iso-8859-1:backslashreplace", "15\\u20ac"), + ]: + p = getscript_in_dir(""" + import sys + sys.stdout.write(u'15\u20ac') + sys.stdout.flush() + """) + env = os.environ.copy() + env["PYTHONIOENCODING"] = encoding + data = self.run(p, env=env) + assert data == expected + + def test_sys_exit_pythonioencoding(self): + if sys.version_info < (2, 7): + skip("test required Python >= 2.7") + p = getscript_in_dir(""" + import sys + sys.exit(u'15\u20ac') + """) + env = os.environ.copy() + env["PYTHONIOENCODING"] = "utf-8" + data, status = self.run_with_status_code(p, env=env) + assert status == 1 + assert data.startswith("15\xe2\x82\xac") + + +class TestAppMain: + def test_print_info(self): + from pypy.interpreter import app_main + import sys, cStringIO + prev_so = sys.stdout + prev_ti = getattr(sys, 'pypy_translation_info', 'missing') + sys.pypy_translation_info = { + 'translation.foo': True, + 'translation.bar': 42, + 'translation.egg.something': None, + 'objspace.x': 'hello', + } + try: + sys.stdout = f = cStringIO.StringIO() + py.test.raises(SystemExit, app_main.print_info) + finally: + sys.stdout = prev_so + if prev_ti == 'missing': + del sys.pypy_translation_info + else: + sys.pypy_translation_info = prev_ti + assert f.getvalue() == ("[objspace]\n" + " x = 'hello'\n" + "[translation]\n" + " bar = 42\n" + " [egg]\n" + " something = None\n" + " foo = True\n") + + +class AppTestAppMain: + def setup_class(self): + # ---------------------------------------- + # setup code for 
test_setup_bootstrap_path + # ---------------------------------------- + from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION + cpy_ver = '%d.%d' % CPYTHON_VERSION[:2] + + goal_dir = os.path.dirname(app_main) + # build a directory hierarchy like which contains both bin/pypy-c and + # lib/pypy1.2/* + prefix = udir.join('pathtest').ensure(dir=1) + fake_exe = 'bin/pypy-c' + if sys.platform == 'win32': + fake_exe += '.exe' + fake_exe = prefix.join(fake_exe).ensure(file=1) + expected_path = [str(prefix.join(subdir).ensure(dir=1)) + for subdir in ('lib_pypy', + 'lib-python/%s' % cpy_ver)] + + self.w_goal_dir = self.space.wrap(goal_dir) + self.w_fake_exe = self.space.wrap(str(fake_exe)) + self.w_expected_path = self.space.wrap(expected_path) + self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + + foo_py = prefix.join('foo.py').write("pass") + self.w_foo_py = self.space.wrap(str(foo_py)) + + def test_setup_bootstrap_path(self): + import sys + old_sys_path = sys.path[:] + sys.path.append(self.goal_dir) + try: + import app_main + app_main.setup_bootstrap_path('/tmp/pypy-c') # stdlib not found + assert sys.executable == '' + assert sys.path == old_sys_path + [self.goal_dir] + + app_main.setup_bootstrap_path(self.fake_exe) + assert sys.executable == self.fake_exe + assert self.goal_dir not in sys.path + + newpath = sys.path[:] + if newpath[0].endswith('__extensions__'): + newpath = newpath[1:] + # we get at least 'expected_path', and maybe more (e.g.plat-linux2) + assert newpath[:len(self.expected_path)] == self.expected_path + finally: + sys.path[:] = old_sys_path + + def test_trunk_can_be_prefix(self): + import sys + import os + old_sys_path = sys.path[:] + sys.path.append(self.goal_dir) + try: + import app_main + pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') + app_main.setup_bootstrap_path(pypy_c) + newpath = sys.path[:] + # we get at least lib_pypy + # lib-python/X.Y.Z, and maybe more (e.g. 
plat-linux2) + assert len(newpath) >= 2 + for p in newpath: + assert p.startswith(self.trunkdir) + finally: + sys.path[:] = old_sys_path + + def test_entry_point(self): + import sys + import os + old_sys_path = sys.path[:] + sys.path.append(self.goal_dir) + try: + import app_main + pypy_c = os.path.join(self.trunkdir, 'pypy', 'goal', 'pypy-c') + app_main.entry_point(pypy_c, [self.foo_py]) + # assert it did not crash + finally: + sys.path[:] = old_sys_path diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_targetpypy.py @@ -0,0 +1,18 @@ +from pypy.goal.targetpypystandalone import get_entry_point, create_entry_point +from pypy.config.pypyoption import get_pypy_config + +class TestTargetPyPy(object): + def test_run(self): + config = get_pypy_config(translating=False) + entry_point = get_entry_point(config)[0] + entry_point(['pypy-c' , '-S', '-c', 'print 3']) + +def test_exeucte_source(space): + _, execute_source = create_entry_point(space, None) + execute_source("import sys; sys.modules['xyz'] = 3") + x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], + space.wrap('modules')), + space.wrap('xyz'))) + assert x == 3 + execute_source("sys") + # did not crash - the same globals diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -485,3 +485,68 @@ pckl = pickle.dumps(pack.mod) result = pickle.loads(pckl) assert pack.mod is result + + +class AppTestGeneratorCloning: + + def setup_class(cls): + try: + cls.space.appexec([], """(): + def f(): yield 42 + f().__reduce__() + """) + except TypeError, e: + if 'pickle generator' not in str(e): + raise + py.test.skip("Frames can't be __reduce__()-ed") + + def test_deepcopy_generator(self): + import copy + + def f(n): + for i in range(n): + yield 42 + i + g = f(4) + g2 = copy.deepcopy(g) + res = g.next() + assert res == 42 + res = g2.next() + assert res == 42 + g3 = copy.deepcopy(g) + res = g.next() + assert res == 43 + res = g2.next() + assert res == 43 + res = g3.next() + assert res == 43 + + def test_shallowcopy_generator(self): + """Note: shallow copies of generators are often confusing. + To start with, 'for' loops have an iterator that will not + be copied, and so create tons of confusion. + """ + import copy + + def f(n): + while n > 0: + yield 42 + n + n -= 1 + g = f(2) + g2 = copy.copy(g) + res = g.next() + assert res == 44 + res = g2.next() + assert res == 44 + g3 = copy.copy(g) + res = g.next() + assert res == 43 + res = g2.next() + assert res == 43 + res = g3.next() + assert res == 43 + g4 = copy.copy(g2) + for i in range(2): + raises(StopIteration, g.next) + raises(StopIteration, g2.next) + raises(StopIteration, g3.next) + raises(StopIteration, g4.next) diff --git a/pypy/interpreter/test2/__init__.py b/pypy/interpreter/test2/__init__.py deleted file mode 100644 --- a/pypy/interpreter/test2/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -#empty diff --git a/pypy/interpreter/test2/test_app_main.py b/pypy/interpreter/test2/test_app_main.py deleted file mode 100644 --- a/pypy/interpreter/test2/test_app_main.py +++ /dev/null @@ -1,965 +0,0 @@ -""" -Tests for the entry point of pypy-c, app_main.py. 
-""" -from __future__ import with_statement -import py -import sys, os, re, runpy, subprocess -from rpython.tool.udir import udir -from contextlib import contextmanager -from pypy.conftest import pypydir - -banner = sys.version.splitlines()[0] - -app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') -app_main = os.path.abspath(app_main) - -_counter = 0 -def _get_next_path(ext='.py'): - global _counter - p = udir.join('demo_test_app_main_%d%s' % (_counter, ext)) - _counter += 1 - return p - -def getscript(source): - p = _get_next_path() - p.write(str(py.code.Source(source))) - return str(p) - -def getscript_pyc(space, source): - p = _get_next_path() - p.write(str(py.code.Source(source))) - w_dir = space.wrap(str(p.dirpath())) - w_modname = space.wrap(p.purebasename) - space.appexec([w_dir, w_modname], """(dir, modname): - import sys - d = sys.modules.copy() - sys.path.insert(0, dir) - __import__(modname) - sys.path.pop(0) - for key in sys.modules.keys(): - if key not in d: - del sys.modules[key] - """) - p = str(p) + 'c' - assert os.path.isfile(p) # the .pyc file should have been created above - return p - -def getscript_in_dir(source): - pdir = _get_next_path(ext='') - p = pdir.ensure(dir=1).join('__main__.py') - p.write(str(py.code.Source(source))) - # return relative path for testing purposes - return py.path.local().bestrelpath(pdir) - -demo_script = getscript(""" - print 'hello' - print 'Name:', __name__ - print 'File:', __file__ - import sys - print 'Exec:', sys.executable - print 'Argv:', sys.argv - print 'goodbye' - myvalue = 6*7 - """) - -crashing_demo_script = getscript(""" - print 'Hello2' - myvalue2 = 11 - ooups - myvalue2 = 22 - print 'Goodbye2' # should not be reached - """) - - -class TestParseCommandLine: - def check_options(self, options, sys_argv, **expected): - assert sys.argv == sys_argv - for key, value in expected.items(): - assert options[key] == value - for key, value in options.items(): - if key not in expected: - assert not value, ( - "option %r has unexpectedly the value %r" % (key, value)) - - def check(self, argv, env, **expected): - import StringIO - from pypy.interpreter import app_main - saved_env = os.environ.copy() - saved_sys_argv = sys.argv[:] - saved_sys_stdout = sys.stdout - saved_sys_stderr = sys.stdout - app_main.os = os - try: - os.environ.update(env) - sys.stdout = sys.stderr = StringIO.StringIO() - try: - options = app_main.parse_command_line(argv) - except SystemExit: - output = expected['output_contains'] - assert output in sys.stdout.getvalue() - else: - self.check_options(options, **expected) - finally: - os.environ.clear() - os.environ.update(saved_env) - sys.argv[:] = saved_sys_argv - sys.stdout = saved_sys_stdout - sys.stderr = saved_sys_stderr - - def test_all_combinations_I_can_think_of(self): - self.check([], {}, sys_argv=[''], run_stdin=True) - self.check(['-'], {}, sys_argv=['-'], run_stdin=True) - self.check(['-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) - self.check(['-OO'], {}, sys_argv=[''], run_stdin=True, optimize=2) - self.check(['-O', '-O'], {}, sys_argv=[''], run_stdin=True, optimize=2) - self.check(['-Qnew'], {}, sys_argv=[''], run_stdin=True, division_new=1) - self.check(['-Qold'], {}, sys_argv=[''], run_stdin=True, division_new=0) - self.check(['-Qwarn'], {}, sys_argv=[''], run_stdin=True, division_warning=1) - self.check(['-Qwarnall'], {}, sys_argv=[''], run_stdin=True, - division_warning=2) - self.check(['-Q', 'new'], {}, sys_argv=[''], run_stdin=True, division_new=1) - 
self.check(['-SOQnew'], {}, sys_argv=[''], run_stdin=True, - no_site=1, optimize=1, division_new=1) - self.check(['-SOQ', 'new'], {}, sys_argv=[''], run_stdin=True, - no_site=1, optimize=1, division_new=1) - self.check(['-i'], {}, sys_argv=[''], run_stdin=True, - interactive=1, inspect=1) - self.check(['-?'], {}, output_contains='usage:') - self.check(['-h'], {}, output_contains='usage:') - self.check(['-S', '-tO', '-h'], {}, output_contains='usage:') - self.check(['-S', '-thO'], {}, output_contains='usage:') - self.check(['-S', '-tO', '--help'], {}, output_contains='usage:') - self.check(['-S', '-tO', '--info'], {}, output_contains='translation') - self.check(['-S', '-tO', '--version'], {}, output_contains='Python') - self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], - run_stdin=True, no_site=1) - self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') - self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') From noreply at buildbot.pypy.org Tue Jun 4 11:58:47 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a comment Message-ID: <20130604095847.2CE371C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64759:1b7d2a50f5f2 Date: 2013-05-10 17:26 +0400 http://bitbucket.org/pypy/pypy/changeset/1b7d2a50f5f2/ Log: Fix a comment diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,7 +2,7 @@ # App-level version of py.py. # See test/test_app_main. -# Missing vs CPython: -d, -OO, -t, -v, -x, -3 +# Missing vs CPython: -d, -t, -v, -x, -3 """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x From noreply at buildbot.pypy.org Tue Jun 4 11:58:50 2013 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 4 Jun 2013 11:58:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge pypy/pypy default Message-ID: <20130604095850.7A5D81C016D@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r64760:317c58dc4f89 Date: 2013-05-22 10:36 -0500 http://bitbucket.org/pypy/pypy/changeset/317c58dc4f89/ Log: Merge pypy/pypy default diff too long, truncating to 2000 out of 11846 lines diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. 
+import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,6 +1,15 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os import imp @@ -119,7 +128,7 @@ optional C speedup components. 
""" if compiler.compiler_type == "unix": - compiler.compiler_so.extend(['-fPIC', '-Wimplicit']) + compiler.compiler_so.extend(['-O2', '-fPIC', '-Wimplicit']) compiler.shared_lib_extension = get_config_var('SO') if "CFLAGS" in os.environ: cflags = os.environ["CFLAGS"].split() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -324,7 +324,12 @@ if self._close: self._sock.close() else: - self._sock._decref_socketios() + try: + self._sock._decref_socketios() + except AttributeError: + pass # bah, someone built a _fileobject manually + # with some unexpected replacement of the + # _socketobject class self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') 
self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -130,7 +130,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cd.py'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), @@ -177,7 +177,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -54,4 +54,9 @@ fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) imp.load_module('_testcapi', fp, filename, description) -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + compile_shared() diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -32,10 +32,11 @@ "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", + "thread", "itertools", "pyexpat", "_ssl", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv"] # "cpyext", "cppyy"] +# disabled until problems are fixed )) translation_modules = default_modules.copy() @@ -120,12 +121,10 @@ __import__(name) except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ - config.add_warning( + raise Exception( "The module %r is disabled\n" % (modname,) + "because importing %s raised %s\n" % (name, errcls) + str(e)) - raise ConflictConfigError("--withmod-%s: %s" % (modname, - errcls)) return validator else: return None @@ -364,6 +363,7 @@ # ignore names from 'essential_modules', notably 'exceptions', which # may not be present in config.objspace.usemodules at all modules = [name for name in modules if name not in essential_modules] + config.objspace.usemodules.suggest(**dict.fromkeys(modules, True)) def enable_translationmodules(config): diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,8 +339,9 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins work too, but the mixed in class needs a ``_mixin_ = True`` - class attribute ++ simple mixins somewhat work too, but the mixed in class needs a + ``_mixin_ = True`` class attribute. isinstance checks against the + mixin type will fail when translated. + classes are first-class objects too diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. 
-release = '2.0.0' +release = '2.0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -28,7 +28,8 @@ Layers ------ -PyPy has layers. Those layers help us keep the respective parts separated enough +PyPy has layers. Just like Ogres or onions. +Those layers help us keep the respective parts separated enough to be worked on independently and make the complexity manageable. This is, again, just a sanity requirement for such a complex project. For example writing a new optimization for the JIT usually does **not** involve touching a Python diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -22,7 +22,8 @@ will capture the revision number of this change for the release; some of the next updates may be done before or after branching; make sure things are ported back to the trunk and to the branch as - necessary + necessary; also update the version number in pypy/doc/conf.py, + and in pypy/doc/index.rst * update pypy/doc/contributor.rst (and possibly LICENSE) * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0`_: the latest official release +* `Release 2.0.2`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0`: http://pypy.org/download.html +.. _`Release 2.0.2`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.0.1.rst b/pypy/doc/release-2.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.1.rst @@ -0,0 +1,46 @@ +============================== +PyPy 2.0.1 - Bohr Smørrebrød +============================== + +We're pleased to announce PyPy 2.0.1. This is a stable bugfix release +over `2.0`_. You can download it here: + + http://pypy.org/download.html + +The fixes are mainly about fatal errors or crashes in our stdlib. See +below for more details. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +- fix an occasional crash in the JIT that ends in `RPython Fatal error: + NotImplementedError`__. + +- `id(x)` is now always a positive number (except on int/float/long/complex). + This fixes an issue in ``_sqlite.py`` (mostly for 32-bit Linux). + +- fix crashes of callback-from-C-functions (with cffi) when used together + with Stackless features, on asmgcc (i.e. Linux only). Now `gevent should + work better`__. + +- work around an eventlet issue with `socket._decref_socketios()`__. + +.. __: https://bugs.pypy.org/issue1482 +.. 
__: http://mail.python.org/pipermail/pypy-dev/2013-May/011362.html +.. __: https://bugs.pypy.org/issue1468 +.. _2.0: release-2.0.0.html + +Cheers, +arigo et. al. for the PyPy team diff --git a/pypy/doc/release-2.0.2.rst b/pypy/doc/release-2.0.2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.2.rst @@ -0,0 +1,46 @@ +========================= +PyPy 2.0.2 - Fermi Panini +========================= + +We're pleased to announce PyPy 2.0.2. This is a stable bugfix release +over `2.0`_ and `2.0.1`_. You can download it here: + + http://pypy.org/download.html + +It fixes a crash in the JIT when calling external C functions (with +ctypes/cffi) in a multithreaded context. + +.. _2.0: release-2.0.0.html +.. _2.0.1: release-2.0.1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +This release contains only the fix described above. A crash (or wrong +results) used to occur if all these conditions were true: + +- your program is multithreaded; + +- it runs on a single-core machine or a heavily-loaded multi-core one; + +- it uses ctypes or cffi to issue external calls to C functions. + +This was fixed in the branch `emit-call-x86`__ (see the example file +``bug1.py``). + +.. __: https://bitbucket.org/pypy/pypy/commits/7c80121abbf4 + +Cheers, +arigo et. al. for the PyPy team diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -19,23 +19,28 @@ branches.discard('default') return startrev, branches -def get_merged_branches(path, startrev, endrev): - if getstatusoutput('hg root')[0]: +def get_merged_branches(path, startrev, endrev, current_branch=None): + errcode, wc_branch = getstatusoutput('hg branch') + if errcode != 0: py.test.skip('no Mercurial repo') + if current_branch is None: + current_branch = wc_branch # X = take all the merges which are descendants of startrev and are on default # revset = all the parents of X which are not on default # ===> # revset contains all the branches which have been merged to default since # startrev - revset = 'parents(%s::%s and \ + revset = "parents(%s::%s and \ merge() and \ - branch(default)) and \ - not branch(default)' % (startrev, endrev) + branch('%s')) and \ + not branch('%s')" % (startrev, endrev, + current_branch, current_branch) cmd = r'hg log -R "%s" -r "%s" --template "{branches}\n"' % (path, revset) out = getoutput(cmd) branches = set(map(str.strip, out.splitlines())) - return branches + branches.discard("default") + return branches, current_branch def test_parse_doc(): @@ -65,7 +70,8 @@ assert branches == set(['foobar', 'hello']) def test_get_merged_branches(): - branches = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93') + branches, _ = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93', + 'default') assert branches == set(['numpy-indexing-by-arrays-bool', 'better-jit-hooks-2', 'numpypy-ufuncs']) @@ -76,7 +82,9 @@ whatsnew_list.sort() last_whatsnew = whatsnew_list[-1].read() startrev, documented = parse_doc(last_whatsnew) - merged = get_merged_branches(ROOT, startrev, '') + merged, branch = 
get_merged_branches(ROOT, startrev, '') + merged.discard('default') + merged.discard('') not_documented = merged.difference(documented) not_merged = documented.difference(merged) print 'Branches merged but not documented:' @@ -85,4 +93,6 @@ print 'Branches documented but not merged:' print '\n'.join(not_merged) print - assert not not_documented and not not_merged + assert not not_documented + if branch == 'default': + assert not not_merged diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,24 @@ .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -8,7 +8,9 @@ arch = 'linux' cmd = 'wget "%s"' tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" -if sys.platform.startswith('darwin'): + if os.uname()[-1].startswith('arm'): + arch += '-armhf-raspbian' +elif sys.platform.startswith('darwin'): arch = 'osx' cmd = 'curl -O "%s"' tar = "tar -x -v --strip-components=2 -f %s '*/bin/pypy'" diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -2,6 +2,7 @@ import os, sys +import pypy from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.tool.ann_override import PyPyAnnotatorPolicy @@ -9,6 +10,8 @@ from rpython.config.config import ConflictConfigError from pypy.tool.option import make_objspace from pypy.conftest import pypydir +from rpython.rlib import rthread +from pypy.module.thread import os_thread thisdir = py.path.local(__file__).dirpath() @@ -78,13 +81,58 @@ # should be used as sparsely as possible, just to register callbacks from rpython.rlib.entrypoint import entrypoint - from rpython.rtyper.lltypesystem import rffi + from rpython.rtyper.lltypesystem import rffi, lltype + + w_pathsetter = space.appexec([], """(): + def f(path): + import sys + sys.path[:] = path + return f + """) + + @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + def pypy_setup_home(ll_home, verbose): + from pypy.module.sys.initpath import pypy_find_stdlib + if ll_home: + home = rffi.charp2str(ll_home) + else: + home = pypydir + w_path = pypy_find_stdlib(space, home) + if space.is_none(w_path): + if verbose: + debug("Failed to find library based on pypy_find_stdlib") + return 1 + space.startup() + space.call_function(w_pathsetter, w_path) + # import site + try: + import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + space.wrap('__import__')) + space.call_function(import_, space.wrap('site')) + return 0 + except OperationError, e: + if verbose: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return 1 @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def 
pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) return _pypy_execute_source(source) + @entrypoint('main', [], c_name='pypy_init_threads') + def pypy_init_threads(): + if space.config.objspace.usemodules.thread: + os_thread.setup_threads(space) + rffi.aroundstate.before() + + @entrypoint('main', [], c_name='pypy_thread_attach') + def pypy_thread_attach(): + if space.config.objspace.usemodules.thread: + rthread.gc_thread_start() + w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), space.builtin_modules['__builtin__']) @@ -101,7 +149,10 @@ return 1 return 0 - return entry_point, _pypy_execute_source # for tests + return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} def call_finish(space): space.finish() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -855,9 +855,10 @@ self.emit_jump(ops.JUMP_IF_FALSE_OR_POP, cleanup, True) if i < (ops_count - 1): comp.comparators[i].walkabout(self) - comp.comparators[-1].walkabout(self) - last_kind = compare_operations(comp.ops[-1]) - self.emit_op_arg(ops.COMPARE_OP, last_kind) + last_op, last_comparator = comp.ops[-1], comp.comparators[-1] + if not self._optimize_comparator(last_op, last_comparator): + last_comparator.walkabout(self) + self.emit_op_arg(ops.COMPARE_OP, compare_operations(last_op)) if ops_count > 1: end = self.new_block() self.emit_jump(ops.JUMP_FORWARD, end) @@ -866,6 +867,37 @@ self.emit_op(ops.POP_TOP) self.use_next_block(end) + def _optimize_comparator(self, op, node): + """Fold lists/sets of constants in the context of "in"/"not in". 
+ + lists are folded into tuples, sets into frozensets, otherwise + returns False + """ + if op in (ast.In, ast.NotIn): + is_list = isinstance(node, ast.List) + if is_list or isinstance(node, ast.Set): + w_const = self._tuple_of_consts(node.elts) + if w_const is not None: + if not is_list: + from pypy.objspace.std.setobject import ( + W_FrozensetObject) + w_const = W_FrozensetObject(self.space, w_const) + self.load_const(w_const) + return True + return False + + def _tuple_of_consts(self, elts): + """Return a tuple of consts from elts if possible, or None""" + count = len(elts) if elts is not None else 0 + consts_w = [None] * count + for i in range(count): + w_value = elts[i].as_constant() + if w_value is None: + # Not all constants + return None + consts_w[i] = w_value + return self.space.newtuple(consts_w) + def visit_IfExp(self, ifexp): self.update_position(ifexp.lineno) end = self.new_block() diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1025,3 +1025,30 @@ counts = self.count_instructions(source3) assert counts[ops.BUILD_LIST] == 1 assert ops.BUILD_LIST_FROM_ARG not in counts + + def test_folding_of_list_constants(self): + for source in ( + # in/not in constants with BUILD_LIST should be folded to a tuple: + 'a in [1,2,3]', + 'a not in ["a","b","c"]', + 'a in [None, 1, None]', + 'a not in [(1, 2), 3, 4]', + ): + source = 'def f(): %s' % source + counts = self.count_instructions(source) + assert ops.BUILD_LIST not in counts + assert ops.LOAD_CONST in counts + + def test_folding_of_set_constants(self): + for source in ( + # in/not in constants with BUILD_SET should be folded to a frozenset: + 'a in {1,2,3}', + 'a not in {"a","b","c"}', + 'a in {None, 1, None}', + 'a not in {(1, 2), 3, 4}', + 'a in {1, 2, 3, 3, 2, 1}', + ): + source = 'def f(): %s' % source + counts = self.count_instructions(source) + assert ops.BUILD_SET not in counts + assert ops.LOAD_CONST in counts diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -242,6 +242,11 @@ def __spacebind__(self, space): return self + def unwrap(self, space): + """NOT_RPYTHON""" + # _____ this code is here to support testing only _____ + return self + class W_InterpIterable(W_Root): def __init__(self, space, w_iterable): @@ -666,7 +671,8 @@ def id(self, w_obj): w_result = w_obj.immutable_unique_id(self) if w_result is None: - w_result = self.wrap(compute_unique_id(w_obj)) + # in the common case, returns an unsigned value + w_result = self.wrap(r_uint(compute_unique_id(w_obj))) return w_result def hash_w(self, w_obj): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -803,7 +803,6 @@ args = inspect.getargs(func.func_code) if args.varargs or args.keywords: raise TypeError("Varargs and keywords not supported in unwrap_spec") - assert not func.func_defaults argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" def f(w_obj, %(args)s): @@ -812,11 +811,13 @@ d = {} exec func_code.compile() in d f = d['f'] + f.func_defaults = unbound_meth.func_defaults + f.func_doc = unbound_meth.func_doc f.__module__ = func.__module__ # necessary for unique identifiers for pickling f.func_name = func.func_name if unwrap_spec is None: - unwrap_spec = {} 
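# Editor's sketch (not part of the diff): observable effect of the
# _optimize_comparator change and its tests above -- on a PyPy with this
# change, a literal list or set on the right-hand side of "in"/"not in" is
# folded into a tuple or frozenset constant at compile time.
import dis

def f(a):
    return (a in [1, 2, 3]) and (a not in {"x", "y"})

dis.dis(f)
# expected on such a PyPy: LOAD_CONST (1, 2, 3) and LOAD_CONST
# frozenset(['x', 'y']) with no BUILD_LIST / BUILD_SET opcodes emitted.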
+ unwrap_spec = getattr(unbound_meth, 'unwrap_spec', {}) else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -903,22 +903,33 @@ expected_path = [str(prefix.join(subdir).ensure(dir=1)) for subdir in ('lib_pypy', 'lib-python/%s' % cpy_ver)] + # an empty directory from where we can't find the stdlib + tmp_dir = str(udir.join('tmp').ensure(dir=1)) self.w_goal_dir = self.space.wrap(goal_dir) self.w_fake_exe = self.space.wrap(str(fake_exe)) self.w_expected_path = self.space.wrap(expected_path) self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir)) + self.w_tmp_dir = self.space.wrap(tmp_dir) + foo_py = prefix.join('foo.py').write("pass") self.w_foo_py = self.space.wrap(str(foo_py)) def test_setup_bootstrap_path(self): - import sys + # Check how sys.path is handled depending on if we can find a copy of + # the stdlib in setup_bootstrap_path. + import sys, os old_sys_path = sys.path[:] + old_cwd = os.getcwd() + sys.path.append(self.goal_dir) + # make sure cwd does not contain a stdlib + os.chdir(self.tmp_dir) + tmp_pypy_c = os.path.join(self.tmp_dir, 'pypy-c') try: import app_main - app_main.setup_bootstrap_path('/tmp/pypy-c') # stdlib not found + app_main.setup_bootstrap_path(tmp_pypy_c) # stdlib not found assert sys.executable == '' assert sys.path == old_sys_path + [self.goal_dir] @@ -933,6 +944,7 @@ assert newpath[:len(self.expected_path)] == self.expected_path finally: sys.path[:] = old_sys_path + os.chdir(old_cwd) def test_trunk_can_be_prefix(self): import sys diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -936,6 +936,21 @@ output = s.getvalue() assert "LOAD_GLOBAL" not in output + def test_folding_of_list_constants(self): + source = 'a in [1, 2, 3]' + co = compile(source, '', 'exec') + i = co.co_consts.index((1, 2, 3)) + assert i > -1 + assert isinstance(co.co_consts[i], tuple) + + def test_folding_of_set_constants(self): + source = 'a in {1, 2, 3}' + co = compile(source, '', 'exec') + i = co.co_consts.index(set([1, 2, 3])) + assert i > -1 + assert isinstance(co.co_consts[i], frozenset) + + class AppTestCallMethod(object): spaceconfig = {'objspace.opcodes.CALL_METHOD': True} diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -135,18 +135,39 @@ def test_interpindirect2app(self): space = self.space + class BaseA(W_Root): def method(self, space, x): + "This is a method" + pass + + def method_with_default(self, space, x=5): + pass + + @gateway.unwrap_spec(x=int) + def method_with_unwrap_spec(self, space, x): pass class A(BaseA): def method(self, space, x): return space.wrap(x + 2) + def method_with_default(self, space, x): + return space.wrap(x + 2) + + def method_with_unwrap_spec(self, space, x): + return space.wrap(x + 2) + class B(BaseA): def method(self, space, x): return space.wrap(x + 1) + def method_with_default(self, space, x): + return space.wrap(x + 1) + + def method_with_unwrap_spec(self, space, x): + return space.wrap(x + 1) + class FakeTypeDef(object): rawdict = {} bases = {} @@ -163,6 +184,23 @@ assert space.int_w(space.call_function(w_c, w_a, space.wrap(1))) == 1 + 2 assert 
space.int_w(space.call_function(w_c, w_b, space.wrap(-10))) == -10 + 1 + doc = space.str_w(space.getattr(w_c, space.wrap('__doc__'))) + assert doc == "This is a method" + + meth_with_default = gateway.interpindirect2app( + BaseA.method_with_default, {'x': int}) + w_d = space.wrap(meth_with_default) + + assert space.int_w(space.call_function(w_d, w_a, space.wrap(4))) == 4 + 2 + assert space.int_w(space.call_function(w_d, w_b, space.wrap(-10))) == -10 + 1 + assert space.int_w(space.call_function(w_d, w_a)) == 5 + 2 + assert space.int_w(space.call_function(w_d, w_b)) == 5 + 1 + + meth_with_unwrap_spec = gateway.interpindirect2app( + BaseA.method_with_unwrap_spec) + w_e = space.wrap(meth_with_unwrap_spec) + assert space.int_w(space.call_function(w_e, w_a, space.wrap(4))) == 4 + 2 + def test_interp2app_unwrap_spec(self): space = self.space w = space.wrap diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -1,5 +1,6 @@ from pypy.goal.targetpypystandalone import get_entry_point, create_entry_point from pypy.config.pypyoption import get_pypy_config +from rpython.rtyper.lltypesystem import rffi, lltype class TestTargetPyPy(object): def test_run(self): @@ -8,11 +9,20 @@ entry_point(['pypy-c' , '-S', '-c', 'print 3']) def test_exeucte_source(space): - _, execute_source = create_entry_point(space, None) - execute_source("import sys; sys.modules['xyz'] = 3") + _, d = create_entry_point(space, None) + execute_source = d['pypy_execute_source'] + lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") + execute_source(lls) + lltype.free(lls, flavor='raw') x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], space.wrap('modules')), space.wrap('xyz'))) assert x == 3 - execute_source("sys") + lls = rffi.str2charp("sys") + execute_source(lls) + lltype.free(lls, flavor='raw') # did not crash - the same globals + pypy_setup_home = d['pypy_setup_home'] + lls = rffi.str2charp(__file__) + pypy_setup_home(lls, 1) + lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -55,6 +55,7 @@ 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', + 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,11 +1,10 @@ -from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import IndexCache -from rpython.rlib import rposix +from rpython.rlib import rposix, rgc def internal_repr(space, w_object): @@ -56,7 +55,7 @@ bltn = BuiltinFunction(func) return space.wrap(bltn) - at unwrap_spec(ObjSpace, W_Root, str) + at unwrap_spec(meth=str) def lookup_special(space, w_obj, meth): """Lookup up a special method on an object.""" if 
space.is_oldstyle_instance(w_obj): @@ -108,3 +107,7 @@ space.setitem(space.builtin.w_dict, space.wrap('__debug__'), space.wrap(debug)) + + at unwrap_spec(estimate=int) +def add_memory_pressure(estimate): + rgc.add_memory_pressure(estimate) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -114,8 +114,11 @@ ge = _make_comparison('ge') def hash(self): - h = (objectmodel.compute_identity_hash(self.ctype) ^ - rffi.cast(lltype.Signed, self._cdata)) + h = rffi.cast(lltype.Signed, self._cdata) + # To hash pointers in dictionaries. Assumes that h shows some + # alignment (to 4, 8, maybe 16 bytes), so we use the following + # formula to avoid the trailing bits being always 0. + h = h ^ (h >> 4) return self.space.wrap(h) def getitem(self, w_index): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -365,8 +365,9 @@ BInt = new_primitive_type("int") BFloat = new_primitive_type("float") for i in range(1, 20): - if (hash(cast(BChar, chr(i))) != - hash(cast(BInt, i))): + x1 = cast(BChar, chr(i)) + x2 = cast(BInt, i) + if hash(x1) != hash(x2): break else: raise AssertionError("hashes are equal") @@ -2723,6 +2724,14 @@ assert x.__name__ == '' assert hasattr(x, '__doc__') +def test_different_types_of_ptr_equality(): + BVoidP = new_pointer_type(new_void_type()) + BIntP = new_pointer_type(new_primitive_type("int")) + x = cast(BVoidP, 12345) + assert x == cast(BIntP, 12345) + assert x != cast(BIntP, 12344) + assert hash(x) == hash(cast(BIntP, 12345)) + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.6" diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_ffi/test/test_funcptr.py --- a/pypy/module/_ffi/test/test_funcptr.py +++ b/pypy/module/_ffi/test/test_funcptr.py @@ -74,9 +74,9 @@ from _ffi import CDLL, types # this should return *all* loaded libs, dlopen(NULL) dll = CDLL(None) - # Assume CPython, or PyPy compiled with cpyext - res = dll.getfunc('Py_IsInitialized', [], types.slong)() - assert res == 1 + # libm should be loaded + res = dll.getfunc('sqrt', [types.double], types.double)(1.0) + assert res == 1.0 def test_callfunc(self): from _ffi import CDLL, types @@ -139,7 +139,7 @@ def test_pointer_args(self): """ - extern int dummy; // defined in test_void_result + extern int dummy; // defined in test_void_result DLLEXPORT int* get_dummy_ptr() { return &dummy; } DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr = val; } """ @@ -158,7 +158,7 @@ def test_convert_pointer_args(self): """ - extern int dummy; // defined in test_void_result + extern int dummy; // defined in test_void_result DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ @@ -170,7 +170,7 @@ def _as_ffi_pointer_(self, ffitype): assert ffitype is types.void_p return self.value - + libfoo = CDLL(self.libfoo_name) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) @@ -259,7 +259,7 @@ def test_typed_pointer_args(self): """ - extern int dummy; // defined in test_void_result + extern int dummy; // defined in test_void_result DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ @@ 
-551,7 +551,7 @@ from _ffi import CDLL, types libfoo = CDLL(self.libfoo_name) raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") - + def test_OSError_loading(self): from _ffi import CDLL, types raises(OSError, "CDLL('I do not exist')") @@ -606,7 +606,7 @@ from _rawffi import FUNCFLAG_STDCALL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') - wrong_pow = FuncPtr.fromaddr(pow_addr, 'pow', + wrong_pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], types.double, FUNCFLAG_STDCALL) try: wrong_pow(2, 3) == 8 @@ -622,7 +622,7 @@ from _rawffi import FUNCFLAG_STDCALL kernel = WinDLL('Kernel32.dll') sleep_addr = kernel.getaddressindll('Sleep') - sleep = FuncPtr.fromaddr(sleep_addr, 'sleep', [types.uint], + sleep = FuncPtr.fromaddr(sleep_addr, 'sleep', [types.uint], types.void, FUNCFLAG_STDCALL) sleep(10) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -232,9 +232,9 @@ import _rawffi # this should return *all* loaded libs, dlopen(NULL) dll = _rawffi.CDLL(None) - # Assume CPython, or PyPy compiled with cpyext - res = dll.ptr('Py_IsInitialized', [], 'l')() - assert res[0] == 1 + func = dll.ptr('rand', [], 'i') + res = func() + assert res[0] != 0 def test_libc_load(self): import _rawffi diff --git a/pypy/module/array/__init__.py b/pypy/module/array/__init__.py --- a/pypy/module/array/__init__.py +++ b/pypy/module/array/__init__.py @@ -1,12 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule -from pypy.module.array.interp_array import types -from pypy.objspace.std.model import registerimplementation - -for mytype in types.values(): - registerimplementation(mytype.w_class) - - class Module(MixedModule): interpleveldefs = { 'array': 'interp_array.W_ArrayBase', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -2,17 +2,14 @@ from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import GetSetProperty, make_weakref_descr +from pypy.interpreter.gateway import interp2app, unwrap_spec, interpindirect2app +from pypy.interpreter.typedef import GetSetProperty, make_weakref_descr, TypeDef +from pypy.interpreter.baseobjspace import W_Root from pypy.module._file.interp_file import W_File -from pypy.objspace.std.model import W_Object -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.stdtypedef import SMM, StdTypeDef -from pypy.objspace.std.register_all import register_all from rpython.rlib import jit from rpython.rlib.rarithmetic import ovfcheck, widen from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.objectmodel import specialize, keepalive_until_here +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi @@ -39,9 +36,9 @@ if len(__args__.arguments_w) > 0: w_initializer = __args__.arguments_w[0] if space.type(w_initializer) is space.w_str: - a.fromstring(space.str_w(w_initializer)) + a.descr_fromstring(space, space.str_w(w_initializer)) elif space.type(w_initializer) is space.w_list: - a.fromlist(w_initializer) + a.descr_fromlist(space, w_initializer) else: a.extend(w_initializer, True) break @@ -52,31 +49,6 @@ return a -array_append = SMM('append', 2) 
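# Editor's note (hedged): the _ffi/_rawffi test changes above stop assuming
# that the CPython C API symbol Py_IsInitialized is resolvable and probe plain
# libc/libm symbols instead, since dlopen(NULL) only exposes what is already
# linked into the process.  A rough ctypes equivalent of the new tests
# (Unix-only, and assuming libm is linked in, as the tests themselves assume):
import ctypes
whole_process = ctypes.CDLL(None)        # same handle as dlopen(NULL)
whole_process.rand.restype = ctypes.c_int
print whole_process.rand()               # some pseudo-random int, cf. test__rawffi
whole_process.sqrt.restype = ctypes.c_double
whole_process.sqrt.argtypes = [ctypes.c_double]
print whole_process.sqrt(1.0)            # 1.0, cf. test_funcptr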
-array_extend = SMM('extend', 2) - -array_count = SMM('count', 2) -array_index = SMM('index', 2) -array_reverse = SMM('reverse', 1) -array_remove = SMM('remove', 2) -array_pop = SMM('pop', 2, defaults=(-1,)) -array_insert = SMM('insert', 3) - -array_tolist = SMM('tolist', 1) -array_fromlist = SMM('fromlist', 2) -array_tostring = SMM('tostring', 1) -array_fromstring = SMM('fromstring', 2) -array_tounicode = SMM('tounicode', 1) -array_fromunicode = SMM('fromunicode', 2) -array_tofile = SMM('tofile', 2) -array_fromfile = SMM('fromfile', 3) - -array_buffer_info = SMM('buffer_info', 1) -array_reduce = SMM('__reduce__', 1) -array_copy = SMM('__copy__', 1) -array_byteswap = SMM('byteswap', 1) - - def descr_itemsize(space, self): return space.wrap(self.itemsize) @@ -84,28 +56,476 @@ def descr_typecode(space, self): return space.wrap(self.typecode) +arr_eq_driver = jit.JitDriver(greens = ['comp_func'], reds = 'auto') +EQ, NE, LT, LE, GT, GE = range(6) -class W_ArrayBase(W_Object): - @staticmethod - def register(typeorder): - typeorder[W_ArrayBase] = [] +def compare_arrays(space, arr1, arr2, comp_op, comp_func): + if (not isinstance(arr1, W_ArrayBase) or + not isinstance(arr2, W_ArrayBase)): + return space.w_NotImplemented + if comp_op == EQ and arr1.len != arr2.len: + return space.w_False + if comp_op == NE and arr1.len != arr2.len: + return space.w_True + lgt = min(arr1.len, arr2.len) + for i in range(lgt): + arr_eq_driver.jit_merge_point(comp_func=comp_func) + w_elem1 = arr1.w_getitem(space, i) + w_elem2 = arr2.w_getitem(space, i) + res = space.is_true(comp_func(w_elem1, w_elem2)) + if comp_op == EQ: + if not res: + return space.w_False + elif comp_op == NE: + if res: + return space.w_True + elif comp_op == LT or comp_op == GT: + if res: + return space.w_True + elif not space.is_true(space.eq(w_elem1, w_elem2)): + return space.w_False + else: + if not res: + return space.w_False + elif not space.is_true(space.eq(w_elem1, w_elem2)): + return space.w_True + # we have some leftovers + if comp_op == EQ: + return space.w_True + elif comp_op == NE: + return space.w_False + if arr1.len == arr2.len: + if comp_op == LT or comp_op == GT: + return space.w_False + return space.w_True + if comp_op == LT or comp_op == LE: + if arr1.len < arr2.len: + return space.w_True + return space.w_False + if arr1.len > arr2.len: + return space.w_True + return space.w_False -W_ArrayBase.typedef = StdTypeDef( +UNICODE_ARRAY = lltype.Ptr(lltype.Array(lltype.UniChar, + hints={'nolength': True})) + +class W_ArrayBase(W_Root): + _attrs_ = ('space', 'len', 'allocated', '_lifeline_') # no buffer + + def __init__(self, space): + self.space = space + self.len = 0 + self.allocated = 0 + + def descr_append(self, space, w_x): + """ append(x) + + Append new value x to the end of the array. + """ + raise NotImplementedError + + def descr_extend(self, space, w_x): + """ extend(array or iterable) + + Append items to the end of the array. + """ + self.extend(w_x) + + def descr_count(self, space, w_val): + """ count(x) + + Return number of occurrences of x in the array. + """ + raise NotImplementedError + + def descr_index(self, space, w_x): + """ index(x) + + Return index of first occurrence of x in the array. + """ + raise NotImplementedError + + def descr_reverse(self, space): + """ reverse() + + Reverse the order of the items in the array. + """ + raise NotImplementedError + + def descr_remove(self, space, w_val): + """ remove(x) + + Remove the first occurrence of x in the array. 
+ """ + raise NotImplementedError + + @unwrap_spec(i=int) + def descr_pop(self, space, i=-1): + """ pop([i]) + + Return the i-th element and delete it from the array. i defaults to -1. + """ + raise NotImplementedError + + @unwrap_spec(idx=int) + def descr_insert(self, space, idx, w_val): + """ insert(i,x) + + Insert a new item x into the array before position i. + """ + raise NotImplementedError + + def descr_tolist(self, space): + """ tolist() -> list + + Convert array to an ordinary list with the same items. + """ + w_l = space.newlist([]) + for i in range(self.len): + w_l.append(self.w_getitem(space, i)) + return w_l + + def descr_fromlist(self, space, w_lst): + """ fromlist(list) + + Append items to array from list. + """ + if not space.isinstance_w(w_lst, space.w_list): + raise OperationError(space.w_TypeError, + space.wrap("arg must be list")) + s = self.len + try: + self.fromsequence(w_lst) + except OperationError: + self.setlen(s) + raise + + def descr_tostring(self, space): + """ tostring() -> string + + Convert the array to an array of machine values and return the string + representation. + """ + cbuf = self._charbuf_start() + s = rffi.charpsize2str(cbuf, self.len * self.itemsize) + self._charbuf_stop() + return self.space.wrap(s) + + @unwrap_spec(s=str) + def descr_fromstring(self, space, s): + """ fromstring(string) + + Appends items from the string, interpreting it as an array of machine + values,as if it had been read from a file using the fromfile() method). + """ + if len(s) % self.itemsize != 0: + msg = 'string length not a multiple of item size' + raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) + oldlen = self.len + new = len(s) / self.itemsize + self.setlen(oldlen + new) + cbuf = self._charbuf_start() + for i in range(len(s)): + cbuf[oldlen * self.itemsize + i] = s[i] + self._charbuf_stop() + + @unwrap_spec(w_f=W_File, n=int) + def descr_fromfile(self, space, w_f, n): + """ fromfile(f, n) + + Read n objects from the file object f and append them to the end of the + array. Also called as read. + """ + try: + size = ovfcheck(self.itemsize * n) + except OverflowError: + raise MemoryError + w_item = space.call_method(w_f, 'read', space.wrap(size)) + item = space.str_w(w_item) + if len(item) < size: + n = len(item) % self.itemsize + elems = max(0, len(item) - (len(item) % self.itemsize)) + if n != 0: + item = item[0:elems] + self.descr_fromstring(space, item) + msg = "not enough items in file" + raise OperationError(space.w_EOFError, space.wrap(msg)) + self.descr_fromstring(space, item) + + @unwrap_spec(w_f=W_File) + def descr_tofile(self, space, w_f): + """ tofile(f) + + Write all items (as machine values) to the file object f. Also called as + write. + """ + w_s = self.descr_tostring(space) + space.call_method(w_f, 'write', w_s) + + def descr_fromunicode(self, space, w_ustr): + """ fromunicode(ustr) + + Extends this array with data from the unicode string ustr. + The array must be a type 'u' array; otherwise a ValueError + is raised. Use array.fromstring(ustr.decode(...)) to + append Unicode data to an array of some other type. + """ + # XXX the following probable bug is not emulated: + # CPython accepts a non-unicode string or a buffer, and then + # behaves just like fromstring(), except that it strangely truncate + # string arguments at multiples of the unicode byte size. + # Let's only accept unicode arguments for now. 
+ if self.typecode == 'u': + self.fromsequence(w_ustr) + else: + msg = "fromunicode() may only be called on type 'u' arrays" + raise OperationError(space.w_ValueError, space.wrap(msg)) + + def descr_tounicode(self, space): + """ tounicode() -> unicode + + Convert the array to a unicode string. The array must be + a type 'u' array; otherwise a ValueError is raised. Use + array.tostring().decode() to obtain a unicode string from + an array of some other type. + """ + if self.typecode == 'u': + buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned()) + return space.wrap(rffi.wcharpsize2unicode(buf, self.len)) + else: + msg = "tounicode() may only be called on type 'u' arrays" + raise OperationError(space.w_ValueError, space.wrap(msg)) + + def descr_buffer_info(self, space): + """ buffer_info() -> (address, length) + + Return a tuple (address, length) giving the current memory address and + the length in items of the buffer used to hold array's contents + The length should be multiplied by the itemsize attribute to calculate + the buffer length in bytes. + """ + w_ptr = space.wrap(self._buffer_as_unsigned()) + w_len = space.wrap(self.len) + return space.newtuple([w_ptr, w_len]) + + def descr_reduce(self, space): + """ Return state information for pickling. + """ + if self.len > 0: + w_s = self.descr_tostring(space) + args = [space.wrap(self.typecode), w_s] + else: + args = [space.wrap(self.typecode)] + try: + dct = space.getattr(self, space.wrap('__dict__')) + except OperationError: + dct = space.w_None + return space.newtuple([space.type(self), space.newtuple(args), dct]) + + def descr_copy(self, space): + """ copy(array) + + Return a copy of the array. + """ + w_a = self.constructor(self.space) + w_a.setlen(self.len, overallocate=False) + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, w_a._buffer_as_unsigned()), + rffi.cast(rffi.VOIDP, self._buffer_as_unsigned()), + self.len * self.itemsize + ) + return w_a + + def descr_byteswap(self, space): + """ byteswap() + + Byteswap all items of the array. If the items in the array are not 1, 2, + 4, or 8 bytes in size, RuntimeError is raised. 
+ """ + if self.itemsize not in [1, 2, 4, 8]: + msg = "byteswap not supported for this array" + raise OperationError(space.w_RuntimeError, space.wrap(msg)) + if self.len == 0: + return + bytes = self._charbuf_start() + tmp = [bytes[0]] * self.itemsize + for start in range(0, self.len * self.itemsize, self.itemsize): + stop = start + self.itemsize - 1 + for i in range(self.itemsize): + tmp[i] = bytes[start + i] + for i in range(self.itemsize): + bytes[stop - i] = tmp[i] + self._charbuf_stop() + + def descr_len(self, space): + return space.wrap(self.len) + + def descr_eq(self, space, w_arr2): + "x.__eq__(y) <==> x==y" + return compare_arrays(space, self, w_arr2, EQ, space.eq) + + def descr_ne(self, space, w_arr2): + "x.__ne__(y) <==> x!=y" + return compare_arrays(space, self, w_arr2, NE, space.ne) + + def descr_lt(self, space, w_arr2): + "x.__lt__(y) <==> x x<=y" + return compare_arrays(space, self, w_arr2, LE, space.le) + + def descr_gt(self, space, w_arr2): + "x.__gt__(y) <==> x>y" + return compare_arrays(space, self, w_arr2, GT, space.gt) + + def descr_ge(self, space, w_arr2): + "x.__ge__(y) <==> x>=y" + return compare_arrays(space, self, w_arr2, GE, space.ge) + + # Basic get/set/append/extend methods + + def descr_getitem(self, space, w_idx): + "x.__getitem__(y) <==> x[y]" + if not space.isinstance_w(w_idx, space.w_slice): + idx, stop, step = space.decode_index(w_idx, self.len) + assert step == 0 + return self.w_getitem(space, idx) + else: + return self.getitem_slice(space, w_idx) + + def descr_getslice(self, space, w_i, w_j): + return space.getitem(self, space.newslice(w_i, w_j, space.w_None)) + + + def descr_setitem(self, space, w_idx, w_item): + "x.__setitem__(i, y) <==> x[i]=y" + if space.isinstance_w(w_idx, space.w_slice): + self.setitem_slice(space, w_idx, w_item) + else: + self.setitem(space, w_idx, w_item) + + def descr_setslice(self, space, w_start, w_stop, w_item): + self.setitem_slice(space, + space.newslice(w_start, w_stop, space.w_None), + w_item) + + def descr_delitem(self, space, w_idx): + start, stop, step, size = self.space.decode_index4(w_idx, self.len) + if step != 1: + # I don't care about efficiency of that so far + w_lst = self.descr_tolist(space) + space.delitem(w_lst, w_idx) + self.setlen(0) + self.fromsequence(w_lst) + return + return self.delitem(space, start, stop) + + def descr_delslice(self, space, w_start, w_stop): + self.descr_delitem(space, space.newslice(w_start, w_stop, space.w_None)) + + def descr_add(self, space, w_other): + raise NotImplementedError + + def descr_inplace_add(self, space, w_other): + raise NotImplementedError + + def descr_mul(self, space, w_repeat): + raise NotImplementedError + + def descr_inplace_mul(self, space, w_repeat): + raise NotImplementedError + + def descr_radd(self, space, w_other): + return self.descr_add(space, w_other) + + def descr_rmul(self, space, w_repeat): + return self.descr_mul(space, w_repeat) + + # Misc methods + + def descr_buffer(self, space): + return space.wrap(ArrayBuffer(self)) + + def descr_repr(self, space): + if self.len == 0: + return space.wrap("array('%s')" % self.typecode) + elif self.typecode == "c": + r = space.repr(self.descr_tostring(space)) + s = "array('%s', %s)" % (self.typecode, space.str_w(r)) + return space.wrap(s) + elif self.typecode == "u": + r = space.repr(self.descr_tounicode(space)) + s = "array('%s', %s)" % (self.typecode, space.str_w(r)) + return space.wrap(s) + else: + r = space.repr(self.descr_tolist(space)) + s = "array('%s', %s)" % (self.typecode, space.str_w(r)) + return 
space.wrap(s) + +W_ArrayBase.typedef = TypeDef( 'array', __new__ = interp2app(w_array), __module__ = 'array', + + __len__ = interp2app(W_ArrayBase.descr_len), + __eq__ = interp2app(W_ArrayBase.descr_eq), + __ne__ = interp2app(W_ArrayBase.descr_ne), + __lt__ = interp2app(W_ArrayBase.descr_lt), + __le__ = interp2app(W_ArrayBase.descr_le), + __gt__ = interp2app(W_ArrayBase.descr_gt), + __ge__ = interp2app(W_ArrayBase.descr_ge), + + __getitem__ = interp2app(W_ArrayBase.descr_getitem), + __getslice__ = interp2app(W_ArrayBase.descr_getslice), + __setitem__ = interp2app(W_ArrayBase.descr_setitem), + __setslice__ = interp2app(W_ArrayBase.descr_setslice), + __delitem__ = interp2app(W_ArrayBase.descr_delitem), + __delslice__ = interp2app(W_ArrayBase.descr_delslice), + + __add__ = interpindirect2app(W_ArrayBase.descr_add), + __iadd__ = interpindirect2app(W_ArrayBase.descr_inplace_add), + __mul__ = interpindirect2app(W_ArrayBase.descr_mul), + __imul__ = interpindirect2app(W_ArrayBase.descr_inplace_mul), + __radd__ = interp2app(W_ArrayBase.descr_radd), + __rmul__ = interp2app(W_ArrayBase.descr_rmul), + + __buffer__ = interp2app(W_ArrayBase.descr_buffer), + __repr__ = interp2app(W_ArrayBase.descr_repr), + itemsize = GetSetProperty(descr_itemsize), typecode = GetSetProperty(descr_typecode), __weakref__ = make_weakref_descr(W_ArrayBase), + append = interpindirect2app(W_ArrayBase.descr_append), + extend = interp2app(W_ArrayBase.descr_extend), + count = interpindirect2app(W_ArrayBase.descr_count), + index = interpindirect2app(W_ArrayBase.descr_index), + reverse = interpindirect2app(W_ArrayBase.descr_reverse), + remove = interpindirect2app(W_ArrayBase.descr_remove), + pop = interpindirect2app(W_ArrayBase.descr_pop), + insert = interpindirect2app(W_ArrayBase.descr_insert), + + tolist = interp2app(W_ArrayBase.descr_tolist), + fromlist = interp2app(W_ArrayBase.descr_fromlist), + tostring = interp2app(W_ArrayBase.descr_tostring), + fromstring = interp2app(W_ArrayBase.descr_fromstring), + tofile = interp2app(W_ArrayBase.descr_tofile), + fromfile = interp2app(W_ArrayBase.descr_fromfile), + fromunicode = interp2app(W_ArrayBase.descr_fromunicode), + tounicode = interp2app(W_ArrayBase.descr_tounicode), + + buffer_info = interp2app(W_ArrayBase.descr_buffer_info), + __copy__ = interp2app(W_ArrayBase.descr_copy), + __reduce__ = interp2app(W_ArrayBase.descr_reduce), + byteswap = interp2app(W_ArrayBase.descr_byteswap), ) -W_ArrayBase.typedef.registermethods(globals()) class TypeCode(object): def __init__(self, itemtype, unwrap, canoverflow=False, signed=False): self.itemtype = itemtype self.bytes = rffi.sizeof(itemtype) - #self.arraytype = lltype.GcArray(itemtype) self.arraytype = lltype.Array(itemtype, hints={'nolength': True}) self.unwrap = unwrap self.signed = signed @@ -175,14 +595,10 @@ itemsize = mytype.bytes typecode = mytype.typecode - @staticmethod - def register(typeorder): - typeorder[W_Array] = [(W_ArrayBase, None)] + _attrs_ = ('space', 'len', 'allocated', '_lifeline_', 'buffer') def __init__(self, space): - self.space = space - self.len = 0 - self.allocated = 0 + W_ArrayBase.__init__(self, space) self.buffer = lltype.nullptr(mytype.arraytype) def item_w(self, w_item): @@ -289,26 +705,6 @@ raise self.setlen(oldlen + i) - def fromstring(self, s): - if len(s) % self.itemsize != 0: - msg = 'string length not a multiple of item size' - raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) - oldlen = self.len - new = len(s) / mytype.bytes - self.setlen(oldlen + new) - cbuf = self._charbuf_start() - 
for i in range(len(s)): - cbuf[oldlen * mytype.bytes + i] = s[i] - self._charbuf_stop() - - def fromlist(self, w_lst): - s = self.len - try: - self.fromsequence(w_lst) - except OperationError: - self.setlen(s) - raise - def extend(self, w_iterable, accept_different_array=False): space = self.space if isinstance(w_iterable, W_Array): @@ -332,6 +728,9 @@ def _charbuf_start(self): return rffi.cast(rffi.CCHARP, self.buffer) + def _buffer_as_unsigned(self): + return rffi.cast(lltype.Unsigned, self.buffer) + def _charbuf_stop(self): keepalive_until_here(self) @@ -343,202 +742,180 @@ item = float(item) return space.wrap(item) - # Basic get/set/append/extend methods + # interface - def len__Array(space, self): - return space.wrap(self.len) + def descr_append(self, space, w_x): + x = self.item_w(w_x) + self.setlen(self.len + 1) + self.buffer[self.len - 1] = x - def getitem__Array_ANY(space, self, w_idx): - idx, stop, step = space.decode_index(w_idx, self.len) - assert step == 0 - return self.w_getitem(space, idx) + # List interface + def descr_count(self, space, w_val): + cnt = 0 + for i in range(self.len): + # XXX jitdriver + w_item = self.w_getitem(space, i) + if space.is_true(space.eq(w_item, w_val)): + cnt += 1 + return space.wrap(cnt) - def getitem__Array_Slice(space, self, w_slice): - start, stop, step, size = space.decode_index4(w_slice, self.len) - w_a = mytype.w_class(self.space) - w_a.setlen(size, overallocate=False) - assert step != 0 - j = 0 - for i in range(start, stop, step): - w_a.buffer[j] = self.buffer[i] - j += 1 - return w_a + def descr_index(self, space, w_val): + for i in range(self.len): + w_item = self.w_getitem(space, i) + if space.is_true(space.eq(w_item, w_val)): + return space.wrap(i) + msg = 'array.index(x): x not in list' + raise OperationError(space.w_ValueError, space.wrap(msg)) - def getslice__Array_ANY_ANY(space, self, w_i, w_j): - return space.getitem(self, space.newslice(w_i, w_j, space.w_None)) + def descr_reverse(self, space): + b = self.buffer + for i in range(self.len / 2): + b[i], b[self.len - i - 1] = b[self.len - i - 1], b[i] - def setitem__Array_ANY_ANY(space, self, w_idx, w_item): - idx, stop, step = space.decode_index(w_idx, self.len) - if step != 0: - msg = 'can only assign array to array slice' - raise OperationError(self.space.w_TypeError, self.space.wrap(msg)) - item = self.item_w(w_item) - self.buffer[idx] = item + def descr_pop(self, space, i): + if i < 0: + i += self.len + if i < 0 or i >= self.len: + msg = 'pop index out of range' + raise OperationError(space.w_IndexError, space.wrap(msg)) + w_val = self.w_getitem(space, i) + while i < self.len - 1: + self.buffer[i] = self.buffer[i + 1] + i += 1 + self.setlen(self.len - 1) + return w_val - def setitem__Array_Slice_Array(space, self, w_idx, w_item): - start, stop, step, size = self.space.decode_index4(w_idx, self.len) - assert step != 0 - if w_item.len != size or self is w_item: - # XXX this is a giant slow hack - w_lst = array_tolist__Array(space, self) - w_item = space.call_method(w_item, 'tolist') - space.setitem(w_lst, w_idx, w_item) - self.setlen(0) - self.fromsequence(w_lst) - else: + def descr_remove(self, space, w_val): + w_idx = self.descr_index(space, w_val) + self.descr_pop(space, space.int_w(w_idx)) + + def descr_insert(self, space, idx, w_val): + if idx < 0: + idx += self.len + if idx < 0: + idx = 0 + if idx > self.len: + idx = self.len + + val = self.item_w(w_val) + self.setlen(self.len + 1) + i = self.len - 1 + while i > idx: + self.buffer[i] = self.buffer[i - 1] + i -= 1 + 
self.buffer[i] = val + + def getitem_slice(self, space, w_idx): + start, stop, step, size = space.decode_index4(w_idx, self.len) + w_a = mytype.w_class(self.space) + w_a.setlen(size, overallocate=False) + assert step != 0 j = 0 for i in range(start, stop, step): - self.buffer[i] = w_item.buffer[j] + w_a.buffer[j] = self.buffer[i] j += 1 + return w_a - def setslice__Array_ANY_ANY_ANY(space, self, w_i, w_j, w_x): - space.setitem(self, space.newslice(w_i, w_j, space.w_None), w_x) + def setitem(self, space, w_idx, w_item): + idx, stop, step = space.decode_index(w_idx, self.len) + if step != 0: + msg = 'can only assign array to array slice' + raise OperationError(self.space.w_TypeError, + self.space.wrap(msg)) + item = self.item_w(w_item) + self.buffer[idx] = item - def array_append__Array_ANY(space, self, w_x): - x = self.item_w(w_x) - self.setlen(self.len + 1) - self.buffer[self.len - 1] = x + def setitem_slice(self, space, w_idx, w_item): + if not isinstance(w_item, W_Array): + raise OperationError(space.w_TypeError, space.wrap( + "can only assign to a slice array")) + start, stop, step, size = self.space.decode_index4(w_idx, self.len) + assert step != 0 + if w_item.len != size or self is w_item: + # XXX this is a giant slow hack + w_lst = self.descr_tolist(space) + w_item = space.call_method(w_item, 'tolist') + space.setitem(w_lst, w_idx, w_item) + self.setlen(0) + self.fromsequence(w_lst) + else: + j = 0 + for i in range(start, stop, step): + self.buffer[i] = w_item.buffer[j] + j += 1 - def array_extend__Array_ANY(space, self, w_iterable): - self.extend(w_iterable) + # We can't look into this function until ptradd works with things (in the + # JIT) other than rffi.CCHARP + @jit.dont_look_inside + def delitem(self, space, i, j): + if i < 0: + i += self.len + if i < 0: + i = 0 + if j < 0: + j += self.len + if j < 0: + j = 0 + if j > self.len: + j = self.len + if i >= j: + return None + oldbuffer = self.buffer + self.buffer = lltype.malloc(mytype.arraytype, + max(self.len - (j - i), 0), flavor='raw', + add_memory_pressure=True) + if i: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, self.buffer), + rffi.cast(rffi.VOIDP, oldbuffer), + i * mytype.bytes + ) + if j < self.len: + rffi.c_memcpy( + rffi.cast(rffi.VOIDP, rffi.ptradd(self.buffer, i)), + rffi.cast(rffi.VOIDP, rffi.ptradd(oldbuffer, j)), + (self.len - j) * mytype.bytes + ) + self.len -= j - i + self.allocated = self.len + if oldbuffer: + lltype.free(oldbuffer, flavor='raw') - # List interface - def array_count__Array_ANY(space, self, w_val): - cnt = 0 - for i in range(self.len): - w_item = self.w_getitem(space, i) - if space.is_true(space.eq(w_item, w_val)): - cnt += 1 - return space.wrap(cnt) + # Add and mul methods - def array_index__Array_ANY(space, self, w_val): - for i in range(self.len): - w_item = self.w_getitem(space, i) - if space.is_true(space.eq(w_item, w_val)): - return space.wrap(i) - msg = 'array.index(x): x not in list' - raise OperationError(space.w_ValueError, space.wrap(msg)) + def descr_add(self, space, w_other): + if not isinstance(w_other, W_Array): + return space.w_NotImplemented + a = mytype.w_class(space) + a.setlen(self.len + w_other.len, overallocate=False) + for i in range(self.len): + a.buffer[i] = self.buffer[i] + for i in range(w_other.len): + a.buffer[i + self.len] = w_other.buffer[i] + return a - def array_reverse__Array(space, self): - b = self.buffer - for i in range(self.len / 2): - b[i], b[self.len - i - 1] = b[self.len - i - 1], b[i] + def descr_inplace_add(self, space, w_other): + if not 
isinstance(w_other, W_Array): + return space.w_NotImplemented + oldlen = self.len + otherlen = w_other.len + self.setlen(oldlen + otherlen) + for i in range(otherlen): + self.buffer[oldlen + i] = w_other.buffer[i] + return self - def array_pop__Array_ANY(space, self, w_idx): - i = space.int_w(w_idx) - if i < 0: - i += self.len - if i < 0 or i >= self.len: - msg = 'pop index out of range' - raise OperationError(space.w_IndexError, space.wrap(msg)) - w_val = self.w_getitem(space, i) - while i < self.len - 1: From noreply at buildbot.pypy.org Tue Jun 4 11:58:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 11:58:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a comment that -O in pyinteractive is -OO in CPython. Message-ID: <20130604095851.AA3371C016D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64761:4c73a79fd2d3 Date: 2013-06-04 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/4c73a79fd2d3/ Log: Add a comment that -O in pyinteractive is -OO in CPython. diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "skip assert statements and remove docstrings when importing modules", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), From noreply at buildbot.pypy.org Tue Jun 4 11:58:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 11:58:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Revert this change, unsure why it was there (maybe some merge mistake?) Message-ID: <20130604095854.8CE3C1C016D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64762:155e0c2706e2 Date: 2013-06-04 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/155e0c2706e2/ Log: Revert this change, unsure why it was there (maybe some merge mistake?) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -3,7 +3,7 @@ # See test/test_app_main. # Missing vs CPython: -d, -t, -v, -x, -3 -"""\ +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -28,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -157,12 +156,13 @@ raise SystemExit def print_help(*args): - print 'usage: %s [options] [-c cmd|-m mod|file.py|-] [arg...]' % ( + import os + print 'usage: %s [option] ... [-c cmd | -m mod | file | -] [arg] ...' 
% ( sys.executable,) - print __doc__.rstrip() + print USAGE1, if 'pypyjit' in sys.builtin_module_names: - print " --jit OPTIONS advanced JIT options: try 'off' or 'help'" - print + print "--jit options: advanced JIT options: try 'off' or 'help'" + print (USAGE2 % (os.pathsep,)), raise SystemExit def _print_jit_help(): From noreply at buildbot.pypy.org Tue Jun 4 11:58:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 11:58:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Make really JUMP_IF_NOT_DEBUG's target relative. Message-ID: <20130604095856.044641C016D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64763:ea2d3e68529d Date: 2013-06-04 11:52 +0200 http://bitbucket.org/pypy/pypy/changeset/ea2d3e68529d/ Log: Make really JUMP_IF_NOT_DEBUG's target relative. diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -373,7 +373,7 @@ def visit_Assert(self, asrt): self.update_position(asrt.lineno) end = self.new_block() - self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end, True) + self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end) asrt.test.accept_jump_if(self, True, end) self.emit_op_name(ops.LOAD_GLOBAL, self.names, "AssertionError") if asrt.msg: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -876,9 +876,9 @@ self.popvalue() return next_instr - def JUMP_IF_NOT_DEBUG(self, target, next_instr): + def JUMP_IF_NOT_DEBUG(self, jumpby, next_instr): if not self.space.sys.debug: - return target + next_instr += jumpby return next_instr def GET_ITER(self, oparg, next_instr): From noreply at buildbot.pypy.org Tue Jun 4 11:58:57 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 11:58:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge the pull request #149, after a few extra fixes: Message-ID: <20130604095857.606911C016D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64764:e953dfbc7f0a Date: 2013-06-04 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/e953dfbc7f0a/ Log: merge the pull request #149, after a few extra fixes: implement the -O and -OO flags, by Tyler Wade Most importantly, this is done without generating .pyo files, but by checking at runtime if we should run each assert or not. Similarly, -OO is done by removing the docstrings from memory after we load the .pyc files. diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # Distance to target address del def_op, name_op, jrel_op, jabs_op diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! 
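# Editor's sketch (hedged, not part of the diff): with this change "-O" no
# longer implies .pyo files; asserts are still compiled, but guarded by the
# new JUMP_IF_NOT_DEBUG opcode, which is tested at run time against the debug
# flag that __pypy__.set_debug(False) clears.
import dis

def f(x):
    assert x > 0, "x must be positive"
    return x

dis.dis(f)
# expected on a PyPy with this change: a JUMP_IF_NOT_DEBUG before the assert
# body; running under -O (or after __pypy__.set_debug(False)) then skips the
# assert without recompiling or rewriting any .pyc file.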
diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,8 @@ # App-level version of py.py. # See test/test_app_main. -# Missing vs CPython: -d, -OO, -t, -v, -x, -3 -"""\ +# Missing vs CPython: -d, -t, -v, -x, -3 +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +12,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -R : ignored (see http://bugs.python.org/issue14621) -Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -27,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -470,6 +470,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -632,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, 
ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -254,6 +254,7 @@ start = 1 doc_expr.walkabout(self) self.name_op("__doc__", ast.Store) + self.scope.doc_removable = True for i in range(start, len(body)): body[i].walkabout(self) return True @@ -371,6 +372,7 @@ def visit_Assert(self, asrt): self.update_position(asrt.lineno) end = self.new_block() + self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end) asrt.test.accept_jump_if(self, True, end) self.emit_op_name(ops.LOAD_GLOBAL, self.names, "AssertionError") if asrt.msg: @@ -1207,7 +1209,10 @@ tree.walkabout(self) def _get_code_flags(self): - return 0 + flags = 0 + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING + return flags class AbstractFunctionCodeGenerator(PythonCodeGenerator): @@ -1234,6 +1239,8 @@ flags |= consts.CO_VARARGS if scope.has_keywords_arg: flags |= consts.CO_VARKEYWORDS + if scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING if not self.cell_vars and not self.free_vars: flags |= consts.CO_NOFREE return PythonCodeGenerator._get_code_flags(self) | flags @@ -1250,6 +1257,7 @@ doc_expr = None if doc_expr is not None: self.add_const(doc_expr.s) + self.scope.doc_removable = True start = 1 else: self.add_const(self.space.w_None) @@ -1312,3 +1320,9 @@ self._handle_body(cls.body) self.emit_op(ops.LOAD_LOCALS) self.emit_op(ops.RETURN_VALUE) + + def _get_code_flags(self): + flags = 0 + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING + return PythonCodeGenerator._get_code_flags(self) | flags diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -15,6 +15,8 @@ CO_FUTURE_WITH_STATEMENT = 0x8000 CO_FUTURE_PRINT_FUNCTION = 0x10000 CO_FUTURE_UNICODE_LITERALS = 0x20000 +#pypy specific: +CO_KILL_DOCSTRING = 0x100000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -42,6 +42,7 @@ self.has_free = False self.child_has_free = False self.nested = False + self.doc_removable = False def lookup(self, name): """Find the scope of identifier 'name'.""" diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -812,6 +812,58 @@ """ self.simple_test(source, 'ok', 1) + def test_remove_docstring(self): + source = '"module_docstring"\n' + """if 1: + def f1(): + 'docstring' + def f2(): + 'docstring' + return 'docstring' + def f3(): + 'foo' + return 'bar' + class C1(): + 'docstring' + class C2(): + __doc__ = 'docstring' + class C3(): + field = 'not docstring' + class C4(): + 'docstring' + field = 'docstring' + """ + code_w = compile_with_astcompiler(source, 'exec', self.space) + code_w.remove_docstrings(self.space) + dict_w = self.space.newdict(); + code_w.exec_code(self.space, dict_w, dict_w) + + yield self.check, dict_w, "f1.__doc__", None + yield self.check, dict_w, "f2.__doc__", 'docstring' + yield self.check, dict_w, "f2()", 'docstring' + yield self.check, dict_w, 
"f3.__doc__", None + yield self.check, dict_w, "f3()", 'bar' + yield self.check, dict_w, "C1.__doc__", None + yield self.check, dict_w, "C2.__doc__", 'docstring' + yield self.check, dict_w, "C3.field", 'not docstring' + yield self.check, dict_w, "C4.field", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "__doc__", None + + def test_assert_skipping(self): + space = self.space + mod = space.getbuiltinmodule('__pypy__') + w_set_debug = space.getattr(mod, space.wrap('set_debug')) + space.call_function(w_set_debug, space.w_False) + + source = """if 1: + assert False + """ + try: + self.run(source) + finally: + space.call_function(w_set_debug, space.w_True) + class AppTestCompiler: diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR) + CO_GENERATOR, CO_KILL_DOCSTRING) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -218,6 +218,13 @@ return w_first return space.w_None + def remove_docstrings(self, space): + if self.co_flags & CO_KILL_DOCSTRING: + self.co_consts_w[0] = space.w_None + for w_co in self.co_consts_w: + if isinstance(w_co, PyCode): + w_co.remove_docstrings(space) + def _to_code(self): """For debugging only.""" consts = [None] * len(self.co_consts_w) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -876,6 +876,11 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, jumpby, next_instr): + if not self.space.sys.debug: + next_instr += jumpby + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -58,6 +58,7 @@ 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', + 'set_debug' : 'interp_magic.set_debug', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -101,6 +101,13 @@ def newlist_hint(space, sizehint): return space.newlist_hint(sizehint) + at unwrap_spec(debug=bool) +def set_debug(space, debug): + space.sys.debug = debug + space.setitem(space.builtin.w_dict, + space.wrap('__debug__'), + space.wrap(debug)) + @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -915,6 +915,13 @@ if not space.is_true(space.sys.get('dont_write_bytecode')): write_compiled_module(space, code_w, cpathname, mode, mtime) + try: + optimize = space.sys.get_flag('optimize') + except Exception: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + update_code_filenames(space, 
code_w, pathname) exec_code_module(space, w_mod, code_w) @@ -1009,6 +1016,13 @@ "Bad magic number in %s", cpathname) #print "loading pyc file:", cpathname code_w = read_compiled_module(space, cpathname, source) + try: + optimize = space.sys.get_flag('optimize') + except Exception: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + exec_code_module(space, w_mod, code_w) return w_mod diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -7,7 +7,7 @@ class Module(MixedModule): """Sys Builtin Module. """ - _immutable_fields_ = ["defaultencoding?"] + _immutable_fields_ = ["defaultencoding?", "debug?"] def __init__(self, space, w_name): """NOT_RPYTHON""" # because parent __init__ isn't @@ -18,6 +18,7 @@ self.w_default_encoder = None self.defaultencoding = "ascii" self.filesystemencoding = None + self.debug = True interpleveldefs = { '__name__' : '(space.wrap("sys"))', diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -800,6 +800,9 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) From noreply at buildbot.pypy.org Tue Jun 4 12:13:22 2013 From: noreply at buildbot.pypy.org (kostialopuhin) Date: Tue, 4 Jun 2013 12:13:22 +0200 (CEST) Subject: [pypy-commit] pypy release-2.0.x: test and fix for getting ctypes.byref contents Message-ID: <20130604101322.20DF01C016D@cobra.cs.uni-duesseldorf.de> Author: Konstantin Lopuhin Branch: release-2.0.x Changeset: r64765:cb8c14632483 Date: 2013-05-20 00:23 +0400 http://bitbucket.org/pypy/pypy/changeset/cb8c14632483/ Log: test and fix for getting ctypes.byref contents diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py @@ -108,6 +108,13 @@ py.test.raises(TypeError, delitem, p, 0) + def test_byref(self): + for ct, pt in zip(ctype_types, python_types): + i = ct(42) + p = byref(i) + assert type(p._obj) is ct + assert p._obj.value == 42 + def test_pointer_to_pointer(self): x = c_int(32) y = c_int(42) From noreply at buildbot.pypy.org Tue Jun 4 13:17:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Jun 2013 13:17:00 +0200 (CEST) Subject: [pypy-commit] cffi default: Propagate the original OSError, which contains information returned by Message-ID: <20130604111700.3F7141C016D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1267:28f10889b5aa Date: 2013-06-04 13:16 +0200 http://bitbucket.org/cffi/cffi/changeset/28f10889b5aa/ Log: Propagate the original OSError, which contains information returned by dlerror(). diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' 
not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # From noreply at buildbot.pypy.org Tue Jun 4 17:06:51 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 4 Jun 2013 17:06:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for pickling SliceArrays Message-ID: <20130604150651.D74FE1C016D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64766:dd61debfb36a Date: 2013-06-04 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/dd61debfb36a/ Log: Add a test for pickling SliceArrays diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1801,6 +1801,13 @@ pickled_data = dumps(a) assert (loads(pickled_data) == a).all() + def test_pickle_slice(self): + from cPickle import loads, dumps + import numpypy as numpy + + a = numpy.arange(10.)[::2] + assert (loads(dumps(a)) == a).all() + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy From noreply at buildbot.pypy.org Tue Jun 4 17:41:26 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:26 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: a branch where to implement a fast json module in rpython Message-ID: <20130604154126.CE1781C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64767:9b78f614a0f3 Date: 2013-06-04 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/9b78f614a0f3/ Log: a branch where to implement a fast json module in rpython From noreply at buildbot.pypy.org Tue Jun 4 17:41:28 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:28 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: start to implement the json decoder: decode only strings so far Message-ID: <20130604154128.166E51C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64768:6ec983152d16 Date: 2013-06-04 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/6ec983152d16/ Log: start to implement the json decoder: decode only strings so far diff --git a/pypy/module/_fastjson/__init__.py b/pypy/module/_fastjson/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_fastjson/__init__.py @@ -0,0 +1,10 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """fast json implementation""" + + appleveldefs = {} + + interpleveldefs = { + 'loads' : 'interp_decoder.loads', + } diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_fastjson/interp_decoder.py @@ -0,0 +1,65 @@ +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter import unicodehelper + +def is_whitespace(ch): + return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' + +TYPE_INVALID = 0 +TYPE_STRING = 0 + +class JSONDecoder(object): + def __init__(self, space, s): + self.space = space + self.s = s + self.i = 0 + self.last_type = TYPE_INVALID + + def eof(self): + return self.i == len(self.s) + + def peek(self): + return 
self.s[self.i] + + def next(self): + ch = self.peek() + self.i += 1 + return ch + + def skip_whitespace(self): + while not self.eof(): + ch = self.peek() + if is_whitespace(ch): + self.next() + else: + break + + def decode_any(self): + self.skip_whitespace() + ch = self.peek() + if ch == '"': + return self.decode_string() + else: + assert False, 'Unkown char: %s' % ch + + def decode_string(self): + self.next() + start = self.i + while True: + ch = self.next() + if ch == '"': + end = self.i-1 + assert end > 0 + content = self.s[start:end] + self.last_type = TYPE_STRING + return self.space.wrap(unicodehelper.decode_utf8(self.space, content)) + elif ch == '\\': + raise Exception("escaped strings not supported yet") + + + at unwrap_spec(s=str) +def loads(space, s): + decoder = JSONDecoder(space, s) + return decoder.decode_any() + + diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py new file mode 100644 --- /dev/null +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +import py +from pypy.module._fastjson.interp_decoder import JSONDecoder + +def test_skip_whitespace(): + dec = JSONDecoder('fake space', ' hello ') + assert dec.i == 0 + dec.skip_whitespace() + assert dec.next() == 'h' + assert dec.next() == 'e' + assert dec.next() == 'l' + assert dec.next() == 'l' + assert dec.next() == 'o' + dec.skip_whitespace() + assert dec.eof() + + + +class AppTest(object): + spaceconfig = {"objspace.usemodules._fastjson": True} + + def test_load_string(self): + import _fastjson + res = _fastjson.loads('"hello"') + assert res == u'hello' + assert type(res) is unicode + + def test_load_string_utf8(self): + import _fastjson + s = u'àèìòù' + res = _fastjson.loads('"%s"' % s.encode('utf-8')) + assert res == s From noreply at buildbot.pypy.org Tue Jun 4 17:41:29 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:29 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: skip the whitespace at the end and complain if there are extra chars Message-ID: <20130604154129.667011C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64769:51df42a6fd1c Date: 2013-06-04 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/51df42a6fd1c/ Log: skip the whitespace at the end and complain if there are extra chars diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper @@ -60,6 +60,10 @@ @unwrap_spec(s=str) def loads(space, s): decoder = JSONDecoder(space, s) - return decoder.decode_any() - - + w_res = decoder.decode_any() + decoder.skip_whitespace() + if not decoder.eof(): + start = decoder.i + end = len(decoder.s) + raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) + return w_res diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -19,14 +19,21 @@ class AppTest(object): spaceconfig = {"objspace.usemodules._fastjson": True} - def test_load_string(self): + def test_decode_string(self): import _fastjson res = _fastjson.loads('"hello"') assert 
res == u'hello' assert type(res) is unicode - def test_load_string_utf8(self): + def test_decode_string_utf8(self): import _fastjson s = u'àèìòù' res = _fastjson.loads('"%s"' % s.encode('utf-8')) assert res == s + + def test_skip_whitespace(self): + import _fastjson + s = ' "hello" ' + assert _fastjson.loads(s) == u'hello' + s = ' "hello" extra' + raises(ValueError, "_fastjson.loads(s)") From noreply at buildbot.pypy.org Tue Jun 4 17:41:30 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:30 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add support for escape sequences and complain if the string is non terminated Message-ID: <20130604154130.9FFAE1C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64770:9e8497c271b6 Date: 2013-06-04 16:20 +0200 http://bitbucket.org/pypy/pypy/changeset/9e8497c271b6/ Log: add support for escape sequences and complain if the string is non terminated diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -1,3 +1,4 @@ +from rpython.rlib.rstring import StringBuilder from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper @@ -26,6 +27,9 @@ self.i += 1 return ch + def unget(self): + self.i -= 1 + def skip_whitespace(self): while not self.eof(): ch = self.peek() @@ -42,21 +46,65 @@ else: assert False, 'Unkown char: %s' % ch + def getslice(self, start, end): + assert end > 0 + return self.s[start:end] + def decode_string(self): self.next() start = self.i - while True: + while not self.eof(): + # this loop is a fast path for strings which do not contain escape + # characters ch = self.next() if ch == '"': - end = self.i-1 - assert end > 0 - content = self.s[start:end] + content_utf8 = self.getslice(start, self.i-1) + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) self.last_type = TYPE_STRING - return self.space.wrap(unicodehelper.decode_utf8(self.space, content)) + return self.space.wrap(content_unicode) elif ch == '\\': - raise Exception("escaped strings not supported yet") + content_so_far = self.getslice(start, self.i-1) + self.unget() + return self.decode_string_escaped(start, content_so_far) + raise operationerrfmt(self.space.w_ValueError, + "Unterminated string starting at char %d", start) + def decode_string_escaped(self, start, content_so_far): + builder = StringBuilder(len(content_so_far)*2) # just an estimate + builder.append(content_so_far) + while not self.eof(): + ch = self.next() + if ch == '"': + content_utf8 = builder.build() + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + self.last_type = TYPE_STRING + return self.space.wrap(content_unicode) + elif ch == '\\': + newchar = self.decode_escape_sequence() + builder.append_multiple_char(newchar, 1) # we should implement append_char + else: + builder.append_multiple_char(newchar, 1) + + raise operationerrfmt(self.space.w_ValueError, + "Unterminated string starting at char %d", start) + + def decode_escape_sequence(self): + ch = self.next() + if ch == '\\': return '\\' + elif ch == '"': return '"' + elif ch == '/': return '/' + elif ch == 'b': return '\b' + elif ch == 'f': return '\f' + elif ch == 'n': return '\n' + elif ch == 'r': return '\r' + elif ch == 't': return '\t' + elif ch == 'u': + assert False, 'not implemented yet' + else: + raise 
operationerrfmt(self.space.w_ValueError, + "Invalid \\escape: %s (char %d)", ch, self.i-1) + @unwrap_spec(s=str) def loads(space, s): decoder = JSONDecoder(space, s) diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -37,3 +37,19 @@ assert _fastjson.loads(s) == u'hello' s = ' "hello" extra' raises(ValueError, "_fastjson.loads(s)") + + def test_unterminated_string(self): + import _fastjson + s = '"hello' # missing the trailing " + raises(ValueError, "_fastjson.loads(s)") + + def test_escape_sequence(self): + import _fastjson + assert _fastjson.loads(r'"\\"') == u'\\' + assert _fastjson.loads(r'"\""') == u'"' + assert _fastjson.loads(r'"\/"') == u'/' + assert _fastjson.loads(r'"\b"') == u'\b' + assert _fastjson.loads(r'"\f"') == u'\f' + assert _fastjson.loads(r'"\n"') == u'\n' + assert _fastjson.loads(r'"\r"') == u'\r' + assert _fastjson.loads(r'"\t"') == u'\t' From noreply at buildbot.pypy.org Tue Jun 4 17:41:31 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:31 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: what is not tested is broken: test&fix Message-ID: <20130604154131.E4EF31C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64771:d04ad613ac6b Date: 2013-06-04 16:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d04ad613ac6b/ Log: what is not tested is broken: test&fix diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -81,10 +81,10 @@ self.last_type = TYPE_STRING return self.space.wrap(content_unicode) elif ch == '\\': - newchar = self.decode_escape_sequence() - builder.append_multiple_char(newchar, 1) # we should implement append_char + ch = self.decode_escape_sequence() + builder.append_multiple_char(ch, 1) # we should implement append_char else: - builder.append_multiple_char(newchar, 1) + builder.append_multiple_char(ch, 1) raise operationerrfmt(self.space.w_ValueError, "Unterminated string starting at char %d", start) diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -53,3 +53,8 @@ assert _fastjson.loads(r'"\n"') == u'\n' assert _fastjson.loads(r'"\r"') == u'\r' assert _fastjson.loads(r'"\t"') == u'\t' + + def test_escape_sequence_in_the_middle(self): + import _fastjson + s = r'"hello\nworld"' + assert _fastjson.loads(s) == "hello\nworld" From noreply at buildbot.pypy.org Tue Jun 4 17:41:33 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:33 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: a passing test Message-ID: <20130604154133.324811C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64772:7fddf7d2a349 Date: 2013-06-04 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/7fddf7d2a349/ Log: a passing test diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -58,3 +58,9 @@ import _fastjson s = r'"hello\nworld"' assert _fastjson.loads(s) == "hello\nworld" + + def test_unterminated_string_after_escape_sequence(self): + import _fastjson + s = 
r'"hello\nworld' # missing the trailing " + raises(ValueError, "_fastjson.loads(s)") + From noreply at buildbot.pypy.org Tue Jun 4 17:41:34 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:34 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: implement parsing of \uXXXX escapes Message-ID: <20130604154134.8CE8C1C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64773:752e9547c85c Date: 2013-06-04 17:15 +0200 http://bitbucket.org/pypy/pypy/changeset/752e9547c85c/ Log: implement parsing of \uXXXX escapes diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -81,30 +81,45 @@ self.last_type = TYPE_STRING return self.space.wrap(content_unicode) elif ch == '\\': - ch = self.decode_escape_sequence() + self.decode_escape_sequence(builder) + else: builder.append_multiple_char(ch, 1) # we should implement append_char - else: - builder.append_multiple_char(ch, 1) raise operationerrfmt(self.space.w_ValueError, "Unterminated string starting at char %d", start) - def decode_escape_sequence(self): + def decode_escape_sequence(self, builder): + put = builder.append_multiple_char ch = self.next() - if ch == '\\': return '\\' - elif ch == '"': return '"' - elif ch == '/': return '/' - elif ch == 'b': return '\b' - elif ch == 'f': return '\f' - elif ch == 'n': return '\n' - elif ch == 'r': return '\r' - elif ch == 't': return '\t' + if ch == '\\': put('\\', 1) + elif ch == '"': put('"' , 1) + elif ch == '/': put('/' , 1) + elif ch == 'b': put('\b', 1) + elif ch == 'f': put('\f', 1) + elif ch == 'n': put('\n', 1) + elif ch == 'r': put('\r', 1) + elif ch == 't': put('\t', 1) elif ch == 'u': - assert False, 'not implemented yet' + return self.decode_escape_sequence_unicode(builder) else: raise operationerrfmt(self.space.w_ValueError, "Invalid \\escape: %s (char %d)", ch, self.i-1) + def decode_escape_sequence_unicode(self, builder): + # at this point we are just after the 'u' of the \u1234 sequence. 
+ hexdigits = self.getslice(self.i, self.i+4) + self.i += 4 + try: + uchr = unichr(int(hexdigits, 16)) + except ValueError: + raise operationerrfmt(self.space.w_ValueError, + "Invalid \uXXXX escape (char %d)", self.i-1) + # + utf8_ch = unicodehelper.encode_utf8(self.space, uchr) + builder.append(utf8_ch) + + + @unwrap_spec(s=str) def loads(space, s): decoder = JSONDecoder(space, s) diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -64,3 +64,7 @@ s = r'"hello\nworld' # missing the trailing " raises(ValueError, "_fastjson.loads(s)") + def test_escape_sequence_unicode(self): + import _fastjson + s = r'"\u1234"' + assert _fastjson.loads(s) == u'\u1234' From noreply at buildbot.pypy.org Tue Jun 4 17:41:35 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:41:35 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: implement decoding of objects Message-ID: <20130604154135.DB8001C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64774:0c2d7c11a454 Date: 2013-06-04 17:40 +0200 http://bitbucket.org/pypy/pypy/changeset/0c2d7c11a454/ Log: implement decoding of objects diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -6,15 +6,15 @@ def is_whitespace(ch): return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' -TYPE_INVALID = 0 -TYPE_STRING = 0 +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 class JSONDecoder(object): def __init__(self, space, s): self.space = space self.s = s self.i = 0 - self.last_type = TYPE_INVALID + self.last_type = TYPE_UNKNOWN def eof(self): return self.i == len(self.s) @@ -30,6 +30,10 @@ def unget(self): self.i -= 1 + def getslice(self, start, end): + assert end > 0 + return self.s[start:end] + def skip_whitespace(self): while not self.eof(): ch = self.peek() @@ -37,21 +41,56 @@ self.next() else: break + + def _raise(self, msg, *args): + raise operationerrfmt(self.w_ValueError, msg, *args) def decode_any(self): self.skip_whitespace() - ch = self.peek() + ch = self.next() if ch == '"': return self.decode_string() + elif ch == '{': + return self.decode_object() else: assert False, 'Unkown char: %s' % ch - def getslice(self, start, end): - assert end > 0 - return self.s[start:end] + def decode_object(self): + start = self.i + w_dict = self.space.newdict() + while not self.eof(): + ch = self.peek() + if ch == '}': + self.next() + return w_dict + # + # parse a key: value + self.last_type = TYPE_UNKNOWN + w_name = self.decode_any() + if self.last_type != TYPE_STRING: + self._raise("Key name must be string") + self.skip_whitespace() + ch = self.next() + if ch != ':': + self._raise("No ':' found at char %d", self.i) + self.skip_whitespace() + # + w_value = self.decode_any() + self.space.setitem(w_dict, w_name, w_value) + self.skip_whitespace() + ch = self.next() + if ch == '}': + return w_dict + elif ch == ',': + pass + else: + self._raise("Unexpected '%s' when decoding object (char %d)", + ch, self.i) + self._raise("Unterminated object starting at char %d", start) + + def decode_string(self): - self.next() start = self.i while not self.eof(): # this loop is a fast path for strings which do not contain escape @@ -66,8 +105,7 @@ content_so_far = self.getslice(start, self.i-1) self.unget() return self.decode_string_escaped(start, content_so_far) - 
raise operationerrfmt(self.space.w_ValueError, - "Unterminated string starting at char %d", start) + self._raise("Unterminated string starting at char %d", start) def decode_string_escaped(self, start, content_so_far): @@ -84,9 +122,8 @@ self.decode_escape_sequence(builder) else: builder.append_multiple_char(ch, 1) # we should implement append_char - - raise operationerrfmt(self.space.w_ValueError, - "Unterminated string starting at char %d", start) + # + self._raise("Unterminated string starting at char %d", start) def decode_escape_sequence(self, builder): put = builder.append_multiple_char @@ -102,8 +139,7 @@ elif ch == 'u': return self.decode_escape_sequence_unicode(builder) else: - raise operationerrfmt(self.space.w_ValueError, - "Invalid \\escape: %s (char %d)", ch, self.i-1) + self._raise("Invalid \\escape: %s (char %d)", ch, self.i-1) def decode_escape_sequence_unicode(self, builder): # at this point we are just after the 'u' of the \u1234 sequence. @@ -112,8 +148,7 @@ try: uchr = unichr(int(hexdigits, 16)) except ValueError: - raise operationerrfmt(self.space.w_ValueError, - "Invalid \uXXXX escape (char %d)", self.i-1) + self._raise("Invalid \uXXXX escape (char %d)", self.i-1) # utf8_ch = unicodehelper.encode_utf8(self.space, uchr) builder.append(utf8_ch) diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -68,3 +68,15 @@ import _fastjson s = r'"\u1234"' assert _fastjson.loads(s) == u'\u1234' + + def test_decode_object(self): + import _fastjson + assert _fastjson.loads('{}') == {} + # + s = '{"hello": "world", "aaa": "bbb"}' + assert _fastjson.loads(s) == {'hello': 'world', + 'aaa': 'bbb'} + + def test_decode_object_nonstring_key(self): + pass # write me when we have numbers + From noreply at buildbot.pypy.org Tue Jun 4 17:42:05 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 17:42:05 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: enable _fastjson by default Message-ID: <20130604154205.1F2131C145C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64775:7cc8032c9144 Date: 2013-06-04 17:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7cc8032c9144/ Log: enable _fastjson by default diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -35,7 +35,7 @@ "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv", "cppyy"] + "_continuation", "_cffi_backend", "_csv", "cppyy", "_fastjson"] )) translation_modules = default_modules.copy() From noreply at buildbot.pypy.org Tue Jun 4 19:40:28 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 19:40:28 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: rpython fix and typo Message-ID: <20130604174028.3B11E1C0651@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64776:08d1da984920 Date: 2013-06-04 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/08d1da984920/ Log: rpython fix and typo diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -28,7 +28,9 @@ return ch def unget(self): - self.i -= 1 + i2 = self.i - 1 + assert i2 > 
0 # so that we can use self.i as slice start + self.i = i2 def getslice(self, start, end): assert end > 0 @@ -43,7 +45,7 @@ break def _raise(self, msg, *args): - raise operationerrfmt(self.w_ValueError, msg, *args) + raise operationerrfmt(self.space.w_ValueError, msg, *args) def decode_any(self): self.skip_whitespace() From noreply at buildbot.pypy.org Tue Jun 4 19:40:29 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 19:40:29 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: another rpython fix Message-ID: <20130604174029.7202C1C0651@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64777:ced1fbce0894 Date: 2013-06-04 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/ced1fbce0894/ Log: another rpython fix diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -1,4 +1,5 @@ from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper @@ -43,7 +44,8 @@ self.next() else: break - + + @specialize.arg(1) def _raise(self, msg, *args): raise operationerrfmt(self.space.w_ValueError, msg, *args) From noreply at buildbot.pypy.org Tue Jun 4 19:40:30 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 4 Jun 2013 19:40:30 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: another rpython fix Message-ID: <20130604174030.D77871C0651@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64778:9777a0934c56 Date: 2013-06-04 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/9777a0934c56/ Log: another rpython fix diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -72,7 +72,7 @@ self.last_type = TYPE_UNKNOWN w_name = self.decode_any() if self.last_type != TYPE_STRING: - self._raise("Key name must be string") + self._raise("Key name must be string for object starting at char %d", start) self.skip_whitespace() ch = self.next() if ch != ':': From noreply at buildbot.pypy.org Tue Jun 4 20:44:29 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 4 Jun 2013 20:44:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Update test Message-ID: <20130604184429.30FB01C016D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64779:b11b29c91555 Date: 2013-06-04 17:50 +0200 http://bitbucket.org/pypy/pypy/changeset/b11b29c91555/ Log: Update test diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1805,7 +1805,7 @@ from cPickle import loads, dumps import numpypy as numpy - a = numpy.arange(10.)[::2] + a = numpy.arange(10.).reshape((5, 2))[::2] assert (loads(dumps(a)) == a).all() class AppTestMultiDim(BaseNumpyAppTest): From noreply at buildbot.pypy.org Tue Jun 4 20:44:30 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 4 Jun 2013 20:44:30 +0200 (CEST) Subject: [pypy-commit] pypy default: Implement pickling for slices Message-ID: <20130604184430.924391C145C@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64780:d8b79f042e37 Date: 2013-06-04 
20:43 +0200 http://bitbucket.org/pypy/pypy/changeset/d8b79f042e37/ Log: Implement pickling for slices diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -13,6 +13,9 @@ from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage from rpython.rlib.objectmodel import specialize from pypy.interpreter.mixedmodule import MixedModule +from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.rstring import StringBuilder + MIXIN_32 = (int_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (int_typedef,) if LONG_BIT == 64 else () @@ -42,7 +45,23 @@ return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") -class PrimitiveBox(object): +class Box(object): + _mixin_ = True + + def reduce(self, space): + from rpython.rlib.rstring import StringBuilder + from rpython.rtyper.lltypesystem import rffi, lltype + + numpypy = space.getbuiltinmodule("_numpypy") + assert isinstance(numpypy, MixedModule) + multiarray = numpypy.get("multiarray") + assert isinstance(multiarray, MixedModule) + scalar = multiarray.get("scalar") + + ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) + return ret + +class PrimitiveBox(Box): _mixin_ = True def __init__(self, value): @@ -54,27 +73,19 @@ def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.value) - def reduce(self, space): - from rpython.rlib.rstring import StringBuilder - from rpython.rtyper.lltypesystem import rffi, lltype - - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") - assert isinstance(multiarray, MixedModule) - scalar = multiarray.get("scalar") - + def raw_str(self): value = lltype.malloc(rffi.CArray(lltype.typeOf(self.value)), 1, flavor="raw") value[0] = self.value builder = StringBuilder() builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + ret = builder.build() - ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(builder.build())])]) lltype.free(value, flavor="raw") + return ret -class ComplexBox(object): +class ComplexBox(Box): _mixin_ = True def __init__(self, real, imag=0.): @@ -90,25 +101,17 @@ def convert_imag_to(self, dtype): return dtype.box(self.imag) - def reduce(self, space): - from rpython.rlib.rstring import StringBuilder - from rpython.rtyper.lltypesystem import rffi, lltype - - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") - assert isinstance(multiarray, MixedModule) - scalar = multiarray.get("scalar") - + def raw_str(self): value = lltype.malloc(rffi.CArray(lltype.typeOf(self.real)), 2, flavor="raw") value[0] = self.real value[1] = self.imag builder = StringBuilder() builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + ret = builder.build() - ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(builder.build())])]) lltype.free(value, flavor="raw") + return ret class W_GenericBox(W_Root): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -786,6 +786,7 @@ from 
rpython.rtyper.lltypesystem import rffi from rpython.rlib.rstring import StringBuilder from pypy.interpreter.mixedmodule import MixedModule + from pypy.module.micronumpy.arrayimpl.concrete import SliceArray numpypy = space.getbuiltinmodule("_numpypy") assert isinstance(numpypy, MixedModule) @@ -796,7 +797,14 @@ parameters = space.newtuple([space.gettypefor(W_NDimArray), space.newtuple([space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() - builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) + if isinstance(self.implementation, SliceArray): + iter = self.implementation.create_iter() + while not iter.done(): + box = iter.getitem() + builder.append(box.raw_str()) + iter.next() + else: + builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) state = space.newtuple([ space.wrap(1), # version From noreply at buildbot.pypy.org Tue Jun 4 22:28:21 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Jun 2013 22:28:21 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Implement swapcase() method. Message-ID: <20130604202821.05A761C1401@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r64783:6a4af8ab46ec Date: 2013-06-04 21:17 +0200 http://bitbucket.org/pypy/pypy/changeset/6a4af8ab46ec/ Log: Implement swapcase() method. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -96,6 +96,27 @@ _builder = StringBuilder + def _isupper(self, ch): + return ch.isupper() + + def _islower(self, ch): + return ch.islower() + + def _istitle(self, ch): + return ch.istitle() + + def _isspace(self, ch): + return ch.isspace() + + def _isalpha(self, ch): + return ch.isalpha() + + def _isalnum(self, ch): + return ch.isalnum() + + def _isdigit(self, ch): + return ch.isdigit() + def _upper(self, ch): if ch.islower(): o = ord(ch) - 32 diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -9,12 +9,6 @@ from rpython.rlib.rstring import split -_isspace = lambda c: c.isspace() -_isdigit = lambda c: c.isdigit() -_isalpha = lambda c: c.isalpha() -_isalnum = lambda c: c.isalnum() - - class StringMethods(object): _mixin_ = True @@ -296,13 +290,13 @@ return space.w_True def descr_isalnum(self, space): - return self._is_generic(space, _isalnum) + return self._is_generic(space, self._isalnum) def descr_isalpha(self, space): - return self._is_generic(space, _isalpha) + return self._is_generic(space, self._isalpha) def descr_isdigit(self, space): - return self._is_generic(space, _isdigit) + return self._is_generic(space, self._isdigit) def descr_islower(self, space): v = self._value @@ -318,7 +312,7 @@ return space.newbool(cased) def descr_isspace(self, space): - return self._is_generic(space, _isspace) + return self._is_generic(space, self._isspace) def descr_istitle(self, space): input = self._value @@ -769,19 +763,14 @@ return self._strip(space, w_chars, left=0, right=1) def descr_swapcase(self, space): - # XXX just to pass the test - return space.wrap(self._val().swapcase()) - - selfvalue = self._value + selfvalue = self._val() builder = self._builder(len(selfvalue)) for i in range(len(selfvalue)): ch = selfvalue[i] - if ch.isupper(): - o = ord(ch) + 32 - builder.append(chr(o)) - elif ch.islower(): - o = ord(ch) - 32 - builder.append(chr(o)) + if 
self._isupper(ch): + builder.append(self._lower(ch)) + elif self._islower(ch): + builder.append(self._upper(ch)) else: builder.append(ch) return space.wrap(builder.build()) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -89,6 +89,27 @@ _builder = UnicodeBuilder + def _isupper(self, ch): + return ch.isupper() + + def _islower(self, ch): + return ch.islower() + + def _istitle(self, ch): + return ch.istitle() + + def _isspace(self, ch): + return ch.isspace() + + def _isalpha(self, ch): + return ch.isalpha() + + def _isalnum(self, ch): + return ch.isalnum() + + def _isdigit(self, ch): + return ch.isdigit() + def _upper(self, ch): return unichr(unicodedb.toupper(ord(ch))) From noreply at buildbot.pypy.org Tue Jun 4 22:28:19 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Jun 2013 22:28:19 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Implement capitalize() method. Message-ID: <20130604202819.A89551C12FE@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r64782:cfd9859a607d Date: 2013-06-04 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/cfd9859a607d/ Log: Implement capitalize() method. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -96,6 +96,20 @@ _builder = StringBuilder + def _upper(self, ch): + if ch.islower(): + o = ord(ch) - 32 + return chr(o) + else: + return ch + + def _lower(self, ch): + if ch.isupper(): + o = ord(ch) + 32 + return chr(o) + else: + return ch + @staticmethod @unwrap_spec(w_object = WrappedDefault("")) def descr_new(space, w_stringtype, w_object): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -27,20 +27,6 @@ def _val(self): raise NotImplementedError - def _upper(self, ch): - if ch.islower(): - o = ord(ch) - 32 - return chr(o) - else: - return ch - - def _lower(self, ch): - if ch.isupper(): - o = ord(ch) + 32 - return chr(o) - else: - return ch - def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 @@ -174,27 +160,14 @@ return self._sliced(space, selfvalue, start, stop, self) def descr_capitalize(self, space): - # XXX just to pass the test - return self._new(self._val().capitalize()) + value = self._val() + if len(value) == 0: + return self.EMPTY - input = self._value - builder = self._builder(len(input)) - if len(input) > 0: - ch = input[0] - if ch.islower(): - o = ord(ch) - 32 - builder.append(chr(o)) - else: - builder.append(ch) - - for i in range(1, len(input)): - ch = input[i] - if ch.isupper(): - o = ord(ch) + 32 - builder.append(chr(o)) - else: - builder.append(ch) - + builder = self._builder(len(value)) + builder.append(self._upper(value[0])) + for i in range(1, len(value)): + builder.append(self._lower(value[i])) return space.wrap(builder.build()) @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -89,6 +89,12 @@ _builder = UnicodeBuilder + def _upper(self, ch): + return unichr(unicodedb.toupper(ord(ch))) + + def _lower(self, ch): + return unichr(unicodedb.tolower(ord(ch))) + def descr_repr(self, space): chars = self._value size 
= len(chars) From noreply at buildbot.pypy.org Tue Jun 4 22:28:18 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Jun 2013 22:28:18 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20130604202818.488471C016D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r64781:08c45f23c86d Date: 2013-06-04 20:41 +0200 http://bitbucket.org/pypy/pypy/changeset/08c45f23c86d/ Log: hg merge default diff too long, truncating to 2000 out of 7083 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -151,6 +151,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) 
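JUMP_IF_NOT_DEBUG is registered just below as a jrel_op, i.e. its argument is a distance from the following instruction rather than an absolute bytecode offset, which is why the interpreter-level handler does next_instr += jumpby. The same distinction exists for the stock opcodes and can be checked with the opcode module:

    import opcode
    # relative jumps (hasjrel) encode a distance, absolute jumps (hasjabs) an offset
    print opcode.opmap['JUMP_FORWARD'] in opcode.hasjrel     # True
    print opcode.opmap['JUMP_ABSOLUTE'] in opcode.hasjabs    # True
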
def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # Distance to target address del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
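The _reuse()/_drop() calls added throughout this socket.py diff implement a manual reference count on the low-level '_socket.socket' object: every wrapper that keeps it alive calls _reuse() once and _drop() exactly once when it is done with it. A rough model of that protocol, with a hypothetical class standing in for '_socket.socket':

    class _fakesocket(object):
        def __init__(self):
            self._refs = 1                # the creator holds the first reference
        def _reuse(self):
            self._refs += 1
        def _drop(self):
            self._refs -= 1
            if self._refs == 0:
                self._really_close()
        def _really_close(self):
            pass                          # here the file descriptor would be closed
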
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,16 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - try: - self._sock._decref_socketios() - except AttributeError: - pass # bah, someone built a _fileobject manually - # with some unexpected replacement of the - # _socketobject class + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + 
_pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,14 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -try: - import cpyext -except ImportError: - raise ImportError("No module named '_testcapi'") -else: - compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,62 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = 
['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -156,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -701,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -74,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -168,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -390,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -437,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -450,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -475,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
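The vengine_cpy.py and vengine_gen.py hunks above make the generated FFILibrary answer dir(): the class keeps a _cffi_dir list, and every constant, function, enum value, macro and global attached to the library also has its name appended to that list, which __dir__ then returns. A minimal sketch of the same pattern, using invented names (_Lib, _exposed, _attach) rather than cffi's real classes:

    import types

    class _Lib(types.ModuleType):
        # names attached to the library, remembered so dir() can list them
        _exposed = []
        def __dir__(self):
            return sorted(_Lib._exposed)

    def _attach(lib, name, value):
        # attach a symbol and record its name for dir()
        setattr(lib, name, value)
        type(lib)._exposed.append(name)

    lib = _Lib("")
    _attach(lib, "FOO", 42)
    _attach(lib, "bar", lambda x: x + 1)
    print(dir(lib))             # ['FOO', 'bar']
    print(lib.FOO, lib.bar(1))  # 42 2

For global variables exposed through a property (the getter/setter hunk that follows), the name still goes into the same list, which is why both engines gain a type(library)._cffi_dir.append(name) line next to the setattr.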
@@ -486,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -32,11 +32,10 @@ "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "array", + "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv"] # "cpyext", "cppyy"] -# disabled until problems are fixed + "_continuation", "_cffi_backend", "_csv", "cppyy"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? + ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -5,7 +5,7 @@ Purpose ------- -This document describes an FFI for RPython language, concentrating +This document describes an FFI for the RPython language, concentrating on low-level backends like C. 
It describes how to declare and call low-level (C) functions from RPython level. @@ -50,7 +50,7 @@ ------ In rffi_ there are various declared types for C-structures, like CCHARP -(char*), SIZE_T (size_t) and others. refer to file for details. +(char*), SIZE_T (size_t) and others. Refer to file for details. Instances of non-primitive types must be alloced by hand, with call to lltype.malloc, and freed by lltype.free both with keyword argument flavor='raw'. There are several helpers like string -> char* diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -34,3 +34,19 @@ .. branch: remove-iter-smm Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,8 @@ # App-level version of py.py. # See test/test_app_main. 
-# Missing vs CPython: -d, -OO, -t, -v, -x, -3 -"""\ +# Missing vs CPython: -d, -t, -v, -x, -3 +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +12,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -R : ignored (see http://bugs.python.org/issue14621) -Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -27,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -470,6 +470,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -86,12 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - w_type = space.type(w_stararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after * must be " - "a sequence, not %s" % (typename,))) + "argument after * must be a sequence, not %T", w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -116,12 +113,10 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - w_type = space.type(w_starstararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after ** must be " - "a mapping, not %s" % (typename,))) + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -632,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2793,8 +2793,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2835,8 +2834,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2881,8 +2879,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2925,8 +2922,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2971,8 +2967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2993,8 +2988,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3024,8 +3018,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3046,8 +3039,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3064,8 +3056,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if 
w_self.body is None: list_w = [] @@ -3081,8 +3072,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3131,8 +3121,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3149,8 +3138,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3166,8 +3154,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3183,8 +3170,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3234,8 +3220,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3278,8 +3263,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3320,8 +3304,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3341,8 +3324,7 @@ if w_obj is not None: 
return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3391,8 +3373,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3415,8 +3396,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3439,8 +3419,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3489,8 +3468,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3509,8 +3487,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3530,8 +3507,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3579,8 +3555,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3603,8 +3578,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3623,8 +3597,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3640,8 +3613,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3690,8 +3662,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3710,8 +3681,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3727,8 +3697,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3776,8 +3745,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3796,8 +3764,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3813,8 +3780,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if 
w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3862,8 +3828,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3886,8 +3851,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3906,8 +3870,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3954,8 +3917,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3978,8 +3940,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -4002,8 +3963,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4048,8 +4008,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4065,8 +4024,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4082,8 +4040,7 @@ def 
TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4128,8 +4085,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4145,8 +4101,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4193,8 +4148,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4217,8 +4171,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4262,8 +4215,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4308,8 +4260,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4329,8 +4280,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4350,8 +4300,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4399,8 +4348,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4423,8 +4371,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4447,8 +4394,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4493,8 +4439,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4539,8 +4484,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4638,8 +4582,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4660,8 +4603,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4691,8 +4633,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 
'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4711,8 +4652,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4758,8 +4698,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4782,8 +4721,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4806,8 +4744,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4856,8 +4793,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4880,8 +4816,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4929,8 +4864,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4951,8 +4885,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') 
return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -5000,8 +4933,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -5024,8 +4956,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -5048,8 +4979,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5094,8 +5024,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5111,8 +5040,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5155,8 +5083,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5201,8 +5128,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5221,8 +5147,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5268,8 +5193,7 @@ if w_obj is not None: return w_obj if not 
w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5288,8 +5212,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5335,8 +5258,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5359,8 +5281,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5379,8 +5300,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5427,8 +5347,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5447,8 +5366,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5494,8 +5412,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5542,8 +5459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = 
space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5562,8 +5478,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5579,8 +5494,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5628,8 +5542,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5648,8 +5561,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5665,8 +5577,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5686,8 +5597,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5710,8 +5620,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5764,8 +5673,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5812,8 +5720,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5858,8 +5765,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5904,8 +5810,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5928,8 +5833,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5950,8 +5854,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -6000,8 +5903,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -6024,8 +5926,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -6048,8 +5949,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return 
expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -6098,8 +5998,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6120,8 +6019,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6165,8 +6063,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6186,8 +6083,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6232,8 +6128,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6253,8 +6148,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6303,8 +6197,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6422,8 +6315,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lower') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6446,8 +6338,7 @@ if w_obj is not None: return w_obj 
if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'upper') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6470,8 +6361,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'step') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6516,8 +6406,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dims') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6562,8 +6451,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6834,8 +6722,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6858,8 +6745,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6878,8 +6764,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ifs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6926,8 +6811,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) From noreply at buildbot.pypy.org Tue Jun 4 22:28:22 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Jun 2013 22:28:22 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Implement title() method. 
Message-ID: <20130604202822.6C72A1C145C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r64784:8c06f1b5cde7 Date: 2013-06-04 21:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8c06f1b5cde7/ Log: Implement title() method. diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -117,6 +117,8 @@ def _isdigit(self, ch): return ch.isdigit() + _iscased = _isalpha + def _upper(self, ch): if ch.islower(): o = ord(ch) - 32 diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -776,22 +776,19 @@ return space.wrap(builder.build()) def descr_title(self, space): - # XXX just to pass the test - return space.wrap(self._val().title()) - selfval = self._val() if len(selfval) == 0: return self builder = self._builder(len(selfval)) - prev_letter = ' ' - for pos in range(len(input)): - ch = input[pos] - if not prev_letter.isalpha(): + previous_is_cased = False + for pos in range(len(selfval)): + ch = selfval[pos] + if not previous_is_cased: builder.append(self._upper(ch)) else: builder.append(self._lower(ch)) - prev_letter = ch + previous_is_cased = self._iscased(ch) return space.wrap(builder.build()) DEFAULT_NOOP_TABLE = ''.join([chr(i) for i in range(256)]) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -110,6 +110,9 @@ def _isdigit(self, ch): return ch.isdigit() + def _iscased(self, ch): + return unicodedb.iscased(ord(ch)) + def _upper(self, ch): return unichr(unicodedb.toupper(ord(ch))) From noreply at buildbot.pypy.org Tue Jun 4 22:28:26 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 4 Jun 2013 22:28:26 +0200 (CEST) Subject: [pypy-commit] pypy refactor-str-types: Fix. Message-ID: <20130604202826.F06181C016D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r64785:1fa825c8c78d Date: 2013-06-04 22:25 +0200 http://bitbucket.org/pypy/pypy/changeset/1fa825c8c78d/ Log: Fix. 
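For reference, the descr_title() loop introduced in r64784 above tracks only whether the previous character was cased, rather than remembering the previous character itself. A plain-Python sketch of the same algorithm (str.isalpha() stands in for the RPython _iscased helper, which the bytes implementation aliases to _isalpha; the free-standing title() function is only for illustration):

    def title(s):
        # uppercase a character that follows an uncased one,
        # lowercase every other cased character
        out = []
        previous_is_cased = False
        for ch in s:
            if not previous_is_cased:
                out.append(ch.upper())
            else:
                out.append(ch.lower())
            previous_is_cased = ch.isalpha()
        return ''.join(out)

    assert title("hello woRLd 3abc") == "Hello World 3Abc"

For byte strings this matches CPython's str.title(), since only letters count as cased; the unicode version in the diff uses unicodedb.iscased instead.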
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState -from pypy.objspace.std import unicodeobject, bytesobject +from pypy.objspace.std import unicodeobject from rpython.rlib import runicode from rpython.tool.sourcetools import func_renamer import sys @@ -684,9 +684,12 @@ str = space.unicode_w(w_str) substr = space.unicode_w(w_substr) if rffi.cast(lltype.Signed, direction) <= 0: - return bytesobject.stringstartswith(str, substr, start, end) + w_ret = space.call_method(w_str, "startswith", w_substr, + space.wrap(start), space.wrap(end)) else: - return bytesobject.stringendswith(str, substr, start, end) + w_ret = space.call_method(w_str, "endswith", w_substr, + space.wrap(start), space.wrap(end)) + return space.int_w(w_ret) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_Count(space, w_str, w_substr, start, end): diff --git a/pypy/module/micronumpy/stdobjspace.py b/pypy/module/micronumpy/stdobjspace.py --- a/pypy/module/micronumpy/stdobjspace.py +++ b/pypy/module/micronumpy/stdobjspace.py @@ -7,5 +7,5 @@ def register_delegates(typeorder): typeorder[interp_boxes.W_StringBox] = [ - (bytesobject.W_StringObject, delegate_stringbox2stringobj), + (bytesobject.W_BytesObject, delegate_stringbox2stringobj), ] diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -434,8 +434,11 @@ return space.wrap(value) def descr_lower(self, space): - self = self._value - return space.wrap(self.lower()) + value = self._val() + builder = self._builder(len(value)) + for i in range(len(value)): + builder.append(self._lower(value[i])) + return self._new(builder.build()) def descr_partition(self, space, w_sub): value = self._val() @@ -822,8 +825,11 @@ return self._new(buf.build()) def descr_upper(self, space): - self = self._value - return space.wrap(self.upper()) + value = self._val() + builder = self._builder(len(value)) + for i in range(len(value)): + builder.append(self._upper(value[i])) + return self._new(builder.build()) @unwrap_spec(width=int) def descr_zfill(self, space, width): From noreply at buildbot.pypy.org Wed Jun 5 03:32:53 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 03:32:53 +0200 (CEST) Subject: [pypy-commit] pypy py3k: properly decode SyntaxError source text Message-ID: <20130605013253.76EA51C1527@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64786:e7eccdeaa126 Date: 2013-06-04 18:31 -0700 http://bitbucket.org/pypy/pypy/changeset/e7eccdeaa126/ Log: properly decode SyntaxError source text diff --git a/pypy/interpreter/pyparser/error.py b/pypy/interpreter/pyparser/error.py --- a/pypy/interpreter/pyparser/error.py +++ b/pypy/interpreter/pyparser/error.py @@ -12,15 +12,19 @@ self.lastlineno = lastlineno def wrap_info(self, space): - if self.filename is None: - w_filename = space.w_None - else: + w_text = w_filename = space.w_None + if self.text is not None: + from rpython.rlib.runicode import str_decode_utf_8 + # self.text may not be UTF-8 in case of decoding errors + w_text = space.wrap(str_decode_utf_8(self.text, len(self.text), + 'replace')[0]) + if self.filename is not None: 
w_filename = space.fsdecode(space.wrapbytes(self.filename)) return space.newtuple([space.wrap(self.msg), space.newtuple([w_filename, space.wrap(self.lineno), space.wrap(self.offset), - space.wrap(self.text), + space.wrap(w_text), space.wrap(self.lastlineno)])]) def __str__(self): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -853,7 +853,6 @@ """ def test_cpython_issue2301(self): - skip('XXX') try: compile(b"# coding: utf7\nprint '+XnQ-'", "dummy", "exec") except SyntaxError as v: From noreply at buildbot.pypy.org Wed Jun 5 10:13:47 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 5 Jun 2013 10:13:47 +0200 (CEST) Subject: [pypy-commit] pypy default: add runner tests for cast_int_to_float and cast_float_to_int Message-ID: <20130605081347.F29361C01CD@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64787:ca5c756f0e50 Date: 2013-06-05 03:10 -0500 http://bitbucket.org/pypy/pypy/changeset/ca5c756f0e50/ Log: add runner tests for cast_int_to_float and cast_float_to_int diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1911,6 +1911,29 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_cast_int_to_float(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + for x in [-10, -1, 0, 3, 42, sys.maxint-1]: + res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + [BoxInt(x)], 'float').value + assert longlong.getrealfloat(res) == float(x) + res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + [ConstInt(x)], 'float').value + assert longlong.getrealfloat(res) == float(x) + + def test_cast_float_to_int(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + for x in [-24.23, -5.3, 0.0, 3.1234, 11.1, 0.1]: + v = longlong.getfloatstorage(x) + res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + [BoxFloat(v)], 'int').value + assert res == int(x) + res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + [ConstFloat(v)], 'int').value + assert res == int(x) + def test_convert_float_bytes(self): if not self.cpu.supports_floats: py.test.skip("requires floats") From noreply at buildbot.pypy.org Wed Jun 5 11:50:43 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 11:50:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Document merged branch Message-ID: <20130605095043.4870B1C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64788:5e587e197d4f Date: 2013-06-05 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/5e587e197d4f/ Log: Document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -50,3 +50,5 @@ .. branch: win32-fixes3 Skip and fix some non-translated (own) tests for win32 builds +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects From noreply at buildbot.pypy.org Wed Jun 5 12:34:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 12:34:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix various tests failing because of e953dfbc7f0a. 
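The wrap_info() change above decodes the offending source line with the 'replace' error handler because, as the new comment notes, self.text may not be valid UTF-8. A small plain-Python illustration of the behaviour being relied on (the byte string is a made-up stand-in for self.text):

    raw = b"print '+XnQ-'\xff\n"        # hypothetical line; the \xff byte is not valid UTF-8
    text = raw.decode("utf-8", "replace")
    assert u"\ufffd" in text             # undecodable bytes become U+FFFD instead of raising

str_decode_utf_8(..., 'replace') from rpython.rlib.runicode is expected to behave the same way, so the SyntaxError always carries a usable (if lossy) text attribute; that is presumably what lets the previously skipped test_cpython_issue2301 run again.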
Message-ID: <20130605103418.AF0B01C12FE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64789:d0ffe15789d8 Date: 2013-06-05 12:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d0ffe15789d8/ Log: Fix various tests failing because of e953dfbc7f0a. diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,6 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) -jrel_op('JUMP_IF_NOT_DEBUG', 204) # Distance to target address +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -231,6 +231,7 @@ assert loop.match(""" i8 = int_lt(i6, 300) guard_true(i8, descr=...) + guard_not_invalidated? i10 = int_lshift(i6, 1) i12 = int_add_ovf(i5, 1) guard_no_overflow(descr=...) @@ -253,6 +254,7 @@ assert loop.match(""" i8 = int_lt(i6, 300) guard_true(i8, descr=...) + guard_not_invalidated? i10 = int_add_ovf(i5, 8) guard_no_overflow(descr=...) 
i12 = int_add(i6, 1) From noreply at buildbot.pypy.org Wed Jun 5 14:15:27 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 14:15:27 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add a fast path for decoding ascii-only, it gives ~10% speedup on certain benchmarks Message-ID: <20130605121527.8541A1C01CD@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64790:73de5d85c1b3 Date: 2013-06-05 11:32 +0200 http://bitbucket.org/pypy/pypy/changeset/73de5d85c1b3/ Log: add a fast path for decoding ascii-only, it gives ~10% speedup on certain benchmarks diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -96,13 +96,20 @@ def decode_string(self): start = self.i + bits = 0 while not self.eof(): # this loop is a fast path for strings which do not contain escape # characters ch = self.next() + bits |= ord(ch) if ch == '"': content_utf8 = self.getslice(start, self.i-1) - content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) + if bits & 0x80: + # the 8th bit is set, it's an utf8 strnig + content_unicode = content_utf8.decode('utf-8') + else: + # ascii only, faster to decode + content_unicode = content_utf8.decode('ascii') self.last_type = TYPE_STRING return self.space.wrap(content_unicode) elif ch == '\\': From noreply at buildbot.pypy.org Wed Jun 5 14:15:28 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 14:15:28 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add parsing of integers Message-ID: <20130605121528.D69CA1C12FE@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64791:65996c45a142 Date: 2013-06-05 12:32 +0200 http://bitbucket.org/pypy/pypy/changeset/65996c45a142/ Log: add parsing of integers diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -51,14 +51,36 @@ def decode_any(self): self.skip_whitespace() - ch = self.next() + ch = self.peek() if ch == '"': + self.next() return self.decode_string() + elif ch.isdigit() or ch == '-': + return self.decode_numeric(ch) elif ch == '{': + self.next() return self.decode_object() else: assert False, 'Unkown char: %s' % ch + def decode_numeric(self, ch): + intval = 0 + sign = 1 + if ch == '-': + sign = -1 + self.next() + + while not self.eof(): + ch = self.peek() + if ch.isdigit(): + intval = intval*10 + ord(ch)-ord('0') + self.next() + else: + break + # + intval = intval*sign + return self.space.wrap(intval) + def decode_object(self): start = self.i w_dict = self.space.newdict() diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -69,6 +69,12 @@ s = r'"\u1234"' assert _fastjson.loads(s) == u'\u1234' + def test_decode_numeric(self): + import _fastjson + assert _fastjson.loads('42') == 42 + assert _fastjson.loads('-42') == -42 + raises(ValueError, "_fastjson.loads('42 abc')") + def test_decode_object(self): import _fastjson assert _fastjson.loads('{}') == {} From noreply at buildbot.pypy.org Wed Jun 5 14:15:30 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 14:15:30 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add support for parsing float values Message-ID: 
<20130605121530.1F15C1C01CD@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64792:535479329816 Date: 2013-06-05 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/535479329816/ Log: add support for parsing float values diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -7,6 +7,15 @@ def is_whitespace(ch): return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' +# precomputing negative powers of 10 is MUCH faster than using e.g. math.pow +# at runtime +NEG_POW_10 = [10**-i for i in range(16)] +def neg_pow_10(x, exp): + if exp >= len(NEG_POW_10): + return 0.0 + return x * NEG_POW_10[exp] + + TYPE_UNKNOWN = 0 TYPE_STRING = 1 @@ -23,6 +32,12 @@ def peek(self): return self.s[self.i] + def peek_maybe(self): + if self.eof(): + return '\0' + else: + return self.peek() + def next(self): ch = self.peek() self.i += 1 @@ -56,30 +71,69 @@ self.next() return self.decode_string() elif ch.isdigit() or ch == '-': - return self.decode_numeric(ch) + return self.decode_numeric() elif ch == '{': self.next() return self.decode_object() else: - assert False, 'Unkown char: %s' % ch + self._raise("No JSON object could be decoded: unexpected '%s' at char %d", + ch, self.i) - def decode_numeric(self, ch): - intval = 0 + def decode_numeric(self): + intval = self.parse_integer() + # + is_float = False + exp = 0 + frcval = 0.0 + frccount = 0 + # + # check for the optional fractional part + ch = self.peek_maybe() + if ch == '.': + is_float = True + self.next() + frcval, frccount = self.parse_digits() + frcval = neg_pow_10(frcval, frccount) + ch = self.peek_maybe() + # check for the optional exponent part + if ch == 'E' or ch == 'e': + is_float = True + self.next() + exp = self.parse_integer() + # + if is_float: + # build the float + floatval = intval + frcval + floatval = floatval * 10**exp + return self.space.wrap(floatval) + else: + return self.space.wrap(intval) + + def parse_integer(self): + "Parse a decimal number with an optional minus sign" sign = 1 - if ch == '-': + if self.peek_maybe() == '-': sign = -1 self.next() + intval, _ = self.parse_digits() + return sign * intval + def parse_digits(self): + "Parse a sequence of digits as a decimal number. 
No sign allowed" + intval = 0 + count = 0 while not self.eof(): ch = self.peek() if ch.isdigit(): intval = intval*10 + ord(ch)-ord('0') + count += 1 self.next() else: break - # - intval = intval*sign - return self.space.wrap(intval) + if count == 0: + self._raise("Expected digit at char %d", self.i) + return intval, count + def decode_object(self): start = self.i diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -71,9 +71,31 @@ def test_decode_numeric(self): import _fastjson - assert _fastjson.loads('42') == 42 - assert _fastjson.loads('-42') == -42 - raises(ValueError, "_fastjson.loads('42 abc')") + def check(s, val): + res = _fastjson.loads(s) + assert type(res) is type(val) + assert res == val + # + check('42', 42) + check('-42', -42) + check('42.123', 42.123) + check('42E0', 42.0) + check('42E3', 42000.0) + check('42E-1', 4.2) + check('42.123E3', 42123.0) + + def test_decode_numeric_invalid(self): + import _fastjson + def error(s): + raises(ValueError, _fastjson.loads, s) + # + error(' 42 abc') + error('.123') + error('12.') + error('12.-3') + error('12E') + error('12E-') + def test_decode_object(self): import _fastjson From noreply at buildbot.pypy.org Wed Jun 5 14:53:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 14:53:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Partial revert of 661d7f7624dc: don't let the JIT look inside rbigint. Message-ID: <20130605125316.008271C12FE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64793:236f4d2c1567 Date: 2013-06-05 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/236f4d2c1567/ Log: Partial revert of 661d7f7624dc: don't let the JIT look inside rbigint. It causes issues. Instead, fix the test more directly by adding some @jit.elidable that were needed for consistency. diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -118,7 +118,7 @@ def look_inside_function(self, func): mod = func.__module__ or '?' 
- if mod == 'rpython.rlib.rlocale' or mod == 'rpython.rlib.rsocket': + if mod == 'rpython.rlib.rbigint' or mod == 'rpython.rlib.rlocale' or mod == 'rpython.rlib.rsocket': return False if mod.startswith('pypy.interpreter.astcompiler.'): return False diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -8,7 +8,12 @@ def test_bigint(): from rpython.rlib.rbigint import rbigint - assert pypypolicy.look_inside_function(rbigint.lt.im_func) + assert not pypypolicy.look_inside_function(rbigint.eq.im_func) + assert not pypypolicy.look_inside_function(rbigint.ne.im_func) + assert not pypypolicy.look_inside_function(rbigint.lt.im_func) + assert not pypypolicy.look_inside_function(rbigint.le.im_func) + assert not pypypolicy.look_inside_function(rbigint.gt.im_func) + assert not pypypolicy.look_inside_function(rbigint.ge.im_func) def test_rlocale(): from rpython.rlib.rlocale import setlocale diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -448,6 +448,7 @@ i += 1 return True + @jit.elidable def ne(self, other): return not self.eq(other) @@ -486,12 +487,15 @@ i -= 1 return False + @jit.elidable def le(self, other): return not other.lt(self) + @jit.elidable def gt(self, other): return other.lt(self) + @jit.elidable def ge(self, other): return not self.lt(other) From noreply at buildbot.pypy.org Wed Jun 5 14:53:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 14:53:17 +0200 (CEST) Subject: [pypy-commit] pypy default: More @jit.elidable. The new test in test_misc is still not passing. Message-ID: <20130605125317.4251A1C12FE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64794:e4d4c3e4eafa Date: 2013-06-05 14:57 +0200 http://bitbucket.org/pypy/pypy/changeset/e4d4c3e4eafa/ Log: More @jit.elidable. The new test in test_misc is still not passing. diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -407,5 +407,5 @@ log = self.run(main, [300]) loop, = log.loops_by_id("long_op") - assert log.match(""" + assert loop.match(""" """) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -83,6 +83,13 @@ func._jit_look_inside_ = False return func +def look_inside(func): + """ Make sure the JIT traces inside decorated function, even + if the rest of the module is not visible to the JIT + """ + func._jit_look_inside_ = True + return func + def unroll_safe(func): """ JIT can safely unroll loops in this function and this will not lead to code explosion diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -190,9 +190,9 @@ @staticmethod + @jit.elidable def frombool(b): - # This function is marked as pure, so you must not call it and - # then modify the result. + # You must not call this function and then modify the result. 
if b: return ONERBIGINT return NULLRBIGINT @@ -251,6 +251,7 @@ return _decimalstr_to_bigint(s) @staticmethod + @jit.elidable def frombytes(s, byteorder, signed): if byteorder not in ('big', 'little'): raise InvalidEndiannessError() @@ -383,9 +384,11 @@ def tolonglong(self): return _AsLongLong(self) + @jit.look_inside def tobool(self): return self.sign != 0 + @jit.elidable def touint(self): if self.sign == -1: raise ValueError("cannot convert negative integer to unsigned int") @@ -410,13 +413,16 @@ raise ValueError("cannot convert negative integer to unsigned int") return _AsULonglong_ignore_sign(self) + @jit.elidable def uintmask(self): return _AsUInt_mask(self) + @jit.elidable def ulonglongmask(self): """Return r_ulonglong(self), truncating.""" return _AsULonglong_mask(self) + @jit.elidable def tofloat(self): return _AsDouble(self) @@ -448,7 +454,7 @@ i += 1 return True - @jit.elidable + @jit.look_inside def ne(self, other): return not self.eq(other) @@ -487,15 +493,15 @@ i -= 1 return False - @jit.elidable + @jit.look_inside def le(self, other): return not other.lt(self) - @jit.elidable + @jit.look_inside def gt(self, other): return other.lt(self) - @jit.elidable + @jit.look_inside def ge(self, other): return not self.lt(other) @@ -596,6 +602,7 @@ return div + @jit.look_inside def div(self, other): return self.floordiv(other) @@ -796,14 +803,17 @@ z = z.sub(c) return z + @jit.elidable def neg(self): return rbigint(self._digits, -self.sign, self.size) + @jit.elidable def abs(self): if self.sign != -1: return self return rbigint(self._digits, 1, self.size) + @jit.elidable def invert(self): #Implement ~x as -(x + 1) if self.sign == 0: return ONENEGATIVERBIGINT @@ -913,12 +923,14 @@ def or_(self, other): return _bitwise(self, '|', other) + @jit.elidable def oct(self): if self.sign == 0: return '0L' else: return _format(self, BASE8, '0', 'L') + @jit.elidable def hex(self): return _format(self, BASE16, '0x', 'L') From noreply at buildbot.pypy.org Wed Jun 5 15:01:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 15:01:37 +0200 (CEST) Subject: [pypy-commit] pypy default: A test and fix for jit.look_inside. Message-ID: <20130605130137.C45EA1C1401@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64795:6ee99f717da5 Date: 2013-06-05 15:01 +0200 http://bitbucket.org/pypy/pypy/changeset/6ee99f717da5/ Log: A test and fix for jit.look_inside. 
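Changeset r64794 above adds jit.look_inside as the counterpart of dont_look_inside, and r64795, whose diff follows, makes the codewriter policy honour that marker before falling back to its own guessing. A condensed sketch of how the two pieces fit together (simplified from rpython/rlib/jit.py and rpython/jit/codewriter/policy.py; the free-standing see_function helper is only for illustration, the real logic sits inside JitPolicy.look_inside_graph and also consults _reject_function):

    def look_inside(func):
        # force the JIT to trace into 'func', even if the policy's
        # heuristics would reject it
        func._jit_look_inside_ = True
        return func

    def dont_look_inside(func):
        # keep the JIT out of 'func'
        func._jit_look_inside_ = False
        return func

    def see_function(func, policy):
        if hasattr(func, '_jit_look_inside_'):
            return func._jit_look_inside_          # explicit marker wins
        return policy.look_inside_function(func)   # otherwise, heuristics

This is why rbigint can stay blacklisted as a whole module in pypy/module/pypyjit/policy.py while cheap delegating methods such as tobool(), ne() or le() are re-enabled one by one with @jit.look_inside.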
diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py --- a/rpython/jit/codewriter/policy.py +++ b/rpython/jit/codewriter/policy.py @@ -39,8 +39,6 @@ return True # look into everything by default def _reject_function(self, func): - if hasattr(func, '_jit_look_inside_'): - return not func._jit_look_inside_ # explicitly elidable functions are always opaque if getattr(func, '_elidable_function_', False): return True @@ -58,8 +56,11 @@ except AttributeError: see_function = True else: - see_function = (self.look_inside_function(func) and not - self._reject_function(func)) + if hasattr(func, '_jit_look_inside_'): + see_function = func._jit_look_inside_ # override guessing + else: + see_function = (self.look_inside_function(func) and not + self._reject_function(func)) contains_loop = contains_loop and not getattr( func, '_jit_unroll_safe_', False) diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -65,6 +65,20 @@ graph = support.getgraph(h, [5]) assert not JitPolicy().look_inside_graph(graph) +def test_look_inside(): + def h1(x): + return x + 1 + @jit.look_inside # force True, even if look_inside_function() thinks not + def h2(x): + return x + 2 + class MyPolicy(JitPolicy): + def look_inside_function(self, func): + return False + graph1 = support.getgraph(h1, [5]) + graph2 = support.getgraph(h2, [5]) + assert not MyPolicy().look_inside_graph(graph1) + assert MyPolicy().look_inside_graph(graph2) + def test_loops(): def g(x): i = 0 From noreply at buildbot.pypy.org Wed Jun 5 15:05:54 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 15:05:54 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: rpython fixes Message-ID: <20130605130554.EB2FC1C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64796:6b66c9b61731 Date: 2013-06-05 15:05 +0200 http://bitbucket.org/pypy/pypy/changeset/6b66c9b61731/ Log: rpython fixes diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -1,3 +1,4 @@ +import math from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, operationerrfmt @@ -9,13 +10,12 @@ # precomputing negative powers of 10 is MUCH faster than using e.g. 
math.pow # at runtime -NEG_POW_10 = [10**-i for i in range(16)] +NEG_POW_10 = [10.0**-i for i in range(16)] def neg_pow_10(x, exp): if exp >= len(NEG_POW_10): return 0.0 return x * NEG_POW_10[exp] - TYPE_UNKNOWN = 0 TYPE_STRING = 1 @@ -104,7 +104,7 @@ if is_float: # build the float floatval = intval + frcval - floatval = floatval * 10**exp + floatval = floatval * math.pow(10, exp) return self.space.wrap(floatval) else: return self.space.wrap(intval) @@ -236,6 +236,8 @@ uchr = unichr(int(hexdigits, 16)) except ValueError: self._raise("Invalid \uXXXX escape (char %d)", self.i-1) + return # help the annotator to know that we'll never go beyond + # this point # utf8_ch = unicodehelper.encode_utf8(self.space, uchr) builder.append(utf8_ch) From noreply at buildbot.pypy.org Wed Jun 5 15:57:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 15:57:37 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130605135737.74E371C3333@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64:87d10f56bb42 Date: 2013-06-05 12:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/87d10f56bb42/ Log: in-progress diff --git a/.hgignore b/.hgignore new file mode 100644 --- /dev/null +++ b/.hgignore @@ -0,0 +1,3 @@ +syntax: glob +*.pyc +*~ diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -18,21 +18,18 @@ die if we start more than 0x7fff threads. */ static revision_t next_locked_value = (LOCKED + 1) | 1; -/* a negative odd number that uniquely identifies the currently running - transaction (but the number in aborted transactions is reused). - Because we don't know yet the value of 'global_cur_time' that we'll - be assigned when we commit, we use the (negative of) the value of - 'global_cur_time' when we committed the previous transaction. */ -__thread revision_t stm_local_revision; +/* a negative odd number that identifies the currently running + transaction within the thread. 
*/ +__thread revision_t stm_private_rev_num; revision_t stm_global_cur_time(void) /* for tests */ { return global_cur_time; } -revision_t stm_local_rev(void) /* for tests */ +revision_t get_private_rev_num(void) /* for tests */ { - return stm_local_revision; + return stm_private_rev_num; } struct tx_descriptor *stm_thread_descriptor(void) /* for tests */ { @@ -70,6 +67,8 @@ static gcptr HeadOfRevisionChainList(struct tx_descriptor *d, gcptr G) { + abort(); +#if 0 gcptr R = G; revision_t v; @@ -135,10 +134,13 @@ goto retry; // restart searching from R } return R; +#endif } static inline gcptr AddInReadSet(struct tx_descriptor *d, gcptr R) { + abort(); +#if 0 fprintf(stderr, "AddInReadSet(%p)\n", R); d->count_reads++; if (!fxcache_add(&d->recent_reads_cache, R)) { @@ -153,10 +155,13 @@ // return Localize(d, R); // } return R; +#endif } gcptr stm_DirectReadBarrier(gcptr G) { + abort(); +#if 0 gcptr R; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); @@ -174,19 +179,13 @@ G2L_FIND(d->public_to_private, R, entry, goto not_found); L = entry->val; assert(L->h_revision == stm_local_revision); -#if 0 - if (R_Container && !(R_Container->h_tid & GCFLAG_GLOBAL)) - { /* R_Container is a local object */ - gcptr *ref = (gcptr *)(((char *)R_Container) + offset); - *ref = L; /* fix in-place */ - } -#endif return L; not_found:; } R = AddInReadSet(d, R); return R; +#endif } static gcptr _latest_gcptr(gcptr R) @@ -312,6 +311,8 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { + abort(); +#if 0 if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *entry; @@ -332,6 +333,7 @@ AddInReadSet(d, R); /*mark*/ return L; +#endif } gcptr stm_WriteBarrier(gcptr P) @@ -376,6 +378,8 @@ static _Bool ValidateDuringTransaction(struct tx_descriptor *d, _Bool during_commit) { + abort(); +#if 0 long i, size = d->list_of_read_objects.size; gcptr *items = d->list_of_read_objects.items; @@ -416,6 +420,7 @@ } } return 1; +#endif } static void ValidateNow(struct tx_descriptor *d) @@ -638,8 +643,8 @@ goto retry; gcptr L = item->val; - assert(L->h_revision == stm_local_revision); - assert(v != stm_local_revision); + assert(L->h_revision == stm_private_rev_num); + assert(v != stm_private_rev_num); L->h_revision = v; /* store temporarily this value here */ } G2L_LOOP_END; @@ -647,6 +652,8 @@ static void CancelLocks(struct tx_descriptor *d) { + abort(); +#if 0 revision_t my_lock = d->my_lock; wlog_t *item; @@ -672,6 +679,7 @@ ACCESS_ONCE(R->h_revision) = v; } G2L_LOOP_END; +#endif } static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; @@ -820,15 +828,15 @@ "*************************************\n", (long)cur_time); - revision_t localrev = stm_local_revision; + revision_t localrev = stm_private_rev_num; UpdateProtectedChainHeads(d, cur_time, localrev); smp_wmb(); revision_t newrev = -(cur_time + 1); assert(newrev & 1); - ACCESS_ONCE(stm_local_revision) = newrev; + ACCESS_ONCE(stm_private_rev_num) = newrev; fprintf(stderr, "%p: stm_local_revision = %ld\n", d, (long)newrev); - assert(d->local_revision_ref = &stm_local_revision); + assert(d->private_revision_ref = &stm_private_rev_num); UpdateChainHeads(d, cur_time, localrev); @@ -1027,8 +1035,8 @@ } assert(d->my_lock & 1); assert(d->my_lock > LOCKED); - stm_local_revision = -d->my_lock; /* a unique negative odd value */ - d->local_revision_ref = &stm_local_revision; + stm_private_rev_num = -1; + d->private_revision_ref = &stm_private_rev_num; d->max_aborts = -1; thread_descriptor = d; diff --git a/c3/et.h b/c3/et.h --- a/c3/et.h +++ 
b/c3/et.h @@ -1,7 +1,7 @@ /*** Extendable Timestamps * * Documentation: - * https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/stm2012/stmimpl.rst + * doc-*.txt * * This is very indirectly based on rstm_r5/stm/et.hpp. * See http://www.cs.rochester.edu/research/synchronization/rstm/api.shtml @@ -20,35 +20,28 @@ * collection: "young" objects are the ones in the nursery (plus a few big * ones outside) and will be collected by the following minor collection. * - * Additionally, objects are either "public", "protected" or "private". The - * private objects have h_revision == stm_local_revision and are invisible - * to other threads. They become non-private when the transaction commits. - * - * non-private | private - * +------------------------------------------------------------ - * | - * old | public objects | old private objects - * ---------| - * | - * young | [ protected objects | private objects (--> grows) ] - * (nursery)| + * Additionally, objects are either "public", "protected" or "private". * * GCFLAG_OLD is set on old objects. * * GCFLAG_VISITED is used temporarily during major collections. * + * GCFLAG_PUBLIC is set on public objects. + * + * GCFLAG_BACKUP_COPY means the object is a (protected) backup copy. + * * GCFLAG_PUBLIC_TO_PRIVATE is added to a *public* object that has got a * *private* copy. It is sticky, reset only at the next major collection. * * GCFLAG_PREBUILT_ORIGINAL is only set on the original version of * prebuilt objects. * - * GCFLAG_WRITE_BARRIER is set on *old* *private* objects to track old-to- - * young pointers. It may be left set on *public* objects but is ignored - * there, because the write barrier will trigger anyway on any non-private - * object. On an old private object, it is removed once a write occurs - * and the object is recorded in 'private_old_pointing_to_young'; it is - * set again at the next minor collection. + * GCFLAG_WRITE_BARRIER is set on *old* objects to track old-to- young + * pointers. It may be left set on *public* objects but is ignored + * there, because public objects are read-only. The flag is removed + * once a write occurs and the object is recorded in the list + * 'old_pointing_to_young'; it is set again at the next minor + * collection. * * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. * @@ -56,26 +49,31 @@ * have been stolen. * * GCFLAG_STUB is used for debugging: it's set on stub objects made by - * create_yo_stubs() + * stealing or by major collections. 
*/ #define GCFLAG_OLD (STM_FIRST_GCFLAG << 0) #define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1) -#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 2) +#define GCFLAG_PUBLIC (STM_FIRST_GCFLAG << 2) #define GCFLAG_PREBUILT_ORIGINAL (STM_FIRST_GCFLAG << 3) -#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 4) -#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 5) -#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 6) -#define GCFLAG_STUB (STM_FIRST_GCFLAG << 7) /* debugging */ +#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 4) +#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 5) +#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6) +#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7) +#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8) +#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ GCFLAG_PREBUILT_ORIGINAL | \ - GCFLAG_OLD) + GCFLAG_OLD | \ + GCFLAG_PUBLIC) #define GC_FLAG_NAMES { "OLD", \ "VISITED", \ + "PUBLIC", \ + "PREBUILT_ORIGINAL", \ + "BACKUP_COPY", \ "PUBLIC_TO_PRIVATE", \ - "PREBUILT_ORIGINAL", \ "WRITE_BARRIER", \ "NURSERY_MOVED", \ "STOLEN", \ @@ -121,12 +119,12 @@ char *longest_abort_info; long long longest_abort_info_time; struct FXCache recent_reads_cache; - revision_t *local_revision_ref; + revision_t *private_revision_ref; struct tx_descriptor *tx_next, *tx_prev; /* a doubly linked list */ }; extern __thread struct tx_descriptor *thread_descriptor; -extern __thread revision_t stm_local_revision; +extern __thread revision_t stm_private_rev_num; /************************************************************/ diff --git a/c3/gcpage.c b/c3/gcpage.c --- a/c3/gcpage.c +++ b/c3/gcpage.c @@ -423,7 +423,7 @@ assert(stmgc_classify(item->addr) == K_PUBLIC); /*..rt(stmgc_classify(item->val) == K_PRIVATE); but in the other thread, which becomes: */ - assert(item->val->h_revision == *d->local_revision_ref); + assert(item->val->h_revision == *d->private_revision_ref); item->addr->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; @@ -559,8 +559,8 @@ { struct tx_descriptor *d; struct tx_descriptor *saved = thread_descriptor; - revision_t saved_local_rev = stm_local_revision; - assert(saved_local_rev == *saved->local_revision_ref); + revision_t saved_private_rev = stm_private_rev_num; + assert(saved_private_rev == *saved->private_revision_ref); for (d = tx_head; d; d = d->tx_next) { /* Force a minor collection to run in the thread 'd'. @@ -572,12 +572,12 @@ /* Hack: temporarily pretend that we "are" the other thread... 
*/ thread_descriptor = d; - stm_local_revision = *d->local_revision_ref; + stm_private_rev_num = *d->private_revision_ref; assert(stmgc_nursery_hiding(d, 0)); stmgc_minor_collect_no_abort(); assert(stmgc_nursery_hiding(d, 1)); thread_descriptor = saved; - stm_local_revision = saved_local_rev; + stm_private_rev_num = saved_private_rev; } } } diff --git a/c3/lists.c b/c3/lists.c --- a/c3/lists.c +++ b/c3/lists.c @@ -223,15 +223,10 @@ /************************************************************/ -void fxcache_clear(struct FXCache *fxcache) +void _fxcache_reset(struct FXCache *fxcache) { - fxcache->shift += 4; - /* FX_ENTRIES+1 entries are needed */ - if (fxcache->shift + FX_ENTRIES + 1 > FX_TOTAL) { + fxcache->shift = 0; memset(fxcache->cache, 0, sizeof(fxcache->cache)); - fxcache->shift = 0; - } - fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift); } /************************************************************/ diff --git a/c3/lists.h b/c3/lists.h --- a/c3/lists.h +++ b/c3/lists.h @@ -168,16 +168,12 @@ /* The fxcache_xx functions implement a fixed-size set of gcptr's. Moreover the gcptr's in the set are mapped to small integers. In case - of collisions, old items are discarded. The cache uses 3-way caching, - stored in 3 consecutive entries, but the 3 entries are in "cache lines" - that are only aligned to a multiple of 2. This means that among the 3 - items, the item 0 overlaps with the item 2 of the previous cache line, - and the item 2 overlaps with the item 0 of the following cache line. - The item 1 can only be seen by the current cache line. + of collisions, old items are discarded. The cache doesn't use + multi-way caching for now. - The cache itself uses a total of FX_ENTRIES+1 entries in the 'cache' - array below, starting at 'cache_start'. The reason it is bigger than - necessary is that fxcache_clear() simply shifts 'cache_start', making + The cache itself uses a total of FX_ENTRIES entries in the 'cache' + array below, starting at 'shift'. The reason it is bigger than + necessary is that fxcache_clear() simply increments 'shift', making any previous entries invalid by not being in the correct position any more. */ @@ -187,41 +183,18 @@ struct FXCache { char *cache_start; - revision_t nextadd; revision_t shift; revision_t cache[FX_TOTAL]; }; -void fxcache_clear(struct FXCache *fxcache); +void _fxcache_reset(struct FXCache *fxcache); -static inline int fxcache_add(struct FXCache *fxcache, gcptr item) { - /* If 'item' is not in the cache, add it and returns 0. - If it is already, return 1. 
- */ - revision_t uitem = (revision_t)item; - /* 'entry' points to 'cache_start[mask of uitem, even-valued]' */ - revision_t *entry = (revision_t *) - (fxcache->cache_start + (uitem & ((FX_ENTRIES-2) * sizeof(revision_t)))); - revision_t current; - - current = entry[1]; /* first look here, the cache-private entry */ - if (current == uitem) - return 1; - - if (entry[0] == uitem) { - entry[0] = current; /* move from this collidable entry to */ - entry[1] = uitem; /* the cache-private entry */ - return 1; - } - if (entry[2] == uitem) { - entry[2] = current; /* move from this collidable entry to */ - entry[1] = uitem; /* the cache-private entry */ - return 1; - } - - entry[fxcache->nextadd] = uitem; - fxcache->nextadd ^= 2; - return 0; +static inline void fxcache_clear(struct FXCache *fxcache) +{ + fxcache->shift++; + if (fxcache->shift > FX_TOTAL - FX_ENTRIES) + _fxcache_reset(fxcache); + fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift); } /************************************************************/ diff --git a/c3/nursery.c b/c3/nursery.c --- a/c3/nursery.c +++ b/c3/nursery.c @@ -29,7 +29,7 @@ enum protection_class_t stmgc_classify(gcptr obj) { /* note that this function never returns K_OLD_PRIVATE. */ - if (obj->h_revision == stm_local_revision) + if (obj->h_revision == stm_private_rev_num) return K_PRIVATE; if (is_young(obj)) return K_PROTECTED; @@ -42,7 +42,7 @@ /* for assertions only; moreover this function returns K_PRIVATE only for young private objects, and K_OLD_PRIVATE for old ones. */ struct tx_descriptor *d = thread_descriptor; - int private = (obj->h_revision == stm_local_revision); + int private = (obj->h_revision == stm_private_rev_num); enum protection_class_t e; if (is_in_nursery(d, obj)) { @@ -128,7 +128,7 @@ { gcptr p = stmgcpage_malloc(size); memset(p, 0, size); - p->h_revision = stm_local_revision; + p->h_revision = stm_private_rev_num; p->h_tid = GCFLAG_OLD; return p; } @@ -146,7 +146,7 @@ } stm_dbgmem_used_again(cur, size, 1); gcptr p = (gcptr)cur; - p->h_revision = stm_local_revision; + p->h_revision = stm_private_rev_num; return p; } @@ -185,7 +185,7 @@ GCFLAG_PREBUILT_ORIGINAL | GCFLAG_WRITE_BARRIER | GCFLAG_OLD); - localobj->h_revision = stm_local_revision; + localobj->h_revision = stm_private_rev_num; return localobj; } @@ -413,7 +413,7 @@ /* nb. don't use stmgc_classify() here, because some objects trigger an assert at this point: young non-nursery objects which just grew the flag GCFLAG_OLD */ - assert(obj->h_revision != stm_local_revision); /* not a private object */ + assert(obj->h_revision != stm_private_rev_num); /* not a private object */ PATCH_ROOT_WITH(obj); goto retry; } @@ -490,7 +490,7 @@ /* then we record the dependency in the dictionary 'public_to_private' */ - assert(L->h_revision == stm_local_revision); + assert(L->h_revision == stm_private_rev_num); g2l_insert(&d->public_to_private, R, L); /*mark*/ } @@ -956,6 +956,8 @@ static gcptr extract_from_foreign_nursery(struct tx_descriptor *source_d, gcptr R) { + abort(); +#if 0 /* "Stealing": this function follows a chain of protected objects in the foreign nursery of the thread 'source_d'. It copies the last one outside the nursery, and return it. 
*/ @@ -1011,6 +1013,7 @@ source_d->stolen_objects.size - 1); return N; +#endif } void stmgc_public_to_foreign_protected(gcptr P) @@ -1112,7 +1115,7 @@ /* we re-insert L as a private copy of the public object N */ N->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; - assert(L->h_revision == stm_local_revision); + assert(L->h_revision == stm_private_rev_num); g2l_insert(&d->public_to_private, N, L); gcptrlist_insert(&d->public_to_young, N); } diff --git a/c3/stmgc.h b/c3/stmgc.h --- a/c3/stmgc.h +++ b/c3/stmgc.h @@ -21,7 +21,7 @@ #define STM_SIZE_OF_USER_TID (sizeof(revision_t) / 2) /* in bytes */ #define STM_FIRST_GCFLAG (1L << (8 * STM_SIZE_OF_USER_TID)) #define STM_USER_TID_MASK (STM_FIRST_GCFLAG - 1) -#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 8)) +#define PREBUILT_FLAGS (STM_FIRST_GCFLAG * (1 + 2 + 4 + 8)) #define PREBUILT_REVISION 1 diff --git a/c3/stmsync.c b/c3/stmsync.c --- a/c3/stmsync.c +++ b/c3/stmsync.c @@ -78,8 +78,11 @@ gcptr stm_read_barrier(gcptr obj) { /* XXX inline in the caller */ + abort(); +#if 0 if (UNLIKELY(obj->h_revision != stm_local_revision)) obj = stm_DirectReadBarrier(obj); +#endif return obj; } @@ -87,7 +90,7 @@ { /* XXX inline in the caller */ if (UNLIKELY(((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) | - (obj->h_revision != stm_local_revision))) + (obj->h_revision != stm_private_rev_num))) obj = stm_WriteBarrier(obj); return obj; } diff --git a/c3/test/support.py b/c3/test/support.py --- a/c3/test/support.py +++ b/c3/test/support.py @@ -80,7 +80,7 @@ void rawsetlong(gcptr, long, long); gcptr pseudoprebuilt(size_t size, int tid); - revision_t get_local_revision(void); + revision_t get_private_rev_num(void); revision_t get_start_time(void); gcptr *addr_of_thread_local(void); @@ -112,6 +112,7 @@ extern revision_t stm_global_cur_time(void); extern void stmgcpage_add_prebuilt_root(gcptr); extern void stm_clear_between_tests(void); + extern revision_t get_private_rev_num(void); int gettid(gcptr obj) { @@ -194,11 +195,6 @@ return x; } - revision_t get_local_revision(void) - { - return stm_local_revision; - } - revision_t get_start_time(void) { return thread_descriptor->start_time; @@ -421,7 +417,8 @@ def nalloc(size): "Allocate a fresh object from the nursery" p = lib.stm_allocate(size, 42 + size) - assert p.h_revision == lib.get_local_revision() + assert p.h_tid == 42 + size # no GC flags + assert p.h_revision == lib.get_private_rev_num() return p def nalloc_refs(nrefs): @@ -442,6 +439,7 @@ p = lib.pseudoprebuilt(HDR + WORD * nrefs, 421 + nrefs) return p +gettid = lib.gettid setptr = lib.setptr getptr = lib.getptr rawsetptr = lib.rawsetptr diff --git a/c3/test/test_et.py b/c3/test/test_et.py new file mode 100644 --- /dev/null +++ b/c3/test/test_et.py @@ -0,0 +1,64 @@ +import py +from support import * + + +def setup_function(f): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) + +def teardown_function(_): + lib.stm_finalize() + + +def test_freshly_created(): + p = nalloc(HDR) + r = lib.get_private_rev_num() + assert r < 0 and r % 2 == 1 + assert p.h_revision == r + assert p.h_tid == lib.gettid(p) | 0 # no GC flags + +def test_write_barrier_private(): + p = nalloc(HDR) + assert lib.stm_write_barrier(p) == p + assert p.h_revision == lib.get_private_rev_num() + assert p.h_tid == lib.gettid(p) | 0 # no GC flags + +def test_protected_no_backup(): + p = nalloc(HDR) + r = lib.get_private_rev_num() + assert p.h_revision == r + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + r2 = lib.get_private_rev_num() + assert r2 < 0 
and r2 % 2 == 1 + assert r != r2 + assert p.h_revision == r + assert p.h_tid == lib.gettid(p) | 0 # no GC flags + +def test_private_with_backup(): + p = nalloc(HDR) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + r2 = lib.get_private_rev_num() + assert p.h_revision != r2 + p2 = lib.stm_write_barrier(p) + assert p2 == p # does not move + assert p.h_revision == r2 + +def test_get_backup_copy(): + p = nalloc(HDR + WORD) + lib.setlong(p, 0, 78927812) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + org_r = p.h_revision + lib.setlong(p, 0, 927122) + assert p.h_revision == lib.get_private_rev_num() + pback = lib.stm_get_backup_copy(p) + assert pback and pback != p + assert pback.h_revision == org_r + assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY + assert lib.rawgetlong(pback, 0) == 78927812 + assert lib.rawgetlong(p, 0) == 927122 + +def test_protected_with_backup(): + xxx From noreply at buildbot.pypy.org Wed Jun 5 15:57:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 15:57:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130605135738.A62651C3333@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r65:7526350df472 Date: 2013-06-05 15:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/7526350df472/ Log: in-progress diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -286,27 +286,33 @@ #endif } -static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr R) +static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { - assert(!(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); + gcptr B; - /* duplicate, and save the original R->h_revision into an extra - word allocated just after L */ - assert(R->h_revision & 1); - gcptr L = stmgc_duplicate(R, R->h_revision); + assert(P->h_revision != stm_private_rev_num); + assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); + assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(P->h_tid & GCFLAG_STUB)); - /* cross-thread memory barrier: make sure the local object is correct - and has h_revision == stm_local_revision, and the extra word is - written as well; when it is done, and only then, then we change - R->h_revision */ - smp_wmb(); - - R->h_revision = (revision_t)L; - - gcptrlist_insert(&d->protected_with_private_copy, R); - AddInReadSet(d, R); - /*mark*/ - return L; + if (P->h_revision & 1) + { + /* does not have a backup yet */ + B = stmgc_duplicate(P, 0); + B->h_tid |= GCFLAG_BACKUP_COPY; + } + else + { + size_t size = stmcb_size(P); + B = (gcptr)P->h_revision; + assert(B->h_tid & GCFLAG_BACKUP_COPY); + memcpy(B + 1, P + 1, size - sizeof(*B)); + } + assert(B->h_tid & GCFLAG_BACKUP_COPY); + g2l_insert(&d->private_to_backup, P, B); + P->h_revision = stm_private_rev_num; + return P; } static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) @@ -338,35 +344,25 @@ gcptr stm_WriteBarrier(gcptr P) { - gcptr R, W; + gcptr W; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - /* must normalize the situation now, otherwise we risk that - LocalizePublic creates a new private version of a public - object that has got one, attached to the equivalent stolen - protected object */ - if (gcptrlist_size(&d->stolen_objects) > 0) - stmgc_normalize_stolen_objects(); - - /* XXX optimize me based on common patterns */ - R = HeadOfRevisionChainList(d, P); - - switch (stmgc_classify(R)) { - case K_PRIVATE: W = R; break; - case K_PROTECTED: W = LocalizeProtected(d, R); break; - case 
K_PUBLIC: W = LocalizePublic(d, R); break; - default: abort(); - } - - if (W->h_tid & GCFLAG_WRITE_BARRIER) - stmgc_write_barrier(W); + W = LocalizeProtected(d, P); fprintf(stderr, "write_barrier: %p -> %p\n", P, W); return W; } +gcptr stm_get_backup_copy(gcptr P) +{ + struct tx_descriptor *d = thread_descriptor; + wlog_t *entry; + G2L_FIND(d->private_to_backup, P, entry, return NULL); + return entry->val; +} + /************************************************************/ static revision_t GetGlobalCurTime(struct tx_descriptor *d) @@ -527,6 +523,7 @@ } gcptrlist_clear(&d->list_of_read_objects); + g2l_clear(&d->private_to_backup); stmgc_abort_transaction(d); fprintf(stderr, @@ -590,6 +587,7 @@ d->start_real_time.tv_nsec = -1; } assert(d->list_of_read_objects.size == 0); + assert(!g2l_any_entry(&d->private_to_backup)); stmgc_start_transaction(d); d->count_reads = 1; @@ -775,6 +773,23 @@ } } +void TurnPrivateWithBackupToProtected(struct tx_descriptor *d, + revision_t cur_time) +{ + wlog_t *item; + G2L_LOOP_FORWARD(d->private_to_backup, item) + { + gcptr P = item->addr; + gcptr B = item->val; + assert(P->h_revision == stm_private_rev_num); + assert(B->h_tid & GCFLAG_BACKUP_COPY); + B->h_revision = cur_time; + P->h_revision = (revision_t)B; + + } G2L_LOOP_END; + g2l_clear(&d->private_to_backup); +} + void CommitTransaction(void) { /* must save roots around this call */ revision_t cur_time; @@ -828,6 +843,8 @@ "*************************************\n", (long)cur_time); + TurnPrivateWithBackupToProtected(d, cur_time); + revision_t localrev = stm_private_rev_num; UpdateProtectedChainHeads(d, cur_time, localrev); smp_wmb(); @@ -1057,6 +1074,7 @@ thread_descriptor = NULL; + g2l_delete(&d->private_to_backup); gcptrlist_delete(&d->list_of_read_objects); gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); diff --git a/c3/et.h b/c3/et.h --- a/c3/et.h +++ b/c3/et.h @@ -116,6 +116,7 @@ unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; struct GcPtrList abortinfo; + struct G2L private_to_backup; char *longest_abort_info; long long longest_abort_info_time; struct FXCache recent_reads_cache; @@ -175,6 +176,7 @@ gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr, int *); +gcptr stm_get_backup_copy(gcptr); int DescriptorInit(void); void DescriptorDone(void); diff --git a/c3/nursery.c b/c3/nursery.c --- a/c3/nursery.c +++ b/c3/nursery.c @@ -180,12 +180,14 @@ memcpy(localobj, globalobj, size); assert(!(localobj->h_tid & GCFLAG_NURSERY_MOVED)); +#if 0 localobj->h_tid &= ~(GCFLAG_VISITED | GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_PREBUILT_ORIGINAL | GCFLAG_WRITE_BARRIER | GCFLAG_OLD); localobj->h_revision = stm_private_rev_num; +#endif return localobj; } diff --git a/c3/test/support.py b/c3/test/support.py --- a/c3/test/support.py +++ b/c3/test/support.py @@ -68,6 +68,7 @@ void stm_start_sharedlock(void); void stm_stop_sharedlock(void); void AbortTransaction(int); + gcptr stm_get_backup_copy(gcptr); gcptr getptr(gcptr, long); void setptr(gcptr, long, gcptr); @@ -90,12 +91,15 @@ /* some constants normally private that are useful in the tests */ #define WORD ... #define GC_PAGE_SIZE ... + #define GCFLAG_OLD ... #define GCFLAG_VISITED ... + #define GCFLAG_PUBLIC ... + #define GCFLAG_PREBUILT_ORIGINAL ... + #define GCFLAG_BACKUP_COPY ... #define GCFLAG_PUBLIC_TO_PRIVATE ... - #define GCFLAG_PREBUILT_ORIGINAL ... #define GCFLAG_WRITE_BARRIER ... #define GCFLAG_NURSERY_MOVED ... - #define GCFLAG_OLD ... + #define GCFLAG_STOLEN ... 
#define GCFLAG_STUB ... #define ABRT_MANUAL ... typedef struct { ...; } page_header_t; @@ -509,3 +513,17 @@ def abort_and_retry(): lib.AbortTransaction(lib.ABRT_MANUAL) + +def classify(p): + private = p.h_revision == lib.get_private_rev_num() + public = (p.h_tid & GCFLAG_PUBLIC) != 0 + backup = (p.h_tid & GCFLAG_BACKUP_COPY) != 0 + assert private + public + backup <= 1 + if private: + return "private" + if public: + return "public" + if backup: + return "backup" + else: + return "protected" diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -7,6 +7,8 @@ lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) def teardown_function(_): + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() lib.stm_finalize() @@ -16,12 +18,14 @@ assert r < 0 and r % 2 == 1 assert p.h_revision == r assert p.h_tid == lib.gettid(p) | 0 # no GC flags + assert classify(p) == "private" def test_write_barrier_private(): p = nalloc(HDR) assert lib.stm_write_barrier(p) == p assert p.h_revision == lib.get_private_rev_num() assert p.h_tid == lib.gettid(p) | 0 # no GC flags + assert classify(p) == "private" def test_protected_no_backup(): p = nalloc(HDR) @@ -34,6 +38,7 @@ assert r != r2 assert p.h_revision == r assert p.h_tid == lib.gettid(p) | 0 # no GC flags + assert classify(p) == "protected" def test_private_with_backup(): p = nalloc(HDR) @@ -41,9 +46,11 @@ lib.stm_begin_inevitable_transaction() r2 = lib.get_private_rev_num() assert p.h_revision != r2 + assert classify(p) == "protected" p2 = lib.stm_write_barrier(p) assert p2 == p # does not move assert p.h_revision == r2 + assert classify(p) == "private" def test_get_backup_copy(): p = nalloc(HDR + WORD) @@ -59,6 +66,21 @@ assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY assert lib.rawgetlong(pback, 0) == 78927812 assert lib.rawgetlong(p, 0) == 927122 + assert classify(p) == "private" + assert classify(pback) == "backup" def test_protected_with_backup(): - xxx + p = nalloc(HDR + WORD) + lib.setlong(p, 0, 78927812) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + lib.setlong(p, 0, 927122) + pback = lib.stm_get_backup_copy(p) + assert pback != p + assert p.h_revision == lib.get_private_rev_num() + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert lib.stm_get_backup_copy(p) == ffi.NULL + assert classify(p) == "protected" + assert classify(pback) == "backup" + assert ffi.cast("revision_t *", p.h_revision) == pback From noreply at buildbot.pypy.org Wed Jun 5 16:02:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 16:02:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the test. Message-ID: <20130605140217.3DCC81C3333@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64797:cb46a58acf67 Date: 2013-06-05 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/cb46a58acf67/ Log: Fix the test. 
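	The old assertion matched an empty trace template; the new one asserts directly
	that no residual operations are attributed to the "long_op" id. Roughly, the
	intent of the new check is the following (a sketch only -- `loop` and
	`ops_by_id` are the objects already used by the test_pypy_c harness in this
	file, as visible in the patch below):

	    # sketch, not part of the changeset: what the fixed test checks
	    ops = loop.ops_by_id("long_op")   # residual JIT operations recorded for that id
	    assert len(ops) == 0              # nothing must remain for that section
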
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -407,5 +407,4 @@ log = self.run(main, [300]) loop, = log.loops_by_id("long_op") - assert loop.match(""" - """) + assert len(loop.ops_by_id("long_op")) == 0 From noreply at buildbot.pypy.org Wed Jun 5 16:44:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 16:44:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a passing test Message-ID: <20130605144404.107E61C1401@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r66:48bc3f6660a8 Date: 2013-06-05 16:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/48bc3f6660a8/ Log: Add a passing test diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -84,3 +84,24 @@ assert classify(p) == "protected" assert classify(pback) == "backup" assert ffi.cast("revision_t *", p.h_revision) == pback + +def test_protected_backup_reused(): + p = nalloc(HDR + WORD) + lib.setlong(p, 0, 78927812) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + lib.setlong(p, 0, 927122) + pback = lib.stm_get_backup_copy(p) + assert pback != p + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert lib.stm_get_backup_copy(p) == ffi.NULL + assert classify(p) == "protected" + assert classify(pback) == "backup" + assert lib.rawgetlong(p, 0) == 927122 + assert lib.rawgetlong(pback, 0) == 78927812 # but should not be used + lib.setlong(p, 0, 43891) + assert p.h_revision == lib.get_private_rev_num() + assert pback == lib.stm_get_backup_copy(p) + assert lib.rawgetlong(p, 0) == 43891 + assert lib.rawgetlong(pback, 0) == 927122 From noreply at buildbot.pypy.org Wed Jun 5 16:52:42 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 16:52:42 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add a passing test: Message-ID: <20130605145242.291C41C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64798:649d62298da1 Date: 2013-06-05 15:37 +0200 http://bitbucket.org/pypy/pypy/changeset/649d62298da1/ Log: add a passing test: diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -106,5 +106,6 @@ 'aaa': 'bbb'} def test_decode_object_nonstring_key(self): - pass # write me when we have numbers - + import _fastjson + raises(ValueError, "_fastjson.loads('{42: 43}')") + From noreply at buildbot.pypy.org Wed Jun 5 16:52:43 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 16:52:43 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: decoding arrays Message-ID: <20130605145243.7B2BD1C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64799:0fce473a480a Date: 2013-06-05 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/0fce473a480a/ Log: decoding arrays diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -72,6 +72,9 @@ return self.decode_string() elif ch.isdigit() or ch == '-': return self.decode_numeric() + elif ch == '[': + self.next() + return self.decode_array() elif ch == '{': self.next() return self.decode_object() @@ -134,6 +137,30 @@ 
self._raise("Expected digit at char %d", self.i) return intval, count + def decode_array(self): + start = self.i + w_list = self.space.newlist([]) + self.skip_whitespace() + while not self.eof(): + ch = self.peek() + if ch == ']': + self.next() + return w_list + w_item = self.decode_any() + self.space.call_method(w_list, 'append', w_item) + self.skip_whitespace() + if self.eof(): + break + ch = self.next() + if ch == ']': + return w_list + elif ch == ',': + pass + else: + self._raise("Unexpected '%s' when decoding array (char %d)", + ch, self.i) + self._raise("Unterminated array starting at char %d", start) + def decode_object(self): start = self.i @@ -150,7 +177,7 @@ if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) self.skip_whitespace() - ch = self.next() + ch = self.next() # XXX if ch != ':': self._raise("No ':' found at char %d", self.i) self.skip_whitespace() diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -109,3 +109,11 @@ import _fastjson raises(ValueError, "_fastjson.loads('{42: 43}')") + def test_decode_array(self): + import _fastjson + assert _fastjson.loads('[]') == [] + assert _fastjson.loads('[ ]') == [] + assert _fastjson.loads('[1]') == [1] + assert _fastjson.loads('[1, 2]') == [1, 2] + raises(ValueError, "_fastjson.loads('[1: 2]')") + raises(ValueError, "_fastjson.loads('[1, 2')") From noreply at buildbot.pypy.org Wed Jun 5 16:52:44 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 16:52:44 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add two tests and the corresponding fixes Message-ID: <20130605145244.BCE711C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64800:ca39af17b48f Date: 2013-06-05 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ca39af17b48f/ Log: add two tests and the corresponding fixes diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -177,7 +177,9 @@ if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) self.skip_whitespace() - ch = self.next() # XXX + if self.eof(): + break + ch = self.next() if ch != ':': self._raise("No ':' found at char %d", self.i) self.skip_whitespace() @@ -185,6 +187,8 @@ w_value = self.decode_any() self.space.setitem(w_dict, w_name, w_value) self.skip_whitespace() + if self.eof(): + break ch = self.next() if ch == '}': return w_dict diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -104,6 +104,8 @@ s = '{"hello": "world", "aaa": "bbb"}' assert _fastjson.loads(s) == {'hello': 'world', 'aaa': 'bbb'} + raises(ValueError, _fastjson.loads, '{"key"') + raises(ValueError, _fastjson.loads, '{"key": 42') def test_decode_object_nonstring_key(self): import _fastjson From noreply at buildbot.pypy.org Wed Jun 5 16:52:46 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 16:52:46 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: raise the appropriate applevel exception if we get an invalid utf8 Message-ID: <20130605145246.22CD81C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni 
Branch: fastjson Changeset: r64801:86bc837fcae3 Date: 2013-06-05 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/86bc837fcae3/ Log: raise the appropriate applevel exception if we get an invalid utf8 diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -213,7 +213,7 @@ content_utf8 = self.getslice(start, self.i-1) if bits & 0x80: # the 8th bit is set, it's an utf8 strnig - content_unicode = content_utf8.decode('utf-8') + content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) else: # ascii only, faster to decode content_unicode = content_utf8.decode('ascii') diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -69,6 +69,12 @@ s = r'"\u1234"' assert _fastjson.loads(s) == u'\u1234' + def test_invalid_utf_8(self): + import _fastjson + s = '"\xe0"' # this is an invalid UTF8 sequence inside a string + raises(UnicodeDecodeError, "_fastjson.loads(s)") + + def test_decode_numeric(self): import _fastjson def check(s, val): From noreply at buildbot.pypy.org Wed Jun 5 16:52:47 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 5 Jun 2013 16:52:47 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: parsing of null, true and false Message-ID: <20130605145247.A045A1C1401@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64802:5e34f815e7f7 Date: 2013-06-05 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/5e34f815e7f7/ Log: parsing of null, true and false diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -78,10 +78,47 @@ elif ch == '{': self.next() return self.decode_object() + elif ch == 'n': + self.next() + return self.decode_null() + elif ch == 't': + self.next() + return self.decode_true() + elif ch == 'f': + self.next() + return self.decode_false() else: self._raise("No JSON object could be decoded: unexpected '%s' at char %d", ch, self.i) + def decode_null(self): + N = len('ull') + if (self.i+N <= len(self.s) and + self.next() == 'u' and + self.next() == 'l' and + self.next() == 'l'): + return self.space.w_None + self._raise("Error when decoding null at char %d", self.i) + + def decode_true(self): + N = len('rue') + if (self.i+N <= len(self.s) and + self.next() == 'r' and + self.next() == 'u' and + self.next() == 'e'): + return self.space.w_True + self._raise("Error when decoding true at char %d", self.i) + + def decode_false(self): + N = len('alse') + if (self.i+N <= len(self.s) and + self.next() == 'a' and + self.next() == 'l' and + self.next() == 's' and + self.next() == 'e'): + return self.space.w_False + self._raise("Error when decoding false at char %d", self.i) + def decode_numeric(self): intval = self.parse_integer() # diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -19,6 +19,28 @@ class AppTest(object): spaceconfig = {"objspace.usemodules._fastjson": True} + def test_decode_constants(self): + import _fastjson + assert _fastjson.loads('null') is None + raises(ValueError, _fastjson.loads, 'nul') + raises(ValueError, _fastjson.loads, 'nu') + 
raises(ValueError, _fastjson.loads, 'n') + raises(ValueError, _fastjson.loads, 'nuXX') + # + assert _fastjson.loads('true') is True + raises(ValueError, _fastjson.loads, 'tru') + raises(ValueError, _fastjson.loads, 'tr') + raises(ValueError, _fastjson.loads, 't') + raises(ValueError, _fastjson.loads, 'trXX') + # + assert _fastjson.loads('false') is False + raises(ValueError, _fastjson.loads, 'fals') + raises(ValueError, _fastjson.loads, 'fal') + raises(ValueError, _fastjson.loads, 'fa') + raises(ValueError, _fastjson.loads, 'f') + raises(ValueError, _fastjson.loads, 'falXX') + + def test_decode_string(self): import _fastjson res = _fastjson.loads('"hello"') From noreply at buildbot.pypy.org Wed Jun 5 16:59:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 16:59:17 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20130605145917.D71111C1401@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r67:baa39610ff9b Date: 2013-06-05 16:47 +0200 http://bitbucket.org/pypy/stmgc/changeset/baa39610ff9b/ Log: Next test diff --git a/c3/doc-objects.txt b/c3/doc-objects.txt --- a/c3/doc-objects.txt +++ b/c3/doc-objects.txt @@ -50,7 +50,7 @@ - original obj after GC killed the backup GT Public objects: -- prebuilt object, never modified -1 +- prebuilt object, never modified 1 - other public object, never modified GT - outdated, has a protected copy HANDLE to prot/priv copy - outdated, target stolen ptr to a more recent public copy diff --git a/c3/test/support.py b/c3/test/support.py --- a/c3/test/support.py +++ b/c3/test/support.py @@ -436,6 +436,7 @@ def palloc(size): "Get a ``prebuilt'' object." p = lib.pseudoprebuilt(size, 42 + size) + assert p.h_revision == 1 return p def palloc_refs(nrefs): diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -105,3 +105,12 @@ assert pback == lib.stm_get_backup_copy(p) assert lib.rawgetlong(p, 0) == 43891 assert lib.rawgetlong(pback, 0) == 927122 + +def test_prebuilt_is_public(): + p = palloc(HDR) + assert p.h_revision == 1 + assert p.h_tid == lib.gettid(p) | (GCFLAG_OLD | + GCFLAG_VISITED | + GCFLAG_PUBLIC | + GCFLAG_PREBUILT_ORIGINAL) + assert classify(p) == "public" From noreply at buildbot.pypy.org Wed Jun 5 16:59:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 16:59:19 +0200 (CEST) Subject: [pypy-commit] stmgc default: public->private Message-ID: <20130605145919.0E3F71C1401@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68:2f624ecb97a7 Date: 2013-06-05 16:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/2f624ecb97a7/ Log: public->private diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -317,29 +317,32 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { - abort(); -#if 0 if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *entry; gcptr L; G2L_FIND(d->public_to_private, R, entry, goto not_found); L = entry->val; - assert(L->h_revision == stm_local_revision); + assert(L->h_revision == stm_private_rev_num); /* private object */ return L; } - else - R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; + R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; not_found:; gcptr L = stmgc_duplicate(R, 0); - assert(L->h_revision == stm_local_revision); + assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(L->h_tid & GCFLAG_STOLEN)); + assert(!(L->h_tid & GCFLAG_STUB)); + L->h_tid &= ~(GCFLAG_OLD | + GCFLAG_VISITED | + GCFLAG_PUBLIC | + GCFLAG_PREBUILT_ORIGINAL | + GCFLAG_PUBLIC_TO_PRIVATE | + GCFLAG_WRITE_BARRIER | + 
0); + L->h_revision = stm_private_rev_num; g2l_insert(&d->public_to_private, R, L); - gcptrlist_insert(&d->public_to_young, R); - AddInReadSet(d, R); - /*mark*/ return L; -#endif } gcptr stm_WriteBarrier(gcptr P) @@ -348,7 +351,12 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - W = LocalizeProtected(d, P); + /*P = stm_DirectReadBarrier(P);*/ + + if (P->h_tid & GCFLAG_PUBLIC) + W = LocalizePublic(d, P); + else + W = LocalizeProtected(d, P); fprintf(stderr, "write_barrier: %p -> %p\n", P, W); diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -114,3 +114,15 @@ GCFLAG_PUBLIC | GCFLAG_PREBUILT_ORIGINAL) assert classify(p) == "public" + +def test_change_prebuilt_object(): + p = palloc(HDR + WORD) + lib.rawsetlong(p, 0, 28971289) + flags = p.h_tid + assert (flags & GCFLAG_PUBLIC_TO_PRIVATE) == 0 + assert classify(p) == "public" + p2 = lib.stm_write_barrier(p) + assert p2 != p + assert classify(p) == "public" + assert classify(p2) == "private" + assert p.h_tid == flags | GCFLAG_PUBLIC_TO_PRIVATE From noreply at buildbot.pypy.org Wed Jun 5 16:59:20 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 16:59:20 +0200 (CEST) Subject: [pypy-commit] stmgc default: This test passes, but probably only because it's simple enough. Message-ID: <20130605145920.1D1901C1401@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69:a31993e9805a Date: 2013-06-05 16:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/a31993e9805a/ Log: This test passes, but probably only because it's simple enough. diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -115,9 +115,8 @@ GCFLAG_PREBUILT_ORIGINAL) assert classify(p) == "public" -def test_change_prebuilt_object(): - p = palloc(HDR + WORD) - lib.rawsetlong(p, 0, 28971289) +def test_prebuilt_object_to_private(): + p = palloc(HDR) flags = p.h_tid assert (flags & GCFLAG_PUBLIC_TO_PRIVATE) == 0 assert classify(p) == "public" @@ -126,3 +125,17 @@ assert classify(p) == "public" assert classify(p2) == "private" assert p.h_tid == flags | GCFLAG_PUBLIC_TO_PRIVATE + +def test_commit_change_to_prebuilt_object(): + p = palloc(HDR + WORD) + lib.rawsetlong(p, 0, 28971289) + p2 = lib.stm_write_barrier(p) + assert p2 != p + assert classify(p) == "public" + assert classify(p2) == "private" + lib.rawsetlong(p, 0, 1289222) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p) == "public" + assert classify(p2) == "protected" + assert p.h_revision == int(ffi.cast("revision_t", p2)) + 2 From noreply at buildbot.pypy.org Wed Jun 5 17:44:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 17:44:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fixes Message-ID: <20130605154435.42EDF1C1007@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r70:5c86454b6e59 Date: 2013-06-05 17:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/5c86454b6e59/ Log: Fixes diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -716,15 +716,6 @@ #endif L->h_revision = new_revision; - if (is_young(L)) - { - item->val = (gcptr)(((revision_t)L) | 2); -#ifdef DUMP_EXTRA - fprintf(stderr, "PUBLIC-TO-PROTECTED:\n"); - /*mark*/ -#endif - } - } G2L_LOOP_END; smp_wmb(); /* a memory barrier: make sure the new L->h_revisions are visible @@ -742,11 +733,12 @@ assert(R->h_revision != localrev); #ifdef DUMP_EXTRA - fprintf(stderr, "%p->h_revision = %p (UpdateChainHeads2)\n", + 
fprintf(stderr, "%p->h_revision = %p | 2 (UpdateChainHeads2)\n", R, (gcptr)v); /*mark*/ #endif - ACCESS_ONCE(R->h_revision) = v; + assert(!(v & 3)); + ACCESS_ONCE(R->h_revision) = v | 2; if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -133,9 +133,11 @@ assert p2 != p assert classify(p) == "public" assert classify(p2) == "private" - lib.rawsetlong(p, 0, 1289222) + lib.rawsetlong(p2, 0, 1289222) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() assert classify(p) == "public" assert classify(p2) == "protected" assert p.h_revision == int(ffi.cast("revision_t", p2)) + 2 + assert lib.rawgetlong(p, 0) == 28971289 + assert lib.rawgetlong(p2, 0) == 1289222 From noreply at buildbot.pypy.org Wed Jun 5 18:23:06 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 18:23:06 +0200 (CEST) Subject: [pypy-commit] stmgc default: Starting on the read barrier. Message-ID: <20130605162306.F0E131C11B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r71:27f0605ccd79 Date: 2013-06-05 18:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/27f0605ccd79/ Log: Starting on the read barrier. diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -137,10 +137,9 @@ #endif } +#if 0 static inline gcptr AddInReadSet(struct tx_descriptor *d, gcptr R) { - abort(); -#if 0 fprintf(stderr, "AddInReadSet(%p)\n", R); d->count_reads++; if (!fxcache_add(&d->recent_reads_cache, R)) { @@ -155,37 +154,22 @@ // return Localize(d, R); // } return R; +} #endif -} -gcptr stm_DirectReadBarrier(gcptr G) +gcptr stm_DirectReadBarrier(gcptr P) { - abort(); -#if 0 - gcptr R; struct tx_descriptor *d = thread_descriptor; - assert(d->active >= 1); - /* XXX optimize me based on common patterns */ - R = HeadOfRevisionChainList(d, G); + if (P->h_tid & GCFLAG_PUBLIC) + { + abort(); + /*...*/ + } - if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) - { - if (gcptrlist_size(&d->stolen_objects) > 0) - stmgc_normalize_stolen_objects(); - - wlog_t *entry; - gcptr L; - G2L_FIND(d->public_to_private, R, entry, goto not_found); - L = entry->val; - assert(L->h_revision == stm_local_revision); - return L; - - not_found:; - } - R = AddInReadSet(d, R); - return R; -#endif + gcptrlist_insert(&d->list_of_read_objects, P); + fxcache_add(&d->recent_reads_cache, P); + return P; } static gcptr _latest_gcptr(gcptr R) @@ -371,6 +355,14 @@ return entry->val; } +gcptr stm_get_read_obj(long index) +{ + struct tx_descriptor *d = thread_descriptor; + if (index < gcptrlist_size(&d->list_of_read_objects)) + return d->list_of_read_objects.items[index]; + return NULL; +} + /************************************************************/ static revision_t GetGlobalCurTime(struct tx_descriptor *d) diff --git a/c3/et.h b/c3/et.h --- a/c3/et.h +++ b/c3/et.h @@ -177,6 +177,7 @@ gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr, int *); gcptr stm_get_backup_copy(gcptr); +gcptr stm_get_read_obj(long); /* debugging */ int DescriptorInit(void); void DescriptorDone(void); diff --git a/c3/lists.c b/c3/lists.c --- a/c3/lists.c +++ b/c3/lists.c @@ -223,6 +223,8 @@ /************************************************************/ +__thread char *stm_read_barrier_cache; + void _fxcache_reset(struct FXCache *fxcache) { fxcache->shift = 0; diff --git a/c3/lists.h b/c3/lists.h --- a/c3/lists.h +++ b/c3/lists.h @@ -178,15 +178,17 @@ more. 
*/ -#define FX_ENTRIES 8192 -#define FX_TOTAL (FX_ENTRIES * 2) +#define FX_MASK 65535 +#define FX_ENTRIES ((FX_MASK + 1) / sizeof(char *)) +#define FX_TOTAL (FX_ENTRIES * 4 / 3) struct FXCache { - char *cache_start; revision_t shift; revision_t cache[FX_TOTAL]; }; +extern __thread char *stm_read_barrier_cache; + void _fxcache_reset(struct FXCache *fxcache); static inline void fxcache_clear(struct FXCache *fxcache) @@ -194,7 +196,16 @@ fxcache->shift++; if (fxcache->shift > FX_TOTAL - FX_ENTRIES) _fxcache_reset(fxcache); - fxcache->cache_start = (char *)(fxcache->cache + fxcache->shift); + stm_read_barrier_cache = (char *)(fxcache->cache + fxcache->shift); +} + +#define FXCACHE_AT(obj) \ + (*(gcptr *)(stm_read_barrier_cache + ((revision_t)(obj) & FX_MASK))) + +static inline void fxcache_add(struct FXCache *fxcache, gcptr newobj) +{ + assert(stm_read_barrier_cache == (char*)(fxcache->cache + fxcache->shift)); + FXCACHE_AT(newobj) = newobj; } /************************************************************/ diff --git a/c3/stmsync.c b/c3/stmsync.c --- a/c3/stmsync.c +++ b/c3/stmsync.c @@ -77,12 +77,10 @@ gcptr stm_read_barrier(gcptr obj) { - /* XXX inline in the caller */ - abort(); -#if 0 - if (UNLIKELY(obj->h_revision != stm_local_revision)) + /* XXX inline in the caller, optimize to get the smallest code */ + if (UNLIKELY((obj->h_revision != stm_private_rev_num) && + (FXCACHE_AT(obj) != obj))) obj = stm_DirectReadBarrier(obj); -#endif return obj; } diff --git a/c3/test/support.py b/c3/test/support.py --- a/c3/test/support.py +++ b/c3/test/support.py @@ -69,6 +69,7 @@ void stm_stop_sharedlock(void); void AbortTransaction(int); gcptr stm_get_backup_copy(gcptr); + gcptr stm_get_read_obj(long index); gcptr getptr(gcptr, long); void setptr(gcptr, long, gcptr); @@ -528,3 +529,14 @@ return "backup" else: return "protected" + +def list_of_read_objects(): + result = [] + index = 0 + while 1: + p = lib.stm_get_read_obj(index) + if p == ffi.NULL: + break + result.append(p) + index += 1 + return result diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -141,3 +141,18 @@ assert p.h_revision == int(ffi.cast("revision_t", p2)) + 2 assert lib.rawgetlong(p, 0) == 28971289 assert lib.rawgetlong(p2, 0) == 1289222 + +def test_read_barrier_private(): + p = nalloc(HDR) + assert lib.stm_read_barrier(p) == p # no effect + assert p.h_tid == gettid(p) + assert p.h_revision == lib.get_private_rev_num() + assert list_of_read_objects() == [] + +def test_read_barrier_protected(): + p = nalloc(HDR) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert list_of_read_objects() == [] + assert lib.stm_read_barrier(p) == p # record as a read object + assert list_of_read_objects() == [p] From noreply at buildbot.pypy.org Wed Jun 5 18:23:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 18:23:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: Read barrier on public objects Message-ID: <20130605162308.3345F1C11B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72:b9dfe1e2b66e Date: 2013-06-05 18:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/b9dfe1e2b66e/ Log: Read barrier on public objects diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -65,78 +65,6 @@ /************************************************************/ -static gcptr HeadOfRevisionChainList(struct tx_descriptor *d, gcptr G) -{ - abort(); -#if 0 - gcptr R = G; - revision_t v; - - retry: - v = ACCESS_ONCE(R->h_revision); - if 
(!(v & 1)) // "is a pointer", i.e. - { // "has a more recent revision" - if (v & 2) - { - old_to_young: - v &= ~2; - if (UNLIKELY(!stmgc_is_young_in(d, (gcptr)v))) - { - stmgc_public_to_foreign_protected(R); - goto retry; - } - R = (gcptr)v; - goto retry; - } - - gcptr R_prev = R; - R = (gcptr)v; - - retry_threelevels: - v = ACCESS_ONCE(R->h_revision); - if (!(v & 1)) // "is a pointer", i.e. - { // "has a more recent revision" - if (v & 2) - goto old_to_young; - - /* we update R_prev->h_revision as a shortcut */ - /* XXX check if this really gives a worse performance than only - doing this write occasionally based on a counter in d */ - gcptr R_next = (gcptr)v; - if (R_next->h_revision == stm_local_revision) - { - /* must not update an older h_revision to go directly to - the private copy at the end of a chain of protected - objects! */ - return R_next; - } - if (R_prev->h_tid & GCFLAG_STOLEN) - { - /* must not update the h_revision of a stolen object! */ - R_prev = R; - R = R_next; - goto retry_threelevels; - } - R_prev->h_revision = v; - R = R_next; - goto retry; - } - } - - if (UNLIKELY(v > d->start_time)) // object too recent? - { - if (v >= LOCKED) - { - SpinLoop(SPLP_LOCKED_INFLIGHT); - goto retry; // spinloop until it is no longer LOCKED - } - ValidateNow(d); // try to move start_time forward - goto retry; // restart searching from R - } - return R; -#endif -} - #if 0 static inline gcptr AddInReadSet(struct tx_descriptor *d, gcptr R) { @@ -163,12 +91,57 @@ if (P->h_tid & GCFLAG_PUBLIC) { - abort(); - /*...*/ + /* follow the chained list of h_revision's as long as they are + regular pointers */ + revision_t v; + + retry: + v = ACCESS_ONCE(P->h_revision); + if (!(v & 1)) // "is a pointer", i.e. + { // "has a more recent revision" + if (v & 2) + { + old_to_young: + abort(); + } + assert(P->h_tid & GCFLAG_PUBLIC); + + gcptr P_prev = P; + P = (gcptr)v; + + /* if we land on a P in read_barrier_cache: just return it */ + if (FXCACHE_AT(P) == P) + return P; + + v = ACCESS_ONCE(P->h_revision); + if (!(v & 1)) // "is a pointer", i.e. + { // "has a more recent revision" + if (v & 2) + goto old_to_young; + assert(P->h_tid & GCFLAG_PUBLIC); + + /* we update P_prev->h_revision as a shortcut */ + /* XXX check if this really gives a worse performance than only + doing this write occasionally based on a counter in d */ + P_prev->h_revision = v; + P = (gcptr)v; + goto retry; + } + } + + if (UNLIKELY(v > d->start_time)) // object too recent? 
+ { + if (v >= LOCKED) + { + SpinLoop(SPLP_LOCKED_INFLIGHT); + goto retry; // spinloop until it is no longer LOCKED + } + ValidateNow(d); // try to move start_time forward + goto retry; // restart searching from P + } } - + fxcache_add(&d->recent_reads_cache, P); gcptrlist_insert(&d->list_of_read_objects, P); - fxcache_add(&d->recent_reads_cache, P); return P; } diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -156,3 +156,25 @@ assert list_of_read_objects() == [] assert lib.stm_read_barrier(p) == p # record as a read object assert list_of_read_objects() == [p] + +def test_read_barrier_public(): + p = palloc(HDR) + assert lib.stm_read_barrier(p) == p + assert list_of_read_objects() == [p] + +def test_read_barrier_public_outdated(): + p1 = palloc(HDR) + p2 = palloc(HDR) + p1.h_revision = ffi.cast("revision_t", p2) + assert lib.stm_read_barrier(p1) == p2 + assert list_of_read_objects() == [p2] + +def test_read_barrier_public_shortcut(): + p1 = palloc(HDR) + p2 = palloc(HDR) + p3 = palloc(HDR) + p1.h_revision = ffi.cast("revision_t", p2) + p2.h_revision = ffi.cast("revision_t", p3) + assert lib.stm_read_barrier(p1) == p3 + assert list_of_read_objects() == [p3] + assert p1.h_revision == int(ffi.cast("revision_t", p3)) # shortcutted From noreply at buildbot.pypy.org Wed Jun 5 18:30:07 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 5 Jun 2013 18:30:07 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Make the test less specific Message-ID: <20130605163007.564A61C11B7@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64803:1be2efa62157 Date: 2013-06-05 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/1be2efa62157/ Log: Make the test less specific diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -44,21 +44,21 @@ def test_external_loop(self): from numpypy import arange, nditer, array - a = arange(6).reshape(2,3) + a = arange(12).reshape(2,3,2) r = [] n = 0 for x in nditer(a, flags=['external_loop']): r.append(x) n += 1 assert n == 1 - assert (array(r) == [0, 1, 2, 3, 4, 5]).all() + assert (array(r) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]).all() r = [] n = 0 for x in nditer(a, flags=['external_loop'], order='F'): r.append(x) n += 1 assert n == 3 - assert (array(r) == [[0, 3], [1, 4], [2, 5]]).all() + assert (array(r) == [[0, 6], [2, 8], [4, 10], [1, 7], [3, 9], [5, 11]]).all() def test_interface(self): from numpypy import arange, nditer, zeros From noreply at buildbot.pypy.org Wed Jun 5 18:49:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 18:49:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test. Message-ID: <20130605164908.E70C41C01CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73:e3da2bafef96 Date: 2013-06-05 18:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/e3da2bafef96/ Log: Next test. diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -129,6 +129,18 @@ } } + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + { + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + P = item->val; + assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(P->h_revision == stm_private_rev_num); + return P; + } + no_private_obj: + if (UNLIKELY(v > d->start_time)) // object too recent? 
{ if (v >= LOCKED) @@ -299,6 +311,13 @@ 0); L->h_revision = stm_private_rev_num; g2l_insert(&d->public_to_private, R, L); + fprintf(stderr, "write_barrier: adding %p -> %p to public_to_private\n", + R, L); + + /* must remove R from the read_barrier_cache, because returning R is no + longer a valid result */ + fxcache_remove(&d->recent_reads_cache, R); + return L; } @@ -308,7 +327,7 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - /*P = stm_DirectReadBarrier(P);*/ + P = stm_read_barrier(P); if (P->h_tid & GCFLAG_PUBLIC) W = LocalizePublic(d, P); diff --git a/c3/lists.h b/c3/lists.h --- a/c3/lists.h +++ b/c3/lists.h @@ -208,6 +208,12 @@ FXCACHE_AT(newobj) = newobj; } +static inline void fxcache_remove(struct FXCache *fxcache, gcptr oldobj) +{ + assert(stm_read_barrier_cache == (char*)(fxcache->cache + fxcache->shift)); + FXCACHE_AT(oldobj) = NULL; +} + /************************************************************/ #endif diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -178,3 +178,16 @@ assert lib.stm_read_barrier(p1) == p3 assert list_of_read_objects() == [p3] assert p1.h_revision == int(ffi.cast("revision_t", p3)) # shortcutted + +def test_read_barrier_public_to_private(): + p = palloc(HDR) + p2 = lib.stm_write_barrier(p) + assert p2 != p + assert classify(p) == "public" + assert classify(p2) == "private" + assert list_of_read_objects() == [p] + assert p.h_tid & GCFLAG_PUBLIC + assert p.h_tid & GCFLAG_PUBLIC_TO_PRIVATE + p3 = lib.stm_read_barrier(p) + assert p3 == p2 + assert list_of_read_objects() == [p] From noreply at buildbot.pypy.org Wed Jun 5 18:49:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 18:49:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add some more debugging prints Message-ID: <20130605164910.104101C01CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r74:3a3dfae01632 Date: 2013-06-05 18:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/3a3dfae01632/ Log: Add some more debugging prints diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -85,9 +85,10 @@ } #endif -gcptr stm_DirectReadBarrier(gcptr P) +gcptr stm_DirectReadBarrier(gcptr G) { struct tx_descriptor *d = thread_descriptor; + gcptr P = G; if (P->h_tid & GCFLAG_PUBLIC) { @@ -111,7 +112,10 @@ /* if we land on a P in read_barrier_cache: just return it */ if (FXCACHE_AT(P) == P) - return P; + { + fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P); + return P; + } v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
@@ -137,6 +141,7 @@ P = item->val; assert(!(P->h_tid & GCFLAG_PUBLIC)); assert(P->h_revision == stm_private_rev_num); + fprintf(stderr, "read_barrier: %p -> %p public_to_private\n", G, P); return P; } no_private_obj: @@ -151,6 +156,11 @@ ValidateNow(d); // try to move start_time forward goto retry; // restart searching from P } + fprintf(stderr, "read_barrier: %p -> %p public\n", G, P); + } + else + { + fprintf(stderr, "read_barrier: %p -> %p protected\n", G, P); } fxcache_add(&d->recent_reads_cache, P); gcptrlist_insert(&d->list_of_read_objects, P); From noreply at buildbot.pypy.org Wed Jun 5 20:48:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 20:48:16 +0200 (CEST) Subject: [pypy-commit] stmgc default: Introduce "handles" as written down in doc-objects.txt Message-ID: <20130605184816.8D54A1C01CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r75:d8c52869d637 Date: 2013-06-05 20:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/d8c52869d637/ Log: Introduce "handles" as written down in doc-objects.txt diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -296,6 +296,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { + assert(R->h_tid & GCFLAG_PUBLIC); if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *entry; @@ -726,13 +727,22 @@ assert(!is_young(R)); assert(R->h_revision != localrev); + /* XXX compactify and don't leak! */ + revision_t *handle_block = stm_malloc(3 * WORD); + handle_block = (revision_t *) + ((((intptr_t)handle_block) + HANDLE_BLOCK_SIZE-1) + & ~(HANDLE_BLOCK_SIZE-1)); + handle_block[0] = d->my_lock; + handle_block[1] = v; + + revision_t w = ((revision_t)(handle_block + 1)) + 2; + #ifdef DUMP_EXTRA - fprintf(stderr, "%p->h_revision = %p | 2 (UpdateChainHeads2)\n", - R, (gcptr)v); + fprintf(stderr, "%p->h_revision = %p (UpdateChainHeads2)\n", + R, (gcptr)w); /*mark*/ #endif - assert(!(v & 3)); - ACCESS_ONCE(R->h_revision) = v | 2; + ACCESS_ONCE(R->h_revision) = w; if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { diff --git a/c3/et.h b/c3/et.h --- a/c3/et.h +++ b/c3/et.h @@ -13,6 +13,8 @@ #define LOCKED ((INTPTR_MAX - 0xffff) | 1) +#define HANDLE_BLOCK_SIZE (2 * WORD) + /* Description of the flags * ------------------------ * diff --git a/c3/test/support.py b/c3/test/support.py --- a/c3/test/support.py +++ b/c3/test/support.py @@ -84,6 +84,7 @@ gcptr pseudoprebuilt(size_t size, int tid); revision_t get_private_rev_num(void); revision_t get_start_time(void); + revision_t get_my_lock(void); gcptr *addr_of_thread_local(void); int in_nursery(gcptr); @@ -92,6 +93,8 @@ /* some constants normally private that are useful in the tests */ #define WORD ... #define GC_PAGE_SIZE ... + #define LOCKED ... + #define HANDLE_BLOCK_SIZE ... #define GCFLAG_OLD ... #define GCFLAG_VISITED ... #define GCFLAG_PUBLIC ... 
@@ -118,6 +121,7 @@ extern void stmgcpage_add_prebuilt_root(gcptr); extern void stm_clear_between_tests(void); extern revision_t get_private_rev_num(void); + extern local_gcpages_t *stm_local_gcpages(void); int gettid(gcptr obj) { @@ -205,6 +209,11 @@ return thread_descriptor->start_time; } + revision_t get_my_lock(void) + { + return thread_descriptor->my_lock; + } + gcptr *addr_of_thread_local(void) { return &stm_thread_local_obj; @@ -540,3 +549,11 @@ result.append(p) index += 1 return result + +def decode_handle(r): + assert (r & 3) == 2 + p = r & ~(lib.HANDLE_BLOCK_SIZE-1) + my_lock = ffi.cast("revision_t *", p)[0] + assert my_lock >= lib.LOCKED + ptr = ffi.cast("gcptr *", r - 2)[0] + return ptr, my_lock diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -138,7 +138,7 @@ lib.stm_begin_inevitable_transaction() assert classify(p) == "public" assert classify(p2) == "protected" - assert p.h_revision == int(ffi.cast("revision_t", p2)) + 2 + assert decode_handle(p.h_revision) == (p2, lib.get_my_lock()) assert lib.rawgetlong(p, 0) == 28971289 assert lib.rawgetlong(p2, 0) == 1289222 From noreply at buildbot.pypy.org Wed Jun 5 21:03:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 5 Jun 2013 21:03:22 +0200 (CEST) Subject: [pypy-commit] stmgc default: Read barriers through handle objects. Message-ID: <20130605190322.35B451C01CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76:69fa4d0304c6 Date: 2013-06-05 21:03 +0200 http://bitbucket.org/pypy/stmgc/changeset/69fa4d0304c6/ Log: Read barriers through handle objects. diff --git a/c3/et.c b/c3/et.c --- a/c3/et.c +++ b/c3/et.c @@ -89,22 +89,18 @@ { struct tx_descriptor *d = thread_descriptor; gcptr P = G; + revision_t v; if (P->h_tid & GCFLAG_PUBLIC) { /* follow the chained list of h_revision's as long as they are regular pointers */ - revision_t v; - retry: v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
{ // "has a more recent revision" if (v & 2) - { - old_to_young: - abort(); - } + goto old_to_young; assert(P->h_tid & GCFLAG_PUBLIC); gcptr P_prev = P; @@ -162,9 +158,42 @@ { fprintf(stderr, "read_barrier: %p -> %p protected\n", G, P); } + + register_in_list_of_read_objects: fxcache_add(&d->recent_reads_cache, P); gcptrlist_insert(&d->list_of_read_objects, P); return P; + + old_to_young:; + revision_t target_lock; + target_lock = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); + if (target_lock == d->my_lock) + { + P = (gcptr)(*(revision_t *)(v - 2)); + assert(!(P->h_tid & GCFLAG_PUBLIC)); + if (P->h_revision == stm_private_rev_num) + { + fprintf(stderr, "read_barrier: %p -> %p handle " + "private\n", G, P); + return P; + } + else if (FXCACHE_AT(P) == P) + { + fprintf(stderr, "read_barrier: %p -> %p handle " + "protected fxcache\n", G, P); + return P; + } + else + { + fprintf(stderr, "read_barrier: %p -> %p handle " + "protected\n", G, P); + goto register_in_list_of_read_objects; + } + } + else + { + abort(); // stealing + } } static gcptr _latest_gcptr(gcptr R) diff --git a/c3/test/test_et.py b/c3/test/test_et.py --- a/c3/test/test_et.py +++ b/c3/test/test_et.py @@ -191,3 +191,35 @@ p3 = lib.stm_read_barrier(p) assert p3 == p2 assert list_of_read_objects() == [p] + +def test_read_barrier_handle_protected(): + p = palloc(HDR) + p2 = lib.stm_write_barrier(p) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p) == "public" + assert classify(p2) == "protected" + assert list_of_read_objects() == [] + p3 = lib.stm_read_barrier(p) + assert p3 == p2 + assert list_of_read_objects() == [p2] + p4 = lib.stm_read_barrier(p) + assert p4 == p2 + assert list_of_read_objects() == [p2] + +def test_read_barrier_handle_private(): + p = palloc(HDR) + p2 = lib.stm_write_barrier(p) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + p2b = lib.stm_write_barrier(p) + assert p2b == p2 + assert classify(p) == "public" + assert classify(p2) == "private" + assert list_of_read_objects() == [p2] + p3 = lib.stm_read_barrier(p) + assert p3 == p2 + assert list_of_read_objects() == [p2] + p4 = lib.stm_read_barrier(p) + assert p4 == p2 + assert list_of_read_objects() == [p2] From noreply at buildbot.pypy.org Wed Jun 5 23:47:02 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 23:47:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix Message-ID: <20130605214702.9907A1C1007@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64805:88ea391ffde6 Date: 2013-06-05 12:00 -0700 http://bitbucket.org/pypy/pypy/changeset/88ea391ffde6/ Log: fix diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -462,13 +462,13 @@ sys.flags = type(sys.flags)(flags) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) - sys._xoptions = dict(x.split('=', 1) if '=' in x else (x, True) - for x in options['_xoptions']) - if sys.flags.optimize >= 1: import __pypy__ __pypy__.set_debug(False) + sys._xoptions = dict(x.split('=', 1) if '=' in x else (x, True) + for x in options['_xoptions']) + ## if not we_are_translated(): ## for key in sorted(options): ## print '%40s: %s' % (key, options[key]) From noreply at buildbot.pypy.org Wed Jun 5 23:47:01 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 23:47:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130605214701.4BDF11C01CD@cobra.cs.uni-duesseldorf.de> 
Author: Philip Jenvey Branch: py3k Changeset: r64804:54af5b1ed1de Date: 2013-06-05 11:49 -0700 http://bitbucket.org/pypy/pypy/changeset/54af5b1ed1de/ Log: merge default diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -119,6 +119,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! 
diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -50,3 +50,5 @@ .. branch: win32-fixes3 Skip and fix some non-translated (own) tests for win32 builds +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,9 @@ # App-level version of py.py. # See test/test_app_main. 
-# Missing vs CPython: -b, -d, -OO, -v, -x, -3 -"""\ +# Missing vs CPython: -b, -d, -v, -x, -3 +from __future__ import print_function, unicode_literals +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +13,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -q : don't print version and copyright messages on interactive startup -R : ignored (see http://bugs.python.org/issue14621) -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -28,8 +30,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -from __future__ import print_function, unicode_literals -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -465,6 +465,10 @@ sys._xoptions = dict(x.split('=', 1) if '=' in x else (x, True) for x in options['_xoptions']) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + ## if not we_are_translated(): ## for key in sorted(options): ## print '%40s: %s' % (key, options[key]) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -246,6 +246,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -618,6 +620,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -263,6 +263,7 @@ start = 1 doc_expr.walkabout(self) self.name_op("__doc__", ast.Store) + self.scope.doc_removable = True for i in range(start, len(body)): body[i].walkabout(self) return True @@ -435,6 +436,7 @@ def visit_Assert(self, asrt): self.update_position(asrt.lineno) end = self.new_block() + self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end) asrt.test.accept_jump_if(self, True, end) self.emit_op_name(ops.LOAD_GLOBAL, self.names, "AssertionError") if asrt.msg: @@ -1225,6 +1227,8 @@ flags = 0 if not self.cell_vars and not self.free_vars: flags |= consts.CO_NOFREE + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING return flags @@ -1244,6 +1248,8 @@ flags |= consts.CO_VARARGS if scope.has_keywords_arg: flags |= consts.CO_VARKEYWORDS + if scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING if not self.cell_vars and not self.free_vars: flags |= consts.CO_NOFREE return PythonCodeGenerator._get_code_flags(self) | flags @@ -1260,6 +1266,7 @@ doc_expr = None if doc_expr is not None: self.add_const(doc_expr.s) + self.scope.doc_removable = True start = 1 else: self.add_const(self.space.w_None) @@ -1339,3 +1346,9 @@ # This happens when nobody references the cell self.load_const(self.space.w_None) self.emit_op(ops.RETURN_VALUE) + 
+ def _get_code_flags(self): + flags = 0 + if self.scope.doc_removable: + flags |= consts.CO_KILL_DOCSTRING + return PythonCodeGenerator._get_code_flags(self) | flags diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -16,6 +16,8 @@ CO_FUTURE_PRINT_FUNCTION = 0x10000 CO_FUTURE_UNICODE_LITERALS = 0x20000 CO_FUTURE_BARRY_AS_BDFL = 0x40000 +#pypy specific: +CO_KILL_DOCSTRING = 0x100000 PyCF_MASK = (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -42,6 +42,7 @@ self.has_free = False self.child_has_free = False self.nested = False + self.doc_removable = False def lookup(self, name): """Find the scope of identifier 'name'.""" diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -842,6 +842,58 @@ """ self.simple_test(source, 'ok', 1) + def test_remove_docstring(self): + source = '"module_docstring"\n' + """if 1: + def f1(): + 'docstring' + def f2(): + 'docstring' + return 'docstring' + def f3(): + 'foo' + return 'bar' + class C1(): + 'docstring' + class C2(): + __doc__ = 'docstring' + class C3(): + field = 'not docstring' + class C4(): + 'docstring' + field = 'docstring' + """ + code_w = compile_with_astcompiler(source, 'exec', self.space) + code_w.remove_docstrings(self.space) + dict_w = self.space.newdict(); + code_w.exec_code(self.space, dict_w, dict_w) + + yield self.check, dict_w, "f1.__doc__", None + yield self.check, dict_w, "f2.__doc__", 'docstring' + yield self.check, dict_w, "f2()", 'docstring' + yield self.check, dict_w, "f3.__doc__", None + yield self.check, dict_w, "f3()", 'bar' + yield self.check, dict_w, "C1.__doc__", None + yield self.check, dict_w, "C2.__doc__", 'docstring' + yield self.check, dict_w, "C3.field", 'not docstring' + yield self.check, dict_w, "C4.field", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "C4.__doc__", 'docstring' + yield self.check, dict_w, "__doc__", None + + def test_assert_skipping(self): + space = self.space + mod = space.getbuiltinmodule('__pypy__') + w_set_debug = space.getattr(mod, space.wrap('set_debug')) + space.call_function(w_set_debug, space.w_False) + + source = """if 1: + assert False + """ + try: + self.run(source) + finally: + space.call_function(w_set_debug, space.w_True) + def test_raise_from(self): test = """if 1: def f(): diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR) + CO_GENERATOR, CO_KILL_DOCSTRING) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash, we_are_translated @@ -240,6 +240,13 @@ return w_first return space.w_None + def remove_docstrings(self, space): + if self.co_flags & CO_KILL_DOCSTRING: + self.co_consts_w[0] = space.w_None + for 
w_co in self.co_consts_w: + if isinstance(w_co, PyCode): + w_co.remove_docstrings(space) + def _to_code(self): """For debugging only.""" consts = [None] * len(self.co_consts_w) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -855,6 +855,11 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, jumpby, next_instr): + if not self.space.sys.debug: + next_instr += jumpby + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -59,6 +59,7 @@ 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', + 'set_debug' : 'interp_magic.set_debug', } submodules = { diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -98,6 +98,13 @@ def newlist_hint(space, sizehint): return space.newlist_hint(sizehint) + at unwrap_spec(debug=bool) +def set_debug(space, debug): + space.sys.debug = debug + space.setitem(space.builtin.w_dict, + space.wrap('__debug__'), + space.wrap(debug)) + @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -992,6 +992,13 @@ if not space.is_true(space.sys.get('dont_write_bytecode')): write_compiled_module(space, code_w, cpathname, mode, mtime) + try: + optimize = space.sys.get_flag('optimize') + except Exception: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + update_code_filenames(space, code_w, pathname) exec_code_module(space, w_mod, code_w, pathname, cpathname) @@ -1086,6 +1093,13 @@ "Bad magic number in %s", cpathname) #print "loading pyc file:", cpathname code_w = read_compiled_module(space, cpathname, source) + try: + optimize = space.sys.get_flag('optimize') + except Exception: + optimize = 0 + if optimize >= 2: + code_w.remove_docstrings(space) + exec_code_module(space, w_mod, code_w, cpathname, cpathname, write_paths) return w_mod diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -13,6 +13,9 @@ from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage from rpython.rlib.objectmodel import specialize from pypy.interpreter.mixedmodule import MixedModule +from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.rstring import StringBuilder + MIXIN_32 = (long_typedef,) if LONG_BIT == 32 else () MIXIN_64 = (long_typedef,) if LONG_BIT == 64 else () @@ -42,7 +45,23 @@ return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") -class PrimitiveBox(object): +class Box(object): + _mixin_ = True + + def reduce(self, space): + from rpython.rlib.rstring import StringBuilder + from rpython.rtyper.lltypesystem import rffi, lltype + + numpypy = space.getbuiltinmodule("_numpypy") + assert isinstance(numpypy, MixedModule) + multiarray = numpypy.get("multiarray") + assert isinstance(multiarray, MixedModule) + scalar = 
multiarray.get("scalar") + + ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) + return ret + +class PrimitiveBox(Box): _mixin_ = True def __init__(self, value): @@ -54,27 +73,19 @@ def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.value) - def reduce(self, space): - from rpython.rlib.rstring import StringBuilder - from rpython.rtyper.lltypesystem import rffi, lltype - - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") - assert isinstance(multiarray, MixedModule) - scalar = multiarray.get("scalar") - + def raw_str(self): value = lltype.malloc(rffi.CArray(lltype.typeOf(self.value)), 1, flavor="raw") value[0] = self.value builder = StringBuilder() builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + ret = builder.build() - ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(builder.build())])]) lltype.free(value, flavor="raw") + return ret -class ComplexBox(object): +class ComplexBox(Box): _mixin_ = True def __init__(self, real, imag=0.): @@ -90,25 +101,17 @@ def convert_imag_to(self, dtype): return dtype.box(self.imag) - def reduce(self, space): - from rpython.rlib.rstring import StringBuilder - from rpython.rtyper.lltypesystem import rffi, lltype - - numpypy = space.getbuiltinmodule("_numpypy") - assert isinstance(numpypy, MixedModule) - multiarray = numpypy.get("multiarray") - assert isinstance(multiarray, MixedModule) - scalar = multiarray.get("scalar") - + def raw_str(self): value = lltype.malloc(rffi.CArray(lltype.typeOf(self.real)), 2, flavor="raw") value[0] = self.real value[1] = self.imag builder = StringBuilder() builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + ret = builder.build() - ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(builder.build())])]) lltype.free(value, flavor="raw") + return ret class W_GenericBox(W_Root): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -786,6 +786,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rlib.rstring import StringBuilder from pypy.interpreter.mixedmodule import MixedModule + from pypy.module.micronumpy.arrayimpl.concrete import SliceArray numpypy = space.getbuiltinmodule("_numpypy") assert isinstance(numpypy, MixedModule) @@ -796,7 +797,14 @@ parameters = space.newtuple([space.gettypefor(W_NDimArray), space.newtuple([space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() - builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) + if isinstance(self.implementation, SliceArray): + iter = self.implementation.create_iter() + while not iter.done(): + box = iter.getitem() + builder.append(box.raw_str()) + iter.next() + else: + builder.append_charpsize(self.implementation.get_storage(), self.implementation.get_storage_size()) state = space.newtuple([ space.wrap(1), # version diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1801,6 +1801,13 @@ pickled_data = dumps(a) assert (loads(pickled_data) == a).all() + def test_pickle_slice(self): + 
from cPickle import loads, dumps + import numpypy as numpy + + a = numpy.arange(10.).reshape((5, 2))[::2] + assert (loads(dumps(a)) == a).all() + class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py --- a/pypy/module/pypyjit/test/test_policy.py +++ b/pypy/module/pypyjit/test/test_policy.py @@ -8,7 +8,12 @@ def test_bigint(): from rpython.rlib.rbigint import rbigint + assert not pypypolicy.look_inside_function(rbigint.eq.im_func) + assert not pypypolicy.look_inside_function(rbigint.ne.im_func) assert not pypypolicy.look_inside_function(rbigint.lt.im_func) + assert not pypypolicy.look_inside_function(rbigint.le.im_func) + assert not pypypolicy.look_inside_function(rbigint.gt.im_func) + assert not pypypolicy.look_inside_function(rbigint.ge.im_func) def test_rlocale(): from rpython.rlib.rlocale import setlocale diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -231,6 +231,7 @@ assert loop.match(""" i8 = int_lt(i6, 300) guard_true(i8, descr=...) + guard_not_invalidated? i10 = int_lshift(i6, 1) i12 = int_add_ovf(i5, 1) guard_no_overflow(descr=...) @@ -253,6 +254,7 @@ assert loop.match(""" i8 = int_lt(i6, 300) guard_true(i8, descr=...) + guard_not_invalidated? i10 = int_add_ovf(i5, 8) guard_no_overflow(descr=...) i12 = int_add(i6, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -74,7 +74,6 @@ jump(..., descr=...) """) - def test_mixed_type_loop(self): def main(n): i = 0.0 @@ -94,7 +93,6 @@ jump(..., descr=...) """) - def test_cached_pure_func_of_equal_fields(self): def main(n): class A(object): @@ -196,7 +194,6 @@ jump(..., descr=...) """) - def test_chain_of_guards(self): src = """ class A(object): @@ -220,7 +217,6 @@ loops = log.loops_by_filename(self.filepath) assert len(loops) == 1 - def test_unpack_iterable_non_list_tuple(self): def main(n): import array @@ -258,7 +254,6 @@ jump(..., descr=...) """) - def test_dont_trace_every_iteration(self): def main(a, b): i = sa = 0 @@ -289,7 +284,6 @@ assert log.result == 300 * (-10 % -20) assert log.jit_summary.tracing_no == 1 - def test_overflow_checking(self): """ This test only checks that we get the expected result, not that any @@ -298,7 +292,8 @@ def main(): import sys def f(a,b): - if a < 0: return -1 + if a < 0: + return -1 return a-b # total = sys.maxint - 2147483647 @@ -309,7 +304,6 @@ # self.run_and_check(main, []) - def test_global(self): log = self.run(""" i = 0 @@ -404,3 +398,13 @@ # the following assertion fails if the loop was cancelled due # to "abort: vable escape" assert len(log.loops_by_id("exc_info")) == 1 + + def test_long_comparison(self): + def main(n): + while n: + 12345L > 123L # ID: long_op + n -= 1 + + log = self.run(main, [300]) + loop, = log.loops_by_id("long_op") + assert len(loop.ops_by_id("long_op")) == 0 diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -7,6 +7,7 @@ class Module(MixedModule): """Sys Builtin Module. 
""" + _immutable_fields_ = ["defaultencoding?", "debug?"] def __init__(self, space, w_name): """NOT_RPYTHON""" # because parent __init__ isn't @@ -17,6 +18,7 @@ self.w_default_encoder = None self.defaultencoding = "utf-8" self.filesystemencoding = None + self.debug = True interpleveldefs = { '__name__' : '(space.wrap("sys"))', diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_pointers.py @@ -108,6 +108,13 @@ py.test.raises(TypeError, delitem, p, 0) + def test_byref(self): + for ct, pt in zip(ctype_types, python_types): + i = ct(42) + p = byref(i) + assert type(p._obj) is ct + assert p._obj.value == 42 + def test_pointer_to_pointer(self): x = c_int(32) y = c_int(42) diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -800,6 +800,9 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1911,6 +1911,29 @@ [BoxPtr(x)], 'int').value assert res == -19 + def test_cast_int_to_float(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + for x in [-10, -1, 0, 3, 42, sys.maxint-1]: + res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + [BoxInt(x)], 'float').value + assert longlong.getrealfloat(res) == float(x) + res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + [ConstInt(x)], 'float').value + assert longlong.getrealfloat(res) == float(x) + + def test_cast_float_to_int(self): + if not self.cpu.supports_floats: + py.test.skip("requires floats") + for x in [-24.23, -5.3, 0.0, 3.1234, 11.1, 0.1]: + v = longlong.getfloatstorage(x) + res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + [BoxFloat(v)], 'int').value + assert res == int(x) + res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + [ConstFloat(v)], 'int').value + assert res == int(x) + def test_convert_float_bytes(self): if not self.cpu.supports_floats: py.test.skip("requires floats") diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py --- a/rpython/jit/codewriter/policy.py +++ b/rpython/jit/codewriter/policy.py @@ -39,8 +39,6 @@ return True # look into everything by default def _reject_function(self, func): - if hasattr(func, '_jit_look_inside_'): - return not func._jit_look_inside_ # explicitly elidable functions are always opaque if getattr(func, '_elidable_function_', False): return True @@ -58,8 +56,11 @@ except AttributeError: see_function = True else: - see_function = (self.look_inside_function(func) and not - self._reject_function(func)) + if hasattr(func, '_jit_look_inside_'): + see_function = func._jit_look_inside_ # override guessing + else: + see_function = (self.look_inside_function(func) and not + self._reject_function(func)) contains_loop = contains_loop and not getattr( func, '_jit_unroll_safe_', False) diff --git a/rpython/jit/codewriter/test/test_policy.py b/rpython/jit/codewriter/test/test_policy.py --- a/rpython/jit/codewriter/test/test_policy.py +++ b/rpython/jit/codewriter/test/test_policy.py @@ -65,6 +65,20 @@ graph = 
support.getgraph(h, [5]) assert not JitPolicy().look_inside_graph(graph) +def test_look_inside(): + def h1(x): + return x + 1 + @jit.look_inside # force True, even if look_inside_function() thinks not + def h2(x): + return x + 2 + class MyPolicy(JitPolicy): + def look_inside_function(self, func): + return False + graph1 = support.getgraph(h1, [5]) + graph2 = support.getgraph(h2, [5]) + assert not MyPolicy().look_inside_graph(graph1) + assert MyPolicy().look_inside_graph(graph2) + def test_loops(): def g(x): i = 0 diff --git a/rpython/jit/metainterp/optimizeopt/intutils.py b/rpython/jit/metainterp/optimizeopt/intutils.py --- a/rpython/jit/metainterp/optimizeopt/intutils.py +++ b/rpython/jit/metainterp/optimizeopt/intutils.py @@ -2,12 +2,14 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import BoxInt, ConstInt + MAXINT = maxint MININT = -maxint - 1 + class IntBound(object): _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower') - + def __init__(self, lower, upper): self.has_upper = True self.has_lower = True @@ -29,7 +31,7 @@ def make_lt(self, other): return self.make_le(other.add(-1)) - + def make_ge(self, other): if other.has_lower: if not self.has_lower or other.lower > self.lower: @@ -86,7 +88,7 @@ r = True return r - + def add(self, offset): res = self.clone() try: @@ -101,7 +103,7 @@ def mul(self, value): return self.mul_bound(IntBound(value, value)) - + def add_bound(self, other): res = self.clone() if other.has_upper: @@ -115,7 +117,7 @@ try: res.lower = ovfcheck(res.lower + other.lower) except OverflowError: - res.has_lower = False + res.has_lower = False else: res.has_lower = False return res @@ -133,7 +135,7 @@ try: res.lower = ovfcheck(res.lower - other.upper) except OverflowError: - res.has_lower = False + res.has_lower = False else: res.has_lower = False return res @@ -196,7 +198,6 @@ else: return IntUnbounded() - def contains(self, val): if self.has_lower and val < self.lower: return False @@ -216,7 +217,7 @@ elif self.has_upper: return False return True - + def __repr__(self): if self.has_lower: l = '%d' % self.lower @@ -249,7 +250,7 @@ guards.append(op) op = ResOperation(rop.GUARD_TRUE, [res], None) guards.append(op) - + class IntUpperBound(IntBound): def __init__(self, upper): @@ -285,7 +286,7 @@ self._raise() def make_constant(self, value): self._raise() - def intersect(self, other): + def intersect(self, other): self._raise() def min4(t): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -83,6 +83,13 @@ func._jit_look_inside_ = False return func +def look_inside(func): + """ Make sure the JIT traces inside decorated function, even + if the rest of the module is not visible to the JIT + """ + func._jit_look_inside_ = True + return func + def unroll_safe(func): """ JIT can safely unroll loops in this function and this will not lead to code explosion diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -190,9 +190,9 @@ @staticmethod + @jit.elidable def frombool(b): - # This function is marked as pure, so you must not call it and - # then modify the result. + # You must not call this function and then modify the result. 
if b: return ONERBIGINT return NULLRBIGINT @@ -251,6 +251,7 @@ return _decimalstr_to_bigint(s) @staticmethod + @jit.elidable def frombytes(s, byteorder, signed): if byteorder not in ('big', 'little'): raise InvalidEndiannessError() @@ -383,9 +384,11 @@ def tolonglong(self): return _AsLongLong(self) + @jit.look_inside def tobool(self): return self.sign != 0 + @jit.elidable def touint(self): if self.sign == -1: raise ValueError("cannot convert negative integer to unsigned int") @@ -410,13 +413,16 @@ raise ValueError("cannot convert negative integer to unsigned int") return _AsULonglong_ignore_sign(self) + @jit.elidable def uintmask(self): return _AsUInt_mask(self) + @jit.elidable def ulonglongmask(self): """Return r_ulonglong(self), truncating.""" return _AsULonglong_mask(self) + @jit.elidable def tofloat(self): return _AsDouble(self) @@ -448,6 +454,7 @@ i += 1 return True + @jit.look_inside def ne(self, other): return not self.eq(other) @@ -486,12 +493,15 @@ i -= 1 return False + @jit.look_inside def le(self, other): return not other.lt(self) + @jit.look_inside def gt(self, other): return other.lt(self) + @jit.look_inside def ge(self, other): return not self.lt(other) @@ -592,6 +602,7 @@ return div + @jit.look_inside def div(self, other): return self.floordiv(other) @@ -792,14 +803,17 @@ z = z.sub(c) return z + @jit.elidable def neg(self): return rbigint(self._digits, -self.sign, self.size) + @jit.elidable def abs(self): if self.sign != -1: return self return rbigint(self._digits, 1, self.size) + @jit.elidable def invert(self): #Implement ~x as -(x + 1) if self.sign == 0: return ONENEGATIVERBIGINT @@ -909,12 +923,14 @@ def or_(self, other): return _bitwise(self, '|', other) + @jit.elidable def oct(self): if self.sign == 0: return '0L' else: return _format(self, BASE8, '0', 'L') + @jit.elidable def hex(self): return _format(self, BASE16, '0x', 'L') diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -332,7 +332,7 @@ newstr = s2.malloc(len1 + len2) newstr.copy_contents_from_str(s1, newstr, 0, 0, len1) else: - newstr = s1.malloc(len1 + len2) + newstr = s1.malloc(len1 + len2) newstr.copy_contents(s1, newstr, 0, 0, len1) if typeOf(s2) == Ptr(STR): newstr.copy_contents_from_str(s2, newstr, 0, len1, len2) From noreply at buildbot.pypy.org Wed Jun 5 23:47:03 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 23:47:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: apply stdlib changes from e953dfbc7f0a Message-ID: <20130605214703.D1AAA1C01CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64806:739db6f6edc0 Date: 2013-06-05 14:44 -0700 http://bitbucket.org/pypy/pypy/changeset/739db6f6edc0/ Log: apply stdlib changes from e953dfbc7f0a diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py --- a/lib-python/3/opcode.py +++ b/lib-python/3/opcode.py @@ -182,5 +182,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/3/test/test_code.py b/lib-python/3/test/test_code.py --- a/lib-python/3/test/test_code.py +++ b/lib-python/3/test/test_code.py @@ -81,7 +81,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') >>> def keywordonly_args(a,b,*,k1): diff --git a/lib-python/3/test/test_dis.py 
b/lib-python/3/test/test_dis.py --- a/lib-python/3/test/test_dis.py +++ b/lib-python/3/test/test_dis.py @@ -218,7 +218,7 @@ Kw-only arguments: 0 Number of locals: 1 Stack size: 4 -Flags: OPTIMIZED, NEWLOCALS, NOFREE +Flags: OPTIMIZED, NEWLOCALS, NOFREE, 0x100000 Constants: 0: %r 1: '__func__' From noreply at buildbot.pypy.org Wed Jun 5 23:47:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 23:47:05 +0200 (CEST) Subject: [pypy-commit] pypy default: have recursive Cache build failures trigger a more specific RuntimeError Message-ID: <20130605214705.2596C1C01CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r64807:e258a99e3784 Date: 2013-06-05 14:46 -0700 http://bitbucket.org/pypy/pypy/changeset/e258a99e3784/ Log: have recursive Cache build failures trigger a more specific RuntimeError diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -917,7 +917,8 @@ try: optimize = space.sys.get_flag('optimize') - except Exception: + except RuntimeError: + # during bootstrapping optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) @@ -1018,7 +1019,8 @@ code_w = read_compiled_module(space, cpathname, source) try: optimize = space.sys.get_flag('optimize') - except Exception: + except RuntimeError: + # during bootstrapping optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) diff --git a/rpython/rlib/cache.py b/rpython/rlib/cache.py --- a/rpython/rlib/cache.py +++ b/rpython/rlib/cache.py @@ -44,8 +44,8 @@ return self.content[key] except KeyError: if key in self._building: - raise Exception, "%s recursive building of %r" % ( - self, key) + raise RuntimeError("%s recursive building of %r" % + (self, key)) self._building[key] = True try: result = self._build(key) From noreply at buildbot.pypy.org Wed Jun 5 23:48:58 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Jun 2013 23:48:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: (amaury) oops, don't double wrap Message-ID: <20130605214858.34FD41C01CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64808:162c471d831c Date: 2013-06-05 14:48 -0700 http://bitbucket.org/pypy/pypy/changeset/162c471d831c/ Log: (amaury) oops, don't double wrap diff --git a/pypy/interpreter/pyparser/error.py b/pypy/interpreter/pyparser/error.py --- a/pypy/interpreter/pyparser/error.py +++ b/pypy/interpreter/pyparser/error.py @@ -24,7 +24,7 @@ space.newtuple([w_filename, space.wrap(self.lineno), space.wrap(self.offset), - space.wrap(w_text), + w_text, space.wrap(self.lastlineno)])]) def __str__(self): From noreply at buildbot.pypy.org Thu Jun 6 10:54:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 10:54:40 +0200 (CEST) Subject: [pypy-commit] pypy default: The front-end never generates pure operations with all-constant Message-ID: <20130606085440.5B9EE1C101F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64809:fc0d2316ad35 Date: 2013-06-06 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/fc0d2316ad35/ Log: The front-end never generates pure operations with all-constant arguments. The x86 backend doesn't support them. 
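(For context: the reason the backend no longer needs to handle this case is that the
tracing front-end constant-folds a pure operation as soon as all of its arguments are
constants, so an operation such as CAST_INT_TO_FLOAT(Const) is evaluated at trace time
and never reaches the backend at all. The snippet below is only an illustrative,
self-contained sketch of that folding idea; the class and function names are invented
for the example and are not the actual PyPy optimizer API.)

    class Const(object):
        # a value already known at trace time
        def __init__(self, value):
            self.value = value
        def is_constant(self):
            return True

    class Box(object):
        # a runtime value, not known until the loop actually executes
        def is_constant(self):
            return False

    def emit_pure_op(trace, func, args):
        # Pure operation: if every argument is constant, fold it now and
        # return a Const; nothing is appended to the trace, so the backend
        # never sees a pure operation whose arguments are all constants.
        if all(arg.is_constant() for arg in args):
            return Const(func(*[arg.value for arg in args]))
        trace.append((func.__name__, args))
        return trace[-1]

    trace = []
    res = emit_pure_op(trace, float, [Const(42)])
    assert isinstance(res, Const) and res.value == 42.0
    assert trace == []    # the folded operation was never emitted
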
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1918,9 +1918,10 @@ res = self.execute_operation(rop.CAST_INT_TO_FLOAT, [BoxInt(x)], 'float').value assert longlong.getrealfloat(res) == float(x) - res = self.execute_operation(rop.CAST_INT_TO_FLOAT, - [ConstInt(x)], 'float').value - assert longlong.getrealfloat(res) == float(x) + # --- the front-end never generates CAST_INT_TO_FLOAT(Const) + #res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + # [ConstInt(x)], 'float').value + #assert longlong.getrealfloat(res) == float(x) def test_cast_float_to_int(self): if not self.cpu.supports_floats: @@ -1930,9 +1931,10 @@ res = self.execute_operation(rop.CAST_FLOAT_TO_INT, [BoxFloat(v)], 'int').value assert res == int(x) - res = self.execute_operation(rop.CAST_FLOAT_TO_INT, - [ConstFloat(v)], 'int').value - assert res == int(x) + # --- the front-end never generates CAST_FLOAT_TO_INT(Const) + #res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + # [ConstFloat(v)], 'int').value + #assert res == int(x) def test_convert_float_bytes(self): if not self.cpu.supports_floats: From noreply at buildbot.pypy.org Thu Jun 6 10:57:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 10:57:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Revert 7e3fde300fa3: the rebuild is needed in some cases. The test Message-ID: <20130606085703.601241C101F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64810:c48d25dd8c61 Date: 2013-06-06 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/c48d25dd8c61/ Log: Revert 7e3fde300fa3: the rebuild is needed in some cases. The test fails right now on linux64. diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,7 +1,7 @@ # Generates the resource cache -#from __future__ import absolute_import -#from lib_pypy.ctypes_config_cache import rebuild -#rebuild.rebuild_one('resource.ctc.py') +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') import os From noreply at buildbot.pypy.org Thu Jun 6 11:14:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 11:14:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Attempt to get rid of the occasional failure of test_methodcache.py. Message-ID: <20130606091435.8D8C41C0135@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64811:9c8f04b5d3a9 Date: 2013-06-06 11:10 +0200 http://bitbucket.org/pypy/pypy/changeset/9c8f04b5d3a9/ Log: Attempt to get rid of the occasional failure of test_methodcache.py. diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -4,7 +4,28 @@ class AppTestMethodCaching(test_typeobject.AppTestTypeObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} + def setup_class(cls): + # This is for the following tests, which are a bit fragile and + # historically have been failing once in a while. With this hack, + # they are run up to 5 times in a row, saving the frame of the + # failed attempt. This means occasional collisions should work + # differently during the retry. 
+ cls.w_retry = cls.space.appexec([], """(): + def retry(run): + keepalive = [] + for i in range(4): + try: + return run() + except AssertionError: + import sys + keepalive.append(sys.exc_info()) + return run() + return retry + """) + def test_mix_classes(self): + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -25,6 +46,8 @@ assert sum(cache_counter) == 30 def test_class_that_cannot_be_cached(self): + @self.retry + def run(): import __pypy__ class X: pass @@ -50,6 +73,8 @@ assert sum(cache_counter) == 20 def test_change_methods(self): + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -88,6 +113,8 @@ assert cache_counter == (17, 3) def test_subclasses(self): + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -107,6 +134,8 @@ assert sum(cache_counter) == 30 def test_many_names(self): + @self.retry + def run(): import __pypy__ for j in range(20): class A(object): @@ -160,6 +189,8 @@ assert e.foo == 3 def test_custom_metaclass(self): + @self.retry + def run(): import __pypy__ for j in range(20): class MetaA(type): @@ -181,6 +212,8 @@ raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): + @self.retry + def run(): import __pypy__ class A(object): x = 1 From noreply at buildbot.pypy.org Thu Jun 6 11:18:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 11:18:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Comment Message-ID: <20130606091858.C81D11C06EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64812:109953b1b160 Date: 2013-06-06 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/109953b1b160/ Log: Comment diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,4 +1,4 @@ -# Generates the resource cache +# Generates the resource cache (it might be there already, but maybe not) from __future__ import absolute_import from lib_pypy.ctypes_config_cache import rebuild rebuild.rebuild_one('resource.ctc.py') From noreply at buildbot.pypy.org Thu Jun 6 13:56:08 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 13:56:08 +0200 (CEST) Subject: [pypy-commit] stmgc default: Move the code to a new directory "c4" as I need it. Message-ID: <20130606115608.2569A1C13A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r77:9baaba7aa86d Date: 2013-06-06 13:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/9baaba7aa86d/ Log: Move the code to a new directory "c4" as I need it. 
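(The log above is terse; most of the diff that follows is a plain rename from c3/ to
c4/, and the substantive edits are in c4/et.c, for example stmgc_duplicate() losing its
second argument and LocalizeProtected() taking a backup copy of an object before a
transaction writes to it. Purely as an orientation aid, here is a rough Python sketch of
that backup-before-write idea; the names are simplified and locking, revision numbers
and GC flags are left out, so this is not a description of the actual C implementation.)

    import copy

    class Transaction(object):
        def __init__(self):
            # maps an object (by identity) to the snapshot taken before
            # its first write inside this transaction
            self.private_to_backup = {}

        def write_barrier(self, obj):
            # Before the first in-transaction write to 'obj', keep a copy
            # (the role played by stmgc_duplicate() in et.c), so that the
            # change could be undone if the transaction aborts.
            if id(obj) not in self.private_to_backup:
                self.private_to_backup[id(obj)] = copy.copy(obj)
            return obj

    class Node(object):
        pass

    tx = Transaction()
    n = Node()
    n.value = 1
    tx.write_barrier(n).value = 2   # the backup still holds value == 1
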
diff --git a/c3/atomic_ops.h b/c4/atomic_ops.h rename from c3/atomic_ops.h rename to c4/atomic_ops.h diff --git a/c3/dbgmem.c b/c4/dbgmem.c rename from c3/dbgmem.c rename to c4/dbgmem.c diff --git a/c3/dbgmem.h b/c4/dbgmem.h rename from c3/dbgmem.h rename to c4/dbgmem.h diff --git a/c3/doc-objects.txt b/c4/doc-objects.txt rename from c3/doc-objects.txt rename to c4/doc-objects.txt diff --git a/c3/doc-stmgc.txt b/c4/doc-stmgc.txt rename from c3/doc-stmgc.txt rename to c4/doc-stmgc.txt diff --git a/c3/et.c b/c4/et.c rename from c3/et.c rename to c4/et.c --- a/c3/et.c +++ b/c4/et.c @@ -192,10 +192,13 @@ } else { - abort(); // stealing + /* stealing */ + fprintf(stderr, "read_barrier: %p -> stealing %p...", G, (gcptr)v); + abort(); } } +#if 0 static gcptr _latest_gcptr(gcptr R) { /* don't use, for tests only */ @@ -270,6 +273,7 @@ return obj; } } +#endif #if 0 void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset) @@ -294,6 +298,14 @@ #endif } +gcptr stmgc_duplicate(gcptr P) +{ + size_t size = stmcb_size(P); + gcptr L = stm_malloc(size); + memcpy(L, P, size); + return L; +} + static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; @@ -307,7 +319,7 @@ if (P->h_revision & 1) { /* does not have a backup yet */ - B = stmgc_duplicate(P, 0); + B = stmgc_duplicate(P); B->h_tid |= GCFLAG_BACKUP_COPY; } else @@ -338,7 +350,7 @@ R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; not_found:; - gcptr L = stmgc_duplicate(R, 0); + gcptr L = stmgc_duplicate(R); assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); assert(!(L->h_tid & GCFLAG_STOLEN)); assert(!(L->h_tid & GCFLAG_STUB)); @@ -556,7 +568,7 @@ gcptrlist_clear(&d->list_of_read_objects); g2l_clear(&d->private_to_backup); - stmgc_abort_transaction(d); + abort();//stmgc_abort_transaction(d); fprintf(stderr, "\n" @@ -620,7 +632,7 @@ } assert(d->list_of_read_objects.size == 0); assert(!g2l_any_entry(&d->private_to_backup)); - stmgc_start_transaction(d); + assert(!g2l_any_entry(&d->public_to_private)); d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); @@ -750,10 +762,10 @@ gcptr R = item->addr; revision_t v = (revision_t)item->val; + assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); assert(!(R->h_tid & GCFLAG_STOLEN)); - assert(!is_young(R)); assert(R->h_revision != localrev); /* XXX compactify and don't leak! 
*/ @@ -773,6 +785,7 @@ #endif ACCESS_ONCE(R->h_revision) = w; +#if 0 if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { /* cannot possibly get here more than once for a given value of R */ @@ -781,12 +794,14 @@ pthread_mutex_unlock(&mutex_prebuilt_gcroots); /*mark*/ } +#endif } G2L_LOOP_END; g2l_clear(&d->public_to_private); } +#if 0 void UpdateProtectedChainHeads(struct tx_descriptor *d, revision_t cur_time, revision_t localrev) { @@ -805,6 +820,7 @@ L->h_revision = new_revision; } } +#endif void TurnPrivateWithBackupToProtected(struct tx_descriptor *d, revision_t cur_time) @@ -829,7 +845,7 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - stmgc_stop_transaction(d); + spinlock_acquire(d->collection_lock, 'C'); /* committing */ AcquireLocks(d); if (is_inevitable(d)) @@ -851,10 +867,10 @@ if (cur_time & 1) { // there is another inevitable transaction CancelLocks(d); - stmgc_suspend_commit_transaction(d); + spinlock_release(d->collection_lock); inev_mutex_acquire(); // wait until released inev_mutex_release(); - stmgc_stop_transaction(d); + spinlock_acquire(d->collection_lock, 'C'); AcquireLocks(d); continue; } @@ -879,8 +895,8 @@ TurnPrivateWithBackupToProtected(d, cur_time); revision_t localrev = stm_private_rev_num; - UpdateProtectedChainHeads(d, cur_time, localrev); - smp_wmb(); + //UpdateProtectedChainHeads(d, cur_time, localrev); + //smp_wmb(); revision_t newrev = -(cur_time + 1); assert(newrev & 1); @@ -890,7 +906,7 @@ UpdateChainHeads(d, cur_time, localrev); - stmgc_committed_transaction(d); + spinlock_release(d->collection_lock); d->num_commits++; d->active = 0; stm_stop_sharedlock(); @@ -1107,6 +1123,7 @@ thread_descriptor = NULL; + g2l_delete(&d->public_to_private); g2l_delete(&d->private_to_backup); gcptrlist_delete(&d->list_of_read_objects); gcptrlist_delete(&d->abortinfo); diff --git a/c3/et.h b/c4/et.h rename from c3/et.h rename to c4/et.h --- a/c3/et.h +++ b/c4/et.h @@ -13,6 +13,7 @@ #define LOCKED ((INTPTR_MAX - 0xffff) | 1) +#define WORD sizeof(gcptr) #define HANDLE_BLOCK_SIZE (2 * WORD) /* Description of the flags @@ -100,11 +101,13 @@ #define SPINLOOP_REASONS 4 struct tx_descriptor { - NURSERY_FIELDS_DECL - local_gcpages_t *local_gcpages; jmp_buf *setjmp_buf; revision_t start_time; revision_t my_lock; + revision_t collection_lock; + gcptr *shadowstack; + gcptr **shadowstack_end_ref; + long atomic; /* 0 = not atomic, > 0 atomic */ unsigned long count_reads; unsigned long reads_size_limit; /* see should_break_tr. 
*/ @@ -119,6 +122,7 @@ struct GcPtrList list_of_read_objects; struct GcPtrList abortinfo; struct G2L private_to_backup; + struct G2L public_to_private; char *longest_abort_info; long long longest_abort_info_time; struct FXCache recent_reads_cache; diff --git a/c3/fprintcolor.c b/c4/fprintcolor.c rename from c3/fprintcolor.c rename to c4/fprintcolor.c diff --git a/c3/fprintcolor.h b/c4/fprintcolor.h rename from c3/fprintcolor.h rename to c4/fprintcolor.h diff --git a/c3/lists.c b/c4/lists.c rename from c3/lists.c rename to c4/lists.c diff --git a/c3/lists.h b/c4/lists.h rename from c3/lists.h rename to c4/lists.h diff --git a/c3/stmgc.h b/c4/stmgc.h rename from c3/stmgc.h rename to c4/stmgc.h diff --git a/c3/stmimpl.h b/c4/stmimpl.h rename from c3/stmimpl.h rename to c4/stmimpl.h --- a/c3/stmimpl.h +++ b/c4/stmimpl.h @@ -30,8 +30,6 @@ #include "fprintcolor.h" #include "lists.h" #include "dbgmem.h" -#include "nursery.h" -#include "gcpage.h" #include "et.h" #include "stmsync.h" diff --git a/c3/stmsync.c b/c4/stmsync.c rename from c3/stmsync.c rename to c4/stmsync.c --- a/c3/stmsync.c +++ b/c4/stmsync.c @@ -59,19 +59,19 @@ { int r = DescriptorInit(); assert(r == 1); - stmgc_init_tls(); + //stmgc_init_tls(); init_shadowstack(); - stmgcpage_init_tls(); + //stmgcpage_init_tls(); BeginInevitableTransaction(); } void stm_finalize(void) { - stmgc_minor_collect(); /* force everything out of the nursery */ + //stmgc_minor_collect(); /* force everything out of the nursery */ CommitTransaction(); - stmgcpage_done_tls(); + //stmgcpage_done_tls(); done_shadowstack(); - stmgc_done_tls(); + //stmgc_done_tls(); DescriptorDone(); } @@ -95,18 +95,16 @@ gcptr stm_allocate(size_t size, unsigned long tid) { - gcptr result = stm_allocate_object_of_size(size); + gcptr result = stm_malloc(size); assert(tid == (tid & STM_USER_TID_MASK)); result->h_tid = tid; + result->h_revision = stm_private_rev_num; return result; } gcptr _stm_allocate_old(size_t size, unsigned long tid) { - gcptr result = _stm_allocate_object_of_size_old(size); - assert(tid == (tid & STM_USER_TID_MASK)); - result->h_tid = tid | GCFLAG_OLD; - return result; + abort(); } /************************************************************/ @@ -237,12 +235,12 @@ { int err = pthread_rwlock_rdlock(&rwlock_shared); assert(err == 0); - assert(stmgc_nursery_hiding(thread_descriptor, 0)); + //assert(stmgc_nursery_hiding(thread_descriptor, 0)); } void stm_stop_sharedlock(void) { - assert(stmgc_nursery_hiding(thread_descriptor, 1)); + //assert(stmgc_nursery_hiding(thread_descriptor, 1)); int err = pthread_rwlock_unlock(&rwlock_shared); assert(err == 0); } @@ -303,3 +301,15 @@ which takes priority here */ stm_start_sharedlock(); } + +/************************************************************/ + +void stm_clear_between_tests(void) +{ + fprintf(stderr, "\n" + "===============================================================\n" + "========================[ START ]============================\n" + "===============================================================\n" + "\n"); + //gcptrlist_clear(&stm_prebuilt_gcroots); +} diff --git a/c3/stmsync.h b/c4/stmsync.h rename from c3/stmsync.h rename to c4/stmsync.h --- a/c3/stmsync.h +++ b/c4/stmsync.h @@ -11,4 +11,6 @@ void stm_possible_safe_point(void); +void stm_clear_between_tests(void); + #endif diff --git a/c3/test/support.py b/c4/test/support.py rename from c3/test/support.py rename to c4/test/support.py --- a/c3/test/support.py +++ b/c4/test/support.py @@ -5,11 +5,11 @@ parent_dir = 
os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "et.h lists.h nursery.h gcpage.h " + "et.h lists.h " "stmsync.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "et.c lists.c nursery.c gcpage.c " + "et.c lists.c " "stmsync.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') @@ -40,7 +40,7 @@ #define PREBUILT_FLAGS ... #define PREBUILT_REVISION ... - gcptr stm_allocate_object_of_size(size_t size); + //gcptr stm_allocate_object_of_size(size_t size); gcptr stm_allocate(size_t size, unsigned long tid); void stm_push_root(gcptr); gcptr stm_pop_root(void); @@ -55,15 +55,15 @@ void stm_set_transaction_length(long length_max); /* extra non-public code */ - gcptr stmgcpage_malloc(size_t size); - void stmgcpage_free(gcptr obj); - long stmgcpage_count(int quantity); - void stmgcpage_possibly_major_collect(int); + //gcptr stmgcpage_malloc(size_t size); + //void stmgcpage_free(gcptr obj); + //long stmgcpage_count(int quantity); + //void stmgcpage_possibly_major_collect(int); revision_t stm_global_cur_time(void); - void stmgcpage_add_prebuilt_root(gcptr); + //void stmgcpage_add_prebuilt_root(gcptr); void stm_clear_between_tests(void); - void stmgc_minor_collect(void); - gcptr _stm_nonrecord_barrier(gcptr, int *); + //void stmgc_minor_collect(void); + //gcptr _stm_nonrecord_barrier(gcptr, int *); int stm_dbgmem_is_active(void *p, int allow_outside); void stm_start_sharedlock(void); void stm_stop_sharedlock(void); @@ -86,8 +86,8 @@ revision_t get_start_time(void); revision_t get_my_lock(void); - gcptr *addr_of_thread_local(void); - int in_nursery(gcptr); + //gcptr *addr_of_thread_local(void); + //int in_nursery(gcptr); void stm_initialize_tests(int max_aborts); /* some constants normally private that are useful in the tests */ @@ -106,22 +106,20 @@ #define GCFLAG_STOLEN ... #define GCFLAG_STUB ... #define ABRT_MANUAL ... 
- typedef struct { ...; } page_header_t; + //typedef struct { ...; } page_header_t; ''') lib = ffi.verify(r''' #include "stmgc.h" #include "stmimpl.h" - extern gcptr stmgcpage_malloc(size_t size); - extern void stmgcpage_free(gcptr obj); - extern long stmgcpage_count(int quantity); - extern void stmgcpage_possibly_major_collect(int); + //extern gcptr stmgcpage_malloc(size_t size); + //extern void stmgcpage_free(gcptr obj); + //extern long stmgcpage_count(int quantity); + //extern void stmgcpage_possibly_major_collect(int); extern revision_t stm_global_cur_time(void); - extern void stmgcpage_add_prebuilt_root(gcptr); - extern void stm_clear_between_tests(void); + //extern void stmgcpage_add_prebuilt_root(gcptr); extern revision_t get_private_rev_num(void); - extern local_gcpages_t *stm_local_gcpages(void); int gettid(gcptr obj) { @@ -214,17 +212,17 @@ return thread_descriptor->my_lock; } - gcptr *addr_of_thread_local(void) + /*gcptr *addr_of_thread_local(void) { return &stm_thread_local_obj; - } + }*/ - int in_nursery(gcptr obj) + /*int in_nursery(gcptr obj) { assert(stm_dbgmem_is_active(obj, 1)); struct tx_descriptor *d = thread_descriptor; return (d->nursery <= (char*)obj && ((char*)obj) < d->nursery_end); - } + }*/ void stm_initialize_tests(int max_aborts) { @@ -275,7 +273,7 @@ HDR = ffi.sizeof("struct stm_object_s") WORD = lib.WORD -PAGE_ROOM = lib.GC_PAGE_SIZE - ffi.sizeof("page_header_t") +#PAGE_ROOM = lib.GC_PAGE_SIZE - ffi.sizeof("page_header_t") for name in lib.__dict__: if name.startswith('GCFLAG_') or name.startswith('PREBUILT_'): globals()[name] = getattr(lib, name) @@ -416,7 +414,7 @@ lib.settid(p, 42 + size) return p -ofree = lib.stmgcpage_free +#ofree = lib.stmgcpage_free def oalloc_refs(nrefs): "Allocate an 'old' object, i.e. outside any nursery, with nrefs pointers" diff --git a/c3/test/test_et.py b/c4/test/test_et.py rename from c3/test/test_et.py rename to c4/test/test_et.py --- a/c3/test/test_et.py +++ b/c4/test/test_et.py @@ -223,3 +223,15 @@ p4 = lib.stm_read_barrier(p) assert p4 == p2 assert list_of_read_objects() == [p2] + +def test_stealing_protected_without_backup(): + p = palloc(HDR + WORD) + def f1(r): + lib.setlong(p, 0, 2782172) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + r.set(2) + def f2(r): + r.wait(2) + assert lib.getlong(p, 0) == 2782172 + run_parallel(f1, f2) From noreply at buildbot.pypy.org Thu Jun 6 17:22:33 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 6 Jun 2013 17:22:33 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Refactor the way nditer iterates Message-ID: <20130606152233.E4E2D1C13A7@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64813:8c3a4fc396d3 Date: 2013-06-06 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/8c3a4fc396d3/ Log: Refactor the way nditer iterates diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -5,10 +5,41 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, shape_agreement_multiple) -from pypy.module.micronumpy.iter import MultiDimViewIterator +from pypy.module.micronumpy.iter import MultiDimViewIterator, SliceIterator from pypy.module.micronumpy import support from pypy.module.micronumpy.arrayimpl.concrete import SliceArray +class AbstractIterator(object): + def done(self): + raise 
NotImplementedError("Abstract Class") + + def next(self): + raise NotImplementedError("Abstract Class") + + def getitem(self, array): + raise NotImplementedError("Abstract Class") + +class IteratorMixin(object): + _mixin_ = True + def __init__(self, it, op_flags): + self.it = it + self.op_flags = op_flags + + def done(self): + return self.it.done() + + def next(self): + self.it.next() + + def getitem(self, space, array): + return self.op_flags.get_it_item(space, array, self.it) + +class BoxIterator(IteratorMixin): + pass + +class SliceIterator(IteratorMixin): + pass + def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): ret = [] if space.is_w(w_op_flags, space.w_None): @@ -53,6 +84,13 @@ #it.dtype.setitem(res, 0, it.getitem()) return W_NDimArray(res) +def get_readonly_slice(space, array, it): + #XXX Not readonly + return W_NDimArray(it.getslice()) + +def get_readwrite_slice(space, array, it): + return W_NDimArray(it.getslice()) + def parse_op_flag(space, lst): op_flag = OpFlag() for w_item in lst: @@ -191,11 +229,11 @@ self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.external_loop: - xxx find longest contiguous shape + #XXX find longest contiguous shape iter_shape = iter_shape[1:] for i in range(len(self.seq)): - self.iters.append(get_iter(space, self.order, - self.seq[i].implementation, iter_shape)) + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i].implementation, iter_shape), self.op_flags[i])) def descr_iter(self, space): return space.wrap(self) @@ -220,8 +258,7 @@ raise OperationError(space.w_StopIteration, space.w_None) res = [] for i in range(len(self.iters)): - res.append(self.op_flags[i].get_it_item(space, self.seq[i], - self.iters[i])) + res.append(self.iters[i].getitem(space, self.seq[i])) self.iters[i].next() if len(res) <2: return res[0] diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -32,13 +32,13 @@ shape dimension which is back 25 and forward 1, which is x.strides[1] * (x.shape[1] - 1) + x.strides[0] -so if we precalculate the overflow backstride as +so if we precalculate the overflow backstride as [x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] we can go faster. All the calculations happen in next() next_skip_x() tries to do the iteration for a number of steps at once, -but then we cannot gaurentee that we only overflow one single shape +but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. 
""" @@ -266,6 +266,30 @@ def reset(self): self.offset %= self.size +class SliceIterator(object): + def __init__(self, arr, stride, backstride, shape, dtype=None): + self.step = 0 + self.arr = arr + self.stride = stride + self.backstride = backstride + self.shape = shape + if dtype is None: + dtype = arr.implementation.dtype + self.dtype = dtype + self._done = False + + def done(): + return self._done + + def next(): + self.step += self.arr.implementation.dtype.get_size() + if self.step == self.backstride - self.implementation.dtype.get_size(): + self._done = True + + def getslice(self): + from pypy.module.micronumpy.arrayimpl.concrete import SliceArray + return SliceArray(self.step, [self.stride], [self.backstride], self.shape, self.arr.implementation, self.arr, self.dtype) + class AxisIterator(base.BaseArrayIterator): def __init__(self, array, shape, dim, cumultative): self.shape = shape @@ -288,7 +312,7 @@ self.dim = dim self.array = array self.dtype = array.dtype - + def setitem(self, elem): self.dtype.setitem(self.array, self.offset, elem) From noreply at buildbot.pypy.org Thu Jun 6 20:26:15 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Jun 2013 20:26:15 +0200 (CEST) Subject: [pypy-commit] pypy default: issue1501: minor English fixes Message-ID: <20130606182615.2BB351C3386@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64814:da8f1e729f4d Date: 2013-06-06 20:25 +0200 http://bitbucket.org/pypy/pypy/changeset/da8f1e729f4d/ Log: issue1501: minor English fixes diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -8,7 +8,8 @@ interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part is that Python is a meta-programming language for RPython, that is, -RPython is considered from live objects **after** the imports are done. +"being valid RPython" is a question that only makes sense on the +live objects **after** the imports are done. This might require more explanation. You start writing RPython from ``entry_point``, a good starting point is ``rpython/translator/goal/targetnopstandalone.py``. This does not do all that @@ -37,7 +38,7 @@ In this example ``entry_point`` is RPython, ``add`` and ``sub`` are RPython, however, ``generator`` is not. -A good introductory level articles are available: +The following introductory level articles are available: * Laurence Tratt -- `Fast Enough VMs in Fast Enough Time`_. From noreply at buildbot.pypy.org Fri Jun 7 12:14:55 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 7 Jun 2013 12:14:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <20130607101455.832931C0135@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r64815:eb7f8348ff30 Date: 2013-06-07 11:43 +0200 http://bitbucket.org/pypy/pypy/changeset/eb7f8348ff30/ Log: hg merge default diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -8,7 +8,8 @@ interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part is that Python is a meta-programming language for RPython, that is, -RPython is considered from live objects **after** the imports are done. 
+"being valid RPython" is a question that only makes sense on the +live objects **after** the imports are done. This might require more explanation. You start writing RPython from ``entry_point``, a good starting point is ``rpython/translator/goal/targetnopstandalone.py``. This does not do all that @@ -37,7 +38,7 @@ In this example ``entry_point`` is RPython, ``add`` and ``sub`` are RPython, however, ``generator`` is not. -A good introductory level articles are available: +The following introductory level articles are available: * Laurence Tratt -- `Fast Enough VMs in Fast Enough Time`_. diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -994,7 +994,8 @@ try: optimize = space.sys.get_flag('optimize') - except Exception: + except RuntimeError: + # during bootstrapping optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) @@ -1095,7 +1096,8 @@ code_w = read_compiled_module(space, cpathname, source) try: optimize = space.sys.get_flag('optimize') - except Exception: + except RuntimeError: + # during bootstrapping optimize = 0 if optimize >= 2: code_w.remove_docstrings(space) diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -1,6 +1,7 @@ -# Generates the resource cache -#from lib_pypy.ctypes_config_cache import rebuild -#rebuild.rebuild_one('resource.ctc.py') +# Generates the resource cache (it might be there already, but maybe not) +from __future__ import absolute_import +from lib_pypy.ctypes_config_cache import rebuild +rebuild.rebuild_one('resource.ctc.py') import os diff --git a/pypy/objspace/std/test/test_methodcache.py b/pypy/objspace/std/test/test_methodcache.py --- a/pypy/objspace/std/test/test_methodcache.py +++ b/pypy/objspace/std/test/test_methodcache.py @@ -4,7 +4,28 @@ class AppTestMethodCaching(test_typeobject.AppTestTypeObject): spaceconfig = {"objspace.std.withmethodcachecounter": True} + def setup_class(cls): + # This is for the following tests, which are a bit fragile and + # historically have been failing once in a while. With this hack, + # they are run up to 5 times in a row, saving the frame of the + # failed attempt. This means occasional collisions should work + # differently during the retry. + cls.w_retry = cls.space.appexec([], """(): + def retry(run): + keepalive = [] + for i in range(4): + try: + return run() + except AssertionError: + import sys + keepalive.append(sys.exc_info()) + return run() + return retry + """) + def test_mix_classes(self): + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -32,6 +53,8 @@ # calling space.str_w, which .encode('ascii') the string, thus # creating new strings all the time. 
The problem should be solved when # we implement proper unicode identifiers in py3k + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -70,6 +93,8 @@ assert cache_counter == (17, 3) def test_subclasses(self): + @self.retry + def run(): import __pypy__ class A(object): def f(self): @@ -89,6 +114,8 @@ assert sum(cache_counter) == 30 def test_many_names(self): + @self.retry + def run(): import __pypy__ laste = None for j in range(20): @@ -143,6 +170,8 @@ assert e.foo == 3 def test_custom_metaclass(self): + @self.retry + def run(): import __pypy__ for j in range(20): class MetaA(type): @@ -164,6 +193,8 @@ raise AssertionError("cache_counter = %r" % (cache_counter,)) def test_mutate_class(self): + @self.retry + def run(): import __pypy__ class A(object): x = 1 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1918,9 +1918,10 @@ res = self.execute_operation(rop.CAST_INT_TO_FLOAT, [BoxInt(x)], 'float').value assert longlong.getrealfloat(res) == float(x) - res = self.execute_operation(rop.CAST_INT_TO_FLOAT, - [ConstInt(x)], 'float').value - assert longlong.getrealfloat(res) == float(x) + # --- the front-end never generates CAST_INT_TO_FLOAT(Const) + #res = self.execute_operation(rop.CAST_INT_TO_FLOAT, + # [ConstInt(x)], 'float').value + #assert longlong.getrealfloat(res) == float(x) def test_cast_float_to_int(self): if not self.cpu.supports_floats: @@ -1930,9 +1931,10 @@ res = self.execute_operation(rop.CAST_FLOAT_TO_INT, [BoxFloat(v)], 'int').value assert res == int(x) - res = self.execute_operation(rop.CAST_FLOAT_TO_INT, - [ConstFloat(v)], 'int').value - assert res == int(x) + # --- the front-end never generates CAST_FLOAT_TO_INT(Const) + #res = self.execute_operation(rop.CAST_FLOAT_TO_INT, + # [ConstFloat(v)], 'int').value + #assert res == int(x) def test_convert_float_bytes(self): if not self.cpu.supports_floats: diff --git a/rpython/rlib/cache.py b/rpython/rlib/cache.py --- a/rpython/rlib/cache.py +++ b/rpython/rlib/cache.py @@ -44,8 +44,8 @@ return self.content[key] except KeyError: if key in self._building: - raise Exception, "%s recursive building of %r" % ( - self, key) + raise RuntimeError("%s recursive building of %r" % + (self, key)) self._building[key] = True try: result = self._build(key) From noreply at buildbot.pypy.org Fri Jun 7 12:15:04 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 7 Jun 2013 12:15:04 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: Fix test_array.py. Message-ID: <20130607101504.E9A731C1015@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r64817:da366d307364 Date: 2013-06-07 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/da366d307364/ Log: Fix test_array.py. diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -231,7 +231,7 @@ bytes representation. """ cbuf = self._charbuf_start() - s = rffi.charpsize2str(cbuf, self.len * self.itemsize) + s = rffi.charpsize2str(cbuf, self.len * self.itemsize_) self._charbuf_stop() return self.space.wrapbytes(s) @@ -257,15 +257,15 @@ machine values, as if it had been read from a file using the fromfile() method). 
""" - if len(s) % self.itemsize != 0: + if len(s) % self.itemsize_ != 0: msg = 'string length not a multiple of item size' raise OperationError(self.space.w_ValueError, self.space.wrap(msg)) oldlen = self.len - new = len(s) / self.itemsize + new = len(s) / self.itemsize_ self.setlen(oldlen + new) cbuf = self._charbuf_start() for i in range(len(s)): - cbuf[oldlen * self.itemsize + i] = s[i] + cbuf[oldlen * self.itemsize_ + i] = s[i] self._charbuf_stop() @unwrap_spec(n=int) @@ -276,14 +276,14 @@ array. Also called as read. """ try: - size = ovfcheck(self.itemsize * n) + size = ovfcheck(self.itemsize_ * n) except OverflowError: raise MemoryError w_item = space.call_method(w_f, 'read', space.wrap(size)) item = space.bytes_w(w_item) if len(item) < size: - n = len(item) % self.itemsize - elems = max(0, len(item) - (len(item) % self.itemsize)) + n = len(item) % self.itemsize_ + elems = max(0, len(item) - (len(item) % self.itemsize_)) if n != 0: item = item[0:elems] self.descr_frombytes(space, item) @@ -393,7 +393,7 @@ rffi.c_memcpy( rffi.cast(rffi.VOIDP, w_a._buffer_as_unsigned()), rffi.cast(rffi.VOIDP, self._buffer_as_unsigned()), - self.len * self.itemsize + self.len * self.itemsize_ ) return w_a @@ -403,18 +403,18 @@ Byteswap all items of the array. If the items in the array are not 1, 2, 4, or 8 bytes in size, RuntimeError is raised. """ - if self.itemsize not in [1, 2, 4, 8]: + if self.itemsize_ not in [1, 2, 4, 8]: msg = "byteswap not supported for this array" raise OperationError(space.w_RuntimeError, space.wrap(msg)) if self.len == 0: return bytes = self._charbuf_start() - tmp = [bytes[0]] * self.itemsize - for start in range(0, self.len * self.itemsize, self.itemsize): - stop = start + self.itemsize - 1 - for i in range(self.itemsize): + tmp = [bytes[0]] * self.itemsize_ + for start in range(0, self.len * self.itemsize_, self.itemsize_): + stop = start + self.itemsize_ - 1 + for i in range(self.itemsize_): tmp[i] = bytes[start + i] - for i in range(self.itemsize): + for i in range(self.itemsize_): bytes[stop - i] = tmp[i] self._charbuf_stop() From noreply at buildbot.pypy.org Fri Jun 7 12:15:03 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 7 Jun 2013 12:15:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k-memoryview: hg merge py3k Message-ID: <20130607101503.93DA31C055C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k-memoryview Changeset: r64816:5fe7421e014c Date: 2013-06-07 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/5fe7421e014c/ Log: hg merge py3k diff too long, truncating to 2000 out of 26889 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. 
This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. +import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,9 +1,17 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. 
""" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -49,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -71,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,21 +134,25 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', } +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, +} +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -164,7 +168,7 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + return _levelToName.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ @@ -174,8 +178,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
- _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +187,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +281,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
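# --- illustrative sketch, not part of the quoted commit: it only assumes the
# --- _reuse()/_drop() pair on the low-level '_socket.socket' described in the
# --- comment above; the wrapper class below is made up for the example.
class _borrowing_wrapper(object):
    def __init__(self, low_level_sock):
        low_level_sock._reuse()          # take one extra reference up front
        self._sock = low_level_sock

    def close(self):
        sock, self._sock = self._sock, None
        if sock is not None:
            sock._drop()                 # give the reference back exactly once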
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,11 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - self._sock._decref_socketios() + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) 
logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,7 +4,7 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re +import sys, os, re, imp from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version @@ -35,6 +35,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext(Command): @@ -671,10 +676,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. 
If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols(self, ext): diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -1,9 +1,17 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -49,18 +57,14 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') + g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check global _config_vars _config_vars = g @@ -70,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/3/distutils/unixccompiler.py b/lib-python/3/distutils/unixccompiler.py --- a/lib-python/3/distutils/unixccompiler.py +++ b/lib-python/3/distutils/unixccompiler.py @@ -134,7 +134,7 @@ executables['ranlib'] = ["ranlib"] executables['linker_so'] += ['-undefined', 'dynamic_lookup'] - for k, v in executables.iteritems(): + for k, v in executables.items(): if v and v[0] == 'cc': v += ['-arch', arch] diff --git a/lib-python/3/importlib/test/extension/test_case_sensitivity.py b/lib-python/3/importlib/test/extension/test_case_sensitivity.py --- a/lib-python/3/importlib/test/extension/test_case_sensitivity.py +++ b/lib-python/3/importlib/test/extension/test_case_sensitivity.py @@ -9,6 +9,7 @@ @util.case_insensitive_tests class ExtensionModuleCaseSensitivityTest(unittest.TestCase): + @ext_util.skip_unless__testcapi def find_module(self): good_name = ext_util.NAME bad_name = good_name.upper() diff --git a/lib-python/3/importlib/test/extension/test_finder.py b/lib-python/3/importlib/test/extension/test_finder.py --- a/lib-python/3/importlib/test/extension/test_finder.py +++ b/lib-python/3/importlib/test/extension/test_finder.py @@ -8,6 +8,7 @@ """Test the finder for extension modules.""" + @util.skip_unless__testcapi def find_module(self, fullname): importer = _bootstrap._FileFinder(util.PATH, _bootstrap._ExtensionFinderDetails()) diff 
--git a/lib-python/3/importlib/test/extension/test_loader.py b/lib-python/3/importlib/test/extension/test_loader.py --- a/lib-python/3/importlib/test/extension/test_loader.py +++ b/lib-python/3/importlib/test/extension/test_loader.py @@ -11,6 +11,7 @@ """Test load_module() for extension modules.""" + @ext_util.skip_unless__testcapi def load_module(self, fullname): loader = _bootstrap._ExtensionFileLoader(ext_util.NAME, ext_util.FILEPATH) diff --git a/lib-python/3/importlib/test/extension/util.py b/lib-python/3/importlib/test/extension/util.py --- a/lib-python/3/importlib/test/extension/util.py +++ b/lib-python/3/importlib/test/extension/util.py @@ -1,6 +1,7 @@ import imp import os import sys +import unittest PATH = None EXT = None @@ -27,3 +28,8 @@ except StopIteration: pass del _file_exts + + +def skip_unless__testcapi(func): + msg = "Requires the CPython C Extension API ({!r} module)".format(NAME) + return unittest.skipUnless(PATH, msg)(func) diff --git a/lib-python/3/logging/__init__.py b/lib-python/3/logging/__init__.py --- a/lib-python/3/logging/__init__.py +++ b/lib-python/3/logging/__init__.py @@ -129,20 +129,22 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', +} +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, } def getLevelName(level): @@ -159,7 +161,7 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + return _levelToName.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ @@ -169,8 +171,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
- _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -178,9 +180,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv diff --git a/lib-python/3/logging/config.py b/lib-python/3/logging/config.py --- a/lib-python/3/logging/config.py +++ b/lib-python/3/logging/config.py @@ -144,7 +144,7 @@ h = klass(*args) if "level" in section: level = section["level"] - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -191,7 +191,7 @@ log = root if "level" in section: level = section["level"] - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = section["handlers"] @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in section: level = section["level"] - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py --- a/lib-python/3/opcode.py +++ b/lib-python/3/opcode.py @@ -182,5 +182,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/3/test/test_code.py b/lib-python/3/test/test_code.py --- a/lib-python/3/test/test_code.py +++ b/lib-python/3/test/test_code.py @@ -81,7 +81,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') >>> def keywordonly_args(a,b,*,k1): @@ -104,7 +104,10 @@ import unittest import weakref -import _testcapi +try: + import _testcapi +except ImportError: + _testcapi = None from test import support @@ -127,6 +130,7 @@ class CodeTest(unittest.TestCase): + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_newempty(self): co = _testcapi.code_newempty("filename", "funcname", 15) self.assertEqual(co.co_filename, "filename") diff --git a/lib-python/3/test/test_codecs.py b/lib-python/3/test/test_codecs.py --- a/lib-python/3/test/test_codecs.py +++ b/lib-python/3/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, _testcapi, io +import sys, io +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1417,7 +1421,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/3/test/test_descrtut.py b/lib-python/3/test/test_descrtut.py --- a/lib-python/3/test/test_descrtut.py +++ b/lib-python/3/test/test_descrtut.py @@ -188,7 +188,6 @@ '__mul__', '__ne__', '__new__', - '__radd__', '__reduce__', '__reduce_ex__', '__repr__', diff --git a/lib-python/3/test/test_dis.py b/lib-python/3/test/test_dis.py --- a/lib-python/3/test/test_dis.py +++ b/lib-python/3/test/test_dis.py @@ -218,7 
+218,7 @@ Kw-only arguments: 0 Number of locals: 1 Stack size: 4 -Flags: OPTIMIZED, NEWLOCALS, NOFREE +Flags: OPTIMIZED, NEWLOCALS, NOFREE, 0x100000 Constants: 0: %r 1: '__func__' diff --git a/lib-python/3/test/test_exceptions.py b/lib-python/3/test/test_exceptions.py --- a/lib-python/3/test/test_exceptions.py +++ b/lib-python/3/test/test_exceptions.py @@ -6,6 +6,10 @@ import pickle import weakref import errno +try: + import _testcapi +except ImportError: + _testcapi = None from test.support import (TESTFN, unlink, run_unittest, captured_output, gc_collect, cpython_only) @@ -762,6 +766,7 @@ self.assertIn("maximum recursion depth exceeded", str(v)) + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_MemoryError(self): # PyErr_NoMemory always raises the same exception instance. # Check that the traceback is not doubled. @@ -820,6 +825,7 @@ self.assertEqual(error5.a, 1) self.assertEqual(error5.__doc__, "") + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_memory_error_cleanup(self): # Issue #5437: preallocated MemoryError instances should not keep # traceback objects alive. diff --git a/lib-python/3/test/test_logging.py b/lib-python/3/test/test_logging.py --- a/lib-python/3/test/test_logging.py +++ b/lib-python/3/test/test_logging.py @@ -69,7 +69,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() self.logger_states = logger_states = {} for name in saved_loggers: logger_states[name] = getattr(saved_loggers[name], @@ -113,8 +114,10 @@ self.root_logger.setLevel(self.original_logging_level) logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list diff --git a/lib-python/3/test/test_sysconfig.py b/lib-python/3/test/test_sysconfig.py --- a/lib-python/3/test/test_sysconfig.py +++ b/lib-python/3/test/test_sysconfig.py @@ -13,7 +13,7 @@ from test.support import (run_unittest, TESTFN, unlink, get_attribute, captured_stdout, skip_unless_symlink, - impl_detail) + impl_detail, import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -232,7 +232,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/3/test/test_traceback.py b/lib-python/3/test/test_traceback.py --- a/lib-python/3/test/test_traceback.py +++ b/lib-python/3/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print, exception_print +try: + import _testcapi +except ImportError: + _testcapi = None from io import StringIO import sys import unittest @@ -154,6 +157,7 @@ class TracebackFormatTests(unittest.TestCase): 
+ @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_traceback_format(self): try: raise KeyError('blah') @@ -162,7 +166,7 @@ traceback_fmt = 'Traceback (most recent call last):\n' + \ ''.join(traceback.format_tb(tb)) file_ = StringIO() - traceback_print(tb, file_) + _testcapi.traceback_print(tb, file_) python_fmt = file_.getvalue() else: raise Error("unable to create test traceback string") @@ -326,10 +330,11 @@ # This checks built-in reporting by the interpreter. # + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def get_report(self, e): e = self.get_exception(e) with captured_output("stderr") as s: - exception_print(e) + _testcapi.exception_print(e) return s.getvalue() diff --git a/lib-python/3/test/test_unicode.py b/lib-python/3/test/test_unicode.py --- a/lib-python/3/test/test_unicode.py +++ b/lib-python/3/test/test_unicode.py @@ -12,6 +12,10 @@ import warnings from test import support, string_tests import _string +try: + import _testcapi +except ImportError: + _testcapi = None # decorator to skip tests on narrow builds requires_wide_build = unittest.skipIf(sys.maxunicode == 65535, @@ -1659,6 +1663,7 @@ self.assertEqual(text, 'repr=abc\ufffd') # Test PyUnicode_AsWideChar() + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_aswidechar(self): from _testcapi import unicode_aswidechar support.import_module('ctypes') @@ -1696,6 +1701,7 @@ self.assertEqual(wchar, nonbmp + '\0') # Test PyUnicode_AsWideCharString() + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_aswidecharstring(self): from _testcapi import unicode_aswidecharstring support.import_module('ctypes') @@ -1769,6 +1775,7 @@ ]]) self.assertRaises(TypeError, _string.formatter_field_name_split, 1) + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_encode_decimal(self): from _testcapi import unicode_encodedecimal self.assertEqual(unicode_encodedecimal('123'), @@ -1794,6 +1801,7 @@ self.assertEqual(unicode_encodedecimal("123\u20ac\u0660", "replace"), b'123?0') + @unittest.skipUnless(_testcapi, 'Requires _testcapi') def test_transform_decimal(self): from _testcapi import unicode_transformdecimaltoascii as transform_decimal self.assertEqual(transform_decimal('123'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -123,7 +123,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), RegrTest('test_charmapcodec.py', core=True), @@ -163,7 +163,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -28,7 +28,7 @@ # XXX check if typedict['_type_'] is any sane # XXX remember about paramfunc obj = type.__new__(self, name, cls, typedict) - for k, v in d.iteritems(): + for k, v in d.items(): setattr(obj, k, v) if '_type_' in typedict: self.set_type(obj, typedict['_type_']) @@ -119,6 +119,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = 
property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -166,8 +166,7 @@ if self is StructOrUnion: return if '_fields_' not in self.__dict__: - self._fields_ = [] - _set_shape(self, [], self._is_union) + self._fields_ = [] # As a side-effet, this also sets the ffishape. __setattr__ = struct_setattr diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python3') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -60,7 +60,7 @@ return "{}({})".format(name, ', '.join(tmp)) def __reduce__(self): - d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in + d = dict((k, v) for k, v in self.__dict__.items() if k not in ('func', 'args', 'keywords')) if len(d) == 0: d = None diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq 
bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__testcapi'] + '/EXPORT:PyInit_' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,9 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,57 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + 
'.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python3') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:PyInit__testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.6" -__version_info__ = (0, 6) +__version__ = "0.7" +__version_info__ = (0, 7) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -73,15 +73,15 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - BVoidP = self._get_cached_btype(model.voidp_type) + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(BVoidP, 0) + FFI.NULL = self.cast(self.BVoidP, 0) FFI.CData, FFI.CType = backend._get_types() else: # ctypes backend: attach these constants to the instance - self.NULL = self.cast(BVoidP, 0) + self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False): @@ -346,6 +346,12 @@ self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + def _make_ffi_library(ffi, libname, flags): import os @@ -372,8 +378,8 @@ BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) - except KeyError: - raise AttributeError(name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) library.__dict__[name] = value return # diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -16,6 +16,7 @@ class CTypesData(object): __metaclass__ = CTypesType __slots__ = ['__weakref__'] + __name__ = '' def __init__(self, *args): raise TypeError("cannot instantiate %r" % (self.__class__,)) @@ -491,6 +492,8 @@ elif BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char'))): kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' else: kind = 'generic' # @@ -546,13 +549,13 @@ def __setitem__(self, index, value): self._as_ctype_ptr[index] = BItem._to_ctypes(value) - if kind == 'charp': + if kind == 'charp' or kind == 'voidp': @classmethod - def _arg_to_ctypes(cls, value): - if isinstance(value, bytes): - return 
ctypes.c_char_p(value) + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) else: - return super(CTypesPtr, cls)._arg_to_ctypes(value) + return super(CTypesPtr, cls)._arg_to_ctypes(*value) if kind == 'charp' or kind == 'bytep': def _to_string(self, maxlen): diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,6 +15,20 @@ def patch_extension_kwds(self, kwds): pass + def find_module(self, module_name, path, so_suffix): + try: + f, filename, descr = imp.find_module(module_name, path) + except ImportError: + return None + if f is not None: + f.close() + # Note that after a setuptools installation, there are both .py + # and .so files with the same basename. The code here relies on + # imp.find_module() locating the .so in priority. + if descr[0] != so_suffix: + return None + return filename + def collect_types(self): self._typesdict = {} self._generate("collecttype") @@ -142,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -427,9 +444,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -687,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -1,4 +1,4 @@ -import sys +import sys, os import types from . import model, ffiplatform @@ -20,6 +20,16 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) + def find_module(self, module_name, path, so_suffix): + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + return None + def collect_types(self): pass # not needed in the generic engine @@ -64,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -158,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -216,9 +230,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -380,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -427,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -440,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -465,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. @@ -476,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -102,21 +102,10 @@ path = pkg.__path__ else: path = None - try: - f, filename, descr = imp.find_module(self.get_module_name(), - path) - except ImportError: + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffix()) + if filename is None: return - if f is not None: - f.close() - if filename.lower().endswith('.py'): - # on PyPy, if there are both .py and .pypy-19.so files in - # the same directory, the .py file is returned. 
That's the - # case after a setuptools installation. We never want to - # load the .py file here... - filename = filename[:-3] + _get_so_suffix() - if not os.path.isfile(filename): - return self.modulefilename = filename self._vengine.collect_types() self._has_module = True diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/TODO b/pypy/TODO --- a/pypy/TODO +++ b/pypy/TODO @@ -1,18 +1,5 @@ TODO for the python3 test suite: -* test_decimal: - In py3k, hash(-1) is now -2 (probably as an optimisation, because - PyObject_Hash() return -1 on exception). - It's important to be compatible, since other classes like Decimal - and Fractions have to return the same hashes for equivalent values. - IOW: int.__hash__ is part of the Python language specification. - The py3k-newhash branch has an updated float hash, int's hash is - still pending - -* test_fractions -* test_numeric_tower - float.__hash__ has changed as well (fixed on py3k-newhash) - * test_float nan = float('nan'); assert nan in [nan] This has always been true in CPython, it is now guaranteed that the @@ -25,11 +12,6 @@ Needs bytes/str changes. Probably easy. Work for this has begun on py3k-memoryview (by mjacob) -* test_peepholer - 'a in [1,2,3]' is rewritten as 'a in (1, 2, 3)' - and the tuple is a prebuilt constant. - Likewise, a set becomes a frozenset. - * test_pep263 Tracebacks should be able to print unicode source code. This is really due to the tokenizer not being fully unicode aware. The @@ -45,16 +27,8 @@ own-tests: * module/test_lib_pypy - These crash the buildbots (via SyntaxErrors): some were really made - to run under Python 2.x - -* interpreter.test.test_zzpickle_and_slow test_pickle_frame_with_exc - Due to W_OperationError not being pickleable. 
Probably be best for - the ExceptionHandlerBlock to push *sys.exc_info() instead of it, - like CPython does - -* module.bz2.test.test_bz2_file test_open_non_existent - Some really obscure GC stuff + These crash the buildbots (via SyntaxErrors): others were really + made to run under Python 2.x and so simply fail * module.cpyext.test.test_structseq test_StructSeq structseq now subclasses tuple on py3, which breaks how diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -67,7 +67,8 @@ del working_modules["_minimal_curses"] del working_modules["_posixsubprocess"] -# del working_modules["cppyy"] # not tested on win32 + if "cppyy" in working_modules: + del working_modules["cppyy"] # not tested on win32 # The _locale module is needed by site.py on Windows default_modules["_locale"] = None @@ -80,7 +81,8 @@ del working_modules["_minimal_curses"] del working_modules["termios"] del working_modules["_multiprocessing"] # depends on rctime -# del working_modules["cppyy"] # depends on ctypes + if "cppyy" in working_modules: + del working_modules["cppyy"] # depends on ctypes module_dependencies = { @@ -123,12 +125,10 @@ __import__(name) except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ - config.add_warning( + raise Exception( "The module %r is disabled\n" % (modname,) + "because importing %s raised %s\n" % (name, errcls) + str(e)) - raise ConflictConfigError("--withmod-%s: %s" % (modname, - errcls)) return validator else: return None @@ -215,10 +215,6 @@ "(the empty string and potentially single-char strings)", default=False), - BoolOption("withsmalltuple", - "use small tuples", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -356,6 +352,7 @@ # ignore names from 'essential_modules', notably 'exceptions', which # may not be present in config.objspace.usemodules at all modules = [name for name in modules if name not in essential_modules] + config.objspace.usemodules.suggest(**dict.fromkeys(modules, True)) def enable_translationmodules(config): diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? 
+ ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,8 +339,9 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins work too, but the mixed in class needs a ``_mixin_ = True`` - class attribute ++ simple mixins somewhat work too, but the mixed in class needs a + ``_mixin_ = True`` class attribute. isinstance checks against the + mixin type will fail when translated. + classes are first-class objects too diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.0.0' +release = '2.0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -163,6 +163,9 @@ $ genreflex MyClass.h $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex +Next, make sure that the library can be found through the dynamic lookup path +(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), +for example by adding ".". Now you're ready to use the bindings. Since the bindings are designed to look pythonistic, it should be straightforward:: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -8,7 +8,8 @@ interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part is that Python is a meta-programming language for RPython, that is, -RPython is considered from live objects **after** the imports are done. +"being valid RPython" is a question that only makes sense on the +live objects **after** the imports are done. This might require more explanation. You start writing RPython from ``entry_point``, a good starting point is ``rpython/translator/goal/targetnopstandalone.py``. This does not do all that @@ -37,7 +38,7 @@ In this example ``entry_point`` is RPython, ``add`` and ``sub`` are RPython, however, ``generator`` is not. -A good introductory level articles are available: +The following introductory level articles are available: * Laurence Tratt -- `Fast Enough VMs in Fast Enough Time`_. diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -28,7 +28,8 @@ Layers ------ -PyPy has layers. Those layers help us keep the respective parts separated enough +PyPy has layers. Just like Ogres or onions. +Those layers help us keep the respective parts separated enough to be worked on independently and make the complexity manageable. This is, again, just a sanity requirement for such a complex project. 
For example writing a new optimization for the JIT usually does **not** involve touching a Python diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -22,7 +22,8 @@ will capture the revision number of this change for the release; some of the next updates may be done before or after branching; make sure things are ported back to the trunk and to the branch as - necessary + necessary; also update the version number in pypy/doc/conf.py, + and in pypy/doc/index.rst * update pypy/doc/contributor.rst (and possibly LICENSE) * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0`_: the latest official release +* `Release 2.0.2`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0`: http://pypy.org/download.html +.. _`Release 2.0.2`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/doc/release-2.0.1.rst b/pypy/doc/release-2.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.1.rst @@ -0,0 +1,46 @@ +============================== +PyPy 2.0.1 - Bohr Smørrebrød +============================== + +We're pleased to announce PyPy 2.0.1. This is a stable bugfix release +over `2.0`_. You can download it here: + + http://pypy.org/download.html + +The fixes are mainly about fatal errors or crashes in our stdlib. See +below for more details. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. 
+ From noreply at buildbot.pypy.org Fri Jun 7 16:30:07 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Jun 2013 16:30:07 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: fix test for -A Message-ID: <20130607143007.AEC001C328A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r64818:5e7a972915e0 Date: 2013-06-07 09:55 +0300 http://bitbucket.org/pypy/pypy/changeset/5e7a972915e0/ Log: fix test for -A diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -57,7 +57,7 @@ for x in nditer(a, flags=['external_loop'], order='F'): r.append(x) n += 1 - assert n == 3 + assert n == 6 assert (array(r) == [[0, 6], [2, 8], [4, 10], [1, 7], [3, 9], [5, 11]]).all() def test_interface(self): From noreply at buildbot.pypy.org Fri Jun 7 16:30:09 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Jun 2013 16:30:09 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: progress toward extern_loop Message-ID: <20130607143009.1B79F1C328A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: numpypy-nditer Changeset: r64819:730edc51d980 Date: 2013-06-07 17:19 +0300 http://bitbucket.org/pypy/pypy/changeset/730edc51d980/ Log: progress toward extern_loop diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -4,7 +4,7 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement_multiple) + shape_agreement_multiple, calc_steps) from pypy.module.micronumpy.iter import MultiDimViewIterator, SliceIterator from pypy.module.micronumpy import support from pypy.module.micronumpy.arrayimpl.concrete import SliceArray @@ -229,11 +229,17 @@ self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.external_loop: - #XXX find longest contiguous shape - iter_shape = iter_shape[1:] + steps = [] + for seq in self.seq: + impl = seq.implementation + steps.append(calc_steps(impl.shape, impl.strides, self.order)) + #XXX #find longest contiguous shape + print 'steps',steps,'tier_shape',iter_shape + iter_shape = [1] for i in range(len(self.seq)): self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i].implementation, iter_shape), self.op_flags[i])) + self.seq[i].implementation, iter_shape), + self.op_flags[i])) def descr_iter(self, space): return space.wrap(self) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -51,8 +51,8 @@ rstrides.append(strides[i]) rbackstrides.append(backstrides[i]) if backwards: - rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) - rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) + rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) + rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) else: rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides @@ -62,7 +62,7 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, 
W_NDimArray) or + isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False return True @@ -293,3 +293,27 @@ rbackstrides[i] = backstrides[j] j += 1 return rstrides, rbackstrides + + at jit.unroll_safe +def calc_steps(shape, strides, order='C'): + steps = [] + if order == 'K': + if strides[0] < strides[-1]: + order = 'F' + else: + order = 'C' + if order == 'F' or order == 'A': + last_step = strides[0] + for i in range(len(shape)): + steps.append(strides[i] / last_step) + last_step *= shape[i] + if order == 'A': + pass + #XXX test for all(steps==steps[0]) + elif order == 'C': + last_step = strides[-1] + for i in range(len(shape) - 1, -1, -1): + steps.insert(0, strides[i] / last_step) + last_step *= shape[i] + return steps + From noreply at buildbot.pypy.org Fri Jun 7 16:30:10 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Jun 2013 16:30:10 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: add a test that corrupts memory Message-ID: <20130607143010.5FFE91C328A@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64820:f5b13c9f2485 Date: 2013-06-07 17:24 +0300 http://bitbucket.org/pypy/pypy/changeset/f5b13c9f2485/ Log: add a test that corrupts memory diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -20,7 +20,7 @@ def make_sort_function(space, itemtype, comp_type, count=1): TP = itemtype.T step = rffi.sizeof(TP) - + class Repr(object): def __init__(self, index_stride_size, stride_size, size, values, indexes, index_start, start): @@ -32,7 +32,12 @@ self.values = values self.indexes = indexes + def __del__(self): + print 'Repr.del',self.values + def getitem(self, item): + #print 'getting',item,'of',self.size,self.values + #print 'from',item*self.stride_size + self.start,'to',item*(self.stride_size+1) + self.start if count < 2: v = raw_storage_getitem(TP, self.values, item * self.stride_size + self.start) @@ -71,11 +76,11 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - self.indexes = dtype.itemtype.malloc(size*dtype.get_size()) - self.values = alloc_raw_storage(size * stride_size, + indexes = dtype.itemtype.malloc(size*dtype.get_size()) + values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, index_stride_size, stride_size, - size, self.values, self.indexes, start, start) + Repr.__init__(self, index_stride_size, stride_size, + size, values, indexes, start, start) def __del__(self): free_raw_storage(self.indexes, track_allocation=False) @@ -91,12 +96,14 @@ return lst.size def arg_getitem_slice(lst, start, stop): + print 'arg_getitem_slice',lst.values retval = ArgArrayRepWithStorage(lst.index_stride_size, lst.stride_size, stop-start) for i in range(stop-start): retval.setitem(i, lst.getitem(i+start)) + print 'arg_getitem_slice done',lst return retval - + if count < 2: def arg_lt(a, b): # Does numpy do <= ? @@ -108,7 +115,7 @@ return True elif a[0][i] > b[0][i]: return False - # Does numpy do True? + # Does numpy do True? 
return False ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, @@ -148,13 +155,16 @@ stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] + print '5' while not iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, arr.get_storage(), storage, index_iter.offset, iter.offset) + print '6' ArgSort(r).sort() + print '7' iter.next() index_iter.next() return index_arr @@ -180,7 +190,7 @@ class SortCache(object): built = False - + def __init__(self, space): if self.built: return diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2496,6 +2496,13 @@ b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + def test_argsort_random(self): + from numpypy import array + from _random import Random + rnd = Random(1) + a = array([rnd.random() for i in range(512*2)]).reshape(512,2) + a.argsort() + def test_argsort_axis(self): from numpypy import array a = array([[4, 2], [1, 3]]) From noreply at buildbot.pypy.org Fri Jun 7 16:44:34 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 7 Jun 2013 16:44:34 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Implement the external_loop flag on the nditer class Message-ID: <20130607144434.DDEC81C331A@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64821:4bea52d36621 Date: 2013-06-07 16:36 +0200 http://bitbucket.org/pypy/pypy/changeset/4bea52d36621/ Log: Implement the external_loop flag on the nditer class diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -32,13 +32,13 @@ self.it.next() def getitem(self, space, array): - return self.op_flags.get_it_item(space, array, self.it) + return self.op_flags.get_it_item[self.index](space, array, self.it) -class BoxIterator(IteratorMixin): - pass +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 -class SliceIterator(IteratorMixin): - pass +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): ret = [] @@ -73,7 +73,7 @@ self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = get_readonly_item + self.get_it_item = (get_readonly_item, get_readonly_slice) def get_readonly_item(space, array, it): return space.wrap(it.getitem()) @@ -128,9 +128,9 @@ raise OperationError(space.w_ValueError, space.wrap( 'op_flags must be a tuple or array of per-op flag-tuples')) if op_flag.rw == 'r': - op_flag.get_it_item = get_readonly_item + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) elif op_flag.rw == 'rw': - op_flag.get_it_item = get_readwrite_item + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) return op_flag def parse_func_flags(space, nditer, w_flags): @@ -180,7 +180,8 @@ 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' 'multi-index is being tracked')) -def get_iter(space, order, imp, shape): +def get_iter(space, order, arr, shape): + imp = arr.implementation if order == 'K' or (order == 'C' and imp.order == 'C'): backward = False elif order =='F' and imp.order == 'C': @@ -201,6 +202,18 @@ shape, backward) return MultiDimViewIterator(imp, 
imp.dtype, imp.start, r[0], r[1], shape) +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + if order == 'K' or (order == 'C' and imp.order == 'C'): + backward = False + elif order =='F' and imp.order == 'C': + backward = True + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + class W_NDIter(W_Root): @@ -229,11 +242,13 @@ self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.external_loop: - #XXX find longest contiguous shape - iter_shape = iter_shape[1:] - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i].implementation, iter_shape), self.op_flags[i])) + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) def descr_iter(self, space): return space.wrap(self) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy import support from rpython.rlib import jit # structures to describe slicing @@ -267,28 +268,49 @@ self.offset %= self.size class SliceIterator(object): - def __init__(self, arr, stride, backstride, shape, dtype=None): - self.step = 0 + def __init__(self, arr, strides, backstrides, shape, order="C", backward=False, dtype=None): + self.indexes = [0] * (len(shape) - 1) + self.offset = 0 self.arr = arr - self.stride = stride - self.backstride = backstride - self.shape = shape if dtype is None: dtype = arr.implementation.dtype + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.get_size()] + self.strides = strides[1:][::-1] + self.backstrides = backstrides[1:][::-1] + self.shape = shape[1:][::-1] + self.shapelen = len(self.shape) + else: + shape = [support.product(shape)] + self.strides, self.backstrides = support.calc_strides(shape, dtype, order) + self.slicesize = support.product(shape) + self.shapelen = 0 + self.gap = self.strides self.dtype = dtype self._done = False - def done(): + def done(self): return self._done - def next(): - self.step += self.arr.implementation.dtype.get_size() - if self.step == self.backstride - self.implementation.dtype.get_size(): + @jit.unroll_safe + def next(self): + offset = self.offset + for i in range(self.shapelen - 1, -1, -1): + if self.indexes[i] < self.shape[i] - 1: + self.indexes[i] += 1 + offset += self.strides[i] + break + else: + self.indexes[i] = 0 + offset -= self.backstrides[i] + else: self._done = True + self.offset = offset def getslice(self): from pypy.module.micronumpy.arrayimpl.concrete import SliceArray - return SliceArray(self.step, [self.stride], [self.backstride], self.shape, self.arr.implementation, self.arr, self.dtype) + return SliceArray(self.offset, self.gap, self.backstrides, [self.slicesize], self.arr.implementation, self.arr, self.dtype) class AxisIterator(base.BaseArrayIterator): def __init__(self, array, shape, dim, cumultative): diff --git a/pypy/module/micronumpy/test/test_nditer.py 
b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -44,21 +44,22 @@ def test_external_loop(self): from numpypy import arange, nditer, array - a = arange(12).reshape(2,3,2) + a = arange(24).reshape(2, 3, 4) r = [] n = 0 for x in nditer(a, flags=['external_loop']): r.append(x) n += 1 + print r assert n == 1 - assert (array(r) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]).all() + assert (array(r) == range(24)).all() r = [] n = 0 for x in nditer(a, flags=['external_loop'], order='F'): r.append(x) n += 1 - assert n == 3 - assert (array(r) == [[0, 6], [2, 8], [4, 10], [1, 7], [3, 9], [5, 11]]).all() + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() def test_interface(self): from numpypy import arange, nditer, zeros From noreply at buildbot.pypy.org Fri Jun 7 16:44:36 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 7 Jun 2013 16:44:36 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Backed out changeset 730edc51d980 Message-ID: <20130607144436.1BED41C331A@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64822:7107aa4daf89 Date: 2013-06-07 16:40 +0200 http://bitbucket.org/pypy/pypy/changeset/7107aa4daf89/ Log: Backed out changeset 730edc51d980 diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -4,7 +4,7 @@ from pypy.interpreter.error import OperationError from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.strides import (calculate_broadcast_strides, - shape_agreement_multiple, calc_steps) + shape_agreement_multiple) from pypy.module.micronumpy.iter import MultiDimViewIterator, SliceIterator from pypy.module.micronumpy import support from pypy.module.micronumpy.arrayimpl.concrete import SliceArray @@ -229,17 +229,11 @@ self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.external_loop: - steps = [] - for seq in self.seq: - impl = seq.implementation - steps.append(calc_steps(impl.shape, impl.strides, self.order)) - #XXX #find longest contiguous shape - print 'steps',steps,'tier_shape',iter_shape - iter_shape = [1] + #XXX find longest contiguous shape + iter_shape = iter_shape[1:] for i in range(len(self.seq)): self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i].implementation, iter_shape), - self.op_flags[i])) + self.seq[i].implementation, iter_shape), self.op_flags[i])) def descr_iter(self, space): return space.wrap(self) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -51,8 +51,8 @@ rstrides.append(strides[i]) rbackstrides.append(backstrides[i]) if backwards: - rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) - rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) + rstrides = rstrides + [0] * (len(res_shape) - len(orig_shape)) + rbackstrides = rbackstrides + [0] * (len(res_shape) - len(orig_shape)) else: rstrides = [0] * (len(res_shape) - len(orig_shape)) + rstrides rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides @@ -62,7 +62,7 @@ if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if 
(space.isinstance_w(w_elem, space.w_tuple) or - isinstance(w_elem, W_NDimArray) or + isinstance(w_elem, W_NDimArray) or space.isinstance_w(w_elem, space.w_list)): return False return True @@ -293,27 +293,3 @@ rbackstrides[i] = backstrides[j] j += 1 return rstrides, rbackstrides - - at jit.unroll_safe -def calc_steps(shape, strides, order='C'): - steps = [] - if order == 'K': - if strides[0] < strides[-1]: - order = 'F' - else: - order = 'C' - if order == 'F' or order == 'A': - last_step = strides[0] - for i in range(len(shape)): - steps.append(strides[i] / last_step) - last_step *= shape[i] - if order == 'A': - pass - #XXX test for all(steps==steps[0]) - elif order == 'C': - last_step = strides[-1] - for i in range(len(shape) - 1, -1, -1): - steps.insert(0, strides[i] / last_step) - last_step *= shape[i] - return steps - From noreply at buildbot.pypy.org Fri Jun 7 16:44:37 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 7 Jun 2013 16:44:37 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Merge heads Message-ID: <20130607144437.5A9961C331A@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64823:000f7ae8ea0c Date: 2013-06-07 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/000f7ae8ea0c/ Log: Merge heads diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -32,13 +32,13 @@ self.it.next() def getitem(self, space, array): - return self.op_flags.get_it_item(space, array, self.it) + return self.op_flags.get_it_item[self.index](space, array, self.it) -class BoxIterator(IteratorMixin): - pass +class BoxIterator(IteratorMixin, AbstractIterator): + index = 0 -class SliceIterator(IteratorMixin): - pass +class ExternalLoopIterator(IteratorMixin, AbstractIterator): + index = 1 def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): ret = [] @@ -73,7 +73,7 @@ self.native_byte_order = False self.tmp_copy = '' self.allocate = False - self.get_it_item = get_readonly_item + self.get_it_item = (get_readonly_item, get_readonly_slice) def get_readonly_item(space, array, it): return space.wrap(it.getitem()) @@ -128,9 +128,9 @@ raise OperationError(space.w_ValueError, space.wrap( 'op_flags must be a tuple or array of per-op flag-tuples')) if op_flag.rw == 'r': - op_flag.get_it_item = get_readonly_item + op_flag.get_it_item = (get_readonly_item, get_readonly_slice) elif op_flag.rw == 'rw': - op_flag.get_it_item = get_readwrite_item + op_flag.get_it_item = (get_readwrite_item, get_readwrite_slice) return op_flag def parse_func_flags(space, nditer, w_flags): @@ -180,7 +180,8 @@ 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' 'multi-index is being tracked')) -def get_iter(space, order, imp, shape): +def get_iter(space, order, arr, shape): + imp = arr.implementation if order == 'K' or (order == 'C' and imp.order == 'C'): backward = False elif order =='F' and imp.order == 'C': @@ -201,6 +202,18 @@ shape, backward) return MultiDimViewIterator(imp, imp.dtype, imp.start, r[0], r[1], shape) +def get_external_loop_iter(space, order, arr, shape): + imp = arr.implementation + if order == 'K' or (order == 'C' and imp.order == 'C'): + backward = False + elif order =='F' and imp.order == 'C': + backward = True + else: + raise OperationError(space.w_NotImplementedError, space.wrap( + 'not implemented yet')) + + return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) + 
class W_NDIter(W_Root): @@ -229,11 +242,13 @@ self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) if self.external_loop: - #XXX find longest contiguous shape - iter_shape = iter_shape[1:] - for i in range(len(self.seq)): - self.iters.append(BoxIterator(get_iter(space, self.order, - self.seq[i].implementation, iter_shape), self.op_flags[i])) + for i in range(len(self.seq)): + self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) + else: + for i in range(len(self.seq)): + self.iters.append(BoxIterator(get_iter(space, self.order, + self.seq[i], iter_shape), self.op_flags[i])) def descr_iter(self, space): return space.wrap(self) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -46,6 +46,7 @@ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.arrayimpl import base +from pypy.module.micronumpy import support from rpython.rlib import jit # structures to describe slicing @@ -267,28 +268,49 @@ self.offset %= self.size class SliceIterator(object): - def __init__(self, arr, stride, backstride, shape, dtype=None): - self.step = 0 + def __init__(self, arr, strides, backstrides, shape, order="C", backward=False, dtype=None): + self.indexes = [0] * (len(shape) - 1) + self.offset = 0 self.arr = arr - self.stride = stride - self.backstride = backstride - self.shape = shape if dtype is None: dtype = arr.implementation.dtype + if backward: + self.slicesize = shape[0] + self.gap = [support.product(shape[1:]) * dtype.get_size()] + self.strides = strides[1:][::-1] + self.backstrides = backstrides[1:][::-1] + self.shape = shape[1:][::-1] + self.shapelen = len(self.shape) + else: + shape = [support.product(shape)] + self.strides, self.backstrides = support.calc_strides(shape, dtype, order) + self.slicesize = support.product(shape) + self.shapelen = 0 + self.gap = self.strides self.dtype = dtype self._done = False - def done(): + def done(self): return self._done - def next(): - self.step += self.arr.implementation.dtype.get_size() - if self.step == self.backstride - self.implementation.dtype.get_size(): + @jit.unroll_safe + def next(self): + offset = self.offset + for i in range(self.shapelen - 1, -1, -1): + if self.indexes[i] < self.shape[i] - 1: + self.indexes[i] += 1 + offset += self.strides[i] + break + else: + self.indexes[i] = 0 + offset -= self.backstrides[i] + else: self._done = True + self.offset = offset def getslice(self): from pypy.module.micronumpy.arrayimpl.concrete import SliceArray - return SliceArray(self.step, [self.stride], [self.backstride], self.shape, self.arr.implementation, self.arr, self.dtype) + return SliceArray(self.offset, self.gap, self.backstrides, [self.slicesize], self.arr.implementation, self.arr, self.dtype) class AxisIterator(base.BaseArrayIterator): def __init__(self, array, shape, dim, cumultative): diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -44,21 +44,21 @@ def test_external_loop(self): from numpypy import arange, nditer, array - a = arange(12).reshape(2,3,2) + a = arange(24).reshape(2, 3, 4) r = [] n = 0 for x in nditer(a, flags=['external_loop']): r.append(x) n += 1 assert n == 1 - assert (array(r) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]).all() + assert 
(array(r) == range(24)).all() r = [] n = 0 for x in nditer(a, flags=['external_loop'], order='F'): r.append(x) n += 1 - assert n == 6 - assert (array(r) == [[0, 6], [2, 8], [4, 10], [1, 7], [3, 9], [5, 11]]).all() + assert n == 12 + assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() def test_interface(self): from numpypy import arange, nditer, zeros From noreply at buildbot.pypy.org Fri Jun 7 23:21:21 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 7 Jun 2013 23:21:21 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Fix translation Message-ID: <20130607212121.13D651C06B1@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64824:f406998d825f Date: 2013-06-07 23:20 +0200 http://bitbucket.org/pypy/pypy/changeset/f406998d825f/ Log: Fix translation diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -16,7 +16,7 @@ def next(self): raise NotImplementedError("Abstract Class") - def getitem(self, array): + def getitem(self, space, array): raise NotImplementedError("Abstract Class") class IteratorMixin(object): @@ -56,8 +56,8 @@ if len(w_lst) != n: raise OperationError(space.w_ValueError, space.wrap( '%s must be a tuple or array of per-op flag-tuples' % name)) - for item in space.listview(w_lst): - ret.append(parse_one_arg(space, item)) + for item in w_lst: + ret.append(parse_one_arg(space, space.listview(item))) else: op_flag = parse_one_arg(space, w_lst) for i in range(n): @@ -174,7 +174,7 @@ nditer.zerosize_ok = True else: raise OperationError(space.w_ValueError, space.wrap( - 'Unexpected iterator global flag "%s"', item)) + 'Unexpected iterator global flag "%s"' % item)) if nditer.tracked_index and nditer.external_loop: raise OperationError(space.w_ValueError, space.wrap( 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' @@ -192,9 +192,9 @@ if (imp.strides[0] < imp.strides[-1] and not backward) or \ (imp.strides[0] > imp.strides[-1] and backward): # flip the strides. Is this always true for multidimension? 
- strides = [s for s in imp.strides[::-1]] - backstrides = [s for s in imp.backstrides[::-1]] - shape = [s for s in shape[::-1]] + strides = [imp.strides[i] for i in range(len(imp.strides) - 1, -1, -1)] + backstrides = [imp.backstrides[i] for i in range(len(imp.backstrides) - 1, -1, -1)] + shape = [imp.shape[i] for i in range(len(imp.shape) - 1, -1, -1)] else: strides = imp.strides backstrides = imp.backstrides diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -277,9 +277,12 @@ if backward: self.slicesize = shape[0] self.gap = [support.product(shape[1:]) * dtype.get_size()] - self.strides = strides[1:][::-1] - self.backstrides = backstrides[1:][::-1] - self.shape = shape[1:][::-1] + self.strides = strides[1:] + self.backstrides = backstrides[1:] + self.shape = shape[1:] + self.strides.reverse() + self.backstrides.reverse() + self.shape.reverse() self.shapelen = len(self.shape) else: shape = [support.product(shape)] From noreply at buildbot.pypy.org Sat Jun 8 11:25:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 11:25:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Move this logic after the checks, otherwise errors might be hidden Message-ID: <20130608092503.354F61C009D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64825:0b26d350de79 Date: 2013-06-08 11:24 +0200 http://bitbucket.org/pypy/pypy/changeset/0b26d350de79/ Log: Move this logic after the checks, otherwise errors might be hidden (on Windows). (This follows cffi/0b90939873ae) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -289,16 +289,6 @@ "with verify() (see pypy/module/_cffi_backend/ctypefunc.py " "for details)")) - if USE_C_LIBFFI_MSVC and is_result_type: - # MSVC returns small structures in registers. Pretend int32 or - # int64 return type. This is needed as a workaround for what - # is really a bug of libffi_msvc seen as an independent library - # (ctypes has a similar workaround). - if ctype.size <= 4: - return clibffi.ffi_type_sint32 - if ctype.size <= 8: - return clibffi.ffi_type_sint64 - # walk the fields, expanding arrays into repetitions; first, # only count how many flattened fields there are nflat = 0 @@ -318,6 +308,16 @@ "a struct with a zero-length array")) nflat += flat + if USE_C_LIBFFI_MSVC and is_result_type: + # MSVC returns small structures in registers. Pretend int32 or + # int64 return type. This is needed as a workaround for what + # is really a bug of libffi_msvc seen as an independent library + # (ctypes has a similar workaround). + if ctype.size <= 4: + return clibffi.ffi_type_sint32 + if ctype.size <= 8: + return clibffi.ffi_type_sint64 + # allocate an array of (nflat + 1) ffi_types elements = self.fb_alloc(rffi.sizeof(FFI_TYPE_P) * (nflat + 1)) elements = rffi.cast(FFI_TYPE_PP, elements) From noreply at buildbot.pypy.org Sat Jun 8 11:26:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 11:26:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Import cffi/28f10889b5aa. Message-ID: <20130608092649.DE95A1C009D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64826:c7cda7189297 Date: 2013-06-08 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/c7cda7189297/ Log: Import cffi/28f10889b5aa. 
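The api.py hunk below changes the fallback used by ffi.dlopen(): when ctypes.util.find_library() cannot locate the library either, the original OSError (which names the library that was actually requested) is propagated instead of raising a second, less informative one. A minimal sketch of the resulting lookup order — the helper name and the 'backend' parameter are illustrative only, not part of cffi's public API::

    import ctypes.util

    def load_library_with_fallback(backend, name, flags=0):
        # 'backend' is assumed to expose load_library(path, flags),
        # as the cffi backends do; this helper name is made up.
        try:
            if '.' not in name and '/' not in name:
                # A bare name ("m", "c", ...) is never tried directly:
                # go straight to the find_library() fallback below.
                raise OSError("library not found: %r" % (name,))
            return backend.load_library(name, flags)
        except OSError:
            path = ctypes.util.find_library(name)
            if path is None:
                raise   # re-raise the original OSError, not a new one
            return backend.load_library(path, flags)

The bare ``raise`` keeps the message and traceback of the first failure, which is the more useful error when the caller gave an explicit name.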
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -41,40 +41,43 @@ class TestBitfield: def check(self, source, expected_ofs_y, expected_align, expected_size): + # NOTE: 'expected_*' is the numbers expected from GCC. + # The numbers expected from MSVC are not explicitly written + # in this file, and will just be taken from the compiler. ffi = FFI() ffi.cdef("struct s1 { %s };" % source) ctype = ffi.typeof("struct s1") # verify the information with gcc - if sys.platform != "win32": - ffi1 = FFI() - ffi1.cdef(""" - static const int Gofs_y, Galign, Gsize; - struct s1 *try_with_value(int fieldnum, long long value); - """) - fnames = [name for name, cfield in ctype.fields - if name and cfield.bitsize > 0] - setters = ['case %d: s.%s = value; break;' % iname - for iname in enumerate(fnames)] - lib = ffi1.verify(""" - struct s1 { %s }; - struct sa { char a; struct s1 b; }; - #define Gofs_y offsetof(struct s1, y) - #define Galign offsetof(struct sa, b) - #define Gsize sizeof(struct s1) - struct s1 *try_with_value(int fieldnum, long long value) - { - static struct s1 s; - memset(&s, 0, sizeof(s)); - switch (fieldnum) { %s } - return &s; - } - """ % (source, ' '.join(setters))) - assert lib.Gofs_y == expected_ofs_y - assert lib.Galign == expected_align - assert lib.Gsize == expected_size + ffi1 = FFI() + ffi1.cdef(""" + static const int Gofs_y, Galign, Gsize; + struct s1 *try_with_value(int fieldnum, long long value); + """) + fnames = [name for name, cfield in ctype.fields + if name and cfield.bitsize > 0] + setters = ['case %d: s.%s = value; break;' % iname + for iname in enumerate(fnames)] + lib = ffi1.verify(""" + struct s1 { %s }; + struct sa { char a; struct s1 b; }; + #define Gofs_y offsetof(struct s1, y) + #define Galign offsetof(struct sa, b) + #define Gsize sizeof(struct s1) + struct s1 *try_with_value(int fieldnum, long long value) + { + static struct s1 s; + memset(&s, 0, sizeof(s)); + switch (fieldnum) { %s } + return &s; + } + """ % (source, ' '.join(setters))) + if sys.platform == 'win32': + expected_ofs_y = lib.Gofs_y + expected_align = lib.Galign + expected_size = lib.Gsize else: - lib = None - fnames = None + assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( + expected_ofs_y, expected_align, expected_size) # the real test follows assert ffi.offsetof("struct s1", "y") == expected_ofs_y assert ffi.alignof("struct s1") == expected_align @@ -99,10 +102,9 @@ setattr(s, name, value) assert getattr(s, name) == value raw1 = ffi.buffer(s)[:] - if lib is not None: - t = lib.try_with_value(fnames.index(name), value) - raw2 = ffi.buffer(t, len(raw1))[:] - assert raw1 == raw2 + t = lib.try_with_value(fnames.index(name), value) + raw2 = ffi.buffer(t, len(raw1))[:] + assert raw1 == raw2 def test_bitfield_basic(self): self.check("int a; int 
b:9; int c:20; int y;", 8, 4, 12) @@ -136,9 +138,11 @@ L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) self.check("char x; int :0; char y;", 4, 1, 5) + self.check("char x; int :0; int :0; char y;", 4, 1, 5) self.check("char x; long long :0; char y;", L, 1, L + 1) self.check("short x, y; int :0; int :0;", 2, 2, 4) self.check("char x; int :0; short b:1; char y;", 5, 2, 6) + self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) def test_error_cases(self): ffi = FFI() From noreply at buildbot.pypy.org Sat Jun 8 15:17:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 15:17:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130608131701.3F6691C125E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r78:98c0b1dadf35 Date: 2013-06-08 15:14 +0200 http://bitbucket.org/pypy/stmgc/changeset/98c0b1dadf35/ Log: in-progress diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -1,3 +1,22 @@ + +Design goal +----------- + +stm_read_barrier(P) -> P: the read barrier (containing a call in the +slow path) can be applied on a pointer to an object, and returns a +possibly different pointer. Afterwards, any reads from the object can +be done normally (using the returned pointer). + +stm_write_barrier(P) -> P: the same for writes (actually read/write mode). + +The returned pointers are valid until a potential transaction break --- +with the exception that the result of stm_read_barrier() will be +invalidated by a stm_write_barrier() done on the same object. + +This means we must not modify an object in-place from thread A when +thread B might be reading from it! It is the basis for the design +outlined in the sequel, in which "protected" objects are seen by only +one thread, whereas "public" objects are seen by all threads. @@ -8,29 +27,19 @@ Private freshly created \ Private, with backup - \ ^ . | ^ - \ / . commit | | - commit \ modify / . | | - \ / . commit | | modify - V / V | | - Protected, no backup V | + \ ^ | ^ + \ / commit | | + commit \ modify / | | + \ / | | modify + V / | | + Protected, no backup V | ^ ^ Protected, with backup / | gc | commit / `----------------' / / - Private copy of (the dotted arrow is followed if the - a public obj protected backup copy was stolen) - - - - Protected backup copy - \ - \ - stealing \ commit of newer version - \ ,-----------------. 
- V | V - Up-to-date public copy Outdated public copy + Private copy of + a public obj @@ -45,6 +54,7 @@ Protected objects: - converted from fresh private obj (old PRN) - converted from a private obj with backup ptr to backup +- converted from a private obj from public GT - backup copy of a private obj original h_revision - backup copy still attached to a protected GT - original obj after GC killed the backup GT @@ -73,13 +83,15 @@ - the PRN (private revision number): odd, negative, changes for every transaction that commits -- dict active_backup_copies = {private converted from protected: backup copy} +- list active_backup_copies = + [(private converted from protected, backup copy)] - dict public_to_private = {public obj: private copy} - list read_set containing the objects in the read set, with possibly some duplicates (but hopefully not too many) +- list stolen_objects = [(priv/prot object, public copy)] Kind of object copy distinguishing feature @@ -154,6 +166,8 @@ update the original P->h_revision to point directly to the new public copy + add (P, new public copy) to stolen_objects + Write barrier diff --git a/c4/doc-stmgc.txt b/c4/doc-stmgc.txt --- a/c4/doc-stmgc.txt +++ b/c4/doc-stmgc.txt @@ -304,3 +304,16 @@ revision numbers of all threads, and theoretically compact each interval of numbers down to only one number, but still keep one active revision number per thread. + + +Stealing +-------- + +This is done by the *stealing thread* in order to gain access to an +object that is protected by the *foreign thread*. Stealing is triggered +when we, the stealing thread, follow a "handle" created by a foreign +thread. The handle has a reference to the normal protected/private +object. The process depends on the exact state of the protected/private +copy. As a general rule, we may carefully read, but not write, to the +foreign copies during stealing. + diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -13,11 +13,6 @@ a transaction inevitable: we then add 1 to it. */ static revision_t global_cur_time = 2; -/* 'next_locked_value' is incremented by two for every thread that starts. - XXX it should be fixed at some point because right now the process will - die if we start more than 0x7fff threads. */ -static revision_t next_locked_value = (LOCKED + 1) | 1; - /* a negative odd number that identifies the currently running transaction within the thread. */ __thread revision_t stm_private_rev_num; @@ -85,6 +80,11 @@ } #endif +static void steal(gcptr P) +{ + abort(); +} + gcptr stm_DirectReadBarrier(gcptr G) { struct tx_descriptor *d = thread_descriptor; @@ -165,9 +165,9 @@ return P; old_to_young:; - revision_t target_lock; - target_lock = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); - if (target_lock == d->my_lock) + revision_t target_descriptor_index; + target_descriptor_index = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); + if (target_descriptor_index == d->descriptor_index) { P = (gcptr)(*(revision_t *)(v - 2)); assert(!(P->h_tid & GCFLAG_PUBLIC)); @@ -194,6 +194,7 @@ { /* stealing */ fprintf(stderr, "read_barrier: %p -> stealing %p...", G, (gcptr)v); + steal(P); abort(); } } @@ -577,7 +578,7 @@ "!!!!!!!!!!!!!!!!!!!!! 
[%lx] abort %d\n" "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "\n", (long)d->my_lock, num); + "\n", (long)d->descriptor_index, num); if (num != ABRT_MANUAL && d->max_aborts >= 0 && !d->max_aborts--) { fprintf(stderr, "unexpected abort!\n"); @@ -773,7 +774,7 @@ handle_block = (revision_t *) ((((intptr_t)handle_block) + HANDLE_BLOCK_SIZE-1) & ~(HANDLE_BLOCK_SIZE-1)); - handle_block[0] = d->my_lock; + handle_block[0] = d->descriptor_index; handle_block[1] = v; revision_t w = ((revision_t)(handle_block + 1)) + 2; @@ -947,7 +948,7 @@ (XXX statically we should know when we're outside a transaction) */ - fprintf(stderr, "[%lx] inevitable: %s\n", (long)d->my_lock, why); + fprintf(stderr, "[%lx] inevitable: %s\n", (long)d->descriptor_index, why); cur_time = acquire_inev_mutex_and_mark_global_cur_time(); if (d->start_time != cur_time) @@ -1071,6 +1072,10 @@ /************************************************************/ +struct tx_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; +static revision_t descriptor_array_next = 0; +static revision_t descriptor_array_lock = 0; + int DescriptorInit(void) { if (GCFLAG_PREBUILT != PREBUILT_FLAGS) @@ -1082,33 +1087,39 @@ if (thread_descriptor == NULL) { + revision_t i; struct tx_descriptor *d = stm_malloc(sizeof(struct tx_descriptor)); memset(d, 0, sizeof(struct tx_descriptor)); + spinlock_acquire(descriptor_array_lock, 1); - /* initialize 'my_lock' to be a unique odd number > LOCKED */ - while (1) + i = descriptor_array_next; + while (stm_descriptor_array[i] != NULL) { - d->my_lock = ACCESS_ONCE(next_locked_value); - if (d->my_lock > INTPTR_MAX - 2) + i++; + if (i == MAX_THREADS) + i = 0; + if (i == descriptor_array_next) { - /* XXX fix this limitation */ - fprintf(stderr, "XXX error: too many threads ever created " + fprintf(stderr, "error: too many threads at the same time " "in this process"); abort(); } - if (bool_cas(&next_locked_value, d->my_lock, d->my_lock + 2)) - break; } + descriptor_array_next = i; + stm_descriptor_array[i] = d; + d->descriptor_index = i; + d->my_lock = LOCKED + 2 * i; assert(d->my_lock & 1); - assert(d->my_lock > LOCKED); + assert(d->my_lock >= LOCKED); stm_private_rev_num = -1; d->private_revision_ref = &stm_private_rev_num; d->max_aborts = -1; thread_descriptor = d; fprintf(stderr, "[%lx] pthread %lx starting\n", - (long)d->my_lock, (long)pthread_self()); + (long)d->descriptor_index, (long)pthread_self()); + spinlock_release(descriptor_array_lock); return 1; } else @@ -1121,6 +1132,7 @@ assert(d != NULL); assert(d->active == 0); + stm_descriptor_array[d->descriptor_index] = NULL; thread_descriptor = NULL; g2l_delete(&d->public_to_private); @@ -1142,7 +1154,7 @@ num_spinloops += d->num_spinloops[i]; p += sprintf(p, "[%lx] finishing: %d commits, %d aborts ", - (long)d->my_lock, + (long)d->descriptor_index, d->num_commits, num_aborts); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -11,7 +11,8 @@ #define _SRCSTM_ET_H -#define LOCKED ((INTPTR_MAX - 0xffff) | 1) +#define MAX_THREADS 1024 +#define LOCKED (INTPTR_MAX - 2*(MAX_THREADS-1)) #define WORD sizeof(gcptr) #define HANDLE_BLOCK_SIZE (2 * WORD) @@ -103,6 +104,7 @@ struct tx_descriptor { jmp_buf *setjmp_buf; revision_t start_time; + revision_t descriptor_index; revision_t my_lock; revision_t collection_lock; gcptr *shadowstack; @@ -127,11 +129,11 @@ long long longest_abort_info_time; struct FXCache recent_reads_cache; revision_t *private_revision_ref; - struct 
tx_descriptor *tx_next, *tx_prev; /* a doubly linked list */ }; extern __thread struct tx_descriptor *thread_descriptor; extern __thread revision_t stm_private_rev_num; +extern struct tx_descriptor *stm_descriptor_array[]; /************************************************************/ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -84,7 +84,7 @@ gcptr pseudoprebuilt(size_t size, int tid); revision_t get_private_rev_num(void); revision_t get_start_time(void); - revision_t get_my_lock(void); + revision_t get_descriptor_index(void); //gcptr *addr_of_thread_local(void); //int in_nursery(gcptr); @@ -93,7 +93,6 @@ /* some constants normally private that are useful in the tests */ #define WORD ... #define GC_PAGE_SIZE ... - #define LOCKED ... #define HANDLE_BLOCK_SIZE ... #define GCFLAG_OLD ... #define GCFLAG_VISITED ... @@ -207,9 +206,9 @@ return thread_descriptor->start_time; } - revision_t get_my_lock(void) + revision_t get_descriptor_index(void) { - return thread_descriptor->my_lock; + return thread_descriptor->descriptor_index; } /*gcptr *addr_of_thread_local(void) @@ -551,7 +550,7 @@ def decode_handle(r): assert (r & 3) == 2 p = r & ~(lib.HANDLE_BLOCK_SIZE-1) - my_lock = ffi.cast("revision_t *", p)[0] - assert my_lock >= lib.LOCKED + dindex = ffi.cast("revision_t *", p)[0] + assert 0 <= dindex < 20 ptr = ffi.cast("gcptr *", r - 2)[0] - return ptr, my_lock + return ptr, dindex diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -138,7 +138,7 @@ lib.stm_begin_inevitable_transaction() assert classify(p) == "public" assert classify(p2) == "protected" - assert decode_handle(p.h_revision) == (p2, lib.get_my_lock()) + assert decode_handle(p.h_revision) == (p2, lib.get_descriptor_index()) assert lib.rawgetlong(p, 0) == 28971289 assert lib.rawgetlong(p2, 0) == 1289222 @@ -224,14 +224,36 @@ assert p4 == p2 assert list_of_read_objects() == [p2] -def test_stealing_protected_without_backup(): +def test_stealing(): p = palloc(HDR + WORD) + plist = [p] def f1(r): - lib.setlong(p, 0, 2782172) + assert (p.h_tid & GCFLAG_PUBLIC_TO_PRIVATE) == 0 + p1 = lib.stm_write_barrier(p) # private copy + assert p1 != p + assert classify(p) == "public" + assert classify(p1) == "private" + assert p.h_tid & GCFLAG_PUBLIC_TO_PRIVATE + lib.rawsetlong(p1, 0, 2782172) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() + assert classify(p) == "public" + assert classify(p1) == "protected" + plist.append(p1) + # now p's most recent revision is protected + assert p.h_revision % 4 == 2 # a handle r.set(2) + r.wait(3) + assert lib.list_stolen_objects() == plist[-2:] + p2 = lib.stm_read_barrier(p1) + assert p2 == plist[-1] def f2(r): r.wait(2) - assert lib.getlong(p, 0) == 2782172 + p2 = lib.stm_read_barrier(p) # steals + assert lib.rawgetlong(p2, 0) == 2782172 + assert p.h_revision == int(ffi.cast("revision_t", p2)) + assert p2 == lib.stm_read_barrier(p) + assert p2 not in plist + plist.append(p2) + r.set(3) run_parallel(f1, f2) From noreply at buildbot.pypy.org Sat Jun 8 21:25:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 21:25:56 +0200 (CEST) Subject: [pypy-commit] stmgc default: Introduce tx_public_descriptor to cope with threads ending Message-ID: <20130608192556.85E891C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79:91fdce635f8f Date: 2013-06-08 21:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/91fdce635f8f/ Log: Introduce 
tx_public_descriptor to cope with threads ending diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -83,8 +83,7 @@ - the PRN (private revision number): odd, negative, changes for every transaction that commits -- list active_backup_copies = - [(private converted from protected, backup copy)] +- list active_backup_copies = [(private, backup copy)] - dict public_to_private = {public obj: private copy} diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -82,6 +82,13 @@ static void steal(gcptr P) { + struct tx_public_descriptor *foreign_pd; + revision_t target_descriptor_index; + revision_t v = ACCESS_ONCE(P->h_revision); + if ((v & 3) != 2) + return; + target_descriptor_index = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); + //foreign_pd = ACCESS_ONCE(stm_descriptor_array[target_descriptor_index]); abort(); } @@ -167,7 +174,7 @@ old_to_young:; revision_t target_descriptor_index; target_descriptor_index = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); - if (target_descriptor_index == d->descriptor_index) + if (target_descriptor_index == d->public_descriptor_index) { P = (gcptr)(*(revision_t *)(v - 2)); assert(!(P->h_tid & GCFLAG_PUBLIC)); @@ -195,7 +202,7 @@ /* stealing */ fprintf(stderr, "read_barrier: %p -> stealing %p...", G, (gcptr)v); steal(P); - abort(); + goto retry; } } @@ -331,7 +338,7 @@ memcpy(B + 1, P + 1, size - sizeof(*B)); } assert(B->h_tid & GCFLAG_BACKUP_COPY); - g2l_insert(&d->private_to_backup, P, B); + gcptrlist_insert2(&d->public_descriptor->active_backup_copies, P, B); P->h_revision = stm_private_rev_num; return P; } @@ -394,10 +401,13 @@ gcptr stm_get_backup_copy(gcptr P) { - struct tx_descriptor *d = thread_descriptor; - wlog_t *entry; - G2L_FIND(d->private_to_backup, P, entry, return NULL); - return entry->val; + struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; + long i, size = pd->active_backup_copies.size; + gcptr *items = pd->active_backup_copies.items; + for (i = 0; i < size; i += 2) + if (items[i] == P) + return items[i + 1]; + return NULL; } gcptr stm_get_read_obj(long index) @@ -568,7 +578,7 @@ } gcptrlist_clear(&d->list_of_read_objects); - g2l_clear(&d->private_to_backup); + gcptrlist_clear(&d->public_descriptor->active_backup_copies); abort();//stmgc_abort_transaction(d); fprintf(stderr, @@ -578,7 +588,7 @@ "!!!!!!!!!!!!!!!!!!!!! 
[%lx] abort %d\n" "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" - "\n", (long)d->descriptor_index, num); + "\n", (long)d->public_descriptor_index, num); if (num != ABRT_MANUAL && d->max_aborts >= 0 && !d->max_aborts--) { fprintf(stderr, "unexpected abort!\n"); @@ -632,7 +642,7 @@ d->start_real_time.tv_nsec = -1; } assert(d->list_of_read_objects.size == 0); - assert(!g2l_any_entry(&d->private_to_backup)); + assert(d->public_descriptor->active_backup_copies.size == 0); assert(!g2l_any_entry(&d->public_to_private)); d->count_reads = 1; @@ -774,7 +784,7 @@ handle_block = (revision_t *) ((((intptr_t)handle_block) + HANDLE_BLOCK_SIZE-1) & ~(HANDLE_BLOCK_SIZE-1)); - handle_block[0] = d->descriptor_index; + handle_block[0] = d->public_descriptor_index; handle_block[1] = v; revision_t w = ((revision_t)(handle_block + 1)) + 2; @@ -826,18 +836,19 @@ void TurnPrivateWithBackupToProtected(struct tx_descriptor *d, revision_t cur_time) { - wlog_t *item; - G2L_LOOP_FORWARD(d->private_to_backup, item) + long i, size = d->public_descriptor->active_backup_copies.size; + gcptr *items = d->public_descriptor->active_backup_copies.items; + + for (i = 0; i < size; i += 2) { - gcptr P = item->addr; - gcptr B = item->val; + gcptr P = items[i]; + gcptr B = items[i + 1]; assert(P->h_revision == stm_private_rev_num); assert(B->h_tid & GCFLAG_BACKUP_COPY); B->h_revision = cur_time; P->h_revision = (revision_t)B; - - } G2L_LOOP_END; - g2l_clear(&d->private_to_backup); + }; + gcptrlist_clear(&d->public_descriptor->active_backup_copies); } void CommitTransaction(void) @@ -846,7 +857,7 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - spinlock_acquire(d->collection_lock, 'C'); /* committing */ + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ AcquireLocks(d); if (is_inevitable(d)) @@ -868,10 +879,10 @@ if (cur_time & 1) { // there is another inevitable transaction CancelLocks(d); - spinlock_release(d->collection_lock); + spinlock_release(d->public_descriptor->collection_lock); inev_mutex_acquire(); // wait until released inev_mutex_release(); - spinlock_acquire(d->collection_lock, 'C'); + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); AcquireLocks(d); continue; } @@ -907,7 +918,7 @@ UpdateChainHeads(d, cur_time, localrev); - spinlock_release(d->collection_lock); + spinlock_release(d->public_descriptor->collection_lock); d->num_commits++; d->active = 0; stm_stop_sharedlock(); @@ -948,7 +959,8 @@ (XXX statically we should know when we're outside a transaction) */ - fprintf(stderr, "[%lx] inevitable: %s\n", (long)d->descriptor_index, why); + fprintf(stderr, "[%lx] inevitable: %s\n", + (long)d->public_descriptor_index, why); cur_time = acquire_inev_mutex_and_mark_global_cur_time(); if (d->start_time != cur_time) @@ -1072,8 +1084,8 @@ /************************************************************/ -struct tx_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; -static revision_t descriptor_array_next = 0; +struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0}; +static revision_t descriptor_array_free_list = 0; static revision_t descriptor_array_lock = 0; int DescriptorInit(void) @@ -1092,22 +1104,30 @@ memset(d, 0, sizeof(struct tx_descriptor)); spinlock_acquire(descriptor_array_lock, 1); - i = descriptor_array_next; - while (stm_descriptor_array[i] != NULL) - { - i++; - if (i == MAX_THREADS) - i = 0; - if (i == descriptor_array_next) - { + struct 
tx_public_descriptor *pd; + i = descriptor_array_free_list; + pd = stm_descriptor_array[i]; + if (pd != NULL) { + /* we are reusing 'pd' */ + descriptor_array_free_list = pd->free_list_next; + assert(descriptor_array_free_list >= 0); + } + else { + /* no item in the free list */ + descriptor_array_free_list = i + 1; + if (descriptor_array_free_list == MAX_THREADS) { fprintf(stderr, "error: too many threads at the same time " "in this process"); abort(); - } - } - descriptor_array_next = i; - stm_descriptor_array[i] = d; - d->descriptor_index = i; + } + pd = stm_malloc(sizeof(struct tx_public_descriptor)); + memset(pd, 0, sizeof(struct tx_public_descriptor)); + stm_descriptor_array[i] = pd; + } + pd->free_list_next = -1; + + d->public_descriptor = pd; + d->public_descriptor_index = i; d->my_lock = LOCKED + 2 * i; assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); @@ -1117,7 +1137,7 @@ thread_descriptor = d; fprintf(stderr, "[%lx] pthread %lx starting\n", - (long)d->descriptor_index, (long)pthread_self()); + (long)d->public_descriptor_index, (long)pthread_self()); spinlock_release(descriptor_array_lock); return 1; @@ -1128,15 +1148,25 @@ void DescriptorDone(void) { + revision_t i; struct tx_descriptor *d = thread_descriptor; assert(d != NULL); assert(d->active == 0); - stm_descriptor_array[d->descriptor_index] = NULL; + d->public_descriptor->collection_lock = 0; /* unlock */ + + spinlock_acquire(descriptor_array_lock, 1); + i = d->public_descriptor_index; + assert(stm_descriptor_array[i] == d->public_descriptor); + d->public_descriptor->free_list_next = descriptor_array_free_list; + descriptor_array_free_list = i; + spinlock_release(descriptor_array_lock); + thread_descriptor = NULL; g2l_delete(&d->public_to_private); - g2l_delete(&d->private_to_backup); + assert(d->public_descriptor->active_backup_copies.size == 0); + gcptrlist_delete(&d->public_descriptor->active_backup_copies); gcptrlist_delete(&d->list_of_read_objects); gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); @@ -1145,7 +1175,6 @@ #endif int num_aborts = 0, num_spinloops = 0; - int i; char line[256], *p = line; for (i=0; i<ABORT_REASONS; i++) num_aborts += d->num_aborts[i]; for (i=0; i<SPINLOOP_REASONS; i++) num_spinloops += d->num_spinloops[i]; p += sprintf(p, "[%lx] finishing: %d commits, %d aborts ", - (long)d->descriptor_index, + (long)d->public_descriptor_index, d->num_commits, num_aborts); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -101,12 +101,25 @@ #define SPLP_LOCKED_COMMIT 3 #define SPINLOOP_REASONS 4 +/* this struct contains thread-local data that may be occasionally + * accessed by a foreign thread and that must stay around after the + * thread shuts down. It is reused the next time a thread starts. 
*/ +struct tx_public_descriptor { + revision_t collection_lock; + struct GcPtrList stolen_objects; + struct GcPtrList active_backup_copies; + revision_t free_list_next; + /* xxx gcpage data here */ +}; + +/* this struct contains all thread-local data that is never accessed + * by a foreign thread */ struct tx_descriptor { + struct tx_public_descriptor *public_descriptor; + revision_t public_descriptor_index; jmp_buf *setjmp_buf; revision_t start_time; - revision_t descriptor_index; revision_t my_lock; - revision_t collection_lock; gcptr *shadowstack; gcptr **shadowstack_end_ref; @@ -123,7 +136,6 @@ unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; struct GcPtrList abortinfo; - struct G2L private_to_backup; struct G2L public_to_private; char *longest_abort_info; long long longest_abort_info_time; @@ -133,43 +145,10 @@ extern __thread struct tx_descriptor *thread_descriptor; extern __thread revision_t stm_private_rev_num; -extern struct tx_descriptor *stm_descriptor_array[]; +extern struct tx_public_descriptor *stm_descriptor_array[]; /************************************************************/ -//#define STM_BARRIER_P2R(P) -// (__builtin_expect((((gcptr)(P))->h_tid & GCFLAG_GLOBAL) == 0, 1) ? -// (P) : (typeof(P))stm_DirectReadBarrier(P)) - -//#define STM_BARRIER_G2R(G) -// (assert(((gcptr)(G))->h_tid & GCFLAG_GLOBAL), -// (typeof(G))stm_DirectReadBarrier(G)) - -//#define STM_BARRIER_O2R(O) -// (__builtin_expect((((gcptr)(O))->h_tid & GCFLAG_POSSIBLY_OUTDATED) == 0, -// 1) ? -// (O) : (typeof(O))stm_RepeatReadBarrier(O)) - -//#define STM_READ_BARRIER_P_FROM_R(P, R_container, offset) -// (__builtin_expect((((gcptr)(P))->h_tid & GCFLAG_GLOBAL) == 0, 1) ? -// (P) : (typeof(P))stm_DirectReadBarrierFromR((P), -// (R_container), -// offset)) - -//#define STM_BARRIER_P2W(P) -// (__builtin_expect((((gcptr)(P))->h_tid & GCFLAG_NOT_WRITTEN) == 0, 1) ? -// (P) : (typeof(P))stm_WriteBarrier(P)) - -//#define STM_BARRIER_G2W(G) -// (assert(((gcptr)(G))->h_tid & GCFLAG_GLOBAL), -// (typeof(G))stm_WriteBarrier(G)) - -//#define STM_BARRIER_R2W(R) -// (__builtin_expect((((gcptr)(R))->h_tid & GCFLAG_NOT_WRITTEN) == 0, 1) ? 
-// (R) : (typeof(R))stm_WriteBarrierFromReady(R)) - -//#define STM_BARRIER_O2W(R) STM_BARRIER_R2W(R) /* same logic works here */ - void BeginTransaction(jmp_buf *); void BeginInevitableTransaction(void); /* must save roots around this call */ diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -208,7 +208,7 @@ revision_t get_descriptor_index(void) { - return thread_descriptor->descriptor_index; + return thread_descriptor->public_descriptor_index; } /*gcptr *addr_of_thread_local(void) From noreply at buildbot.pypy.org Sat Jun 8 22:49:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 22:49:45 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130608204945.C962E1C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80:1456f52e680d Date: 2013-06-08 22:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/1456f52e680d/ Log: in-progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -60,38 +60,6 @@ /************************************************************/ -#if 0 -static inline gcptr AddInReadSet(struct tx_descriptor *d, gcptr R) -{ - fprintf(stderr, "AddInReadSet(%p)\n", R); - d->count_reads++; - if (!fxcache_add(&d->recent_reads_cache, R)) { - /* not in the cache: it may be the first time we see it, - * so insert it into the list */ - gcptrlist_insert(&d->list_of_read_objects, R); - } - // break; - - // case 2: - /* already in the cache, and FX_THRESHOLD reached */ - // return Localize(d, R); - // } - return R; -} -#endif - -static void steal(gcptr P) -{ - struct tx_public_descriptor *foreign_pd; - revision_t target_descriptor_index; - revision_t v = ACCESS_ONCE(P->h_revision); - if ((v & 3) != 2) - return; - target_descriptor_index = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); - //foreign_pd = ACCESS_ONCE(stm_descriptor_array[target_descriptor_index]); - abort(); -} - gcptr stm_DirectReadBarrier(gcptr G) { struct tx_descriptor *d = thread_descriptor; @@ -106,26 +74,26 @@ v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - if (v & 2) - goto old_to_young; - assert(P->h_tid & GCFLAG_PUBLIC); + /* if we land on a P in read_barrier_cache: just return it */ + gcptr P_next = (gcptr)v; + if (FXCACHE_AT(P_next) == P_next) + { + fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P_next); + return P_next; + } + + if (P->h_tid & GCFLAG_STUB) + goto follow_stub; gcptr P_prev = P; - P = (gcptr)v; - - /* if we land on a P in read_barrier_cache: just return it */ - if (FXCACHE_AT(P) == P) - { - fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P); - return P; - } + P = P_next; + assert(P->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
{ // "has a more recent revision" - if (v & 2) - goto old_to_young; - assert(P->h_tid & GCFLAG_PUBLIC); + if (P->h_tid & GCFLAG_STUB) + goto follow_stub; /* we update P_prev->h_revision as a shortcut */ /* XXX check if this really gives a worse performance than only @@ -171,12 +139,12 @@ gcptrlist_insert(&d->list_of_read_objects, P); return P; - old_to_young:; - revision_t target_descriptor_index; - target_descriptor_index = *(revision_t *)(v & ~(HANDLE_BLOCK_SIZE-1)); - if (target_descriptor_index == d->public_descriptor_index) + follow_stub:; + struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); + if (foreign_pd == d->public_descriptor) { - P = (gcptr)(*(revision_t *)(v - 2)); + /* same thread */ + P = (gcptr)P->h_revision; assert(!(P->h_tid & GCFLAG_PUBLIC)); if (P->h_revision == stm_private_rev_num) { @@ -200,8 +168,8 @@ else { /* stealing */ - fprintf(stderr, "read_barrier: %p -> stealing %p...", G, (gcptr)v); - steal(P); + fprintf(stderr, "read_barrier: %p -> stealing %p...", G, P); + stm_steal_stub(P); goto retry; } } @@ -401,6 +369,8 @@ gcptr stm_get_backup_copy(gcptr P) { + assert(P->h_revision == stm_private_rev_num); + struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; long i, size = pd->active_backup_copies.size; gcptr *items = pd->active_backup_copies.items; @@ -735,7 +705,7 @@ #endif } -static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; +//static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; static void UpdateChainHeads(struct tx_descriptor *d, revision_t cur_time, revision_t localrev) @@ -763,6 +733,11 @@ #endif L->h_revision = new_revision; + gcptr stub = stm_stub_malloc(d->public_descriptor); + stub->h_tid = GCFLAG_PUBLIC | GCFLAG_STUB; + stub->h_revision = (revision_t)L; + item->val = stub; + } G2L_LOOP_END; smp_wmb(); /* a memory barrier: make sure the new L->h_revisions are visible @@ -779,22 +754,12 @@ assert(!(R->h_tid & GCFLAG_STOLEN)); assert(R->h_revision != localrev); - /* XXX compactify and don't leak! */ - revision_t *handle_block = stm_malloc(3 * WORD); - handle_block = (revision_t *) - ((((intptr_t)handle_block) + HANDLE_BLOCK_SIZE-1) - & ~(HANDLE_BLOCK_SIZE-1)); - handle_block[0] = d->public_descriptor_index; - handle_block[1] = v; - - revision_t w = ((revision_t)(handle_block + 1)) + 2; - #ifdef DUMP_EXTRA - fprintf(stderr, "%p->h_revision = %p (UpdateChainHeads2)\n", - R, (gcptr)w); + fprintf(stderr, "%p->h_revision = %p (stub to %p)\n", + R, (gcptr)v, (gcptr)item->val->h_revision); /*mark*/ #endif - ACCESS_ONCE(R->h_revision) = w; + ACCESS_ONCE(R->h_revision) = v; #if 0 if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -13,9 +13,7 @@ #define MAX_THREADS 1024 #define LOCKED (INTPTR_MAX - 2*(MAX_THREADS-1)) - #define WORD sizeof(gcptr) -#define HANDLE_BLOCK_SIZE (2 * WORD) /* Description of the flags * ------------------------ @@ -52,8 +50,9 @@ * GCFLAG_STOLEN is set of protected objects after we notice that they * have been stolen. * - * GCFLAG_STUB is used for debugging: it's set on stub objects made by - * stealing or by major collections. + * GCFLAG_STUB is set on stub objects made by stealing or by major + * collections. It's removed once the stub's protected h_revision + * target is stolen and replaced by a regular public object. 
*/ #define GCFLAG_OLD (STM_FIRST_GCFLAG << 0) #define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1) @@ -64,7 +63,7 @@ #define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6) #define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7) #define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8) -#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */ +#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -106,6 +105,8 @@ * thread shuts down. It is reused the next time a thread starts. */ struct tx_public_descriptor { revision_t collection_lock; + struct stub_block_s *stub_blocks; + gcptr stub_free_list; struct GcPtrList stolen_objects; struct GcPtrList active_backup_copies; revision_t free_list_next; diff --git a/c4/steal.c b/c4/steal.c new file mode 100644 --- /dev/null +++ b/c4/steal.c @@ -0,0 +1,57 @@ +#include "stmimpl.h" + + +#define STUB_PAGE (4096 - 2*WORD) +#define STUB_NB_OBJS ((STUB_BLOCK_SIZE - 2*WORD) / \ + sizeof(struct stm_object_s)) + +struct stub_block_s { + struct tx_public_descriptor *thread; + struct stub_block_s *next; + struct stm_object_s stubs[STUB_NB_OBJS]; +}; + +gcptr stm_stub_malloc(struct tx_public_descriptor *pd) +{ + gcptr p = pd->stub_free_list; + if (p == NULL) { + assert(sizeof(struct stub_block_s) == STUB_BLOCK_SIZE); + + char *page = stm_malloc(STUB_PAGE); + char *page_end = page + STUB_PAGE; + page += (-(revision_t)page) & (STUB_BLOCK_SIZE-1); /* round up */ + + struct stub_block_s *b = (struct stub_block_s *)page; + struct stub_block_s *nextb = NULL; + gcptr nextp = NULL; + int i; + + while (((char *)(b + 1)) <= page_end) { + b->thread = pd; + b->next = nextb; + for (i = 0; i < STUB_NB_OBJS; i++) { + b->stubs[i].h_revision = (revision_t)nextp; + nextp = &b->stubs[i]; + } + b++; + } + assert(nextp != NULL); + p = nextp; + } + pd->stub_free_list = (gcptr)p->h_revision; + assert(STUB_THREAD(p) == pd); + return p; +} + +void stm_steal_stub(gcptr P) +{ + abort(); + struct tx_public_descriptor *foreign_pd; + revision_t target_descriptor_index; + revision_t v = ACCESS_ONCE(P->h_revision); + if ((v & 3) != 2) + return; + target_descriptor_index = *(revision_t *)(v & ~(STUB_BLOCK_SIZE-1)); + foreign_pd = stm_descriptor_array[target_descriptor_index]; + abort(); +} diff --git a/c4/steal.h b/c4/steal.h new file mode 100644 --- /dev/null +++ b/c4/steal.h @@ -0,0 +1,14 @@ +#ifndef _SRCSTM_STEAL_H +#define _SRCSTM_STEAL_H + + +#define STUB_BLOCK_SIZE (16 * WORD) /* power of two */ + +#define STUB_THREAD(h) (*(struct tx_public_descriptor **) \ + (((revision_t)(h)) & ~(STUB_BLOCK_SIZE-1))) + +gcptr stm_stub_malloc(struct tx_public_descriptor *); +void stm_steal_stub(gcptr); + + +#endif diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -31,6 +31,7 @@ #include "lists.h" #include "dbgmem.h" #include "et.h" +#include "steal.h" #include "stmsync.h" #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -5,11 +5,11 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "et.h lists.h " + "et.h lists.h steal.h " "stmsync.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "et.c lists.c " + "et.c lists.c steal.c " "stmsync.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') @@ -70,6 +70,7 @@ void AbortTransaction(int); gcptr 
stm_get_backup_copy(gcptr); gcptr stm_get_read_obj(long index); + void *STUB_THREAD(gcptr); gcptr getptr(gcptr, long); void setptr(gcptr, long, gcptr); @@ -84,7 +85,7 @@ gcptr pseudoprebuilt(size_t size, int tid); revision_t get_private_rev_num(void); revision_t get_start_time(void); - revision_t get_descriptor_index(void); + void *my_stub_thread(void); //gcptr *addr_of_thread_local(void); //int in_nursery(gcptr); @@ -93,7 +94,7 @@ /* some constants normally private that are useful in the tests */ #define WORD ... #define GC_PAGE_SIZE ... - #define HANDLE_BLOCK_SIZE ... + #define STUB_BLOCK_SIZE ... #define GCFLAG_OLD ... #define GCFLAG_VISITED ... #define GCFLAG_PUBLIC ... @@ -206,9 +207,9 @@ return thread_descriptor->start_time; } - revision_t get_descriptor_index(void) + void *my_stub_thread(void) { - return thread_descriptor->public_descriptor_index; + return (void *)thread_descriptor->public_descriptor; } /*gcptr *addr_of_thread_local(void) @@ -526,10 +527,14 @@ private = p.h_revision == lib.get_private_rev_num() public = (p.h_tid & GCFLAG_PUBLIC) != 0 backup = (p.h_tid & GCFLAG_BACKUP_COPY) != 0 + stub = (p.h_tid & GCFLAG_STUB) != 0 assert private + public + backup <= 1 + assert stub <= public if private: return "private" if public: + if stub: + return "stub" return "public" if backup: return "backup" @@ -547,10 +552,4 @@ index += 1 return result -def decode_handle(r): - assert (r & 3) == 2 - p = r & ~(lib.HANDLE_BLOCK_SIZE-1) - dindex = ffi.cast("revision_t *", p)[0] - assert 0 <= dindex < 20 - ptr = ffi.cast("gcptr *", r - 2)[0] - return ptr, dindex +stub_thread = lib.STUB_THREAD diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -80,10 +80,9 @@ assert p.h_revision == lib.get_private_rev_num() lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() - assert lib.stm_get_backup_copy(p) == ffi.NULL assert classify(p) == "protected" assert classify(pback) == "backup" - assert ffi.cast("revision_t *", p.h_revision) == pback + assert ffi.cast("gcptr", p.h_revision) == pback def test_protected_backup_reused(): p = nalloc(HDR + WORD) @@ -95,7 +94,6 @@ assert pback != p lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() - assert lib.stm_get_backup_copy(p) == ffi.NULL assert classify(p) == "protected" assert classify(pback) == "backup" assert lib.rawgetlong(p, 0) == 927122 @@ -138,7 +136,9 @@ lib.stm_begin_inevitable_transaction() assert classify(p) == "public" assert classify(p2) == "protected" - assert decode_handle(p.h_revision) == (p2, lib.get_descriptor_index()) + pstub = ffi.cast("gcptr", p.h_revision) + assert classify(pstub) == "stub" + assert stub_thread(pstub) == lib.my_stub_thread() assert lib.rawgetlong(p, 0) == 28971289 assert lib.rawgetlong(p2, 0) == 1289222 @@ -241,7 +241,7 @@ assert classify(p1) == "protected" plist.append(p1) # now p's most recent revision is protected - assert p.h_revision % 4 == 2 # a handle + assert classify(ffi.cast("gcptr", p.h_revision)) == "stub" r.set(2) r.wait(3) assert lib.list_stolen_objects() == plist[-2:] From noreply at buildbot.pypy.org Sat Jun 8 22:57:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 22:57:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: progress in stealing Message-ID: <20130608205749.E74E11C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r81:77e624bcbfd2 Date: 2013-06-08 22:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/77e624bcbfd2/ Log: progress in stealing diff --git a/c4/steal.c 
b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -45,13 +45,23 @@ void stm_steal_stub(gcptr P) { - abort(); - struct tx_public_descriptor *foreign_pd; - revision_t target_descriptor_index; - revision_t v = ACCESS_ONCE(P->h_revision); - if ((v & 3) != 2) - return; - target_descriptor_index = *(revision_t *)(v & ~(STUB_BLOCK_SIZE-1)); - foreign_pd = stm_descriptor_array[target_descriptor_index]; - abort(); + struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); + + spinlock_acquire(foreign_pd->collection_lock, 'S'); /*stealing*/ + + if (!(P->h_tid & GCFLAG_STUB)) + goto done; /* un-stubbed while we waited for the lock */ + + /* XXX check for now that P is a regular protected object */ + gcptr L = (gcptr)P->h_revision; + gcptr Q = stmgc_duplicate(L); + Q->h_tid |= GCFLAG_PUBLIC; + + smp_wmb(); + + P->h_revision = (revision_t)Q; + P->h_tid &= ~GCFLAG_STUB; + + done: + spinlock_release(foreign_pd->collection_lock); } diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -251,6 +251,7 @@ r.wait(2) p2 = lib.stm_read_barrier(p) # steals assert lib.rawgetlong(p2, 0) == 2782172 + assert p2 == lib.stm_read_barrier(p) # short-circuit h_revision assert p.h_revision == int(ffi.cast("revision_t", p2)) assert p2 == lib.stm_read_barrier(p) assert p2 not in plist From noreply at buildbot.pypy.org Sat Jun 8 23:00:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Jun 2013 23:00:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix Message-ID: <20130608210001.318BD1C06B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r82:1366ee6b9a4d Date: 2013-06-08 22:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/1366ee6b9a4d/ Log: fix diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -56,10 +56,10 @@ gcptr L = (gcptr)P->h_revision; gcptr Q = stmgc_duplicate(L); Q->h_tid |= GCFLAG_PUBLIC; + P->h_revision = (revision_t)Q; smp_wmb(); - P->h_revision = (revision_t)Q; P->h_tid &= ~GCFLAG_STUB; done: From noreply at buildbot.pypy.org Sun Jun 9 10:23:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 10:23:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: tweaks Message-ID: <20130609082349.6D5EB1C094A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r83:185610f34659 Date: 2013-06-09 10:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/185610f34659/ Log: tweaks diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -74,25 +74,18 @@ v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - /* if we land on a P in read_barrier_cache: just return it */ - gcptr P_next = (gcptr)v; - if (FXCACHE_AT(P_next) == P_next) - { - fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P_next); - return P_next; - } - - if (P->h_tid & GCFLAG_STUB) + if (v & 2) goto follow_stub; gcptr P_prev = P; - P = P_next; + P = (gcptr)v; assert(P->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(P->h_revision); + if (!(v & 1)) // "is a pointer", i.e. 
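
The "sentinel" idea described in the fastjson log messages above (changesets f642cad5d507 and b31803bdd48c) can be read in isolation from the RPython details. The snippet below is only a plain-Python sketch written for this digest, not the archived code: the class name SentinelScanner is invented for illustration, and its two methods merely mirror the skip_whitespace/parse_digits helpers seen in the diffs. It demonstrates the point the logs state: append a single '\0' to the input once, so that every scanning loop can stop on the sentinel instead of re-testing i < len(s) for each character.

    class SentinelScanner(object):
        """Toy scanner over a NUL-terminated copy of the input string."""

        def __init__(self, s):
            # one O(n) copy up front; afterwards no loop below needs an
            # explicit end-of-string test, because '\0' is neither
            # whitespace nor a digit and therefore stops every loop
            self.s = s + '\0'
            self.length = len(s)

        def skip_whitespace(self, i):
            # terminates at the sentinel at the latest
            while self.s[i] in ' \t\r\n':
                i += 1
            return i

        def parse_digits(self, i):
            # parse an unsigned decimal number starting at position i
            start = i
            intval = 0
            while self.s[i].isdigit():
                intval = intval * 10 + ord(self.s[i]) - ord('0')
                i += 1
            if i == start:
                raise ValueError("Expected digit at char %d" % i)
            return i, intval

    if __name__ == '__main__':
        sc = SentinelScanner('   4213  ')
        i = sc.skip_whitespace(0)
        i, value = sc.parse_digits(i)
        assert value == 4213
        assert sc.skip_whitespace(i) == sc.length

The trade-off is the one the log message itself mentions: one extra string copy at construction time, in exchange for removing an end-of-string check from every per-character loop.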
{ // "has a more recent revision" - if (P->h_tid & GCFLAG_STUB) + if (v & 2) goto follow_stub; /* we update P_prev->h_revision as a shortcut */ @@ -104,6 +97,13 @@ } } + /* if we land on a P in read_barrier_cache: just return it */ + if (FXCACHE_AT(P) == P) + { + fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P); + return P; + } + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *item; @@ -144,7 +144,7 @@ if (foreign_pd == d->public_descriptor) { /* same thread */ - P = (gcptr)P->h_revision; + P = (gcptr)v; assert(!(P->h_tid & GCFLAG_PUBLIC)); if (P->h_revision == stm_private_rev_num) { @@ -735,7 +735,7 @@ gcptr stub = stm_stub_malloc(d->public_descriptor); stub->h_tid = GCFLAG_PUBLIC | GCFLAG_STUB; - stub->h_revision = (revision_t)L; + stub->h_revision = ((revision_t)L) | 2; item->val = stub; } G2L_LOOP_END; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -50,9 +50,10 @@ * GCFLAG_STOLEN is set of protected objects after we notice that they * have been stolen. * - * GCFLAG_STUB is set on stub objects made by stealing or by major - * collections. It's removed once the stub's protected h_revision - * target is stolen and replaced by a regular public object. + * GCFLAG_STUB is set for debugging on stub objects made by stealing or + * by major collections. 'p_stub->h_revision' might be a value + * that is == 2 (mod 4): in this case they point to a protected/private + * object that belongs to the thread 'STUB_THREAD(p_stub)'. */ #define GCFLAG_OLD (STM_FIRST_GCFLAG << 0) #define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1) @@ -63,7 +64,7 @@ #define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6) #define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7) #define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8) -#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) +#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -166,6 +167,7 @@ gcptr _stm_nonrecord_barrier(gcptr, int *); gcptr stm_get_backup_copy(gcptr); gcptr stm_get_read_obj(long); /* debugging */ +gcptr stmgc_duplicate(gcptr); int DescriptorInit(void); void DescriptorDone(void); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -49,18 +49,17 @@ spinlock_acquire(foreign_pd->collection_lock, 'S'); /*stealing*/ - if (!(P->h_tid & GCFLAG_STUB)) + revision_t v = ACCESS_ONCE(P->h_revision); + if ((v & 3) != 2) goto done; /* un-stubbed while we waited for the lock */ - /* XXX check for now that P is a regular protected object */ - gcptr L = (gcptr)P->h_revision; + gcptr L = (gcptr)(v - 2); gcptr Q = stmgc_duplicate(L); Q->h_tid |= GCFLAG_PUBLIC; - P->h_revision = (revision_t)Q; smp_wmb(); - P->h_tid &= ~GCFLAG_STUB; + P->h_revision = (revision_t)Q; done: spinlock_release(foreign_pd->collection_lock); From noreply at buildbot.pypy.org Sun Jun 9 11:41:46 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:46 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: try to speedup things Message-ID: <20130609094146.27BB41C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64827:374e8a9d721a Date: 2013-06-06 14:19 +0200 http://bitbucket.org/pypy/pypy/changeset/374e8a9d721a/ Log: try to speedup things diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -49,16 +49,19 @@ self.i = i2 def getslice(self, start, end): + assert start > 0 
assert end > 0 return self.s[start:end] def skip_whitespace(self): - while not self.eof(): - ch = self.peek() + i = self.i + while i < len(self.s): + ch = self.s[i] if is_whitespace(ch): - self.next() + i+=1 else: break + self.i = i @specialize.arg(1) def _raise(self, msg, *args): @@ -240,14 +243,16 @@ def decode_string(self): start = self.i + i = self.i bits = 0 - while not self.eof(): + while i < len(self.s): # this loop is a fast path for strings which do not contain escape # characters - ch = self.next() + ch = self.s[i] + i += 1 bits |= ord(ch) if ch == '"': - content_utf8 = self.getslice(start, self.i-1) + content_utf8 = self.getslice(start, i-1) if bits & 0x80: # the 8th bit is set, it's an utf8 strnig content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) @@ -255,10 +260,11 @@ # ascii only, faster to decode content_unicode = content_utf8.decode('ascii') self.last_type = TYPE_STRING + self.i = i return self.space.wrap(content_unicode) elif ch == '\\': - content_so_far = self.getslice(start, self.i-1) - self.unget() + content_so_far = self.getslice(start, i-1) + self.i = i-1 return self.decode_string_escaped(start, content_so_far) self._raise("Unterminated string starting at char %d", start) From noreply at buildbot.pypy.org Sun Jun 9 11:41:47 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:47 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: here are dragons: valgrind shows that a good percentage of time was spent in Message-ID: <20130609094147.852EF1C094A@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64828:c9e7d8554e5f Date: 2013-06-06 15:33 +0200 http://bitbucket.org/pypy/pypy/changeset/c9e7d8554e5f/ Log: here are dragons: valgrind shows that a good percentage of time was spent in taking the slice + converting it to unicode. Instead, we directly create an unicode string by copying the relevant characters from the original string, but we need to go to the level of "low level helpers" to do that, with llstr&co. 
diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -19,6 +19,19 @@ TYPE_UNKNOWN = 0 TYPE_STRING = 1 +def strslice2unicode_ascii(s, start, end): + from rpython.rtyper.annlowlevel import llstr, hlunicode + from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE + from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar + length = end-start + ll_s = llstr(s) + ll_res = malloc(UNICODE, length) + ll_res.hash = 0 + for i in range(length): + ch = ll_s.chars[start+i] + ll_res.chars[i] = cast_primitive(UniChar, ch) + return hlunicode(ll_res) + class JSONDecoder(object): def __init__(self, space, s): self.space = space @@ -252,13 +265,13 @@ i += 1 bits |= ord(ch) if ch == '"': - content_utf8 = self.getslice(start, i-1) if bits & 0x80: # the 8th bit is set, it's an utf8 strnig + content_utf8 = self.getslice(start, i-1) content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) else: - # ascii only, faster to decode - content_unicode = content_utf8.decode('ascii') + # ascii only, fast path + content_unicode = strslice2unicode_ascii(self.s, start, i-1) self.last_type = TYPE_STRING self.i = i return self.space.wrap(content_unicode) From noreply at buildbot.pypy.org Sun Jun 9 11:41:48 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:48 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: speed up parse_digits by storing self.i only at the end Message-ID: <20130609094148.D7CA51C0EB9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64829:16e72115b986 Date: 2013-06-06 16:28 +0200 http://bitbucket.org/pypy/pypy/changeset/16e72115b986/ Log: speed up parse_digits by storing self.i only at the end diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -178,16 +178,18 @@ "Parse a sequence of digits as a decimal number. No sign allowed" intval = 0 count = 0 - while not self.eof(): - ch = self.peek() + i = self.i + while i < len(self.s): + ch = self.s[i] if ch.isdigit(): intval = intval*10 + ord(ch)-ord('0') count += 1 - self.next() + i += 1 else: break if count == 0: - self._raise("Expected digit at char %d", self.i) + self._raise("Expected digit at char %d", i) + self.i = i return intval, count def decode_array(self): @@ -252,8 +254,6 @@ ch, self.i) self._raise("Unterminated object starting at char %d", start) - - def decode_string(self): start = self.i i = self.i From noreply at buildbot.pypy.org Sun Jun 9 11:41:50 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:50 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: call math.pow only when it's really needed. Believe it or not, it saves another 5% on my benchmark Message-ID: <20130609094150.1D06F1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64830:e851f2e812ec Date: 2013-06-06 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/e851f2e812ec/ Log: call math.pow only when it's really needed. 
Believe it or not, it saves another 5% on my benchmark diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -160,7 +160,8 @@ if is_float: # build the float floatval = intval + frcval - floatval = floatval * math.pow(10, exp) + if exp != 0: + floatval = floatval * math.pow(10, exp) return self.space.wrap(floatval) else: return self.space.wrap(intval) From noreply at buildbot.pypy.org Sun Jun 9 11:41:51 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:51 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: rename JSONDecoder.i to .pos, just because it's a better name Message-ID: <20130609094151.4B9FF1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64831:ee4fe752ea20 Date: 2013-06-06 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/ee4fe752ea20/ Log: rename JSONDecoder.i to .pos, just because it's a better name diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -36,14 +36,14 @@ def __init__(self, space, s): self.space = space self.s = s - self.i = 0 + self.pos = 0 self.last_type = TYPE_UNKNOWN def eof(self): - return self.i == len(self.s) + return self.pos == len(self.s) def peek(self): - return self.s[self.i] + return self.s[self.pos] def peek_maybe(self): if self.eof(): @@ -53,13 +53,13 @@ def next(self): ch = self.peek() - self.i += 1 + self.pos += 1 return ch def unget(self): - i2 = self.i - 1 - assert i2 > 0 # so that we can use self.i as slice start - self.i = i2 + i2 = self.pos - 1 + assert i2 > 0 # so that we can use self.pos as slice start + self.pos = i2 def getslice(self, start, end): assert start > 0 @@ -67,14 +67,14 @@ return self.s[start:end] def skip_whitespace(self): - i = self.i + i = self.pos while i < len(self.s): ch = self.s[i] if is_whitespace(ch): i+=1 else: break - self.i = i + self.pos = i @specialize.arg(1) def _raise(self, msg, *args): @@ -105,35 +105,35 @@ return self.decode_false() else: self._raise("No JSON object could be decoded: unexpected '%s' at char %d", - ch, self.i) + ch, self.pos) def decode_null(self): N = len('ull') - if (self.i+N <= len(self.s) and + if (self.pos+N <= len(self.s) and self.next() == 'u' and self.next() == 'l' and self.next() == 'l'): return self.space.w_None - self._raise("Error when decoding null at char %d", self.i) + self._raise("Error when decoding null at char %d", self.pos) def decode_true(self): N = len('rue') - if (self.i+N <= len(self.s) and + if (self.pos+N <= len(self.s) and self.next() == 'r' and self.next() == 'u' and self.next() == 'e'): return self.space.w_True - self._raise("Error when decoding true at char %d", self.i) + self._raise("Error when decoding true at char %d", self.pos) def decode_false(self): N = len('alse') - if (self.i+N <= len(self.s) and + if (self.pos+N <= len(self.s) and self.next() == 'a' and self.next() == 'l' and self.next() == 's' and self.next() == 'e'): return self.space.w_False - self._raise("Error when decoding false at char %d", self.i) + self._raise("Error when decoding false at char %d", self.pos) def decode_numeric(self): intval = self.parse_integer() @@ -179,7 +179,7 @@ "Parse a sequence of digits as a decimal number. 
No sign allowed" intval = 0 count = 0 - i = self.i + i = self.pos while i < len(self.s): ch = self.s[i] if ch.isdigit(): @@ -190,11 +190,11 @@ break if count == 0: self._raise("Expected digit at char %d", i) - self.i = i + self.pos = i return intval, count def decode_array(self): - start = self.i + start = self.pos w_list = self.space.newlist([]) self.skip_whitespace() while not self.eof(): @@ -214,12 +214,12 @@ pass else: self._raise("Unexpected '%s' when decoding array (char %d)", - ch, self.i) + ch, self.pos) self._raise("Unterminated array starting at char %d", start) def decode_object(self): - start = self.i + start = self.pos w_dict = self.space.newdict() while not self.eof(): ch = self.peek() @@ -237,7 +237,7 @@ break ch = self.next() if ch != ':': - self._raise("No ':' found at char %d", self.i) + self._raise("No ':' found at char %d", self.pos) self.skip_whitespace() # w_value = self.decode_any() @@ -252,12 +252,12 @@ pass else: self._raise("Unexpected '%s' when decoding object (char %d)", - ch, self.i) + ch, self.pos) self._raise("Unterminated object starting at char %d", start) def decode_string(self): - start = self.i - i = self.i + start = self.pos + i = self.pos bits = 0 while i < len(self.s): # this loop is a fast path for strings which do not contain escape @@ -274,11 +274,11 @@ # ascii only, fast path content_unicode = strslice2unicode_ascii(self.s, start, i-1) self.last_type = TYPE_STRING - self.i = i + self.pos = i return self.space.wrap(content_unicode) elif ch == '\\': content_so_far = self.getslice(start, i-1) - self.i = i-1 + self.pos = i-1 return self.decode_string_escaped(start, content_so_far) self._raise("Unterminated string starting at char %d", start) @@ -314,16 +314,16 @@ elif ch == 'u': return self.decode_escape_sequence_unicode(builder) else: - self._raise("Invalid \\escape: %s (char %d)", ch, self.i-1) + self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) def decode_escape_sequence_unicode(self, builder): # at this point we are just after the 'u' of the \u1234 sequence. 
- hexdigits = self.getslice(self.i, self.i+4) - self.i += 4 + hexdigits = self.getslice(self.pos, self.pos+4) + self.pos += 4 try: uchr = unichr(int(hexdigits, 16)) except ValueError: - self._raise("Invalid \uXXXX escape (char %d)", self.i-1) + self._raise("Invalid \uXXXX escape (char %d)", self.pos-1) return # help the annotator to know that we'll never go beyond # this point # @@ -338,7 +338,7 @@ w_res = decoder.decode_any() decoder.skip_whitespace() if not decoder.eof(): - start = decoder.i + start = decoder.pos end = len(decoder.s) raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) return w_res diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -4,7 +4,7 @@ def test_skip_whitespace(): dec = JSONDecoder('fake space', ' hello ') - assert dec.i == 0 + assert dec.pos == 0 dec.skip_whitespace() assert dec.next() == 'h' assert dec.next() == 'e' From noreply at buildbot.pypy.org Sun Jun 9 11:41:52 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:52 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: add a comment to this function, and give it a better name Message-ID: <20130609094152.841EC1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64832:286ff8c96fd4 Date: 2013-06-06 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/286ff8c96fd4/ Log: add a comment to this function, and give it a better name diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -16,10 +16,17 @@ return 0.0 return x * NEG_POW_10[exp] -TYPE_UNKNOWN = 0 -TYPE_STRING = 1 +def strslice2unicode_latin1(s, start, end): + """ + Convert s[start:end] to unicode. s is supposed to be an RPython string + encoded in latin-1, which means that the numeric value of each char is the + same as the corresponding unicode code point. -def strslice2unicode_ascii(s, start, end): + Internally it's implemented at the level of low-level helpers, to avoid + the extra copy we would need if we take the actual slice first. + + No bound checking is done, use carefully. 
+ """ from rpython.rtyper.annlowlevel import llstr, hlunicode from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar @@ -32,6 +39,8 @@ ll_res.chars[i] = cast_primitive(UniChar, ch) return hlunicode(ll_res) +TYPE_UNKNOWN = 0 +TYPE_STRING = 1 class JSONDecoder(object): def __init__(self, space, s): self.space = space @@ -271,8 +280,10 @@ content_utf8 = self.getslice(start, i-1) content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) else: - # ascii only, fast path - content_unicode = strslice2unicode_ascii(self.s, start, i-1) + # ascii only, fast path (ascii is a strict subset of + # latin1, and we already checked that all the chars are < + # 128) + content_unicode = strslice2unicode_latin1(self.s, start, i-1) self.last_type = TYPE_STRING self.pos = i return self.space.wrap(content_unicode) From noreply at buildbot.pypy.org Sun Jun 9 11:41:55 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:55 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: start to refactor the decoder to avoid continuously updating .pos Message-ID: <20130609094155.E4E1D1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64833:a3f22cd8a330 Date: 2013-06-06 17:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a3f22cd8a330/ Log: start to refactor the decoder to avoid continuously updating .pos diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -75,22 +75,21 @@ assert end > 0 return self.s[start:end] - def skip_whitespace(self): - i = self.pos + def skip_whitespace(self, i): while i < len(self.s): ch = self.s[i] if is_whitespace(ch): i+=1 else: break - self.pos = i + return i @specialize.arg(1) def _raise(self, msg, *args): raise operationerrfmt(self.space.w_ValueError, msg, *args) def decode_any(self): - self.skip_whitespace() + self.pos = self.skip_whitespace(self.pos) ch = self.peek() if ch == '"': self.next() @@ -203,21 +202,25 @@ return intval, count def decode_array(self): + w_list = self.space.newlist([]) start = self.pos - w_list = self.space.newlist([]) - self.skip_whitespace() - while not self.eof(): - ch = self.peek() + i = self.skip_whitespace(start) + while i < len(self.s): + ch = self.s[i] if ch == ']': - self.next() + self.pos = i+1 return w_list + self.pos = i w_item = self.decode_any() + i = self.pos self.space.call_method(w_list, 'append', w_item) - self.skip_whitespace() - if self.eof(): + i = self.skip_whitespace(i) + if i == len(self.s): break - ch = self.next() + ch = self.s[i] + i += 1 if ch == ']': + self.pos = i return w_list elif ch == ',': pass @@ -241,17 +244,17 @@ w_name = self.decode_any() if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) - self.skip_whitespace() + self.pos = self.skip_whitespace(self.pos) if self.eof(): break ch = self.next() if ch != ':': self._raise("No ':' found at char %d", self.pos) - self.skip_whitespace() + self.pos = self.skip_whitespace(self.pos) # w_value = self.decode_any() self.space.setitem(w_dict, w_name, w_value) - self.skip_whitespace() + self.pos = self.skip_whitespace(self.pos) if self.eof(): break ch = self.next() @@ -347,7 +350,7 @@ def loads(space, s): decoder = JSONDecoder(space, s) w_res = decoder.decode_any() - decoder.skip_whitespace() + decoder.pos = decoder.skip_whitespace(decoder.pos) if not 
decoder.eof(): start = decoder.pos end = len(decoder.s) diff --git a/pypy/module/_fastjson/test/test__fastjson.py b/pypy/module/_fastjson/test/test__fastjson.py --- a/pypy/module/_fastjson/test/test__fastjson.py +++ b/pypy/module/_fastjson/test/test__fastjson.py @@ -3,16 +3,12 @@ from pypy.module._fastjson.interp_decoder import JSONDecoder def test_skip_whitespace(): - dec = JSONDecoder('fake space', ' hello ') + s = ' hello ' + dec = JSONDecoder('fake space', s) assert dec.pos == 0 - dec.skip_whitespace() - assert dec.next() == 'h' - assert dec.next() == 'e' - assert dec.next() == 'l' - assert dec.next() == 'l' - assert dec.next() == 'o' - dec.skip_whitespace() - assert dec.eof() + assert dec.skip_whitespace(0) == 3 + assert dec.skip_whitespace(3) == 3 + assert dec.skip_whitespace(8) == len(s) From noreply at buildbot.pypy.org Sun Jun 9 11:41:57 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:57 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: put a \0 sentinel at the end of the string: the cost of the string copy is neligible but this let us to avoid lots of eof() checks during the parsing. Also, relax the dependency on self.pos, and explicitly pass the current index around Message-ID: <20130609094157.B19101C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64834:f642cad5d507 Date: 2013-06-07 01:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f642cad5d507/ Log: put a \0 sentinel at the end of the string: the cost of the string copy is neligible but this let us to avoid lots of eof() checks during the parsing. Also, relax the dependency on self.pos, and explicitly pass the current index around diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -4,6 +4,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter import unicodehelper +from rpython.rtyper.annlowlevel import llstr, hlunicode def is_whitespace(ch): return ch == ' ' or ch == '\t' or ch == '\r' or ch == '\n' @@ -88,62 +89,54 @@ def _raise(self, msg, *args): raise operationerrfmt(self.space.w_ValueError, msg, *args) - def decode_any(self): - self.pos = self.skip_whitespace(self.pos) - ch = self.peek() + def decode_any(self, i): + i = self.skip_whitespace(i) + ch = self.s[i] if ch == '"': - self.next() - return self.decode_string() + return self.decode_string(i+1) elif ch.isdigit() or ch == '-': - return self.decode_numeric() + return self.decode_numeric(i) elif ch == '[': - self.next() - return self.decode_array() + return self.decode_array(i+1) elif ch == '{': - self.next() - return self.decode_object() + return self.decode_object(i+1) elif ch == 'n': - self.next() - return self.decode_null() + return self.decode_null(i+1) elif ch == 't': - self.next() - return self.decode_true() + return self.decode_true(i+1) elif ch == 'f': - self.next() - return self.decode_false() + return self.decode_false(i+1) else: self._raise("No JSON object could be decoded: unexpected '%s' at char %d", ch, self.pos) - def decode_null(self): - N = len('ull') - if (self.pos+N <= len(self.s) and - self.next() == 'u' and - self.next() == 'l' and - self.next() == 'l'): + def decode_null(self, i): + if (self.s[i] == 'u' and + self.s[i+1] == 'l' and + self.s[i+2] == 'l'): + self.pos = i+3 return self.space.w_None - self._raise("Error when decoding null at char %d", self.pos) 
+ self._raise("Error when decoding null at char %d", i) - def decode_true(self): - N = len('rue') - if (self.pos+N <= len(self.s) and - self.next() == 'r' and - self.next() == 'u' and - self.next() == 'e'): + def decode_true(self, i): + if (self.s[i] == 'r' and + self.s[i+1] == 'u' and + self.s[i+2] == 'e'): + self.pos = i+3 return self.space.w_True - self._raise("Error when decoding true at char %d", self.pos) + self._raise("Error when decoding true at char %d", i) - def decode_false(self): - N = len('alse') - if (self.pos+N <= len(self.s) and - self.next() == 'a' and - self.next() == 'l' and - self.next() == 's' and - self.next() == 'e'): + def decode_false(self, i): + if (self.s[i] == 'a' and + self.s[i+1] == 'l' and + self.s[i+2] == 's' and + self.s[i+3] == 'e'): + self.pos = i+4 return self.space.w_False - self._raise("Error when decoding false at char %d", self.pos) + self._raise("Error when decoding false at char %d", i) - def decode_numeric(self): + def decode_numeric(self, i): + self.pos = i intval = self.parse_integer() # is_float = False @@ -201,22 +194,20 @@ self.pos = i return intval, count - def decode_array(self): + def decode_array(self, i): w_list = self.space.newlist([]) - start = self.pos + start = i + count = 0 i = self.skip_whitespace(start) while i < len(self.s): ch = self.s[i] if ch == ']': self.pos = i+1 return w_list - self.pos = i - w_item = self.decode_any() + w_item = self.decode_any(i) i = self.pos self.space.call_method(w_list, 'append', w_item) i = self.skip_whitespace(i) - if i == len(self.s): - break ch = self.s[i] i += 1 if ch == ']': @@ -230,35 +221,34 @@ self._raise("Unterminated array starting at char %d", start) - def decode_object(self): - start = self.pos + def decode_object(self, i): + start = i w_dict = self.space.newdict() - while not self.eof(): - ch = self.peek() + while i < len(self.s): + ch = self.s[i] if ch == '}': - self.next() + self.pos = i+1 return w_dict # # parse a key: value self.last_type = TYPE_UNKNOWN - w_name = self.decode_any() + w_name = self.decode_any(i) if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) - self.pos = self.skip_whitespace(self.pos) - if self.eof(): - break - ch = self.next() + i = self.skip_whitespace(self.pos) + ch = self.s[i] if ch != ':': - self._raise("No ':' found at char %d", self.pos) - self.pos = self.skip_whitespace(self.pos) + self._raise("No ':' found at char %d", i) + i += 1 + i = self.skip_whitespace(i) # - w_value = self.decode_any() + w_value = self.decode_any(i) self.space.setitem(w_dict, w_name, w_value) - self.pos = self.skip_whitespace(self.pos) - if self.eof(): - break - ch = self.next() + i = self.skip_whitespace(self.pos) + ch = self.s[i] + i += 1 if ch == '}': + self.pos = i return w_dict elif ch == ',': pass @@ -267,9 +257,8 @@ ch, self.pos) self._raise("Unterminated object starting at char %d", start) - def decode_string(self): - start = self.pos - i = self.pos + def decode_string(self, i): + start = i bits = 0 while i < len(self.s): # this loop is a fast path for strings which do not contain escape @@ -348,11 +337,13 @@ @unwrap_spec(s=str) def loads(space, s): + # the '\0' serves as a sentinel, so that we can avoid the bound check + s = s + '\0' decoder = JSONDecoder(space, s) - w_res = decoder.decode_any() - decoder.pos = decoder.skip_whitespace(decoder.pos) - if not decoder.eof(): - start = decoder.pos - end = len(decoder.s) + w_res = decoder.decode_any(0) + i = decoder.skip_whitespace(decoder.pos) + if s[i] != '\0': + start 
= i + end = len(s) - 1 raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) return w_res From noreply at buildbot.pypy.org Sun Jun 9 11:41:58 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:41:58 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: apparently, checking for digits at last gives another small speedup Message-ID: <20130609094158.EF8FB1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64835:fdc8c71594f3 Date: 2013-06-07 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/fdc8c71594f3/ Log: apparently, checking for digits at last gives another small speedup diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -94,8 +94,6 @@ ch = self.s[i] if ch == '"': return self.decode_string(i+1) - elif ch.isdigit() or ch == '-': - return self.decode_numeric(i) elif ch == '[': return self.decode_array(i+1) elif ch == '{': @@ -106,6 +104,8 @@ return self.decode_true(i+1) elif ch == 'f': return self.decode_false(i+1) + elif ch.isdigit() or ch == '-': + return self.decode_numeric(i) else: self._raise("No JSON object could be decoded: unexpected '%s' at char %d", ch, self.pos) From noreply at buildbot.pypy.org Sun Jun 9 11:42:00 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:00 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: more drangons: RPython is not smart enough to remove bound checking everywhere we are interested in. Instead, we access directly to the underlying char array: Message-ID: <20130609094200.2CDB61C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64836:0cf42778c9db Date: 2013-06-07 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/0cf42778c9db/ Log: more drangons: RPython is not smart enough to remove bound checking everywhere we are interested in. 
Instead, we access directly to the underlying char array: diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -46,14 +46,16 @@ def __init__(self, space, s): self.space = space self.s = s + self.ll_chars = llstr(s).chars + self.length = len(self.s) self.pos = 0 self.last_type = TYPE_UNKNOWN def eof(self): - return self.pos == len(self.s) + return self.pos == self.length def peek(self): - return self.s[self.pos] + return self.ll_chars[self.pos] def peek_maybe(self): if self.eof(): @@ -77,8 +79,8 @@ return self.s[start:end] def skip_whitespace(self, i): - while i < len(self.s): - ch = self.s[i] + while i < self.length: + ch = self.ll_chars[i] if is_whitespace(ch): i+=1 else: @@ -91,7 +93,7 @@ def decode_any(self, i): i = self.skip_whitespace(i) - ch = self.s[i] + ch = self.ll_chars[i] if ch == '"': return self.decode_string(i+1) elif ch == '[': @@ -111,26 +113,26 @@ ch, self.pos) def decode_null(self, i): - if (self.s[i] == 'u' and - self.s[i+1] == 'l' and - self.s[i+2] == 'l'): + if (self.ll_chars[i] == 'u' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 'l'): self.pos = i+3 return self.space.w_None self._raise("Error when decoding null at char %d", i) def decode_true(self, i): - if (self.s[i] == 'r' and - self.s[i+1] == 'u' and - self.s[i+2] == 'e'): + if (self.ll_chars[i] == 'r' and + self.ll_chars[i+1] == 'u' and + self.ll_chars[i+2] == 'e'): self.pos = i+3 return self.space.w_True self._raise("Error when decoding true at char %d", i) def decode_false(self, i): - if (self.s[i] == 'a' and - self.s[i+1] == 'l' and - self.s[i+2] == 's' and - self.s[i+3] == 'e'): + if (self.ll_chars[i] == 'a' and + self.ll_chars[i+1] == 'l' and + self.ll_chars[i+2] == 's' and + self.ll_chars[i+3] == 'e'): self.pos = i+4 return self.space.w_False self._raise("Error when decoding false at char %d", i) @@ -181,8 +183,8 @@ intval = 0 count = 0 i = self.pos - while i < len(self.s): - ch = self.s[i] + while i < self.length: + ch = self.ll_chars[i] if ch.isdigit(): intval = intval*10 + ord(ch)-ord('0') count += 1 @@ -199,8 +201,8 @@ start = i count = 0 i = self.skip_whitespace(start) - while i < len(self.s): - ch = self.s[i] + while i < self.length: + ch = self.ll_chars[i] if ch == ']': self.pos = i+1 return w_list @@ -208,7 +210,7 @@ i = self.pos self.space.call_method(w_list, 'append', w_item) i = self.skip_whitespace(i) - ch = self.s[i] + ch = self.ll_chars[i] i += 1 if ch == ']': self.pos = i @@ -224,8 +226,8 @@ def decode_object(self, i): start = i w_dict = self.space.newdict() - while i < len(self.s): - ch = self.s[i] + while i < self.length: + ch = self.ll_chars[i] if ch == '}': self.pos = i+1 return w_dict @@ -236,7 +238,7 @@ if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) i = self.skip_whitespace(self.pos) - ch = self.s[i] + ch = self.ll_chars[i] if ch != ':': self._raise("No ':' found at char %d", i) i += 1 @@ -245,7 +247,7 @@ w_value = self.decode_any(i) self.space.setitem(w_dict, w_name, w_value) i = self.skip_whitespace(self.pos) - ch = self.s[i] + ch = self.ll_chars[i] i += 1 if ch == '}': self.pos = i @@ -260,10 +262,10 @@ def decode_string(self, i): start = i bits = 0 - while i < len(self.s): + while i < self.length: # this loop is a fast path for strings which do not contain escape # characters - ch = self.s[i] + ch = self.ll_chars[i] i += 1 bits |= ord(ch) if ch == '"': From noreply at 
buildbot.pypy.org Sun Jun 9 11:42:01 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:01 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: there is no need to check for the bound, as there is the sentinel at the end of the string anyway. Morever, avoid to keep a separate counter Message-ID: <20130609094201.51C5E1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64837:d0a3ae2d5a74 Date: 2013-06-07 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/d0a3ae2d5a74/ Log: there is no need to check for the bound, as there is the sentinel at the end of the string anyway. Morever, avoid to keep a separate counter diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -182,15 +182,15 @@ "Parse a sequence of digits as a decimal number. No sign allowed" intval = 0 count = 0 - i = self.pos - while i < self.length: + start = i = self.pos + while True: ch = self.ll_chars[i] if ch.isdigit(): intval = intval*10 + ord(ch)-ord('0') - count += 1 i += 1 else: break + count = i - start if count == 0: self._raise("Expected digit at char %d", i) self.pos = i From noreply at buildbot.pypy.org Sun Jun 9 11:42:02 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:02 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: wow, avoiding self.pos, peek_maybe() and friends gives a speedup of ~30%\! Message-ID: <20130609094202.7C7F71C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64838:24a2398ade5b Date: 2013-06-07 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/24a2398ade5b/ Log: wow, avoiding self.pos, peek_maybe() and friends gives a speedup of ~30%\! diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -138,8 +138,7 @@ self._raise("Error when decoding false at char %d", i) def decode_numeric(self, i): - self.pos = i - intval = self.parse_integer() + i, intval = self.parse_integer(i) # is_float = False exp = 0 @@ -147,19 +146,18 @@ frccount = 0 # # check for the optional fractional part - ch = self.peek_maybe() + ch = self.ll_chars[i] if ch == '.': is_float = True - self.next() - frcval, frccount = self.parse_digits() + i, frcval, frccount = self.parse_digits(i+1) frcval = neg_pow_10(frcval, frccount) - ch = self.peek_maybe() + ch = self.ll_chars[i] # check for the optional exponent part if ch == 'E' or ch == 'e': is_float = True - self.next() - exp = self.parse_integer() + i, exp = self.parse_integer(i+1) # + self.pos = i if is_float: # build the float floatval = intval + frcval @@ -169,20 +167,19 @@ else: return self.space.wrap(intval) - def parse_integer(self): + def parse_integer(self, i): "Parse a decimal number with an optional minus sign" sign = 1 - if self.peek_maybe() == '-': + if self.ll_chars[i] == '-': sign = -1 - self.next() - intval, _ = self.parse_digits() - return sign * intval + i += 1 + i, intval, _ = self.parse_digits(i) + return i, sign * intval - def parse_digits(self): + def parse_digits(self, i): "Parse a sequence of digits as a decimal number. 
No sign allowed" intval = 0 - count = 0 - start = i = self.pos + start = i while True: ch = self.ll_chars[i] if ch.isdigit(): @@ -193,8 +190,7 @@ count = i - start if count == 0: self._raise("Expected digit at char %d", i) - self.pos = i - return intval, count + return i, intval, count def decode_array(self, i): w_list = self.space.newlist([]) From noreply at buildbot.pypy.org Sun Jun 9 11:42:03 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:03 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: kill the last references to peek, next, eof and peek_maybe, they are no longer used Message-ID: <20130609094203.B5A7F1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64839:022385d2f170 Date: 2013-06-07 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/022385d2f170/ Log: kill the last references to peek, next, eof and peek_maybe, they are no longer used diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -51,28 +51,6 @@ self.pos = 0 self.last_type = TYPE_UNKNOWN - def eof(self): - return self.pos == self.length - - def peek(self): - return self.ll_chars[self.pos] - - def peek_maybe(self): - if self.eof(): - return '\0' - else: - return self.peek() - - def next(self): - ch = self.peek() - self.pos += 1 - return ch - - def unget(self): - i2 = self.pos - 1 - assert i2 > 0 # so that we can use self.pos as slice start - self.pos = i2 - def getslice(self, start, end): assert start > 0 assert end > 0 @@ -287,23 +265,27 @@ def decode_string_escaped(self, start, content_so_far): builder = StringBuilder(len(content_so_far)*2) # just an estimate builder.append(content_so_far) - while not self.eof(): - ch = self.next() + i = self.pos + while True: + ch = self.ll_chars[i] + i += 1 if ch == '"': content_utf8 = builder.build() content_unicode = unicodehelper.decode_utf8(self.space, content_utf8) self.last_type = TYPE_STRING + self.pos = i return self.space.wrap(content_unicode) elif ch == '\\': - self.decode_escape_sequence(builder) + i = self.decode_escape_sequence(i, builder) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) else: builder.append_multiple_char(ch, 1) # we should implement append_char - # - self._raise("Unterminated string starting at char %d", start) - def decode_escape_sequence(self, builder): + def decode_escape_sequence(self, i, builder): + ch = self.ll_chars[i] + i += 1 put = builder.append_multiple_char - ch = self.next() if ch == '\\': put('\\', 1) elif ch == '"': put('"' , 1) elif ch == '/': put('/' , 1) @@ -313,24 +295,26 @@ elif ch == 'r': put('\r', 1) elif ch == 't': put('\t', 1) elif ch == 'u': - return self.decode_escape_sequence_unicode(builder) + return self.decode_escape_sequence_unicode(i, builder) else: self._raise("Invalid \\escape: %s (char %d)", ch, self.pos-1) + return i - def decode_escape_sequence_unicode(self, builder): + def decode_escape_sequence_unicode(self, i, builder): # at this point we are just after the 'u' of the \u1234 sequence. 
- hexdigits = self.getslice(self.pos, self.pos+4) - self.pos += 4 + start = i + i += 4 + hexdigits = self.getslice(start, i) try: uchr = unichr(int(hexdigits, 16)) except ValueError: - self._raise("Invalid \uXXXX escape (char %d)", self.pos-1) + self._raise("Invalid \uXXXX escape (char %d)", i-1) return # help the annotator to know that we'll never go beyond # this point # utf8_ch = unicodehelper.encode_utf8(self.space, uchr) builder.append(utf8_ch) - + return i @unwrap_spec(s=str) From noreply at buildbot.pypy.org Sun Jun 9 11:42:04 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:04 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: remove one more bound check (safely! :)) Message-ID: <20130609094204.E44EC1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64840:b31803bdd48c Date: 2013-06-07 18:25 +0200 http://bitbucket.org/pypy/pypy/changeset/b31803bdd48c/ Log: remove one more bound check (safely! :)) diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -46,8 +46,10 @@ def __init__(self, space, s): self.space = space self.s = s - self.ll_chars = llstr(s).chars - self.length = len(self.s) + # we put a sentinel at the end so that we never have to check for the + # "end of string" condition + self.ll_chars = llstr(s+'\0').chars + self.length = len(s) self.pos = 0 self.last_type = TYPE_UNKNOWN @@ -57,7 +59,7 @@ return self.s[start:end] def skip_whitespace(self, i): - while i < self.length: + while True: ch = self.ll_chars[i] if is_whitespace(ch): i+=1 @@ -319,12 +321,10 @@ @unwrap_spec(s=str) def loads(space, s): - # the '\0' serves as a sentinel, so that we can avoid the bound check - s = s + '\0' decoder = JSONDecoder(space, s) w_res = decoder.decode_any(0) i = decoder.skip_whitespace(decoder.pos) - if s[i] != '\0': + if i < len(s): start = i end = len(s) - 1 raise operationerrfmt(space.w_ValueError, "Extra data: char %d - %d", start, end) From noreply at buildbot.pypy.org Sun Jun 9 11:42:06 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:06 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: kill one more end-of-string check Message-ID: <20130609094206.297AC1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64841:2a2c1967e1c1 Date: 2013-06-07 18:38 +0200 http://bitbucket.org/pypy/pypy/changeset/2a2c1967e1c1/ Log: kill one more end-of-string check diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -177,7 +177,7 @@ start = i count = 0 i = self.skip_whitespace(start) - while i < self.length: + while True: ch = self.ll_chars[i] if ch == ']': self.pos = i+1 @@ -193,10 +193,11 @@ return w_list elif ch == ',': pass + elif ch == '\0': + self._raise("Unterminated array starting at char %d", start) else: self._raise("Unexpected '%s' when decoding array (char %d)", ch, self.pos) - self._raise("Unterminated array starting at char %d", start) def decode_object(self, i): From noreply at buildbot.pypy.org Sun Jun 9 11:42:07 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:07 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: kill yet another more end-of-string check Message-ID: <20130609094207.4DDE21C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson 
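The fastjson changes above combine two independent tricks: a '\0' sentinel is appended to the input so that the scanning loops never need an explicit "i < length" test (any non-digit character, including the sentinel, terminates them), and the current position is threaded through as an argument and return value instead of being read and written via self.pos on every character. A rough Python sketch of the resulting parse_digits() shape — toy code written for this illustration, not taken from the pypy repository:

    class ToyScanner(object):
        def __init__(self, s):
            # the appended '\0' sentinel guarantees every scan loop terminates,
            # so no "i < len(s)" check is needed inside the loop
            self.ll_chars = s + '\0'

        def parse_digits(self, i):
            "Parse a run of digits; returns (new_position, value, ndigits)."
            intval = 0
            start = i
            while True:
                ch = self.ll_chars[i]
                if ch.isdigit():
                    intval = intval * 10 + (ord(ch) - ord('0'))
                    i += 1
                else:
                    break          # ',', ']', '}', whitespace or the '\0' sentinel
            count = i - start
            if count == 0:
                raise ValueError("Expected digit at char %d" % i)
            return i, intval, count

    print(ToyScanner("4096,").parse_digits(0))    # -> (4, 4096, 4)

In plain CPython the benefit of this style is modest; the ~30% figure quoted above is for the translated RPython module, where dropping the bound checks and the attribute traffic is what matters.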
Changeset: r64842:27044fc0ceb8 Date: 2013-06-07 18:41 +0200 http://bitbucket.org/pypy/pypy/changeset/27044fc0ceb8/ Log: kill yet another more end-of-string check diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -203,7 +203,7 @@ def decode_object(self, i): start = i w_dict = self.space.newdict() - while i < self.length: + while True: ch = self.ll_chars[i] if ch == '}': self.pos = i+1 @@ -231,10 +231,12 @@ return w_dict elif ch == ',': pass + elif ch == '\0': + self._raise("Unterminated object starting at char %d", start) else: self._raise("Unexpected '%s' when decoding object (char %d)", ch, self.pos) - self._raise("Unterminated object starting at char %d", start) + def decode_string(self, i): start = i From noreply at buildbot.pypy.org Sun Jun 9 11:42:08 2013 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 9 Jun 2013 11:42:08 +0200 (CEST) Subject: [pypy-commit] pypy fastjson: remove the last end-of-string check Message-ID: <20130609094208.7671C1C07E1@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: fastjson Changeset: r64843:2a7d18573294 Date: 2013-06-07 18:43 +0200 http://bitbucket.org/pypy/pypy/changeset/2a7d18573294/ Log: remove the last end-of-string check diff --git a/pypy/module/_fastjson/interp_decoder.py b/pypy/module/_fastjson/interp_decoder.py --- a/pypy/module/_fastjson/interp_decoder.py +++ b/pypy/module/_fastjson/interp_decoder.py @@ -49,7 +49,6 @@ # we put a sentinel at the end so that we never have to check for the # "end of string" condition self.ll_chars = llstr(s+'\0').chars - self.length = len(s) self.pos = 0 self.last_type = TYPE_UNKNOWN @@ -241,7 +240,7 @@ def decode_string(self, i): start = i bits = 0 - while i < self.length: + while True: # this loop is a fast path for strings which do not contain escape # characters ch = self.ll_chars[i] @@ -264,7 +263,8 @@ content_so_far = self.getslice(start, i-1) self.pos = i-1 return self.decode_string_escaped(start, content_so_far) - self._raise("Unterminated string starting at char %d", start) + elif ch == '\0': + self._raise("Unterminated string starting at char %d", start) def decode_string_escaped(self, start, content_so_far): From noreply at buildbot.pypy.org Sun Jun 9 16:07:09 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 16:07:09 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress, maybe? messy :-( Message-ID: <20130609140709.B35A01C07E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r84:d2ad521c9ac9 Date: 2013-06-09 16:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/d2ad521c9ac9/ Log: in-progress, maybe? 
messy :-( diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -66,6 +66,13 @@ gcptr P = G; revision_t v; + if (UNLIKELY(d->public_descriptor->stolen_objects.size > 0)) + { + spinlock_acquire(d->public_descriptor->collection_lock, 'N'); + stm_normalize_stolen_objects(d->public_descriptor); + spinlock_release(d->public_descriptor->collection_lock); + } + if (P->h_tid & GCFLAG_PUBLIC) { /* follow the chained list of h_revision's as long as they are @@ -144,7 +151,7 @@ if (foreign_pd == d->public_descriptor) { /* same thread */ - P = (gcptr)v; + P = (gcptr)(v - 2); assert(!(P->h_tid & GCFLAG_PUBLIC)); if (P->h_revision == stm_private_rev_num) { @@ -306,7 +313,14 @@ memcpy(B + 1, P + 1, size - sizeof(*B)); } assert(B->h_tid & GCFLAG_BACKUP_COPY); - gcptrlist_insert2(&d->public_descriptor->active_backup_copies, P, B); + + gcptrlist_locked_insert2(&d->public_descriptor->active_backup_copies, P, B, + &d->public_descriptor->collection_lock); + + smp_wmb(); /* guarantees that stm_steal_stub() will see the list + up to the (P, B) pair in case it goes the path + h_revision == *foreign_pd->private_revision_ref */ + P->h_revision = stm_private_rev_num; return P; } @@ -328,7 +342,6 @@ not_found:; gcptr L = stmgc_duplicate(R); assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(L->h_tid & GCFLAG_STOLEN)); assert(!(L->h_tid & GCFLAG_STUB)); L->h_tid &= ~(GCFLAG_OLD | GCFLAG_VISITED | @@ -367,16 +380,19 @@ return W; } -gcptr stm_get_backup_copy(gcptr P) +gcptr stm_get_backup_copy(long index) { - assert(P->h_revision == stm_private_rev_num); + struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; + if (index < gcptrlist_size(&pd->active_backup_copies)) + return pd->active_backup_copies.items[index]; + return NULL; +} +gcptr stm_get_stolen_obj(long index) +{ struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; - long i, size = pd->active_backup_copies.size; - gcptr *items = pd->active_backup_copies.items; - for (i = 0; i < size; i += 2) - if (items[i] == P) - return items[i + 1]; + if (index < gcptrlist_size(&pd->stolen_objects)) + return pd->stolen_objects.items[index]; return NULL; } @@ -549,7 +565,9 @@ gcptrlist_clear(&d->list_of_read_objects); gcptrlist_clear(&d->public_descriptor->active_backup_copies); - abort();//stmgc_abort_transaction(d); + abort(); + d->public_descriptor->stolen_objects;//XXX clean up + //stmgc_abort_transaction(d); fprintf(stderr, "\n" @@ -612,7 +630,6 @@ d->start_real_time.tv_nsec = -1; } assert(d->list_of_read_objects.size == 0); - assert(d->public_descriptor->active_backup_copies.size == 0); assert(!g2l_any_entry(&d->public_to_private)); d->count_reads = 1; @@ -724,7 +741,6 @@ assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); - assert(!(L->h_tid & GCFLAG_STOLEN)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -751,7 +767,6 @@ assert(R->h_tid & GCFLAG_PUBLIC); assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); - assert(!(R->h_tid & GCFLAG_STOLEN)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -801,19 +816,21 @@ void TurnPrivateWithBackupToProtected(struct tx_descriptor *d, revision_t cur_time) { - long i, size = d->public_descriptor->active_backup_copies.size; - gcptr *items = d->public_descriptor->active_backup_copies.items; + struct tx_public_descriptor *pd = d->public_descriptor; + long i, size = pd->active_backup_copies.size; + gcptr *items = 
pd->active_backup_copies.items; for (i = 0; i < size; i += 2) { gcptr P = items[i]; gcptr B = items[i + 1]; + assert(B->h_tid & GCFLAG_BACKUP_COPY); + assert(!(B->h_tid & GCFLAG_PUBLIC)); assert(P->h_revision == stm_private_rev_num); - assert(B->h_tid & GCFLAG_BACKUP_COPY); B->h_revision = cur_time; P->h_revision = (revision_t)B; }; - gcptrlist_clear(&d->public_descriptor->active_backup_copies); + gcptrlist_clear(&pd->active_backup_copies); } void CommitTransaction(void) @@ -823,6 +840,9 @@ assert(d->active >= 1); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ + if (d->public_descriptor->stolen_objects.size) + stm_normalize_stolen_objects(d->public_descriptor); + AcquireLocks(d); if (is_inevitable(d)) @@ -879,7 +899,7 @@ assert(newrev & 1); ACCESS_ONCE(stm_private_rev_num) = newrev; fprintf(stderr, "%p: stm_local_revision = %ld\n", d, (long)newrev); - assert(d->private_revision_ref = &stm_private_rev_num); + assert(d->public_descriptor->private_revision_ref = &stm_private_rev_num); UpdateChainHeads(d, cur_time, localrev); @@ -1097,7 +1117,7 @@ assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); stm_private_rev_num = -1; - d->private_revision_ref = &stm_private_rev_num; + pd->private_revision_ref = &stm_private_rev_num; d->max_aborts = -1; thread_descriptor = d; @@ -1113,12 +1133,15 @@ void DescriptorDone(void) { + static revision_t no_private_revision = 8; revision_t i; struct tx_descriptor *d = thread_descriptor; assert(d != NULL); assert(d->active == 0); - d->public_descriptor->collection_lock = 0; /* unlock */ + spinlock_acquire(d->public_descriptor->collection_lock, 'D'); /*done*/ + d->public_descriptor->private_revision_ref = &no_private_revision; + spinlock_release(d->public_descriptor->collection_lock); spinlock_acquire(descriptor_array_lock, 1); i = d->public_descriptor_index; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -31,6 +31,7 @@ * GCFLAG_PUBLIC is set on public objects. * * GCFLAG_BACKUP_COPY means the object is a (protected) backup copy. + * For debugging. * * GCFLAG_PUBLIC_TO_PRIVATE is added to a *public* object that has got a * *private* copy. It is sticky, reset only at the next major collection. @@ -47,9 +48,6 @@ * * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. * - * GCFLAG_STOLEN is set of protected objects after we notice that they - * have been stolen. - * * GCFLAG_STUB is set for debugging on stub objects made by stealing or * by major collections. 
'p_stub->h_revision' might be a value * that is == 2 (mod 4): in this case they point to a protected/private @@ -59,12 +57,11 @@ #define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1) #define GCFLAG_PUBLIC (STM_FIRST_GCFLAG << 2) #define GCFLAG_PREBUILT_ORIGINAL (STM_FIRST_GCFLAG << 3) -#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 4) -#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 5) -#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 6) -#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 7) -#define GCFLAG_STOLEN (STM_FIRST_GCFLAG << 8) -#define GCFLAG_STUB (STM_FIRST_GCFLAG << 9) /* debugging */ +#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 4) +#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 5) +#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 6) +#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 7) /* debugging */ +#define GCFLAG_STUB (STM_FIRST_GCFLAG << 8) /* debugging */ /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -108,8 +105,9 @@ revision_t collection_lock; struct stub_block_s *stub_blocks; gcptr stub_free_list; - struct GcPtrList stolen_objects; - struct GcPtrList active_backup_copies; + struct GcPtrList active_backup_copies; /* (P,B) where P=private, B=backup */ + struct GcPtrList stolen_objects; /* (P,Q) where P=priv/prot, Q=public */ + revision_t *private_revision_ref; revision_t free_list_next; /* xxx gcpage data here */ }; @@ -142,7 +140,6 @@ char *longest_abort_info; long long longest_abort_info_time; struct FXCache recent_reads_cache; - revision_t *private_revision_ref; }; extern __thread struct tx_descriptor *thread_descriptor; @@ -165,7 +162,8 @@ gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr, int *); -gcptr stm_get_backup_copy(gcptr); +gcptr stm_get_backup_copy(long); /* debugging */ +gcptr stm_get_stolen_obj(long); /* debugging */ gcptr stm_get_read_obj(long); /* debugging */ gcptr stmgc_duplicate(gcptr); diff --git a/c4/lists.c b/c4/lists.c --- a/c4/lists.c +++ b/c4/lists.c @@ -171,6 +171,23 @@ gcptrlist->size = i + 2; } +void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1, + gcptr newitem2, revision_t *lock) +{ + gcptr *items; + long i = gcptrlist->size; + if (UNLIKELY((gcptrlist->alloc - i) < 2)) + { + spinlock_acquire(*lock, 'I'); + _gcptrlist_grow(gcptrlist); + spinlock_release(*lock); + } + items = gcptrlist->items; + items[i+0] = newitem1; + items[i+1] = newitem2; + gcptrlist->size = i + 2; +} + void gcptrlist_insert3(struct GcPtrList *gcptrlist, gcptr newitem1, gcptr newitem2, gcptr newitem3) { diff --git a/c4/lists.h b/c4/lists.h --- a/c4/lists.h +++ b/c4/lists.h @@ -164,6 +164,9 @@ void gcptrlist_merge(struct GcPtrList *, struct GcPtrList *gcptrlist_source); void gcptrlist_move(struct GcPtrList *, struct GcPtrList *gcptrlist_source); +void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1, + gcptr newitem2, revision_t *lock); + /************************************************************/ /* The fxcache_xx functions implement a fixed-size set of gcptr's. diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -53,9 +53,29 @@ if ((v & 3) != 2) goto done; /* un-stubbed while we waited for the lock */ - gcptr L = (gcptr)(v - 2); - gcptr Q = stmgc_duplicate(L); + gcptr Q, L = (gcptr)(v - 2); + revision_t w = ACCESS_ONCE(L->h_revision); + + if (w == *foreign_pd->private_revision_ref) { + /* The stub points to a private object L. 
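A side note on the h_revision word being decoded here: object pointers are word-aligned, so the two low bits are free to act as a tag, which is what tests like "(v & 3) != 2" and expressions like "(gcptr)(v - 2)" rely on. A toy decoder, written only to illustrate the convention as it appears in these diffs (an odd value is a revision number, a value equal to 2 mod 4 is a stub reference, anything else is a plain pointer to a newer copy):

    def classify_h_revision(v):
        if v & 1:                                  # odd: a global or private revision number
            return ('revision number', v)
        elif (v & 3) == 2:                         # == 2 (mod 4): stub, target is v - 2
            return ('stub reference', v - 2)
        else:                                      # aligned pointer: follow the chain
            return ('pointer to a newer copy', v)

    print(classify_h_revision(2013))          # ('revision number', 2013)
    print(classify_h_revision(0x7f30 + 2))    # ('stub reference', 32560)
    print(classify_h_revision(0x7f30))        # ('pointer to a newer copy', 32560)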
Because it cannot point + to "really private" objects, it must mean that L used to be + a protected object, and it has an attached backed copy. + XXX find a way to optimize this search, maybe */ + long i; + gcptr *items = foreign_pd->active_backup_copies.items; + /* we must find L as the first item of a pair in the list. We + cannot rely on how big the list is here, but we know that + it will not be resized while we hold collection_lock. */ + for (i = 0; items[i] != L; i += 2) + ; + L = items[i + 1]; + assert(L->h_tid & GCFLAG_BACKUP_COPY); + } + /* duplicate L */ + Q = stmgc_duplicate(L); XXX RACE + Q->h_tid &= ~GCFLAG_BACKUP_COPY; Q->h_tid |= GCFLAG_PUBLIC; + gcptrlist_insert2(&foreign_pd->stolen_objects, L, Q); smp_wmb(); @@ -64,3 +84,16 @@ done: spinlock_release(foreign_pd->collection_lock); } + +void stm_normalize_stolen_objects(struct tx_public_descriptor *pd) +{ + long i, size = pd->stolen_objects.size; + gcptr *items = pd->stolen_objects.items; + for (i = 0; i < size; i += 2) { + gcptr L = items[i]; + gcptr Q = items[i + 1]; + if (L->h_revision == stm_private_rev_num) { + + } + } +} diff --git a/c4/steal.h b/c4/steal.h --- a/c4/steal.h +++ b/c4/steal.h @@ -9,6 +9,8 @@ gcptr stm_stub_malloc(struct tx_public_descriptor *); void stm_steal_stub(gcptr); +gcptr stm_get_stolen_obj(long index); /* debugging */ +void stm_normalize_stolen_objects(struct tx_public_descriptor *); #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -68,7 +68,8 @@ void stm_start_sharedlock(void); void stm_stop_sharedlock(void); void AbortTransaction(int); - gcptr stm_get_backup_copy(gcptr); + gcptr stm_get_backup_copy(long index); + gcptr stm_get_stolen_obj(long index); gcptr stm_get_read_obj(long index); void *STUB_THREAD(gcptr); @@ -103,7 +104,6 @@ #define GCFLAG_PUBLIC_TO_PRIVATE ... #define GCFLAG_WRITE_BARRIER ... #define GCFLAG_NURSERY_MOVED ... - #define GCFLAG_STOLEN ... #define GCFLAG_STUB ... #define ABRT_MANUAL ... 
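The support.py file being patched here is the cffi-based harness that drives the C sources from the Python tests: ffi.cdef() lists the C functions and flags, with integer constants written as "#define NAME ..." so that the C compiler supplies their values, and the declarations are then compiled against the real sources (presumably via ffi.verify()). A minimal standalone sketch of the same pattern — the constant and function below are invented for the example and have nothing to do with stmgc:

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("""
        #define EXAMPLE_FLAG ...
        int example_twice(int x);
    """)
    lib = ffi.verify("""
        #define EXAMPLE_FLAG 0x40
        int example_twice(int x) { return 2 * x; }
    """)

    assert lib.EXAMPLE_FLAG == 0x40
    assert lib.example_twice(21) == 42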
//typedef struct { ...; } page_header_t; @@ -552,4 +552,23 @@ index += 1 return result +def _list2dict(getter): + result = {} + index = 0 + while 1: + p = getter(index) + if p == ffi.NULL: + break + q = getter(index + 1) + assert q != ffi.NULL + result[p] = q + index += 2 + return result + +def backup_copies(): + return _list2dict(lib.stm_get_backup_copy) + +def stolen_objs(): + return _list2dict(lib.stm_get_stolen_obj) + stub_thread = lib.STUB_THREAD diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -60,7 +60,7 @@ org_r = p.h_revision lib.setlong(p, 0, 927122) assert p.h_revision == lib.get_private_rev_num() - pback = lib.stm_get_backup_copy(p) + pback = backup_copies()[p] assert pback and pback != p assert pback.h_revision == org_r assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY @@ -75,7 +75,7 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() lib.setlong(p, 0, 927122) - pback = lib.stm_get_backup_copy(p) + pback = backup_copies()[p] assert pback != p assert p.h_revision == lib.get_private_rev_num() lib.stm_commit_transaction() @@ -90,7 +90,7 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() lib.setlong(p, 0, 927122) - pback = lib.stm_get_backup_copy(p) + pback = backup_copies()[p] assert pback != p lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() @@ -100,7 +100,7 @@ assert lib.rawgetlong(pback, 0) == 78927812 # but should not be used lib.setlong(p, 0, 43891) assert p.h_revision == lib.get_private_rev_num() - assert pback == lib.stm_get_backup_copy(p) + assert pback == backup_copies()[p] assert lib.rawgetlong(p, 0) == 43891 assert lib.rawgetlong(pback, 0) == 927122 @@ -239,14 +239,16 @@ lib.stm_begin_inevitable_transaction() assert classify(p) == "public" assert classify(p1) == "protected" - plist.append(p1) - # now p's most recent revision is protected + plist.append(p1) # now p's most recent revision is protected assert classify(ffi.cast("gcptr", p.h_revision)) == "stub" r.set(2) r.wait(3) - assert lib.list_stolen_objects() == plist[-2:] - p2 = lib.stm_read_barrier(p1) - assert p2 == plist[-1] + d = stolen_objs() + assert len(d) == 1 + assert d.keys() == [p1] + [p2] = d.values() + assert lib.stm_read_barrier(p) == p2 + assert lib.stm_read_barrier(p1) == p2 def f2(r): r.wait(2) p2 = lib.stm_read_barrier(p) # steals @@ -255,6 +257,7 @@ assert p.h_revision == int(ffi.cast("revision_t", p2)) assert p2 == lib.stm_read_barrier(p) assert p2 not in plist + assert classify(p2) == "public" plist.append(p2) r.set(3) run_parallel(f1, f2) From noreply at buildbot.pypy.org Sun Jun 9 18:25:56 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 9 Jun 2013 18:25:56 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: added two missed optimizations Message-ID: <20130609162556.C1DD11C094A@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r4974:6460bc5634c4 Date: 2013-06-09 09:24 -0700 http://bitbucket.org/pypy/extradoc/changeset/6460bc5634c4/ Log: added two missed optimizations diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -77,6 +77,11 @@ - calling string equality does not automatically promote the argument to a constant. 
+- i0 = int_add_ovf(9223372036854775807, 1) + guard_overflow() + +- p0 = call_pure(ConstClass(something), ConstPtr(2)) + guard_exception(SomeException) PYTHON EXAMPLES --------------- From noreply at buildbot.pypy.org Sun Jun 9 19:02:14 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 19:02:14 +0200 (CEST) Subject: [pypy-commit] stmgc default: Update the document with a simpler version which is a better starting point. Message-ID: <20130609170214.E49721C094A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r85:6187b6951c91 Date: 2013-06-09 19:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/6187b6951c91/ Log: Update the document with a simpler version which is a better starting point. diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -27,17 +27,17 @@ Private freshly created \ Private, with backup - \ ^ | ^ - \ / commit | | - commit \ modify / | | - \ / | | modify - V / | | - Protected, no backup V | - ^ ^ Protected, with backup - / | gc | - commit / `----------------' - / - / + \ ^ / + commit \ / / commit + \ modify / / + \ / / + V / V + Protected + ^ + / + commit / + / + / Private copy of a public obj @@ -48,32 +48,27 @@ Private objects: - freshly created PRN -- converted from a protected obj PRN +- converted from a protected obj ptr to backup - private copy of a public obj PRN Protected objects: - converted from fresh private obj (old PRN) -- converted from a private obj with backup ptr to backup -- converted from a private obj from public GT +- converted from a private obj GT + +Backup copy: - backup copy of a private obj original h_revision -- backup copy still attached to a protected GT -- original obj after GC killed the backup GT Public objects: - prebuilt object, never modified 1 - other public object, never modified GT -- outdated, has a protected copy HANDLE to prot/priv copy -- outdated, target stolen ptr to a more recent public copy +- outdated ptr to a more recent public copy -Public stubs: -- from stealing: like outdated public objects -- from major GC: like outdated public objects with target stolen +Public stubs (have also a ref to one thread): +- from stealing ptr (maybe to priv/prot) | 2 PRN = Private revision number (negative odd number) GT = Global time (positive odd number) -HANDLE = Reference to a prot/priv copy and its thread - (positive even number, such that: handle % 4 == 2) @@ -83,54 +78,67 @@ - the PRN (private revision number): odd, negative, changes for every transaction that commits -- list active_backup_copies = [(private, backup copy)] +- list private_from_protected = [private obj converted from protected] - dict public_to_private = {public obj: private copy} - list read_set containing the objects in the read set, with possibly some duplicates (but hopefully not too many) -- list stolen_objects = [(priv/prot object, public copy)] +- collection_lock: a thread-local lock that is acquired to change + the status of private/protected objects -Kind of object copy distinguishing feature + +Kind of object copy distinguishing feature ------------------------------------------------------------------- -Any private object h_revision == PRN -Private with a backup in active_backup_copies -Backup copy GCFLAG_BACKUP_COPY -Any public object GCFLAG_PUBLIC -Any protected object h_revision != PRN && !GCFLAG_PUBLIC -Stubs GCFLAG_STUB +Any private object h_revision == PRN or GCFLAG_PRIVATE_FROM_PROTECTED +Private with a backup GCFLAG_PRIVATE_FROM_PROTECTED +Backup copy 
GCFLAG_BACKUP_COPY (flag for debugging) +Any public object GCFLAG_PUBLIC +Stubs GCFLAG_STUB (flag for debugging) A public object that might \ -be key in public_to_private has additionally GCFLAG_PUBLIC_TO_PRIVATE +be key in public_to_private has additionally GCFLAG_PUBLIC_TO_PRIVATE Read barrier ----------------------------------------- -Inline check: if P in read_barrier_cache, we don't call the slow path. +Inline check: if h_revision == PRN or if P in read_barrier_cache, + we don't call the slow path. Slow path: - if h_revision == PRN, just add P to read_barrier_cache and return + if GCFLAG_PRIVATE_FROM_PROTECTED: + + check P->h_revision->h_revision: if a pointer, then it means + the backup copy has been stolen into a public object and then + modified by some other thread. Abort. + + add P to 'read_barrier_cache' and return if GCFLAG_PUBLIC: follow the chained list of h_revision's as long as they are regular pointers - if it ends with an odd revision number, check that it's older - than start_time; extend the start timestamp if not + if it ends with h_revision % 4 == 2: + then we're in a stub - if it ends with a handle (L, Thread): - - if Thread is the current thread: set P = L + if Thread is the current thread: follow P = h_revision - 2 else: do stealing and restart the read barrier - if we land on a P in read_barrier_cache: return + if we land on a P in read_barrier_cache: + return P + + if P has GCFLAG_PUBLIC_TO_PRIVATE and is in 'public_to_private': + return the private object + + if it ends with an odd revision number, check that it's older + than start_time; extend the start timestamp if not add P to 'read_set' @@ -143,29 +151,26 @@ private objects which might be a majority, vs. making the inline check larger). -Handles are stored for example in a global list, and the actual handle -encodes an index in the list. Every entry in the list is a pointer to a -prot/priv object --- excepted once every N positions, where it is a -thread descriptor giving the thread to which all N-1 following pointers -belong. The pair (L, Thread) is thus `(list[H], list[H rounded down to -a multiple of N])`. +Stub objects are public, always outdated (with h_revision a pointer) and +contain only a header; additionally they have a thread descriptor that +tells to which thread the h_revision object is a protected/private +object of. -Stealing of an object copy L is done with the "collection lock" of -the target Thread. The target would also acquire its own lock in -when doing some operations, like a minor collection, which can't -occur in parallel with stealing. +Stealing of an object copy L is done with the "collection lock" of the +target Thread. The target would also acquire its own lock in when doing +some operations, like a minor collection or a write barrier on a +protected object, which can't occur in parallel with stealing. Once we have the lock, stealing is: if the situation changed while we were waiting for the lock, return - if L has got a backup copy, turn it public; - else L must be protected, and we make a public copy of it + if L has GCFLAG_PRIVATE_FROM_PROTECTED: + set L = L->h_revision (the backup copy) - update the original P->h_revision to point directly to the new - public copy + change L from protected to public, i.e. add GCFLAG_PUBLIC - add (P, new public copy) to stolen_objects + update the original P->h_revision to point directly to L @@ -174,18 +179,15 @@ The write barrier works for both STM purposes and for GC purposes. 
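Both barrier fast paths described in this document (the read-barrier check above and the write-barrier check that follows) reduce to a couple of cheap checks on the object header — plus, for reads, the read_barrier_cache lookup — before any slow path is entered. A toy Python model of that check, using made-up objects in place of gcptr headers and an arbitrary bit for the flag (the real definition is STM_FIRST_GCFLAG << 9):

    GCFLAG_PRIVATE_FROM_PROTECTED = 1 << 9      # placeholder bit for the illustration

    class ToyObj(object):
        def __init__(self, h_tid=0, h_revision=0):
            self.h_tid = h_tid
            self.h_revision = h_revision

    def is_private(obj, private_rev):
        # the fast-path test: either the object carries the current private
        # revision number, or it was turned private from a protected object
        return (obj.h_revision == private_rev or
                (obj.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0)

    def write_barrier(obj, private_rev, slow_path):
        if is_private(obj, private_rev):
            return obj                  # already writable by this transaction
        return slow_path(obj)           # localize a public/protected object

    priv = ToyObj(h_revision=-12345)
    pub = ToyObj(h_revision=2013)
    local = ToyObj(h_revision=-12345)
    assert write_barrier(priv, -12345, slow_path=None) is priv
    assert write_barrier(pub, -12345, slow_path=lambda o: local) is local

The same two-term test is what the inlineable stm_write_barrier() in stmsync.c checks further down in this series before falling back to stm_WriteBarrier().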
-Inline check: if h_revision == PRN && !GCFLAG_WRITE_BARRIER, we're done. +Inline check: if h_revision == PRN or GCFLAG_PRIVATE_FROM_PROTECTED, we're done. Slow path: R = read_barrier(P) # always do a full read_barrier first - if h_revision == PRN: + if h_revision == PRN or GCFLAG_PRIVATE_FROM_PROTECTED: + return R - GC only: remove GCFLAG_WRITE_BARRIER, add R to the GC list of - modified old objects to trace at the next minor collection, - and return R - - elif GCFLAG_PUBLIC: + if GCFLAG_PUBLIC: add the flag GCFLAG_PUBLIC_TO_PRIVATE to R, if needed @@ -193,19 +195,50 @@ add {R: L} in 'public_to_private' + remove R from read_barrier_cache + return L - else: # protected object + # else, R is a protected object + with collection_lock: - if h_revision is not a pointer: + allocate a backup copy and copy the object into the backup copy - allocate a backup copy, and attach it to h_revision + change R->h_revision to be the backup copy + + set GCFLAG_PRIVATE_FROM_PROTECTED on R - copy the object into the backup copy - - change h_revision to be PRN (i.e. turn private) - - if GCFLAG_WRITE_BARRIER: remove it, add R to the GC list of - modified old objects to trace at the next minor collection + add R in 'private_from_protected' return R + + + +Commit-time change of flags +--------------------------- + +(This occurs during commit, when we have got the collection_lock.) + +public_to_private: + + write GT into the private object + + make a stub with h_revision = private object | 2 + + after a CPU write barrier, make the public h_revision to point to the stub + +private_from_protected: + + get the backup B from P->h_revision + + set P->h_revision to GT + + if B has GCFLAG_PUBLIC: it has been stolen + + if it has been modified: conflict, abort transaction + + B->h_revision = P + + else: + possibly free B now, it's not used any more + diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -289,12 +289,21 @@ return L; } +static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R); + static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; + spinlock_acquire(d->public_descriptor->collection_lock, 'L'); + + if (P->h_tid & GCFLAG_PUBLIC) + { + /* became PUBLIC while waiting for the collection_lock */ + spinlock_release(d->public_descriptor->collection_lock); + return LocalizePublic(d, P); + } assert(P->h_revision != stm_private_rev_num); - assert(!(P->h_tid & GCFLAG_PUBLIC)); assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); assert(!(P->h_tid & GCFLAG_STUB)); @@ -307,21 +316,17 @@ } else { - size_t size = stmcb_size(P); B = (gcptr)P->h_revision; assert(B->h_tid & GCFLAG_BACKUP_COPY); + size_t size = stmcb_size(P); memcpy(B + 1, P + 1, size - sizeof(*B)); } assert(B->h_tid & GCFLAG_BACKUP_COPY); - gcptrlist_locked_insert2(&d->public_descriptor->active_backup_copies, P, B, - &d->public_descriptor->collection_lock); + gcptrlist_insert2(&d->public_descriptor->active_backup_copies, P, B); + P->h_revision = stm_private_rev_num; - smp_wmb(); /* guarantees that stm_steal_stub() will see the list - up to the (P, B) pair in case it goes the path - h_revision == *foreign_pd->private_revision_ref */ - - P->h_revision = stm_private_rev_num; + spinlock_release(d->public_descriptor->collection_lock); return P; } @@ -368,6 +373,7 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + retry: P = stm_read_barrier(P); if (P->h_tid & GCFLAG_PUBLIC) diff --git a/c4/lists.c b/c4/lists.c --- a/c4/lists.c +++ b/c4/lists.c @@ -171,23 +171,6 @@ 
gcptrlist->size = i + 2; } -void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1, - gcptr newitem2, revision_t *lock) -{ - gcptr *items; - long i = gcptrlist->size; - if (UNLIKELY((gcptrlist->alloc - i) < 2)) - { - spinlock_acquire(*lock, 'I'); - _gcptrlist_grow(gcptrlist); - spinlock_release(*lock); - } - items = gcptrlist->items; - items[i+0] = newitem1; - items[i+1] = newitem2; - gcptrlist->size = i + 2; -} - void gcptrlist_insert3(struct GcPtrList *gcptrlist, gcptr newitem1, gcptr newitem2, gcptr newitem3) { diff --git a/c4/lists.h b/c4/lists.h --- a/c4/lists.h +++ b/c4/lists.h @@ -164,9 +164,6 @@ void gcptrlist_merge(struct GcPtrList *, struct GcPtrList *gcptrlist_source); void gcptrlist_move(struct GcPtrList *, struct GcPtrList *gcptrlist_source); -void gcptrlist_locked_insert2(struct GcPtrList *gcptrlist, gcptr newitem1, - gcptr newitem2, revision_t *lock); - /************************************************************/ /* The fxcache_xx functions implement a fixed-size set of gcptr's. diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -53,33 +53,41 @@ if ((v & 3) != 2) goto done; /* un-stubbed while we waited for the lock */ - gcptr Q, L = (gcptr)(v - 2); - revision_t w = ACCESS_ONCE(L->h_revision); + gcptr L = (gcptr)(v - 2); + revision_t w = L->h_revision; if (w == *foreign_pd->private_revision_ref) { /* The stub points to a private object L. Because it cannot point to "really private" objects, it must mean that L used to be a protected object, and it has an attached backed copy. XXX find a way to optimize this search, maybe */ - long i; + long i, size = foreign_pd->active_backup_copies.size; gcptr *items = foreign_pd->active_backup_copies.items; - /* we must find L as the first item of a pair in the list. We - cannot rely on how big the list is here, but we know that - it will not be resized while we hold collection_lock. */ - for (i = 0; items[i] != L; i += 2) - ; + for (i = size - 2; ; i -= 2) { + assert(i >= 0); + if (items[i] == L) + break; + } L = items[i + 1]; assert(L->h_tid & GCFLAG_BACKUP_COPY); + L->h_tid &= ~GCFLAG_BACKUP_COPY; } - /* duplicate L */ - Q = stmgc_duplicate(L); XXX RACE - Q->h_tid &= ~GCFLAG_BACKUP_COPY; - Q->h_tid |= GCFLAG_PUBLIC; - gcptrlist_insert2(&foreign_pd->stolen_objects, L, Q); + else if (L->h_tid & GCFLAG_PUBLIC) { + /* The stub already points to a public object */ + goto unstub; + } + else if (!(w & 1)) { + /* The stub points to a protected object L which has a backup + copy attached. Forget the backup copy. 
*/ + w = ((gcptr)w)->h_revision; + assert(w & 1); + L->h_revision = w; + } + /* turn L into a public object */ + L->h_tid |= GCFLAG_PUBLIC; - smp_wmb(); - - P->h_revision = (revision_t)Q; + unstub: + P->h_revision = (revision_t)L; done: spinlock_release(foreign_pd->collection_lock); From noreply at buildbot.pypy.org Sun Jun 9 19:07:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 19:07:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add stealing in the diagram Message-ID: <20130609170713.0658A1C094A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r86:d9e066ac3a1f Date: 2013-06-09 19:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/d9e066ac3a1f/ Log: Add stealing in the diagram diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -33,12 +33,12 @@ \ / / V / V Protected - ^ - / - commit / - / - / - Private copy of + ^ \ Backup of a private + / \ / + commit / \ steal / + / \ / + / V V + Private copy of Public a public obj From noreply at buildbot.pypy.org Sun Jun 9 21:05:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 21:05:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Implement most of the document, found a few issues to work on Message-ID: <20130609190535.13C981C1001@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r87:391c68e20cf5 Date: 2013-06-09 21:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/391c68e20cf5/ Log: Implement most of the document, found a few issues to work on diff --git a/README.txt b/README.txt --- a/README.txt +++ b/README.txt @@ -7,13 +7,13 @@ This is a C library that combines a GC with STM capabilities. It is meant to be a general library that can be used in C programs. -The library interface is in "c3/stmgc.h". +The library interface is in "c4/stmgc.h". -The file "c3/doc-stmgc.txt" contains a high-level overview followed by -more detailled explanations. +The file "c4/doc-objects.txt" contains some low-level explanations. -A demo program can be found in "c3/demo1.c", but the code so far is -outdated (it doesn't follow what c3/doc-stmgc describes). +Run tests with "py.test". + +A demo program will be found in "c4/demo1.c" (not there yet). It can be built with "make debug-demo1" or "make build-demo1". The plan is to use this C code directly with PyPy, and not write diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -219,13 +219,6 @@ (This occurs during commit, when we have got the collection_lock.) 
-public_to_private: - - write GT into the private object - - make a stub with h_revision = private object | 2 - - after a CPU write barrier, make the public h_revision to point to the stub private_from_protected: @@ -233,6 +226,8 @@ set P->h_revision to GT + remove GCFLAG_PRIVATE_FROM_PROTECTED from P + if B has GCFLAG_PUBLIC: it has been stolen if it has been modified: conflict, abort transaction @@ -242,3 +237,11 @@ else: possibly free B now, it's not used any more + +public_to_private: + + write GT into the private object + + make a stub with h_revision = private object | 2 + + after a CPU write barrier, make the public h_revision to point to the stub diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -66,11 +66,16 @@ gcptr P = G; revision_t v; - if (UNLIKELY(d->public_descriptor->stolen_objects.size > 0)) + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - spinlock_acquire(d->public_descriptor->collection_lock, 'N'); - stm_normalize_stolen_objects(d->public_descriptor); - spinlock_release(d->public_descriptor->collection_lock); + private_from_protected: + /* check P->h_revision->h_revision: if a pointer, then it means + the backup copy has been stolen into a public object and then + modified by some other thread. Abort. */ + if (!(((gcptr)P->h_revision)->h_revision & 1)) + AbortTransaction(ABRT_STOLEN_MODIFIED); + + goto add_in_recent_reads_cache; } if (P->h_tid & GCFLAG_PUBLIC) @@ -142,8 +147,10 @@ } register_in_list_of_read_objects: + gcptrlist_insert(&d->list_of_read_objects, P); + + add_in_recent_reads_cache: fxcache_add(&d->recent_reads_cache, P); - gcptrlist_insert(&d->list_of_read_objects, P); return P; follow_stub:; @@ -159,6 +166,12 @@ "private\n", G, P); return P; } + else if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + { + fprintf(stderr, "read_barrier: %p -> %p handle " + "private_from_protected\n", G, P); + goto private_from_protected; + } else if (FXCACHE_AT(P) == P) { fprintf(stderr, "read_barrier: %p -> %p handle " @@ -307,24 +320,15 @@ assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); assert(!(P->h_tid & GCFLAG_STUB)); + assert(!(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - if (P->h_revision & 1) - { - /* does not have a backup yet */ - B = stmgc_duplicate(P); - B->h_tid |= GCFLAG_BACKUP_COPY; - } - else - { - B = (gcptr)P->h_revision; - assert(B->h_tid & GCFLAG_BACKUP_COPY); - size_t size = stmcb_size(P); - memcpy(B + 1, P + 1, size - sizeof(*B)); - } - assert(B->h_tid & GCFLAG_BACKUP_COPY); + B = stmgc_duplicate(P); + B->h_tid |= GCFLAG_BACKUP_COPY; - gcptrlist_insert2(&d->public_descriptor->active_backup_copies, P, B); - P->h_revision = stm_private_rev_num; + P->h_tid |= GCFLAG_PRIVATE_FROM_PROTECTED; + P->h_revision = (revision_t)B; + + gcptrlist_insert(&d->private_from_protected, P); spinlock_release(d->public_descriptor->collection_lock); return P; @@ -333,21 +337,20 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { assert(R->h_tid & GCFLAG_PUBLIC); - if (R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) - { - wlog_t *entry; - gcptr L; - G2L_FIND(d->public_to_private, R, entry, goto not_found); - L = entry->val; - assert(L->h_revision == stm_private_rev_num); /* private object */ - return L; - } + +#ifdef _GC_DEBUG + wlog_t *entry; + G2L_FIND(d->public_to_private, R, entry, goto not_found); + assert(!"R is already in public_to_private"); + not_found: +#endif + R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; - not_found:; gcptr L = stmgc_duplicate(R); assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); assert(!(L->h_tid & 
GCFLAG_STUB)); + assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); L->h_tid &= ~(GCFLAG_OLD | GCFLAG_VISITED | GCFLAG_PUBLIC | @@ -369,36 +372,31 @@ gcptr stm_WriteBarrier(gcptr P) { - gcptr W; + gcptr R, W; struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - retry: - P = stm_read_barrier(P); + R = stm_read_barrier(P); - if (P->h_tid & GCFLAG_PUBLIC) - W = LocalizePublic(d, P); + if (R->h_revision == stm_private_rev_num || + (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)) + return R; + + if (R->h_tid & GCFLAG_PUBLIC) + W = LocalizePublic(d, R); else - W = LocalizeProtected(d, P); + W = LocalizeProtected(d, R); - fprintf(stderr, "write_barrier: %p -> %p\n", P, W); + fprintf(stderr, "write_barrier: %p -> %p -> %p\n", P, R, W); return W; } -gcptr stm_get_backup_copy(long index) +gcptr stm_get_private_from_protected(long index) { - struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; - if (index < gcptrlist_size(&pd->active_backup_copies)) - return pd->active_backup_copies.items[index]; - return NULL; -} - -gcptr stm_get_stolen_obj(long index) -{ - struct tx_public_descriptor *pd = thread_descriptor->public_descriptor; - if (index < gcptrlist_size(&pd->stolen_objects)) - return pd->stolen_objects.items[index]; + struct tx_descriptor *d = thread_descriptor; + if (index < gcptrlist_size(&d->private_from_protected)) + return d->private_from_protected.items[index]; return NULL; } @@ -570,9 +568,9 @@ } gcptrlist_clear(&d->list_of_read_objects); - gcptrlist_clear(&d->public_descriptor->active_backup_copies); abort(); - d->public_descriptor->stolen_objects;//XXX clean up + gcptrlist_clear(&d->private_from_protected); //XXX clean up + abort(); //stmgc_abort_transaction(d); fprintf(stderr, @@ -636,14 +634,15 @@ d->start_real_time.tv_nsec = -1; } assert(d->list_of_read_objects.size == 0); + assert(d->private_from_protected.size == 0); assert(!g2l_any_entry(&d->public_to_private)); d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); #if 0 gcptrlist_clear(&d->undolog); + gcptrlist_clear(&d->abortinfo); #endif - gcptrlist_clear(&d->abortinfo); } void BeginTransaction(jmp_buf* buf) @@ -819,24 +818,41 @@ } #endif -void TurnPrivateWithBackupToProtected(struct tx_descriptor *d, - revision_t cur_time) +void CommitPrivateFromProtected(struct tx_descriptor *d, revision_t cur_time) { - struct tx_public_descriptor *pd = d->public_descriptor; - long i, size = pd->active_backup_copies.size; - gcptr *items = pd->active_backup_copies.items; + long i, size = d->private_from_protected.size; + gcptr *items = d->private_from_protected.items; - for (i = 0; i < size; i += 2) + for (i = 0; i < size; i++) { gcptr P = items[i]; - gcptr B = items[i + 1]; - assert(B->h_tid & GCFLAG_BACKUP_COPY); - assert(!(B->h_tid & GCFLAG_PUBLIC)); - assert(P->h_revision == stm_private_rev_num); - B->h_revision = cur_time; - P->h_revision = (revision_t)B; + + assert(!(P->h_revision & 1)); // "is a pointer" + gcptr B = (gcptr)P->h_revision; + + assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; + P->h_revision = cur_time; + + if (B->h_tid & GCFLAG_PUBLIC) + { + /* B was stolen */ + while (1) + { + revision_t v = ACCESS_ONCE(B->h_revision); + if (!(v & 1)) // "is a pointer", i.e. 
"was modified" + AbortTransaction(ABRT_STOLEN_MODIFIED); + + if (bool_cas(&B->h_revision, v, (revision_t)P)) + break; + } + } + else + { + //stm_free(B); + } }; - gcptrlist_clear(&pd->active_backup_copies); + gcptrlist_clear(&d->private_from_protected); } void CommitTransaction(void) @@ -846,9 +862,6 @@ assert(d->active >= 1); spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ - if (d->public_descriptor->stolen_objects.size) - stm_normalize_stolen_objects(d->public_descriptor); - AcquireLocks(d); if (is_inevitable(d)) @@ -885,6 +898,8 @@ if (!ValidateDuringTransaction(d, 1)) AbortTransaction(ABRT_VALIDATE_COMMIT); } + CommitPrivateFromProtected(d, cur_time); + /* we cannot abort any more from here */ d->setjmp_buf = NULL; gcptrlist_clear(&d->list_of_read_objects); @@ -895,8 +910,6 @@ "*************************************\n", (long)cur_time); - TurnPrivateWithBackupToProtected(d, cur_time); - revision_t localrev = stm_private_rev_num; //UpdateProtectedChainHeads(d, cur_time, localrev); //smp_wmb(); @@ -1159,12 +1172,12 @@ thread_descriptor = NULL; g2l_delete(&d->public_to_private); - assert(d->public_descriptor->active_backup_copies.size == 0); - gcptrlist_delete(&d->public_descriptor->active_backup_copies); + assert(d->private_from_protected.size == 0); + gcptrlist_delete(&d->private_from_protected); gcptrlist_delete(&d->list_of_read_objects); +#if 0 gcptrlist_delete(&d->abortinfo); free(d->longest_abort_info); -#if 0 gcptrlist_delete(&d->undolog); #endif diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -62,6 +62,7 @@ #define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 6) #define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 7) /* debugging */ #define GCFLAG_STUB (STM_FIRST_GCFLAG << 8) /* debugging */ +#define GCFLAG_PRIVATE_FROM_PROTECTED (STM_FIRST_GCFLAG << 9) /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -73,24 +74,25 @@ "VISITED", \ "PUBLIC", \ "PREBUILT_ORIGINAL", \ - "BACKUP_COPY", \ "PUBLIC_TO_PRIVATE", \ "WRITE_BARRIER", \ "NURSERY_MOVED", \ - "STOLEN", \ + "BACKUP_COPY", \ "STUB", \ + "PRIVATE_FROM_PROTECTED", \ NULL } /************************************************************/ #define ABRT_MANUAL 0 #define ABRT_COMMIT 1 -#define ABRT_VALIDATE_INFLIGHT 2 -#define ABRT_VALIDATE_COMMIT 3 -#define ABRT_VALIDATE_INEV 4 -#define ABRT_COLLECT_MINOR 5 -#define ABRT_COLLECT_MAJOR 6 -#define ABORT_REASONS 7 +#define ABRT_STOLEN_MODIFIED 2 +#define ABRT_VALIDATE_INFLIGHT 3 +#define ABRT_VALIDATE_COMMIT 4 +#define ABRT_VALIDATE_INEV 5 +#define ABRT_COLLECT_MINOR 6 +#define ABRT_COLLECT_MAJOR 7 +#define ABORT_REASONS 8 #define SPLP_ABORT 0 #define SPLP_LOCKED_INFLIGHT 1 @@ -105,8 +107,6 @@ revision_t collection_lock; struct stub_block_s *stub_blocks; gcptr stub_free_list; - struct GcPtrList active_backup_copies; /* (P,B) where P=private, B=backup */ - struct GcPtrList stolen_objects; /* (P,Q) where P=priv/prot, Q=public */ revision_t *private_revision_ref; revision_t free_list_next; /* xxx gcpage data here */ @@ -135,7 +135,8 @@ unsigned int num_aborts[ABORT_REASONS]; unsigned int num_spinloops[SPINLOOP_REASONS]; struct GcPtrList list_of_read_objects; - struct GcPtrList abortinfo; + //struct GcPtrList abortinfo; + struct GcPtrList private_from_protected; struct G2L public_to_private; char *longest_abort_info; long long longest_abort_info_time; @@ -162,8 +163,7 @@ gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); gcptr _stm_nonrecord_barrier(gcptr, int *); -gcptr 
stm_get_backup_copy(long); /* debugging */ -gcptr stm_get_stolen_obj(long); /* debugging */ +gcptr stm_get_private_from_protected(long); /* debugging */ gcptr stm_get_read_obj(long); /* debugging */ gcptr stmgc_duplicate(gcptr); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -54,54 +54,21 @@ goto done; /* un-stubbed while we waited for the lock */ gcptr L = (gcptr)(v - 2); - revision_t w = L->h_revision; - if (w == *foreign_pd->private_revision_ref) { - /* The stub points to a private object L. Because it cannot point - to "really private" objects, it must mean that L used to be - a protected object, and it has an attached backed copy. - XXX find a way to optimize this search, maybe */ - long i, size = foreign_pd->active_backup_copies.size; - gcptr *items = foreign_pd->active_backup_copies.items; - for (i = size - 2; ; i -= 2) { - assert(i >= 0); - if (items[i] == L) - break; - } - L = items[i + 1]; - assert(L->h_tid & GCFLAG_BACKUP_COPY); + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + L = (gcptr)L->h_revision; /* the backup copy */ L->h_tid &= ~GCFLAG_BACKUP_COPY; } - else if (L->h_tid & GCFLAG_PUBLIC) { - /* The stub already points to a public object */ - goto unstub; - } - else if (!(w & 1)) { - /* The stub points to a protected object L which has a backup - copy attached. Forget the backup copy. */ - w = ((gcptr)w)->h_revision; - assert(w & 1); - L->h_revision = w; - } - /* turn L into a public object */ + + /* change L from protected to public */ L->h_tid |= GCFLAG_PUBLIC; - unstub: + smp_wmb(); /* the following update must occur "after" the flag + GCFLAG_PUBLIC was added, for other threads */ + + /* update the original P->h_revision to point directly to L */ P->h_revision = (revision_t)L; done: spinlock_release(foreign_pd->collection_lock); } - -void stm_normalize_stolen_objects(struct tx_public_descriptor *pd) -{ - long i, size = pd->stolen_objects.size; - gcptr *items = pd->stolen_objects.items; - for (i = 0; i < size; i += 2) { - gcptr L = items[i]; - gcptr Q = items[i + 1]; - if (L->h_revision == stm_private_rev_num) { - - } - } -} diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -87,8 +87,8 @@ gcptr stm_write_barrier(gcptr obj) { /* XXX inline in the caller */ - if (UNLIKELY(((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0) | - (obj->h_revision != stm_private_rev_num))) + if (UNLIKELY((obj->h_revision != stm_private_rev_num) & + ((obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) == 0))) obj = stm_WriteBarrier(obj); return obj; } diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -68,8 +68,7 @@ void stm_start_sharedlock(void); void stm_stop_sharedlock(void); void AbortTransaction(int); - gcptr stm_get_backup_copy(long index); - gcptr stm_get_stolen_obj(long index); + gcptr stm_get_private_from_protected(long index); gcptr stm_get_read_obj(long index); void *STUB_THREAD(gcptr); @@ -105,6 +104,7 @@ #define GCFLAG_WRITE_BARRIER ... #define GCFLAG_NURSERY_MOVED ... #define GCFLAG_STUB ... + #define GCFLAG_PRIVATE_FROM_PROTECTED ... #define ABRT_MANUAL ... 
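To connect the pieces of this changeset: the commit-time step sketched in the doc-objects.txt hunk above and implemented in CommitPrivateFromProtected() walks the private_from_protected list, detaches each backup copy B, and then either aborts (if B was stolen and subsequently modified), redirects the stolen public copy to the now-committed object, or simply drops B. A schematic single-threaded Python model of that walk — the real code performs the B.h_revision update with a compare-and-swap, which is omitted here:

    GCFLAG_PUBLIC = 1 << 2
    GCFLAG_PRIVATE_FROM_PROTECTED = 1 << 9     # placeholder bit values

    class ToyObj(object):
        def __init__(self, h_tid=0, h_revision=None):
            self.h_tid = h_tid
            self.h_revision = h_revision       # an odd int (revision) or another ToyObj

    class StolenAndModified(Exception):
        pass

    def commit_private_from_protected(lst, cur_time):
        for P in lst:
            B = P.h_revision                           # backup attached by the write barrier
            P.h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED
            P.h_revision = cur_time                    # P is now an ordinary protected object
            if B.h_tid & GCFLAG_PUBLIC:                # the backup was stolen
                if not (isinstance(B.h_revision, int) and B.h_revision & 1):
                    raise StolenAndModified()          # someone already modified the public copy
                B.h_revision = P                       # public copy now forwards to P
            # else: B was never published and can simply be discarded
        del lst[:]

    B = ToyObj(h_tid=GCFLAG_PUBLIC, h_revision=2013)
    P = ToyObj(h_tid=GCFLAG_PRIVATE_FROM_PROTECTED, h_revision=B)
    commit_private_from_protected([P], cur_time=2015)
    assert P.h_revision == 2015 and B.h_revision is P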
//typedef struct { ...; } page_header_t; ''') @@ -524,7 +524,8 @@ lib.AbortTransaction(lib.ABRT_MANUAL) def classify(p): - private = p.h_revision == lib.get_private_rev_num() + private = (p.h_revision == lib.get_private_rev_num() or + (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0) public = (p.h_tid & GCFLAG_PUBLIC) != 0 backup = (p.h_tid & GCFLAG_BACKUP_COPY) != 0 stub = (p.h_tid & GCFLAG_STUB) != 0 @@ -541,34 +542,26 @@ else: return "protected" -def list_of_read_objects(): +def _get_full_list(getter): result = [] index = 0 while 1: - p = lib.stm_get_read_obj(index) + p = getter(index) if p == ffi.NULL: break result.append(p) index += 1 return result -def _list2dict(getter): - result = {} - index = 0 - while 1: - p = getter(index) - if p == ffi.NULL: - break - q = getter(index + 1) - assert q != ffi.NULL - result[p] = q - index += 2 - return result +def list_of_read_objects(): + return _get_full_list(lib.stm_get_read_obj) -def backup_copies(): - return _list2dict(lib.stm_get_backup_copy) - -def stolen_objs(): - return _list2dict(lib.stm_get_stolen_obj) +def list_of_private_from_protected(): + return _get_full_list(lib.stm_get_private_from_protected) stub_thread = lib.STUB_THREAD + +def follow_revision(p): + r = p.h_revision + assert (r % 4) == 0 + return ffi.cast("gcptr", r) diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -49,8 +49,10 @@ assert classify(p) == "protected" p2 = lib.stm_write_barrier(p) assert p2 == p # does not move - assert p.h_revision == r2 assert classify(p) == "private" + pback = follow_revision(p) + assert classify(pback) == "backup" + assert list_of_private_from_protected() == [p] def test_get_backup_copy(): p = nalloc(HDR + WORD) @@ -58,52 +60,19 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() org_r = p.h_revision + assert classify(p) == "protected" lib.setlong(p, 0, 927122) - assert p.h_revision == lib.get_private_rev_num() - pback = backup_copies()[p] + assert classify(p) == "private" + pback = follow_revision(p) assert pback and pback != p assert pback.h_revision == org_r - assert pback.h_tid == p.h_tid | GCFLAG_BACKUP_COPY + assert pback.h_tid == ((p.h_tid & ~GCFLAG_PRIVATE_FROM_PROTECTED) | + GCFLAG_BACKUP_COPY) assert lib.rawgetlong(pback, 0) == 78927812 assert lib.rawgetlong(p, 0) == 927122 assert classify(p) == "private" assert classify(pback) == "backup" -def test_protected_with_backup(): - p = nalloc(HDR + WORD) - lib.setlong(p, 0, 78927812) - lib.stm_commit_transaction() - lib.stm_begin_inevitable_transaction() - lib.setlong(p, 0, 927122) - pback = backup_copies()[p] - assert pback != p - assert p.h_revision == lib.get_private_rev_num() - lib.stm_commit_transaction() - lib.stm_begin_inevitable_transaction() - assert classify(p) == "protected" - assert classify(pback) == "backup" - assert ffi.cast("gcptr", p.h_revision) == pback - -def test_protected_backup_reused(): - p = nalloc(HDR + WORD) - lib.setlong(p, 0, 78927812) - lib.stm_commit_transaction() - lib.stm_begin_inevitable_transaction() - lib.setlong(p, 0, 927122) - pback = backup_copies()[p] - assert pback != p - lib.stm_commit_transaction() - lib.stm_begin_inevitable_transaction() - assert classify(p) == "protected" - assert classify(pback) == "backup" - assert lib.rawgetlong(p, 0) == 927122 - assert lib.rawgetlong(pback, 0) == 78927812 # but should not be used - lib.setlong(p, 0, 43891) - assert p.h_revision == lib.get_private_rev_num() - assert pback == backup_copies()[p] - assert lib.rawgetlong(p, 0) == 43891 - 
assert lib.rawgetlong(pback, 0) == 927122 - def test_prebuilt_is_public(): p = palloc(HDR) assert p.h_revision == 1 @@ -240,15 +209,13 @@ assert classify(p) == "public" assert classify(p1) == "protected" plist.append(p1) # now p's most recent revision is protected - assert classify(ffi.cast("gcptr", p.h_revision)) == "stub" + assert classify(follow_revision(p)) == "stub" + assert p1.h_revision & 1 r.set(2) r.wait(3) - d = stolen_objs() - assert len(d) == 1 - assert d.keys() == [p1] - [p2] = d.values() - assert lib.stm_read_barrier(p) == p2 - assert lib.stm_read_barrier(p1) == p2 + assert classify(p1) == "public" + assert lib.stm_read_barrier(p) == p1 + assert lib.stm_read_barrier(p1) == p1 def f2(r): r.wait(2) p2 = lib.stm_read_barrier(p) # steals @@ -256,8 +223,61 @@ assert p2 == lib.stm_read_barrier(p) # short-circuit h_revision assert p.h_revision == int(ffi.cast("revision_t", p2)) assert p2 == lib.stm_read_barrier(p) - assert p2 not in plist + assert p2 == plist[-1] assert classify(p2) == "public" - plist.append(p2) r.set(3) run_parallel(f1, f2) + +def test_stealing_while_modifying(): + py.test.skip("in-progress") + p = palloc(HDR + WORD) + + def f1(r): + p1 = lib.stm_write_barrier(p) # private copy + assert classify(p) == "public" + assert classify(p1) == "private" + lib.rawsetlong(p1, 0, 2782172) + + def cb(c): + assert c == 0 + assert classify(p) == "public" + assert classify(p1) == "protected" + assert classify(follow_revision(p)) == "stub" + p2 = lib.stm_write_barrier(p) + assert p2 == p1 + lib.rawsetlong(p2, 0, -451112) + pback = follow_revision(p1) + assert classify(p1) == "private" + assert classify(pback) == "backup" + assert lib.stm_read_barrier(p) == p1 + assert lib.stm_read_barrier(p1) == p1 + assert pback.h_revision & 1 + r.wait_while_in_parallel() + assert classify(p1) == "private" + assert classify(pback) == "public" + assert pback.h_tid & GCFLAG_PUBLIC_TO_PRIVATE + assert lib.stm_read_barrier(p) == p1 + assert lib.stm_read_barrier(p1) == p1 + assert pback.h_revision & 1 + perform_transaction(cb) + + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p1) == "protected" + assert classify(pback) == "public" + assert classify(follow_revision(pback)) == "stub" + assert follow_revision(pback).h_revision == ( + ffi.cast("revision_t", p1) | 2) + + def f2(r): + def cb(c): + assert c == 0 + r.enter_in_parallel() + p2 = lib.stm_read_barrier(p) # steals + assert lib.rawgetlong(p2, 0) == 2782172 + assert p2 == lib.stm_read_barrier(p) + assert classify(p2) == "public" + r.leave_in_parallel() + perform_transaction(cb) + + run_parallel(f1, f2) From noreply at buildbot.pypy.org Sun Jun 9 22:23:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Jun 2013 22:23:21 +0200 (CEST) Subject: [pypy-commit] stmgc default: progress Message-ID: <20130609202321.903931C1001@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r88:38cc005153e5 Date: 2013-06-09 22:23 +0200 http://bitbucket.org/pypy/stmgc/changeset/38cc005153e5/ Log: progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -30,6 +30,11 @@ { return thread_descriptor; } +static int is_private(gcptr P) +{ + return (P->h_revision == stm_private_rev_num) || + (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); +} /************************************************************/ @@ -119,6 +124,7 @@ if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *item; + retry_public_to_private:; G2L_FIND(d->public_to_private, P, item, goto no_private_obj); P = item->val; @@ -126,8 +132,14 @@ 
assert(P->h_revision == stm_private_rev_num); fprintf(stderr, "read_barrier: %p -> %p public_to_private\n", G, P); return P; + + no_private_obj: + if (d->public_descriptor->stolen_objects.size > 0) + { + stm_normalize_stolen_objects(d); + goto retry_public_to_private; + } } - no_private_obj: if (UNLIKELY(v > d->start_time)) // object too recent? { @@ -188,7 +200,7 @@ else { /* stealing */ - fprintf(stderr, "read_barrier: %p -> stealing %p...", G, P); + fprintf(stderr, "read_barrier: %p -> stealing %p...\n ", G, P); stm_steal_stub(P); goto retry; } @@ -378,8 +390,7 @@ R = stm_read_barrier(P); - if (R->h_revision == stm_private_rev_num || - (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)) + if (is_private(R)) return R; if (R->h_tid & GCFLAG_PUBLIC) @@ -419,11 +430,14 @@ static _Bool ValidateDuringTransaction(struct tx_descriptor *d, _Bool during_commit) { - abort(); -#if 0 long i, size = d->list_of_read_objects.size; gcptr *items = d->list_of_read_objects.items; + if (size == 0) + return 1; + abort(); + +#if 0 for (i=0; ilist_of_read_objects.size == 0); assert(d->private_from_protected.size == 0); assert(!g2l_any_entry(&d->public_to_private)); + assert(d->public_descriptor->stolen_objects.size == 0); d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); @@ -668,7 +683,7 @@ gcptr R = item->addr; revision_t v; retry: - assert(R->h_tid & GCFLAG_OLD); + assert(R->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" @@ -861,6 +876,9 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + if (d->public_descriptor->stolen_objects.size != 0) + stm_normalize_stolen_objects(d); + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ AcquireLocks(d); @@ -918,7 +936,7 @@ assert(newrev & 1); ACCESS_ONCE(stm_private_rev_num) = newrev; fprintf(stderr, "%p: stm_local_revision = %ld\n", d, (long)newrev); - assert(d->public_descriptor->private_revision_ref = &stm_private_rev_num); + assert(d->private_revision_ref = &stm_private_rev_num); UpdateChainHeads(d, cur_time, localrev); @@ -1136,8 +1154,9 @@ assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); stm_private_rev_num = -1; - pd->private_revision_ref = &stm_private_rev_num; + d->private_revision_ref = &stm_private_rev_num; d->max_aborts = -1; + pd->descriptor = d; thread_descriptor = d; fprintf(stderr, "[%lx] pthread %lx starting\n", @@ -1152,15 +1171,12 @@ void DescriptorDone(void) { - static revision_t no_private_revision = 8; revision_t i; struct tx_descriptor *d = thread_descriptor; assert(d != NULL); assert(d->active == 0); - - spinlock_acquire(d->public_descriptor->collection_lock, 'D'); /*done*/ - d->public_descriptor->private_revision_ref = &no_private_revision; - spinlock_release(d->public_descriptor->collection_lock); + assert(d->public_descriptor->stolen_objects.size == 0); + gcptrlist_delete(&d->public_descriptor->stolen_objects); spinlock_acquire(descriptor_array_lock, 1); i = d->public_descriptor_index; diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -105,9 +105,10 @@ * thread shuts down. It is reused the next time a thread starts. 
*/ struct tx_public_descriptor { revision_t collection_lock; + struct tx_descriptor *descriptor; struct stub_block_s *stub_blocks; gcptr stub_free_list; - revision_t *private_revision_ref; + struct GcPtrList stolen_objects; revision_t free_list_next; /* xxx gcpage data here */ }; @@ -140,6 +141,7 @@ struct G2L public_to_private; char *longest_abort_info; long long longest_abort_info_time; + revision_t *private_revision_ref; struct FXCache recent_reads_cache; }; diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -56,8 +56,16 @@ gcptr L = (gcptr)(v - 2); if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - L = (gcptr)L->h_revision; /* the backup copy */ - L->h_tid &= ~GCFLAG_BACKUP_COPY; + gcptr B = (gcptr)L->h_revision; /* the backup copy */ + B->h_tid &= ~GCFLAG_BACKUP_COPY; + L->h_revision = *foreign_pd->descriptor->private_revision_ref; + + /* add {B: L} in 'public_to_private', but lazily, because we don't + want to walk over the feet of the foreign thread */ + B->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; + gcptrlist_insert2(&foreign_pd->stolen_objects, B, L); + + L = B; } /* change L from protected to public */ @@ -72,3 +80,20 @@ done: spinlock_release(foreign_pd->collection_lock); } + +void stm_normalize_stolen_objects(struct tx_descriptor *d) +{ + spinlock_acquire(d->public_descriptor->collection_lock, 'N'); + + long i, size = d->public_descriptor->stolen_objects.size; + gcptr *items = d->public_descriptor->stolen_objects.items; + + for (i = 0; i < size; i += 2) { + gcptr B = items[i]; + gcptr L = items[i + 1]; + g2l_insert(&d->public_to_private, B, L); + } + gcptrlist_clear(&d->public_descriptor->stolen_objects); + + spinlock_release(d->public_descriptor->collection_lock); +} diff --git a/c4/steal.h b/c4/steal.h --- a/c4/steal.h +++ b/c4/steal.h @@ -10,7 +10,7 @@ gcptr stm_stub_malloc(struct tx_public_descriptor *); void stm_steal_stub(gcptr); gcptr stm_get_stolen_obj(long index); /* debugging */ -void stm_normalize_stolen_objects(struct tx_public_descriptor *); +void stm_normalize_stolen_objects(struct tx_descriptor *); #endif diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -229,7 +229,6 @@ run_parallel(f1, f2) def test_stealing_while_modifying(): - py.test.skip("in-progress") p = palloc(HDR + WORD) def f1(r): @@ -258,6 +257,7 @@ assert pback.h_tid & GCFLAG_PUBLIC_TO_PRIVATE assert lib.stm_read_barrier(p) == p1 assert lib.stm_read_barrier(p1) == p1 + assert lib.stm_read_barrier(pback) == p1 assert pback.h_revision & 1 perform_transaction(cb) @@ -273,6 +273,8 @@ def cb(c): assert c == 0 r.enter_in_parallel() + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() p2 = lib.stm_read_barrier(p) # steals assert lib.rawgetlong(p2, 0) == 2782172 assert p2 == lib.stm_read_barrier(p) From noreply at buildbot.pypy.org Mon Jun 10 09:26:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Jun 2013 09:26:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: Getting somewhere Message-ID: <20130610072625.98AF01C10DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r89:b63ed074b565 Date: 2013-06-10 09:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/b63ed074b565/ Log: Getting somewhere diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -129,14 +129,16 @@ P = item->val; assert(!(P->h_tid & GCFLAG_PUBLIC)); - assert(P->h_revision == stm_private_rev_num); + assert(is_private(P)); fprintf(stderr, "read_barrier: %p -> %p public_to_private\n", G, P); return P; no_private_obj: 
if (d->public_descriptor->stolen_objects.size > 0) { + spinlock_acquire(d->public_descriptor->collection_lock, 'N'); stm_normalize_stolen_objects(d); + spinlock_release(d->public_descriptor->collection_lock); goto retry_public_to_private; } } @@ -433,11 +435,6 @@ long i, size = d->list_of_read_objects.size; gcptr *items = d->list_of_read_objects.items; - if (size == 0) - return 1; - abort(); - -#if 0 for (i=0; ih_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - /* ... unless it's a protected-to-private link */ - if (((gcptr)v)->h_revision == stm_local_revision) - continue; - /* ... or unless it is a GCFLAG_STOLEN object */ - if (R->h_tid & GCFLAG_STOLEN) - { - assert(is_young(R)); - assert(!is_young((gcptr)v)); - R = (gcptr)v; - goto retry; - } - return 0; // really has a more recent revision + return 0; } if (v >= LOCKED) // locked { @@ -475,7 +461,6 @@ } } return 1; -#endif } static void ValidateNow(struct tx_descriptor *d) @@ -703,8 +688,11 @@ goto retry; gcptr L = item->val; - assert(L->h_revision == stm_private_rev_num); + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED ? + L->h_revision == (revision_t)R : + L->h_revision == stm_private_rev_num); assert(v != stm_private_rev_num); + assert(v & 1); L->h_revision = v; /* store temporarily this value here */ } G2L_LOOP_END; @@ -712,8 +700,6 @@ static void CancelLocks(struct tx_descriptor *d) { - abort(); -#if 0 revision_t my_lock = d->my_lock; wlog_t *item; @@ -724,13 +710,20 @@ { gcptr R = item->addr; gcptr L = item->val; - revision_t v = L->h_revision; - if (v == stm_local_revision) + revision_t expected, v = L->h_revision; + + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + expected = (revision_t)R; + else + expected = stm_private_rev_num; + + if (v == expected) { assert(R->h_revision != my_lock); break; /* done */ } - L->h_revision = stm_local_revision; + + L->h_revision = expected; #ifdef DUMP_EXTRA fprintf(stderr, "%p->h_revision = %p (CancelLocks)\n", R, (gcptr)v); @@ -739,7 +732,6 @@ ACCESS_ONCE(R->h_revision) = v; } G2L_LOOP_END; -#endif } //static pthread_mutex_t mutex_prebuilt_gcroots = PTHREAD_MUTEX_INITIALIZER; @@ -812,27 +804,6 @@ g2l_clear(&d->public_to_private); } -#if 0 -void UpdateProtectedChainHeads(struct tx_descriptor *d, revision_t cur_time, - revision_t localrev) -{ - revision_t new_revision = cur_time + 1; // make an odd number - assert(new_revision & 1); - - long i, size = d->protected_with_private_copy.size; - gcptr *items = d->protected_with_private_copy.items; - for (i = 0; i < size; i++) - { - gcptr R = items[i]; - if (R->h_tid & GCFLAG_STOLEN) /* ignore stolen objects */ - continue; - gcptr L = (gcptr)R->h_revision; - assert(L->h_revision == localrev); - L->h_revision = new_revision; - } -} -#endif - void CommitPrivateFromProtected(struct tx_descriptor *d, revision_t cur_time) { long i, size = d->private_from_protected.size; @@ -841,12 +812,19 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - - assert(!(P->h_revision & 1)); // "is a pointer" - gcptr B = (gcptr)P->h_revision; - assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; + + if (P->h_revision & 1) // "is not a pointer" + { + /* This case occurs when a GCFLAG_PRIVATE_FROM_PROTECTED object + is stolen: it ends up as a value in 'public_to_private'. + Its h_revision is then mangled by AcquireLocks(). 
*/ + assert(P->h_revision != stm_private_rev_num); + continue; + } + + gcptr B = (gcptr)P->h_revision; P->h_revision = cur_time; if (B->h_tid & GCFLAG_PUBLIC) @@ -876,10 +854,10 @@ struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); - spinlock_acquire(d->public_descriptor->collection_lock, 'C'); /*committing*/ AcquireLocks(d); if (is_inevitable(d)) @@ -1219,7 +1197,7 @@ d->num_spinloops[i]); p += sprintf(p, "]\n"); - fwrite(line, 1, p - line, stderr); + fprintf(stderr, "%s", line); stm_free(d, sizeof(struct tx_descriptor)); } diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -58,13 +58,14 @@ if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)L->h_revision; /* the backup copy */ B->h_tid &= ~GCFLAG_BACKUP_COPY; - L->h_revision = *foreign_pd->descriptor->private_revision_ref; /* add {B: L} in 'public_to_private', but lazily, because we don't want to walk over the feet of the foreign thread */ B->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; gcptrlist_insert2(&foreign_pd->stolen_objects, B, L); + fprintf(stderr, "stolen: %p -> %p - - -> %p\n", P, B, L); + L = B; } @@ -83,17 +84,15 @@ void stm_normalize_stolen_objects(struct tx_descriptor *d) { - spinlock_acquire(d->public_descriptor->collection_lock, 'N'); - long i, size = d->public_descriptor->stolen_objects.size; gcptr *items = d->public_descriptor->stolen_objects.items; for (i = 0; i < size; i += 2) { gcptr B = items[i]; gcptr L = items[i + 1]; + + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); g2l_insert(&d->public_to_private, B, L); } gcptrlist_clear(&d->public_descriptor->stolen_objects); - - spinlock_release(d->public_descriptor->collection_lock); } diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -236,6 +236,7 @@ assert classify(p) == "public" assert classify(p1) == "private" lib.rawsetlong(p1, 0, 2782172) + pback_ = [] def cb(c): assert c == 0 @@ -246,6 +247,7 @@ assert p2 == p1 lib.rawsetlong(p2, 0, -451112) pback = follow_revision(p1) + pback_.append(pback) assert classify(p1) == "private" assert classify(pback) == "backup" assert lib.stm_read_barrier(p) == p1 @@ -263,11 +265,12 @@ lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() + [pback] = pback_ assert classify(p1) == "protected" assert classify(pback) == "public" assert classify(follow_revision(pback)) == "stub" assert follow_revision(pback).h_revision == ( - ffi.cast("revision_t", p1) | 2) + int(ffi.cast("revision_t", p1)) | 2) def f2(r): def cb(c): From noreply at buildbot.pypy.org Mon Jun 10 09:49:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Jun 2013 09:49:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: Aborting, including aborting GCFLAG_PRIVATE_FROM_PROTECTED Message-ID: <20130610074938.3B11B1C10DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r90:50f4a1c6ad65 Date: 2013-06-10 09:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/50f4a1c6ad65/ Log: Aborting, including aborting GCFLAG_PRIVATE_FROM_PROTECTED diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -494,6 +494,10 @@ struct timespec now; long long elapsed_time; + /* acquire the lock, but don't double-acquire it if already committing */ + if (d->public_descriptor->collection_lock != 'C') + spinlock_acquire(d->public_descriptor->collection_lock, 'C'); + assert(d->active != 0); 
assert(!is_inevitable(d)); assert(num < ABORT_REASONS); @@ -566,11 +570,13 @@ d->reads_size_limit_nonatomic = limit; } + AbortPrivateFromProtected(d); gcptrlist_clear(&d->list_of_read_objects); - abort(); - gcptrlist_clear(&d->private_from_protected); //XXX clean up - abort(); - //stmgc_abort_transaction(d); + g2l_clear(&d->public_to_private); + gcptrlist_clear(&d->public_descriptor->stolen_objects); + + /* release the lock */ + spinlock_release(d->public_descriptor->collection_lock); fprintf(stderr, "\n" @@ -848,6 +854,35 @@ gcptrlist_clear(&d->private_from_protected); } +void AbortPrivateFromProtected(struct tx_descriptor *d) +{ + long i, size = d->private_from_protected.size; + gcptr *items = d->private_from_protected.items; + + for (i = 0; i < size; i++) + { + gcptr P = items[i]; + assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(!(P->h_revision & 1)); // "is a pointer" + + gcptr B = (gcptr)P->h_revision; + if (B->h_tid & GCFLAG_PUBLIC) + { + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); + P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; + P->h_tid |= GCFLAG_PUBLIC; + /* P becomes a public outdated object */ + } + else + { + assert(B->h_tid & GCFLAG_BACKUP_COPY); + memcpy(P, B, stmcb_size(P)); + P->h_tid &= ~GCFLAG_BACKUP_COPY; + } + }; + gcptrlist_clear(&d->private_from_protected); +} + void CommitTransaction(void) { /* must save roots around this call */ revision_t cur_time; diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -228,7 +228,7 @@ r.set(3) run_parallel(f1, f2) -def test_stealing_while_modifying(): +def test_stealing_while_modifying(aborting=False): p = palloc(HDR + WORD) def f1(r): @@ -239,7 +239,15 @@ pback_ = [] def cb(c): - assert c == 0 + if c != 0: + assert aborting + [pback] = pback_ + assert classify(p) == "public" + assert classify(p1) == "public" + assert classify(pback) == "public" + assert lib.stm_read_barrier(p) == pback + assert lib.stm_read_barrier(p1) == pback + return assert classify(p) == "public" assert classify(p1) == "protected" assert classify(follow_revision(p)) == "stub" @@ -261,16 +269,24 @@ assert lib.stm_read_barrier(p1) == p1 assert lib.stm_read_barrier(pback) == p1 assert pback.h_revision & 1 + if aborting: + abort_and_retry() perform_transaction(cb) lib.stm_commit_transaction() lib.stm_begin_inevitable_transaction() [pback] = pback_ - assert classify(p1) == "protected" - assert classify(pback) == "public" - assert classify(follow_revision(pback)) == "stub" - assert follow_revision(pback).h_revision == ( - int(ffi.cast("revision_t", p1)) | 2) + if aborting: + assert classify(p1) == "public" + assert classify(pback) == "public" + assert pback.h_revision & 1 + assert p1.h_revision == int(ffi.cast("revision_t", pback)) + else: + assert classify(p1) == "protected" + assert classify(pback) == "public" + assert classify(follow_revision(pback)) == "stub" + assert follow_revision(pback).h_revision == ( + int(ffi.cast("revision_t", p1)) | 2) def f2(r): def cb(c): @@ -286,3 +302,22 @@ perform_transaction(cb) run_parallel(f1, f2) + +def test_abort_private_from_protected(): + p = nalloc(HDR + WORD) + lib.setlong(p, 0, 897987) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + # + def cb(c): + assert classify(p) == "protected" + assert lib.getlong(p, 0) == 897987 + if c == 0: + lib.setlong(p, 0, -38383) + assert lib.getlong(p, 0) == -38383 + assert classify(p) == "private" + abort_and_retry() + perform_transaction(cb) + +def test_abort_stealing_while_modifying(): + 
test_stealing_while_modifying(aborting=True) From noreply at buildbot.pypy.org Mon Jun 10 09:54:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Jun 2013 09:54:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: Missing forward declaration Message-ID: <20130610075436.848681C10DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r91:fc5246585adc Date: 2013-06-10 09:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/fc5246585adc/ Log: Missing forward declaration diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -487,6 +487,8 @@ int abort_reason, char *output); #endif +void AbortPrivateFromProtected(struct tx_descriptor *d); + void AbortTransaction(int num) { struct tx_descriptor *d = thread_descriptor; From noreply at buildbot.pypy.org Mon Jun 10 19:00:38 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added some more primitives, some of the are stubbed Message-ID: <20130610170038.0A6331C1068@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r436:7917efb6170d Date: 2013-06-03 13:17 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7917efb6170d/ Log: added some more primitives, some of the are stubbed diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -67,7 +67,7 @@ else: return result except error.PrimitiveFailedError: - IProxy.success_flag = False + IProxy.failed() if mapping[result_type] is sqInt: return 0 elif mapping[result_type] is sqDouble: @@ -77,6 +77,7 @@ else: raise NotImplementedError( "InterpreterProxy: unknown result_type %s" % (result_type, )) + wrapped.func_name = "wrapped_ipf_" + func.func_name functions.append(("c_" + func.func_name, f_ptr, wrapped)) return wrapped return decorator @@ -132,10 +133,7 @@ def stackFloatValue(offset): s_frame = IProxy.s_frame f = s_frame.peek(offset) - if isinstance(f, model.W_Float): - return f.value - else: - raise ProxyFunctionFailed + return IProxy.space.unwrap_float(f) @expose_on_virtual_machine_proxy([int], int) def stackIntegerValue(offset): @@ -191,23 +189,96 @@ def fetchClassOf(w_object): w_class = w_object.getclass(IProxy.space) return w_class -# sqInt (*fetchClassOf)(sqInt oop); -# double (*fetchFloatofObject)(sqInt fieldIndex, sqInt objectPointer); -# sqInt (*fetchIntegerofObject)(sqInt fieldIndex, sqInt objectPointer); -# sqInt (*fetchPointerofObject)(sqInt fieldIndex, sqInt oop); -# sqInt (*obsoleteDontUseThisFetchWordofObject)(sqInt fieldFieldIndex, sqInt oop); -# void *(*firstFixedField)(sqInt oop); -# void *(*firstIndexableField)(sqInt oop); -# sqInt (*literalofMethod)(sqInt offset, sqInt methodPointer); -# sqInt (*literalCountOf)(sqInt methodPointer); -# sqInt (*methodArgumentCount)(void); -# sqInt (*methodPrimitiveIndex)(void); -# sqInt (*primitiveIndexOf)(sqInt methodPointer); -# sqInt (*sizeOfSTArrayFromCPrimitive)(void *cPtr); -# sqInt (*slotSizeOf)(sqInt oop); -# sqInt (*stObjectat)(sqInt array, sqInt fieldIndex); -# sqInt (*stObjectatput)(sqInt array, sqInt fieldIndex, sqInt value); -# sqInt (*stSizeOf)(sqInt oop); + + at expose_on_virtual_machine_proxy([int, oop], float) +def fetchFloatofObject(fieldIndex, w_object): + space = IProxy.space + w_float = w_object.fetch(space, fieldIndex) + return space.unwrap_float(w_float) + + at expose_on_virtual_machine_proxy([int, oop], int) +def fetchIntegerofObject(fieldIndex, w_object): + space = IProxy.space + w_int = 
w_object.fetch(space, fieldIndex) + return space.unwrap_int(w_int) + + at expose_on_virtual_machine_proxy([int, oop], oop) +def fetchPointerofObject(fieldIndex, w_object): + return w_object.fetch(IProxy.space, fieldIndex) + + at expose_on_virtual_machine_proxy([int, oop], int) +def obsoleteDontUseThisFetchWordofObject(fieldIndex, w_object): + # XXX: correctness? + space = IProxy.space + w_int = w_object.fetch(space, fieldIndex) + return space.unwrap_uint(w_int) + + at expose_on_virtual_machine_proxy([oop], list) +def firstFixedField(w_object): + # return a list with oops (?) of w_objects instVars + raise NotImplementedError + + at expose_on_virtual_machine_proxy([oop], list) +def firstIndexableField(w_object): + # return a list with values (?) of w_objects variable-parts + raise NotImplementedError + + at expose_on_virtual_machine_proxy([int, oop], oop) +def literalofMethod(offset, w_method): + if isinstance(w_method, model.W_CompiledMethod): + return w_method.literalat0(offset) + else: + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([oop], int) +def literalCountOf(w_method): + if isinstance(w_method, model.W_CompiledMethod): + return w_method.getliteralsize() + else: + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([], int) +def methodArgumentCount(): + return IProxy.argcount + + at expose_on_virtual_machine_proxy([], int) +def methodPrimitiveIndex(): + return IProxy.s_method.primitive() + + at expose_on_virtual_machine_proxy([oop], int) +def primitiveIndexOf(w_method): + if isinstance(w_method, model.W_CompiledMethod): + return w_method.as_compiledmethod_get_shadow().primitive() + else: + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([list], int) +def sizeOfSTArrayFromCPrimitive(c_array): + raise NotImplementedError + + at expose_on_virtual_machine_proxy([oop], int) +def slotSizeOf(w_object): + return w_object.size() + + at expose_on_virtual_machine_proxy([oop, int], oop) +def stObjectat(w_object, n0): + from spyvm.primitives import assert_valid_index + space = IProxy.space + n0 = assert_valid_index(space, n0, w_object) + return w_object.at0(space, n0) + + at expose_on_virtual_machine_proxy([oop, int, oop], int) +def stObjectatput(w_object, n0, w_value): + from spyvm.primitives import assert_valid_index + space = IProxy.space + n0 = assert_valid_index(space, n0, w_object) + w_object.atput0(space, n0, w_value) + return 0 # XXX: check return value + + at expose_on_virtual_machine_proxy([oop], int) +def stSizeOf(w_object): + return w_object.primsize(IProxy.space) + # sqInt (*storeIntegerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt integer); # sqInt (*storePointerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt valuePointer); @@ -259,8 +330,21 @@ # /* InterpreterProxy methodsFor: 'instance creation' */ -# sqInt (*clone)(sqInt oop); -# sqInt (*instantiateClassindexableSize)(sqInt classPointer, sqInt size); + at expose_on_virtual_machine_proxy([oop], oop) +def clone(w_object): + return w_object.clone(IProxy.space) + + at expose_on_virtual_machine_proxy([oop, int], oop) +def instantiateClassindexableSize(w_class, varsize): + s_class = w_class.as_class_get_shadow(IProxy.space) + return s_class.new(varsize) + +# @expose_on_virtual_machine_proxy([int, int], oop) +# def makePointwithxValueyValue(x, y): +# space = IProxy.space +# w_x = space.wrap_int(x) +# w_y = space.wrap_int(y) + # sqInt (*makePointwithxValueyValue)(sqInt xValue, sqInt yValue); # sqInt (*popRemappableOop)(void); # sqInt (*pushRemappableOop)(sqInt oop); @@ 
-271,13 +355,36 @@ # sqInt (*byteSwapped)(sqInt w); # sqInt (*failed)(void); # sqInt (*fullDisplayUpdate)(void); -# sqInt (*fullGC)(void); -# sqInt (*incrementalGC)(void); -# sqInt (*primitiveFail)(void); + at expose_on_virtual_machine_proxy([], int) +def fullGC(): + # XXX: how to invoke gc? + return 0 + at expose_on_virtual_machine_proxy([], int) +def incrementalGC(): + # XXX: how to invoke gc? + return 0 + + at expose_on_virtual_machine_proxy([], int) +def primitiveFail(): + raise ProxyFunctionFailed + # sqInt (*showDisplayBitsLeftTopRightBottom)(sqInt aForm, sqInt l, sqInt t, sqInt r, sqInt b); # sqInt (*signalSemaphoreWithIndex)(sqInt semaIndex); -# sqInt (*success)(sqInt aBoolean); -# sqInt (*superclassOf)(sqInt classPointer); + + at expose_on_virtual_machine_proxy([bool], int) +def success(aBoolean): + if aBoolean: + return 0 + else: + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([oop], oop) +def superclassOf(w_class): + s_superclass = w_class.as_class_get_shadow(IProxy.space).s_superclass() + if s_superclass is not None: + return s_superclass.w_self() + else: + return IProxy.space.w_nil # /* InterpreterProxy methodsFor: 'compiler' */ @@ -471,6 +578,7 @@ self.argcount = 0 self.s_method = None self.success_flag = True + self.fail_reason = 0 def call(self, signature, interp, s_frame, argcount, s_method): self.interp = interp @@ -481,6 +589,8 @@ try: print "Hello World..." raise error.Exit("External Call") + if not self.success_flag: + raise error.PrimitiveFailedError finally: self.reset() @@ -490,4 +600,8 @@ def object_to_oop(self, oop): return 0 + def failed(self, reason=1): + self.success_flag = False + self.fail_reason = reason + IProxy = _InterpreterProxy() From noreply at buildbot.pypy.org Mon Jun 10 19:00:43 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added storing into object to interpreter proxy Message-ID: <20130610170043.849171C1380@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r441:ca33279eb6d4 Date: 2013-06-10 18:56 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ca33279eb6d4/ Log: added storing into object to interpreter proxy diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -280,8 +280,23 @@ def stSizeOf(w_object): return w_object.primsize(IProxy.space) -# sqInt (*storeIntegerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt integer); -# sqInt (*storePointerofObjectwithValue)(sqInt fieldIndex, sqInt oop, sqInt valuePointer); + at expose_on_virtual_machine_proxy([int, oop, int], oop) +def storeIntegerofObjectwithValue(n0, w_object, a): + if w_object.size() > n0: + space = IProxy.space + w_object.store(space, n0, space.wrap_int(a)) + return space.wrap_int(a) + else: + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([int, oop, oop], oop) +def storePointerofObjectwithValue(n0, w_object, w_value): + if w_object.size() > n0: + w_object.store(IProxy.space, n0, w_value) + return w_value + else: + IProxy.failed() + return w_value # /* InterpreterProxy methodsFor: 'testing' */ From noreply at buildbot.pypy.org Mon Jun 10 19:00:39 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: fixed some type-issues by branching in interpreter proxy Message-ID: <20130610170039.276EF1C10DD@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann 
Branch: Changeset: r437:d94a8bfa757f Date: 2013-06-10 13:46 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d94a8bfa757f/ Log: fixed some type-issues by branching in interpreter proxy added maps for oop-to-object mapping diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -226,7 +226,7 @@ @expose_on_virtual_machine_proxy([int, oop], oop) def literalofMethod(offset, w_method): if isinstance(w_method, model.W_CompiledMethod): - return w_method.literalat0(offset) + return w_method.literalat0(IProxy.space, offset) else: raise ProxyFunctionFailed @@ -267,13 +267,13 @@ n0 = assert_valid_index(space, n0, w_object) return w_object.at0(space, n0) - at expose_on_virtual_machine_proxy([oop, int, oop], int) + at expose_on_virtual_machine_proxy([oop, int, oop], oop) def stObjectatput(w_object, n0, w_value): from spyvm.primitives import assert_valid_index space = IProxy.space n0 = assert_valid_index(space, n0, w_object) w_object.atput0(space, n0, w_value) - return 0 # XXX: check return value + return w_value @expose_on_virtual_machine_proxy([oop], int) def stSizeOf(w_object): @@ -336,6 +336,8 @@ @expose_on_virtual_machine_proxy([oop, int], oop) def instantiateClassindexableSize(w_class, varsize): + if not isinstance(w_class, model.W_PointersObject): + raise error.PrimitiveFailedError s_class = w_class.as_class_get_shadow(IProxy.space) return s_class.new(varsize) @@ -380,6 +382,8 @@ @expose_on_virtual_machine_proxy([oop], oop) def superclassOf(w_class): + if not isinstance(w_class, model.W_PointersObject): + raise error.PrimitiveFailedError s_superclass = w_class.as_class_get_shadow(IProxy.space).s_superclass() if s_superclass is not None: return s_superclass.w_self() @@ -570,6 +574,9 @@ def __init__(self): self.vm_proxy = lltype.nullptr(VMPtr.TO) self.vm_initialized = False + self._next_oop = 0 + self.oop_map = {} + self.object_map = {} self.reset() def reset(self): @@ -587,18 +594,33 @@ self.s_method = s_method self.space = interp.space try: - print "Hello World..." - raise error.Exit("External Call") + # Load the correct DLL + self.success_flag = False + # call the correct function in it... 
if not self.success_flag: raise error.PrimitiveFailedError finally: self.reset() def oop_to_object(self, oop): - return self.interp.space.w_nil + try: + return self.oop_map[oop] + except KeyError: + raise ProxyFunctionFailed - def object_to_oop(self, oop): - return 0 + def object_to_oop(self, w_object): + try: + return self.object_map[w_object] + except KeyError: + new_index = self.next_oop() + self.oop_map[new_index] = w_object + self.object_map[w_object] = new_index + return new_index + + def next_oop(self): + next_oop = self._next_oop + self._next_oop = next_oop + 1 + return next_oop def failed(self, reason=1): self.success_flag = False From noreply at buildbot.pypy.org Mon Jun 10 19:00:45 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:45 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added misc category functions Message-ID: <20130610170045.AAD891C13E5@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r442:127ac7fc16ff Date: 2013-06-10 18:57 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/127ac7fc16ff/ Log: added misc category functions diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -473,15 +473,21 @@ s_class = w_class.as_class_get_shadow(IProxy.space) return s_class.new(varsize) -# @expose_on_virtual_machine_proxy([int, int], oop) -# def makePointwithxValueyValue(x, y): -# space = IProxy.space -# w_x = space.wrap_int(x) -# w_y = space.wrap_int(y) + at expose_on_virtual_machine_proxy([int, int], oop) +def makePointwithxValueyValue(x, y): + space = IProxy.space + w_point = space.w_Point.as_class_get_shadow(space).new() + w_point.store(space, 0, space.wrap_int(x)) + w_point.store(space, 1, space.wrap_int(y)) + return w_point -# sqInt (*makePointwithxValueyValue)(sqInt xValue, sqInt yValue); -# sqInt (*popRemappableOop)(void); -# sqInt (*pushRemappableOop)(sqInt oop); + at expose_on_virtual_machine_proxy([], oop) +def popRemappableOop(): + return IProxy.pop_remappable() + + at expose_on_virtual_machine_proxy([oop], oop) +def pushRemappableOop(w_object): + return IProxy.push_remappable(w_object) # /* InterpreterProxy methodsFor: 'other' */ @@ -709,6 +715,7 @@ self._next_oop = 0 self.oop_map = {} self.object_map = {} + self.remappable_objects = [] self.reset() def reset(self): @@ -716,7 +723,6 @@ self.s_frame = None self.argcount = 0 self.s_method = None - self.success_flag = True self.fail_reason = 0 def call(self, signature, interp, s_frame, argcount, s_method): @@ -725,15 +731,20 @@ self.argcount = argcount self.s_method = s_method self.space = interp.space + # ensure that space.w_nil gets the first possible oop + self.object_to_oop(self.space.w_nil) try: # Load the correct DLL - self.success_flag = False + self.failed() # call the correct function in it... 
- if not self.success_flag: + if not self.fail_reason == 0: raise error.PrimitiveFailedError finally: self.reset() + def failed(self, reason=1): + self.fail_reason = reason + def oop_to_object(self, oop): try: return self.oop_map[oop] @@ -754,8 +765,15 @@ self._next_oop = next_oop + 1 return next_oop - def failed(self, reason=1): - self.success_flag = False - self.fail_reason = reason + def pop_remappable(self): + try: + return self.remappable_objects.pop() + except IndexError: + self.failed() + return self.space.w_nil + + def push_remappable(self, w_object): + self.remappable_objects.append(w_object) + return w_object IProxy = _InterpreterProxy() From noreply at buildbot.pypy.org Mon Jun 10 19:00:40 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added constant returning functions to interpreter-proxy Message-ID: <20130610170040.3AA131C1190@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r438:a5e3526739ba Date: 2013-06-10 18:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a5e3526739ba/ Log: added constant returning functions to interpreter-proxy added bitmap-class to constants diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -107,7 +107,7 @@ # XXX more missing? classes_in_special_object_table = { -# "Bitmap" : SO_BITMAP_CLASS, + "Bitmap" : SO_BITMAP_CLASS, "SmallInteger" : SO_SMALLINTEGER_CLASS, "String" : SO_STRING_CLASS, "Array" : SO_ARRAY_CLASS, diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -309,24 +309,71 @@ # /* InterpreterProxy methodsFor: 'special objects' */ -# sqInt (*characterTable)(void); -# sqInt (*displayObject)(void); -# sqInt (*falseObject)(void); -# sqInt (*nilObject)(void); -# sqInt (*trueObject)(void); + at expose_on_virtual_machine_proxy([], oop) +def characterTable(): + return IProxy.space.w_charactertable + + at expose_on_virtual_machine_proxy([], oop) +def displayObject(): + return IProxy.space.objtable['w_display'] + + at expose_on_virtual_machine_proxy([], oop) +def falseObject(): + return IProxy.space.w_false + + at expose_on_virtual_machine_proxy([], oop) +def nilObject(): + return IProxy.space.w_nil + + at expose_on_virtual_machine_proxy([], oop) +def trueObject(): + return IProxy.space.w_true # /* InterpreterProxy methodsFor: 'special classes' */ -# sqInt (*classArray)(void); -# sqInt (*classBitmap)(void); -# sqInt (*classByteArray)(void); -# sqInt (*classCharacter)(void); -# sqInt (*classFloat)(void); -# sqInt (*classLargePositiveInteger)(void); -# sqInt (*classPoint)(void); -# sqInt (*classSemaphore)(void); -# sqInt (*classSmallInteger)(void); -# sqInt (*classString)(void); +# Can't generate these, because the IProxy-field-access can only be done after +# first "call"-call + + at expose_on_virtual_machine_proxy([], oop) +def classArray(): + return IProxy.space.w_Array + + at expose_on_virtual_machine_proxy([], oop) +def classBitmap(): + return IProxy.space.w_Bitmap + + at expose_on_virtual_machine_proxy([], oop) +def classByteArray(): + return IProxy.space.w_ByteArray + + at expose_on_virtual_machine_proxy([], oop) +def classCharacter(): + return IProxy.space.w_Character + + at expose_on_virtual_machine_proxy([], oop) +def classFloat(): + return IProxy.space.w_Float + + at expose_on_virtual_machine_proxy([], oop) +def classLargePositiveInteger(): + return 
IProxy.space.w_LargePositiveInteger + + at expose_on_virtual_machine_proxy([], oop) +def classPoint(): + return IProxy.space.w_Point + + at expose_on_virtual_machine_proxy([], oop) +def classSemaphore(): + return IProxy.space.w_Semaphore + + at expose_on_virtual_machine_proxy([], oop) +def classSmallInteger(): + return IProxy.space.w_SmallInteger + + at expose_on_virtual_machine_proxy([], oop) +def classString(): + return IProxy.space.w_String + # /* InterpreterProxy methodsFor: 'instance creation' */ diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -38,7 +38,8 @@ def instsize(self, space): """Return the size of the object reserved for instance variables. - Only returns something non-zero for W_PointersObjects""" + Only returns something non-zero for W_PointersObjects, W_Floats, and + W_LargePositiveInteger1Words""" return 0 def varsize(self, space): From noreply at buildbot.pypy.org Mon Jun 10 19:00:46 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:46 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added remaining other functions Message-ID: <20130610170046.E443D1C1068@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r443:2a52dc328e17 Date: 2013-06-10 18:58 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2a52dc328e17/ Log: added remaining other functions diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -491,10 +491,30 @@ # /* InterpreterProxy methodsFor: 'other' */ -# sqInt (*becomewith)(sqInt array1, sqInt array2); -# sqInt (*byteSwapped)(sqInt w); -# sqInt (*failed)(void); -# sqInt (*fullDisplayUpdate)(void); + at expose_on_virtual_machine_proxy([list, list], int) +def becomewith(w_array1, w_array2): + # XXX: stub, until used + print "InterpreterProxy >> becomewith(list, list)" + return 0 + + at expose_on_virtual_machine_proxy([int], int) +def byteSwapped(w): + from rpython.rlib.rarithmetic import intmask + return (w >> 24) & 0xFF + (w >> 8) & 0xFF00 + (w << 8) & 0xFF0000 + (w << 24) & -16777216 + + at expose_on_virtual_machine_proxy([], bool) +def failed(): + return not IProxy.fail_reason == 0 + + at expose_on_virtual_machine_proxy([], int) +def fullDisplayUpdate(): + w_display = IProxy.space.objtable['w_display'] + if isinstance(w_display, model.W_DisplayBitmap): + w_display.flush_to_screen() + return 0 + else: + raise ProxyFunctionFailed + @expose_on_virtual_machine_proxy([], int) def fullGC(): # XXX: how to invoke gc? 
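
A note on the byteSwapped() helper introduced in changeset r443 above: in Python, '+' binds tighter than '&', so an expression of the form (w >> 24) & 0xFF + (w >> 8) & 0xFF00 groups the additions before the masks; each shifted term normally needs its own parentheses before it is combined. Below is a minimal standalone sketch of 32-bit byte swapping in plain Python; the function name and the explicit sign handling are illustrative assumptions, not the spyvm implementation.

    def byte_swapped_32(w):
        # Keep only the low 32 bits, then move each byte to its mirrored position.
        w &= 0xFFFFFFFF
        swapped = (((w >> 24) & 0x000000FF) |
                   ((w >> 8)  & 0x0000FF00) |
                   ((w << 8)  & 0x00FF0000) |
                   ((w << 24) & 0xFF000000))
        # Reinterpret as a signed 32-bit value, roughly what intmask() does in RPython.
        if swapped >= 0x80000000:
            swapped -= 0x100000000
        return swapped

    assert byte_swapped_32(0x12345678) == 0x78563412
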
From noreply at buildbot.pypy.org Mon Jun 10 19:00:41 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added testing methods, some of the stubbed Message-ID: <20130610170041.551CA1C11A6@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r439:d6e12befe317 Date: 2013-06-10 18:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d6e12befe317/ Log: added testing methods, some of the stubbed diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -23,6 +23,7 @@ sqLong = rffi.LONG sqDouble = rffi.DOUBLE sqIntArrayPtr = Ptr(rffi.CArray(sqInt)) +sqStr = rffi.CCHARP major = minor = 0 functions = [] @@ -33,7 +34,7 @@ pass def expose_on_virtual_machine_proxy(unwrap_spec, result_type, minor=0, major=1): - mapping = {oop: sqInt, int: sqInt, list: sqIntArrayPtr, bool: sqInt, float: sqDouble} + mapping = {oop: sqInt, int: sqInt, list: sqIntArrayPtr, bool: sqInt, float: sqDouble, str: sqStr} f_ptr = Ptr(FuncType([mapping[spec] for spec in unwrap_spec], mapping[result_type])) if minor < minor: minor = minor @@ -284,17 +285,58 @@ # /* InterpreterProxy methodsFor: 'testing' */ + at expose_on_virtual_machine_proxy([oop, str], bool) +def isKindOf(w_object, name): + # XXX: stub, until used + print "InterpreterProxy >> isKindOf(object, name)" + return False # sqInt (*isKindOf)(sqInt oop, char *aString); + + at expose_on_virtual_machine_proxy([oop, str], bool) +def isMemberOf(w_object, name): + # XXX: stub, until used + print "InterpreterProxy >> isMemberOf(object, name)" + return False # sqInt (*isMemberOf)(sqInt oop, char *aString); -# sqInt (*isBytes)(sqInt oop); -# sqInt (*isFloatObject)(sqInt oop); -# sqInt (*isIndexable)(sqInt oop); -# sqInt (*isIntegerObject)(sqInt objectPointer); -# sqInt (*isIntegerValue)(sqInt intValue); -# sqInt (*isPointers)(sqInt oop); -# sqInt (*isWeak)(sqInt oop); -# sqInt (*isWords)(sqInt oop); -# sqInt (*isWordsOrBytes)(sqInt oop); + + at expose_on_virtual_machine_proxy([oop], bool) +def isBytes(w_object): + return isinstance(w_object, model.W_BytesObject) + + at expose_on_virtual_machine_proxy([oop], bool) +def isFloatObject(w_object): + return isinstance(w_object, model.W_Float) + + at expose_on_virtual_machine_proxy([oop], bool) +def isIndexable(w_object): + space = IProxy.space + return w_object.getclass(space).as_class_get_shadow(space).isvariable() + + at expose_on_virtual_machine_proxy([oop], bool) +def isIntegerObject(w_object): + return isinstance(w_object, model.W_SmallInteger) + + at expose_on_virtual_machine_proxy([int], bool) +def isIntegerValue(n): + """Checking whether the two highest bits are equal, + which means that the value is representable as 31/63-bit value.""" + return n ^ (n << 1) >= 0 + + at expose_on_virtual_machine_proxy([oop], bool) +def isPointers(w_object): + return isinstance(w_object, model.W_PointersObject) + + at expose_on_virtual_machine_proxy([oop], bool) +def isWeak(w_object): + return isinstance(w_object, model.W_WeakPointersObject) + + at expose_on_virtual_machine_proxy([oop], bool) +def isWords(w_object): + return w_object.is_array_object() and not isinstance(w_object, model.W_BytesObject) + + at expose_on_virtual_machine_proxy([oop], bool) +def isWordsOrBytes(w_object): + return w_object.is_array_object() # /* InterpreterProxy methodsFor: 'converting' */ From noreply at buildbot.pypy.org Mon Jun 10 19:00:42 2013 From: noreply at 
buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:00:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added converting methods to interpreter proxy Message-ID: <20130610170042.7540A1C136D@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r440:c00e555fc976 Date: 2013-06-10 18:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c00e555fc976/ Log: added converting methods to interpreter proxy diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -340,14 +340,42 @@ # /* InterpreterProxy methodsFor: 'converting' */ -# sqInt (*booleanValueOf)(sqInt obj); -# sqInt (*checkedIntegerValueOf)(sqInt intOop); -# sqInt (*floatObjectOf)(double aFloat); -# double (*floatValueOf)(sqInt oop); -# sqInt (*integerObjectOf)(sqInt value); -# sqInt (*integerValueOf)(sqInt oop); -# sqInt (*positive32BitIntegerFor)(sqInt integerValue); -# sqInt (*positive32BitValueOf)(sqInt oop); + at expose_on_virtual_machine_proxy([oop], bool) +def booleanValueOf(w_object): + space = IProxy.space + if w_object is space.w_true: + return True + if w_object is space.w_false: + return False + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([oop], int) +def checkedIntegerValueOf(w_object): + return IProxy.space.unwrap_int(w_object) + + at expose_on_virtual_machine_proxy([float], oop) +def floatObjectOf(f): + return IProxy.space.wrap_float(f) + + at expose_on_virtual_machine_proxy([oop], float) +def floatValueOf(w_object): + return IProxy.space.unwrap_float(w_object) + + at expose_on_virtual_machine_proxy([int], oop) +def integerObjectOf(n): + return IProxy.space.wrap_int(n) + + at expose_on_virtual_machine_proxy([oop], int) +def integerValueOf(w_object): + return IProxy.space.unwrap_int(w_object) + + at expose_on_virtual_machine_proxy([int], oop) +def positive32BitIntegerFor(n): + return IProxy.space.wrap_positive_32bit_int(n) + + at expose_on_virtual_machine_proxy([oop], int) +def positive32BitValueOf(n): + return IProxy.space.unwrap_positive_32bit_int(n) # /* InterpreterProxy methodsFor: 'special objects' */ From noreply at buildbot.pypy.org Mon Jun 10 19:21:28 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Jun 2013 19:21:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill this hack. It's a Very Bad idea to do it this way: it means Message-ID: <20130610172128.B75D81C02C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64844:123cde0fe60f Date: 2013-06-10 19:16 +0200 http://bitbucket.org/pypy/pypy/changeset/123cde0fe60f/ Log: Kill this hack. 
It's a Very Bad idea to do it this way: it means translation will fail on any user-provided example too :-(( diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py @@ -2,7 +2,6 @@ import weakref from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.jit.backend.llsupport.test.zrpy_gc_test import run, get_entry, compile -from rpython.jit.backend.llsupport.test.ztranslation_test import fix_annotator_for_vrawbuffer class X(object): def __init__(self, x=0): @@ -32,8 +31,7 @@ g._dont_inline_ = True return g -def compile_boehm_test(monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) +def compile_boehm_test(): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @dont_look_inside def see(lst, n): diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -9,23 +9,10 @@ from rpython.jit.codewriter.policy import StopAtXPolicy -def fix_annotator_for_vrawbuffer(monkeypatch): - from rpython.rlib.nonconst import NonConstant - from rpython.jit.metainterp.optimizeopt.virtualize import VRawBufferValue - from rpython.jit.metainterp import warmspot - - def my_hook_for_tests(cpu): - # this is needed so that the annotator can see it - if NonConstant(False): - v = VRawBufferValue(cpu, None, -1, None, None) - monkeypatch.setattr(warmspot, 'hook_for_tests', my_hook_for_tests) - - class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() - def test_stuff_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_stuff_translates(self): # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges @@ -102,10 +89,9 @@ class TranslationTestCallAssembler(CCompiledMixin): CPUClass = getcpuclass() - def test_direct_assembler_call_translates(self, monkeypatch): + def test_direct_assembler_call_translates(self): """Test CALL_ASSEMBLER and the recursion limit""" from rpython.rlib.rstackovf import StackOverflow - fix_annotator_for_vrawbuffer(monkeypatch) class Thing(object): def __init__(self, val): @@ -183,8 +169,7 @@ class TranslationTestJITStats(CCompiledMixin): CPUClass = getcpuclass() - def test_jit_get_stats(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_jit_get_stats(self): driver = JitDriver(greens = [], reds = ['i']) def f(): @@ -207,8 +192,7 @@ class TranslationRemoveTypePtrTest(CCompiledMixin): CPUClass = getcpuclass() - def test_external_exception_handling_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_external_exception_handling_translates(self): jitdriver = JitDriver(greens = [], reds = ['n', 'total']) class ImDone(Exception): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -761,8 +761,6 @@ cpu = jd.warmstate.cpu def ll_portal_runner(*args): - hook_for_tests(cpu) # usually it's empty, but tests can monkeypatch - # it to fix the annotator start = True while 1: try: @@ -999,10 +997,3 @@ graphs = self.translator.graphs for graph, block, i in find_force_quasi_immutable(graphs): 
self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - -def hook_for_tests(cpu): - """ - This function is empty and does nothing. Its only role is to be - monkey-patched by tests to "fix" the annotator if needed (see - e.g. x86/test/test_ztranslation::test_external_exception_handling_translates - """ From noreply at buildbot.pypy.org Mon Jun 10 19:29:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Jun 2013 19:29:26 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for 123cde0fe60f Message-ID: <20130610172926.9E32A1C1026@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64845:48357e1d63e1 Date: 2013-06-10 19:28 +0200 http://bitbucket.org/pypy/pypy/changeset/48357e1d63e1/ Log: fix for 123cde0fe60f diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -17,6 +17,7 @@ _attrs_ = ('keybox', 'source_op', '_cached_vinfo') box = None level = optimizer.LEVEL_NONNULL + is_about_raw = False _cached_vinfo = None def __init__(self, keybox, source_op=None): @@ -395,6 +396,7 @@ class VRawBufferValue(AbstractVArrayValue): + is_about_raw = True def __init__(self, cpu, logops, size, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -457,6 +459,7 @@ class VRawSliceValue(AbstractVirtualValue): + is_about_raw = True def __init__(self, rawbuffer_value, offset, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -676,13 +679,17 @@ offsetbox = self.get_constant_box(op.getarg(1)) if value.is_virtual() and offsetbox is not None: offset = offsetbox.getint() - if isinstance(value, VRawBufferValue): - self.make_virtual_raw_slice(value, offset, op.result, op) - return - elif isinstance(value, VRawSliceValue): - offset = offset + value.offset - self.make_virtual_raw_slice(value.rawbuffer_value, offset, op.result, op) - return + # the following check is constant-folded to False if the + # translation occurs without any VRawXxxValue instance around + if value.is_about_raw: + if isinstance(value, VRawBufferValue): + self.make_virtual_raw_slice(value, offset, op.result, op) + return + elif isinstance(value, VRawSliceValue): + offset = offset + value.offset + self.make_virtual_raw_slice(value.rawbuffer_value, offset, + op.result, op) + return self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -451,6 +451,7 @@ class AbstractVirtualInfo(object): kind = REF + is_about_raw = False #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): @@ -461,7 +462,7 @@ def debug_prints(self): raise NotImplementedError - + class AbstractVirtualStructInfo(AbstractVirtualInfo): def __init__(self, fielddescrs): @@ -547,6 +548,7 @@ class VRawBufferStateInfo(AbstractVirtualInfo): kind = INT + is_about_raw = True def __init__(self, size, offsets, descrs): self.size = size @@ -772,7 +774,9 @@ assert self.virtuals_cache is not None v = self.virtuals_cache.get_int(index) if not v: - v = self.rd_virtuals[index].allocate_int(self, index) + v = self.rd_virtuals[index] + assert v.is_about_raw and isinstance(v, VRawBufferStateInfo) + v = v.allocate_int(self, index) ll_assert(v == self.virtuals_cache.get_int(index), "resume.py: bad cache") return v From noreply at 
buildbot.pypy.org Mon Jun 10 19:30:58 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:30:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: stubbed some more functions of interpreterproxy: Message-ID: <20130610173058.072551C1026@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r444:839dc50737a1 Date: 2013-06-10 19:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/839dc50737a1/ Log: stubbed some more functions of interpreterproxy: compiler- hooks and bitblt-calls diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -528,8 +528,16 @@ def primitiveFail(): raise ProxyFunctionFailed -# sqInt (*showDisplayBitsLeftTopRightBottom)(sqInt aForm, sqInt l, sqInt t, sqInt r, sqInt b); -# sqInt (*signalSemaphoreWithIndex)(sqInt semaIndex); + at expose_on_virtual_machine_proxy([oop, int, int, int, int], int) +def showDisplayBitsLeftTopRightBottom(w_form, l, t, r, b): + print 'Called InterpreterProxy >> showDisplayBitsLeftTopRightBottom' + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([int], int) +def signalSemaphoreWithIndex(n): + # ((Smalltalk externalObjects) at: n) signal + print 'Called InterpreterProxy >> signalSemaphoreWithIndex' + raise ProxyFunctionFailed @expose_on_virtual_machine_proxy([bool], int) def success(aBoolean): @@ -550,18 +558,36 @@ # /* InterpreterProxy methodsFor: 'compiler' */ -# CompilerHook *(*compilerHookVector)(void); -# sqInt (*setCompilerInitialized)(sqInt initFlag); -# #if VM_PROXY_MINOR > 1 + at expose_on_virtual_machine_proxy([], int) +def compilerHookVector(): + print 'Called InterpreterProxy >> compilerHookVector' + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([int], int) +def setCompilerInitialized(n): + print 'Called InterpreterProxy >> setCompilerInitialized' + raise ProxyFunctionFailed # /* InterpreterProxy methodsFor: 'BitBlt support' */ -# sqInt (*loadBitBltFrom)(sqInt bbOop); -# sqInt (*copyBits)(void); -# sqInt (*copyBitsFromtoat)(sqInt leftX, sqInt rightX, sqInt yValue); + at expose_on_virtual_machine_proxy([int], int, minor=1) +def loadBitBltFrom(w_bitBlit): + # bb := bbOop + print 'Called InterpreterProxy >> loadBitBltFrom' + raise ProxyFunctionFailed -# #endif + at expose_on_virtual_machine_proxy([], int, minor=1) +def copyBits(): + # bb copyBits + print 'Called InterpreterProxy >> copyBits' + raise ProxyFunctionFailed + + at expose_on_virtual_machine_proxy([int, int, int], int, minor=1) +def copyBitsFromtoat(x0, x1, y): + # bb copyBitsFrom: x0 to: x1 at: y + print 'Called InterpreterProxy >> copyBitsFromtoat' + raise ProxyFunctionFailed # #if VM_PROXY_MINOR > 2 From noreply at buildbot.pypy.org Mon Jun 10 19:30:59 2013 From: noreply at buildbot.pypy.org (lwassermann) Date: Mon, 10 Jun 2013 19:30:59 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: added prebuild class "Bitmap" needed for assignment from image, the interpreter proxy and (maybe eventually) bitblt Message-ID: <20130610173059.198A01C1026@cobra.cs.uni-duesseldorf.de> Author: Lars Wassermann Branch: Changeset: r445:ab55fd796e76 Date: 2013-06-10 19:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ab55fd796e76/ Log: added prebuild class "Bitmap" needed for assignment from image, the interpreter proxy and (maybe eventually) bitblt diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -94,6 +94,7 @@ define_cls("w_ArrayedCollection", 
"w_SequenceableCollection") define_cls("w_Array", "w_ArrayedCollection", varsized=True) define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES) + define_cls("w_Bitmap", "w_ArrayedCollection", varsized=True, format=shadow.WORDS) define_cls("w_UndefinedObject", "w_Object") define_cls("w_Boolean", "w_Object") define_cls("w_True", "w_Boolean") From noreply at buildbot.pypy.org Tue Jun 11 12:43:54 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 12:43:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill unused method Message-ID: <20130611104354.1F7D21C0328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64846:35e9a41d7bdd Date: 2013-06-11 11:56 +0200 http://bitbucket.org/pypy/pypy/changeset/35e9a41d7bdd/ Log: Kill unused method diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -103,11 +103,6 @@ 'flags' : 'app.null_sysflags', } - def setbuiltinmodule(self, w_module, name): - w_name = self.space.wrap(name) - w_modules = self.get('modules') - self.space.setitem(w_modules, w_name, w_module) - def startup(self, space): if space.config.translating and not we_are_translated(): # don't get the filesystemencoding at translation time From noreply at buildbot.pypy.org Tue Jun 11 12:43:55 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 12:43:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1514 Message-ID: <20130611104355.6D8C41C0328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64847:92546f437072 Date: 2013-06-11 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/92546f437072/ Log: Issue #1514 A test for reimporting built-in modules (as opposed to reloading them). diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -565,6 +565,22 @@ assert sys.path is oldpath assert 'setdefaultencoding' in dir(sys) + def test_reimport_builtin(self): + # ...but not reload()! + import sys + oldpath = sys.path + sys.setdefaultencoding = "" + + del sys.modules['sys'] + import sys as sys1 + assert sys.modules['sys'] is sys1 is sys + + assert sys.path is oldpath + assert sys.setdefaultencoding == "" + + reload(sys) # fix it for people that want 'setdefaultencoding' + assert sys.setdefaultencoding != "" + def test_reload_infinite(self): import infinite_reload From noreply at buildbot.pypy.org Tue Jun 11 12:43:56 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 12:43:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for 92546f437072. Message-ID: <20130611104356.9BA861C0328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64848:f26956c61773 Date: 2013-06-11 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/f26956c61773/ Log: Fix for 92546f437072. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -381,8 +381,10 @@ except AttributeError: return self.__class__.__name__ - def setbuiltinmodule(self, importname): - """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" + def _prepare_mixedmodule(self, importname): + """NOT_RPYTHON. Load the RPython code of a lazy pypy/module and put + it into 'self.builtin_modules', without any further initialization. + """ if '.' 
in importname: fullname = importname importname = fullname.rsplit('.', 1)[1] @@ -401,20 +403,22 @@ return name - def getbuiltinmodule(self, name, force_init=False): - w_name = self.wrap(name) - w_modules = self.sys.get('modules') - try: - w_mod = self.getitem(w_modules, w_name) - except OperationError, e: - if not e.match(self, self.w_KeyError): - raise - else: - if not force_init: - return w_mod + @jit.elidable + def getbuiltinmodule(self, name): + """Return the built-in module 'name'. The first time it is seen, + it is initialized and stored in 'sys.modules'. This function is + elidable by the JIT because its effect is idempotent (if you call + it twice with the same name, you're getting the same effect as if + it was only called once). + """ + return self.loadbuiltinmodule(name, force_in_sys_modules=False, + force_init=False) - # If the module is a builtin but not yet imported, - # retrieve it and initialize it + def loadbuiltinmodule(self, name, force_in_sys_modules, force_init): + """For the importing logic. Get the built-in module, stick it + into 'sys.modules' if not initialized or if force_in_sys_modules, + and initialize it if it was not already or if force_init. + """ try: w_mod = self.builtin_modules[name] except KeyError: @@ -422,15 +426,20 @@ self.w_SystemError, "getbuiltinmodule() called " "with non-builtin module %s", name) - else: - # Add the module to sys.modules - self.setitem(w_modules, w_name, w_mod) - # And initialize it - from pypy.interpreter.module import Module - if isinstance(w_mod, Module): - w_mod.init(self) - return w_mod + from pypy.interpreter.module import Module + if isinstance(w_mod, Module) and not w_mod.startup_called: + force_in_sys_modules = True # not initialized so far: + force_init = True # force initialization + + if force_in_sys_modules: + w_sys_modules = self.sys.get('modules') + self.setitem(w_sys_modules, self.wrap(name), w_mod) + + if force_init and isinstance(w_mod, Module): + w_mod.init(self) + + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -547,12 +556,12 @@ def install_mixedmodule(self, mixedname, installed_builtin_modules): """NOT_RPYTHON""" - modname = self.setbuiltinmodule(mixedname) - if modname: - assert modname not in installed_builtin_modules, ( - "duplicate interp-level module enabled for the " - "app-level module %r" % (modname,)) - installed_builtin_modules.append(modname) + modname = self._prepare_mixedmodule(mixedname) + assert modname + assert modname not in installed_builtin_modules, ( + "duplicate interp-level module enabled for the " + "app-level module %r" % (modname,)) + installed_builtin_modules.append(modname) def setup_builtin_modules(self): "NOT_RPYTHON: only for initializing the space." 
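Illustrative sketch, not part of the changeset above: the @jit.elidable idiom that f26956c61773 applies to getbuiltinmodule(). The names _registry and lookup below are made up for illustration; only rpython.rlib.jit.elidable is taken from the codebase. Declaring a function elidable promises that, for a given argument, repeated calls return the same value and have only idempotent effects, so the JIT is allowed to constant-fold a second call with the same argument:

    from rpython.rlib import jit

    _registry = {}                  # hypothetical: filled once, before translation

    @jit.elidable
    def lookup(name):
        # same 'name' -> same result, and re-running it changes nothing,
        # so the JIT may reuse the result of an earlier identical call
        return _registry[name]
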
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -573,14 +573,16 @@ load_extension_module(space, filename, modulename) @jit.dont_look_inside -def load_module(space, w_modulename, find_info, reuse=False): +def load_module(space, w_modulename, find_info, reuse=False, force_init=False): if find_info is None: return if find_info.w_loader: return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.getbuiltinmodule(find_info.filename, force_init=True) + return space.loadbuiltinmodule(find_info.filename, + force_in_sys_modules=True, + force_init=force_init) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -721,7 +723,8 @@ try: try: - return load_module(space, w_modulename, find_info, reuse=True) + return load_module(space, w_modulename, find_info, reuse=True, + force_init=True) finally: if find_info.stream: find_info.stream.close() From noreply at buildbot.pypy.org Tue Jun 11 13:26:16 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:26:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix this test to also pass on CPython Message-ID: <20130611112616.8D4151C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64849:93a61b856da8 Date: 2013-06-11 13:24 +0200 http://bitbucket.org/pypy/pypy/changeset/93a61b856da8/ Log: Fix this test to also pass on CPython diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -566,20 +566,19 @@ assert 'setdefaultencoding' in dir(sys) def test_reimport_builtin(self): - # ...but not reload()! - import sys + import sys, time oldpath = sys.path - sys.setdefaultencoding = "" + time.tzset = "" - del sys.modules['sys'] - import sys as sys1 - assert sys.modules['sys'] is sys1 is sys + del sys.modules['time'] + import time as time1 + assert sys.modules['time'] is time1 - assert sys.path is oldpath - assert sys.setdefaultencoding == "" + assert time.tzset == "" - reload(sys) # fix it for people that want 'setdefaultencoding' - assert sys.setdefaultencoding != "" + reload(time1) # don't leave a broken time.tzset behind + import time + assert time.tzset != "" def test_reload_infinite(self): import infinite_reload From noreply at buildbot.pypy.org Tue Jun 11 13:26:17 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:26:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Add two new tests, one of which fails on PyPy because we don't Message-ID: <20130611112617.EFEA31C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64850:7d2924ddfa80 Date: 2013-06-11 13:24 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2924ddfa80/ Log: Add two new tests, one of which fails on PyPy because we don't create several module objects for the same *built-in* module. diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -200,3 +200,48 @@ except KeyError: pass rmtree(dir_name, True) + + def test_builtin_reimport(self): + # from https://bugs.pypy.org/issue1514 + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. 
+ saved = sys.modules.pop('marshal') + __import__('marshal') + sys.modules['marshal'] = saved + + assert marshal.loads == 42 + import marshal + assert marshal.loads == 42 + marshal.loads = old + + def test_builtin_reimport_mess(self): + # taken from https://bugs.pypy.org/issue1514, with extra cases + # that show a difference with CPython: we can get on CPython + # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. + saved = sys.modules.pop('marshal') + marshal2 = __import__('marshal') + assert marshal2 is not marshal + assert marshal2.loads is old + assert marshal2 is sys.modules['marshal'] + assert marshal is saved + assert marshal.loads == 42 + + import marshal + assert marshal.loads is old + + sys.modules['marshal'] = saved + import marshal + assert marshal.loads == 42 + + marshal.loads = old From noreply at buildbot.pypy.org Tue Jun 11 13:39:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:39:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a simpler test (missing so far) that shows what I did to be wrong. Message-ID: <20130611113947.D957C1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64851:4dce95a2e895 Date: 2013-06-11 13:33 +0200 http://bitbucket.org/pypy/pypy/changeset/4dce95a2e895/ Log: Add a simpler test (missing so far) that shows what I did to be wrong. diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -565,6 +565,13 @@ assert sys.path is oldpath assert 'setdefaultencoding' in dir(sys) + def test_reimport_builtin_simple_case(self): + import sys, time + time.foo = "bar" + del sys.modules['time'] + import time + assert not hasattr(time, 'foo') + def test_reimport_builtin(self): import sys, time oldpath = sys.path From noreply at buildbot.pypy.org Tue Jun 11 13:39:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:39:49 +0200 (CEST) Subject: [pypy-commit] pypy default: backout f26956c61773. Message-ID: <20130611113949.3D71E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64852:1265890f2384 Date: 2013-06-11 13:34 +0200 http://bitbucket.org/pypy/pypy/changeset/1265890f2384/ Log: backout f26956c61773. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -381,10 +381,8 @@ except AttributeError: return self.__class__.__name__ - def _prepare_mixedmodule(self, importname): - """NOT_RPYTHON. Load the RPython code of a lazy pypy/module and put - it into 'self.builtin_modules', without any further initialization. - """ + def setbuiltinmodule(self, importname): + """NOT_RPYTHON. load a lazy pypy/module and put it into sys.modules""" if '.' in importname: fullname = importname importname = fullname.rsplit('.', 1)[1] @@ -403,22 +401,20 @@ return name - @jit.elidable - def getbuiltinmodule(self, name): - """Return the built-in module 'name'. The first time it is seen, - it is initialized and stored in 'sys.modules'. This function is - elidable by the JIT because its effect is idempotent (if you call - it twice with the same name, you're getting the same effect as if - it was only called once). 
- """ - return self.loadbuiltinmodule(name, force_in_sys_modules=False, - force_init=False) + def getbuiltinmodule(self, name, force_init=False): + w_name = self.wrap(name) + w_modules = self.sys.get('modules') + try: + w_mod = self.getitem(w_modules, w_name) + except OperationError, e: + if not e.match(self, self.w_KeyError): + raise + else: + if not force_init: + return w_mod - def loadbuiltinmodule(self, name, force_in_sys_modules, force_init): - """For the importing logic. Get the built-in module, stick it - into 'sys.modules' if not initialized or if force_in_sys_modules, - and initialize it if it was not already or if force_init. - """ + # If the module is a builtin but not yet imported, + # retrieve it and initialize it try: w_mod = self.builtin_modules[name] except KeyError: @@ -426,20 +422,15 @@ self.w_SystemError, "getbuiltinmodule() called " "with non-builtin module %s", name) + else: + # Add the module to sys.modules + self.setitem(w_modules, w_name, w_mod) - from pypy.interpreter.module import Module - if isinstance(w_mod, Module) and not w_mod.startup_called: - force_in_sys_modules = True # not initialized so far: - force_init = True # force initialization - - if force_in_sys_modules: - w_sys_modules = self.sys.get('modules') - self.setitem(w_sys_modules, self.wrap(name), w_mod) - - if force_init and isinstance(w_mod, Module): - w_mod.init(self) - - return w_mod + # And initialize it + from pypy.interpreter.module import Module + if isinstance(w_mod, Module): + w_mod.init(self) + return w_mod def get_builtinmodule_to_install(self): """NOT_RPYTHON""" @@ -556,12 +547,12 @@ def install_mixedmodule(self, mixedname, installed_builtin_modules): """NOT_RPYTHON""" - modname = self._prepare_mixedmodule(mixedname) - assert modname - assert modname not in installed_builtin_modules, ( - "duplicate interp-level module enabled for the " - "app-level module %r" % (modname,)) - installed_builtin_modules.append(modname) + modname = self.setbuiltinmodule(mixedname) + if modname: + assert modname not in installed_builtin_modules, ( + "duplicate interp-level module enabled for the " + "app-level module %r" % (modname,)) + installed_builtin_modules.append(modname) def setup_builtin_modules(self): "NOT_RPYTHON: only for initializing the space." 
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -573,16 +573,14 @@ load_extension_module(space, filename, modulename) @jit.dont_look_inside -def load_module(space, w_modulename, find_info, reuse=False, force_init=False): +def load_module(space, w_modulename, find_info, reuse=False): if find_info is None: return if find_info.w_loader: return space.call_method(find_info.w_loader, "load_module", w_modulename) if find_info.modtype == C_BUILTIN: - return space.loadbuiltinmodule(find_info.filename, - force_in_sys_modules=True, - force_init=force_init) + return space.getbuiltinmodule(find_info.filename, force_init=True) if find_info.modtype in (PY_SOURCE, PY_COMPILED, C_EXTENSION, PKG_DIRECTORY): w_mod = None @@ -723,8 +721,7 @@ try: try: - return load_module(space, w_modulename, find_info, reuse=True, - force_init=True) + return load_module(space, w_modulename, find_info, reuse=True) finally: if find_info.stream: find_info.stream.close() From noreply at buildbot.pypy.org Tue Jun 11 13:39:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:39:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Skip the tests: these are new tests that should be fixed, as they pass Message-ID: <20130611113950.7BC5E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64853:337bb898c894 Date: 2013-06-11 13:37 +0200 http://bitbucket.org/pypy/pypy/changeset/337bb898c894/ Log: Skip the tests: these are new tests that should be fixed, as they pass on CPython. diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -203,6 +203,7 @@ def test_builtin_reimport(self): # from https://bugs.pypy.org/issue1514 + skip("fix me") import sys, marshal old = marshal.loads diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -565,7 +565,15 @@ assert sys.path is oldpath assert 'setdefaultencoding' in dir(sys) - def test_reimport_builtin_simple_case(self): + def test_reimport_builtin_simple_case_1(self): + import sys, time + del time.tzset + del sys.modules['time'] + import time + assert hasattr(time, 'tzset') + + def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] @@ -573,6 +581,7 @@ assert not hasattr(time, 'foo') def test_reimport_builtin(self): + skip("fix me") import sys, time oldpath = sys.path time.tzset = "" From noreply at buildbot.pypy.org Tue Jun 11 13:39:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:39:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Easy fix Message-ID: <20130611113951.B0F881C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64854:ad3579cf28fe Date: 2013-06-11 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/ad3579cf28fe/ Log: Easy fix diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -43,6 +43,7 @@ # the module was already imported. Refresh its content with # the saved dict, as done with built-in and extension modules # on CPython. 
+ space.call_method(self.w_dict, 'clear') space.call_method(self.w_dict, 'update', self.w_initialdict) for w_submodule in self.submodules_w: diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -573,7 +573,6 @@ assert hasattr(time, 'tzset') def test_reimport_builtin_simple_case_2(self): - skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] From noreply at buildbot.pypy.org Tue Jun 11 13:57:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 13:57:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Kill even the "easy fix", add a test (thanks amaury) Message-ID: <20130611115723.6BD441C0328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64855:ece10991099e Date: 2013-06-11 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ece10991099e/ Log: Kill even the "easy fix", add a test (thanks amaury) diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -43,7 +43,6 @@ # the module was already imported. Refresh its content with # the saved dict, as done with built-in and extension modules # on CPython. - space.call_method(self.w_dict, 'clear') space.call_method(self.w_dict, 'update', self.w_initialdict) for w_submodule in self.submodules_w: diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -565,6 +565,12 @@ assert sys.path is oldpath assert 'setdefaultencoding' in dir(sys) + def test_reload_builtin_doesnt_clear(self): + import sys + sys.foobar = "baz" + reload(sys) + assert sys.foobar == "baz" + def test_reimport_builtin_simple_case_1(self): import sys, time del time.tzset @@ -573,6 +579,7 @@ assert hasattr(time, 'tzset') def test_reimport_builtin_simple_case_2(self): + skip("fix me") import sys, time time.foo = "bar" del sys.modules['time'] From noreply at buildbot.pypy.org Tue Jun 11 15:23:22 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 15:23:22 +0200 (CEST) Subject: [pypy-commit] pypy default: try to be on the safe side Message-ID: <20130611132322.EA2381C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64856:9e9835f0f54b Date: 2013-06-11 15:22 +0200 http://bitbucket.org/pypy/pypy/changeset/9e9835f0f54b/ Log: try to be on the safe side diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -29,14 +29,13 @@ try: # first try to print the exception's class name stderr = sys.stderr - stderr.write(getattr(exctype, '__name__', exctype)) + stderr.write(str(getattr(exctype, '__name__', exctype))) # then attempt to get the str() of the exception try: s = str(value) except: s = '' - # then print it, and don't worry too much about the extra space - # between the exception class and the ':' + # then print it if s: stderr.write(': %s\n' % (s,)) else: From noreply at buildbot.pypy.org Tue Jun 11 16:06:16 2013 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 11 Jun 2013 16:06:16 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: hg merge default Message-ID: <20130611140616.727AD1C0F88@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r64857:0da9b23be0af Date: 2013-06-11 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/0da9b23be0af/ Log: hg 
merge default diff too long, truncating to 2000 out of 25881 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. 
+import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,9 +1,17 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. +This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -49,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -71,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,21 +134,25 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', } +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, +} +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -164,7 +168,7 @@ Otherwise, the string "Level %s" % level is returned. 
""" - return _levelNames.get(level, ("Level %s" % level)) + return _levelToName.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ @@ -174,8 +178,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... - _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +187,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +281,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,11 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - self._sock._decref_socketios() + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) 
logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -130,7 +130,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cd.py'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), @@ -177,7 +177,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = 
property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -166,8 +166,7 @@ if self is StructOrUnion: return if '_fields_' not in self.__dict__: - self._fields_ = [] - _set_shape(self, [], self._is_union) + self._fields_ = [] # As a side-effet, this also sets the ffishape. __setattr__ = struct_setattr diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! 
diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_pypy_testcapi.py copy from lib_pypy/_testcapi.py copy to lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,14 +1,20 @@ -import os, sys +import os, sys, imp import tempfile -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. """ thisdir = os.path.dirname(__file__) output_dir = tempfile.mkdtemp() from distutils.ccompiler import new_compiler - from distutils import sysconfig compiler = new_compiler() compiler.output_dir = output_dir @@ -19,13 +25,13 @@ ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] else: ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], + res = compiler.compile([os.path.join(thisdir, csource)], include_dirs=[include_dir], extra_preargs=ccflags) object_filename = res[0] # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') + output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': # XXX libpypy-c.lib is currently not installed automatically library = os.path.join(thisdir, '..', 'include', 'libpypy-c') @@ -37,7 +43,7 @@ library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] + '/EXPORT:init' + modulename] else: libraries = [] extra_ldargs = [] @@ -49,9 +55,7 @@ libraries=libraries, extra_preargs=extra_ldargs) - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,57 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - 
library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.6" -__version_info__ = (0, 6) +__version__ = "0.7" +__version_info__ = (0, 7) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -73,15 +73,15 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - BVoidP = self._get_cached_btype(model.voidp_type) + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(BVoidP, 0) + FFI.NULL = self.cast(self.BVoidP, 0) FFI.CData, FFI.CType = backend._get_types() else: # ctypes backend: attach these constants to the instance - self.NULL = self.cast(BVoidP, 0) + self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False): @@ -346,6 +346,12 @@ self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + def _make_ffi_library(ffi, libname, flags): import os @@ -355,13 +361,13 @@ backend = ffi._backend try: if '.' 
not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # @@ -372,8 +378,8 @@ BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) - except KeyError: - raise AttributeError(name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) library.__dict__[name] = value return # diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -16,6 +16,7 @@ class CTypesData(object): __metaclass__ = CTypesType __slots__ = ['__weakref__'] + __name__ = '' def __init__(self, *args): raise TypeError("cannot instantiate %r" % (self.__class__,)) @@ -491,6 +492,8 @@ elif BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char'))): kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' else: kind = 'generic' # @@ -546,13 +549,13 @@ def __setitem__(self, index, value): self._as_ctype_ptr[index] = BItem._to_ctypes(value) - if kind == 'charp': + if kind == 'charp' or kind == 'voidp': @classmethod - def _arg_to_ctypes(cls, value): - if isinstance(value, bytes): - return ctypes.c_char_p(value) + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) else: - return super(CTypesPtr, cls)._arg_to_ctypes(value) + return super(CTypesPtr, cls)._arg_to_ctypes(*value) if kind == 'charp' or kind == 'bytep': def _to_string(self, maxlen): diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,6 +15,20 @@ def patch_extension_kwds(self, kwds): pass + def find_module(self, module_name, path, so_suffix): + try: + f, filename, descr = imp.find_module(module_name, path) + except ImportError: + return None + if f is not None: + f.close() + # Note that after a setuptools installation, there are both .py + # and .so files with the same basename. The code here relies on + # imp.find_module() locating the .so in priority. 
+ if descr[0] != so_suffix: + return None + return filename + def collect_types(self): self._typesdict = {} self._generate("collecttype") @@ -142,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -427,9 +444,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -687,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -1,4 +1,4 @@ -import sys +import sys, os import types from . import model, ffiplatform @@ -20,6 +20,16 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) + def find_module(self, module_name, path, so_suffix): + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + return None + def collect_types(self): pass # not needed in the generic engine @@ -64,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -158,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -216,9 +230,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -380,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -427,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -440,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -465,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. @@ -476,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -102,21 +102,10 @@ path = pkg.__path__ else: path = None - try: - f, filename, descr = imp.find_module(self.get_module_name(), - path) - except ImportError: + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffix()) + if filename is None: return - if f is not None: - f.close() - if filename.lower().endswith('.py'): - # on PyPy, if there are both .py and .pypy-19.so files in - # the same directory, the .py file is returned. 
That's the - # case after a setuptools installation. We never want to - # load the .py file here... - filename = filename[:-3] + _get_so_suffix() - if not os.path.isfile(filename): - return self.modulefilename = filename self._vengine.collect_types() self._has_module = True diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -64,7 +64,8 @@ del working_modules["termios"] del working_modules["_minimal_curses"] - del working_modules["cppyy"] # not tested on win32 + if "cppyy" in working_modules: + del working_modules["cppyy"] # not tested on win32 # The _locale module is needed by site.py on Windows default_modules["_locale"] = None @@ -77,7 +78,8 @@ del working_modules["_minimal_curses"] del working_modules["termios"] del working_modules["_multiprocessing"] # depends on rctime - del working_modules["cppyy"] # depends on ctypes + if "cppyy" in working_modules: + del working_modules["cppyy"] # depends on ctypes module_dependencies = { @@ -120,12 +122,10 @@ __import__(name) except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ - config.add_warning( + raise Exception( "The module %r is disabled\n" % (modname,) + "because importing %s raised %s\n" % (name, errcls) + str(e)) - raise ConflictConfigError("--withmod-%s: %s" % (modname, - errcls)) return validator else: return None @@ -216,10 +216,6 @@ "(the empty string and potentially single-char strings)", default=False), - BoolOption("withsmalltuple", - "use small tuples", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -364,6 +360,7 @@ # ignore names from 'essential_modules', notably 'exceptions', which # may not be present in config.objspace.usemodules at all modules = [name for name in modules if name not in essential_modules] + config.objspace.usemodules.suggest(**dict.fromkeys(modules, True)) def enable_translationmodules(config): diff --git a/pypy/doc/__pypy__-module.rst 
b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,6 @@ +.. comment: this document is very incomplete, should we generate + it automatically? + The ``__pypy__`` module ======================= diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.0.0' +release = '2.0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -157,6 +157,9 @@ $ genreflex MyClass.h $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex +Next, make sure that the library can be found through the dynamic lookup path +(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), +for example by adding ".". Now you're ready to use the bindings. Since the bindings are designed to look pythonistic, it should be straightforward:: diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -33,7 +33,8 @@ Layers ------ -PyPy has layers. Those layers help us keep the respective parts separated enough +PyPy has layers. Just like Ogres or onions. +Those layers help us keep the respective parts separated enough to be worked on independently and make the complexity manageable. This is, again, just a sanity requirement for such a complex project. For example writing a new optimization for the JIT usually does **not** involve touching a Python diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -22,7 +22,8 @@ will capture the revision number of this change for the release; some of the next updates may be done before or after branching; make sure things are ported back to the trunk and to the branch as - necessary + necessary; also update the version number in pypy/doc/conf.py, + and in pypy/doc/index.rst * update pypy/doc/contributor.rst (and possibly LICENSE) * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release diff --git a/pypy/doc/index-old.rst b/pypy/doc/index-old.rst --- a/pypy/doc/index-old.rst +++ b/pypy/doc/index-old.rst @@ -43,7 +43,7 @@ * :doc:`FAQ `: some frequently asked questions. -* `Release 2.0`_: the latest official release +* `Release 2.0.2`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -57,7 +57,7 @@ particularly organized .. _PyPy blog: http://morepypy.blogspot.com/ -.. _Release 2.0: http://pypy.org/download.html +.. _Release 2.0.2: http://pypy.org/download.html .. _speed.pypy.org: http://speed.pypy.org .. toctree:: diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). 
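As a quick illustration of the -O behaviour documented in the man-page hunk just above (a hypothetical session; the file name demo_opt.py and the abbreviated traceback are made up for the example and are not part of any changeset): once sys.flags.optimize is >= 1, the interpreter-level changes in this series call __pypy__.set_debug(False) at startup, so assert statements are skipped and __debug__ reads as False, while docstrings are only dropped with -OO.

    $ cat demo_opt.py
    assert False, "this assert should be skipped under -O"
    print "__debug__ =", __debug__

    $ pypy demo_opt.py          # default run: the assert fires
    Traceback (most recent call last):
      ...
    AssertionError: this assert should be skipped under -O

    $ pypy -O demo_opt.py       # -O: assert skipped, __debug__ cleared
    __debug__ = False
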
diff --git a/pypy/doc/release-2.0.1.rst b/pypy/doc/release-2.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.1.rst @@ -0,0 +1,46 @@ +============================== +PyPy 2.0.1 - Bohr Smørrebrød +============================== + +We're pleased to announce PyPy 2.0.1. This is a stable bugfix release +over `2.0`_. You can download it here: + + http://pypy.org/download.html + +The fixes are mainly about fatal errors or crashes in our stdlib. See +below for more details. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +- fix an occasional crash in the JIT that ends in `RPython Fatal error: + NotImplementedError`__. + +- `id(x)` is now always a positive number (except on int/float/long/complex). + This fixes an issue in ``_sqlite.py`` (mostly for 32-bit Linux). + +- fix crashes of callback-from-C-functions (with cffi) when used together + with Stackless features, on asmgcc (i.e. Linux only). Now `gevent should + work better`__. + +- work around an eventlet issue with `socket._decref_socketios()`__. + +.. __: https://bugs.pypy.org/issue1482 +.. __: http://mail.python.org/pipermail/pypy-dev/2013-May/011362.html +.. __: https://bugs.pypy.org/issue1468 +.. _2.0: release-2.0.0.html + +Cheers, +arigo et. al. for the PyPy team diff --git a/pypy/doc/release-2.0.2.rst b/pypy/doc/release-2.0.2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.2.rst @@ -0,0 +1,46 @@ +========================= +PyPy 2.0.2 - Fermi Panini +========================= + +We're pleased to announce PyPy 2.0.2. This is a stable bugfix release +over `2.0`_ and `2.0.1`_. You can download it here: + + http://pypy.org/download.html + +It fixes a crash in the JIT when calling external C functions (with +ctypes/cffi) in a multithreaded context. + +.. _2.0: release-2.0.0.html +.. _2.0.1: release-2.0.1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +This release contains only the fix described above. A crash (or wrong +results) used to occur if all these conditions were true: + +- your program is multithreaded; + +- it runs on a single-core machine or a heavily-loaded multi-core one; + +- it uses ctypes or cffi to issue external calls to C functions. + +This was fixed in the branch `emit-call-x86`__ (see the example file +``bug1.py``). + +.. __: https://bitbucket.org/pypy/pypy/commits/7c80121abbf4 + +Cheers, +arigo et. al. 
for the PyPy team diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -19,23 +19,28 @@ branches.discard('default') return startrev, branches -def get_merged_branches(path, startrev, endrev): - if getstatusoutput('hg root')[0]: +def get_merged_branches(path, startrev, endrev, current_branch=None): + errcode, wc_branch = getstatusoutput('hg branch') + if errcode != 0: py.test.skip('no Mercurial repo') + if current_branch is None: + current_branch = wc_branch # X = take all the merges which are descendants of startrev and are on default # revset = all the parents of X which are not on default # ===> # revset contains all the branches which have been merged to default since # startrev - revset = 'parents(%s::%s and \ + revset = "parents(%s::%s and \ merge() and \ - branch(default)) and \ - not branch(default)' % (startrev, endrev) + branch('%s')) and \ + not branch('%s')" % (startrev, endrev, + current_branch, current_branch) cmd = r'hg log -R "%s" -r "%s" --template "{branches}\n"' % (path, revset) out = getoutput(cmd) branches = set(map(str.strip, out.splitlines())) - return branches + branches.discard("default") + return branches, current_branch def test_parse_doc(): @@ -65,7 +70,8 @@ assert branches == set(['foobar', 'hello']) def test_get_merged_branches(): - branches = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93') + branches, _ = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93', + 'default') assert branches == set(['numpy-indexing-by-arrays-bool', 'better-jit-hooks-2', 'numpypy-ufuncs']) @@ -76,7 +82,9 @@ whatsnew_list.sort() last_whatsnew = whatsnew_list[-1].read() startrev, documented = parse_doc(last_whatsnew) - merged = get_merged_branches(ROOT, startrev, '') + merged, branch = get_merged_branches(ROOT, startrev, '') + merged.discard('default') + merged.discard('') not_documented = merged.difference(documented) not_merged = documented.difference(merged) print 'Branches merged but not documented:' @@ -85,4 +93,6 @@ print 'Branches documented but not merged:' print '\n'.join(not_merged) print - assert not not_documented and not not_merged + assert not not_documented + if branch == 'default': + assert not not_merged diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,48 @@ .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support + +.. branch: remove-tuple-smm +Remove multi-methods on tuple + +.. branch: remove-iter-smm +Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. 
branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -8,7 +8,9 @@ arch = 'linux' cmd = 'wget "%s"' tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'" -if sys.platform.startswith('darwin'): + if os.uname()[-1].startswith('arm'): + arch += '-armhf-raspbian' +elif sys.platform.startswith('darwin'): arch = 'osx' cmd = 'curl -O "%s"' tar = "tar -x -v --strip-components=2 -f %s '*/bin/pypy'" diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -2,6 +2,7 @@ import os, sys +import pypy from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.tool.ann_override import PyPyAnnotatorPolicy @@ -9,6 +10,8 @@ from rpython.config.config import ConflictConfigError from pypy.tool.option import make_objspace from pypy.conftest import pypydir +from rpython.rlib import rthread +from pypy.module.thread import os_thread thisdir = py.path.local(__file__).dirpath() @@ -78,13 +81,65 @@ # should be used as sparsely as possible, just to register callbacks from rpython.rlib.entrypoint import entrypoint - from rpython.rtyper.lltypesystem import rffi + from rpython.rtyper.lltypesystem import rffi, lltype + + w_pathsetter = space.appexec([], """(): + def f(path): + import sys + sys.path[:] = path + return f + """) + + @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + def pypy_setup_home(ll_home, verbose): + from pypy.module.sys.initpath import pypy_find_stdlib + if ll_home: + home = rffi.charp2str(ll_home) + else: + home = pypydir + w_path = pypy_find_stdlib(space, home) + if space.is_none(w_path): + if verbose: + debug("Failed to find library based on pypy_find_stdlib") + return 1 + space.startup() + space.call_function(w_pathsetter, w_path) + # import site + try: + import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + space.wrap('__import__')) + space.call_function(import_, space.wrap('site')) + return 0 + except OperationError, e: + if verbose: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return 1 @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) return _pypy_execute_source(source) + @entrypoint('main', [], c_name='pypy_init_threads') + def pypy_init_threads(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + rffi.aroundstate.before() + + @entrypoint('main', [], c_name='pypy_thread_attach') + def pypy_thread_attach(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + os_thread.bootstrapper.acquire(space, None, None) + rthread.gc_thread_start() + os_thread.bootstrapper.nbthreads += 1 + os_thread.bootstrapper.release() + rffi.aroundstate.before() + w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), space.builtin_modules['__builtin__']) @@ -101,7 +156,10 @@ return 1 return 0 - return entry_point, _pypy_execute_source # for tests + return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} def call_finish(space): 
space.finish() diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,8 @@ # App-level version of py.py. # See test/test_app_main. -# Missing vs CPython: -d, -OO, -t, -v, -x, -3 -"""\ +# Missing vs CPython: -d, -t, -v, -x, -3 +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +12,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -R : ignored (see http://bugs.python.org/issue14621) -Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -27,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -470,6 +470,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -86,12 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - w_type = space.type(w_stararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after * must be " - "a sequence, not %s" % (typename,))) + "argument after * must be a sequence, not %T", w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -116,12 +113,10 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - w_type = space.type(w_starstararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after ** must be " - "a mapping, not %s" % (typename,))) + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -632,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2793,8 +2793,7 @@ def Module_get_body(space, w_self): if not 
w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2835,8 +2834,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2881,8 +2879,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2925,8 +2922,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2971,8 +2967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2993,8 +2988,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3024,8 +3018,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3046,8 +3039,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3064,8 +3056,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, 
"'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3081,8 +3072,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3131,8 +3121,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3149,8 +3138,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3166,8 +3154,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3183,8 +3170,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3234,8 +3220,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3278,8 +3263,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3320,8 +3304,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3341,8 +3324,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3391,8 +3373,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3415,8 +3396,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3439,8 +3419,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3489,8 +3468,7 @@ From noreply at buildbot.pypy.org Tue Jun 11 16:29:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 16:29:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20130611142903.007611C1026@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r92:b30306fc8a86 Date: 2013-06-11 16:28 +0200 http://bitbucket.org/pypy/stmgc/changeset/b30306fc8a86/ Log: in-progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -35,6 +35,10 @@ return (P->h_revision == stm_private_rev_num) || (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); } +int _stm_is_private(gcptr P) +{ + return is_private(P); +} /************************************************************/ @@ -208,82 +212,89 @@ } } -#if 0 -static gcptr _latest_gcptr(gcptr R) +gcptr _stm_nonrecord_barrier(gcptr G) { - /* don't use, for tests only */ + /* follows the logic in stm_DirectReadBarrier() */ + struct tx_descriptor *d = thread_descriptor; + gcptr P = G; revision_t v; - retry: - v = R->h_revision; - if (!(v & 1)) // "is a pointer", i.e. 
- { // "has a more recent revision" - if (v & 2) + + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + { + private_from_protected: + if (!(((gcptr)P->h_revision)->h_revision & 1)) { - v &= ~2; - if (!stmgc_is_young_in(thread_descriptor, (gcptr)v)) - return NULL; /* can't access */ + fprintf(stderr, "_stm_nonrecord_barrier: %p -> NULL " + "private_from_protected but protected changed\n", G); + return NULL; } - R = (gcptr)v; - goto retry; - } - return R; -} - -gcptr _stm_nonrecord_barrier(gcptr obj, int *result) -{ - /* warning, this is for tests only, and it is not thread-safe! */ - struct tx_descriptor *d = thread_descriptor; - if (gcptrlist_size(&d->stolen_objects) > 0) - stmgc_normalize_stolen_objects(); - - enum protection_class_t e = stmgc_classify(obj); - if (e == K_PRIVATE) - { - *result = 2; /* 'obj' a private object to start with */ - return obj; - } - obj = _latest_gcptr(obj); - if (obj == NULL) - { - assert(e == K_PUBLIC); - *result = 3; /* can't check anything: we'd need foreign access */ - return NULL; - } - if (stmgc_classify(obj) == K_PRIVATE) - { - *result = 1; - return obj; + goto add_in_recent_reads_cache; } - wlog_t *item; - G2L_LOOP_FORWARD(d->public_to_private, item) + if (P->h_tid & GCFLAG_PUBLIC) { - gcptr R = item->addr; - gcptr L = item->val; - if (_latest_gcptr(R) == obj) + while (v = ACCESS_ONCE(P->h_revision), !(v & 1)) { - /* 'obj' has a private version. The way we detect this lets us - find it even if we already have a committed version that - will cause conflict. */ - *result = 1; - return L; + if (v & 2) + goto follow_stub; + + P = (gcptr)v; + assert(P->h_tid & GCFLAG_PUBLIC); } - } G2L_LOOP_END; - if (obj->h_revision > d->start_time) - { - /* 'obj' has no private version, and the public version was modified */ - *result = -1; - return NULL; + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + { + wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + P = item->val; + found_in_stolen_objects: + assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(is_private(P)); + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p " + "public_to_private\n", G, P); + return P; + + no_private_obj:; + gcptr L = _stm_find_stolen_objects(d, P); + if (L != NULL) + { + P = L; + goto found_in_stolen_objects; + } + } + + if (UNLIKELY(v > d->start_time)) + { + fprintf(stderr, "_stm_nonrecord_barrier: %p -> NULL changed\n", G); + return NULL; // object too recent + } + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p public\n", G, P); } else { - /* 'obj' has only an up-to-date public version */ - *result = 0; - return obj; + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p protected\n", G, P); + } + + register_in_list_of_read_objects: + add_in_recent_reads_cache: + return P; + + follow_stub:; + P = (gcptr)(v - 2); + assert(!(P->h_tid & GCFLAG_PUBLIC)); + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + { + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p handle " + "private_from_protected\n", G, P); + goto private_from_protected; + } + else + { + fprintf(stderr, "read_barrier: %p -> %p handle\n", G, P); + goto register_in_list_of_read_objects; } } -#endif #if 0 void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset) diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -164,7 +164,9 @@ gcptr stm_DirectReadBarrier(gcptr); gcptr stm_RepeatReadBarrier(gcptr); gcptr stm_WriteBarrier(gcptr); -gcptr _stm_nonrecord_barrier(gcptr, int *); +gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but + not recording anything */ +int 
_stm_is_private(gcptr); /* debugging */ gcptr stm_get_private_from_protected(long); /* debugging */ gcptr stm_get_read_obj(long); /* debugging */ gcptr stmgc_duplicate(gcptr); diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -96,3 +96,20 @@ } gcptrlist_clear(&d->public_descriptor->stolen_objects); } + +gcptr _stm_find_stolen_objects(struct tx_descriptor *d, gcptr obj) +{ + /* read-only, for debugging */ + long i, size = d->public_descriptor->stolen_objects.size; + gcptr *items = d->public_descriptor->stolen_objects.items; + + for (i = 0; i < size; i += 2) { + gcptr B = items[i]; + gcptr L = items[i + 1]; + + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + if (B == obj) + return L; + } + return NULL; +} diff --git a/c4/steal.h b/c4/steal.h --- a/c4/steal.h +++ b/c4/steal.h @@ -11,6 +11,7 @@ void stm_steal_stub(gcptr); gcptr stm_get_stolen_obj(long index); /* debugging */ void stm_normalize_stolen_objects(struct tx_descriptor *); +gcptr _stm_find_stolen_objects(struct tx_descriptor *, gcptr); #endif diff --git a/c3/test/model.py b/c4/test/model.py copy from c3/test/model.py copy to c4/test/model.py diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -63,7 +63,8 @@ //void stmgcpage_add_prebuilt_root(gcptr); void stm_clear_between_tests(void); //void stmgc_minor_collect(void); - //gcptr _stm_nonrecord_barrier(gcptr, int *); + gcptr _stm_nonrecord_barrier(gcptr); + int _stm_is_private(gcptr); int stm_dbgmem_is_active(void *p, int allow_outside); void stm_start_sharedlock(void); void stm_stop_sharedlock(void); @@ -436,7 +437,7 @@ def nalloc_refs(nrefs): "Allocate a fresh object from the nursery, with nrefs pointers" p = lib.stm_allocate(HDR + WORD * nrefs, 421 + nrefs) - assert p.h_revision == lib.get_local_revision() + assert p.h_revision == lib.get_private_rev_num() for i in range(nrefs): assert rawgetptr(p, i) == ffi.NULL # must already be zero-filled return p diff --git a/c3/test/test_random.py b/c4/test/test_random.py copy from c3/test/test_random.py copy to c4/test/test_random.py --- a/c3/test/test_random.py +++ b/c4/test/test_random.py @@ -191,12 +191,10 @@ return newextra def nonrecord_barrier(self, ptr): - result = ffi.new("int *") - ptr = lib._stm_nonrecord_barrier(ptr, result) - return ptr, result[0] + return lib._stm_nonrecord_barrier(ptr) def is_private(self, ptr): - return ptr.h_revision == lib.get_local_revision() + return lib._stm_is_private(ptr) def check_valid(self, lst): lst = list(lst) @@ -207,23 +205,18 @@ continue self.check(p) - ptr, result = self.nonrecord_barrier(p.ptr) - if ptr == ffi.NULL and result == 3: - continue # can't check anything: we'd need foreign access + ptr = self.nonrecord_barrier(p.ptr) has_private_copy = p.obj in self.current_rev.content - assert has_private_copy == (result >= 1) if has_private_copy: + assert ptr != ffi.NULL and self.is_private(ptr) content = self.current_rev.content[p.obj] else: try: content = self.current_rev._try_read(p.obj) - is_too_recent = False except model.Deleted: - is_too_recent = True - if result < 0: - assert is_too_recent - if is_too_recent: - continue # can't really check more in this case + assert ptr == ffi.NULL + continue + assert ptr != ffi.NULL and not self.is_private(ptr) self.check_not_free(ptr) assert lib.rawgetptr(ptr, 2) == p.obj.identity From noreply at buildbot.pypy.org Tue Jun 11 23:09:50 2013 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Jun 2013 23:09:50 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fixes. 
Now I have a clear idea about which thread can change exactly Message-ID: <20130611210950.5BF731C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r93:2d0f4e06ce3a Date: 2013-06-11 22:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/2d0f4e06ce3a/ Log: Fixes. Now I have a clear idea about which thread can change exactly which parts of which objects. diff --git a/c4/doc-objects.txt b/c4/doc-objects.txt --- a/c4/doc-objects.txt +++ b/c4/doc-objects.txt @@ -245,3 +245,28 @@ make a stub with h_revision = private object | 2 after a CPU write barrier, make the public h_revision to point to the stub + + + +Change to the flags and h_revision +---------------------------------- + +The flags are in `h_tid`. Changes to this field and `h_revision` must +not occur uncontrolled: + +- private copies: the thread that owns the private copy can change +freely the `h_tid` and `h_revision` fields. The other threads must not +touch them, and must read them carefully. This concerns only stealing +threads, on GCFLAG_PRIVATE_FROM_PROTECTED objects. The flag +GCFLAG_PRIVATE_FROM_PROTECTED itself is only changed when the owning +thread has got its collection_lock, and as long as it is set, h_revision +points to the backup copy. + +- protected copies (includes backup copies): any change requires the +owning thread's collection_lock. During stealing, other threads +might add (with the collection_lock) the flags GCFLAG_PUBLIC or +GCFLAG_PUBLIC_TO_PRIVATE. + +- public copies: must be changed carefully: `h_tid` is only modified to +add GCFLAG_PUBLIC_TO_PRIVATE; and `h_revision` changes are done with +bool_cas() in a thread-controlled way. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -75,9 +75,11 @@ gcptr P = G; revision_t v; + restart_all: if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - private_from_protected: + assert(!(P->h_revision & 1)); /* pointer to the backup copy */ + /* check P->h_revision->h_revision: if a pointer, then it means the backup copy has been stolen into a public object and then modified by some other thread. Abort. */ @@ -86,15 +88,20 @@ goto add_in_recent_reads_cache; } + /* else, for the rest of this function, we can assume that P was not + a private copy */ if (P->h_tid & GCFLAG_PUBLIC) { /* follow the chained list of h_revision's as long as they are - regular pointers */ - retry: + regular pointers. We will only find more public objects + along this chain. + */ + restart_all_public: v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" + retry: if (v & 2) goto follow_stub; @@ -114,23 +121,33 @@ doing this write occasionally based on a counter in d */ P_prev->h_revision = v; P = (gcptr)v; - goto retry; + v = ACCESS_ONCE(P->h_revision); + if (!(v & 1)) // "is a pointer", i.e. "has a more recent rev" + goto retry; + } + + /* We reach this point if P != G only. Check again the + read_barrier_cache: if P now hits the cache, just return it + */ + if (FXCACHE_AT(P) == P) + { + fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P); + return P; } } - /* if we land on a P in read_barrier_cache: just return it */ - if (FXCACHE_AT(P) == P) - { - fprintf(stderr, "read_barrier: %p -> %p fxcache\n", G, P); - return P; - } - + /* If we land on a P with GCFLAG_PUBLIC_TO_PRIVATE, it might be + because *we* have an entry in d->public_to_private. (It might + also be someone else.) 
+ */ if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *item; retry_public_to_private:; G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + /* We have a key in 'public_to_private'. The value is the + corresponding private object. */ P = item->val; assert(!(P->h_tid & GCFLAG_PUBLIC)); assert(is_private(P)); @@ -138,6 +155,8 @@ return P; no_private_obj: + /* Key not found. It might be because there really is none, or + because we still have it waiting in 'stolen_objects'. */ if (d->public_descriptor->stolen_objects.size > 0) { spinlock_acquire(d->public_descriptor->collection_lock, 'N'); @@ -147,7 +166,8 @@ } } - if (UNLIKELY(v > d->start_time)) // object too recent? + /* The answer is a public object. Is it too recent? */ + if (UNLIKELY(v > d->start_time)) { if (v >= LOCKED) { @@ -161,10 +181,17 @@ } else { + /* Not private and not public: it's a protected object + */ fprintf(stderr, "read_barrier: %p -> %p protected\n", G, P); + + /* The risks are not high, but in parallel it's possible for the + object to be stolen by another thread and become public, after + which it can be outdated by another commit. So the following + assert can actually fail in that case. */ + /*assert(P->h_revision & 1);*/ } - register_in_list_of_read_objects: gcptrlist_insert(&d->list_of_read_objects, P); add_in_recent_reads_cache: @@ -172,43 +199,32 @@ return P; follow_stub:; + /* We know that P is a stub object, because only stubs can have + an h_revision that is == 2 mod 4. + */ struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); if (foreign_pd == d->public_descriptor) { - /* same thread */ + /* Same thread: dereference the pointer directly. It's possible + we reach any kind of object, even a public object, in case it + was stolen. So we just repeat the whole procedure. 
*/ P = (gcptr)(v - 2); - assert(!(P->h_tid & GCFLAG_PUBLIC)); - if (P->h_revision == stm_private_rev_num) - { - fprintf(stderr, "read_barrier: %p -> %p handle " - "private\n", G, P); - return P; - } - else if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - fprintf(stderr, "read_barrier: %p -> %p handle " - "private_from_protected\n", G, P); - goto private_from_protected; - } - else if (FXCACHE_AT(P) == P) - { - fprintf(stderr, "read_barrier: %p -> %p handle " - "protected fxcache\n", G, P); - return P; - } - else - { - fprintf(stderr, "read_barrier: %p -> %p handle " - "protected\n", G, P); - goto register_in_list_of_read_objects; - } + fprintf(stderr, "read_barrier: %p -> %p via stub\n ", G, P); + + if (UNLIKELY((P->h_revision != stm_private_rev_num) && + (FXCACHE_AT(P) != P))) + goto restart_all; + + return P; } else { /* stealing */ fprintf(stderr, "read_barrier: %p -> stealing %p...\n ", G, P); stm_steal_stub(P); - goto retry; + + assert(P->h_tid & GCFLAG_PUBLIC); + goto restart_all_public; } } @@ -327,21 +343,12 @@ return L; } -static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R); - static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; - spinlock_acquire(d->public_descriptor->collection_lock, 'L'); - - if (P->h_tid & GCFLAG_PUBLIC) - { - /* became PUBLIC while waiting for the collection_lock */ - spinlock_release(d->public_descriptor->collection_lock); - return LocalizePublic(d, P); - } assert(P->h_revision != stm_private_rev_num); + assert(P->h_revision & 1); assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); assert(!(P->h_tid & GCFLAG_STUB)); @@ -355,7 +362,6 @@ gcptrlist_insert(&d->private_from_protected, P); - spinlock_release(d->public_descriptor->collection_lock); return P; } @@ -406,11 +412,17 @@ if (is_private(R)) return R; + spinlock_acquire(d->public_descriptor->collection_lock, 'L'); + if (d->public_descriptor->stolen_objects.size != 0) + stm_normalize_stolen_objects(d); + if (R->h_tid & GCFLAG_PUBLIC) W = LocalizePublic(d, R); else W = LocalizeProtected(d, R); + spinlock_release(d->public_descriptor->collection_lock); + fprintf(stderr, "write_barrier: %p -> %p -> %p\n", P, R, W); return W; diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -55,25 +55,60 @@ gcptr L = (gcptr)(v - 2); + /* L might be a private_from_protected, or just a protected copy. + To know which case it is, read GCFLAG_PRIVATE_FROM_PROTECTED. + */ if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)L->h_revision; /* the backup copy */ + + /* B is now a backup copy, i.e. 
a protected object, and we own + the foreign thread's collection_lock, so we can read/write the + flags + */ + assert(B->h_tid & GCFLAG_BACKUP_COPY); B->h_tid &= ~GCFLAG_BACKUP_COPY; - /* add {B: L} in 'public_to_private', but lazily, because we don't - want to walk over the feet of the foreign thread */ - B->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; - gcptrlist_insert2(&foreign_pd->stolen_objects, B, L); - + if (B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { + /* already stolen */ + } + else { + B->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; + /* add {B: L} in 'public_to_private', but lazily, because we + don't want to walk over the feet of the foreign thread + */ + gcptrlist_insert2(&foreign_pd->stolen_objects, B, L); + } fprintf(stderr, "stolen: %p -> %p - - -> %p\n", P, B, L); - L = B; } - /* change L from protected to public */ + /* Here L is a protected (or backup) copy, and we own the foreign + thread's collection_lock, so we can read/write the flags. Change + it from protected to public. + */ L->h_tid |= GCFLAG_PUBLIC; - smp_wmb(); /* the following update must occur "after" the flag - GCFLAG_PUBLIC was added, for other threads */ + /* Note that all protected or backup copies have a h_revision that + is odd. + */ + assert(L->h_revision & 1); + + /* At this point, the object can only be seen by its owning foreign + thread and by us. No 3rd thread can see it as long as we own + the foreign thread's collection_lock. For the foreign thread, + it might suddenly see the GCFLAG_PUBLIC being added to L + (but it may not do any change to the flags itself, because + it cannot grab its own collection_lock). L->h_revision is an + odd number that is also valid on a public up-to-date object. + */ + + /* If another thread (the foreign or a 3rd party) does a read + barrier from P, it must only reach L if all writes to L are + visible; i.e. it must not see P->h_revision => L that still + doesn't have the GCFLAG_PUBLIC. So we need a CPU write + barrier here. + */ + smp_wmb(); /* update the original P->h_revision to point directly to L */ P->h_revision = (revision_t)L; @@ -92,6 +127,8 @@ gcptr L = items[i + 1]; assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ + g2l_insert(&d->public_to_private, B, L); } gcptrlist_clear(&d->public_descriptor->stolen_objects); From noreply at buildbot.pypy.org Wed Jun 12 01:39:05 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Jun 2013 01:39:05 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: py3k status update #11 Message-ID: <20130611233905.0C2C61C02BA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r4975:bce4d23f15e1 Date: 2013-06-11 16:38 -0700 http://bitbucket.org/pypy/extradoc/changeset/bce4d23f15e1/ Log: py3k status update #11 diff --git a/blog/draft/py3k-status-update-11.rst b/blog/draft/py3k-status-update-11.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-11.rst @@ -0,0 +1,63 @@ +Py3k status update #11 +---------------------- + +This is the 11th status update about our work on the `py3k branch`_, which we +can work on thanks to all of the people who donated_ to the `py3k proposal`_. + +Here's some highlights of the progress made since the previous update: + +* PyPy py3k now matches CPython 3's hash code for + int/float/complex/Decimal/Fraction + +* Various outstanding unicode identifier related issues were + resolved. E.g. test_importlib/pep263/ucn/unicode all now fully pass. 
Various + usage of identifiers (in particular type and module names) have been fixed to + handle non-ascii names -- mostly around display of reprs and exception + messages. + +* The unicodedata database has been upgraded to 6.0.0. + +* Windows support has greatly improved, though it could still use some more + help (but so does the default branch to a certain degree). + +* Probably the last of the parsing related bugs/features have been taken care + of. + +* Of course various other smaller miscellaneous fixes + +This leaves the branch w/ only about 5 outstanding failures of the stdlib test +suite: + +* test_float + + 1 failing test about containment of floats in collections. + +* test_memoryview + + Various failures: requires some bytes/str changes among other things (Manuel + Jacob's has some progress on this on the `py3k-memoryview branch`_) + +* test_multiprocessing + + 1 or more tests deadlock on some platforms + +* test_sys and test_threading + + 2 failing tests for the New GIL's new API + +Probably the biggest feature left to tackle is the New GIL. + +We're now pretty close to pushing an initial release. We had planned for one +around PyCon, but having missed that we've put some more effort into the branch +to provide a more fully-fledged initial release. + +Thanks to the following for their contributions: Manuel Jacob, Amaury Forgeot +d'Arc, Karl Ramm, Jason Chu and Christian Hudson. + +cheers, +Phil + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/commits/all/tip/branch%28%22py3k%22%29 +.. _`py3k-memoryview branch`: https://bitbucket.org/pypy/pypy/compare/py3k-memoryview..py3k From noreply at buildbot.pypy.org Wed Jun 12 02:01:01 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Jun 2013 02:01:01 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20130612000101.898CF1C0328@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64858:f1b58638d8bd Date: 2013-06-11 17:00 -0700 http://bitbucket.org/pypy/pypy/changeset/f1b58638d8bd/ Log: merge default diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -289,16 +289,6 @@ "with verify() (see pypy/module/_cffi_backend/ctypefunc.py " "for details)")) - if USE_C_LIBFFI_MSVC and is_result_type: - # MSVC returns small structures in registers. Pretend int32 or - # int64 return type. This is needed as a workaround for what - # is really a bug of libffi_msvc seen as an independent library - # (ctypes has a similar workaround). 
- if ctype.size <= 4: - return clibffi.ffi_type_sint32 - if ctype.size <= 8: - return clibffi.ffi_type_sint64 - # walk the fields, expanding arrays into repetitions; first, # only count how many flattened fields there are nflat = 0 @@ -318,6 +308,16 @@ "a struct with a zero-length array")) nflat += flat + if USE_C_LIBFFI_MSVC and is_result_type: + # MSVC returns small structures in registers. Pretend int32 or + # int64 return type. This is needed as a workaround for what + # is really a bug of libffi_msvc seen as an independent library + # (ctypes has a similar workaround). + if ctype.size <= 4: + return clibffi.ffi_type_sint32 + if ctype.size <= 8: + return clibffi.ffi_type_sint64 + # allocate an array of (nflat + 1) ffi_types elements = self.fb_alloc(rffi.sizeof(FFI_TYPE_P) * (nflat + 1)) elements = rffi.cast(FFI_TYPE_PP, elements) diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -217,6 +217,52 @@ pass rmtree(dir_name, True) + def test_builtin_reimport(self): + # from https://bugs.pypy.org/issue1514 + skip("fix me") + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. + saved = sys.modules.pop('marshal') + __import__('marshal') + sys.modules['marshal'] = saved + + assert marshal.loads == 42 + import marshal + assert marshal.loads == 42 + marshal.loads = old + + def test_builtin_reimport_mess(self): + # taken from https://bugs.pypy.org/issue1514, with extra cases + # that show a difference with CPython: we can get on CPython + # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. 
+ saved = sys.modules.pop('marshal') + marshal2 = __import__('marshal') + assert marshal2 is not marshal + assert marshal2.loads is old + assert marshal2 is sys.modules['marshal'] + assert marshal is saved + assert marshal.loads == 42 + + import marshal + assert marshal.loads is old + + sys.modules['marshal'] = saved + import marshal + assert marshal.loads == 42 + + marshal.loads = old + def test_get_tag(self): import imp import sys diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -625,6 +625,43 @@ assert sys.path is oldpath assert 'settrace' in dir(sys) + def test_reload_builtin_doesnt_clear(self): + import sys + sys.foobar = "baz" + reload(sys) + assert sys.foobar == "baz" + + def test_reimport_builtin_simple_case_1(self): + import sys, time + del time.tzset + del sys.modules['time'] + import time + assert hasattr(time, 'tzset') + + def test_reimport_builtin_simple_case_2(self): + skip("fix me") + import sys, time + time.foo = "bar" + del sys.modules['time'] + import time + assert not hasattr(time, 'foo') + + def test_reimport_builtin(self): + skip("fix me") + import sys, time + oldpath = sys.path + time.tzset = "" + + del sys.modules['time'] + import time as time1 + assert sys.modules['time'] is time1 + + assert time.tzset == "" + + reload(time1) # don't leave a broken time.tzset behind + import time + assert time.tzset != "" + def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -97,11 +97,6 @@ '_xoptions' : 'app.null__xoptions', } - def setbuiltinmodule(self, w_module, name): - w_name = self.space.wrap(name) - w_modules = self.get('modules') - self.space.setitem(w_modules, w_name, w_module) - def startup(self, space): if space.config.translating and not we_are_translated(): # don't get the filesystemencoding at translation time diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -33,14 +33,13 @@ try: # first try to print the exception's class name stderr = sys.stderr - stderr.write(getattr(exctype, '__name__', exctype)) + stderr.write(str(getattr(exctype, '__name__', exctype))) # then attempt to get the str() of the exception try: s = str(value) except: s = '' - # then print it, and don't worry too much about the extra space - # between the exception class and the ':' + # then print it if s: stderr.write(': %s\n' % (s,)) else: diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -41,40 +41,43 @@ class TestBitfield: def check(self, source, expected_ofs_y, expected_align, expected_size): + # NOTE: 'expected_*' is the numbers expected from GCC. + # The numbers expected from MSVC are not explicitly written + # in this file, and will just be taken from the compiler. 
ffi = FFI() ffi.cdef("struct s1 { %s };" % source) ctype = ffi.typeof("struct s1") # verify the information with gcc - if sys.platform != "win32": - ffi1 = FFI() - ffi1.cdef(""" - static const int Gofs_y, Galign, Gsize; - struct s1 *try_with_value(int fieldnum, long long value); - """) - fnames = [name for name, cfield in ctype.fields - if name and cfield.bitsize > 0] - setters = ['case %d: s.%s = value; break;' % iname - for iname in enumerate(fnames)] - lib = ffi1.verify(""" - struct s1 { %s }; - struct sa { char a; struct s1 b; }; - #define Gofs_y offsetof(struct s1, y) - #define Galign offsetof(struct sa, b) - #define Gsize sizeof(struct s1) - struct s1 *try_with_value(int fieldnum, long long value) - { - static struct s1 s; - memset(&s, 0, sizeof(s)); - switch (fieldnum) { %s } - return &s; - } - """ % (source, ' '.join(setters))) - assert lib.Gofs_y == expected_ofs_y - assert lib.Galign == expected_align - assert lib.Gsize == expected_size + ffi1 = FFI() + ffi1.cdef(""" + static const int Gofs_y, Galign, Gsize; + struct s1 *try_with_value(int fieldnum, long long value); + """) + fnames = [name for name, cfield in ctype.fields + if name and cfield.bitsize > 0] + setters = ['case %d: s.%s = value; break;' % iname + for iname in enumerate(fnames)] + lib = ffi1.verify(""" + struct s1 { %s }; + struct sa { char a; struct s1 b; }; + #define Gofs_y offsetof(struct s1, y) + #define Galign offsetof(struct sa, b) + #define Gsize sizeof(struct s1) + struct s1 *try_with_value(int fieldnum, long long value) + { + static struct s1 s; + memset(&s, 0, sizeof(s)); + switch (fieldnum) { %s } + return &s; + } + """ % (source, ' '.join(setters))) + if sys.platform == 'win32': + expected_ofs_y = lib.Gofs_y + expected_align = lib.Galign + expected_size = lib.Gsize else: - lib = None - fnames = None + assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( + expected_ofs_y, expected_align, expected_size) # the real test follows assert ffi.offsetof("struct s1", "y") == expected_ofs_y assert ffi.alignof("struct s1") == expected_align @@ -99,10 +102,9 @@ setattr(s, name, value) assert getattr(s, name) == value raw1 = ffi.buffer(s)[:] - if lib is not None: - t = lib.try_with_value(fnames.index(name), value) - raw2 = ffi.buffer(t, len(raw1))[:] - assert raw1 == raw2 + t = lib.try_with_value(fnames.index(name), value) + raw2 = ffi.buffer(t, len(raw1))[:] + assert raw1 == raw2 def test_bitfield_basic(self): self.check("int a; int b:9; int c:20; int y;", 8, 4, 12) @@ -136,9 +138,11 @@ L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) self.check("char x; int :0; char y;", 4, 1, 5) + self.check("char x; int :0; int :0; char y;", 4, 1, 5) self.check("char x; long long :0; char y;", L, 1, L + 1) self.check("short x, y; int :0; int :0;", 2, 2, 4) self.check("char x; int :0; short b:1; char y;", 5, 2, 6) + self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) def test_error_cases(self): ffi = FFI() diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py @@ -2,7 +2,6 @@ import weakref from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.jit.backend.llsupport.test.zrpy_gc_test import run, get_entry, compile -from rpython.jit.backend.llsupport.test.ztranslation_test import fix_annotator_for_vrawbuffer class X(object): def __init__(self, x=0): @@ -32,8 +31,7 @@ g._dont_inline_ = True return g -def 
compile_boehm_test(monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) +def compile_boehm_test(): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @dont_look_inside def see(lst, n): diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -9,23 +9,10 @@ from rpython.jit.codewriter.policy import StopAtXPolicy -def fix_annotator_for_vrawbuffer(monkeypatch): - from rpython.rlib.nonconst import NonConstant - from rpython.jit.metainterp.optimizeopt.virtualize import VRawBufferValue - from rpython.jit.metainterp import warmspot - - def my_hook_for_tests(cpu): - # this is needed so that the annotator can see it - if NonConstant(False): - v = VRawBufferValue(cpu, None, -1, None, None) - monkeypatch.setattr(warmspot, 'hook_for_tests', my_hook_for_tests) - - class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() - def test_stuff_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_stuff_translates(self): # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges @@ -102,10 +89,9 @@ class TranslationTestCallAssembler(CCompiledMixin): CPUClass = getcpuclass() - def test_direct_assembler_call_translates(self, monkeypatch): + def test_direct_assembler_call_translates(self): """Test CALL_ASSEMBLER and the recursion limit""" from rpython.rlib.rstackovf import StackOverflow - fix_annotator_for_vrawbuffer(monkeypatch) class Thing(object): def __init__(self, val): @@ -183,8 +169,7 @@ class TranslationTestJITStats(CCompiledMixin): CPUClass = getcpuclass() - def test_jit_get_stats(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_jit_get_stats(self): driver = JitDriver(greens = [], reds = ['i']) def f(): @@ -207,8 +192,7 @@ class TranslationRemoveTypePtrTest(CCompiledMixin): CPUClass = getcpuclass() - def test_external_exception_handling_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_external_exception_handling_translates(self): jitdriver = JitDriver(greens = [], reds = ['n', 'total']) class ImDone(Exception): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -17,6 +17,7 @@ _attrs_ = ('keybox', 'source_op', '_cached_vinfo') box = None level = optimizer.LEVEL_NONNULL + is_about_raw = False _cached_vinfo = None def __init__(self, keybox, source_op=None): @@ -395,6 +396,7 @@ class VRawBufferValue(AbstractVArrayValue): + is_about_raw = True def __init__(self, cpu, logops, size, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -457,6 +459,7 @@ class VRawSliceValue(AbstractVirtualValue): + is_about_raw = True def __init__(self, rawbuffer_value, offset, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -676,13 +679,17 @@ offsetbox = self.get_constant_box(op.getarg(1)) if value.is_virtual() and offsetbox is not None: offset = offsetbox.getint() - if isinstance(value, VRawBufferValue): - self.make_virtual_raw_slice(value, offset, op.result, op) - return - elif isinstance(value, VRawSliceValue): - offset = offset + value.offset - self.make_virtual_raw_slice(value.rawbuffer_value, offset, op.result, op) - return + # the 
following check is constant-folded to False if the + # translation occurs without any VRawXxxValue instance around + if value.is_about_raw: + if isinstance(value, VRawBufferValue): + self.make_virtual_raw_slice(value, offset, op.result, op) + return + elif isinstance(value, VRawSliceValue): + offset = offset + value.offset + self.make_virtual_raw_slice(value.rawbuffer_value, offset, + op.result, op) + return self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -451,6 +451,7 @@ class AbstractVirtualInfo(object): kind = REF + is_about_raw = False #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): @@ -461,7 +462,7 @@ def debug_prints(self): raise NotImplementedError - + class AbstractVirtualStructInfo(AbstractVirtualInfo): def __init__(self, fielddescrs): @@ -547,6 +548,7 @@ class VRawBufferStateInfo(AbstractVirtualInfo): kind = INT + is_about_raw = True def __init__(self, size, offsets, descrs): self.size = size @@ -772,7 +774,9 @@ assert self.virtuals_cache is not None v = self.virtuals_cache.get_int(index) if not v: - v = self.rd_virtuals[index].allocate_int(self, index) + v = self.rd_virtuals[index] + assert v.is_about_raw and isinstance(v, VRawBufferStateInfo) + v = v.allocate_int(self, index) ll_assert(v == self.virtuals_cache.get_int(index), "resume.py: bad cache") return v diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -761,8 +761,6 @@ cpu = jd.warmstate.cpu def ll_portal_runner(*args): - hook_for_tests(cpu) # usually it's empty, but tests can monkeypatch - # it to fix the annotator start = True while 1: try: @@ -999,10 +997,3 @@ graphs = self.translator.graphs for graph, block, i in find_force_quasi_immutable(graphs): self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - -def hook_for_tests(cpu): - """ - This function is empty and does nothing. Its only role is to be - monkey-patched by tests to "fix" the annotator if needed (see - e.g. 
x86/test/test_ztranslation::test_external_exception_handling_translates - """ From noreply at buildbot.pypy.org Wed Jun 12 02:16:20 2013 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 12 Jun 2013 02:16:20 +0200 (CEST) Subject: [pypy-commit] pypy py3k: simplify Message-ID: <20130612001620.EC0D61C0F12@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r64859:391ba71b1425 Date: 2013-06-11 17:14 -0700 http://bitbucket.org/pypy/pypy/changeset/391ba71b1425/ Log: simplify diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -379,7 +379,6 @@ return tuple(parts), tuple(formats) def get_operrcls2(valuefmt): - from rpython.rlib.runicode import str_decode_utf_8 valuefmt = valuefmt.decode('ascii') strings, formats = decompose_valuefmt(valuefmt) assert len(strings) == len(formats) + 1 @@ -407,8 +406,7 @@ if fmt == 'd': result = str(value).decode('ascii') elif fmt == '8': - result, _ = str_decode_utf_8(value, len(value), - 'strict') + result = value.decode('utf-8') elif fmt == 'R': result = space.unicode_w(space.repr(value)) elif fmt in 'NT': @@ -440,7 +438,7 @@ Supports the standard %s and %d formats, plus the following: - %8 - The result of arg.decode('utf-8', 'strict') + %8 - The result of arg.decode('utf-8') %N - The result of w_arg.getname(space) %R - The result of space.unicode_w(space.repr(w_arg)) %T - The result of space.type(w_arg).getname(space) From noreply at buildbot.pypy.org Wed Jun 12 10:20:21 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 10:20:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Check the type of the text arguments, and cast unicodes to strings in Message-ID: <20130612082021.1032B1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64860:0c8571de8df4 Date: 2013-06-12 10:19 +0200 http://bitbucket.org/pypy/pypy/changeset/0c8571de8df4/ Log: Check the type of the text arguments, and cast unicodes to strings in the default encoding (as CPython). Fix for issue 1513. 
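The coercion rule described in this log message is exactly what the diff that follows adds.  As an illustrative Python 2 sketch only (the _texttype name is taken from that diff; what str() does to non-ASCII unicode depends on the interpreter's default encoding, so only the ASCII case is exercised here):

    # Illustrative sketch of the coercion rule added in the diff below.
    def _texttype(text):
        if isinstance(text, str):
            return text                  # byte strings pass through unchanged
        elif isinstance(text, unicode):
            return str(text)             # encoded with the default encoding, as CPython does
        else:
            raise TypeError("str or unicode expected, got a '%s' object"
                            % (type(text).__name__,))

    assert _texttype("clear") == "clear"
    assert _texttype(u"clear") == "clear"    # ASCII-only unicode encodes cleanly
    try:
        _texttype(42)
    except TypeError as e:
        assert "int" in str(e)

In other words, unicode arguments are silently encoded as CPython does, and anything that is neither str nor unicode now fails early with a TypeError instead of reaching the C level.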
diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") From noreply at buildbot.pypy.org Wed Jun 12 11:22:52 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 11:22:52 +0200 (CEST) Subject: [pypy-commit] stmgc default: Wrote bogus value: it needs to be new_revision (which is cur_time + 1). Message-ID: <20130612092252.E292B1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r94:0bdc2d4e6f29 Date: 2013-06-12 11:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/0bdc2d4e6f29/ Log: Wrote bogus value: it needs to be new_revision (which is cur_time + 1). 
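The fix below relies on the low-bit tagging of h_revision that the surrounding C code tests (v & 1, v & 2).  A small illustrative sketch of that convention, written in Python for brevity; the even starting value of cur_time is an assumption implied by the "make an odd number" comment in the diff, not something asserted here from the stmgc sources:

    # Illustrative sketch (not stmgc code): h_revision values are tagged by
    # their low bits, as the checks in the C diffs below show.
    def is_revision_number(v):   # odd: "I am the latest revision"
        return v & 1 == 1
    def is_pointer(v):           # even: points to a more recent copy
        return v & 1 == 0
    def is_stub_pointer(v):      # bit 1 set on such a pointer marks a stub
        return is_pointer(v) and bool(v & 2)

    cur_time = 42                    # assumed even, per "make an odd number"
    new_revision = cur_time + 1      # hence odd: reads as a revision number
    assert is_revision_number(new_revision)
    assert not is_pointer(new_revision)

Writing cur_time itself (an even value) would have made the field look like a pointer, which is why the commit switches to new_revision.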
diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -839,6 +839,8 @@ { long i, size = d->private_from_protected.size; gcptr *items = d->private_from_protected.items; + revision_t new_revision = cur_time + 1; // make an odd number + assert(new_revision & 1); for (i = 0; i < size; i++) { @@ -856,7 +858,7 @@ } gcptr B = (gcptr)P->h_revision; - P->h_revision = cur_time; + P->h_revision = new_revision; if (B->h_tid & GCFLAG_PUBLIC) { From noreply at buildbot.pypy.org Wed Jun 12 11:25:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 11:25:51 +0200 (CEST) Subject: [pypy-commit] stmgc default: The single-thread version passes now Message-ID: <20130612092551.9DF7B1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r95:06effc1c372c Date: 2013-06-12 11:25 +0200 http://bitbucket.org/pypy/stmgc/changeset/06effc1c372c/ Log: The single-thread version passes now diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -5,6 +5,11 @@ # a default seed that changes every day, but that can be easily recovered DEFAULT_SEED = int(time.strftime("%y%m%d", time.gmtime())) +# XXXXXXXXXXXXXXXXXXXXXX +# XXX reintroduce me XXX +# XXXXXXXXXXXXXXXXXXXXXX +DO_MAJOR_COLLECTS = False + def setup_function(_): lib.stm_clear_between_tests() @@ -375,7 +380,8 @@ if k1 == 7: self.dump('major collect') self.push_roots() - major_collect() + if DO_MAJOR_COLLECTS: + major_collect() self.pop_roots() p = emptypair if k1 == 82 and self.interruptible_transaction: From noreply at buildbot.pypy.org Wed Jun 12 11:47:31 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 11:47:31 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix: wrong label Message-ID: <20130612094731.9072E1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r96:1c52e9c6610d Date: 2013-06-12 11:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/1c52e9c6610d/ Log: Fix: wrong label diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -98,6 +98,7 @@ along this chain. */ restart_all_public: + assert(P->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
{ // "has a more recent revision" @@ -172,10 +173,10 @@ if (v >= LOCKED) { SpinLoop(SPLP_LOCKED_INFLIGHT); - goto retry; // spinloop until it is no longer LOCKED + goto restart_all_public; // spinloop until it is no longer LOCKED } ValidateNow(d); // try to move start_time forward - goto retry; // restart searching from P + goto restart_all_public; // restart searching from P } fprintf(stderr, "read_barrier: %p -> %p public\n", G, P); } From noreply at buildbot.pypy.org Wed Jun 12 11:47:32 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 11:47:32 +0200 (CEST) Subject: [pypy-commit] stmgc default: reorganize Message-ID: <20130612094732.BB7C31C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r97:37a90d1c2610 Date: 2013-06-12 11:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/37a90d1c2610/ Log: reorganize diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -238,49 +238,40 @@ if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { - private_from_protected: - if (!(((gcptr)P->h_revision)->h_revision & 1)) - { - fprintf(stderr, "_stm_nonrecord_barrier: %p -> NULL " - "private_from_protected but protected changed\n", G); - return NULL; - } - goto add_in_recent_reads_cache; + assert(!(P->h_revision & 1)); + return P; } if (P->h_tid & GCFLAG_PUBLIC) { - while (v = ACCESS_ONCE(P->h_revision), !(v & 1)) + while (1) { + wlog_t *item; + gcptr L; + G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + + L = item->val; + found_in_stolen_objects: + assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); + assert(!(L->h_tid & GCFLAG_PUBLIC)); + assert(is_private(L)); + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p " + "public_to_private\n", G, L); + return L; + + no_private_obj:; + L = _stm_find_stolen_objects(d, P); + if (L != NULL) + goto found_in_stolen_objects; + + v = ACCESS_ONCE(P->h_revision); + if (v & 1) + break; if (v & 2) goto follow_stub; - P = (gcptr)v; assert(P->h_tid & GCFLAG_PUBLIC); } - - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) - { - wlog_t *item; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); - - P = item->val; - found_in_stolen_objects: - assert(!(P->h_tid & GCFLAG_PUBLIC)); - assert(is_private(P)); - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p " - "public_to_private\n", G, P); - return P; - - no_private_obj:; - gcptr L = _stm_find_stolen_objects(d, P); - if (L != NULL) - { - P = L; - goto found_in_stolen_objects; - } - } - if (UNLIKELY(v > d->start_time)) { fprintf(stderr, "_stm_nonrecord_barrier: %p -> NULL changed\n", G); @@ -292,25 +283,12 @@ { fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p protected\n", G, P); } - - register_in_list_of_read_objects: - add_in_recent_reads_cache: return P; follow_stub:; + fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p stub\n ", G, P); P = (gcptr)(v - 2); - assert(!(P->h_tid & GCFLAG_PUBLIC)); - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - { - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p handle " - "private_from_protected\n", G, P); - goto private_from_protected; - } - else - { - fprintf(stderr, "read_barrier: %p -> %p handle\n", G, P); - goto register_in_list_of_read_objects; - } + return _stm_nonrecord_barrier(P); } #if 0 From noreply at buildbot.pypy.org Wed Jun 12 11:47:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 11:47:33 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fixes in test Message-ID: <20130612094733.E50461C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r98:30dd165d9c04 Date: 2013-06-12 11:47 
+0200 http://bitbucket.org/pypy/stmgc/changeset/30dd165d9c04/ Log: Fixes in test diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -465,11 +465,9 @@ def minor_collect(): lib.stmgc_minor_collect() -STUB_TID = GCFLAG_STUB | GCFLAG_OLD | 0 # no user tid - def is_stub(p): assert lib.stm_dbgmem_is_active(p, 1) != 0 - return p.h_tid == STUB_TID + return p.h_tid & GCFLAG_STUB def check_not_free(p): assert lib.stm_dbgmem_is_active(p, 1) == 1 diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -321,3 +321,6 @@ def test_abort_stealing_while_modifying(): test_stealing_while_modifying(aborting=True) + +def test_stub_for_refs_from_stolen(): + xxx diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -83,10 +83,11 @@ def check(self, p): assert isinstance(p, Pair) - if p != emptypair and not is_stub(p.ptr): + if p != emptypair: self.check_not_free(p.ptr) - pid = lib.rawgetptr(p.ptr, 2) - assert pid == p.obj.identity + if not is_stub(p.ptr): + pid = lib.rawgetptr(p.ptr, 2) + assert pid == p.obj.identity def expected_abort(self, manual=False): if manual: From noreply at buildbot.pypy.org Wed Jun 12 13:10:47 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:10:47 +0200 (CEST) Subject: [pypy-commit] pypy default: missing checks Message-ID: <20130612111047.F08AD1C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64861:83dcd564b807 Date: 2013-06-12 04:20 -0500 http://bitbucket.org/pypy/pypy/changeset/83dcd564b807/ Log: missing checks diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -344,6 +344,8 @@ # note: we need to make a copy of inputargs because possibly_free_vars # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) + self.fm.finish_binding() + self._check_invariants() def get_gcmap(self, forbidden_regs=[], noregs=False): frame_depth = self.fm.get_frame_depth() From noreply at buildbot.pypy.org Wed Jun 12 13:10:52 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:10:52 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130612111052.22D6D1C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64862:574a2ace6557 Date: 2013-06-12 04:21 -0500 http://bitbucket.org/pypy/pypy/changeset/574a2ace6557/ Log: merge heads diff too long, truncating to 2000 out of 7890 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. 
If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -12,7 +12,6 @@ import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -58,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -80,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -151,6 +151,8 @@ 'DEBUG': DEBUG, 'NOTSET': NOTSET, } +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,16 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - try: - self._sock._decref_socketios() - except AttributeError: - pass # bah, someone built a _fileobject manually - # with some unexpected replacement of the - # _socketobject class + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) 
- object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! 
diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_testcapi.py @@ -0,0 +1,61 @@ +import os, sys, imp +import tempfile + +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. + """ + thisdir = os.path.dirname(__file__) + output_dir = tempfile.mkdtemp() + + from distutils.ccompiler import new_compiler + + compiler = new_compiler() + compiler.output_dir = output_dir + + # Compile .c file + include_dir = os.path.join(thisdir, '..', 'include') + if sys.platform == 'win32': + ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] + else: + ccflags = ['-fPIC', '-Wimplicit-function-declaration'] + res = compiler.compile([os.path.join(thisdir, csource)], + include_dirs=[include_dir], + extra_preargs=ccflags) + object_filename = res[0] + + # set link options + output_filename = modulename + _get_c_extension_suffix() + if sys.platform == 'win32': + # XXX libpypy-c.lib is currently not installed automatically + library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + if not os.path.exists(library + '.lib'): + #For a nightly build + library = os.path.join(thisdir, '..', 'include', 'python27') + if not os.path.exists(library + '.lib'): + # For a local translation + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + libraries = [library, 'oleaut32'] + extra_ldargs = ['/MANIFEST', # needed for VC10 + '/EXPORT:init' + modulename] + else: + libraries = [] + extra_ldargs = [] + + # link the dynamic library + compiler.link_shared_object( + [object_filename], + output_filename, + libraries=libraries, + extra_preargs=extra_ldargs) + + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,62 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - 
extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - try: import cpyext except ImportError: raise ImportError("No module named '_testcapi'") else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -156,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -701,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -74,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -168,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -390,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -437,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -450,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -475,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
@@ -486,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -32,11 +32,10 @@ "rctime" , "select", "zipimport", "_lsprof", "crypt", "signal", "_rawffi", "termios", "zlib", "bz2", "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", - "thread", "itertools", "pyexpat", "_ssl", "array", + "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_ffi", - "_continuation", "_cffi_backend", "_csv"] # "cpyext", "cppyy"] -# disabled until problems are fixed + "_continuation", "_cffi_backend", "_csv", "cppyy"] )) translation_modules = default_modules.copy() diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? + ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -8,7 +8,8 @@ interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part is that Python is a meta-programming language for RPython, that is, -RPython is considered from live objects **after** the imports are done. +"being valid RPython" is a question that only makes sense on the +live objects **after** the imports are done. This might require more explanation. 
You start writing RPython from ``entry_point``, a good starting point is ``rpython/translator/goal/targetnopstandalone.py``. This does not do all that @@ -37,7 +38,7 @@ In this example ``entry_point`` is RPython, ``add`` and ``sub`` are RPython, however, ``generator`` is not. -A good introductory level articles are available: +The following introductory level articles are available: * Laurence Tratt -- `Fast Enough VMs in Fast Enough Time`_. diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -5,7 +5,7 @@ Purpose ------- -This document describes an FFI for RPython language, concentrating +This document describes an FFI for the RPython language, concentrating on low-level backends like C. It describes how to declare and call low-level (C) functions from RPython level. @@ -50,7 +50,7 @@ ------ In rffi_ there are various declared types for C-structures, like CCHARP -(char*), SIZE_T (size_t) and others. refer to file for details. +(char*), SIZE_T (size_t) and others. Refer to file for details. Instances of non-primitive types must be alloced by hand, with call to lltype.malloc, and freed by lltype.free both with keyword argument flavor='raw'. There are several helpers like string -> char* diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -40,3 +40,15 @@ .. branch: on-abort-resops Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,8 @@ # App-level version of py.py. # See test/test_app_main. 
-# Missing vs CPython: -d, -OO, -t, -v, -x, -3 -"""\ +# Missing vs CPython: -d, -t, -v, -x, -3 +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +12,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -R : ignored (see http://bugs.python.org/issue14621) -Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -27,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -470,6 +470,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -86,12 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - w_type = space.type(w_stararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after * must be " - "a sequence, not %s" % (typename,))) + "argument after * must be a sequence, not %T", w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -116,12 +113,10 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - w_type = space.type(w_starstararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after ** must be " - "a mapping, not %s" % (typename,))) + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -632,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2793,8 +2793,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2835,8 +2834,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2881,8 +2879,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2925,8 +2922,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2971,8 +2967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2993,8 +2988,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3024,8 +3018,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3046,8 +3039,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3064,8 +3056,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if 
w_self.body is None: list_w = [] @@ -3081,8 +3072,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3131,8 +3121,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3149,8 +3138,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3166,8 +3154,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3183,8 +3170,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3234,8 +3220,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3278,8 +3263,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3320,8 +3304,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3341,8 +3324,7 @@ if w_obj is not None: 
return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3391,8 +3373,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3415,8 +3396,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3439,8 +3419,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3489,8 +3468,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3509,8 +3487,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3530,8 +3507,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3579,8 +3555,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3603,8 +3578,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3623,8 +3597,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3640,8 +3613,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3690,8 +3662,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3710,8 +3681,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3727,8 +3697,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3776,8 +3745,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3796,8 +3764,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3813,8 +3780,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if 
w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3862,8 +3828,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3886,8 +3851,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3906,8 +3870,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3954,8 +3917,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3978,8 +3940,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -4002,8 +3963,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4048,8 +4008,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4065,8 +4024,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4082,8 +4040,7 @@ def 
TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4128,8 +4085,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4145,8 +4101,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4193,8 +4148,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4217,8 +4171,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4262,8 +4215,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4308,8 +4260,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4329,8 +4280,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4350,8 +4300,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise 
operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4399,8 +4348,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4423,8 +4371,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4447,8 +4394,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4493,8 +4439,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4539,8 +4484,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4638,8 +4582,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4660,8 +4603,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4691,8 +4633,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 
'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4711,8 +4652,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4758,8 +4698,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4782,8 +4721,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4806,8 +4744,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4856,8 +4793,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4880,8 +4816,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4929,8 +4864,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4951,8 +4885,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') 
return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -5000,8 +4933,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -5024,8 +4956,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -5048,8 +4979,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5094,8 +5024,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5111,8 +5040,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5155,8 +5083,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5201,8 +5128,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5221,8 +5147,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5268,8 +5193,7 @@ if w_obj is not None: return w_obj if not 
w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5288,8 +5212,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5335,8 +5258,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5359,8 +5281,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5379,8 +5300,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5427,8 +5347,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5447,8 +5366,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5494,8 +5412,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5542,8 +5459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = 
space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5562,8 +5478,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5579,8 +5494,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5628,8 +5542,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5648,8 +5561,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5665,8 +5577,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5686,8 +5597,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5710,8 +5620,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5764,8 +5673,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", 
typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5812,8 +5720,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5858,8 +5765,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5904,8 +5810,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5928,8 +5833,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5950,8 +5854,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -6000,8 +5903,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -6024,8 +5926,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -6048,8 +5949,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') return 
expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -6098,8 +5998,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6120,8 +6019,7 @@ if w_obj is not None: From noreply at buildbot.pypy.org Wed Jun 12 13:10:53 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:10:53 +0200 (CEST) Subject: [pypy-commit] pypy default: rename is_reg method to is_core_reg on location objects Message-ID: <20130612111053.796BE1C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64863:d417faaac238 Date: 2013-06-12 05:46 -0500 http://bitbucket.org/pypy/pypy/changeset/d417faaac238/ Log: rename is_reg method to is_core_reg on location objects diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -113,7 +113,7 @@ tmpreg = r.lr mc.gen_load_int(r.ip.value, self.cpu.pos_exc_value()) if excvalloc is not None: # store - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() self.load_reg(mc, excvalloc, r.ip) if on_frame: # store exc_value in JITFRAME @@ -125,7 +125,7 @@ self.store_reg(mc, r.ip, r.fp, ofs, helper=tmpreg) if exctploc is not None: # store pos_exception in exctploc - assert exctploc.is_reg() + assert exctploc.is_core_reg() mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) self.load_reg(mc, exctploc, r.ip, helper=tmpreg) @@ -146,7 +146,7 @@ tmpreg = r.lr # use lr as a second temporary reg mc.gen_load_int(r.ip.value, self.cpu.pos_exc_value()) if excvalloc is not None: - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() self.store_reg(mc, excvalloc, r.ip) else: assert exctploc is not r.fp @@ -947,7 +947,7 @@ # regalloc support def load(self, loc, value): """load an immediate value into a register""" - assert (loc.is_reg() and value.is_imm() + assert (loc.is_core_reg() and value.is_imm() or loc.is_vfp_reg() and value.is_imm_float()) if value.is_imm(): self.mc.gen_load_int(loc.value, value.getint()) @@ -958,7 +958,7 @@ def load_reg(self, mc, target, base, ofs=0, cond=c.AL, helper=r.ip): if target.is_vfp_reg(): return self._load_vfp_reg(mc, target, base, ofs, cond, helper) - elif target.is_reg(): + elif target.is_core_reg(): return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): @@ -1012,7 +1012,7 @@ def _mov_imm_to_loc(self, prev_loc, loc, cond=c.AL): if loc.type == FLOAT: raise AssertionError("invalid target for move from imm value") - if loc.is_reg(): + if loc.is_core_reg(): new_loc = loc elif loc.is_stack() or loc.is_raw_sp(): new_loc = r.lr @@ -1027,7 +1027,7 @@ def _mov_reg_to_loc(self, prev_loc, loc, cond=c.AL): if loc.is_imm(): raise AssertionError("mov reg to imm doesn't make sense") - if loc.is_reg(): + if loc.is_core_reg(): self.mc.MOV_rr(loc.value, prev_loc.value, cond=cond) elif loc.is_stack() and loc.type != FLOAT: # spill a core register @@ -1050,7 +1050,7 @@ helper = None offset = prev_loc.value tmp = None - if loc.is_reg(): + if loc.is_core_reg(): assert prev_loc.type != FLOAT, 'trying to load from an \ incompatible location into a core 
register' # unspill a core register @@ -1126,7 +1126,7 @@ """Moves a value from a previous location to some other location""" if prev_loc.is_imm(): return self._mov_imm_to_loc(prev_loc, loc, cond) - elif prev_loc.is_reg(): + elif prev_loc.is_core_reg(): self._mov_reg_to_loc(prev_loc, loc, cond) elif prev_loc.is_stack(): self._mov_stack_to_loc(prev_loc, loc, cond) @@ -1215,7 +1215,7 @@ scratch_reg = r.vfp_ip self.regalloc_mov(loc, scratch_reg, cond) self.regalloc_push(scratch_reg, cond) - elif loc.is_reg(): + elif loc.is_core_reg(): self.mc.PUSH([loc.value], cond=cond) elif loc.is_vfp_reg(): self.mc.VPUSH([loc.value], cond=cond) @@ -1238,7 +1238,7 @@ scratch_reg = r.vfp_ip self.regalloc_pop(scratch_reg) self.regalloc_mov(scratch_reg, loc) - elif loc.is_reg(): + elif loc.is_core_reg(): self.mc.POP([loc.value], cond=cond) elif loc.is_vfp_reg(): self.mc.VPOP([loc.value], cond=cond) @@ -1306,7 +1306,7 @@ # lengthloc is the length of the array, which we must not modify! assert lengthloc is not r.r0 and lengthloc is not r.r1 - if lengthloc.is_reg(): + if lengthloc.is_core_reg(): varsizeloc = lengthloc else: assert lengthloc.is_stack() diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -40,7 +40,7 @@ if self.fnloc.is_stack(): self.asm.mov_loc_loc(self.fnloc, r.ip) self.fnloc = r.ip - assert self.fnloc.is_reg() + assert self.fnloc.is_core_reg() self.mc.BLX(self.fnloc.value) def restore_stack_pointer(self): @@ -135,7 +135,7 @@ return [], [] if self.resloc.is_vfp_reg(): return [r.r0, r.r1], [] - assert self.resloc.is_reg() + assert self.resloc.is_core_reg() return [r.r0], [] def load_result(self): @@ -146,7 +146,7 @@ if resloc.is_vfp_reg(): # move result to the allocated register self.asm.mov_to_vfp_loc(r.r0, r.r1, resloc) - elif resloc.is_reg(): + elif resloc.is_core_reg(): # move result to the allocated register if resloc is not r.r0: self.asm.mov_loc_loc(r.r0, resloc) @@ -283,7 +283,7 @@ def load_result(self): resloc = self.resloc # ensure the result is wellformed and stored in the correct location - if resloc is not None and resloc.is_reg(): + if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, self.ressize, self.ressign) @@ -292,7 +292,7 @@ return [], [] if self.resloc.is_vfp_reg(): return [], [r.d0] - assert self.resloc.is_reg() + assert self.resloc.is_core_reg() return [r.r0], [] diff --git a/rpython/jit/backend/arm/helper/assembler.py b/rpython/jit/backend/arm/helper/assembler.py --- a/rpython/jit/backend/arm/helper/assembler.py +++ b/rpython/jit/backend/arm/helper/assembler.py @@ -82,7 +82,7 @@ assert guard is not None l0 = arglocs[0] l1 = arglocs[1] - assert l0.is_reg() + assert l0.is_core_reg() if l1.is_imm(): self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond) diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -15,7 +15,7 @@ def is_raw_sp(self): return False - def is_reg(self): + def is_core_reg(self): return False def is_vfp_reg(self): @@ -43,7 +43,7 @@ def __repr__(self): return 'r%d' % self.value - def is_reg(self): + def is_core_reg(self): return True def as_key(self): @@ -62,7 +62,7 @@ def __repr__(self): return 'vfp%d' % self.value - def is_reg(self): + def is_core_reg(self): return False def is_vfp_reg(self): diff --git a/rpython/jit/backend/arm/opassembler.py 
b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -248,7 +248,7 @@ l1 = arglocs[1] failargs = arglocs[2:] - if l0.is_reg(): + if l0.is_core_reg(): if l1.is_imm(): self.mc.CMP_ri(l0.value, l1.getint()) else: @@ -488,7 +488,7 @@ # case GCFLAG_CARDS_SET: emit a few instructions to do # directly the card flag setting loc_index = arglocs[1] - assert loc_index.is_reg() + assert loc_index.is_core_reg() # must save the register loc_index before it is mutated mc.PUSH([loc_index.value]) tmp1 = loc_index @@ -588,7 +588,7 @@ def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() if scale.value > 0: self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) ofs_loc = r.ip @@ -606,7 +606,7 @@ # vstr only supports imm offsets # so if the ofset is too large we add it to the base and use an # offset of 0 - if ofs_loc.is_reg(): + if ofs_loc.is_core_reg(): tmploc, save = self.get_tmp_reg([value_loc, base_loc, ofs_loc]) assert not save self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) @@ -644,13 +644,13 @@ def emit_op_raw_store(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) return fcond def emit_op_getarrayitem_gc(self, op, arglocs, regalloc, fcond): res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() signed = op.getdescr().is_item_signed() # scale the offset as required @@ -672,7 +672,7 @@ # vldr only supports imm offsets # if the offset is in a register we add it to the base and use a # tmp reg - if ofs_loc.is_reg(): + if ofs_loc.is_core_reg(): tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) assert not save self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) @@ -727,7 +727,7 @@ def emit_op_raw_load(self, op, arglocs, regalloc, fcond): res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() @@ -805,10 +805,10 @@ bytes_box = TempBox() bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, forbidden_vars) scale = self._get_unicode_item_scale() - if not length_loc.is_reg(): + if not length_loc.is_core_reg(): self.regalloc_mov(length_loc, bytes_loc) length_loc = bytes_loc - assert length_loc.is_reg() + assert length_loc.is_core_reg() self.mc.MOV_ri(r.ip.value, 1 << scale) self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) length_box = bytes_box @@ -835,8 +835,8 @@ # result = base_loc + (scaled_loc << scale) + static_offset def _gen_address(self, result, base_loc, scaled_loc, scale=0, static_offset=0): - assert scaled_loc.is_reg() - assert base_loc.is_reg() + assert scaled_loc.is_core_reg() + assert base_loc.is_core_reg() assert check_imm_arg(scale) assert check_imm_arg(static_offset) if scale > 0: @@ -1063,7 +1063,7 @@ def emit_op_cast_float_to_int(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert arg.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) return fcond @@ -1071,7 +1071,7 @@ def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() - assert arg.is_reg() + 
assert arg.is_core_reg() self.mc.MOV_ri(r.ip.value, 0) self.mc.VMOV_cr(res.value, arg.value, r.ip.value) self.mc.VCVT_int_to_float(res.value, res.value) @@ -1087,7 +1087,7 @@ loc = arglocs[0] res = arglocs[1] assert loc.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VMOV_rc(res.value, r.ip.value, loc.value) return fcond @@ -1108,7 +1108,7 @@ def emit_op_cast_float_to_singlefloat(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert arg.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) return fcond @@ -1116,7 +1116,7 @@ def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() - assert arg.is_reg() + assert arg.is_core_reg() self.mc.MOV_ri(r.ip.value, 0) self.mc.VMOV_cr(res.value, arg.value, r.ip.value) self.mc.VCVT_f32_f64(res.value, res.value) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -324,7 +324,7 @@ loc = r.fp arg = inputargs[i] i += 1 - if loc.is_reg(): + if loc.is_core_reg(): self.rm.reg_bindings[arg] = loc used[loc] = None elif loc.is_vfp_reg(): @@ -358,7 +358,7 @@ continue if box.type == REF and self.rm.is_still_alive(box): assert not noregs - assert loc.is_reg() + assert loc.is_core_reg() val = loc.value gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) for box, loc in self.fm.bindings.iteritems(): @@ -1154,7 +1154,7 @@ assert isinstance(arg, Box) loc = self.loc(arg) arglocs[i] = loc - if loc.is_reg(): + if loc.is_core_reg(): self.frame_manager.mark_as_free(arg) # descr._arm_arglocs = arglocs diff --git a/rpython/jit/backend/arm/test/test_jump.py b/rpython/jit/backend/arm/test/test_jump.py --- a/rpython/jit/backend/arm/test/test_jump.py +++ b/rpython/jit/backend/arm/test/test_jump.py @@ -255,7 +255,7 @@ else: newvalue = 'value-vfp-%d' % i regs2[loc.value] = newvalue - elif loc.is_reg(): + elif loc.is_core_reg(): regs1[loc.value] = 'value-int-%d' % i elif loc.is_stack(): stack[loc.position] = 'value-width%d-%d' % (loc.width, i) @@ -284,7 +284,7 @@ assert loc.width == expected_width*WORD if loc.is_vfp_reg(): return regs2[loc.value] - elif loc.is_reg(): + elif loc.is_core_reg(): return regs1[loc.value] elif loc.is_stack(): got = stack[loc.position] @@ -298,7 +298,7 @@ def write(loc, newvalue): if loc.is_vfp_reg(): regs2[loc.value] = newvalue - elif loc.is_reg(): + elif loc.is_core_reg(): regs1[loc.value] = newvalue elif loc.is_stack(): if loc.width > WORD: @@ -317,17 +317,17 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert src.is_reg() or src.is_vfp_reg() or src.is_stack() or src.is_imm_float() or src.is_imm() - assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + assert src.is_core_reg() or src.is_vfp_reg() or src.is_stack() or src.is_imm_float() or src.is_imm() + assert dst.is_core_reg() or dst.is_vfp_reg() or dst.is_stack() assert not (src.is_stack() and dst.is_stack()) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] - assert src.is_reg() or src.is_vfp_reg() or src.is_stack() + assert src.is_core_reg() or src.is_vfp_reg() or src.is_stack() extrapushes.append(read(src)) elif op[0] == 'pop': dst, = op[1:] - assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + assert dst.is_core_reg() or dst.is_vfp_reg() or dst.is_stack() write(dst, extrapushes.pop()) else: assert 0, "unknown op: %r" % (op,) 
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -47,7 +47,7 @@ input_i += 1 if arg.type == REF: loc = fail_locs[i] - if loc.is_reg(): + if loc.is_core_reg(): val = self.cpu.all_reg_indexes[loc.value] else: val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1551,14 +1551,14 @@ frame in jf_guard_exc """ if excvalloc is not None: - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() mc.MOV(excvalloc, heap(self.cpu.pos_exc_value())) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV(tmploc, heap(self.cpu.pos_exc_value())) mc.MOV(RawEbpLoc(ofs), tmploc) if exctploc is not None: - assert exctploc.is_reg() + assert exctploc.is_core_reg() mc.MOV(exctploc, heap(self.cpu.pos_exception())) mc.MOV(heap(self.cpu.pos_exception()), imm0) diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -45,7 +45,7 @@ def is_stack(self): return False - def is_reg(self): + def is_core_reg(self): return False def get_position(self): @@ -169,7 +169,7 @@ def is_float(self): return self.is_xmm - def is_reg(self): + def is_core_reg(self): return True class ImmediateAssemblerLocation(AssemblerLocation): From noreply at buildbot.pypy.org Wed Jun 12 13:10:54 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:10:54 +0200 (CEST) Subject: [pypy-commit] pypy default: add a testcase for the failing case of having a jump to a label having a float that was spilled but is currently loaded in a register Message-ID: <20130612111054.B87981C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64864:59fdc0758a45 Date: 2013-06-12 06:00 -0500 http://bitbucket.org/pypy/pypy/changeset/59fdc0758a45/ Log: add a testcase for the failing case of having a jump to a label having a float that was spilled but is currently loaded in a register diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -11,6 +11,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import JitCellToken, TargetToken from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.codewriter import longlong CPU = getcpuclass() @@ -261,3 +262,43 @@ l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') l2 = ('debug_print', targettoken.repr_of_descr() + ':9') assert ('jit-backend-counts', [l0, l1, l2]) in dlog + + + def test_label_float_in_reg_and_on_stack(self): + targettoken = TargetToken() + ops = """ + [i0, f3] + i2 = same_as(i0) # but forced to be in a register + force_spill(i2) + force_spill(f3) + f4 = float_add(f3, 5.0) + label(f3, f4, descr=targettoken) + force_spill(f3) + f5 = same_as(f3) # but forced to be in a register + finish(f5) + """ + faildescr = BasicFailDescr(2) + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + ops2 = """ + [i0, f1] + i1 = same_as(i0) + f2 = same_as(f1) + f3 = float_add(f1, 10.0) +
force_spill(f3) + force_spill(i1) + f4 = float_add(f3, f1) + jump(f3, f4, descr=targettoken) + """ + loop2 = parse(ops2, self.cpu, namespace=locals()) + looptoken2 = JitCellToken() + info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + + deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) + res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) + assert res == -13.5 + # + deadframe = self.cpu.execute_token(looptoken2, -9, longlong.getfloatstorage(-13.5)) + res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) + assert res == -3.5 From noreply at buildbot.pypy.org Wed Jun 12 13:10:55 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:10:55 +0200 (CEST) Subject: [pypy-commit] pypy default: (arigo, bivab): simple fix for a hard to track issue. When compiling a label we Message-ID: <20130612111055.E8B3A1C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64865:4283f7772bc0 Date: 2013-06-12 06:05 -0500 http://bitbucket.org/pypy/pypy/changeset/4283f7772bc0/ Log: (arigo, bivab): simple fix for a hard to track issue. When compiling a label we were keeping the stack location of previously spilled floats that were loaded in a reg when we hit the label. When jumping from somewhere else to the label the float value in the register is forgotten, because it is assumed to be on the stack too. diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1154,7 +1154,7 @@ assert isinstance(arg, Box) loc = self.loc(arg) arglocs[i] = loc - if loc.is_core_reg(): + if loc.is_core_reg() or loc.is_vfp_reg(): self.frame_manager.mark_as_free(arg) # descr._arm_arglocs = arglocs From noreply at buildbot.pypy.org Wed Jun 12 13:15:56 2013 From: noreply at buildbot.pypy.org (bivab) Date: Wed, 12 Jun 2013 13:15:56 +0200 (CEST) Subject: [pypy-commit] pypy default: dead code Message-ID: <20130612111556.64FD11C00B9@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64866:ab0dd631c220 Date: 2013-06-12 13:12 +0200 http://bitbucket.org/pypy/pypy/changeset/ab0dd631c220/ Log: dead code diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -227,18 +227,6 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _frame_bindings(self, locs, inputargs): - bindings = {} - i = 0 - for loc in locs: - if loc is None: - continue - arg = inputargs[i] - i += 1 - if not isinstance(loc, RegLoc): - bindings[arg] = loc - return bindings - def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py used = {} From noreply at buildbot.pypy.org Wed Jun 12 17:56:45 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 12 Jun 2013 17:56:45 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Merge default Message-ID: <20130612155645.EFF591C00B9@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64867:0466c79b5c38 Date: 2013-06-12 17:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0466c79b5c38/ Log: Merge default diff too long, truncating to 2000 out of 24532 lines diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++
b/lib-python/2.7/distutils/command/build_ext.py @@ -8,7 +8,7 @@ __revision__ = "$Id$" -import sys, os, string, re +import sys, os, string, re, imp from types import * from site import USER_BASE, USER_SITE from distutils.core import Command @@ -33,6 +33,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext (Command): @@ -677,10 +682,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - so_ext = get_config_var('SO') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + so_ext + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/distutils/sysconfig.py b/lib-python/2.7/distutils/sysconfig.py --- a/lib-python/2.7/distutils/sysconfig.py +++ b/lib-python/2.7/distutils/sysconfig.py @@ -1,30 +1,16 @@ -"""Provide access to Python's configuration information. The specific -configuration variables available depend heavily on the platform and -configuration. The values may be retrieved using -get_config_var(name), and the list of variables is available via -get_config_vars().keys(). Additional convenience functions are also -available. - -Written by: Fred L. Drake, Jr. -Email: -""" - -__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" - -import sys - # The content of this file is redirected from # sysconfig_cpython or sysconfig_pypy. +# All underscore names are imported too, because +# people like to use undocumented sysconfig._xxx +# directly. +import sys if '__pypy__' in sys.builtin_module_names: - from distutils.sysconfig_pypy import * - from distutils.sysconfig_pypy import _config_vars # needed by setuptools - from distutils.sysconfig_pypy import _variable_rx # read_setup_file() + from distutils import sysconfig_pypy as _sysconfig_module else: - from distutils.sysconfig_cpython import * - from distutils.sysconfig_cpython import _config_vars # needed by setuptools - from distutils.sysconfig_cpython import _variable_rx # read_setup_file() + from distutils import sysconfig_cpython as _sysconfig_module +globals().update(_sysconfig_module.__dict__) _USE_CLANG = None diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py --- a/lib-python/2.7/distutils/sysconfig_cpython.py +++ b/lib-python/2.7/distutils/sysconfig_cpython.py @@ -9,7 +9,7 @@ Email: """ -__revision__ = "$Id$" +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" import os import re diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -1,9 +1,17 @@ -"""PyPy's minimal configuration information. +"""Provide access to Python's configuration information. 
+This is actually PyPy's minimal configuration information. + +The specific configuration variables available depend heavily on the +platform and configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. """ +__revision__ = "$Id: sysconfig.py 85358 2010-10-10 09:54:59Z antoine.pitrou $" + import sys import os -import imp from distutils.errors import DistutilsPlatformError @@ -49,16 +57,11 @@ _config_vars = None -def _get_so_extension(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext - def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = _get_so_extension() or ".so" + g['SO'] = ".so" g['SOABI'] = g['SO'].rsplit('.')[0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -71,7 +74,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = _get_so_extension() or ".pyd" + g['SO'] = ".pyd" g['SOABI'] = g['SO'].rsplit('.')[0] global _config_vars diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py --- a/lib-python/2.7/logging/__init__.py +++ b/lib-python/2.7/logging/__init__.py @@ -134,21 +134,25 @@ DEBUG = 10 NOTSET = 0 -_levelNames = { - CRITICAL : 'CRITICAL', - ERROR : 'ERROR', - WARNING : 'WARNING', - INFO : 'INFO', - DEBUG : 'DEBUG', - NOTSET : 'NOTSET', - 'CRITICAL' : CRITICAL, - 'ERROR' : ERROR, - 'WARN' : WARNING, - 'WARNING' : WARNING, - 'INFO' : INFO, - 'DEBUG' : DEBUG, - 'NOTSET' : NOTSET, +_levelToName = { + CRITICAL: 'CRITICAL', + ERROR: 'ERROR', + WARNING: 'WARNING', + INFO: 'INFO', + DEBUG: 'DEBUG', + NOTSET: 'NOTSET', } +_nameToLevel = { + 'CRITICAL': CRITICAL, + 'ERROR': ERROR, + 'WARN': WARNING, + 'WARNING': WARNING, + 'INFO': INFO, + 'DEBUG': DEBUG, + 'NOTSET': NOTSET, +} +_levelNames = dict(_levelToName) +_levelNames.update(_nameToLevel) # backward compatibility def getLevelName(level): """ @@ -164,7 +168,7 @@ Otherwise, the string "Level %s" % level is returned. """ - return _levelNames.get(level, ("Level %s" % level)) + return _levelToName.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ @@ -174,8 +178,8 @@ """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
- _levelNames[level] = levelName - _levelNames[levelName] = level + _levelToName[level] = levelName + _nameToLevel[levelName] = level finally: _releaseLock() @@ -183,9 +187,9 @@ if isinstance(level, int): rv = level elif str(level) == level: - if level not in _levelNames: + if level not in _nameToLevel: raise ValueError("Unknown level: %r" % level) - rv = _levelNames[level] + rv = _nameToLevel[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv @@ -277,7 +281,7 @@ self.lineno = lineno self.funcName = func self.created = ct - self.msecs = (ct - long(ct)) * 1000 + self.msecs = (ct - int(ct)) * 1000 self.relativeCreated = (self.created - _startTime) * 1000 if logThreads and thread: self.thread = thread.get_ident() diff --git a/lib-python/2.7/logging/config.py b/lib-python/2.7/logging/config.py --- a/lib-python/2.7/logging/config.py +++ b/lib-python/2.7/logging/config.py @@ -156,7 +156,7 @@ h = klass(*args) if "level" in opts: level = cp.get(sectname, "level") - h.setLevel(logging._levelNames[level]) + h.setLevel(level) if len(fmt): h.setFormatter(formatters[fmt]) if issubclass(klass, logging.handlers.MemoryHandler): @@ -187,7 +187,7 @@ opts = cp.options(sectname) if "level" in opts: level = cp.get(sectname, "level") - log.setLevel(logging._levelNames[level]) + log.setLevel(level) for h in root.handlers[:]: root.removeHandler(h) hlist = cp.get(sectname, "handlers") @@ -237,7 +237,7 @@ existing.remove(qn) if "level" in opts: level = cp.get(sectname, "level") - logger.setLevel(logging._levelNames[level]) + logger.setLevel(level) for h in logger.handlers[:]: logger.removeHandler(h) logger.propagate = propagate diff --git a/lib-python/2.7/opcode.py b/lib-python/2.7/opcode.py --- a/lib-python/2.7/opcode.py +++ b/lib-python/2.7/opcode.py @@ -193,5 +193,6 @@ hasname.append(201) def_op('CALL_METHOD', 202) # #args not including 'self' def_op('BUILD_LIST_FROM_ARG', 203) +jrel_op('JUMP_IF_NOT_DEBUG', 204) # jump over assert statements del def_op, name_op, jrel_op, jabs_op diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -96,6 +96,7 @@ _realsocket = socket +_type = type # WSA error codes if sys.platform.lower().startswith("win"): @@ -173,31 +174,37 @@ __doc__ = _realsocket.__doc__ + __slots__ = ["_sock", "__weakref__"] + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): if _sock is None: _sock = _realsocket(family, type, proto) + elif _type(_sock) is _realsocket: + _sock._reuse() + # PyPy note about refcounting: implemented with _reuse()/_drop() + # on the class '_socket.socket'. Python 3 did it differently + # with a reference counter on this class 'socket._socketobject' + # instead, but it is a less compatible change (breaks eventlet). 
self._sock = _sock - self._io_refs = 0 - self._closed = False def send(self, data, flags=0): - return self._sock.send(data, flags=flags) + return self._sock.send(data, flags) send.__doc__ = _realsocket.send.__doc__ def recv(self, buffersize, flags=0): - return self._sock.recv(buffersize, flags=flags) + return self._sock.recv(buffersize, flags) recv.__doc__ = _realsocket.recv.__doc__ def recv_into(self, buffer, nbytes=0, flags=0): - return self._sock.recv_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recv_into(buffer, nbytes, flags) recv_into.__doc__ = _realsocket.recv_into.__doc__ def recvfrom(self, buffersize, flags=0): - return self._sock.recvfrom(buffersize, flags=flags) + return self._sock.recvfrom(buffersize, flags) recvfrom.__doc__ = _realsocket.recvfrom.__doc__ def recvfrom_into(self, buffer, nbytes=0, flags=0): - return self._sock.recvfrom_into(buffer, nbytes=nbytes, flags=flags) + return self._sock.recvfrom_into(buffer, nbytes, flags) recvfrom_into.__doc__ = _realsocket.recvfrom_into.__doc__ def sendto(self, data, param2, param3=None): @@ -208,13 +215,17 @@ sendto.__doc__ = _realsocket.sendto.__doc__ def close(self): - # This function should not reference any globals. See issue #808164. + s = self._sock + if type(s) is _realsocket: + s._drop() self._sock = _closedsocket() close.__doc__ = _realsocket.close.__doc__ def accept(self): sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr + sockobj = _socketobject(_sock=sock) + sock._drop() # already a copy in the _socketobject() + return sockobj, addr accept.__doc__ = _realsocket.accept.__doc__ def dup(self): @@ -228,24 +239,7 @@ Return a regular file object corresponding to the socket. The mode and bufsize arguments are as for the built-in open() function.""" - self._io_refs += 1 - return _fileobject(self, mode, bufsize) - - def _decref_socketios(self): - if self._io_refs > 0: - self._io_refs -= 1 - if self._closed: - self.close() - - def _real_close(self): - # This function should not reference any globals. See issue #808164. - self._sock.close() - - def close(self): - # This function should not reference any globals. See issue #808164. 
- self._closed = True - if self._io_refs <= 0: - self._real_close() + return _fileobject(self._sock, mode, bufsize) family = property(lambda self: self._sock.family, doc="the socket family") type = property(lambda self: self._sock.type, doc="the socket type") @@ -286,6 +280,8 @@ "_close"] def __init__(self, sock, mode='rb', bufsize=-1, close=False): + if type(sock) is _realsocket: + sock._reuse() self._sock = sock self.mode = mode # Not actually used in this version if bufsize < 0: @@ -320,11 +316,11 @@ if self._sock: self.flush() finally: - if self._sock: - if self._close: - self._sock.close() - else: - self._sock._decref_socketios() + s = self._sock + if type(s) is _realsocket: + s._drop() + if self._close: + self._sock.close() self._sock = None def __del__(self): diff --git a/lib-python/2.7/test/test_code.py b/lib-python/2.7/test/test_code.py --- a/lib-python/2.7/test/test_code.py +++ b/lib-python/2.7/test/test_code.py @@ -75,7 +75,7 @@ cellvars: () freevars: () nlocals: 0 -flags: 67 +flags: 1048643 consts: ("'doc string'", 'None') """ diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py --- a/lib-python/2.7/test/test_codecs.py +++ b/lib-python/2.7/test/test_codecs.py @@ -2,7 +2,11 @@ import unittest import codecs import locale -import sys, StringIO, _testcapi +import sys, StringIO +try: + import _testcapi +except ImportError: + _testcapi = None class Queue(object): """ @@ -1387,7 +1391,7 @@ decodedresult += reader.read() self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding)) - if encoding not in broken_incremental_coders: + if encoding not in broken_incremental_coders and _testcapi: # check incremental decoder/encoder (fetched via the Python # and C API) and iterencode()/iterdecode() try: diff --git a/lib-python/2.7/test/test_dis.py b/lib-python/2.7/test/test_dis.py --- a/lib-python/2.7/test/test_dis.py +++ b/lib-python/2.7/test/test_dis.py @@ -53,25 +53,26 @@ pass dis_bug1333982 = """\ - %-4d 0 LOAD_CONST 1 (0) - 3 POP_JUMP_IF_TRUE 41 - 6 LOAD_GLOBAL 0 (AssertionError) - 9 LOAD_FAST 0 (x) - 12 BUILD_LIST_FROM_ARG 0 - 15 GET_ITER - >> 16 FOR_ITER 12 (to 31) - 19 STORE_FAST 1 (s) - 22 LOAD_FAST 1 (s) - 25 LIST_APPEND 2 - 28 JUMP_ABSOLUTE 16 + %-4d 0 JUMP_IF_NOT_DEBUG 41 (to 44) + 3 LOAD_CONST 1 (0) + 6 POP_JUMP_IF_TRUE 44 + 9 LOAD_GLOBAL 0 (AssertionError) + 12 LOAD_FAST 0 (x) + 15 BUILD_LIST_FROM_ARG 0 + 18 GET_ITER + >> 19 FOR_ITER 12 (to 34) + 22 STORE_FAST 1 (s) + 25 LOAD_FAST 1 (s) + 28 LIST_APPEND 2 + 31 JUMP_ABSOLUTE 19 - %-4d >> 31 LOAD_CONST 2 (1) - 34 BINARY_ADD - 35 CALL_FUNCTION 1 - 38 RAISE_VARARGS 1 + %-4d >> 34 LOAD_CONST 2 (1) + 37 BINARY_ADD + 38 CALL_FUNCTION 1 + 41 RAISE_VARARGS 1 - %-4d >> 41 LOAD_CONST 0 (None) - 44 RETURN_VALUE + %-4d >> 44 LOAD_CONST 0 (None) + 47 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) diff --git a/lib-python/2.7/test/test_logging.py b/lib-python/2.7/test/test_logging.py --- a/lib-python/2.7/test/test_logging.py +++ b/lib-python/2.7/test/test_logging.py @@ -65,7 +65,8 @@ self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.saved_loggers = logger_dict.copy() - self.saved_level_names = logging._levelNames.copy() + self.saved_name_to_level = logging._nameToLevel.copy() + self.saved_level_to_name = logging._levelToName.copy() finally: logging._releaseLock() @@ -97,8 +98,10 @@ self.root_logger.setLevel(self.original_logging_level) 
logging._acquireLock() try: - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) + logging._levelToName.clear() + logging._levelToName.update(self.saved_level_to_name) + logging._nameToLevel.clear() + logging._nameToLevel.update(self.saved_name_to_level) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py --- a/lib-python/2.7/test/test_sysconfig.py +++ b/lib-python/2.7/test/test_sysconfig.py @@ -7,7 +7,8 @@ import subprocess from copy import copy, deepcopy -from test.test_support import run_unittest, TESTFN, unlink, get_attribute +from test.test_support import (run_unittest, TESTFN, unlink, get_attribute, + import_module) import sysconfig from sysconfig import (get_paths, get_platform, get_config_vars, @@ -236,7 +237,10 @@ def test_get_config_h_filename(self): config_h = sysconfig.get_config_h_filename() - self.assertTrue(os.path.isfile(config_h), config_h) + # import_module skips the test when the CPython C Extension API + # appears to not be supported + self.assertTrue(os.path.isfile(config_h) or + not import_module('_testcapi'), config_h) def test_get_scheme_names(self): wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user', diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -1,6 +1,9 @@ """Test cases for traceback module""" -from _testcapi import traceback_print +try: + from _testcapi import traceback_print +except ImportError: + traceback_print = None from StringIO import StringIO import sys import unittest @@ -176,6 +179,8 @@ class TracebackFormatTests(unittest.TestCase): def test_traceback_format(self): + if traceback_print is None: + raise unittest.SkipTest('Requires _testcapi') try: raise KeyError('blah') except KeyError: diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py --- a/lib-python/2.7/test/test_unicode.py +++ b/lib-python/2.7/test/test_unicode.py @@ -1609,7 +1609,10 @@ self.assertEqual("{}".format(u), '__unicode__ overridden') def test_encode_decimal(self): - from _testcapi import unicode_encodedecimal + try: + from _testcapi import unicode_encodedecimal + except ImportError: + raise unittest.SkipTest('Requires _testcapi') self.assertEqual(unicode_encodedecimal(u'123'), b'123') self.assertEqual(unicode_encodedecimal(u'\u0663.\u0661\u0664'), diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -130,7 +130,7 @@ RegrTest('test_bz2.py', usemodules='bz2'), RegrTest('test_calendar.py'), RegrTest('test_call.py', core=True), - RegrTest('test_capi.py'), + RegrTest('test_capi.py', usemodules='cpyext'), RegrTest('test_cd.py'), RegrTest('test_cfgparser.py'), RegrTest('test_cgi.py'), @@ -177,7 +177,7 @@ RegrTest('test_cprofile.py'), RegrTest('test_crypt.py', usemodules='crypt'), RegrTest('test_csv.py', usemodules='_csv'), - RegrTest('test_ctypes.py', usemodules="_rawffi thread"), + RegrTest('test_ctypes.py', usemodules="_rawffi thread cpyext"), RegrTest('test_curses.py'), RegrTest('test_datetime.py', usemodules='binascii struct'), RegrTest('test_dbm.py'), diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -120,6 +120,7 @@ return self._buffer[0] != 0 contents = 
property(getcontents, setcontents) + _obj = property(getcontents) # byref interface def _as_ffi_pointer_(self, ffitype): return as_ffi_pointer(self, ffitype) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -166,8 +166,7 @@ if self is StructOrUnion: return if '_fields_' not in self.__dict__: - self._fields_ = [] - _set_shape(self, [], self._is_union) + self._fields_ = [] # As a side-effet, this also sets the ffishape. __setattr__ = struct_setattr diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,60 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_ctypes_test.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_ctypes_test.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_ctypes_test' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_ctypes_test'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) - - +try: + import cpyext +except ImportError: + raise ImportError("No module named '_ctypes_test'") try: import _ctypes _ctypes.PyObj_FromPtr = None @@ -62,4 +9,5 @@ except ImportError: pass # obscure condition of _ctypes_test.py being imported by py.test else: - compile_shared() + import _pypy_testcapi + _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test') diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def 
addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py --- a/lib_pypy/_pypy_irc_topic.py +++ b/lib_pypy/_pypy_irc_topic.py @@ -1,4 +1,4 @@ -"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF +__doc__ = """eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF pglcrf unf n fcva bs 1/3 ' ' vf n fcnpr gbb Clguba 2.k rfg cerfdhr zbeg, ivir Clguba! diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_pypy_testcapi.py @@ -0,0 +1,61 @@ +import os, sys, imp +import tempfile + +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + + +def compile_shared(csource, modulename): + """Compile '_testcapi.c' or '_ctypes_test.c' into an extension module, + and import it. + """ + thisdir = os.path.dirname(__file__) + output_dir = tempfile.mkdtemp() + + from distutils.ccompiler import new_compiler + + compiler = new_compiler() + compiler.output_dir = output_dir + + # Compile .c file + include_dir = os.path.join(thisdir, '..', 'include') + if sys.platform == 'win32': + ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] + else: + ccflags = ['-fPIC', '-Wimplicit-function-declaration'] + res = compiler.compile([os.path.join(thisdir, csource)], + include_dirs=[include_dir], + extra_preargs=ccflags) + object_filename = res[0] + + # set link options + output_filename = modulename + _get_c_extension_suffix() + if sys.platform == 'win32': + # XXX libpypy-c.lib is currently not installed automatically + library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + if not os.path.exists(library + '.lib'): + #For a nightly build + library = os.path.join(thisdir, '..', 'include', 'python27') + if not os.path.exists(library + '.lib'): + # For a local translation + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + libraries = [library, 'oleaut32'] + extra_ldargs = ['/MANIFEST', # needed for VC10 + '/EXPORT:init' + modulename] + else: + libraries = [] + extra_ldargs = [] + + # link the dynamic library + compiler.link_shared_object( + [object_filename], + output_filename, + libraries=libraries, + extra_preargs=extra_ldargs) + + # Now import the newly created library, it will replace the original + # module in sys.modules + fp, filename, description = imp.find_module(modulename, path=[output_dir]) + imp.load_module(modulename, fp, filename, description) diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,57 +1,7 @@ -import os, sys -import tempfile - -def compile_shared(): - """Compile '_testcapi.c' into an extension module, and import it - """ - thisdir = os.path.dirname(__file__) - output_dir = tempfile.mkdtemp() - - from 
distutils.ccompiler import new_compiler - from distutils import sysconfig - - compiler = new_compiler() - compiler.output_dir = output_dir - - # Compile .c file - include_dir = os.path.join(thisdir, '..', 'include') - if sys.platform == 'win32': - ccflags = ['-D_CRT_SECURE_NO_WARNINGS'] - else: - ccflags = ['-fPIC', '-Wimplicit-function-declaration'] - res = compiler.compile([os.path.join(thisdir, '_testcapimodule.c')], - include_dirs=[include_dir], - extra_preargs=ccflags) - object_filename = res[0] - - # set link options - output_filename = '_testcapi' + sysconfig.get_config_var('SO') - if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') - if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') - libraries = [library, 'oleaut32'] - extra_ldargs = ['/MANIFEST', # needed for VC10 - '/EXPORT:init_testcapi'] - else: - libraries = [] - extra_ldargs = [] - - # link the dynamic library - compiler.link_shared_object( - [object_filename], - output_filename, - libraries=libraries, - extra_preargs=extra_ldargs) - - # Now import the newly created library, it will replace our module in sys.modules - import imp - fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) - -compile_shared() +try: + import cpyext +except ImportError: + raise ImportError("No module named '_testcapi'") +else: + import _pypy_testcapi + _pypy_testcapi.compile_shared('_testcapimodule.c', '_testcapi') diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.6" -__version_info__ = (0, 6) +__version__ = "0.7" +__version_info__ = (0, 7) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -73,15 +73,15 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - BVoidP = self._get_cached_btype(model.voidp_type) + self.BVoidP = self._get_cached_btype(model.voidp_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): - FFI.NULL = self.cast(BVoidP, 0) + FFI.NULL = self.cast(self.BVoidP, 0) FFI.CData, FFI.CType = backend._get_types() else: # ctypes backend: attach these constants to the instance - self.NULL = self.cast(BVoidP, 0) + self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False): @@ -346,6 +346,12 @@ self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') + def new_handle(self, x): + return self._backend.newp_handle(self.BVoidP, x) + + def from_handle(self, x): + return self._backend.from_handle(x) + def _make_ffi_library(ffi, libname, flags): import os @@ -355,13 +361,13 @@ backend = ffi._backend try: if '.' 
not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # @@ -372,8 +378,8 @@ BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) - except KeyError: - raise AttributeError(name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) library.__dict__[name] = value return # diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -16,6 +16,7 @@ class CTypesData(object): __metaclass__ = CTypesType __slots__ = ['__weakref__'] + __name__ = '' def __init__(self, *args): raise TypeError("cannot instantiate %r" % (self.__class__,)) @@ -491,6 +492,8 @@ elif BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char'))): kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' else: kind = 'generic' # @@ -546,13 +549,13 @@ def __setitem__(self, index, value): self._as_ctype_ptr[index] = BItem._to_ctypes(value) - if kind == 'charp': + if kind == 'charp' or kind == 'voidp': @classmethod - def _arg_to_ctypes(cls, value): - if isinstance(value, bytes): - return ctypes.c_char_p(value) + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) else: - return super(CTypesPtr, cls)._arg_to_ctypes(value) + return super(CTypesPtr, cls)._arg_to_ctypes(*value) if kind == 'charp' or kind == 'bytep': def _to_string(self, maxlen): diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -15,6 +15,20 @@ def patch_extension_kwds(self, kwds): pass + def find_module(self, module_name, path, so_suffix): + try: + f, filename, descr = imp.find_module(module_name, path) + except ImportError: + return None + if f is not None: + f.close() + # Note that after a setuptools installation, there are both .py + # and .so files with the same basename. The code here relies on + # imp.find_module() locating the .so in priority. 
+ if descr[0] != so_suffix: + return None + return filename + def collect_types(self): self._typesdict = {} self._generate("collecttype") @@ -142,6 +156,9 @@ class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() module._cffi_setup(lst, ffiplatform.VerificationError, library) # @@ -427,9 +444,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -687,7 +704,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) # ---------- diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -1,4 +1,4 @@ -import sys +import sys, os import types from . import model, ffiplatform @@ -20,6 +20,16 @@ # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) + def find_module(self, module_name, path, so_suffix): + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + return None + def collect_types(self): pass # not needed in the generic engine @@ -64,6 +74,9 @@ class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. 
This will set @@ -158,21 +171,22 @@ newfunction = self._load_constant(False, tp, name, module) else: indirections = [] - if any(isinstance(type, model.StructOrUnion) for type in tp.args): + if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): indirect_args = [] - for i, type in enumerate(tp.args): - if isinstance(type, model.StructOrUnion): - type = model.PointerType(type) - indirections.append((i, type)) - indirect_args.append(type) + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) tp = model.FunctionPtrType(tuple(indirect_args), tp.result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) - for i, type in indirections: - newfunction = self._make_struct_wrapper(newfunction, i, type) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ) setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp): backend = self.ffi._backend @@ -216,9 +230,9 @@ prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') - for fname, ftype, _ in tp.enumfields(): + for fname, ftype, fbitsize in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) - and ftype.is_integer_type()): + and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: @@ -380,6 +394,7 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # enums @@ -427,6 +442,7 @@ def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers @@ -440,6 +456,7 @@ def _loaded_gen_macro(self, tp, name, module, library): value = self._load_constant(True, tp, name, module) setattr(library, name, value) + type(library)._cffi_dir.append(name) # ---------- # global variables @@ -465,6 +482,7 @@ BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) + type(library)._cffi_dir.append(name) return # remove ptr= from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. @@ -476,7 +494,8 @@ return ptr[0] def setter(library, value): ptr[0] = value - setattr(library.__class__, name, property(getter, setter)) + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) cffimod_header = r''' #include diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -102,21 +102,10 @@ path = pkg.__path__ else: path = None - try: - f, filename, descr = imp.find_module(self.get_module_name(), - path) - except ImportError: + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffix()) + if filename is None: return - if f is not None: - f.close() - if filename.lower().endswith('.py'): - # on PyPy, if there are both .py and .pypy-19.so files in - # the same directory, the .py file is returned. 
That's the - # case after a setuptools installation. We never want to - # load the .py file here... - filename = filename[:-3] + _get_so_suffix() - if not os.path.isfile(filename): - return self.modulefilename = filename self._vengine.collect_types() self._has_module = True diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info new file mode 100644 --- /dev/null +++ b/lib_pypy/greenlet.egg-info @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: greenlet +Version: 0.4.0 +Summary: Lightweight in-process concurrent programming +Home-page: https://github.com/python-greenlet/greenlet +Author: Ralf Schmitt (for CPython), PyPy team +Author-email: pypy-dev at python.org +License: MIT License +Description: UNKNOWN +Platform: UNKNOWN diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -27,7 +27,8 @@ BoolOption("completer", "use readline commandline completer", default=False, cmdline="-C"), BoolOption("optimize", - "dummy optimization flag for compatibility with CPython", + "skip assert statements and remove docstrings when importing modules" + " (this is -OO in regular CPython)", default=False, cmdline="-O"), BoolOption("no_site_import", "do not 'import site' on initialization", default=False, cmdline="-S"), @@ -94,6 +95,17 @@ space.setitem(space.sys.w_dict, space.wrap('executable'), space.wrap(argv[0])) + if interactiveconfig.optimize: + #change the optimize flag's value and set __debug__ to False + space.appexec([], """(): + import sys + flags = list(sys.flags) + flags[6] = 2 + sys.flags = type(sys.flags)(flags) + import __pypy__ + __pypy__.set_debug(False) + """) + # call pypy_find_stdlib: the side-effect is that it sets sys.prefix and # sys.exec_prefix executable = argv[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -64,7 +64,8 @@ del working_modules["termios"] del working_modules["_minimal_curses"] - del working_modules["cppyy"] # not tested on win32 + if "cppyy" in working_modules: + del working_modules["cppyy"] # not tested on win32 # The _locale module is needed by site.py on Windows default_modules["_locale"] = None @@ -77,7 +78,8 @@ del working_modules["_minimal_curses"] del working_modules["termios"] del working_modules["_multiprocessing"] # depends on rctime - del working_modules["cppyy"] # depends on ctypes + if "cppyy" in working_modules: + del working_modules["cppyy"] # depends on ctypes module_dependencies = { @@ -120,12 +122,10 @@ __import__(name) except (ImportError, CompilationError, py.test.skip.Exception), e: errcls = e.__class__.__name__ - config.add_warning( + raise Exception( "The module %r is disabled\n" % (modname,) + "because importing %s raised %s\n" % (name, errcls) + str(e)) - raise ConflictConfigError("--withmod-%s: %s" % (modname, - errcls)) return validator else: return None @@ -216,10 +216,6 @@ "(the empty string and potentially single-char strings)", default=False), - BoolOption("withsmalltuple", - "use small tuples", - default=False), - BoolOption("withspecialisedtuple", "use specialised tuples", default=False), @@ -364,6 +360,7 @@ # ignore names from 'essential_modules', notably 'exceptions', which # may not be present in config.objspace.usemodules at all modules = [name for name in modules if name not in essential_modules] + config.objspace.usemodules.suggest(**dict.fromkeys(modules, True)) def enable_translationmodules(config): diff --git a/pypy/doc/__pypy__-module.rst 
b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -1,3 +1,7 @@ + +.. comment: this document is very incomplete, should we generate + it automatically? + ======================= The ``__pypy__`` module ======================= diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -339,8 +339,9 @@ + methods and other class attributes do not change after startup + single inheritance is fully supported -+ simple mixins work too, but the mixed in class needs a ``_mixin_ = True`` - class attribute ++ simple mixins somewhat work too, but the mixed in class needs a + ``_mixin_ = True`` class attribute. isinstance checks against the + mixin type will fail when translated. + classes are first-class objects too diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.0.0' +release = '2.0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -163,6 +163,9 @@ $ genreflex MyClass.h $ g++ -fPIC -rdynamic -O2 -shared -I$REFLEXHOME/include MyClass_rflx.cpp -o libMyClassDict.so -L$REFLEXHOME/lib -lReflex +Next, make sure that the library can be found through the dynamic lookup path +(the ``LD_LIBRARY_PATH`` environment variable on Linux, ``PATH`` on Windows), +for example by adding ".". Now you're ready to use the bindings. Since the bindings are designed to look pythonistic, it should be straightforward:: diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -8,7 +8,8 @@ interpreter is written mostly in RPython (with pieces in Python), while the RPython compiler is written in Python. The hard to understand part is that Python is a meta-programming language for RPython, that is, -RPython is considered from live objects **after** the imports are done. +"being valid RPython" is a question that only makes sense on the +live objects **after** the imports are done. This might require more explanation. You start writing RPython from ``entry_point``, a good starting point is ``rpython/translator/goal/targetnopstandalone.py``. This does not do all that @@ -37,7 +38,7 @@ In this example ``entry_point`` is RPython, ``add`` and ``sub`` are RPython, however, ``generator`` is not. -A good introductory level articles are available: +The following introductory level articles are available: * Laurence Tratt -- `Fast Enough VMs in Fast Enough Time`_. diff --git a/pypy/doc/how-to-contribute.rst b/pypy/doc/how-to-contribute.rst --- a/pypy/doc/how-to-contribute.rst +++ b/pypy/doc/how-to-contribute.rst @@ -28,7 +28,8 @@ Layers ------ -PyPy has layers. Those layers help us keep the respective parts separated enough +PyPy has layers. Just like Ogres or onions. +Those layers help us keep the respective parts separated enough to be worked on independently and make the complexity manageable. This is, again, just a sanity requirement for such a complex project. 
For example writing a new optimization for the JIT usually does **not** involve touching a Python diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -22,7 +22,8 @@ will capture the revision number of this change for the release; some of the next updates may be done before or after branching; make sure things are ported back to the trunk and to the branch as - necessary + necessary; also update the version number in pypy/doc/conf.py, + and in pypy/doc/index.rst * update pypy/doc/contributor.rst (and possibly LICENSE) * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.0`_: the latest official release +* `Release 2.0.2`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.0`: http://pypy.org/download.html +.. _`Release 2.0.2`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -16,7 +16,10 @@ Inspect interactively after running script. -O - Dummy optimization flag for compatibility with C Python. + Skip assert statements. + +-OO + Remove docstrings when importing modules in addition to -O. -c *cmd* Program passed in as CMD (terminates option list). diff --git a/pypy/doc/release-2.0.1.rst b/pypy/doc/release-2.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.1.rst @@ -0,0 +1,46 @@ +============================== +PyPy 2.0.1 - Bohr Smørrebrød +============================== + +We're pleased to announce PyPy 2.0.1. This is a stable bugfix release +over `2.0`_. You can download it here: + + http://pypy.org/download.html + +The fixes are mainly about fatal errors or crashes in our stdlib. See +below for more details. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +- fix an occasional crash in the JIT that ends in `RPython Fatal error: + NotImplementedError`__. + +- `id(x)` is now always a positive number (except on int/float/long/complex). + This fixes an issue in ``_sqlite.py`` (mostly for 32-bit Linux). + +- fix crashes of callback-from-C-functions (with cffi) when used together + with Stackless features, on asmgcc (i.e. Linux only). Now `gevent should + work better`__. + +- work around an eventlet issue with `socket._decref_socketios()`__. + +.. __: https://bugs.pypy.org/issue1482 +.. __: http://mail.python.org/pipermail/pypy-dev/2013-May/011362.html +.. __: https://bugs.pypy.org/issue1468 +.. _2.0: release-2.0.0.html + +Cheers, +arigo et. al. 
for the PyPy team diff --git a/pypy/doc/release-2.0.2.rst b/pypy/doc/release-2.0.2.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.0.2.rst @@ -0,0 +1,46 @@ +========================= +PyPy 2.0.2 - Fermi Panini +========================= + +We're pleased to announce PyPy 2.0.2. This is a stable bugfix release +over `2.0`_ and `2.0.1`_. You can download it here: + + http://pypy.org/download.html + +It fixes a crash in the JIT when calling external C functions (with +ctypes/cffi) in a multithreaded context. + +.. _2.0: release-2.0.0.html +.. _2.0.1: release-2.0.1.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.0 and cpython 2.7.3`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64 or +Windows 32. Support for ARM is progressing but not bug-free yet. + +.. _`pypy 2.0 and cpython 2.7.3`: http://speed.pypy.org + +Highlights +========== + +This release contains only the fix described above. A crash (or wrong +results) used to occur if all these conditions were true: + +- your program is multithreaded; + +- it runs on a single-core machine or a heavily-loaded multi-core one; + +- it uses ctypes or cffi to issue external calls to C functions. + +This was fixed in the branch `emit-call-x86`__ (see the example file +``bug1.py``). + +.. __: https://bitbucket.org/pypy/pypy/commits/7c80121abbf4 + +Cheers, +arigo et. al. for the PyPy team diff --git a/pypy/doc/rffi.rst b/pypy/doc/rffi.rst --- a/pypy/doc/rffi.rst +++ b/pypy/doc/rffi.rst @@ -5,7 +5,7 @@ Purpose ------- -This document describes an FFI for RPython language, concentrating +This document describes an FFI for the RPython language, concentrating on low-level backends like C. It describes how to declare and call low-level (C) functions from RPython level. @@ -50,7 +50,7 @@ ------ In rffi_ there are various declared types for C-structures, like CCHARP -(char*), SIZE_T (size_t) and others. refer to file for details. +(char*), SIZE_T (size_t) and others. Refer to file for details. Instances of non-primitive types must be alloced by hand, with call to lltype.malloc, and freed by lltype.free both with keyword argument flavor='raw'. 
There are several helpers like string -> char* diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -19,23 +19,28 @@ branches.discard('default') return startrev, branches -def get_merged_branches(path, startrev, endrev): - if getstatusoutput('hg root')[0]: +def get_merged_branches(path, startrev, endrev, current_branch=None): + errcode, wc_branch = getstatusoutput('hg branch') + if errcode != 0: py.test.skip('no Mercurial repo') + if current_branch is None: + current_branch = wc_branch # X = take all the merges which are descendants of startrev and are on default # revset = all the parents of X which are not on default # ===> # revset contains all the branches which have been merged to default since # startrev - revset = 'parents(%s::%s and \ + revset = "parents(%s::%s and \ merge() and \ - branch(default)) and \ - not branch(default)' % (startrev, endrev) + branch('%s')) and \ + not branch('%s')" % (startrev, endrev, + current_branch, current_branch) cmd = r'hg log -R "%s" -r "%s" --template "{branches}\n"' % (path, revset) out = getoutput(cmd) branches = set(map(str.strip, out.splitlines())) - return branches + branches.discard("default") + return branches, current_branch def test_parse_doc(): @@ -65,7 +70,8 @@ assert branches == set(['foobar', 'hello']) def test_get_merged_branches(): - branches = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93') + branches, _ = get_merged_branches(ROOT, 'f34f0c11299f', '79770e0c2f93', + 'default') assert branches == set(['numpy-indexing-by-arrays-bool', 'better-jit-hooks-2', 'numpypy-ufuncs']) @@ -76,7 +82,9 @@ whatsnew_list.sort() last_whatsnew = whatsnew_list[-1].read() startrev, documented = parse_doc(last_whatsnew) - merged = get_merged_branches(ROOT, startrev, '') + merged, branch = get_merged_branches(ROOT, startrev, '') + merged.discard('default') + merged.discard('') not_documented = merged.difference(documented) not_merged = documented.difference(merged) print 'Branches merged but not documented:' @@ -85,4 +93,6 @@ print 'Branches documented but not merged:' print '\n'.join(not_merged) print - assert not not_documented and not not_merged + assert not not_documented + if branch == 'default': + assert not not_merged diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -7,3 +7,48 @@ .. branch: numpy-pickle Pickling of numpy arrays and dtypes (including record dtypes) + +.. branch: remove-array-smm +Remove multimethods in the arraymodule + +.. branch: callback-stacklet +Fixed bug when switching stacklets from a C callback + +.. branch: remove-set-smm +Remove multi-methods on sets + +.. branch: numpy-subarrays +Implement subarrays for numpy + +.. branch: remove-dict-smm +Remove multi-methods on dict + +.. branch: remove-list-smm-2 +Remove remaining multi-methods on list + +.. branch: arm-stacklet +Stacklet support for ARM, enables _continuation support + +.. branch: remove-tuple-smm +Remove multi-methods on tuple + +.. branch: remove-iter-smm +Remove multi-methods on iterators + +.. branch: emit-call-x86 +.. branch: emit-call-arm + +.. branch: on-abort-resops +Added list of resops to the pypyjit on_abort hook. + +.. branch: logging-perf +Speeds up the stdlib logging module + +.. branch: operrfmt-NT +Adds a couple convenient format specifiers to operationerrfmt + +.. branch: win32-fixes3 +Skip and fix some non-translated (own) tests for win32 builds + +.. 
branch: ctypes-byref +Add the '_obj' attribute on ctypes pointer() and byref() objects diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -2,7 +2,7 @@ PyPy on Windows =============== -Pypy is supported on Windows platforms, starting with Windows 2000. +PyPy is supported on Windows platforms, starting with Windows 2000. The following text gives some hints about how to translate the PyPy interpreter. @@ -199,9 +199,9 @@ or such, depending on your mingw64 download. -hacking on Pypy with the mingw compiler +hacking on PyPy with the mingw compiler ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Since hacking on Pypy means running tests, you will need a way to specify +Since hacking on PyPy means running tests, you will need a way to specify the mingw compiler when hacking (as opposed to translating). As of March 2012, --cc is not a valid option for pytest.py. However if you set an environment variable CC to the compliter exe, testing will use it. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -10,6 +10,8 @@ from rpython.config.config import ConflictConfigError from pypy.tool.option import make_objspace from pypy.conftest import pypydir +from rpython.rlib import rthread +from pypy.module.thread import os_thread thisdir = py.path.local(__file__).dirpath() @@ -120,6 +122,24 @@ source = rffi.charp2str(ll_source) return _pypy_execute_source(source) + @entrypoint('main', [], c_name='pypy_init_threads') + def pypy_init_threads(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + rffi.aroundstate.before() + + @entrypoint('main', [], c_name='pypy_thread_attach') + def pypy_thread_attach(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + os_thread.bootstrapper.acquire(space, None, None) + rthread.gc_thread_start() + os_thread.bootstrapper.nbthreads += 1 + os_thread.bootstrapper.release() + rffi.aroundstate.before() + w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), space.builtin_modules['__builtin__']) @@ -137,6 +157,8 @@ return 0 return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} def call_finish(space): diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -2,8 +2,8 @@ # App-level version of py.py. # See test/test_app_main. 
-# Missing vs CPython: -d, -OO, -t, -v, -x, -3 -"""\ +# Missing vs CPython: -d, -t, -v, -x, -3 +USAGE1 = __doc__ = """\ Options and arguments (and corresponding environment variables): -B : don't write .py[co] files on import; also PYTHONDONTWRITEBYTECODE=x -c cmd : program passed in as string (terminates option list) @@ -12,7 +12,8 @@ -i : inspect interactively after running script; forces a prompt even if stdin does not appear to be a terminal; also PYTHONINSPECT=x -m mod : run library module as a script (terminates option list) --O : dummy optimization flag for compatibility with CPython +-O : skip assert statements +-OO : remove docstrings when importing modules in addition to -O -R : ignored (see http://bugs.python.org/issue14621) -Q arg : division options: -Qold (default), -Qwarn, -Qwarnall, -Qnew -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE @@ -27,7 +28,6 @@ PyPy options and arguments: --info : print translation information about this PyPy executable """ -USAGE1 = __doc__ # Missing vs CPython: PYTHONHOME, PYTHONCASEOK USAGE2 = """ Other environment variables: @@ -470,6 +470,10 @@ sys.py3kwarning = bool(sys.flags.py3k_warning) sys.dont_write_bytecode = bool(sys.flags.dont_write_bytecode) + if sys.flags.optimize >= 1: + import __pypy__ + __pypy__.set_debug(False) + if sys.py3kwarning: print >> sys.stderr, ( "Warning: pypy does not implement py3k warnings") diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -86,12 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - w_type = space.type(w_stararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after * must be " - "a sequence, not %s" % (typename,))) + "argument after * must be a sequence, not %T", w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -116,12 +113,10 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - w_type = space.type(w_starstararg) - typename = w_type.getname(space) - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("argument after ** must be " - "a mapping, not %s" % (typename,))) + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -245,6 +245,8 @@ if w_len is None: w_len = space.len(self.w_consts) space.setitem(self.w_consts, w_key, w_len) + if space.int_w(w_len) == 0: + self.scope.doc_removable = False return space.int_w(w_len) def _make_key(self, obj): @@ -632,6 +634,7 @@ ops.JUMP_IF_FALSE_OR_POP : 0, ops.POP_JUMP_IF_TRUE : -1, ops.POP_JUMP_IF_FALSE : -1, + ops.JUMP_IF_NOT_DEBUG : 0, ops.BUILD_LIST_FROM_ARG: 1, } diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2793,8 +2793,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2835,8 +2834,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2881,8 +2879,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2925,8 +2922,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2971,8 +2967,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2993,8 +2988,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3024,8 +3018,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3046,8 +3039,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3064,8 +3056,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if 
w_self.body is None: list_w = [] @@ -3081,8 +3072,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3131,8 +3121,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3149,8 +3138,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3166,8 +3154,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3183,8 +3170,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - typename = space.type(w_self).getname(space) - raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list') + raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: From noreply at buildbot.pypy.org Wed Jun 12 19:12:18 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 19:12:18 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix the reference in stolen objects: from protected to public stubs. Message-ID: <20130612171218.822B01C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r99:2632ae98315f Date: 2013-06-12 15:21 +0200 http://bitbucket.org/pypy/stmgc/changeset/2632ae98315f/ Log: Fix the reference in stolen objects: from protected to public stubs. 
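(Illustrative sketch, not part of the changeset: the fix described in the Log above means that when one thread steals a protected object from another, every pointer inside the stolen object that still references a protected copy gets rewritten to reference a freshly allocated public stub, whose h_revision word points back at the protected original. A minimal, hypothetical version of that rewrite is shown here; the struct layout, flag names and allocator are simplified stand-ins and only the h_tid/h_revision naming mirrors the stmgc headers. The real diff below additionally deduplicates stubs through a small 'all_stubs' dictionary, so stealing the same object twice reuses one stub, and allocates the stubs from the foreign thread's descriptor.)

/* Hypothetical sketch of the protected -> public-stub rewrite; the layout,
   flags and allocation here are simplified stand-ins, not the real stmgc ones. */
#include <stdlib.h>

struct obj_s {
    unsigned long h_tid;        /* flag bits, e.g. PUBLIC or STUB */
    unsigned long h_revision;   /* revision number or tagged pointer */
};

#define FLAG_PUBLIC 0x1UL
#define FLAG_STUB   0x2UL

/* Rewrite *pobj: if it points to a protected (non-public) copy, replace it
   with a public stub whose h_revision is the protected address tagged with
   the low bit 2, meaning "follow me to reach the protected original". */
static void replace_protected_with_stub(struct obj_s **pobj)
{
    struct obj_s *obj = *pobj, *stub;
    if (obj == NULL || (obj->h_tid & FLAG_PUBLIC) != 0)
        return;                                  /* already public: keep it */
    stub = calloc(1, sizeof(struct obj_s));
    stub->h_tid = FLAG_PUBLIC | FLAG_STUB;
    stub->h_revision = ((unsigned long)obj) | 2; /* tagged back-pointer */
    *pobj = stub;                                /* stolen object now holds a public stub */
}
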
diff --git a/c4/lists.c b/c4/lists.c --- a/c4/lists.c +++ b/c4/lists.c @@ -22,19 +22,6 @@ memset(g2l, 0, sizeof(struct G2L)); } -struct G2L *g2l_malloc(void) -{ - struct G2L *g2l = malloc(sizeof(struct G2L)); - memset(g2l, 0, sizeof(struct G2L)); - return g2l; -} - -void g2l_free(struct G2L *g2l) -{ - free(g2l->raw_start); - free(g2l); -} - wlog_t *_g2l_find(char *entry, gcptr addr) { revision_t key = (revision_t)addr; diff --git a/c4/lists.h b/c4/lists.h --- a/c4/lists.h +++ b/c4/lists.h @@ -35,8 +35,9 @@ void g2l_clear(struct G2L *g2l); void g2l_delete(struct G2L *g2l); -struct G2L *g2l_malloc(void); -void g2l_free(struct G2L *g2l); +static inline void g2l_delete_not_used_any_more(struct G2L *g2l) { + free(g2l->raw_start); +} static inline int g2l_any_entry(struct G2L *g2l) { return g2l->raw_current != g2l->raw_start; diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -13,6 +13,8 @@ gcptr stm_stub_malloc(struct tx_public_descriptor *pd) { + assert(pd->collection_lock != 0); + gcptr p = pd->stub_free_list; if (p == NULL) { assert(sizeof(struct stub_block_s) == STUB_BLOCK_SIZE); @@ -43,6 +45,43 @@ return p; } + +struct tx_steal_data { + struct tx_public_descriptor *foreign_pd; + struct G2L all_stubs; /* { protected: public_stub } */ +}; +static __thread struct tx_steal_data *steal_data; + +static void replace_ptr_to_protected_with_stub(gcptr *pobj) +{ + gcptr stub, obj = *pobj; + if (obj == NULL || (obj->h_tid & GCFLAG_PUBLIC) != 0) + return; + + /* we use 'all_stubs', a dictionary, in order to try to avoid + duplicate stubs for the same object. XXX maybe it would be + better to use a fast approximative cache that stays around for + several stealings. */ + struct tx_steal_data *sd = steal_data; + wlog_t *item; + G2L_FIND(sd->all_stubs, obj, item, goto not_found); + + /* found already */ + stub = item->val; + assert(stub->h_revision == (((revision_t)obj) | 2)); + goto done; + + not_found: + stub = stm_stub_malloc(sd->foreign_pd); + stub->h_tid = obj->h_tid | GCFLAG_PUBLIC | GCFLAG_STUB; + stub->h_revision = ((revision_t)obj) | 2; + g2l_insert(&sd->all_stubs, obj, stub); + + done: + *pobj = stub; + fprintf(stderr, " stolen: fixing *%p: %p -> %p\n", pobj, obj, stub); +} + void stm_steal_stub(gcptr P) { struct tx_public_descriptor *foreign_pd = STUB_THREAD(P); @@ -81,6 +120,9 @@ fprintf(stderr, "stolen: %p -> %p - - -> %p\n", P, B, L); L = B; } + else { + fprintf(stderr, "stolen: %p -> %p\n", P, L); + } /* Here L is a protected (or backup) copy, and we own the foreign thread's collection_lock, so we can read/write the flags. Change @@ -102,6 +144,17 @@ odd number that is also valid on a public up-to-date object. */ + /* Fix the content of the object: we need to change all pointers + that reference protected copies into pointers that reference + stub copies. + */ + struct tx_steal_data sd; + sd.foreign_pd = foreign_pd; + memset(&sd.all_stubs, 0, sizeof(sd.all_stubs)); + steal_data = &sd; + stmcb_trace(L, &replace_ptr_to_protected_with_stub); + g2l_delete_not_used_any_more(&sd.all_stubs); + /* If another thread (the foreign or a 3rd party) does a read barrier from P, it must only reach L if all writes to L are visible; i.e. 
it must not see P->h_revision => L that still diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -219,6 +219,7 @@ def f2(r): r.wait(2) p2 = lib.stm_read_barrier(p) # steals + assert classify(p2) == "public" assert lib.rawgetlong(p2, 0) == 2782172 assert p2 == lib.stm_read_barrier(p) # short-circuit h_revision assert p.h_revision == int(ffi.cast("revision_t", p2)) @@ -323,4 +324,40 @@ test_stealing_while_modifying(aborting=True) def test_stub_for_refs_from_stolen(): - xxx + p = palloc_refs(1) + qlist = [] + def f1(r): + assert (p.h_tid & GCFLAG_PUBLIC_TO_PRIVATE) == 0 + p1 = lib.stm_write_barrier(p) # private copy + assert p1 != p + assert classify(p) == "public" + assert classify(p1) == "private" + assert p.h_tid & GCFLAG_PUBLIC_TO_PRIVATE + q1 = nalloc(HDR + WORD) + qlist.append(q1) + lib.setlong(q1, 0, -29187) + lib.setptr(p1, 0, q1) + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p) == "public" + assert classify(p1) == "protected" + assert classify(follow_revision(p)) == "stub" + assert p1.h_revision & 1 + r.set(2) + r.wait(3) # wait until the other thread really starts + def f2(r): + r.wait(2) + r.set(3) + p2 = lib.stm_read_barrier(p) # steals + assert classify(p2) == "public" + q2 = lib.getptr(p2, 0) + assert q2 != ffi.NULL + assert q2 != qlist[0] + assert classify(q2) == "stub" + assert q2.h_revision % 4 == 2 + q3 = lib.stm_read_barrier(q2) + assert q3 != q2 + assert q3 == qlist[0] + assert classify(q3) == "public" # has been stolen + assert lib.getlong(q3, 0) == -29187 + run_parallel(f1, f2) From noreply at buildbot.pypy.org Wed Jun 12 19:12:19 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 19:12:19 +0200 (CEST) Subject: [pypy-commit] stmgc default: progress Message-ID: <20130612171219.B2CA21C0EB9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r100:3f28eb08b77f Date: 2013-06-12 19:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/3f28eb08b77f/ Log: progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -120,7 +120,7 @@ /* we update P_prev->h_revision as a shortcut */ /* XXX check if this really gives a worse performance than only doing this write occasionally based on a counter in d */ - P_prev->h_revision = v; + //P_prev->h_revision = v; XXX re-enable! P = (gcptr)v; v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
"has a more recent rev" @@ -229,16 +229,26 @@ } } -gcptr _stm_nonrecord_barrier(gcptr G) +gcptr _stm_nonrecord_barrier(gcptr P) { /* follows the logic in stm_DirectReadBarrier() */ struct tx_descriptor *d = thread_descriptor; - gcptr P = G; revision_t v; + fprintf(stderr, "_stm_nonrecord_barrier: %p ", P); + + restart_all: + if (P->h_revision == stm_private_rev_num) + { + /* private */ + fprintf(stderr, "private\n"); + return P; + } + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { assert(!(P->h_revision & 1)); + fprintf(stderr, "private_from_protected\n"); return P; } @@ -246,6 +256,9 @@ { while (1) { + assert(P->h_tid & GCFLAG_PUBLIC); + fprintf(stderr, "public "); + wlog_t *item; gcptr L; G2L_FIND(d->public_to_private, P, item, goto no_private_obj); @@ -255,8 +268,7 @@ assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); assert(!(L->h_tid & GCFLAG_PUBLIC)); assert(is_private(L)); - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p " - "public_to_private\n", G, L); + fprintf(stderr, "-public_to_private-> %p private\n", L); return L; no_private_obj:; @@ -270,25 +282,38 @@ if (v & 2) goto follow_stub; P = (gcptr)v; - assert(P->h_tid & GCFLAG_PUBLIC); + fprintf(stderr, "-> %p ", P); } if (UNLIKELY(v > d->start_time)) { - fprintf(stderr, "_stm_nonrecord_barrier: %p -> NULL changed\n", G); + fprintf(stderr, "too recent!\n"); return NULL; // object too recent } - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p public\n", G, P); + fprintf(stderr, "\n"); } else { - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p protected\n", G, P); + fprintf(stderr, "protected\n"); } return P; follow_stub:; - fprintf(stderr, "_stm_nonrecord_barrier: %p -> %p stub\n ", G, P); - P = (gcptr)(v - 2); - return _stm_nonrecord_barrier(P); + if (STUB_THREAD(P) == d->public_descriptor) + { + P = (gcptr)(v - 2); + fprintf(stderr, "stub -> %p ", P); + } + else + { + P = (gcptr)(v - 2); + fprintf(stderr, "stub -foreign-> %p ", P); + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + { + P = (gcptr)P->h_revision; /* the backup copy */ + fprintf(stderr, "-backup-> %p ", P); + } + } + goto restart_all; } #if 0 diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -2,6 +2,9 @@ from support import * +SHORTCUT = False # XXXXXXXXXXXXXXXXX + + def setup_function(f): lib.stm_clear_between_tests() lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) @@ -146,6 +149,8 @@ p2.h_revision = ffi.cast("revision_t", p3) assert lib.stm_read_barrier(p1) == p3 assert list_of_read_objects() == [p3] + if not SHORTCUT: + py.test.skip("re-enable!") assert p1.h_revision == int(ffi.cast("revision_t", p3)) # shortcutted def test_read_barrier_public_to_private(): @@ -222,7 +227,8 @@ assert classify(p2) == "public" assert lib.rawgetlong(p2, 0) == 2782172 assert p2 == lib.stm_read_barrier(p) # short-circuit h_revision - assert p.h_revision == int(ffi.cast("revision_t", p2)) + if SHORTCUT: + assert p.h_revision == int(ffi.cast("revision_t", p2)) assert p2 == lib.stm_read_barrier(p) assert p2 == plist[-1] assert classify(p2) == "public" From noreply at buildbot.pypy.org Wed Jun 12 19:28:28 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 12 Jun 2013 19:28:28 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Slices returned by nditer should be readonly by default Message-ID: <20130612172828.7CF841C00B9@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64868:27ffa87a4dbd Date: 2013-06-12 19:27 +0200 
http://bitbucket.org/pypy/pypy/changeset/27ffa87a4dbd/ Log: Slices returned by nditer should be readonly by default diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -424,6 +424,13 @@ return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) + def readonly(self): + return NonWritableSlice(self.start, self.strides, self.backstrides, self.shape, self.parent, self.orig_arr, self.dtype) + +class NonWritableSlice(SliceArray): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "Assignment destination is read-only")) class ArrayBuffer(RWBuffer): def __init__(self, impl): diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -85,8 +85,7 @@ return W_NDimArray(res) def get_readonly_slice(space, array, it): - #XXX Not readonly - return W_NDimArray(it.getslice()) + return W_NDimArray(it.getslice().readonly()) def get_readwrite_slice(space, array, it): return W_NDimArray(it.getslice()) diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -59,6 +59,12 @@ n += 1 assert n == 12 assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all() + e = None + try: + r[0][0] = 0 + except ValueError, ex: + e = ex + assert e def test_interface(self): from numpypy import arange, nditer, zeros @@ -207,3 +213,4 @@ assert (it.operands[1] == [[6, 22, 38], [54, 70, 86]]).all() assert (it.operands[1] == a.sum(axis=2)).all() + From noreply at buildbot.pypy.org Wed Jun 12 21:16:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 21:16:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Still trying to figure out what _stm_nonrecord_barrier() should return to make the most sense for the tests, but I think I Message-ID: <20130612191610.165961C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r101:b853700f30eb Date: 2013-06-12 21:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/b853700f30eb/ Log: Still trying to figure out what _stm_nonrecord_barrier() should return to make the most sense for the tests, but I think I have it now. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -120,7 +120,7 @@ /* we update P_prev->h_revision as a shortcut */ /* XXX check if this really gives a worse performance than only doing this write occasionally based on a counter in d */ - //P_prev->h_revision = v; XXX re-enable! + P_prev->h_revision = v; P = (gcptr)v; v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. 
"has a more recent rev" @@ -229,6 +229,50 @@ } } +static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj) +{ + while ((pubobj->h_revision & 3) == 0) + { + assert(pubobj != P); + pubobj = (gcptr)pubobj->h_revision; + } + if (pubobj == P) + { + assert(!(privobj->h_tid & GCFLAG_PUBLIC)); + assert(is_private(privobj)); + fprintf(stderr, "-public_to_private-> %p private\n", privobj); + return privobj; + } + return NULL; +} + +static gcptr _find_public_to_private(gcptr P) +{ + gcptr R; + wlog_t *item; + struct tx_descriptor *d = thread_descriptor; + + G2L_LOOP_FORWARD(d->public_to_private, item) + { + R = _match_public_to_private(P, item->addr, item->val); + if (R != NULL) + return R; + + } G2L_LOOP_END; + + long i, size = d->public_descriptor->stolen_objects.size; + gcptr *items = d->public_descriptor->stolen_objects.items; + + for (i = 0; i < size; i += 2) + { + R = _match_public_to_private(P, items[i], items[i + 1]); + if (R != NULL) + return R; + } + + return NULL; +} + gcptr _stm_nonrecord_barrier(gcptr P) { /* follows the logic in stm_DirectReadBarrier() */ @@ -247,6 +291,7 @@ if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + /* private too, with a backup copy */ assert(!(P->h_revision & 1)); fprintf(stderr, "private_from_protected\n"); return P; @@ -254,36 +299,27 @@ if (P->h_tid & GCFLAG_PUBLIC) { - while (1) + fprintf(stderr, "public "); + + while (v = P->h_revision, !(v & 1)) { + if (v & 2) + { + gcptr L = _find_public_to_private(P); + if (L != NULL) + return L; + goto follow_stub; + } + + P = (gcptr)v; assert(P->h_tid & GCFLAG_PUBLIC); - fprintf(stderr, "public "); + fprintf(stderr, "-> %p public ", P); + } - wlog_t *item; - gcptr L; - G2L_FIND(d->public_to_private, P, item, goto no_private_obj); + gcptr L = _find_public_to_private(P); + if (L != NULL) + return L; - L = item->val; - found_in_stolen_objects: - assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(L->h_tid & GCFLAG_PUBLIC)); - assert(is_private(L)); - fprintf(stderr, "-public_to_private-> %p private\n", L); - return L; - - no_private_obj:; - L = _stm_find_stolen_objects(d, P); - if (L != NULL) - goto found_in_stolen_objects; - - v = ACCESS_ONCE(P->h_revision); - if (v & 1) - break; - if (v & 2) - goto follow_stub; - P = (gcptr)v; - fprintf(stderr, "-> %p ", P); - } if (UNLIKELY(v > d->start_time)) { fprintf(stderr, "too recent!\n"); @@ -312,6 +348,11 @@ P = (gcptr)P->h_revision; /* the backup copy */ fprintf(stderr, "-backup-> %p ", P); } + if (!(P->h_tid & GCFLAG_PUBLIC)) + { + fprintf(stderr, "protected by someone else!\n"); + return (gcptr)-1; + } } goto restart_all; } diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -564,3 +564,5 @@ r = p.h_revision assert (r % 4) == 0 return ffi.cast("gcptr", r) + +nrb_protected = ffi.cast("gcptr", -1) diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -2,7 +2,7 @@ from support import * -SHORTCUT = False # XXXXXXXXXXXXXXXXX +SHORTCUT = True def setup_function(f): diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -214,14 +214,17 @@ ptr = self.nonrecord_barrier(p.ptr) has_private_copy = p.obj in self.current_rev.content if has_private_copy: - assert ptr != ffi.NULL and self.is_private(ptr) + assert ptr != ffi.NULL and ptr != nrb_protected + assert self.is_private(ptr) content = self.current_rev.content[p.obj] else: try: content = self.current_rev._try_read(p.obj) except 
model.Deleted: - assert ptr == ffi.NULL + assert ptr == ffi.NULL or ptr == nrb_protected continue + if ptr == nrb_protected: + continue # not much we can do assert ptr != ffi.NULL and not self.is_private(ptr) self.check_not_free(ptr) From noreply at buildbot.pypy.org Wed Jun 12 21:22:44 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 21:22:44 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix the next issue Message-ID: <20130612192244.8E3171C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r102:5eaf6b65867f Date: 2013-06-12 21:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/5eaf6b65867f/ Log: Fix the next issue diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -511,7 +511,18 @@ v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - return 0; + if (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + { + /* such an object R might be listed in list_of_read_objects + before it was turned from protected to private */ + continue; + } + else + { + fprintf(stderr, "validation failed: " + "%p has a more recent revision\n", R); + return 0; + } } if (v >= LOCKED) // locked { @@ -524,7 +535,11 @@ else { if (v != d->my_lock) // not locked by me: conflict - return 0; + { + fprintf(stderr, "validation failed: " + "%p is locked by another thread\n", R); + return 0; + } } } } From noreply at buildbot.pypy.org Wed Jun 12 21:56:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 21:56:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next fix Message-ID: <20130612195636.53EFB1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r103:8fc7970d87ba Date: 2013-06-12 21:56 +0200 http://bitbucket.org/pypy/stmgc/changeset/8fc7970d87ba/ Log: Next fix diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -193,6 +193,7 @@ /*assert(P->h_revision & 1);*/ } + fprintf(stderr, "readobj: %p\n", P); gcptrlist_insert(&d->list_of_read_objects, P); add_in_recent_reads_cache: @@ -231,6 +232,7 @@ static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj) { + gcptr org_pubobj = pubobj; while ((pubobj->h_revision & 3) == 0) { assert(pubobj != P); @@ -238,8 +240,11 @@ } if (pubobj == P) { + assert(!(org_pubobj->h_tid & GCFLAG_STUB)); assert(!(privobj->h_tid & GCFLAG_PUBLIC)); assert(is_private(privobj)); + if (P != org_pubobj) + fprintf(stderr, "| actually %p ", org_pubobj); fprintf(stderr, "-public_to_private-> %p private\n", privobj); return privobj; } @@ -305,6 +310,7 @@ { if (v & 2) { + fprintf(stderr, "stub "); gcptr L = _find_public_to_private(P); if (L != NULL) return L; @@ -337,12 +343,12 @@ if (STUB_THREAD(P) == d->public_descriptor) { P = (gcptr)(v - 2); - fprintf(stderr, "stub -> %p ", P); + fprintf(stderr, "-> %p ", P); } else { P = (gcptr)(v - 2); - fprintf(stderr, "stub -foreign-> %p ", P); + fprintf(stderr, "-foreign-> %p ", P); if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { P = (gcptr)P->h_revision; /* the backup copy */ @@ -550,6 +556,16 @@ { d->start_time = GetGlobalCurTime(d); // copy from the global time fprintf(stderr, "et.c: ValidateNow: %ld\n", (long)d->start_time); + + /* subtle: we have to normalize stolen objects, because doing so + might add a few extra objects in the list_of_read_objects */ + if (d->public_descriptor->stolen_objects.size != 0) + { + spinlock_acquire(d->public_descriptor->collection_lock, 'N'); + stm_normalize_stolen_objects(d); + spinlock_release(d->public_descriptor->collection_lock); + } + if 
(!ValidateDuringTransaction(d, 0)) AbortTransaction(ABRT_VALIDATE_INFLIGHT); } diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -183,6 +183,14 @@ assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ g2l_insert(&d->public_to_private, B, L); + + /* to be on the safe side */ + fxcache_remove(&d->recent_reads_cache, B); + + /* but this is definitely needed: all keys in public_to_private + must appear in list_of_read_objects */ + fprintf(stderr, "n.readobj: %p\n", B); + gcptrlist_insert(&d->list_of_read_objects, B); } gcptrlist_clear(&d->public_descriptor->stolen_objects); } diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -57,7 +57,7 @@ text = '%d.%d$ %s\n' % (self.seed, self.counter, text) sys.stderr.write(text) self.counter += 1 - #if text.startswith('261035.184$'): + #if text.startswith('261225.987$'): # import pdb; pdb.set_trace() def check_not_free(self, ptr): @@ -107,8 +107,9 @@ self.current_rev.write(r.obj, index, p.obj) if not self.is_private(r.ptr): self.current_rev.check_not_outdated(r.obj) - except (model.Deleted, model.Conflict): + except (model.Deleted, model.Conflict), e: # abort! try to reproduce with C code + self.dump('expecting abort: %r' % (e,)) self.expected_abort() lib.setptr(r.ptr, index, p.ptr) # should abort raise MissingAbort From noreply at buildbot.pypy.org Wed Jun 12 22:05:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 22:05:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: Yes, now all of test_random passes Message-ID: <20130612200505.2F6EB1C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r104:a61854f3b10b Date: 2013-06-12 22:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/a61854f3b10b/ Log: Yes, now all of test_random passes diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -73,7 +73,8 @@ not_found: stub = stm_stub_malloc(sd->foreign_pd); - stub->h_tid = obj->h_tid | GCFLAG_PUBLIC | GCFLAG_STUB; + stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC + | GCFLAG_STUB; stub->h_revision = ((revision_t)obj) | 2; g2l_insert(&sd->all_stubs, obj, stub); From noreply at buildbot.pypy.org Wed Jun 12 22:31:45 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 22:31:45 +0200 (CEST) Subject: [pypy-commit] stmgc default: Pom pom pom Message-ID: <20130612203145.E68821C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r105:e7072b7314b1 Date: 2013-06-12 22:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/e7072b7314b1/ Log: Pom pom pom diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -238,7 +238,8 @@ assert(pubobj != P); pubobj = (gcptr)pubobj->h_revision; } - if (pubobj == P) + if (pubobj == P || ((P->h_revision & 3) == 2 && + pubobj->h_revision == P->h_revision)) { assert(!(org_pubobj->h_tid & GCFLAG_STUB)); assert(!(privobj->h_tid & GCFLAG_PUBLIC)); From noreply at buildbot.pypy.org Wed Jun 12 23:02:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 23:02:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Pass another iteration Message-ID: <20130612210204.82AA21C1241@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r108:bb408c71bf7f Date: 2013-06-12 23:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/bb408c71bf7f/ Log: Pass another iteration diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -154,6 +154,7 @@ 
except (model.Deleted, model.Conflict): # abort! try to reproduce with C code self.expected_abort() + lib.stm_clear_read_cache() lib.stm_read_barrier(p.ptr) # should abort raise MissingAbort @@ -480,5 +481,5 @@ def test_more_multi_thread(): #py.test.skip("more random tests") - for i in range(7, 1000): + for i in range(9, 1000): yield test_multi_thread, i From noreply at buildbot.pypy.org Wed Jun 12 23:02:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 23:02:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: Clear the fxcache when we want to be sure to get a conflict. Message-ID: <20130612210202.04A271C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r106:e11ad4a6f501 Date: 2013-06-12 22:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/e11ad4a6f501/ Log: Clear the fxcache when we want to be sure to get a conflict. diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -39,6 +39,10 @@ { return is_private(P); } +void stm_clear_read_cache(void) +{ + fxcache_clear(&thread_descriptor->recent_reads_cache); +} /************************************************************/ diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -169,6 +169,7 @@ int _stm_is_private(gcptr); /* debugging */ gcptr stm_get_private_from_protected(long); /* debugging */ gcptr stm_get_read_obj(long); /* debugging */ +void stm_clear_read_cache(void); /* debugging */ gcptr stmgc_duplicate(gcptr); int DescriptorInit(void); diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -72,6 +72,7 @@ gcptr stm_get_private_from_protected(long index); gcptr stm_get_read_obj(long index); void *STUB_THREAD(gcptr); + void stm_clear_read_cache(void); gcptr getptr(gcptr, long); void setptr(gcptr, long, gcptr); diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -126,9 +126,11 @@ pobj = self.current_rev.read(r.obj, index) if not self.is_private(r.ptr): self.current_rev.check_not_outdated(r.obj) - except (model.Deleted, model.Conflict): + except (model.Deleted, model.Conflict), e: # abort! 
try to reproduce with C code + self.dump('expecting abort: %r' % (e,)) self.expected_abort() + lib.stm_clear_read_cache() lib.getptr(r.ptr, index) # should abort raise MissingAbort From noreply at buildbot.pypy.org Wed Jun 12 23:02:03 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 23:02:03 +0200 (CEST) Subject: [pypy-commit] stmgc default: Disable testing for that, doesn't seem to give much Message-ID: <20130612210203.4EF531C10DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r107:211b51d42996 Date: 2013-06-12 22:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/211b51d42996/ Log: Disable testing for that, doesn't seem to give much diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -41,7 +41,7 @@ self.sync_wait = sync_wait self.counter = 0 self.current_rev = None - self.expecting_gap_in_commit_time = 2 + #self.expecting_gap_in_commit_time = 2 self.dump('run') # if sync.prebuilt_object is None: @@ -290,12 +290,12 @@ self.commit() except model.Conflict, e: self.dump("interruptible_transaction expecting %s" % (e,)) - if isinstance(e, model.ReadWriteConflict): - self.expecting_gap_in_commit_time += 2 + #if isinstance(e, model.ReadWriteConflict): + # self.expecting_gap_in_commit_time += 2 self.expected_abort() return 0 # - self.expecting_gap_in_commit_time = 2 + #self.expecting_gap_in_commit_time = 2 if restart: self.dump("interruptible_transaction break") return 1 @@ -308,16 +308,16 @@ t = lib.get_start_time() self.current_rev.start_time = t if self.current_rev.previous is not None: - t_prev = t - self.expecting_gap_in_commit_time + t_prev = t - 2 #self.expecting_gap_in_commit_time if hasattr(self.current_rev.previous, 'commit_time'): - assert self.current_rev.previous.commit_time == t_prev + pass #assert self.current_rev.previous.commit_time == t_prev else: self.current_rev.previous.commit_time = t_prev def possibly_update_time(self): t = lib.get_start_time() - t_prev = t - self.expecting_gap_in_commit_time - assert self.current_rev.previous.commit_time == t_prev + #t_prev = t - self.expecting_gap_in_commit_time + #assert self.current_rev.previous.commit_time == t_prev self.current_rev.start_time = t def commit(self): @@ -479,6 +479,6 @@ def test_more_multi_thread(): - py.test.skip("more random tests") - for i in range(2, 1000): + #py.test.skip("more random tests") + for i in range(7, 1000): yield test_multi_thread, i From noreply at buildbot.pypy.org Wed Jun 12 23:02:05 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 23:02:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: A suspicious-looking fix, but a valid fix nevertheless Message-ID: <20130612210205.C2F971C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r109:c9938a7b55fb Date: 2013-06-12 23:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/c9938a7b55fb/ Log: A suspicious-looking fix, but a valid fix nevertheless diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -105,7 +105,6 @@ the foreign thread's collection_lock, so we can read/write the flags */ - assert(B->h_tid & GCFLAG_BACKUP_COPY); B->h_tid &= ~GCFLAG_BACKUP_COPY; if (B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { From noreply at buildbot.pypy.org Wed Jun 12 23:05:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Jun 2013 23:05:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: fixes Message-ID: <20130612210523.F23C01C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r110:ec1fe577a446 Date: 2013-06-12 23:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/ec1fe577a446/ Log: fixes diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -112,6 +112,9 @@ self.dump('expecting abort: %r' % (e,)) self.expected_abort() lib.setptr(r.ptr, index, p.ptr) # should abort + # didn't? try again by first clearing the fxcache + lib.stm_clear_read_cache() + lib.setptr(r.ptr, index, p.ptr) raise MissingAbort lib.setptr(r.ptr, index, p.ptr) # must not abort @@ -175,6 +178,9 @@ # abort! try to reproduce with C code self.expected_abort() lib.stm_write_barrier(p.ptr) # should abort + # didn't? try again by first clearing the fxcache + lib.stm_clear_read_cache() + lib.stm_write_barrier(p.ptr) raise MissingAbort nptr = lib.stm_write_barrier(p.ptr) @@ -481,5 +487,5 @@ def test_more_multi_thread(): #py.test.skip("more random tests") - for i in range(9, 1000): + for i in range(12, 1000): yield test_multi_thread, i From noreply at buildbot.pypy.org Thu Jun 13 10:01:27 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 10:01:27 +0200 (CEST) Subject: [pypy-commit] pypy default: test_forked_can_thread failed today on linux64. No clue, but try Message-ID: <20130613080127.E8CA71C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64869:9b623bc48b59 Date: 2013-06-13 10:00 +0200 http://bitbucket.org/pypy/pypy/changeset/9b623bc48b59/ Log: test_forked_can_thread failed today on linux64. No clue, but try randomly to increase the kill timer, in case it was due to very high loads. diff --git a/pypy/module/thread/test/test_fork.py b/pypy/module/thread/test/test_fork.py --- a/pypy/module/thread/test/test_fork.py +++ b/pypy/module/thread/test/test_fork.py @@ -28,7 +28,7 @@ if pid == 0: os._exit(0) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] assert exitcode == 0 # if 9, process was killed by timer! finally: @@ -54,7 +54,7 @@ thread.start_new_thread(lambda: None, ()) os._exit(0) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] assert exitcode == 0 # if 9, process was killed by timer! 
@@ -73,7 +73,7 @@ signal.signal(signal.SIGUSR1, signal.SIG_IGN) os._exit(42) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] feedback.append(exitcode) From noreply at buildbot.pypy.org Thu Jun 13 10:15:42 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 10:15:42 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix, at least for the tests: don't use "-1" initially for all threads Message-ID: <20130613081542.BA22D1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r111:c3ea72ae1029 Date: 2013-06-13 10:15 +0200 http://bitbucket.org/pypy/stmgc/changeset/c3ea72ae1029/ Log: Fix, at least for the tests: don't use "-1" initially for all threads diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -1274,7 +1274,7 @@ d->my_lock = LOCKED + 2 * i; assert(d->my_lock & 1); assert(d->my_lock >= LOCKED); - stm_private_rev_num = -1; + stm_private_rev_num = -d->my_lock; d->private_revision_ref = &stm_private_rev_num; d->max_aborts = -1; pd->descriptor = d; From noreply at buildbot.pypy.org Thu Jun 13 10:36:35 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 10:36:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: Tweak tests Message-ID: <20130613083635.60A851C153B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r112:573298ebb564 Date: 2013-06-13 10:33 +0200 http://bitbucket.org/pypy/stmgc/changeset/573298ebb564/ Log: Tweak tests diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -530,7 +530,7 @@ backup = (p.h_tid & GCFLAG_BACKUP_COPY) != 0 stub = (p.h_tid & GCFLAG_STUB) != 0 assert private + public + backup <= 1 - assert stub <= public + assert (public, stub) != (False, True) if private: return "private" if public: diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -210,7 +210,7 @@ return lib._stm_nonrecord_barrier(ptr) def is_private(self, ptr): - return lib._stm_is_private(ptr) + return classify(ptr) == "private" def check_valid(self, lst): lst = list(lst) @@ -349,7 +349,7 @@ self.interruptible_transaction = False self.startrev() # - self.steps_remaining = 10000 + self.steps_remaining = 1000 # self.run_me(do_wait=False) # From noreply at buildbot.pypy.org Thu Jun 13 10:36:36 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 10:36:36 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix "stealing again the same object" Message-ID: <20130613083636.8B7831C153B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r113:b9568f14bdd9 Date: 2013-06-13 10:36 +0200 http://bitbucket.org/pypy/stmgc/changeset/b9568f14bdd9/ Log: Fix "stealing again the same object" diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -109,6 +109,10 @@ if (B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { /* already stolen */ + assert(B->h_tid & GCFLAG_PUBLIC); + fprintf(stderr, "already stolen: %p -> %p <-> %p\n", P, L, B); + L = B; + goto already_stolen; } else { B->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; @@ -116,18 +120,26 @@ don't want to walk over the feet of the foreign thread */ gcptrlist_insert2(&foreign_pd->stolen_objects, B, L); + fprintf(stderr, "stolen: %p -> %p <-> %p\n", P, L, B); + L = B; } - fprintf(stderr, "stolen: %p -> %p - - -> %p\n", P, B, L); - L = B; } else { - fprintf(stderr, "stolen: %p -> %p\n", P, L); + if (L->h_tid & GCFLAG_PUBLIC) { + /* already stolen */ + fprintf(stderr, "already stolen: %p -> 
%p\n", P, L); + goto already_stolen; + } + else { + fprintf(stderr, "stolen: %p -> %p\n", P, L); + } } /* Here L is a protected (or backup) copy, and we own the foreign thread's collection_lock, so we can read/write the flags. Change it from protected to public. */ + assert(!(L->h_tid & GCFLAG_PUBLIC)); L->h_tid |= GCFLAG_PUBLIC; /* Note that all protected or backup copies have a h_revision that @@ -163,6 +175,7 @@ */ smp_wmb(); + already_stolen: /* update the original P->h_revision to point directly to L */ P->h_revision = (revision_t)L; From noreply at buildbot.pypy.org Thu Jun 13 11:32:51 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 11:32:51 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add demo1.c from ../c3. Works. Message-ID: <20130613093251.891151C1241@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r114:f5727433a73e Date: 2013-06-13 11:32 +0200 http://bitbucket.org/pypy/stmgc/changeset/f5727433a73e/ Log: Add demo1.c from ../c3. Works. diff --git a/c3/Makefile b/c4/Makefile copy from c3/Makefile copy to c4/Makefile --- a/c3/Makefile +++ b/c4/Makefile @@ -2,33 +2,17 @@ # Run with "make -jN" for maximum randomness. # -debug: debug-demo1 debug-demo2 +debug: debug-demo1 clean: - rm -f debug-demo1 debug-demo2 + rm -f debug-demo1 -tests: iteration-1 iteration-2 iteration-3 iteration-4 +H_FILES = atomic_ops.h stmgc.h stmimpl.h \ + dbgmem.h et.h fprintcolor.h lists.h steal.h stmsync.h +C_FILES = dbgmem.c et.c fprintcolor.c lists.c steal.c stmsync.c -all-builds: build-demo1 build-demo2 build-demo3 build-demo4 build-demo5 - -iteration-1: all-builds - +make test-demo5 test-demo1 test-demo2 test-demo3 test-demo4 - -iteration-2: all-builds - +make test-demo2 test-demo3 test-demo4 test-demo5 test-demo1 - -iteration-3: all-builds - +make test-demo3 test-demo4 test-demo5 test-demo1 test-demo2 - -iteration-4: all-builds - +make test-demo4 test-demo5 test-demo1 test-demo2 test-demo3 - - -H_FILES = et.h lists.h nursery.h gcpage.h stmsync.h dbgmem.h fprintcolor.h stmgc.h atomic_ops.h stmimpl.h -C_FILES = et.c lists.c nursery.c gcpage.c stmsync.c dbgmem.c fprintcolor.c - -DEBUG = -g -DGC_NURSERY=0x10000 -D_GC_DEBUG=1 +DEBUG = -g -DGC_NURSERY=0x10000 #-D_GC_DEBUG=1 build-%: %.c ${H_FILES} ${C_FILES} diff --git a/c3/demo1.c b/c4/demo1.c copy from c3/demo1.c copy to c4/demo1.c --- a/c3/demo1.c +++ b/c4/demo1.c @@ -106,7 +106,7 @@ static sem_t done; -extern void stmgcpage_possibly_major_collect(int force); /* temp */ +//extern void stmgcpage_possibly_major_collect(int force); /* temp */ static int thr_mynum = 0; @@ -128,7 +128,7 @@ w_node->value = start + i; stm_push_root((gcptr)w_node); stm_perform_transaction((gcptr)w_node, insert1); - stmgcpage_possibly_major_collect(0); /* temp */ + //stmgcpage_possibly_major_collect(0); /* temp */ w_node = (struct node *)stm_pop_root(); } stm_finalize(); From noreply at buildbot.pypy.org Thu Jun 13 14:36:07 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 13 Jun 2013 14:36:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Added tag pypy-2.1-beta1-arm for changeset 9b623bc48b59 Message-ID: <20130613123607.DFEFB1C073E@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64870:02ae35f0be09 Date: 2013-06-13 14:33 +0200 http://bitbucket.org/pypy/pypy/changeset/02ae35f0be09/ Log: Added tag pypy-2.1-beta1-arm for changeset 9b623bc48b59 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,4 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 
ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm From noreply at buildbot.pypy.org Thu Jun 13 14:36:09 2013 From: noreply at buildbot.pypy.org (bivab) Date: Thu, 13 Jun 2013 14:36:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Added tag pypy-2.1-beta1-arm for changeset ab0dd631c220 Message-ID: <20130613123609.2B3391C073E@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: Changeset: r64871:18f1a3e22254 Date: 2013-06-13 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/18f1a3e22254/ Log: Added tag pypy-2.1-beta1-arm for changeset ab0dd631c220 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -4,3 +4,5 @@ ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm From noreply at buildbot.pypy.org Thu Jun 13 15:09:13 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 15:09:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix of the test by being more adaptable to the logic that might sometimes Message-ID: <20130613130913.2097C1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r115:a3469e49931a Date: 2013-06-13 15:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/a3469e49931a/ Log: Fix of the test by being more adaptable to the logic that might sometimes detect conflicts even if we have private copies. diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -98,46 +98,65 @@ self.aborted_rev = self.current_rev self.current_rev = None + def cancel_expected_abort(self): + lib.stm_set_max_aborts(0) + self.expected_conflict = False + self.current_rev = self.aborted_rev + del self.aborted_rev + def set_into_root(self, p, index): r = self._r if r != emptypair: self.check(r) self.check(p) - try: - self.current_rev.write(r.obj, index, p.obj) - if not self.is_private(r.ptr): - self.current_rev.check_not_outdated(r.obj) - except (model.Deleted, model.Conflict), e: - # abort! try to reproduce with C code - self.dump('expecting abort: %r' % (e,)) - self.expected_abort() - lib.setptr(r.ptr, index, p.ptr) # should abort - # didn't? 
try again by first clearing the fxcache - lib.stm_clear_read_cache() - lib.setptr(r.ptr, index, p.ptr) - raise MissingAbort - - lib.setptr(r.ptr, index, p.ptr) # must not abort + self.do(r, (self.current_rev.write, r.obj, index, p.obj), + (lib.setptr, r.ptr, index, p.ptr)) self.possibly_update_time() self.dump('set_into_root(%r, %r, %r)' % (r.obj, index, p.obj)) + def do(self, r, model_operation, real_operation): + abort = None + try: + x = model_operation[0](*model_operation[1:]) + except (model.Deleted, model.Conflict), e: + # the model says that we should definitely get an abort + abort = e.__class__.__name__ + else: + if r.obj.created_in_revision is not self.current_rev: + try: + self.current_rev.check_not_outdated(r.obj) + except model.Deleted: + if not self.is_private(r.ptr): + # the model says that we should definitely get an abort + abort = "CheckDeleted" + else: + # the model says that we *might* get an abort + abort = "MaybeDeleted" + # + if abort: + self.dump('expecting abort: %r' % (abort,)) + self.expected_abort() + y = real_operation[0](*real_operation[1:]) + if abort: + # didn't abort? try again by first clearing the fxcache + lib.stm_clear_read_cache() + y = real_operation[0](*real_operation[1:]) + # still didn't abort? + if abort != "MaybeDeleted": + raise MissingAbort + else: + # ok, it's fine if we don't actually get an abort + self.cancel_expected_abort() + # + return x, y + def get_ref(self, r, index): self.check(r) if r == emptypair: return emptypair - try: - pobj = self.current_rev.read(r.obj, index) - if not self.is_private(r.ptr): - self.current_rev.check_not_outdated(r.obj) - except (model.Deleted, model.Conflict), e: - # abort! try to reproduce with C code - self.dump('expecting abort: %r' % (e,)) - self.expected_abort() - lib.stm_clear_read_cache() - lib.getptr(r.ptr, index) # should abort - raise MissingAbort - - pptr = lib.getptr(r.ptr, index) + self.dump('get_ref(%s, %d)' % (r, index)) + pobj, pptr = self.do(r, (self.current_rev.read, r.obj, index), + (lib.getptr, r.ptr, index)) self.possibly_update_time() p = pair(pobj, pptr) self.check(p) @@ -150,18 +169,8 @@ def read_barrier(self, p): if p != emptypair: self.check(p) - try: - self.current_rev.read_barrier(p.obj) - if not self.is_private(p.ptr): - self.current_rev.check_not_outdated(p.obj) - except (model.Deleted, model.Conflict): - # abort! try to reproduce with C code - self.expected_abort() - lib.stm_clear_read_cache() - lib.stm_read_barrier(p.ptr) # should abort - raise MissingAbort - - nptr = lib.stm_read_barrier(p.ptr) + _, nptr = self.do(p, (self.current_rev.read_barrier, p.obj), + (lib.stm_read_barrier, p.ptr)) self.possibly_update_time() p = pair(p.obj, nptr) self.check(p) @@ -170,20 +179,8 @@ def write_barrier(self, p): if p != emptypair: self.check(p) - try: - self.current_rev.write_barrier(p.obj) - if not self.is_private(p.ptr): - self.current_rev.check_not_outdated(p.obj) - except (model.Deleted, model.Conflict): - # abort! try to reproduce with C code - self.expected_abort() - lib.stm_write_barrier(p.ptr) # should abort - # didn't? 
try again by first clearing the fxcache - lib.stm_clear_read_cache() - lib.stm_write_barrier(p.ptr) - raise MissingAbort - - nptr = lib.stm_write_barrier(p.ptr) + _, nptr = self.do(p, (self.current_rev.write_barrier, p.obj), + (lib.stm_write_barrier, p.ptr)) self.possibly_update_time() p = pair(p.obj, nptr) self.check(p) @@ -487,5 +484,5 @@ def test_more_multi_thread(): #py.test.skip("more random tests") - for i in range(12, 1000): + for i in range(326//2, 1000): yield test_multi_thread, i From noreply at buildbot.pypy.org Thu Jun 13 15:11:40 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 15:11:40 +0200 (CEST) Subject: [pypy-commit] stmgc default: Re-skip this never-ending part Message-ID: <20130613131140.DDC501C02BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r116:f1ccf5bbcb6f Date: 2013-06-13 15:11 +0200 http://bitbucket.org/pypy/stmgc/changeset/f1ccf5bbcb6f/ Log: Re-skip this never-ending part diff --git a/c4/test/test_random.py b/c4/test/test_random.py --- a/c4/test/test_random.py +++ b/c4/test/test_random.py @@ -483,6 +483,6 @@ def test_more_multi_thread(): - #py.test.skip("more random tests") - for i in range(326//2, 1000): + py.test.skip("more random tests") + for i in range(580//2, 1000): yield test_multi_thread, i From noreply at buildbot.pypy.org Thu Jun 13 15:48:33 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 15:48:33 +0200 (CEST) Subject: [pypy-commit] stmgc default: Added tag c4-without-gc for changeset f1ccf5bbcb6f Message-ID: <20130613134833.A95E71C03B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r117:7c75ea00f9fd Date: 2013-06-13 15:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/7c75ea00f9fd/ Log: Added tag c4-without-gc for changeset f1ccf5bbcb6f diff --git a/.hgtags b/.hgtags new file mode 100644 --- /dev/null +++ b/.hgtags @@ -0,0 +1,1 @@ +f1ccf5bbcb6f01fb995622f2b569e9858fbea08a c4-without-gc From noreply at buildbot.pypy.org Thu Jun 13 18:21:05 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 13 Jun 2013 18:21:05 +0200 (CEST) Subject: [pypy-commit] pypy default: A new copyright holder. Thanks! Message-ID: <20130613162105.89A921C0400@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r64873:d3c70e7a2523 Date: 2013-06-13 14:08 +0200 http://bitbucket.org/pypy/pypy/changeset/d3c70e7a2523/ Log: A new copyright holder. Thanks! diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -218,6 +218,7 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike From noreply at buildbot.pypy.org Thu Jun 13 18:21:04 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 13 Jun 2013 18:21:04 +0200 (CEST) Subject: [pypy-commit] pypy default: Add implementation of _tkinter, using cffi bindings. Message-ID: <20130613162104.55D1B1C03B1@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r64872:2c9002e840f0 Date: 2013-06-13 14:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2c9002e840f0/ Log: Add implementation of _tkinter, using cffi bindings. diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,43 @@ +# _tkinter package -- low-level interface to libtk and libtcl. 
+# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. + +class TclError(Exception): + pass + +from .tklib import tklib, tkffi +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + 
tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): + # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. 
+ raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. 
+ raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py @@ -0,0 +1,114 @@ +# TclObject, conversions with Python objects + +from .tklib import tklib, tkffi + +class TypeCache(object): + def __init__(self): + self.BooleanType = tklib.Tcl_GetObjType("boolean") + self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") + self.DoubleType = tklib.Tcl_GetObjType("double") + self.IntType = tklib.Tcl_GetObjType("int") + self.ListType = tklib.Tcl_GetObjType("list") + self.ProcBodyType = tklib.Tcl_GetObjType("procbody") + self.StringType = tklib.Tcl_GetObjType("string") + + +def FromObj(app, value): + """Convert a TclObj pointer into a Python object.""" + typeCache = app._typeCache + if not value.typePtr: + buf = tkffi.buffer(value.bytes, value.length) + result = buf[:] + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. 
+ try: + result.decode('ascii') + except UnicodeDecodeError: + result = result.decode('utf8') + return result + + elif value.typePtr == typeCache.BooleanType: + return result + elif value.typePtr == typeCache.ByteArrayType: + return result + elif value.typePtr == typeCache.DoubleType: + return value.internalRep.doubleValue + elif value.typePtr == typeCache.IntType: + return value.internalRep.longValue + elif value.typePtr == typeCache.ListType: + size = tkffi.new('int*') + status = tklib.Tcl_ListObjLength(app.interp, value, size) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result = [] + tcl_elem = tkffi.new("Tcl_Obj**") + for i in range(size[0]): + status = tklib.Tcl_ListObjIndex(app.interp, + value, i, tcl_elem) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result.append(FromObj(app, tcl_elem[0])) + return tuple(result) + elif value.typePtr == typeCache.ProcBodyType: + return result + elif value.typePtr == typeCache.StringType: + buf = tklib.Tcl_GetUnicode(value) + length = tklib.Tcl_GetCharLength(value) + buf = tkffi.buffer(tkffi.cast("char*", buf), length*2)[:] + return buf.decode('utf-16') + + return TclObject(value) + +def AsObj(value): + if isinstance(value, str): + return tklib.Tcl_NewStringObj(value, len(value)) + elif isinstance(value, bool): + return tklib.Tcl_NewBooleanObj(value) + elif isinstance(value, int): + return tklib.Tcl_NewLongObj(value) + elif isinstance(value, float): + return tklib.Tcl_NewDoubleObj(value) + elif isinstance(value, tuple): + argv = tkffi.new("Tcl_Obj*[]", len(value)) + for i in range(len(value)): + argv[i] = AsObj(value[i]) + return tklib.Tcl_NewListObj(len(value), argv) + elif isinstance(value, unicode): + encoded = value.encode('utf-16')[2:] + buf = tkffi.new("char[]", encoded) + inbuf = tkffi.cast("Tcl_UniChar*", buf) + return tklib.Tcl_NewUnicodeObj(buf, len(encoded)/2) + elif isinstance(value, TclObject): + tklib.Tcl_IncrRefCount(value._value) + return value._value + else: + return AsObj(str(value)) + +class TclObject(object): + def __new__(cls, value): + self = object.__new__(cls) + tklib.Tcl_IncrRefCount(value) + self._value = value + self._string = None + return self + + def __del__(self): + tklib.Tcl_DecrRefCount(self._value) + + def __str__(self): + if self._string and isinstance(self._string, str): + return self._string + return tkffi.string(tklib.Tcl_GetString(self._value)) + + @property + def string(self): + if self._string is None: + length = tkffi.new("int*") + s = tklib.Tcl_GetStringFromObj(self._value, length) + value = tkffi.buffer(s, length[0])[:] + try: + value.decode('ascii') + except UnicodeDecodeError: + value = value.decode('utf8') + self._string = value + return self._string diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tklib.py @@ -0,0 +1,114 @@ +# C bindings with libtcl and libtk. + +from cffi import FFI + +tkffi = FFI() + +tkffi.cdef(""" +char *get_tk_version(); +char *get_tcl_version(); +#define TCL_READABLE ... +#define TCL_WRITABLE ... +#define TCL_EXCEPTION ... +#define TCL_ERROR ... +#define TCL_OK ... + +#define TCL_LEAVE_ERR_MSG ... +#define TCL_GLOBAL_ONLY ... +#define TCL_EVAL_DIRECT ... +#define TCL_EVAL_GLOBAL ... + +typedef unsigned short Tcl_UniChar; +typedef ... 
Tcl_Interp; +typedef ...* Tcl_ThreadId; +typedef ...* Tcl_Command; + +typedef struct Tcl_ObjType { + char *name; + ...; +} Tcl_ObjType; +typedef struct Tcl_Obj { + char *bytes; + int length; + Tcl_ObjType *typePtr; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; + } internalRep; + ...; +} Tcl_Obj; + +Tcl_Interp *Tcl_CreateInterp(); +void Tcl_DeleteInterp(Tcl_Interp* interp); +int Tcl_Init(Tcl_Interp* interp); +int Tk_Init(Tcl_Interp* interp); + +void Tcl_Free(char* ptr); + +const char *Tcl_SetVar(Tcl_Interp* interp, const char* varName, const char* newValue, int flags); +const char *Tcl_SetVar2(Tcl_Interp* interp, const char* name1, const char* name2, const char* newValue, int flags); +const char *Tcl_GetVar(Tcl_Interp* interp, const char* varName, int flags); +Tcl_Obj *Tcl_SetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, Tcl_Obj* newValuePtr, int flags); +Tcl_Obj *Tcl_GetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +int Tcl_UnsetVar2(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +const Tcl_ObjType *Tcl_GetObjType(const char* typeName); + +Tcl_Obj *Tcl_NewStringObj(const char* bytes, int length); +Tcl_Obj *Tcl_NewUnicodeObj(const Tcl_UniChar* unicode, int numChars); +Tcl_Obj *Tcl_NewLongObj(long longValue); +Tcl_Obj *Tcl_NewBooleanObj(int boolValue); +Tcl_Obj *Tcl_NewDoubleObj(double doubleValue); + +void Tcl_IncrRefCount(Tcl_Obj* objPtr); +void Tcl_DecrRefCount(Tcl_Obj* objPtr); + +int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); +char *Tcl_GetString(Tcl_Obj* objPtr); +char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); + +Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); +int Tcl_GetCharLength(Tcl_Obj* objPtr); + +Tcl_Obj *Tcl_NewListObj(int objc, Tcl_Obj* const objv[]); +int Tcl_ListObjLength(Tcl_Interp* interp, Tcl_Obj* listPtr, int* intPtr); +int Tcl_ListObjIndex(Tcl_Interp* interp, Tcl_Obj* listPtr, int index, Tcl_Obj** objPtrPtr); +int Tcl_SplitList(Tcl_Interp* interp, char* list, int* argcPtr, const char*** argvPtr); + +int Tcl_Eval(Tcl_Interp* interp, const char* script); +int Tcl_EvalFile(Tcl_Interp* interp, const char* filename); +int Tcl_EvalObjv(Tcl_Interp* interp, int objc, Tcl_Obj** objv, int flags); +Tcl_Obj *Tcl_GetObjResult(Tcl_Interp* interp); +const char *Tcl_GetStringResult(Tcl_Interp* interp); +void Tcl_SetObjResult(Tcl_Interp* interp, Tcl_Obj* objPtr); + +typedef void* ClientData; +typedef int Tcl_CmdProc( + ClientData clientData, + Tcl_Interp *interp, + int argc, + const char *argv[]); +typedef void Tcl_CmdDeleteProc( + ClientData clientData); +Tcl_Command Tcl_CreateCommand(Tcl_Interp* interp, const char* cmdName, Tcl_CmdProc proc, ClientData clientData, Tcl_CmdDeleteProc deleteProc); +int Tcl_DeleteCommand(Tcl_Interp* interp, const char* cmdName); + +Tcl_ThreadId Tcl_GetCurrentThread(); +int Tcl_DoOneEvent(int flags); + +int Tk_GetNumMainWindows(); +""") + +tklib = tkffi.verify(""" +#include <tcl.h> +#include <tk.h> + +char *get_tk_version() { return TK_VERSION; } +char *get_tcl_version() { return TCL_VERSION; } +""", +include_dirs=['/usr/include/tcl'], +libraries=['tcl', 'tk'], +) From noreply at buildbot.pypy.org Thu Jun 13 20:02:23 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 13 Jun 2013 20:02:23 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer:
Implement the c_index and the f_index flags on the nditer class Message-ID: <20130613180223.EC27D1C029E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64874:cbc60b20e6c2 Date: 2013-06-13 20:01 +0200 http://bitbucket.org/pypy/pypy/changeset/cbc60b20e6c2/ Log: Implement the c_index and the f_index flags on the nditer class diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -201,18 +201,52 @@ shape, backward) return MultiDimViewIterator(imp, imp.dtype, imp.start, r[0], r[1], shape) +def is_backward(imp, order): + if order == 'K' or (order == 'C' and imp.order == 'C'): + return False + elif order =='F' and imp.order == 'C': + return True + else: + raise NotImplementedError('not implemented yet') + def get_external_loop_iter(space, order, arr, shape): imp = arr.implementation - if order == 'K' or (order == 'C' and imp.order == 'C'): - backward = False - elif order =='F' and imp.order == 'C': - backward = True - else: - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + + backward = is_backward(imp, order) return SliceIterator(arr, imp.strides, imp.backstrides, shape, order=order, backward=backward) +class IndexIterator(object): + def __init__(self, shape, backward=False): + self.shape = shape + self.index = [0] * len(shape) + self.backward = backward + self.called = False + + def next(self): + # TODO It's probably possible to refactor all the "next" method from each iterator + if not self.called: + self.called = True + return + for i in range(len(self.shape) - 1, -1, -1): + if self.index[i] < self.shape[i] - 1: + self.index[i] += 1 + break + else: + self.index[i] = 0 + + def getvalue(self): + if not self.called: + return 0 + if not self.backward: + ret = self.index[-1] + for i in range(len(self.shape) - 2, -1, -1): + ret += self.index[i] * self.shape[i - 1] + else: + ret = self.index[0] + for i in range(1, len(self.shape)): + ret += self.index[i] * self.shape[i - 1] + return ret class W_NDIter(W_Root): @@ -229,6 +263,7 @@ self.refs_ok = False self.reduce_ok = False self.zerosize_ok = False + self.index_iter = None if space.isinstance_w(w_seq, space.w_tuple) or \ space.isinstance_w(w_seq, space.w_list): w_seq_as_list = space.listview(w_seq) @@ -240,6 +275,10 @@ len(self.seq), parse_op_flag) self.iters=[] self.shape = iter_shape = shape_agreement_multiple(space, self.seq) + if self.tracked_index != "": + if self.order == "K": + self.order = self.seq[0].implementation.order + self.index_iter = IndexIterator(iter_shape, backward=self.order != self.tracked_index) if self.external_loop: for i in range(len(self.seq)): self.iters.append(ExternalLoopIterator(get_external_loop_iter(space, self.order, @@ -271,6 +310,8 @@ else: raise OperationError(space.w_StopIteration, space.w_None) res = [] + if self.index_iter: + self.index_iter.next() for i in range(len(self.iters)): res.append(self.iters[i].getitem(space, self.seq[i])) self.iters[i].next() @@ -324,12 +365,12 @@ 'not implemented yet')) def descr_get_has_index(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + return space.wrap(not self.tracked_index == "") def descr_get_index(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - 'not implemented yet')) + if self.tracked_index == "": + raise OperationError(space.w_ValueError, "Iterator does not have an 
index") + return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): raise OperationError(space.w_NotImplementedError, space.wrap( diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -66,6 +66,24 @@ e = ex assert e + def test_index(self): + from numpypy import arange, nditer, zeros + a = arange(6).reshape(2,3) + + r = [] + it = nditer(a, flags=['c_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)] + + r = [] + it = nditer(a, flags=['f_index']) + assert it.has_index + for value in it: + r.append((value, it.index)) + assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + def test_interface(self): from numpypy import arange, nditer, zeros a = arange(6).reshape(2,3) From noreply at buildbot.pypy.org Thu Jun 13 20:22:53 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 13 Jun 2013 20:22:53 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Add a test for nditers with mixed order Message-ID: <20130613182253.9751D1C0400@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64875:5a7505174934 Date: 2013-06-13 20:22 +0200 http://bitbucket.org/pypy/pypy/changeset/5a7505174934/ Log: Add a test for nditers with mixed order diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -84,6 +84,17 @@ r.append((value, it.index)) assert r == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3), (5, 5)] + @py.test.mark.xfail(reason="Fortran order not implemented") + def test_iters_with_different_order(self): + from numpypy import nditer, array + + a = array([[1, 2], [3, 4]], order="C") + b = array([[1, 2], [3, 4]], order="F") + + it = nditer([a, b]) + + assert list(it) == zip(range(1, 5), range(1, 5)) + def test_interface(self): from numpypy import arange, nditer, zeros a = arange(6).reshape(2,3) From noreply at buildbot.pypy.org Thu Jun 13 20:36:23 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 13 Jun 2013 20:36:23 +0200 (CEST) Subject: [pypy-commit] pypy numpypy-nditer: Fix translation Message-ID: <20130613183623.5E1A61C1241@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-nditer Changeset: r64876:0a0ae7e99b40 Date: 2013-06-13 20:35 +0200 http://bitbucket.org/pypy/pypy/changeset/0a0ae7e99b40/ Log: Fix translation diff --git a/pypy/module/micronumpy/interp_nditer.py b/pypy/module/micronumpy/interp_nditer.py --- a/pypy/module/micronumpy/interp_nditer.py +++ b/pypy/module/micronumpy/interp_nditer.py @@ -369,7 +369,7 @@ def descr_get_index(self, space): if self.tracked_index == "": - raise OperationError(space.w_ValueError, "Iterator does not have an index") + raise OperationError(space.w_ValueError, space.wrap("Iterator does not have an index")) return space.wrap(self.index_iter.getvalue()) def descr_get_has_multi_index(self, space): From noreply at buildbot.pypy.org Thu Jun 13 22:26:58 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 22:26:58 +0200 (CEST) Subject: [pypy-commit] stmgc default: Clean-ups in preparation for nursery.c Message-ID: <20130613202658.E8EF91C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r118:afd0da81c314 Date: 2013-06-13 22:25 +0200 
http://bitbucket.org/pypy/stmgc/changeset/afd0da81c314/ Log: Clean-ups in preparation for nursery.c diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -6,58 +6,18 @@ #include #define PAGE_SIZE 4096 -#define MMAP_LENGTH 67108864 /* 64MB */ - -struct zone_s { - struct zone_s *next; - char *start; - uint8_t active[MMAP_LENGTH / WORD]; -}; +#define MMAP_TOTAL 671088640 /* 640MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; -static char *free_zone = NULL, *free_zone_end = NULL; -static struct zone_s *zones = NULL; +static char *zone_current = NULL, *zone_end = NULL; -void *stm_malloc(size_t sz) -{ - pthread_mutex_lock(&malloc_mutex); - - size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; - if (free_zone_end - free_zone < nb_pages * PAGE_SIZE) { - free_zone = mmap(NULL, MMAP_LENGTH, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (free_zone == NULL || free_zone == MAP_FAILED) { - fprintf(stderr, "out of memory: mmap() failed\n"); - abort(); - } - free_zone_end = free_zone + MMAP_LENGTH; - assert((MMAP_LENGTH % PAGE_SIZE) == 0); - - struct zone_s *z = calloc(1, sizeof(struct zone_s)); - if (z == NULL) { - fprintf(stderr, "out of memory: malloc(zone_s) failed\n"); - abort(); - } - z->start = free_zone; - z->next = zones; - zones = z; - } - - char *result = free_zone; - free_zone += nb_pages * PAGE_SIZE; - pthread_mutex_unlock(&malloc_mutex); - - result += (-sz) & (PAGE_SIZE-1); - assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0); - stm_dbgmem_used_again(result, sz, 1); - return result; -} static void _stm_dbgmem(void *p, size_t sz, int prot) { - fprintf(stderr, "_stm_dbgmem(%p, 0x%lx, %d)\n", p, (long)sz, prot); if (sz == 0) return; + + assert((ssize_t)sz > 0); intptr_t align = ((intptr_t)p) & (PAGE_SIZE-1); p = ((char *)p) - align; sz += align; @@ -65,64 +25,42 @@ assert(err == 0); } +void *stm_malloc(size_t sz) +{ + pthread_mutex_lock(&malloc_mutex); + + if (zone_current == NULL) { + zone_current = mmap(NULL, MMAP_TOTAL, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (zone_current == NULL || zone_current == MAP_FAILED) { + fprintf(stderr, "not enough memory: mmap() failed\n"); + abort(); + } + zone_end = zone_current + MMAP_TOTAL; + assert((MMAP_TOTAL % PAGE_SIZE) == 0); + _stm_dbgmem(zone_current, MMAP_TOTAL, PROT_NONE); + } + + size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; + char *result = zone_current; + zone_current += nb_pages * PAGE_SIZE; + if (zone_current > zone_end) { + fprintf(stderr, "dbgmem.c: %ld MB of memory have been exhausted\n", + (long)(MMAP_TOTAL / (1024*1024))); + abort(); + } + pthread_mutex_unlock(&malloc_mutex); + + result += (-sz) & (PAGE_SIZE-1); + assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0); + _stm_dbgmem(result, sz, PROT_READ | PROT_WRITE); + return result; +} + void stm_free(void *p, size_t sz) { - _stm_dbgmem(p, sz, PROT_READ | PROT_WRITE); memset(p, 0xDD, sz); - stm_dbgmem_not_used(p, sz, 1); -} - -static void _stm_dbg_mark(char *p, size_t sz, uint8_t marker) -{ - long startofs, numofs, i; - struct zone_s *z = zones; - while (!(z->start <= p && p < (z->start + MMAP_LENGTH))) { - z = z->next; - assert(z); - } - startofs = p - z->start; - numofs = sz; - assert((startofs & (WORD-1)) == 0); - assert((numofs & (WORD-1)) == 0); - assert(startofs + numofs <= MMAP_LENGTH); - startofs /= WORD; - numofs /= WORD; - for (i=0; i<numofs; i++) - z->active[startofs + i] = marker; -} - -void stm_dbgmem_not_used(void *p, size_t sz, int protect) -{ - _stm_dbg_mark(p, sz, 0); - if (protect) - _stm_dbgmem(p, sz,
PROT_NONE); -} - -void stm_dbgmem_used_again(void *p, size_t sz, int protect) -{ - _stm_dbg_mark(p, sz, 42); - if (protect) - _stm_dbgmem(p, sz, PROT_READ | PROT_WRITE); -} - -int stm_dbgmem_is_active(void *p1, int allow_outside) -{ - char *p = (char *)p1; - long startofs; - uint8_t result; - struct zone_s *z = zones; - while (z && !(z->start <= p && p < (z->start + MMAP_LENGTH))) { - z = z->next; - } - if (!z) { - assert(allow_outside); - return -1; - } - startofs = p - z->start; - startofs /= WORD; - result = z->active[startofs]; - assert(result == 0 || result == 42); - return (result != 0); + _stm_dbgmem(p, sz, PROT_NONE); } /************************************************************/ diff --git a/c4/dbgmem.h b/c4/dbgmem.h --- a/c4/dbgmem.h +++ b/c4/dbgmem.h @@ -7,18 +7,10 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); -/* Debugging: for tracking which memory regions should be read or not. */ -void stm_dbgmem_not_used(void *p, size_t size, int protect); -void stm_dbgmem_used_again(void *p, size_t size, int protect); -int stm_dbgmem_is_active(void *p, int allow_outside); - #else -#define stm_malloc(sz) malloc(sz) -#define stm_free(p,sz) free(p) -#define stm_dbgmem_not_used(p,sz,i) /* nothing */ -#define stm_dbgmem_used_again(p,sz,i) /* nothing */ -#define stm_dbgmem_is_active(p,i) 1 +#define stm_malloc(sz) malloc(sz) +#define stm_free(p,sz) free(p) #endif diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -956,7 +956,7 @@ } else { - //stm_free(B); + stm_free(B, stmcb_size(B)); } }; gcptrlist_clear(&d->private_from_protected); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -105,6 +105,7 @@ * thread shuts down. It is reused the next time a thread starts. */ struct tx_public_descriptor { revision_t collection_lock; + NURSERY_FIELDS_DECL struct tx_descriptor *descriptor; struct stub_block_s *stub_blocks; gcptr stub_free_list; diff --git a/c4/stmgc.h b/c4/stmgc.h --- a/c4/stmgc.h +++ b/c4/stmgc.h @@ -26,7 +26,6 @@ /* allocate an object out of the local nursery */ -gcptr stm_allocate_object_of_size(size_t size); gcptr stm_allocate(size_t size, unsigned long tid); /* to push/pop objects into the local shadowstack */ @@ -65,10 +64,6 @@ /* callback: trace the content of an object */ extern void stmcb_trace(gcptr, void visit(gcptr *)); -/* debugging: allocate but immediately old, not via the nursery */ -gcptr _stm_allocate_object_of_size_old(size_t size); -gcptr _stm_allocate_old(size_t size, unsigned long tid); - /* You can put one GC-tracked thread-local object here. (Obviously it can be a container type containing more GC objects.) It is set to NULL by stm_initialize(). 
*/ diff --git a/c4/stmimpl.h b/c4/stmimpl.h --- a/c4/stmimpl.h +++ b/c4/stmimpl.h @@ -30,6 +30,7 @@ #include "fprintcolor.h" #include "lists.h" #include "dbgmem.h" +#include "nursery.h" #include "et.h" #include "steal.h" #include "stmsync.h" diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -59,7 +59,7 @@ { int r = DescriptorInit(); assert(r == 1); - //stmgc_init_tls(); + stm_init_nursery(); init_shadowstack(); //stmgcpage_init_tls(); BeginInevitableTransaction(); @@ -93,20 +93,6 @@ return obj; } -gcptr stm_allocate(size_t size, unsigned long tid) -{ - gcptr result = stm_malloc(size); - assert(tid == (tid & STM_USER_TID_MASK)); - result->h_tid = tid; - result->h_revision = stm_private_rev_num; - return result; -} - -gcptr _stm_allocate_old(size_t size, unsigned long tid) -{ - abort(); -} - /************************************************************/ static revision_t sync_required = 0; diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -5,11 +5,11 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "et.h lists.h steal.h " + "et.h lists.h steal.h nursery.h " "stmsync.h dbgmem.h fprintcolor.h " "stmgc.h stmimpl.h atomic_ops.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "et.c lists.c steal.c " + "et.c lists.c steal.c nursery.c " "stmsync.c dbgmem.c fprintcolor.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') @@ -40,8 +40,7 @@ #define PREBUILT_FLAGS ... #define PREBUILT_REVISION ... - //gcptr stm_allocate_object_of_size(size_t size); - gcptr stm_allocate(size_t size, unsigned long tid); + gcptr stm_allocate(size_t size, unsigned int tid); void stm_push_root(gcptr); gcptr stm_pop_root(void); void stm_set_max_aborts(int max_aborts); @@ -65,7 +64,6 @@ //void stmgc_minor_collect(void); gcptr _stm_nonrecord_barrier(gcptr); int _stm_is_private(gcptr); - int stm_dbgmem_is_active(void *p, int allow_outside); void stm_start_sharedlock(void); void stm_stop_sharedlock(void); void AbortTransaction(int); @@ -125,7 +123,6 @@ int gettid(gcptr obj) { - assert(stm_dbgmem_is_active(obj, 1)); int result = stm_get_tid(obj); assert(0 <= result && result < 521); return result; @@ -133,13 +130,11 @@ void settid(gcptr obj, int newtid) { - assert(stm_dbgmem_is_active(obj, 1)); stm_set_tid(obj, newtid); } gcptr rawgetptr(gcptr obj, long index) { - assert(stm_dbgmem_is_active(obj, 1)); assert(gettid(obj) > 421 + index); return ((gcptr *)(obj + 1))[index]; } @@ -147,21 +142,18 @@ void rawsetptr(gcptr obj, long index, gcptr newvalue) { fprintf(stderr, "%p->[%ld] = %p\n", obj, index, newvalue); - assert(stm_dbgmem_is_active(obj, 1)); assert(gettid(obj) > 421 + index); ((gcptr *)(obj + 1))[index] = newvalue; } gcptr getptr(gcptr obj, long index) { - assert(stm_dbgmem_is_active(obj, 1)); obj = stm_read_barrier(obj); return rawgetptr(obj, index); } void setptr(gcptr obj, long index, gcptr newvalue) { - assert(stm_dbgmem_is_active(obj, 1)); obj = stm_write_barrier(obj); fprintf(stderr, "setptr: write_barrier: %p, writing [%ld] = %p\n", obj, index, newvalue); @@ -170,28 +162,24 @@ long rawgetlong(gcptr obj, long index) { - assert(stm_dbgmem_is_active(obj, 1)); assert(stmcb_size(obj) >= sizeof(gcptr *) + (index+1)*sizeof(void *)); return (long)((void **)(obj + 1))[index]; } void rawsetlong(gcptr obj, long index, long newvalue) { - assert(stm_dbgmem_is_active(obj, 1)); assert(stmcb_size(obj) >= sizeof(gcptr *) + 
(index+1)*sizeof(void *)); ((void **)(obj + 1))[index] = (void *)newvalue; } long getlong(gcptr obj, long index) { - assert(stm_dbgmem_is_active(obj, 1)); obj = stm_read_barrier(obj); return rawgetlong(obj, index); } void setlong(gcptr obj, long index, long newvalue) { - assert(stm_dbgmem_is_active(obj, 1)); obj = stm_write_barrier(obj); rawsetlong(obj, index, newvalue); } @@ -214,18 +202,6 @@ return (void *)thread_descriptor->public_descriptor; } - /*gcptr *addr_of_thread_local(void) - { - return &stm_thread_local_obj; - }*/ - - /*int in_nursery(gcptr obj) - { - assert(stm_dbgmem_is_active(obj, 1)); - struct tx_descriptor *d = thread_descriptor; - return (d->nursery <= (char*)obj && ((char*)obj) < d->nursery_end); - }*/ - void stm_initialize_tests(int max_aborts) { stm_initialize(); @@ -234,7 +210,6 @@ size_t stmcb_size(gcptr obj) { - assert(stm_dbgmem_is_active(obj, 1)); if (gettid(obj) < 421) { /* basic case: tid equals 42 plus the size of the object */ assert(gettid(obj) >= 42 + sizeof(struct stm_object_s)); @@ -250,7 +225,6 @@ void stmcb_trace(gcptr obj, void visit(gcptr *)) { int i; - assert(stm_dbgmem_is_active(obj, 1)); if (gettid(obj) < 421) { /* basic case: no references */ return; @@ -467,24 +441,15 @@ lib.stmgc_minor_collect() def is_stub(p): - assert lib.stm_dbgmem_is_active(p, 1) != 0 return p.h_tid & GCFLAG_STUB def check_not_free(p): - assert lib.stm_dbgmem_is_active(p, 1) == 1 assert 42 < (p.h_tid & 0xFFFF) < 521 def check_prebuilt(p): - assert lib.stm_dbgmem_is_active(p, 1) == -1 assert 42 < (p.h_tid & 0xFFFF) < 521 assert p.h_tid & GCFLAG_PREBUILT_ORIGINAL -def check_free(p): - assert not lib.stm_dbgmem_is_active(p, 0) - -def check_nursery_free(p): - assert not lib.stm_dbgmem_is_active(p, 0) or p.h_tid == 0 - def make_global(p1): assert p1.h_revision == lib.get_local_revision() p1.h_revision = (lib.stm_global_cur_time() | 1) - 2 From noreply at buildbot.pypy.org Thu Jun 13 22:49:25 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 22:49:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: Start copying from c3 portions of code and tests. Message-ID: <20130613204925.68B111C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r119:e26c4be262b8 Date: 2013-06-13 22:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/e26c4be262b8/ Log: Start copying from c3 portions of code and tests. diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -105,7 +105,6 @@ * thread shuts down. It is reused the next time a thread starts. */ struct tx_public_descriptor { revision_t collection_lock; - NURSERY_FIELDS_DECL struct tx_descriptor *descriptor; struct stub_block_s *stub_blocks; gcptr stub_free_list; @@ -125,6 +124,8 @@ gcptr *shadowstack; gcptr **shadowstack_end_ref; + NURSERY_FIELDS_DECL + long atomic; /* 0 = not atomic, > 0 atomic */ unsigned long count_reads; unsigned long reads_size_limit; /* see should_break_tr. 
*/ diff --git a/c4/nursery.c b/c4/nursery.c new file mode 100644 --- /dev/null +++ b/c4/nursery.c @@ -0,0 +1,161 @@ +#include "stmimpl.h" + + +static int is_in_nursery(struct tx_descriptor *d, gcptr obj) +{ + return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end); +} + +/************************************************************/ + +void stm_init_nursery(void) +{ + struct tx_descriptor *d = thread_descriptor; + + assert(d->nursery_base == NULL); + d->nursery_base = stm_malloc(GC_NURSERY); + memset(d->nursery_base, 0, GC_NURSERY); + d->nursery_end = d->nursery_base + GC_NURSERY; + d->nursery_current = d->nursery_base; +} + +static char *collect_and_allocate_size(size_t size); /* forward */ + +gcptr stm_allocate(size_t size, unsigned long tid) +{ + /* XXX inline the fast path */ + struct tx_descriptor *d = thread_descriptor; + char *cur = d->nursery_current; + char *end = cur + size; + d->nursery_current = end; + if (end > d->nursery_end) { + cur = collect_and_allocate_size(size); + } + gcptr P = (gcptr)cur; + assert(tid == (tid & STM_USER_TID_MASK)); + P->h_tid = tid; + P->h_revision = stm_private_rev_num; + return P; +} + +/************************************************************/ + +static void visit_if_young(gcptr *root) +{ + gcptr obj = *root; + gcptr fresh_old_copy; + struct tx_descriptor *d = thread_descriptor; + + if (!is_in_nursery(d, obj)) { + /* not a nursery object */ + } + else { + /* a nursery object */ + fresh_old_copy = stmgc_duplicate(obj); + fresh_old_copy->h_tid |= GCFLAG_OLD; + obj->h_tid |= GCFLAG_NURSERY_MOVED; + obj->h_revision = (revision_t)fresh_old_copy; + *root = fresh_old_copy; + } +} + +static void mark_young_roots(gcptr *root, gcptr *end) +{ + /* XXX use a way to avoid walking all roots again and again */ + for (; root != end; root++) { + visit_if_young(root); + } +} + +static void setup_minor_collect(struct tx_descriptor *d) +{ + spinlock_acquire(d->public_descriptor->collection_lock, 'M'); /*minor*/ + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + + if (d->public_descriptor->stolen_objects.size != 0) + stm_normalize_stolen_objects(d); +} + +static void teardown_minor_collect(struct tx_descriptor *d) +{ + assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); + + spinlock_release(d->public_descriptor->collection_lock); +} + +static void minor_collect(struct tx_descriptor *d) +{ + fprintf(stderr, "minor collection [%p to %p]\n", + d->nursery_base, d->nursery_end); + + /* acquire the "collection lock" first */ + setup_minor_collect(d); + + mark_young_roots(d->shadowstack, *d->shadowstack_end_ref); + +#if 0 + mark_private_from_protected(d); + + mark_public_to_young(d); + + mark_private_old_pointing_to_young(d); + + visit_all_outside_objects(d); + fix_list_of_read_objects(d); + + /* now all surviving nursery objects have been moved out, and all + surviving young-but-outside-the-nursery objects have been flagged + with GCFLAG_OLD */ + finish_public_to_young(d); + + if (g2l_any_entry(&d->young_objects_outside_nursery)) + free_unvisited_young_objects_outside_nursery(d); +#endif + + teardown_minor_collect(d); + + /* clear the nursery */ + memset(d->nursery_base, 0, GC_NURSERY); + d->nursery_current = d->nursery_base; + + assert(!stmgc_minor_collect_anything_to_do(d)); +} + +void stmgc_minor_collect(void) +{ + struct tx_descriptor *d = thread_descriptor; + assert(d->active >= 1); + minor_collect(d); + AbortNowIfDelayed(); +} + +int 
stmgc_minor_collect_anything_to_do(struct tx_descriptor *d) +{ + if (d->nursery_current == d->nursery_base /*&& + !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { + /* there is no young object */ + //assert(gcptrlist_size(&d->private_old_pointing_to_young) == 0); + //assert(gcptrlist_size(&d->public_to_young) == 0); + return 0; + } + else { + /* there are young objects */ + return 1; + } +} + +static char *collect_and_allocate_size(size_t allocate_size) +{ + stmgc_minor_collect(); + //stmgcpage_possibly_major_collect(0); + + struct tx_descriptor *d = thread_descriptor; + assert(d->nursery_current == d->nursery_base); + + //_debug_roots(d->shadowstack, *d->shadowstack_end_ref); + + d->nursery_current = d->nursery_base + allocate_size; + assert(d->nursery_current <= d->nursery_end); /* XXX object too big */ + return d->nursery_base; +} diff --git a/c4/nursery.h b/c4/nursery.h new file mode 100644 --- /dev/null +++ b/c4/nursery.h @@ -0,0 +1,21 @@ +#ifndef _SRCSTM_NURSERY_H +#define _SRCSTM_NURSERY_H + +#ifndef GC_NURSERY +#define GC_NURSERY 4194304 /* 4 MB */ +#endif + + +#define NURSERY_FIELDS_DECL \ + char *nursery_current; \ + char *nursery_end; \ + char *nursery_base; \ + struct GcPtrList old_objects_to_trace; + +struct tx_descriptor; /* from et.h */ + +void stm_init_nursery(void); +void stmgc_minor_collect(void); +int stmgc_minor_collect_anything_to_do(struct tx_descriptor *); + +#endif diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py new file mode 100644 --- /dev/null +++ b/c4/test/test_nursery.py @@ -0,0 +1,33 @@ +import py +from support import * + + +def setup_function(f): + lib.stm_clear_between_tests() + lib.stm_initialize_tests(getattr(f, 'max_aborts', 0)) + +def teardown_function(_): + lib.stm_finalize() + + +def test_nursery_alloc(): + for i in range(20): + p = nalloc(HDR) + check_not_free(p) + +def test_stm_roots(): + p1 = nalloc(HDR) + p2 = nalloc(HDR) + p3 = nalloc(HDR) + seen = set() + for i in range(20): + lib.stm_push_root(p1) + lib.stm_push_root(p3) + p = nalloc(HDR) + check_not_free(p) + seen.add(p) + p3 = lib.stm_pop_root() + p1 = lib.stm_pop_root() + check_not_free(p1) + check_not_free(p3) + assert p2 in seen # the pointer location was reused From noreply at buildbot.pypy.org Thu Jun 13 22:54:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 22:54:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: stmgc_done_nursery() Message-ID: <20130613205401.A228A1C022D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r120:e1d5588970ba Date: 2013-06-13 22:53 +0200 http://bitbucket.org/pypy/stmgc/changeset/e1d5588970ba/ Log: stmgc_done_nursery() diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -8,7 +8,7 @@ /************************************************************/ -void stm_init_nursery(void) +void stmgc_init_nursery(void) { struct tx_descriptor *d = thread_descriptor; @@ -19,6 +19,15 @@ d->nursery_current = d->nursery_base; } +void stmgc_done_nursery(void) +{ + struct tx_descriptor *d = thread_descriptor; + assert(!stmgc_minor_collect_anything_to_do(d)); + stm_free(d->nursery_base, GC_NURSERY); + + gcptrlist_delete(&d->old_objects_to_trace); +} + static char *collect_and_allocate_size(size_t size); /* forward */ gcptr stm_allocate(size_t size, unsigned long tid) diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -14,7 +14,8 @@ struct tx_descriptor; /* from et.h */ -void stm_init_nursery(void); +void stmgc_init_nursery(void); +void 
stmgc_done_nursery(void); void stmgc_minor_collect(void); int stmgc_minor_collect_anything_to_do(struct tx_descriptor *); diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -59,7 +59,7 @@ { int r = DescriptorInit(); assert(r == 1); - stm_init_nursery(); + stmgc_init_nursery(); init_shadowstack(); //stmgcpage_init_tls(); BeginInevitableTransaction(); @@ -67,11 +67,11 @@ void stm_finalize(void) { - //stmgc_minor_collect(); /* force everything out of the nursery */ + stmgc_minor_collect(); /* force everything out of the nursery */ CommitTransaction(); //stmgcpage_done_tls(); done_shadowstack(); - //stmgc_done_tls(); + stmgc_done_nursery(); DescriptorDone(); } From noreply at buildbot.pypy.org Thu Jun 13 23:13:13 2013 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 13 Jun 2013 23:13:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Add aliases for the float64 and string dtypes Message-ID: <20130613211313.1A28A1C00B9@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r64877:c0ad61f37cbe Date: 2013-06-13 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/c0ad61f37cbe/ Log: Add aliases for the float64 and string dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -582,7 +582,7 @@ alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), ], - aliases=["float"], + aliases=["float", "double"], ) self.w_complex64dtype = W_ComplexDtype( types.Complex64(), @@ -663,6 +663,7 @@ char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), alternate_constructors=[space.w_str], + aliases=["str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(1), From noreply at buildbot.pypy.org Thu Jun 13 23:20:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 23:20:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: Copy more code, the next test passes Message-ID: <20130613212047.F04EF1C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r121:17c239320c14 Date: 2013-06-13 23:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/17c239320c14/ Log: Copy more code, the next test passes diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -60,11 +60,20 @@ } else { /* a nursery object */ + assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); + assert(!(obj->h_tid & GCFLAG_OLD)); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + + /* make a copy of it outside */ fresh_old_copy = stmgc_duplicate(obj); - fresh_old_copy->h_tid |= GCFLAG_OLD; obj->h_tid |= GCFLAG_NURSERY_MOVED; obj->h_revision = (revision_t)fresh_old_copy; + + /* fix the original reference */ *root = fresh_old_copy; + + /* add 'fresh_old_copy' to the list of objects to trace */ + gcptrlist_insert(&d->old_objects_to_trace, fresh_old_copy); } } @@ -76,6 +85,19 @@ } } +static void visit_all_outside_objects(struct tx_descriptor *d) +{ + while (gcptrlist_size(&d->old_objects_to_trace) > 0) { + gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); + + assert(!(obj->h_tid & GCFLAG_OLD)); + assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); + obj->h_tid |= GCFLAG_OLD | GCFLAG_WRITE_BARRIER; + + stmcb_trace(obj, &visit_if_young); + } +} + static void setup_minor_collect(struct tx_descriptor *d) { spinlock_acquire(d->public_descriptor->collection_lock, 'M'); /*minor*/ @@ -109,8 +131,10 @@ mark_public_to_young(d); mark_private_old_pointing_to_young(d); +#endif 
visit_all_outside_objects(d); +#if 0 fix_list_of_read_objects(d); /* now all surviving nursery objects have been moved out, and all diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -61,7 +61,7 @@ revision_t stm_global_cur_time(void); //void stmgcpage_add_prebuilt_root(gcptr); void stm_clear_between_tests(void); - //void stmgc_minor_collect(void); + void stmgc_minor_collect(void); gcptr _stm_nonrecord_barrier(gcptr); int _stm_is_private(gcptr); void stm_start_sharedlock(void); @@ -446,6 +446,9 @@ def check_not_free(p): assert 42 < (p.h_tid & 0xFFFF) < 521 +def check_nursery_free(p): + assert p.h_tid == p.h_revision == 0 + def check_prebuilt(p): assert 42 < (p.h_tid & 0xFFFF) < 521 assert p.h_tid & GCFLAG_PREBUILT_ORIGINAL diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -31,3 +31,15 @@ check_not_free(p1) check_not_free(p3) assert p2 in seen # the pointer location was reused + +def test_nursery_follows(): + p1 = nalloc_refs(1) + p2 = nalloc_refs(1) + rawsetptr(p1, 0, p2) + lib.stm_push_root(p1) + minor_collect() + check_nursery_free(p1) + check_nursery_free(p2) + p1b = lib.stm_pop_root() + p2b = rawgetptr(p1b, 0) + assert rawgetptr(p2b, 0) == ffi.NULL From noreply at buildbot.pypy.org Thu Jun 13 23:20:49 2013 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Jun 2013 23:20:49 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20130613212049.218C41C00B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r122:03a8ca843df4 Date: 2013-06-13 23:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/03a8ca843df4/ Log: Next test diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -9,7 +9,8 @@ #define MMAP_TOTAL 671088640 /* 640MB */ static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER; -static char *zone_current = NULL, *zone_end = NULL; +static char *zone_start, *zone_current = NULL, *zone_end = NULL; +static signed char accessible_pages[MMAP_TOTAL / PAGE_SIZE] = {0}; static void _stm_dbgmem(void *p, size_t sz, int prot) @@ -30,15 +31,16 @@ pthread_mutex_lock(&malloc_mutex); if (zone_current == NULL) { - zone_current = mmap(NULL, MMAP_TOTAL, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (zone_current == NULL || zone_current == MAP_FAILED) { + zone_start = mmap(NULL, MMAP_TOTAL, PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (zone_start == NULL || zone_start == MAP_FAILED) { fprintf(stderr, "not enough memory: mmap() failed\n"); abort(); } - zone_end = zone_current + MMAP_TOTAL; + zone_current = zone_start; + zone_end = zone_start + MMAP_TOTAL; assert((MMAP_TOTAL % PAGE_SIZE) == 0); - _stm_dbgmem(zone_current, MMAP_TOTAL, PROT_NONE); + _stm_dbgmem(zone_start, MMAP_TOTAL, PROT_NONE); } size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; @@ -54,14 +56,33 @@ result += (-sz) & (PAGE_SIZE-1); assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0); _stm_dbgmem(result, sz, PROT_READ | PROT_WRITE); + + long i, base = (result - zone_start) / PAGE_SIZE; + for (i = 0; i < nb_pages; i++) + accessible_pages[base + i] = 42; + return result; } void stm_free(void *p, size_t sz) { + size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1; + long i, base = ((char *)p - zone_start) / PAGE_SIZE; + assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE)); + for (i = 0; i < nb_pages; i++) { + assert(accessible_pages[base + i] == 42); + accessible_pages[base + i] = -1; + } memset(p, 0xDD, sz); 
_stm_dbgmem(p, sz, PROT_NONE); } +int _stm_can_access_memory(char *p) +{ + long base = ((char *)p - zone_start) / PAGE_SIZE; + assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE)); + return accessible_pages[base] == 42; +} + /************************************************************/ #endif diff --git a/c4/dbgmem.h b/c4/dbgmem.h --- a/c4/dbgmem.h +++ b/c4/dbgmem.h @@ -6,6 +6,7 @@ void *stm_malloc(size_t); void stm_free(void *, size_t); +int _stm_can_access_memory(char *); #else diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -87,8 +87,7 @@ revision_t get_start_time(void); void *my_stub_thread(void); - //gcptr *addr_of_thread_local(void); - //int in_nursery(gcptr); + int _stm_can_access_memory(char *); void stm_initialize_tests(int max_aborts); /* some constants normally private that are useful in the tests */ @@ -449,6 +448,9 @@ def check_nursery_free(p): assert p.h_tid == p.h_revision == 0 +def check_inaccessible(p): + assert not lib._stm_can_access_memory(p) + def check_prebuilt(p): assert 42 < (p.h_tid & 0xFFFF) < 521 assert p.h_tid & GCFLAG_PREBUILT_ORIGINAL diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -43,3 +43,9 @@ p1b = lib.stm_pop_root() p2b = rawgetptr(p1b, 0) assert rawgetptr(p2b, 0) == ffi.NULL + +def test_free_nursery_at_thread_end(): + p1 = nalloc(HDR) + lib.stm_finalize() + check_inaccessible(p1) + lib.stm_initialize_tests(0) From noreply at buildbot.pypy.org Thu Jun 13 23:48:17 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 13 Jun 2013 23:48:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Implement dtype.str. Try hard to do the same as cNumpy. Message-ID: <20130613214817.9C3AB1C022D@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r64878:96ef4e26e420 Date: 2013-06-13 23:47 +0200 http://bitbucket.org/pypy/pypy/changeset/96ef4e26e420/ Log: Implement dtype.str. Try hard to do the same as cNumpy. 
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -115,6 +115,21 @@ return space.wrap('=') return space.wrap(nonnative_byteorder_prefix) + def descr_get_str(self, space): + size = self.get_size() + basic = self.kind + if basic == UNICODELTR: + size >>= 2 + endian = byteorder_prefix + elif size <= 1: + endian = '|' # ignore + elif self.native: + endian = byteorder_prefix + else: + endian = nonnative_byteorder_prefix + + return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -421,6 +436,7 @@ char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), + str = GetSetProperty(W_Dtype.descr_get_str), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), @@ -666,7 +682,7 @@ aliases=["str"], ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(1), + types.UnicodeType(0), num=19, kind=UNICODELTR, name='unicode', diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -683,6 +683,20 @@ assert dtype('=i8').byteorder == '=' assert dtype(byteorder + 'i8').byteorder == '=' + def test_dtype_str(self): + from numpypy import dtype + byteorder = self.native_prefix + assert dtype('i8').str == byteorder + 'i8' + assert dtype('i8').str == '>i8' + assert dtype('int8').str == '|i1' + assert dtype('float').str == byteorder + 'f8' + # strange + assert dtype('string').str == '|S0' + assert dtype('unicode').str == byteorder + 'U0' + # assert dtype(('string', 7)).str == '|S7' + # assert dtype(('unicode', 7)).str == ' Author: Amaury Forgeot d'Arc Branch: Changeset: r64879:2ebf68f7c7be Date: 2013-06-14 00:12 +0200 http://bitbucket.org/pypy/pypy/changeset/2ebf68f7c7be/ Log: Fill more values in array.__array_interface__ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -418,8 +418,16 @@ addr = self.implementation.get_storage_as_int(space) # will explode if it can't w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) + space.setitem_str(w_d, 'data', + space.newtuple([space.wrap(addr), space.w_False])) + space.setitem_str(w_d, 'shape', self.descr_get_shape(space)) + space.setitem_str(w_d, 'typestr', self.get_dtype().descr_get_str(space)) + if self.implementation.order == 'C': + # Array is contiguous, no strides in the interface. 
+ strides = space.w_None + else: + strides = self.descr_get_strides(space) + space.setitem_str(w_d, 'strides', strides) return w_d w_pypy_data = None diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2138,6 +2138,9 @@ a = array([1, 2, 3]) i = a.__array_interface__ assert isinstance(i['data'][0], int) + assert i['shape'] == (3,) + assert i['strides'] == None # Because array is in C order + assert i['typestr'] == a.dtype.str a = a[::2] i = a.__array_interface__ assert isinstance(i['data'][0], int) From noreply at buildbot.pypy.org Fri Jun 14 14:58:25 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 14:58:25 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: fix malloc, cleanup Message-ID: <20130614125825.6FB421C13B0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64880:938d68eb8a73 Date: 2013-06-14 15:57 +0300 http://bitbucket.org/pypy/pypy/changeset/938d68eb8a73/ Log: fix malloc, cleanup diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -33,11 +33,8 @@ self.indexes = indexes def __del__(self): - print 'Repr.del',self.values def getitem(self, item): - #print 'getting',item,'of',self.size,self.values - #print 'from',item*self.stride_size + self.start,'to',item*(self.stride_size+1) + self.start if count < 2: v = raw_storage_getitem(TP, self.values, item * self.stride_size + self.start) @@ -79,7 +76,7 @@ indexes = dtype.itemtype.malloc(size*dtype.get_size()) values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, index_stride_size, stride_size, + Repr.__init__(self, dtype.get_size(), stride_size, size, values, indexes, start, start) def __del__(self): @@ -96,12 +93,10 @@ return lst.size def arg_getitem_slice(lst, start, stop): - print 'arg_getitem_slice',lst.values retval = ArgArrayRepWithStorage(lst.index_stride_size, lst.stride_size, stop-start) for i in range(stop-start): retval.setitem(i, lst.getitem(i+start)) - print 'arg_getitem_slice done',lst return retval if count < 2: @@ -155,16 +150,13 @@ stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - print '5' while not iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, arr.get_storage(), storage, index_iter.offset, iter.offset) - print '6' ArgSort(r).sort() - print '7' iter.next() index_iter.next() return index_arr From noreply at buildbot.pypy.org Fri Jun 14 15:08:45 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 15:08:45 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: whoops Message-ID: <20130614130845.B193A1C13B0@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64881:65ca8e4122fb Date: 2013-06-14 16:07 +0300 http://bitbucket.org/pypy/pypy/changeset/65ca8e4122fb/ Log: whoops diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -32,8 +32,6 @@ self.values = values self.indexes = indexes - def __del__(self): - def getitem(self, item): if count < 2: v = 
raw_storage_getitem(TP, self.values, item * self.stride_size From noreply at buildbot.pypy.org Fri Jun 14 15:18:10 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 15:18:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20130614131810.3E1B61C3354@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r123:171f8b47b009 Date: 2013-06-14 15:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/171f8b47b009/ Log: Next test diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -391,14 +391,6 @@ #endif } -gcptr stmgc_duplicate(gcptr P) -{ - size_t size = stmcb_size(P); - gcptr L = stm_malloc(size); - memcpy(L, P, size); - return L; -} - static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P) { gcptr B; @@ -447,6 +439,7 @@ 0); L->h_revision = stm_private_rev_num; g2l_insert(&d->public_to_private, R, L); + gcptrlist_insert(&d->public_to_young, R); fprintf(stderr, "write_barrier: adding %p -> %p to public_to_private\n", R, L); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -172,7 +172,6 @@ gcptr stm_get_private_from_protected(long); /* debugging */ gcptr stm_get_read_obj(long); /* debugging */ void stm_clear_read_cache(void); /* debugging */ -gcptr stmgc_duplicate(gcptr); int DescriptorInit(void); void DescriptorDone(void); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -26,13 +26,13 @@ stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); + gcptrlist_delete(&d->public_to_young); } static char *collect_and_allocate_size(size_t size); /* forward */ -gcptr stm_allocate(size_t size, unsigned long tid) +inline static char *allocate_nursery(size_t size) { - /* XXX inline the fast path */ struct tx_descriptor *d = thread_descriptor; char *cur = d->nursery_current; char *end = cur + size; @@ -40,15 +40,47 @@ if (end > d->nursery_end) { cur = collect_and_allocate_size(size); } - gcptr P = (gcptr)cur; + return cur; +} + +gcptr stm_allocate(size_t size, unsigned long tid) +{ + /* XXX inline the fast path */ + gcptr P = (gcptr)allocate_nursery(size); assert(tid == (tid & STM_USER_TID_MASK)); P->h_tid = tid; P->h_revision = stm_private_rev_num; return P; } +gcptr stmgc_duplicate(gcptr P) +{ + size_t size = stmcb_size(P); + gcptr L = (gcptr)allocate_nursery(size); + memcpy(L, P, size); + L->h_tid &= ~GCFLAG_OLD; + return L; +} + /************************************************************/ +static inline gcptr create_old_object_copy(gcptr obj) +{ + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_VISITED)); + assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_OLD)); + + size_t size = stmcb_size(obj); + gcptr fresh_old_copy = stm_malloc(size); + memcpy(fresh_old_copy, obj, size); + fresh_old_copy->h_tid |= GCFLAG_OLD; + + fprintf(stderr, "minor: %p is copied to %p\n", obj, fresh_old_copy); + return fresh_old_copy; +} + static void visit_if_young(gcptr *root) { gcptr obj = *root; @@ -60,12 +92,8 @@ } else { /* a nursery object */ - assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - assert(!(obj->h_tid & GCFLAG_OLD)); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - /* make a copy of it outside */ - fresh_old_copy = stmgc_duplicate(obj); + fresh_old_copy = create_old_object_copy(obj); obj->h_tid |= GCFLAG_NURSERY_MOVED; obj->h_revision = (revision_t)fresh_old_copy; @@ -77,22 +105,40 @@ } } -static void mark_young_roots(gcptr *root, gcptr *end) +static void 
mark_young_roots(struct tx_descriptor *d) { + gcptr *root = d->shadowstack; + gcptr *end = *d->shadowstack_end_ref; + /* XXX use a way to avoid walking all roots again and again */ for (; root != end; root++) { visit_if_young(root); } } +static void mark_public_to_young(struct tx_descriptor *d) +{ + long i, size = d->public_to_young.size; + gcptr *items = d->public_to_young.items; + + for (i = 0; i < size; i++) { + gcptr P = items[i]; + wlog_t *item; + + G2L_FIND(d->public_to_private, P, item, continue); + visit_if_young(&item->val); + } + gcptrlist_clear(&d->public_to_young); +} + static void visit_all_outside_objects(struct tx_descriptor *d) { while (gcptrlist_size(&d->old_objects_to_trace) > 0) { gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); - assert(!(obj->h_tid & GCFLAG_OLD)); + assert(obj->h_tid & GCFLAG_OLD); assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - obj->h_tid |= GCFLAG_OLD | GCFLAG_WRITE_BARRIER; + obj->h_tid |= GCFLAG_WRITE_BARRIER; stmcb_trace(obj, &visit_if_young); } @@ -123,13 +169,15 @@ /* acquire the "collection lock" first */ setup_minor_collect(d); - mark_young_roots(d->shadowstack, *d->shadowstack_end_ref); + mark_young_roots(d); #if 0 mark_private_from_protected(d); +#endif mark_public_to_young(d); +#if 0 mark_private_old_pointing_to_young(d); #endif diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -10,7 +10,8 @@ char *nursery_current; \ char *nursery_end; \ char *nursery_base; \ - struct GcPtrList old_objects_to_trace; + struct GcPtrList old_objects_to_trace; \ + struct GcPtrList public_to_young; struct tx_descriptor; /* from et.h */ @@ -18,5 +19,6 @@ void stmgc_done_nursery(void); void stmgc_minor_collect(void); int stmgc_minor_collect_anything_to_do(struct tx_descriptor *); +gcptr stmgc_duplicate(gcptr); #endif diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -71,6 +71,7 @@ gcptr stm_get_read_obj(long index); void *STUB_THREAD(gcptr); void stm_clear_read_cache(void); + int in_nursery(gcptr); gcptr getptr(gcptr, long); void setptr(gcptr, long, gcptr); @@ -183,6 +184,13 @@ rawsetlong(obj, index, newvalue); } + int in_nursery(gcptr obj) + { + struct tx_descriptor *d = thread_descriptor; + return (d->nursery_base <= (char*)obj && + ((char*)obj) < d->nursery_end); + } + gcptr pseudoprebuilt(size_t size, int tid) { gcptr x = calloc(1, size); diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -49,3 +49,25 @@ lib.stm_finalize() check_inaccessible(p1) lib.stm_initialize_tests(0) + +def test_local_copy_out_of_nursery(): + p1 = palloc(HDR + WORD) + lib.rawsetlong(p1, 0, 420063) + assert not lib.in_nursery(p1) + assert p1.h_revision != lib.get_private_rev_num() + # + p2 = lib.stm_write_barrier(p1) + assert lib.rawgetlong(p2, 0) == 420063 + lib.rawsetlong(p2, 0, -91467) + assert lib.in_nursery(p2) + assert p2.h_revision == lib.get_private_rev_num() + # + lib.stm_push_root(p1) + minor_collect() + p1b = lib.stm_pop_root() + assert p1b == p1 + assert lib.rawgetlong(p1b, 0) == 420063 + # + p3 = lib.stm_read_barrier(p1) + assert not lib.in_nursery(p3) and p3 != p2 + assert lib.rawgetlong(p3, 0) == -91467 From noreply at buildbot.pypy.org Fri Jun 14 15:19:22 2013 From: noreply at buildbot.pypy.org (amauryfa) Date: Fri, 14 Jun 2013 15:19:22 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Add support for an opcode recently introduced in pypy. 
Message-ID: <20130614131922.7C6E61C3354@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r214:92c5c74ce34c Date: 2013-06-14 15:19 +0200 http://bitbucket.org/pypy/benchmarks/changeset/92c5c74ce34c/ Log: Add support for an opcode recently introduced in pypy. Necessary when translating with such a recent interpreter. diff --git a/lib/pypy/rpython/flowspace/flowcontext.py b/lib/pypy/rpython/flowspace/flowcontext.py --- a/lib/pypy/rpython/flowspace/flowcontext.py +++ b/lib/pypy/rpython/flowspace/flowcontext.py @@ -800,6 +800,9 @@ self.popvalue() return next_instr + def JUMP_IF_NOT_DEBUG(self, target, next_instr): + return next_instr + def GET_ITER(self, oparg, next_instr): w_iterable = self.popvalue() w_iterator = self.space.iter(w_iterable) From noreply at buildbot.pypy.org Fri Jun 14 15:26:33 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 15:26:33 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: merge default into branch Message-ID: <20130614132633.095E21C3354@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64882:2312717ee8a5 Date: 2013-06-14 16:21 +0300 http://bitbucket.org/pypy/pypy/changeset/2312717ee8a5/ Log: merge default into branch diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -3,3 +3,6 @@ d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1 +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm +ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -218,6 +218,7 @@ Impara, Germany Change Maker, Sweden University of California Berkeley, USA + Google Inc. 
The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -476,6 +476,15 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + def _extract_yx(args): if len(args) >= 2: @@ -589,6 +598,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -602,6 +612,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -780,6 +791,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -793,6 +805,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1197,6 +1210,7 @@ def putp(text): + text = _texttype(text) return _check_ERR(lib.putp(text), "putp") diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/__init__.py @@ -0,0 +1,43 @@ +# _tkinter package -- low-level interface to libtk and libtcl. +# +# This is an internal module, applications should "import Tkinter" instead. +# +# This version is based on cffi, and is a translation of _tkinter.c +# from CPython, version 2.7.4. + +class TclError(Exception): + pass + +from .tklib import tklib, tkffi +from .app import TkApp + +TK_VERSION = tkffi.string(tklib.get_tk_version()) +TCL_VERSION = tkffi.string(tklib.get_tcl_version()) + +READABLE = tklib.TCL_READABLE +WRITABLE = tklib.TCL_WRITABLE +EXCEPTION = tklib.TCL_EXCEPTION + +def create(screenName=None, baseName=None, className=None, + interactive=False, wantobjects=False, wantTk=True, + sync=False, use=None): + return TkApp(screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use) + +def _flatten(item): + def _flatten1(output, item, depth): + if depth > 1000: + raise ValueError("nesting too deep in _flatten") + if not isinstance(item, (list, tuple)): + raise TypeError("argument must be sequence") + # copy items to output tuple + for o in item: + if isinstance(o, (list, tuple)): + _flatten1(output, o, depth + 1) + elif o is not None: + output.append(o) + + result = [] + _flatten1(result, item, 0) + return tuple(result) + diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/app.py @@ -0,0 +1,389 @@ +# The TkApp class. + +from .tklib import tklib, tkffi +from . 
import TclError +from .tclobj import TclObject, FromObj, AsObj, TypeCache + +import sys + +def varname_converter(input): + if isinstance(input, TclObject): + return input.string + return input + + +def Tcl_AppInit(app): + if tklib.Tcl_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + skip_tk_init = tklib.Tcl_GetVar( + app.interp, "_tkinter_skip_tk_init", tklib.TCL_GLOBAL_ONLY) + if skip_tk_init and tkffi.string(skip_tk_init) == "1": + return + + if tklib.Tk_Init(app.interp) == tklib.TCL_ERROR: + app.raiseTclError() + +class _CommandData(object): + def __new__(cls, app, name, func): + self = object.__new__(cls) + self.app = app + self.name = name + self.func = func + handle = tkffi.new_handle(self) + app._commands[name] = handle # To keep the command alive + return tkffi.cast("ClientData", handle) + + @tkffi.callback("Tcl_CmdProc") + def PythonCmd(clientData, interp, argc, argv): + self = tkffi.from_handle(clientData) + assert self.app.interp == interp + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK + + @tkffi.callback("Tcl_CmdDeleteProc") + def PythonCmdDelete(clientData): + self = tkffi.from_handle(clientData) + app = self.app + del app._commands[self.name] + return + + +class TkApp(object): + def __new__(cls, screenName, baseName, className, + interactive, wantobjects, wantTk, sync, use): + if not wantobjects: + raise NotImplementedError("wantobjects=True only") + self = object.__new__(cls) + self.interp = tklib.Tcl_CreateInterp() + self._wantobjects = wantobjects + self.threaded = bool(tklib.Tcl_GetVar2Ex( + self.interp, "tcl_platform", "threaded", + tklib.TCL_GLOBAL_ONLY)) + self.thread_id = tklib.Tcl_GetCurrentThread() + self.dispatching = False + self.quitMainLoop = False + self.errorInCmd = False + + self._typeCache = TypeCache() + self._commands = {} + + # Delete the 'exit' command, which can screw things up + tklib.Tcl_DeleteCommand(self.interp, "exit") + + if screenName is not None: + tklib.Tcl_SetVar2(self.interp, "env", "DISPLAY", screenName, + tklib.TCL_GLOBAL_ONLY) + + if interactive: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "1", + tklib.TCL_GLOBAL_ONLY) + else: + tklib.Tcl_SetVar(self.interp, "tcl_interactive", "0", + tklib.TCL_GLOBAL_ONLY) + + # This is used to get the application class for Tk 4.1 and up + argv0 = className.lower() + tklib.Tcl_SetVar(self.interp, "argv0", argv0, + tklib.TCL_GLOBAL_ONLY) + + if not wantTk: + tklib.Tcl_SetVar(self.interp, "_tkinter_skip_tk_init", "1", + tklib.TCL_GLOBAL_ONLY) + + # some initial arguments need to be in argv + if sync or use: + args = "" + if sync: + args += "-sync" + if use: + if sync: + args += " " + args += "-use " + use + + tklib.Tcl_SetVar(self.interp, "argv", args, + tklib.TCL_GLOBAL_ONLY) + + Tcl_AppInit(self) + # EnableEventHook() + return self + + def __del__(self): + tklib.Tcl_DeleteInterp(self.interp) + # DisableEventHook() + + def raiseTclError(self): + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + raise TclError(tkffi.string(tklib.Tcl_GetStringResult(self.interp))) + + def wantobjects(self): + return self._wantobjects + + def _check_tcl_appartment(self): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise RuntimeError("Calling Tcl from different appartment") + + def loadtk(self): 
+ # We want to guard against calling Tk_Init() multiple times + err = tklib.Tcl_Eval(self.interp, "info exists tk_version") + if err == tklib.TCL_ERROR: + self.raiseTclError() + tk_exists = tklib.Tcl_GetStringResult(self.interp) + if not tk_exists or tkffi.string(tk_exists) != "1": + err = tklib.Tk_Init(self.interp) + if err == tklib.TCL_ERROR: + self.raiseTclError() + + def _var_invoke(self, func, *args, **kwargs): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + # The current thread is not the interpreter thread. + # Marshal the call to the interpreter thread, then wait + # for completion. + raise NotImplementedError("Call from another thread") + return func(*args, **kwargs) + + def _getvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) + + def _setvar(self, name1, value, global_only=False): + name1 = varname_converter(name1) + newval = AsObj(value) + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() + + def _unsetvar(self, name1, name2=None, global_only=False): + name1 = varname_converter(name1) + if not name2: + name2 = tkffi.NULL + flags=tklib.TCL_LEAVE_ERR_MSG + if global_only: + flags |= tklib.TCL_GLOBAL_ONLY + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def getvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2) + + def globalgetvar(self, name1, name2=None): + return self._var_invoke(self._getvar, name1, name2, global_only=True) + + def setvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value) + + def globalsetvar(self, name1, value): + return self._var_invoke(self._setvar, name1, value, global_only=True) + + def unsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2) + + def globalunsetvar(self, name1, name2=None): + return self._var_invoke(self._unsetvar, name1, name2, global_only=True) + + # COMMANDS + + def createcommand(self, cmdName, func): + if not callable(func): + raise TypeError("command not callable") + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + clientData = _CommandData(self, cmdName, func) + + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) + if not res: + raise TclError("can't create Tcl command") + + def deletecommand(self, cmdName): + if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): + raise NotImplementedError("Call from another thread") + + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + if res == -1: + raise TclError("can't delete Tcl command") + + def call(self, *args): + flags = tklib.TCL_EVAL_DIRECT | tklib.TCL_EVAL_GLOBAL + + # If args is a single tuple, replace with contents of tuple + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if self.threaded and self.thread_id != 
tklib.Tcl_GetCurrentThread(): + # We cannot call the command directly. Instead, we must + # marshal the parameters to the interpreter thread. + raise NotImplementedError("Call from another thread") + + objects = tkffi.new("Tcl_Obj*[]", len(args)) + argc = len(args) + try: + for i, arg in enumerate(args): + if arg is None: + argc = i + break + obj = AsObj(arg) + tklib.Tcl_IncrRefCount(obj) + objects[i] = obj + + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() + finally: + for obj in objects: + if obj: + tklib.Tcl_DecrRefCount(obj) + return result + + def _callResult(self): + assert self._wantobjects + value = tklib.Tcl_GetObjResult(self.interp) + # Not sure whether the IncrRef is necessary, but something + # may overwrite the interpreter result while we are + # converting it. + tklib.Tcl_IncrRefCount(value) + res = FromObj(self, value) + tklib.Tcl_DecrRefCount(value) + return res + + def eval(self, script): + self._check_tcl_appartment() + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def evalfile(self, filename): + self._check_tcl_appartment() + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + + def split(self, arg): + if isinstance(arg, tuple): + return self._splitObj(arg) + else: + return self._split(arg) + + def splitlist(self, arg): + if isinstance(arg, tuple): + return arg + if isinstance(arg, unicode): + arg = arg.encode('utf8') + + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(self.interp, arg, argc, argv) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + result = tuple(tkffi.string(argv[0][i]) + for i in range(argc[0])) + tklib.Tcl_Free(argv[0]) + return result + + def _splitObj(self, arg): + if isinstance(arg, tuple): + size = len(arg) + # Recursively invoke SplitObj for all tuple items. + # If this does not return a new object, no action is + # needed. + result = None + newelems = (self._splitObj(elem) for elem in arg) + for elem, newelem in zip(arg, newelems): + if elem is not newelem: + return newelems + elif isinstance(arg, str): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + return arg + tklib.Tcl_Free(argv[0]) + if argc[0] > 1: + return self._split(arg) + return arg + + def _split(self, arg): + argc = tkffi.new("int*") + argv = tkffi.new("char***") + res = tklib.Tcl_SplitList(tkffi.NULL, arg, argc, argv) + if res == tklib.TCL_ERROR: + # Not a list. + # Could be a quoted string containing funnies, e.g. {"}. + # Return the string itself. 
+ return arg + + try: + if argc[0] == 0: + return "" + elif argc[0] == 1: + return argv[0][0] + else: + return (self._split(argv[0][i]) + for i in range(argc[0])) + finally: + tklib.Tcl_Free(argv[0]) + + def getboolean(self, s): + if isinstance(s, int): + return s + v = tkffi.new("int*") + res = tklib.Tcl_GetBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + + def mainloop(self, threshold): + self._check_tcl_appartment() + self.dispatching = True + while (tklib.Tk_GetNumMainWindows() > threshold and + not self.quitMainLoop and not self.errorInCmd): + + if self.threaded: + result = tklib.Tcl_DoOneEvent(0) + else: + raise NotImplementedError("TCL configured without threads") + + if result < 0: + break + self.dispatching = False + self.quitMainLoop = False + if self.errorInCmd: + self.errorInCmd = False + raise self.exc_info[0], self.exc_info[1], self.exc_info[2] + + def quit(self): + self.quitMainLoop = True diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tclobj.py @@ -0,0 +1,114 @@ +# TclObject, conversions with Python objects + +from .tklib import tklib, tkffi + +class TypeCache(object): + def __init__(self): + self.BooleanType = tklib.Tcl_GetObjType("boolean") + self.ByteArrayType = tklib.Tcl_GetObjType("bytearray") + self.DoubleType = tklib.Tcl_GetObjType("double") + self.IntType = tklib.Tcl_GetObjType("int") + self.ListType = tklib.Tcl_GetObjType("list") + self.ProcBodyType = tklib.Tcl_GetObjType("procbody") + self.StringType = tklib.Tcl_GetObjType("string") + + +def FromObj(app, value): + """Convert a TclObj pointer into a Python object.""" + typeCache = app._typeCache + if not value.typePtr: + buf = tkffi.buffer(value.bytes, value.length) + result = buf[:] + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. 
+ try: + result.decode('ascii') + except UnicodeDecodeError: + result = result.decode('utf8') + return result + + elif value.typePtr == typeCache.BooleanType: + return result + elif value.typePtr == typeCache.ByteArrayType: + return result + elif value.typePtr == typeCache.DoubleType: + return value.internalRep.doubleValue + elif value.typePtr == typeCache.IntType: + return value.internalRep.longValue + elif value.typePtr == typeCache.ListType: + size = tkffi.new('int*') + status = tklib.Tcl_ListObjLength(app.interp, value, size) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result = [] + tcl_elem = tkffi.new("Tcl_Obj**") + for i in range(size[0]): + status = tklib.Tcl_ListObjIndex(app.interp, + value, i, tcl_elem) + if status == tklib.TCL_ERROR: + app.raiseTclError() + result.append(FromObj(app, tcl_elem[0])) + return tuple(result) + elif value.typePtr == typeCache.ProcBodyType: + return result + elif value.typePtr == typeCache.StringType: + buf = tklib.Tcl_GetUnicode(value) + length = tklib.Tcl_GetCharLength(value) + buf = tkffi.buffer(tkffi.cast("char*", buf), length*2)[:] + return buf.decode('utf-16') + + return TclObject(value) + +def AsObj(value): + if isinstance(value, str): + return tklib.Tcl_NewStringObj(value, len(value)) + elif isinstance(value, bool): + return tklib.Tcl_NewBooleanObj(value) + elif isinstance(value, int): + return tklib.Tcl_NewLongObj(value) + elif isinstance(value, float): + return tklib.Tcl_NewDoubleObj(value) + elif isinstance(value, tuple): + argv = tkffi.new("Tcl_Obj*[]", len(value)) + for i in range(len(value)): + argv[i] = AsObj(value[i]) + return tklib.Tcl_NewListObj(len(value), argv) + elif isinstance(value, unicode): + encoded = value.encode('utf-16')[2:] + buf = tkffi.new("char[]", encoded) + inbuf = tkffi.cast("Tcl_UniChar*", buf) + return tklib.Tcl_NewUnicodeObj(buf, len(encoded)/2) + elif isinstance(value, TclObject): + tklib.Tcl_IncrRefCount(value._value) + return value._value + else: + return AsObj(str(value)) + +class TclObject(object): + def __new__(cls, value): + self = object.__new__(cls) + tklib.Tcl_IncrRefCount(value) + self._value = value + self._string = None + return self + + def __del__(self): + tklib.Tcl_DecrRefCount(self._value) + + def __str__(self): + if self._string and isinstance(self._string, str): + return self._string + return tkffi.string(tklib.Tcl_GetString(self._value)) + + @property + def string(self): + if self._string is None: + length = tkffi.new("int*") + s = tklib.Tcl_GetStringFromObj(self._value, length) + value = tkffi.buffer(s, length[0])[:] + try: + value.decode('ascii') + except UnicodeDecodeError: + value = value.decode('utf8') + self._string = value + return self._string diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/tklib.py @@ -0,0 +1,114 @@ +# C bindings with libtcl and libtk. + +from cffi import FFI + +tkffi = FFI() + +tkffi.cdef(""" +char *get_tk_version(); +char *get_tcl_version(); +#define TCL_READABLE ... +#define TCL_WRITABLE ... +#define TCL_EXCEPTION ... +#define TCL_ERROR ... +#define TCL_OK ... + +#define TCL_LEAVE_ERR_MSG ... +#define TCL_GLOBAL_ONLY ... +#define TCL_EVAL_DIRECT ... +#define TCL_EVAL_GLOBAL ... + +typedef unsigned short Tcl_UniChar; +typedef ... 
Tcl_Interp; +typedef ...* Tcl_ThreadId; +typedef ...* Tcl_Command; + +typedef struct Tcl_ObjType { + char *name; + ...; +} Tcl_ObjType; +typedef struct Tcl_Obj { + char *bytes; + int length; + Tcl_ObjType *typePtr; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; + } internalRep; + ...; +} Tcl_Obj; + +Tcl_Interp *Tcl_CreateInterp(); +void Tcl_DeleteInterp(Tcl_Interp* interp); +int Tcl_Init(Tcl_Interp* interp); +int Tk_Init(Tcl_Interp* interp); + +void Tcl_Free(char* ptr); + +const char *Tcl_SetVar(Tcl_Interp* interp, const char* varName, const char* newValue, int flags); +const char *Tcl_SetVar2(Tcl_Interp* interp, const char* name1, const char* name2, const char* newValue, int flags); +const char *Tcl_GetVar(Tcl_Interp* interp, const char* varName, int flags); +Tcl_Obj *Tcl_SetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, Tcl_Obj* newValuePtr, int flags); +Tcl_Obj *Tcl_GetVar2Ex(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +int Tcl_UnsetVar2(Tcl_Interp* interp, const char* name1, const char* name2, int flags); +const Tcl_ObjType *Tcl_GetObjType(const char* typeName); + +Tcl_Obj *Tcl_NewStringObj(const char* bytes, int length); +Tcl_Obj *Tcl_NewUnicodeObj(const Tcl_UniChar* unicode, int numChars); +Tcl_Obj *Tcl_NewLongObj(long longValue); +Tcl_Obj *Tcl_NewBooleanObj(int boolValue); +Tcl_Obj *Tcl_NewDoubleObj(double doubleValue); + +void Tcl_IncrRefCount(Tcl_Obj* objPtr); +void Tcl_DecrRefCount(Tcl_Obj* objPtr); + +int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); +char *Tcl_GetString(Tcl_Obj* objPtr); +char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); + +Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); +int Tcl_GetCharLength(Tcl_Obj* objPtr); + +Tcl_Obj *Tcl_NewListObj(int objc, Tcl_Obj* const objv[]); +int Tcl_ListObjLength(Tcl_Interp* interp, Tcl_Obj* listPtr, int* intPtr); +int Tcl_ListObjIndex(Tcl_Interp* interp, Tcl_Obj* listPtr, int index, Tcl_Obj** objPtrPtr); +int Tcl_SplitList(Tcl_Interp* interp, char* list, int* argcPtr, const char*** argvPtr); + +int Tcl_Eval(Tcl_Interp* interp, const char* script); +int Tcl_EvalFile(Tcl_Interp* interp, const char* filename); +int Tcl_EvalObjv(Tcl_Interp* interp, int objc, Tcl_Obj** objv, int flags); +Tcl_Obj *Tcl_GetObjResult(Tcl_Interp* interp); +const char *Tcl_GetStringResult(Tcl_Interp* interp); +void Tcl_SetObjResult(Tcl_Interp* interp, Tcl_Obj* objPtr); + +typedef void* ClientData; +typedef int Tcl_CmdProc( + ClientData clientData, + Tcl_Interp *interp, + int argc, + const char *argv[]); +typedef void Tcl_CmdDeleteProc( + ClientData clientData); +Tcl_Command Tcl_CreateCommand(Tcl_Interp* interp, const char* cmdName, Tcl_CmdProc proc, ClientData clientData, Tcl_CmdDeleteProc deleteProc); +int Tcl_DeleteCommand(Tcl_Interp* interp, const char* cmdName); + +Tcl_ThreadId Tcl_GetCurrentThread(); +int Tcl_DoOneEvent(int flags); + +int Tk_GetNumMainWindows(); +""") + +tklib = tkffi.verify(""" +#include +#include + +char *get_tk_version() { return TK_VERSION; } +char *get_tcl_version() { return TCL_VERSION; } +""", +include_dirs=['/usr/include/tcl'], +libraries=['tcl', 'tk'], +) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -361,13 +361,13 @@ backend = ffi._backend try: if '.' 
not in name and '/' not in name: - raise OSError + raise OSError("library not found: %r" % (name,)) backendlib = backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: - raise OSError("library not found: %r" % (name,)) + raise # propagate the original OSError backendlib = backend.load_library(path, flags) copied_enums = [] # diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -289,16 +289,6 @@ "with verify() (see pypy/module/_cffi_backend/ctypefunc.py " "for details)")) - if USE_C_LIBFFI_MSVC and is_result_type: - # MSVC returns small structures in registers. Pretend int32 or - # int64 return type. This is needed as a workaround for what - # is really a bug of libffi_msvc seen as an independent library - # (ctypes has a similar workaround). - if ctype.size <= 4: - return clibffi.ffi_type_sint32 - if ctype.size <= 8: - return clibffi.ffi_type_sint64 - # walk the fields, expanding arrays into repetitions; first, # only count how many flattened fields there are nflat = 0 @@ -318,6 +308,16 @@ "a struct with a zero-length array")) nflat += flat + if USE_C_LIBFFI_MSVC and is_result_type: + # MSVC returns small structures in registers. Pretend int32 or + # int64 return type. This is needed as a workaround for what + # is really a bug of libffi_msvc seen as an independent library + # (ctypes has a similar workaround). + if ctype.size <= 4: + return clibffi.ffi_type_sint32 + if ctype.size <= 8: + return clibffi.ffi_type_sint64 + # allocate an array of (nflat + 1) ffi_types elements = self.fb_alloc(rffi.sizeof(FFI_TYPE_P) * (nflat + 1)) elements = rffi.cast(FFI_TYPE_PP, elements) diff --git a/pypy/module/imp/test/test_app.py b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -200,3 +200,49 @@ except KeyError: pass rmtree(dir_name, True) + + def test_builtin_reimport(self): + # from https://bugs.pypy.org/issue1514 + skip("fix me") + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. + saved = sys.modules.pop('marshal') + __import__('marshal') + sys.modules['marshal'] = saved + + assert marshal.loads == 42 + import marshal + assert marshal.loads == 42 + marshal.loads = old + + def test_builtin_reimport_mess(self): + # taken from https://bugs.pypy.org/issue1514, with extra cases + # that show a difference with CPython: we can get on CPython + # several module objects for the same built-in module :-( + skip("several built-in module objects: not supported by pypy") + import sys, marshal + + old = marshal.loads + marshal.loads = 42 + + # save, re-import, restore. 
+ saved = sys.modules.pop('marshal') + marshal2 = __import__('marshal') + assert marshal2 is not marshal + assert marshal2.loads is old + assert marshal2 is sys.modules['marshal'] + assert marshal is saved + assert marshal.loads == 42 + + import marshal + assert marshal.loads is old + + sys.modules['marshal'] = saved + import marshal + assert marshal.loads == 42 + + marshal.loads = old diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -565,6 +565,43 @@ assert sys.path is oldpath assert 'setdefaultencoding' in dir(sys) + def test_reload_builtin_doesnt_clear(self): + import sys + sys.foobar = "baz" + reload(sys) + assert sys.foobar == "baz" + + def test_reimport_builtin_simple_case_1(self): + import sys, time + del time.tzset + del sys.modules['time'] + import time + assert hasattr(time, 'tzset') + + def test_reimport_builtin_simple_case_2(self): + skip("fix me") + import sys, time + time.foo = "bar" + del sys.modules['time'] + import time + assert not hasattr(time, 'foo') + + def test_reimport_builtin(self): + skip("fix me") + import sys, time + oldpath = sys.path + time.tzset = "" + + del sys.modules['time'] + import time as time1 + assert sys.modules['time'] is time1 + + assert time.tzset == "" + + reload(time1) # don't leave a broken time.tzset behind + import time + assert time.tzset != "" + def test_reload_infinite(self): import infinite_reload diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -115,6 +115,21 @@ return space.wrap('=') return space.wrap(nonnative_byteorder_prefix) + def descr_get_str(self, space): + size = self.get_size() + basic = self.kind + if basic == UNICODELTR: + size >>= 2 + endian = byteorder_prefix + elif size <= 1: + endian = '|' # ignore + elif self.native: + endian = byteorder_prefix + else: + endian = nonnative_byteorder_prefix + + return space.wrap("%s%s%s" % (endian, basic, size)) + def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -421,6 +436,7 @@ char = interp_attrproperty("char", cls=W_Dtype), type = interp_attrproperty_w("w_box_type", cls=W_Dtype), byteorder = GetSetProperty(W_Dtype.descr_get_byteorder), + str = GetSetProperty(W_Dtype.descr_get_str), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), shape = GetSetProperty(W_Dtype.descr_get_shape), @@ -582,7 +598,7 @@ alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), ], - aliases=["float"], + aliases=["float", "double"], ) self.w_complex64dtype = W_ComplexDtype( types.Complex64(), @@ -663,9 +679,10 @@ char='S', w_box_type = space.gettypefor(interp_boxes.W_StringBox), alternate_constructors=[space.w_str], + aliases=["str"], ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(1), + types.UnicodeType(0), num=19, kind=UNICODELTR, name='unicode', diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -418,8 +418,16 @@ addr = self.implementation.get_storage_as_int(space) # will explode if it can't w_d = space.newdict() - space.setitem_str(w_d, 'data', space.newtuple([space.wrap(addr), - space.w_False])) + space.setitem_str(w_d, 'data', + space.newtuple([space.wrap(addr), 
space.w_False])) + space.setitem_str(w_d, 'shape', self.descr_get_shape(space)) + space.setitem_str(w_d, 'typestr', self.get_dtype().descr_get_str(space)) + if self.implementation.order == 'C': + # Array is contiguous, no strides in the interface. + strides = space.w_None + else: + strides = self.descr_get_strides(space) + space.setitem_str(w_d, 'strides', strides) return w_d w_pypy_data = None diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -683,6 +683,20 @@ assert dtype('=i8').byteorder == '=' assert dtype(byteorder + 'i8').byteorder == '=' + def test_dtype_str(self): + from numpypy import dtype + byteorder = self.native_prefix + assert dtype('i8').str == byteorder + 'i8' + assert dtype('i8').str == '>i8' + assert dtype('int8').str == '|i1' + assert dtype('float').str == byteorder + 'f8' + # strange + assert dtype('string').str == '|S0' + assert dtype('unicode').str == byteorder + 'U0' + # assert dtype(('string', 7)).str == '|S7' + # assert dtype(('unicode', 7)).str == ' 0] - setters = ['case %d: s.%s = value; break;' % iname - for iname in enumerate(fnames)] - lib = ffi1.verify(""" - struct s1 { %s }; - struct sa { char a; struct s1 b; }; - #define Gofs_y offsetof(struct s1, y) - #define Galign offsetof(struct sa, b) - #define Gsize sizeof(struct s1) - struct s1 *try_with_value(int fieldnum, long long value) - { - static struct s1 s; - memset(&s, 0, sizeof(s)); - switch (fieldnum) { %s } - return &s; - } - """ % (source, ' '.join(setters))) - assert lib.Gofs_y == expected_ofs_y - assert lib.Galign == expected_align - assert lib.Gsize == expected_size + ffi1 = FFI() + ffi1.cdef(""" + static const int Gofs_y, Galign, Gsize; + struct s1 *try_with_value(int fieldnum, long long value); + """) + fnames = [name for name, cfield in ctype.fields + if name and cfield.bitsize > 0] + setters = ['case %d: s.%s = value; break;' % iname + for iname in enumerate(fnames)] + lib = ffi1.verify(""" + struct s1 { %s }; + struct sa { char a; struct s1 b; }; + #define Gofs_y offsetof(struct s1, y) + #define Galign offsetof(struct sa, b) + #define Gsize sizeof(struct s1) + struct s1 *try_with_value(int fieldnum, long long value) + { + static struct s1 s; + memset(&s, 0, sizeof(s)); + switch (fieldnum) { %s } + return &s; + } + """ % (source, ' '.join(setters))) + if sys.platform == 'win32': + expected_ofs_y = lib.Gofs_y + expected_align = lib.Galign + expected_size = lib.Gsize else: - lib = None - fnames = None + assert (lib.Gofs_y, lib.Galign, lib.Gsize) == ( + expected_ofs_y, expected_align, expected_size) # the real test follows assert ffi.offsetof("struct s1", "y") == expected_ofs_y assert ffi.alignof("struct s1") == expected_align @@ -99,10 +102,9 @@ setattr(s, name, value) assert getattr(s, name) == value raw1 = ffi.buffer(s)[:] - if lib is not None: - t = lib.try_with_value(fnames.index(name), value) - raw2 = ffi.buffer(t, len(raw1))[:] - assert raw1 == raw2 + t = lib.try_with_value(fnames.index(name), value) + raw2 = ffi.buffer(t, len(raw1))[:] + assert raw1 == raw2 def test_bitfield_basic(self): self.check("int a; int b:9; int c:20; int y;", 8, 4, 12) @@ -136,9 +138,11 @@ L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) self.check("char x; int :0; char y;", 4, 1, 5) + self.check("char x; int :0; int :0; char y;", 4, 1, 5) self.check("char x; long long :0; char y;", L, 1, L + 1) self.check("short x, y; int :0; int :0;", 2, 2, 4) 
self.check("char x; int :0; short b:1; char y;", 5, 2, 6) + self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) def test_error_cases(self): ffi = FFI() diff --git a/pypy/module/thread/test/test_fork.py b/pypy/module/thread/test/test_fork.py --- a/pypy/module/thread/test/test_fork.py +++ b/pypy/module/thread/test/test_fork.py @@ -28,7 +28,7 @@ if pid == 0: os._exit(0) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] assert exitcode == 0 # if 9, process was killed by timer! finally: @@ -54,7 +54,7 @@ thread.start_new_thread(lambda: None, ()) os._exit(0) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] assert exitcode == 0 # if 9, process was killed by timer! @@ -73,7 +73,7 @@ signal.signal(signal.SIGUSR1, signal.SIG_IGN) os._exit(42) else: - self.timeout_killer(pid, 5) + self.timeout_killer(pid, 10) exitcode = os.waitpid(pid, 0)[1] feedback.append(exitcode) diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -113,7 +113,7 @@ tmpreg = r.lr mc.gen_load_int(r.ip.value, self.cpu.pos_exc_value()) if excvalloc is not None: # store - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() self.load_reg(mc, excvalloc, r.ip) if on_frame: # store exc_value in JITFRAME @@ -125,7 +125,7 @@ self.store_reg(mc, r.ip, r.fp, ofs, helper=tmpreg) if exctploc is not None: # store pos_exception in exctploc - assert exctploc.is_reg() + assert exctploc.is_core_reg() mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) self.load_reg(mc, exctploc, r.ip, helper=tmpreg) @@ -146,7 +146,7 @@ tmpreg = r.lr # use lr as a second temporary reg mc.gen_load_int(r.ip.value, self.cpu.pos_exc_value()) if excvalloc is not None: - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() self.store_reg(mc, excvalloc, r.ip) else: assert exctploc is not r.fp @@ -947,7 +947,7 @@ # regalloc support def load(self, loc, value): """load an immediate value into a register""" - assert (loc.is_reg() and value.is_imm() + assert (loc.is_core_reg() and value.is_imm() or loc.is_vfp_reg() and value.is_imm_float()) if value.is_imm(): self.mc.gen_load_int(loc.value, value.getint()) @@ -958,7 +958,7 @@ def load_reg(self, mc, target, base, ofs=0, cond=c.AL, helper=r.ip): if target.is_vfp_reg(): return self._load_vfp_reg(mc, target, base, ofs, cond, helper) - elif target.is_reg(): + elif target.is_core_reg(): return self._load_core_reg(mc, target, base, ofs, cond, helper) def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip): @@ -1012,7 +1012,7 @@ def _mov_imm_to_loc(self, prev_loc, loc, cond=c.AL): if loc.type == FLOAT: raise AssertionError("invalid target for move from imm value") - if loc.is_reg(): + if loc.is_core_reg(): new_loc = loc elif loc.is_stack() or loc.is_raw_sp(): new_loc = r.lr @@ -1027,7 +1027,7 @@ def _mov_reg_to_loc(self, prev_loc, loc, cond=c.AL): if loc.is_imm(): raise AssertionError("mov reg to imm doesn't make sense") - if loc.is_reg(): + if loc.is_core_reg(): self.mc.MOV_rr(loc.value, prev_loc.value, cond=cond) elif loc.is_stack() and loc.type != FLOAT: # spill a core register @@ -1050,7 +1050,7 @@ helper = None offset = prev_loc.value tmp = None - if loc.is_reg(): + if loc.is_core_reg(): assert prev_loc.type != FLOAT, 'trying to load from an \ incompatible location into a core register' # unspill a core register @@ -1126,7 +1126,7 @@ """Moves a value from a previous location 
to some other location""" if prev_loc.is_imm(): return self._mov_imm_to_loc(prev_loc, loc, cond) - elif prev_loc.is_reg(): + elif prev_loc.is_core_reg(): self._mov_reg_to_loc(prev_loc, loc, cond) elif prev_loc.is_stack(): self._mov_stack_to_loc(prev_loc, loc, cond) @@ -1215,7 +1215,7 @@ scratch_reg = r.vfp_ip self.regalloc_mov(loc, scratch_reg, cond) self.regalloc_push(scratch_reg, cond) - elif loc.is_reg(): + elif loc.is_core_reg(): self.mc.PUSH([loc.value], cond=cond) elif loc.is_vfp_reg(): self.mc.VPUSH([loc.value], cond=cond) @@ -1238,7 +1238,7 @@ scratch_reg = r.vfp_ip self.regalloc_pop(scratch_reg) self.regalloc_mov(scratch_reg, loc) - elif loc.is_reg(): + elif loc.is_core_reg(): self.mc.POP([loc.value], cond=cond) elif loc.is_vfp_reg(): self.mc.VPOP([loc.value], cond=cond) @@ -1306,7 +1306,7 @@ # lengthloc is the length of the array, which we must not modify! assert lengthloc is not r.r0 and lengthloc is not r.r1 - if lengthloc.is_reg(): + if lengthloc.is_core_reg(): varsizeloc = lengthloc else: assert lengthloc.is_stack() diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -40,7 +40,7 @@ if self.fnloc.is_stack(): self.asm.mov_loc_loc(self.fnloc, r.ip) self.fnloc = r.ip - assert self.fnloc.is_reg() + assert self.fnloc.is_core_reg() self.mc.BLX(self.fnloc.value) def restore_stack_pointer(self): @@ -135,7 +135,7 @@ return [], [] if self.resloc.is_vfp_reg(): return [r.r0, r.r1], [] - assert self.resloc.is_reg() + assert self.resloc.is_core_reg() return [r.r0], [] def load_result(self): @@ -146,7 +146,7 @@ if resloc.is_vfp_reg(): # move result to the allocated register self.asm.mov_to_vfp_loc(r.r0, r.r1, resloc) - elif resloc.is_reg(): + elif resloc.is_core_reg(): # move result to the allocated register if resloc is not r.r0: self.asm.mov_loc_loc(r.r0, resloc) @@ -283,7 +283,7 @@ def load_result(self): resloc = self.resloc # ensure the result is wellformed and stored in the correct location - if resloc is not None and resloc.is_reg(): + if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, self.ressize, self.ressign) @@ -292,7 +292,7 @@ return [], [] if self.resloc.is_vfp_reg(): return [], [r.d0] - assert self.resloc.is_reg() + assert self.resloc.is_core_reg() return [r.r0], [] diff --git a/rpython/jit/backend/arm/helper/assembler.py b/rpython/jit/backend/arm/helper/assembler.py --- a/rpython/jit/backend/arm/helper/assembler.py +++ b/rpython/jit/backend/arm/helper/assembler.py @@ -82,7 +82,7 @@ assert guard is not None l0 = arglocs[0] l1 = arglocs[1] - assert l0.is_reg() + assert l0.is_core_reg() if l1.is_imm(): self.mc.CMP_ri(l0.value, imm=l1.getint(), cond=fcond) diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -15,7 +15,7 @@ def is_raw_sp(self): return False - def is_reg(self): + def is_core_reg(self): return False def is_vfp_reg(self): @@ -43,7 +43,7 @@ def __repr__(self): return 'r%d' % self.value - def is_reg(self): + def is_core_reg(self): return True def as_key(self): @@ -62,7 +62,7 @@ def __repr__(self): return 'vfp%d' % self.value - def is_reg(self): + def is_core_reg(self): return False def is_vfp_reg(self): diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ 
b/rpython/jit/backend/arm/opassembler.py @@ -248,7 +248,7 @@ l1 = arglocs[1] failargs = arglocs[2:] - if l0.is_reg(): + if l0.is_core_reg(): if l1.is_imm(): self.mc.CMP_ri(l0.value, l1.getint()) else: @@ -488,7 +488,7 @@ # case GCFLAG_CARDS_SET: emit a few instructions to do # directly the card flag setting loc_index = arglocs[1] - assert loc_index.is_reg() + assert loc_index.is_core_reg() # must save the register loc_index before it is mutated mc.PUSH([loc_index.value]) tmp1 = loc_index @@ -588,7 +588,7 @@ def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() if scale.value > 0: self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) ofs_loc = r.ip @@ -606,7 +606,7 @@ # vstr only supports imm offsets # so if the ofset is too large we add it to the base and use an # offset of 0 - if ofs_loc.is_reg(): + if ofs_loc.is_core_reg(): tmploc, save = self.get_tmp_reg([value_loc, base_loc, ofs_loc]) assert not save self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) @@ -644,13 +644,13 @@ def emit_op_raw_store(self, op, arglocs, regalloc, fcond): value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) return fcond def emit_op_getarrayitem_gc(self, op, arglocs, regalloc, fcond): res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() signed = op.getdescr().is_item_signed() # scale the offset as required @@ -672,7 +672,7 @@ # vldr only supports imm offsets # if the offset is in a register we add it to the base and use a # tmp reg - if ofs_loc.is_reg(): + if ofs_loc.is_core_reg(): tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) assert not save self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value) @@ -727,7 +727,7 @@ def emit_op_raw_load(self, op, arglocs, regalloc, fcond): res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_reg() + assert ofs_loc.is_core_reg() # no base offset assert ofs.value == 0 signed = op.getdescr().is_item_signed() @@ -805,10 +805,10 @@ bytes_box = TempBox() bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, forbidden_vars) scale = self._get_unicode_item_scale() - if not length_loc.is_reg(): + if not length_loc.is_core_reg(): self.regalloc_mov(length_loc, bytes_loc) length_loc = bytes_loc - assert length_loc.is_reg() + assert length_loc.is_core_reg() self.mc.MOV_ri(r.ip.value, 1 << scale) self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) length_box = bytes_box @@ -835,8 +835,8 @@ # result = base_loc + (scaled_loc << scale) + static_offset def _gen_address(self, result, base_loc, scaled_loc, scale=0, static_offset=0): - assert scaled_loc.is_reg() - assert base_loc.is_reg() + assert scaled_loc.is_core_reg() + assert base_loc.is_core_reg() assert check_imm_arg(scale) assert check_imm_arg(static_offset) if scale > 0: @@ -1063,7 +1063,7 @@ def emit_op_cast_float_to_int(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert arg.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VCVT_float_to_int(r.vfp_ip.value, arg.value) self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) return fcond @@ -1071,7 +1071,7 @@ def emit_op_cast_int_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() - assert arg.is_reg() + assert arg.is_core_reg() self.mc.MOV_ri(r.ip.value, 0) self.mc.VMOV_cr(res.value, arg.value, 
r.ip.value) self.mc.VCVT_int_to_float(res.value, res.value) @@ -1087,7 +1087,7 @@ loc = arglocs[0] res = arglocs[1] assert loc.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VMOV_rc(res.value, r.ip.value, loc.value) return fcond @@ -1108,7 +1108,7 @@ def emit_op_cast_float_to_singlefloat(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert arg.is_vfp_reg() - assert res.is_reg() + assert res.is_core_reg() self.mc.VCVT_f64_f32(r.vfp_ip.value, arg.value) self.mc.VMOV_rc(res.value, r.ip.value, r.vfp_ip.value) return fcond @@ -1116,7 +1116,7 @@ def emit_op_cast_singlefloat_to_float(self, op, arglocs, regalloc, fcond): arg, res = arglocs assert res.is_vfp_reg() - assert arg.is_reg() + assert arg.is_core_reg() self.mc.MOV_ri(r.ip.value, 0) self.mc.VMOV_cr(res.value, arg.value, r.ip.value) self.mc.VCVT_f32_f64(res.value, res.value) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -324,7 +324,7 @@ loc = r.fp arg = inputargs[i] i += 1 - if loc.is_reg(): + if loc.is_core_reg(): self.rm.reg_bindings[arg] = loc used[loc] = None elif loc.is_vfp_reg(): @@ -346,6 +346,8 @@ # note: we need to make a copy of inputargs because possibly_free_vars # is also used on op args, which is a non-resizable list self.possibly_free_vars(list(inputargs)) + self.fm.finish_binding() + self._check_invariants() def get_gcmap(self, forbidden_regs=[], noregs=False): frame_depth = self.fm.get_frame_depth() @@ -356,7 +358,7 @@ continue if box.type == REF and self.rm.is_still_alive(box): assert not noregs - assert loc.is_reg() + assert loc.is_core_reg() val = loc.value gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) for box, loc in self.fm.bindings.iteritems(): @@ -1152,7 +1154,7 @@ assert isinstance(arg, Box) loc = self.loc(arg) arglocs[i] = loc - if loc.is_reg(): + if loc.is_core_reg() or loc.is_vfp_reg(): self.frame_manager.mark_as_free(arg) # descr._arm_arglocs = arglocs diff --git a/rpython/jit/backend/arm/test/test_jump.py b/rpython/jit/backend/arm/test/test_jump.py --- a/rpython/jit/backend/arm/test/test_jump.py +++ b/rpython/jit/backend/arm/test/test_jump.py @@ -255,7 +255,7 @@ else: newvalue = 'value-vfp-%d' % i regs2[loc.value] = newvalue - elif loc.is_reg(): + elif loc.is_core_reg(): regs1[loc.value] = 'value-int-%d' % i elif loc.is_stack(): stack[loc.position] = 'value-width%d-%d' % (loc.width, i) @@ -284,7 +284,7 @@ assert loc.width == expected_width*WORD if loc.is_vfp_reg(): return regs2[loc.value] - elif loc.is_reg(): + elif loc.is_core_reg(): return regs1[loc.value] elif loc.is_stack(): got = stack[loc.position] @@ -298,7 +298,7 @@ def write(loc, newvalue): if loc.is_vfp_reg(): regs2[loc.value] = newvalue - elif loc.is_reg(): + elif loc.is_core_reg(): regs1[loc.value] = newvalue elif loc.is_stack(): if loc.width > WORD: @@ -317,17 +317,17 @@ for op in assembler.ops: if op[0] == 'mov': src, dst = op[1:] - assert src.is_reg() or src.is_vfp_reg() or src.is_stack() or src.is_imm_float() or src.is_imm() - assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + assert src.is_core_reg() or src.is_vfp_reg() or src.is_stack() or src.is_imm_float() or src.is_imm() + assert dst.is_core_reg() or dst.is_vfp_reg() or dst.is_stack() assert not (src.is_stack() and dst.is_stack()) write(dst, read(src)) elif op[0] == 'push': src, = op[1:] - assert src.is_reg() or src.is_vfp_reg() or src.is_stack() + assert src.is_core_reg() or src.is_vfp_reg() or src.is_stack() 
extrapushes.append(read(src)) elif op[0] == 'pop': dst, = op[1:] - assert dst.is_reg() or dst.is_vfp_reg() or dst.is_stack() + assert dst.is_core_reg() or dst.is_vfp_reg() or dst.is_stack() write(dst, extrapushes.pop()) else: assert 0, "unknown op: %r" % (op,) diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -11,6 +11,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import JitCellToken, TargetToken from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.codewriter import longlong CPU = getcpuclass() @@ -261,3 +262,43 @@ l1 = ('debug_print', preambletoken.repr_of_descr() + ':1') l2 = ('debug_print', targettoken.repr_of_descr() + ':9') assert ('jit-backend-counts', [l0, l1, l2]) in dlog + + + def test_label_float_in_reg_and_on_stack(self): + targettoken = TargetToken() + ops = """ + [i0, f3] + i2 = same_as(i0) # but forced to be in a register + force_spill(i2) + force_spill(f3) + f4 = float_add(f3, 5.0) + label(f3, f4, descr=targettoken) + force_spill(f3) + f5 = same_as(f3) # but forced to be in a register + finish(f5) + """ + faildescr = BasicFailDescr(2) + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + info = self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + ops2 = """ + [i0, f1] + i1 = same_as(i0) + f2 = same_as(f1) + f3 = float_add(f1, 10.0) + force_spill(f3) + force_spill(i1) + f4 = float_add(f3, f1) + jump(f3, f4, descr=targettoken) + """ + loop2 = parse(ops2, self.cpu, namespace=locals()) + looptoken2 = JitCellToken() + info = self.cpu.compile_loop(loop2.inputargs, loop2.operations, looptoken2) + + deadframe = self.cpu.execute_token(looptoken, -9, longlong.getfloatstorage(-13.5)) + res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) + assert res == -13.5 + # + deadframe = self.cpu.execute_token(looptoken2, -9, longlong.getfloatstorage(-13.5)) + res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) + assert res == -3.5 diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -47,7 +47,7 @@ input_i += 1 if arg.type == REF: loc = fail_locs[i] - if loc.is_reg(): + if loc.is_core_reg(): val = self.cpu.all_reg_indexes[loc.value] else: val = loc.get_position() + self.cpu.JITFRAME_FIXED_SIZE diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_boehm_test.py @@ -2,7 +2,6 @@ import weakref from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.jit.backend.llsupport.test.zrpy_gc_test import run, get_entry, compile -from rpython.jit.backend.llsupport.test.ztranslation_test import fix_annotator_for_vrawbuffer class X(object): def __init__(self, x=0): @@ -32,8 +31,7 @@ g._dont_inline_ = True return g -def compile_boehm_test(monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) +def compile_boehm_test(): myjitdriver = JitDriver(greens = [], reds = ['n', 'x']) @dont_look_inside def see(lst, n): diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- 
a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -9,23 +9,10 @@ from rpython.jit.codewriter.policy import StopAtXPolicy -def fix_annotator_for_vrawbuffer(monkeypatch): - from rpython.rlib.nonconst import NonConstant - from rpython.jit.metainterp.optimizeopt.virtualize import VRawBufferValue - from rpython.jit.metainterp import warmspot - - def my_hook_for_tests(cpu): - # this is needed so that the annotator can see it - if NonConstant(False): - v = VRawBufferValue(cpu, None, -1, None, None) - monkeypatch.setattr(warmspot, 'hook_for_tests', my_hook_for_tests) - - class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() - def test_stuff_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_stuff_translates(self): # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges @@ -102,10 +89,9 @@ class TranslationTestCallAssembler(CCompiledMixin): CPUClass = getcpuclass() - def test_direct_assembler_call_translates(self, monkeypatch): + def test_direct_assembler_call_translates(self): """Test CALL_ASSEMBLER and the recursion limit""" from rpython.rlib.rstackovf import StackOverflow - fix_annotator_for_vrawbuffer(monkeypatch) class Thing(object): def __init__(self, val): @@ -183,8 +169,7 @@ class TranslationTestJITStats(CCompiledMixin): CPUClass = getcpuclass() - def test_jit_get_stats(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_jit_get_stats(self): driver = JitDriver(greens = [], reds = ['i']) def f(): @@ -207,8 +192,7 @@ class TranslationRemoveTypePtrTest(CCompiledMixin): CPUClass = getcpuclass() - def test_external_exception_handling_translates(self, monkeypatch): - fix_annotator_for_vrawbuffer(monkeypatch) + def test_external_exception_handling_translates(self): jitdriver = JitDriver(greens = [], reds = ['n', 'total']) class ImDone(Exception): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1551,14 +1551,14 @@ frame in jf_guard_exc """ if excvalloc is not None: - assert excvalloc.is_reg() + assert excvalloc.is_core_reg() mc.MOV(excvalloc, heap(self.cpu.pos_exc_value())) elif tmploc is not None: # if both are None, just ignore ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc') mc.MOV(tmploc, heap(self.cpu.pos_exc_value())) mc.MOV(RawEbpLoc(ofs), tmploc) if exctploc is not None: - assert exctploc.is_reg() + assert exctploc.is_core_reg() mc.MOV(exctploc, heap(self.cpu.pos_exception())) mc.MOV(heap(self.cpu.pos_exception()), imm0) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -227,18 +227,6 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _frame_bindings(self, locs, inputargs): - bindings = {} - i = 0 - for loc in locs: - if loc is None: - continue - arg = inputargs[i] - i += 1 - if not isinstance(loc, RegLoc): - bindings[arg] = loc - return bindings - def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py used = {} diff --git a/rpython/jit/backend/x86/regloc.py b/rpython/jit/backend/x86/regloc.py --- a/rpython/jit/backend/x86/regloc.py +++ b/rpython/jit/backend/x86/regloc.py @@ -45,7 +45,7 @@ def is_stack(self): return False - def is_reg(self): + def is_core_reg(self): 
return False def get_position(self): @@ -169,7 +169,7 @@ def is_float(self): return self.is_xmm - def is_reg(self): + def is_core_reg(self): return True class ImmediateAssemblerLocation(AssemblerLocation): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -17,6 +17,7 @@ _attrs_ = ('keybox', 'source_op', '_cached_vinfo') box = None level = optimizer.LEVEL_NONNULL + is_about_raw = False _cached_vinfo = None def __init__(self, keybox, source_op=None): @@ -395,6 +396,7 @@ class VRawBufferValue(AbstractVArrayValue): + is_about_raw = True def __init__(self, cpu, logops, size, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -457,6 +459,7 @@ class VRawSliceValue(AbstractVirtualValue): + is_about_raw = True def __init__(self, rawbuffer_value, offset, keybox, source_op): AbstractVirtualValue.__init__(self, keybox, source_op) @@ -676,13 +679,17 @@ offsetbox = self.get_constant_box(op.getarg(1)) if value.is_virtual() and offsetbox is not None: offset = offsetbox.getint() - if isinstance(value, VRawBufferValue): - self.make_virtual_raw_slice(value, offset, op.result, op) - return - elif isinstance(value, VRawSliceValue): - offset = offset + value.offset - self.make_virtual_raw_slice(value.rawbuffer_value, offset, op.result, op) - return + # the following check is constant-folded to False if the + # translation occurs without any VRawXxxValue instance around + if value.is_about_raw: + if isinstance(value, VRawBufferValue): + self.make_virtual_raw_slice(value, offset, op.result, op) + return + elif isinstance(value, VRawSliceValue): + offset = offset + value.offset + self.make_virtual_raw_slice(value.rawbuffer_value, offset, + op.result, op) + return self.emit_operation(op) def optimize_ARRAYLEN_GC(self, op): diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -451,6 +451,7 @@ class AbstractVirtualInfo(object): kind = REF + is_about_raw = False #def allocate(self, decoder, index): # raise NotImplementedError def equals(self, fieldnums): @@ -461,7 +462,7 @@ def debug_prints(self): raise NotImplementedError - + class AbstractVirtualStructInfo(AbstractVirtualInfo): def __init__(self, fielddescrs): @@ -547,6 +548,7 @@ class VRawBufferStateInfo(AbstractVirtualInfo): kind = INT + is_about_raw = True def __init__(self, size, offsets, descrs): self.size = size @@ -772,7 +774,9 @@ assert self.virtuals_cache is not None v = self.virtuals_cache.get_int(index) if not v: - v = self.rd_virtuals[index].allocate_int(self, index) + v = self.rd_virtuals[index] + assert v.is_about_raw and isinstance(v, VRawBufferStateInfo) + v = v.allocate_int(self, index) ll_assert(v == self.virtuals_cache.get_int(index), "resume.py: bad cache") return v diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -761,8 +761,6 @@ cpu = jd.warmstate.cpu def ll_portal_runner(*args): - hook_for_tests(cpu) # usually it's empty, but tests can monkeypatch - # it to fix the annotator start = True while 1: try: @@ -999,10 +997,3 @@ graphs = self.translator.graphs for graph, block, i in find_force_quasi_immutable(graphs): self.replace_force_quasiimmut_with_direct_call(block.operations[i]) - -def hook_for_tests(cpu): - """ - This 
function is empty and does nothing. Its only role is to be - monkey-patched by tests to "fix" the annotator if needed (see - e.g. x86/test/test_ztranslation::test_external_exception_handling_translates - """ From noreply at buildbot.pypy.org Fri Jun 14 15:26:34 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 15:26:34 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: document branch Message-ID: <20130614132634.300AE1C3354@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64883:4bcabd580cba Date: 2013-06-14 16:25 +0300 http://bitbucket.org/pypy/pypy/changeset/4bcabd580cba/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,7 @@ .. branch: ctypes-byref Add the '_obj' attribute on ctypes pointer() and byref() objects + +.. branch: argsort-segfault +Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) + From noreply at buildbot.pypy.org Fri Jun 14 15:48:47 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 15:48:47 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20130614134847.1546B1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r124:bbee0ad53806 Date: 2013-06-14 15:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/bbee0ad53806/ Log: Next test diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -450,17 +450,30 @@ return L; } +static inline gcptr check_flag_write_barrier(gcptr W) +{ + if (W->h_tid & GCFLAG_WRITE_BARRIER) + { + struct tx_descriptor *d = thread_descriptor; + gcptrlist_insert(&d->private_old_pointing_to_young, W); + W->h_tid &= ~GCFLAG_WRITE_BARRIER; + } + return W; +} + gcptr stm_WriteBarrier(gcptr P) { + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + return check_flag_write_barrier(P); + gcptr R, W; + R = stm_read_barrier(P); + if (is_private(R)) + return check_flag_write_barrier(R); + struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); - R = stm_read_barrier(P); - - if (is_private(R)) - return R; - spinlock_acquire(d->public_descriptor->collection_lock, 'L'); if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -27,6 +27,7 @@ gcptrlist_delete(&d->old_objects_to_trace); gcptrlist_delete(&d->public_to_young); + gcptrlist_delete(&d->private_old_pointing_to_young); } static char *collect_and_allocate_size(size_t size); /* forward */ @@ -131,6 +132,13 @@ gcptrlist_clear(&d->public_to_young); } +static void mark_private_old_pointing_to_young(struct tx_descriptor *d) +{ + /* trace the objects recorded earlier by stmgc_write_barrier() */ + gcptrlist_move(&d->old_objects_to_trace, + &d->private_old_pointing_to_young); +} + static void visit_all_outside_objects(struct tx_descriptor *d) { while (gcptrlist_size(&d->old_objects_to_trace) > 0) { @@ -177,9 +185,7 @@ mark_public_to_young(d); -#if 0 mark_private_old_pointing_to_young(d); -#endif visit_all_outside_objects(d); #if 0 @@ -216,8 +222,8 @@ if (d->nursery_current == d->nursery_base /*&& !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ - //assert(gcptrlist_size(&d->private_old_pointing_to_young) == 0); - //assert(gcptrlist_size(&d->public_to_young) == 0); + assert(gcptrlist_size(&d->private_old_pointing_to_young) == 0); + assert(gcptrlist_size(&d->public_to_young) == 0); 
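        /* When the nursery allocation pointer is still at its base, no young
           object was created since the last collection; the two lists checked
           by the assertions just above should then be empty as well, and the
           function can simply report that there is nothing young to collect. */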
return 0; } else { diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -11,7 +11,8 @@ char *nursery_end; \ char *nursery_base; \ struct GcPtrList old_objects_to_trace; \ - struct GcPtrList public_to_young; + struct GcPtrList public_to_young; \ + struct GcPtrList private_old_pointing_to_young; struct tx_descriptor; /* from et.h */ diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -87,8 +87,8 @@ gcptr stm_write_barrier(gcptr obj) { /* XXX inline in the caller */ - if (UNLIKELY((obj->h_revision != stm_private_rev_num) & - ((obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) == 0))) + if (UNLIKELY((obj->h_revision != stm_private_rev_num) | + ((obj->h_tid & GCFLAG_WRITE_BARRIER) != 0))) obj = stm_WriteBarrier(obj); return obj; } diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -71,3 +71,24 @@ p3 = lib.stm_read_barrier(p1) assert not lib.in_nursery(p3) and p3 != p2 assert lib.rawgetlong(p3, 0) == -91467 + +def test_outer2inner(): # test mark_private_old_pointing_to_young() + p1 = nalloc_refs(1) + lib.stm_push_root(p1) + minor_collect() + check_nursery_free(p1) + p1 = lib.stm_pop_root() + assert classify(p1) == "private" + p2 = nalloc(HDR + WORD) + lib.setlong(p2, 0, 8972981) + lib.setptr(p1, 0, p2) + # + lib.stm_push_root(p1) + minor_collect() + p1b = lib.stm_pop_root() + assert p1b == p1 + check_nursery_free(p2) + p2b = lib.getptr(p1b, 0) + assert p2b != p2 + check_not_free(p2b) + assert lib.getlong(p2b, 0) == 8972981 From noreply at buildbot.pypy.org Fri Jun 14 15:55:23 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 15:55:23 +0200 (CEST) Subject: [pypy-commit] pypy default: untabbify Message-ID: <20130614135523.B1C2D1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r64884:8d8aa8c2badb Date: 2013-06-14 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/8d8aa8c2badb/ Log: untabbify diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -31,13 +31,13 @@ char *bytes; int length; Tcl_ObjType *typePtr; - union { /* The internal representation: */ - long longValue; /* - an long integer value. */ - double doubleValue; /* - a double-precision floating value. */ - struct { /* - internal rep as two pointers. */ - void *ptr1; - void *ptr2; - } twoPtrValue; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; } internalRep; ...; } Tcl_Obj; From noreply at buildbot.pypy.org Fri Jun 14 16:54:26 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 16:54:26 +0200 (CEST) Subject: [pypy-commit] stmgc default: Next test Message-ID: <20130614145426.76C501C3322@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r125:61b9c5a13a48 Date: 2013-06-14 16:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/61b9c5a13a48/ Log: Next test diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -119,16 +119,75 @@ static void mark_public_to_young(struct tx_descriptor *d) { + /* "public_to_young" contains ptrs to the public copies used as + key of "public_to_private", but only the ones that were added + since the last minor collection. 
Once the transaction commit, + they stay in "public_to_young", and so they become public + objects whose h_revision is a public stub, which itself points + (originally) to a protected young object. + + Be careful and accept more or less any object in the list, which + can show up because of aborted transactions. + */ long i, size = d->public_to_young.size; gcptr *items = d->public_to_young.items; for (i = 0; i < size; i++) { gcptr P = items[i]; + assert(P->h_tid & GCFLAG_PUBLIC); + + revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; + G2L_FIND(d->public_to_private, P, item, goto not_in_public_to_private); - G2L_FIND(d->public_to_private, P, item, continue); + if (!(v & 1)) { // "is a pointer" + /* P is both a key in public_to_private and an outdated copy. + We are in a case where we know the transaction will not + be able to commit successfully. + */ + abort(); + AbortTransactionAfterCollect(d, ABRT_COLLECT_MINOR); + //... + } + visit_if_young(&item->val); + continue; + + not_in_public_to_private: + if (v & 1) { // "is not a pointer" + /* P is neither a key in public_to_private nor outdated. + It must come from an older transaction that aborted. + Nothing to do now. + */ + continue; + } + + gcptr S = (gcptr)v; + revision_t w = ACCESS_ONCE(S->h_revision); + if ((w & 3) != 2) { + /* P has a ptr in h_revision, but this object is not a stub + with a protected pointer. It has likely been the case + in the past, but someone made even more changes. + Nothing to do now. + */ + continue; + } + + if (STUB_THREAD(S) != d->public_descriptor) { + /* Bah, it's indeed a stub but for another thread. Nothing + to do now. + */ + continue; + } + + /* It's a stub for us. It cannot be un-stubbed under our + feet because we hold our own collection_lock. + */ + gcptr L = (gcptr)(w - 2); + visit_if_young(&L); + S->h_revision = ((revision_t)L) | 2; } + gcptrlist_clear(&d->public_to_young); } diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -92,3 +92,35 @@ assert p2b != p2 check_not_free(p2b) assert lib.getlong(p2b, 0) == 8972981 + +def test_outer2inner_after_transaction_end(): + p1 = palloc(HDR + WORD) + lib.rawsetlong(p1, 0, 420063) + p2 = lib.stm_write_barrier(p1) + lib.rawsetlong(p2, 0, -91467) + assert lib.in_nursery(p2) + lib.stm_push_root(p1) + print "committing..." 
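    # The point of this test: p1 is a public object whose more recent copy
    # p2 is still a young, protected object when the transaction ends.  The
    # commit leaves p1 chained (through a stub) to p2, and the following
    # minor collection must move p2 out of the nursery without losing the
    # p1 -> p2 link; this is the case handled by mark_public_to_young() above.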
+ transaction_break() + print "done" + + # first check that the situation is still the same in the next transaction + p1b = lib.stm_pop_root() + assert p1b == p1 + assert classify(p1b) == "public" + p2b = lib.stm_read_barrier(p1b) + assert lib.in_nursery(p2b) + assert p2b == p2 + assert classify(p2) == "protected" + check_not_free(p2b) + lib.stm_push_root(p1b) + print 'ok' + + # then do a minor collection + minor_collect() + p1b = lib.stm_pop_root() + assert p1b == p1 + # check that the link p1 -> p2 was kept alive by moving p2 outside + p2b = lib.stm_read_barrier(p1b) + assert not lib.in_nursery(p2b) + check_not_free(p2b) From noreply at buildbot.pypy.org Fri Jun 14 17:01:37 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 17:01:37 +0200 (CEST) Subject: [pypy-commit] stmgc default: More tests that pass Message-ID: <20130614150137.6686E1C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r126:60f02332d53f Date: 2013-06-14 16:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/60f02332d53f/ Log: More tests that pass diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -124,3 +124,35 @@ p2b = lib.stm_read_barrier(p1b) assert not lib.in_nursery(p2b) check_not_free(p2b) + +def test_minor_collection_at_thread_end(): + p1 = palloc_refs(1) + p2 = nalloc(HDR) + setptr(p1, 0, p2) + lib.stm_finalize() + lib.stm_initialize_tests(0) + p1b = getptr(p1, 0) + assert p1b != p1 + assert not lib.in_nursery(p1b) + check_not_free(p1b) + +def test_prebuilt_keeps_alive(): + p0 = palloc_refs(1) + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) + minor_collect() + check_nursery_free(p1) + check_prebuilt(p0) + p2 = lib.getptr(p0, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + +def test_prebuilt_keeps_alive_at_thread_end(): + p0 = palloc_refs(1) + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) + lib.stm_finalize() + lib.stm_initialize_tests(0) + check_prebuilt(p0) + p2 = lib.getptr(p0, 0) + check_not_free(p2) From noreply at buildbot.pypy.org Fri Jun 14 17:01:38 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 17:01:38 +0200 (CEST) Subject: [pypy-commit] stmgc default: Reintroduce oalloc() Message-ID: <20130614150138.B67841C029E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r127:25521e54d778 Date: 2013-06-14 17:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/25521e54d778/ Log: Reintroduce oalloc() diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -54,6 +54,7 @@ void stm_set_transaction_length(long length_max); /* extra non-public code */ + void *stm_malloc(size_t size); //gcptr stmgcpage_malloc(size_t size); //void stmgcpage_free(gcptr obj); //long stmgcpage_count(int quantity); @@ -390,20 +391,18 @@ # ____________________________________________________________ def oalloc(size): - "Allocate an 'old' object, i.e. outside any nursery" - p = lib.stmgcpage_malloc(size) - p.h_tid = GCFLAG_WRITE_BARRIER | GCFLAG_OLD - p.h_revision = lib.get_local_revision() + "Allocate an 'old' public object, outside any nursery" + p = ffi.cast("gcptr", lib.stm_malloc(size)) + p.h_tid = GCFLAG_OLD | GCFLAG_PUBLIC + p.h_revision = 1 lib.settid(p, 42 + size) return p -#ofree = lib.stmgcpage_free - def oalloc_refs(nrefs): - "Allocate an 'old' object, i.e. 
outside any nursery, with nrefs pointers" - p = lib.stmgcpage_malloc(HDR + WORD * nrefs) - p.h_tid = GCFLAG_WRITE_BARRIER | GCFLAG_OLD - p.h_revision = lib.get_local_revision() + "Allocate an 'old' public object, outside any nursery, with nrefs pointers" + p = ffi.cast("gcptr", lib.stm_malloc(HDR + WORD * nrefs)) + p.h_tid = GCFLAG_OLD | GCFLAG_PUBLIC + p.h_revision = 1 lib.settid(p, 421 + nrefs) for i in range(nrefs): rawsetptr(p, i, ffi.NULL) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -126,7 +126,7 @@ check_not_free(p2b) def test_minor_collection_at_thread_end(): - p1 = palloc_refs(1) + p1 = oalloc_refs(1) p2 = nalloc(HDR) setptr(p1, 0, p2) lib.stm_finalize() From noreply at buildbot.pypy.org Fri Jun 14 17:04:03 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 17:04:03 +0200 (CEST) Subject: [pypy-commit] pypy argsort-segfault: close branch about to be merged Message-ID: <20130614150403.2BA2B1C029E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: argsort-segfault Changeset: r64885:c7483245daca Date: 2013-06-14 18:00 +0300 http://bitbucket.org/pypy/pypy/changeset/c7483245daca/ Log: close branch about to be merged From noreply at buildbot.pypy.org Fri Jun 14 17:04:04 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 17:04:04 +0200 (CEST) Subject: [pypy-commit] pypy default: merge argsort-segfault which fixes issue 1510 Message-ID: <20130614150404.5EA611C029E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r64886:c9e36178fe49 Date: 2013-06-14 18:02 +0300 http://bitbucket.org/pypy/pypy/changeset/c9e36178fe49/ Log: merge argsort-segfault which fixes issue 1510 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,7 @@ .. branch: ctypes-byref Add the '_obj' attribute on ctypes pointer() and byref() objects + +.. branch: argsort-segfault +Fix a segfault in argsort when sorting by chunks on multidim numpypy arrays (mikefc) + diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -20,7 +20,7 @@ def make_sort_function(space, itemtype, comp_type, count=1): TP = itemtype.T step = rffi.sizeof(TP) - + class Repr(object): def __init__(self, index_stride_size, stride_size, size, values, indexes, index_start, start): @@ -71,11 +71,11 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - self.indexes = dtype.itemtype.malloc(size*dtype.get_size()) - self.values = alloc_raw_storage(size * stride_size, + indexes = dtype.itemtype.malloc(size*dtype.get_size()) + values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, index_stride_size, stride_size, - size, self.values, self.indexes, start, start) + Repr.__init__(self, dtype.get_size(), stride_size, + size, values, indexes, start, start) def __del__(self): free_raw_storage(self.indexes, track_allocation=False) @@ -96,7 +96,7 @@ for i in range(stop-start): retval.setitem(i, lst.getitem(i+start)) return retval - + if count < 2: def arg_lt(a, b): # Does numpy do <= ? @@ -108,7 +108,7 @@ return True elif a[0][i] > b[0][i]: return False - # Does numpy do True? + # Does numpy do True? 
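            # Equal chunks fall through the loop and compare as not-less-than,
            # so arg_lt() stays a strict less-than, which is what a stable
            # sort such as the timsort built just below needs from its
            # comparison function.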
return False ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, @@ -180,7 +180,7 @@ class SortCache(object): built = False - + def __init__(self, space): if self.built: return diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2499,6 +2499,13 @@ b = a.argsort() assert (b[:3] == [0, 100, 200]).all() + def test_argsort_random(self): + from numpypy import array + from _random import Random + rnd = Random(1) + a = array([rnd.random() for i in range(512*2)]).reshape(512,2) + a.argsort() + def test_argsort_axis(self): from numpypy import array a = array([[4, 2], [1, 3]]) From noreply at buildbot.pypy.org Fri Jun 14 17:04:05 2013 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 14 Jun 2013 17:04:05 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20130614150405.8FD421C029E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r64887:3ed53260e74c Date: 2013-06-14 18:03 +0300 http://bitbucket.org/pypy/pypy/changeset/3ed53260e74c/ Log: merge heads diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -31,13 +31,13 @@ char *bytes; int length; Tcl_ObjType *typePtr; - union { /* The internal representation: */ - long longValue; /* - an long integer value. */ - double doubleValue; /* - a double-precision floating value. */ - struct { /* - internal rep as two pointers. */ - void *ptr1; - void *ptr2; - } twoPtrValue; + union { /* The internal representation: */ + long longValue; /* - an long integer value. */ + double doubleValue; /* - a double-precision floating value. */ + struct { /* - internal rep as two pointers. */ + void *ptr1; + void *ptr2; + } twoPtrValue; } internalRep; ...; } Tcl_Obj; From noreply at buildbot.pypy.org Fri Jun 14 18:45:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 18:45:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: in-the-middle-of-progress Message-ID: <20130614164502.E3D491C1398@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r128:9cfc156e53f7 Date: 2013-06-14 18:44 +0200 http://bitbucket.org/pypy/stmgc/changeset/9cfc156e53f7/ Log: in-the-middle-of-progress diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -33,7 +33,7 @@ static int is_private(gcptr P) { return (P->h_revision == stm_private_rev_num) || - (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + gcflag_private_from_protected(P); } int _stm_is_private(gcptr P) { @@ -80,7 +80,7 @@ revision_t v; restart_all: - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + if (gcflag_private_from_protected(P)) { assert(!(P->h_revision & 1)); /* pointer to the backup copy */ @@ -95,14 +95,14 @@ /* else, for the rest of this function, we can assume that P was not a private copy */ - if (P->h_tid & GCFLAG_PUBLIC) + if (gcflag_public(P)) { /* follow the chained list of h_revision's as long as they are regular pointers. We will only find more public objects along this chain. */ restart_all_public: - assert(P->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(P)); v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" @@ -112,7 +112,7 @@ gcptr P_prev = P; P = (gcptr)v; - assert(P->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(P)); v = ACCESS_ONCE(P->h_revision); @@ -145,7 +145,7 @@ because *we* have an entry in d->public_to_private. 
(It might also be someone else.) */ - if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) + if (gcflag_public_to_private(P)) { wlog_t *item; retry_public_to_private:; @@ -154,7 +154,7 @@ /* We have a key in 'public_to_private'. The value is the corresponding private object. */ P = item->val; - assert(!(P->h_tid & GCFLAG_PUBLIC)); + assert(!gcflag_public(P)); assert(is_private(P)); fprintf(stderr, "read_barrier: %p -> %p public_to_private\n", G, P); return P; @@ -229,7 +229,7 @@ fprintf(stderr, "read_barrier: %p -> stealing %p...\n ", G, P); stm_steal_stub(P); - assert(P->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(P)); goto restart_all_public; } } @@ -245,8 +245,8 @@ if (pubobj == P || ((P->h_revision & 3) == 2 && pubobj->h_revision == P->h_revision)) { - assert(!(org_pubobj->h_tid & GCFLAG_STUB)); - assert(!(privobj->h_tid & GCFLAG_PUBLIC)); + assert(!gcflag_stub(org_pubobj)); + assert(!gcflag_public(privobj)); assert(is_private(privobj)); if (P != org_pubobj) fprintf(stderr, "| actually %p ", org_pubobj); @@ -299,7 +299,7 @@ return P; } - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + if (gcflag_private_from_protected(P)) { /* private too, with a backup copy */ assert(!(P->h_revision & 1)); @@ -307,7 +307,7 @@ return P; } - if (P->h_tid & GCFLAG_PUBLIC) + if (gcflag_public(P)) { fprintf(stderr, "public "); @@ -323,7 +323,7 @@ } P = (gcptr)v; - assert(P->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(P)); fprintf(stderr, "-> %p public ", P); } @@ -354,12 +354,12 @@ { P = (gcptr)(v - 2); fprintf(stderr, "-foreign-> %p ", P); - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + if (gcflag_private_from_protected(P)) { P = (gcptr)P->h_revision; /* the backup copy */ fprintf(stderr, "-backup-> %p ", P); } - if (!(P->h_tid & GCFLAG_PUBLIC)) + if (!gcflag_public(P)) { fprintf(stderr, "protected by someone else!\n"); return (gcptr)-1; @@ -397,10 +397,10 @@ assert(P->h_revision != stm_private_rev_num); assert(P->h_revision & 1); - assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(P->h_tid & GCFLAG_STUB)); - assert(!(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!gcflag_public_to_private(P)); + assert(!gcflag_backup_copy(P)); + assert(!gcflag_stub(P)); + assert(!gcflag_private_from_protected(P)); B = stmgc_duplicate(P); B->h_tid |= GCFLAG_BACKUP_COPY; @@ -410,12 +410,12 @@ gcptrlist_insert(&d->private_from_protected, P); - return P; + return P; /* always returns its arg: the object is converted in-place */ } static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { - assert(R->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(R)); #ifdef _GC_DEBUG wlog_t *entry; @@ -427,9 +427,9 @@ R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; gcptr L = stmgc_duplicate(R); - assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); - assert(!(L->h_tid & GCFLAG_STUB)); - assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); + assert(!gcflag_backup_copy(L)); + assert(!gcflag_stub(L)); + assert(!gcflag_private_from_protected(L)); L->h_tid &= ~(GCFLAG_OLD | GCFLAG_VISITED | GCFLAG_PUBLIC | @@ -439,7 +439,6 @@ 0); L->h_revision = stm_private_rev_num; g2l_insert(&d->public_to_private, R, L); - gcptrlist_insert(&d->public_to_young, R); fprintf(stderr, "write_barrier: adding %p -> %p to public_to_private\n", R, L); @@ -450,38 +449,64 @@ return L; } -static inline gcptr check_flag_write_barrier(gcptr W) +static inline void record_write_barrier(gcptr P) { - if (W->h_tid & GCFLAG_WRITE_BARRIER) + if (gcflag_write_barrier(P)) { - struct tx_descriptor *d = thread_descriptor; - 
gcptrlist_insert(&d->private_old_pointing_to_young, W); - W->h_tid &= ~GCFLAG_WRITE_BARRIER; + P->h_tid &= ~GCFLAG_WRITE_BARRIER; + gcptrlist_insert(&thread_descriptor->old_with_young_pointers_inside, P); } - return W; } gcptr stm_WriteBarrier(gcptr P) { - if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) - return check_flag_write_barrier(P); + if (is_private(P)) + { + /* If we have GCFLAG_WRITE_BARRIER in P, then list it into + old_with_young_pointers_inside: it's a private object that may + be modified by the program after we return, and the mutation + may be to write young pointers (in fact it's a common case). + */ + record_write_barrier(P); + return P; + } gcptr R, W; R = stm_read_barrier(P); + if (is_private(R)) - return check_flag_write_barrier(R); + { + record_write_barrier(P); + return P; + } struct tx_descriptor *d = thread_descriptor; assert(d->active >= 1); + /* We need the collection_lock for the sequel; this is required notably + because we're about to edit flags on a protected object. + */ spinlock_acquire(d->public_descriptor->collection_lock, 'L'); if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); - if (R->h_tid & GCFLAG_PUBLIC) - W = LocalizePublic(d, R); + if (gcflag_public(R)) + { + /* Make and return a new (young) private copy of the public R. + Add R into the list 'old_public_with_young_copy'. + */ + assert(gcflag_old(R)); + gcptrlist_insert(&d->old_public_with_young_copy, R); + W = LocalizePublic(d, R); + } else - W = LocalizeProtected(d, R); + { + /* Turn the protected copy in-place into a private copy. If it's + an old object that still has GCFLAG_WRITE_BARRIER, then we must + also record it in the list 'old_with_young_pointers_inside'. */ + W = LocalizeProtected(d, R); + record_write_barrier(W); + } spinlock_release(d->public_descriptor->collection_lock); @@ -528,7 +553,7 @@ v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - if (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + if (gcflag_private_from_protected(R)) { /* such an object R might be listed in list_of_read_objects before it was turned from protected to private */ @@ -786,7 +811,7 @@ gcptr R = item->addr; revision_t v; retry: - assert(R->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(R)); v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" @@ -806,7 +831,7 @@ goto retry; gcptr L = item->val; - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED ? + assert(gcflag_private_from_protected(L) ? 
L->h_revision == (revision_t)R : L->h_revision == stm_private_rev_num); assert(v != stm_private_rev_num); @@ -830,7 +855,7 @@ gcptr L = item->val; revision_t expected, v = L->h_revision; - if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) + if (gcflag_private_from_protected(L)) expected = (revision_t)R; else expected = stm_private_rev_num; @@ -867,10 +892,10 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { gcptr L = item->val; - assert(!(L->h_tid & GCFLAG_VISITED)); - assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); - assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!gcflag_visited(L)); + assert(!gcflag_public_to_private(L)); + assert(!gcflag_prebuilt_original(L)); + assert(!gcflag_nursery_moved(L)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -894,9 +919,9 @@ gcptr R = item->addr; revision_t v = (revision_t)item->val; - assert(R->h_tid & GCFLAG_PUBLIC); - assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); - assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); + assert(gcflag_public(R)); + assert(gcflag_public_to_private(R)); + assert(!gcflag_nursery_moved(R)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -907,7 +932,7 @@ ACCESS_ONCE(R->h_revision) = v; #if 0 - if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) + if (gcflag_prebuilt_original(R)) { /* cannot possibly get here more than once for a given value of R */ pthread_mutex_lock(&mutex_prebuilt_gcroots); @@ -932,7 +957,7 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(gcflag_private_from_protected(P)); P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; if (P->h_revision & 1) // "is not a pointer" @@ -947,7 +972,7 @@ gcptr B = (gcptr)P->h_revision; P->h_revision = new_revision; - if (B->h_tid & GCFLAG_PUBLIC) + if (gcflag_public(B)) { /* B was stolen */ while (1) @@ -976,20 +1001,20 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(gcflag_private_from_protected(P)); assert(!(P->h_revision & 1)); // "is a pointer" gcptr B = (gcptr)P->h_revision; - if (B->h_tid & GCFLAG_PUBLIC) + if (gcflag_public(B)) { - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); + assert(!gcflag_backup_copy(B)); P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; P->h_tid |= GCFLAG_PUBLIC; /* P becomes a public outdated object */ } else { - assert(B->h_tid & GCFLAG_BACKUP_COPY); + assert(gcflag_backup_copy(B)); memcpy(P, B, stmcb_size(P)); P->h_tid &= ~GCFLAG_BACKUP_COPY; } diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -39,11 +39,12 @@ * GCFLAG_PREBUILT_ORIGINAL is only set on the original version of * prebuilt objects. * - * GCFLAG_WRITE_BARRIER is set on *old* objects to track old-to- young - * pointers. It may be left set on *public* objects but is ignored - * there, because public objects are read-only. The flag is removed - * once a write occurs and the object is recorded in the list - * 'old_pointing_to_young'; it is set again at the next minor + * GCFLAG_WRITE_BARRIER is set on *old* objects to track old-to-young + * pointers. It is only useful on private objects, and on protected + * objects (which may be turned private again). It may be left set on + * public objects but is ignored there, because such objects are read-only. + * The flag is removed once a write occurs and the object is recorded in + * the list 'old_pointing_to_young'; it is set again at the next minor * collection. * * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. 
@@ -82,6 +83,20 @@ "PRIVATE_FROM_PROTECTED", \ NULL } +#define _DECLARE_FLAG(funcname, flagname) \ + static inline _Bool funcname(gcptr P) { \ + return (P->h_tid & flagname) != 0; } +_DECLARE_FLAG(gcflag_old, GCFLAG_OLD) +_DECLARE_FLAG(gcflag_visited, GCFLAG_VISITED) +_DECLARE_FLAG(gcflag_public, GCFLAG_PUBLIC) +_DECLARE_FLAG(gcflag_prebuilt_original, GCFLAG_PREBUILT_ORIGINAL) +_DECLARE_FLAG(gcflag_public_to_private, GCFLAG_PUBLIC_TO_PRIVATE) +_DECLARE_FLAG(gcflag_write_barrier, GCFLAG_WRITE_BARRIER) +_DECLARE_FLAG(gcflag_nursery_moved, GCFLAG_NURSERY_MOVED) +_DECLARE_FLAG(gcflag_backup_copy, GCFLAG_BACKUP_COPY) +_DECLARE_FLAG(gcflag_stub, GCFLAG_STUB) +_DECLARE_FLAG(gcflag_private_from_protected, GCFLAG_PRIVATE_FROM_PROTECTED) + /************************************************************/ #define ABRT_MANUAL 0 diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -67,11 +67,11 @@ static inline gcptr create_old_object_copy(gcptr obj) { - assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); - assert(!(obj->h_tid & GCFLAG_VISITED)); - assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); - assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); - assert(!(obj->h_tid & GCFLAG_OLD)); + assert(!gcflag_nursery_moved(obj)); + assert(!gcflag_visited(obj)); + assert(!gcflag_write_barrier(obj)); + assert(!gcflag_prebuilt_original(obj)); + assert(!gcflag_old(obj)); size_t size = stmcb_size(obj); gcptr fresh_old_copy = stm_malloc(size); @@ -134,7 +134,7 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(P->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(P)); revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; @@ -145,11 +145,14 @@ We are in a case where we know the transaction will not be able to commit successfully. */ + fprintf(stderr, "public_to_young: %p was modified! abort!\n", P); abort(); AbortTransactionAfterCollect(d, ABRT_COLLECT_MINOR); //... } + fprintf(stderr, "public_to_young: %p -> %p in public_to_private\n", + item->addr, item->val); visit_if_young(&item->val); continue; @@ -159,6 +162,7 @@ It must come from an older transaction that aborted. Nothing to do now. */ + fprintf(stderr, "public_to_young: %p ignored\n", P); continue; } @@ -170,6 +174,8 @@ in the past, but someone made even more changes. Nothing to do now. */ + fprintf(stderr, "public_to_young: %p -> %p not a stub, ignored\n", + P, S); continue; } @@ -177,6 +183,8 @@ /* Bah, it's indeed a stub but for another thread. Nothing to do now. */ + fprintf(stderr, "public_to_young: %p -> %p stub wrong thread, " + "ignored\n", P, S); continue; } @@ -184,6 +192,9 @@ feet because we hold our own collection_lock. 
*/ gcptr L = (gcptr)(w - 2); + fprintf(stderr, "public_to_young: %p -> %p stub -> %p\n", + P, S, L); + visit_if_young(&L); S->h_revision = ((revision_t)L) | 2; } @@ -203,8 +214,8 @@ while (gcptrlist_size(&d->old_objects_to_trace) > 0) { gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); - assert(obj->h_tid & GCFLAG_OLD); - assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); + assert(gcflag_old(obj)); + assert(!gcflag_write_barrier(obj)); obj->h_tid |= GCFLAG_WRITE_BARRIER; stmcb_trace(obj, &visit_if_young); diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -11,8 +11,8 @@ char *nursery_end; \ char *nursery_base; \ struct GcPtrList old_objects_to_trace; \ - struct GcPtrList public_to_young; \ - struct GcPtrList private_old_pointing_to_young; + struct GcPtrList old_public_with_young_copy; \ + struct GcPtrList old_with_young_pointers_inside; struct tx_descriptor; /* from et.h */ diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -55,7 +55,7 @@ static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; - if (obj == NULL || (obj->h_tid & GCFLAG_PUBLIC) != 0) + if (obj == NULL || gcflag_public(obj)) return; /* we use 'all_stubs', a dictionary, in order to try to avoid @@ -98,7 +98,7 @@ /* L might be a private_from_protected, or just a protected copy. To know which case it is, read GCFLAG_PRIVATE_FROM_PROTECTED. */ - if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { + if (gcflag_private_from_protected(L)) { gcptr B = (gcptr)L->h_revision; /* the backup copy */ /* B is now a backup copy, i.e. a protected object, and we own @@ -107,9 +107,9 @@ */ B->h_tid &= ~GCFLAG_BACKUP_COPY; - if (B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { + if (gcflag_public_to_private(B)) { /* already stolen */ - assert(B->h_tid & GCFLAG_PUBLIC); + assert(gcflag_public(B)); fprintf(stderr, "already stolen: %p -> %p <-> %p\n", P, L, B); L = B; goto already_stolen; @@ -125,7 +125,7 @@ } } else { - if (L->h_tid & GCFLAG_PUBLIC) { + if (gcflag_public(L)) { /* already stolen */ fprintf(stderr, "already stolen: %p -> %p\n", P, L); goto already_stolen; @@ -139,7 +139,7 @@ thread's collection_lock, so we can read/write the flags. Change it from protected to public. 
*/ - assert(!(L->h_tid & GCFLAG_PUBLIC)); + assert(!gcflag_public(L)); L->h_tid |= GCFLAG_PUBLIC; /* Note that all protected or backup copies have a h_revision that @@ -192,8 +192,8 @@ gcptr B = items[i]; gcptr L = items[i + 1]; - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); - assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ + assert(gcflag_private_from_protected(L)); + assert(!gcflag_backup_copy(B)); /* already removed */ g2l_insert(&d->public_to_private, B, L); @@ -218,7 +218,7 @@ gcptr B = items[i]; gcptr L = items[i + 1]; - assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(gcflag_private_from_protected(L)); if (B == obj) return L; } diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -391,18 +391,19 @@ # ____________________________________________________________ def oalloc(size): - "Allocate an 'old' public object, outside any nursery" + "Allocate an 'old' protected object, outside any nursery" p = ffi.cast("gcptr", lib.stm_malloc(size)) - p.h_tid = GCFLAG_OLD | GCFLAG_PUBLIC - p.h_revision = 1 + p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER + p.h_revision = -sys.maxint lib.settid(p, 42 + size) return p def oalloc_refs(nrefs): - "Allocate an 'old' public object, outside any nursery, with nrefs pointers" + """Allocate an 'old' protected object, outside any nursery, + with nrefs pointers""" p = ffi.cast("gcptr", lib.stm_malloc(HDR + WORD * nrefs)) - p.h_tid = GCFLAG_OLD | GCFLAG_PUBLIC - p.h_revision = 1 + p.h_tid = GCFLAG_OLD | GCFLAG_WRITE_BARRIER + p.h_revision = -sys.maxint lib.settid(p, 421 + nrefs) for i in range(nrefs): rawsetptr(p, i, ffi.NULL) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -126,7 +126,7 @@ check_not_free(p2b) def test_minor_collection_at_thread_end(): - p1 = oalloc_refs(1) + p1 = palloc_refs(1) p2 = nalloc(HDR) setptr(p1, 0, p2) lib.stm_finalize() @@ -156,3 +156,31 @@ check_prebuilt(p0) p2 = lib.getptr(p0, 0) check_not_free(p2) + +def test_old_protected_stay_alive(): + p0 = oalloc(HDR + WORD) + assert classify(p0) == "protected" + lib.rawsetlong(p0, 0, 81211) + lib.stm_push_root(p0) + minor_collect() + p0b = lib.stm_pop_root() + assert p0b == p0 + assert classify(p0) == "protected" + assert lib.rawgetlong(p0, 0) == 81211 + +def test_old_private_from_protected_to_young_private(): + p0 = oalloc_refs(1) + assert classify(p0) == "protected" + p1 = nalloc(HDR) + lib.setptr(p0, 0, p1) + assert classify(p0) == "private" # private_from_protected + lib.stm_push_root(p0) + minor_collect() + p0b = lib.stm_pop_root() + assert p0b == p0 + check_nursery_free(p1) + assert classify(p0) == "private" # private_from_protected + p2 = lib.getptr(p0, 0) + assert not lib.in_nursery(p2) + check_not_free(p2) + assert classify(p2) == "private" From noreply at buildbot.pypy.org Fri Jun 14 18:55:47 2013 From: noreply at buildbot.pypy.org (andrewsmedina) Date: Fri, 14 Jun 2013 18:55:47 +0200 (CEST) Subject: [pypy-commit] pypy dtype-isnative: implemented dtype.isnative. Message-ID: <20130614165547.3BCA81C1398@cobra.cs.uni-duesseldorf.de> Author: Andrews Medina Branch: dtype-isnative Changeset: r64888:ec63aaab8cf5 Date: 2013-06-14 13:47 -0300 http://bitbucket.org/pypy/pypy/changeset/ec63aaab8cf5/ Log: implemented dtype.isnative. 
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -133,6 +133,9 @@ def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) + def descr_get_isnative(self, space): + return space.wrap(self.native) + def descr_get_base(self, space): return space.wrap(self.base) @@ -439,6 +442,7 @@ str = GetSetProperty(W_Dtype.descr_get_str), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), + isnative = GetSetProperty(W_Dtype.descr_get_isnative), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), fields = GetSetProperty(W_Dtype.descr_get_fields), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -706,6 +706,11 @@ from numpypy import dtype assert dtype('i4').alignment == 4 + def test_isnative(self): + from numpypy import dtype + assert dtype('i4').isnative == True + assert dtype('>i8').isnative == False + def test_any_all(self): import numpypy as numpy x = numpy.bool_(True) From noreply at buildbot.pypy.org Fri Jun 14 18:55:48 2013 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 14 Jun 2013 18:55:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in andrewsmedina/numpypy/dtype-isnative (pull request #155) Message-ID: <20130614165548.6239F1C1398@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r64889:23314bb14a1f Date: 2013-06-14 09:55 -0700 http://bitbucket.org/pypy/pypy/changeset/23314bb14a1f/ Log: Merged in andrewsmedina/numpypy/dtype-isnative (pull request #155) implemented dtype.isnative. 
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -133,6 +133,9 @@ def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) + def descr_get_isnative(self, space): + return space.wrap(self.native) + def descr_get_base(self, space): return space.wrap(self.base) @@ -439,6 +442,7 @@ str = GetSetProperty(W_Dtype.descr_get_str), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), + isnative = GetSetProperty(W_Dtype.descr_get_isnative), shape = GetSetProperty(W_Dtype.descr_get_shape), name = interp_attrproperty('name', cls=W_Dtype), fields = GetSetProperty(W_Dtype.descr_get_fields), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -706,6 +706,11 @@ from numpypy import dtype assert dtype('i4').alignment == 4 + def test_isnative(self): + from numpypy import dtype + assert dtype('i4').isnative == True + assert dtype('>i8').isnative == False + def test_any_all(self): import numpypy as numpy x = numpy.bool_(True) From noreply at buildbot.pypy.org Fri Jun 14 21:21:00 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 21:21:00 +0200 (CEST) Subject: [pypy-commit] stmgc default: Revert the gcflag_xyz() functions; instead found a way to keep the Message-ID: <20130614192100.A9DDF1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r129:0fdcdfbbc387 Date: 2013-06-14 20:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/0fdcdfbbc387/ Log: Revert the gcflag_xyz() functions; instead found a way to keep the assertion error message readable, by using "static const" values. More progress. diff --git a/c4/dbgmem.c b/c4/dbgmem.c --- a/c4/dbgmem.c +++ b/c4/dbgmem.c @@ -22,6 +22,7 @@ intptr_t align = ((intptr_t)p) & (PAGE_SIZE-1); p = ((char *)p) - align; sz += align; + fprintf(stderr, "dbgmem: %p, %ld, %d\n", p, (long)sz, prot); int err = mprotect(p, sz, prot); assert(err == 0); } diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -33,7 +33,7 @@ static int is_private(gcptr P) { return (P->h_revision == stm_private_rev_num) || - gcflag_private_from_protected(P); + (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); } int _stm_is_private(gcptr P) { @@ -80,7 +80,7 @@ revision_t v; restart_all: - if (gcflag_private_from_protected(P)) + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { assert(!(P->h_revision & 1)); /* pointer to the backup copy */ @@ -95,14 +95,14 @@ /* else, for the rest of this function, we can assume that P was not a private copy */ - if (gcflag_public(P)) + if (P->h_tid & GCFLAG_PUBLIC) { /* follow the chained list of h_revision's as long as they are regular pointers. We will only find more public objects along this chain. */ restart_all_public: - assert(gcflag_public(P)); + assert(P->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(P->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" @@ -112,7 +112,7 @@ gcptr P_prev = P; P = (gcptr)v; - assert(gcflag_public(P)); + assert(P->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(P->h_revision); @@ -145,7 +145,7 @@ because *we* have an entry in d->public_to_private. (It might also be someone else.) 
*/ - if (gcflag_public_to_private(P)) + if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { wlog_t *item; retry_public_to_private:; @@ -154,7 +154,7 @@ /* We have a key in 'public_to_private'. The value is the corresponding private object. */ P = item->val; - assert(!gcflag_public(P)); + assert(!(P->h_tid & GCFLAG_PUBLIC)); assert(is_private(P)); fprintf(stderr, "read_barrier: %p -> %p public_to_private\n", G, P); return P; @@ -229,7 +229,7 @@ fprintf(stderr, "read_barrier: %p -> stealing %p...\n ", G, P); stm_steal_stub(P); - assert(gcflag_public(P)); + assert(P->h_tid & GCFLAG_PUBLIC); goto restart_all_public; } } @@ -245,8 +245,8 @@ if (pubobj == P || ((P->h_revision & 3) == 2 && pubobj->h_revision == P->h_revision)) { - assert(!gcflag_stub(org_pubobj)); - assert(!gcflag_public(privobj)); + assert(!(org_pubobj->h_tid & GCFLAG_STUB)); + assert(!(privobj->h_tid & GCFLAG_PUBLIC)); assert(is_private(privobj)); if (P != org_pubobj) fprintf(stderr, "| actually %p ", org_pubobj); @@ -299,7 +299,7 @@ return P; } - if (gcflag_private_from_protected(P)) + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { /* private too, with a backup copy */ assert(!(P->h_revision & 1)); @@ -307,7 +307,7 @@ return P; } - if (gcflag_public(P)) + if (P->h_tid & GCFLAG_PUBLIC) { fprintf(stderr, "public "); @@ -323,7 +323,7 @@ } P = (gcptr)v; - assert(gcflag_public(P)); + assert(P->h_tid & GCFLAG_PUBLIC); fprintf(stderr, "-> %p public ", P); } @@ -354,12 +354,12 @@ { P = (gcptr)(v - 2); fprintf(stderr, "-foreign-> %p ", P); - if (gcflag_private_from_protected(P)) + if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { P = (gcptr)P->h_revision; /* the backup copy */ fprintf(stderr, "-backup-> %p ", P); } - if (!gcflag_public(P)) + if (!(P->h_tid & GCFLAG_PUBLIC)) { fprintf(stderr, "protected by someone else!\n"); return (gcptr)-1; @@ -397,10 +397,10 @@ assert(P->h_revision != stm_private_rev_num); assert(P->h_revision & 1); - assert(!gcflag_public_to_private(P)); - assert(!gcflag_backup_copy(P)); - assert(!gcflag_stub(P)); - assert(!gcflag_private_from_protected(P)); + assert(!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); + assert(!(P->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(P->h_tid & GCFLAG_STUB)); + assert(!(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); B = stmgc_duplicate(P); B->h_tid |= GCFLAG_BACKUP_COPY; @@ -415,7 +415,7 @@ static gcptr LocalizePublic(struct tx_descriptor *d, gcptr R) { - assert(gcflag_public(R)); + assert(R->h_tid & GCFLAG_PUBLIC); #ifdef _GC_DEBUG wlog_t *entry; @@ -427,9 +427,9 @@ R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE; gcptr L = stmgc_duplicate(R); - assert(!gcflag_backup_copy(L)); - assert(!gcflag_stub(L)); - assert(!gcflag_private_from_protected(L)); + assert(!(L->h_tid & GCFLAG_BACKUP_COPY)); + assert(!(L->h_tid & GCFLAG_STUB)); + assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); L->h_tid &= ~(GCFLAG_OLD | GCFLAG_VISITED | GCFLAG_PUBLIC | @@ -451,10 +451,10 @@ static inline void record_write_barrier(gcptr P) { - if (gcflag_write_barrier(P)) + if (P->h_tid & GCFLAG_WRITE_BARRIER) { P->h_tid &= ~GCFLAG_WRITE_BARRIER; - gcptrlist_insert(&thread_descriptor->old_with_young_pointers_inside, P); + gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P); } } @@ -463,9 +463,9 @@ if (is_private(P)) { /* If we have GCFLAG_WRITE_BARRIER in P, then list it into - old_with_young_pointers_inside: it's a private object that may - be modified by the program after we return, and the mutation - may be to write young pointers (in fact it's a common case). 
+ old_objects_to_trace: it's a private object that may be + modified by the program after we return, and the mutation may + be to write young pointers (in fact it's a common case). */ record_write_barrier(P); return P; @@ -490,20 +490,20 @@ if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); - if (gcflag_public(R)) + if (R->h_tid & GCFLAG_PUBLIC) { /* Make and return a new (young) private copy of the public R. - Add R into the list 'old_public_with_young_copy'. + Add R into the list 'public_with_young_copy'. */ - assert(gcflag_old(R)); - gcptrlist_insert(&d->old_public_with_young_copy, R); + assert(R->h_tid & GCFLAG_OLD); + gcptrlist_insert(&d->public_with_young_copy, R); W = LocalizePublic(d, R); } else { /* Turn the protected copy in-place into a private copy. If it's an old object that still has GCFLAG_WRITE_BARRIER, then we must - also record it in the list 'old_with_young_pointers_inside'. */ + also record it in the list 'old_objects_to_trace'. */ W = LocalizeProtected(d, R); record_write_barrier(W); } @@ -553,7 +553,7 @@ v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" - if (gcflag_private_from_protected(R)) + if (R->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { /* such an object R might be listed in list_of_read_objects before it was turned from protected to private */ @@ -811,7 +811,7 @@ gcptr R = item->addr; revision_t v; retry: - assert(gcflag_public(R)); + assert(R->h_tid & GCFLAG_PUBLIC); v = ACCESS_ONCE(R->h_revision); if (!(v & 1)) // "is a pointer", i.e. { // "has a more recent revision" @@ -831,7 +831,7 @@ goto retry; gcptr L = item->val; - assert(gcflag_private_from_protected(L) ? + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED ? L->h_revision == (revision_t)R : L->h_revision == stm_private_rev_num); assert(v != stm_private_rev_num); @@ -855,7 +855,7 @@ gcptr L = item->val; revision_t expected, v = L->h_revision; - if (gcflag_private_from_protected(L)) + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) expected = (revision_t)R; else expected = stm_private_rev_num; @@ -892,10 +892,10 @@ G2L_LOOP_FORWARD(d->public_to_private, item) { gcptr L = item->val; - assert(!gcflag_visited(L)); - assert(!gcflag_public_to_private(L)); - assert(!gcflag_prebuilt_original(L)); - assert(!gcflag_nursery_moved(L)); + assert(!(L->h_tid & GCFLAG_VISITED)); + assert(!(L->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)); + assert(!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(L->h_tid & GCFLAG_NURSERY_MOVED)); assert(L->h_revision != localrev); /* modified by AcquireLocks() */ #ifdef DUMP_EXTRA @@ -919,9 +919,9 @@ gcptr R = item->addr; revision_t v = (revision_t)item->val; - assert(gcflag_public(R)); - assert(gcflag_public_to_private(R)); - assert(!gcflag_nursery_moved(R)); + assert(R->h_tid & GCFLAG_PUBLIC); + assert(R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE); + assert(!(R->h_tid & GCFLAG_NURSERY_MOVED)); assert(R->h_revision != localrev); #ifdef DUMP_EXTRA @@ -932,7 +932,7 @@ ACCESS_ONCE(R->h_revision) = v; #if 0 - if (gcflag_prebuilt_original(R)) + if (R->h_tid & GCFLAG_PREBUILT_ORIGINAL) { /* cannot possibly get here more than once for a given value of R */ pthread_mutex_lock(&mutex_prebuilt_gcroots); @@ -957,7 +957,7 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(gcflag_private_from_protected(P)); + assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; if (P->h_revision & 1) // "is not a pointer" @@ -972,7 +972,7 @@ gcptr B = (gcptr)P->h_revision; P->h_revision = 
new_revision; - if (gcflag_public(B)) + if (B->h_tid & GCFLAG_PUBLIC) { /* B was stolen */ while (1) @@ -987,7 +987,7 @@ } else { - stm_free(B, stmcb_size(B)); + //stm_free(B, stmcb_size(B)); } }; gcptrlist_clear(&d->private_from_protected); @@ -1001,20 +1001,20 @@ for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(gcflag_private_from_protected(P)); + assert(P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); assert(!(P->h_revision & 1)); // "is a pointer" gcptr B = (gcptr)P->h_revision; - if (gcflag_public(B)) + if (B->h_tid & GCFLAG_PUBLIC) { - assert(!gcflag_backup_copy(B)); + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); P->h_tid &= ~GCFLAG_PRIVATE_FROM_PROTECTED; P->h_tid |= GCFLAG_PUBLIC; /* P becomes a public outdated object */ } else { - assert(gcflag_backup_copy(B)); + assert(B->h_tid & GCFLAG_BACKUP_COPY); memcpy(P, B, stmcb_size(P)); P->h_tid &= ~GCFLAG_BACKUP_COPY; } diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -44,7 +44,7 @@ * objects (which may be turned private again). It may be left set on * public objects but is ignored there, because such objects are read-only. * The flag is removed once a write occurs and the object is recorded in - * the list 'old_pointing_to_young'; it is set again at the next minor + * the list 'old_objects_to_trace'; it is set again at the next minor * collection. * * GCFLAG_NURSERY_MOVED is used temporarily during minor collections. @@ -54,16 +54,16 @@ * that is == 2 (mod 4): in this case they point to a protected/private * object that belongs to the thread 'STUB_THREAD(p_stub)'. */ -#define GCFLAG_OLD (STM_FIRST_GCFLAG << 0) -#define GCFLAG_VISITED (STM_FIRST_GCFLAG << 1) -#define GCFLAG_PUBLIC (STM_FIRST_GCFLAG << 2) -#define GCFLAG_PREBUILT_ORIGINAL (STM_FIRST_GCFLAG << 3) -#define GCFLAG_PUBLIC_TO_PRIVATE (STM_FIRST_GCFLAG << 4) -#define GCFLAG_WRITE_BARRIER (STM_FIRST_GCFLAG << 5) -#define GCFLAG_NURSERY_MOVED (STM_FIRST_GCFLAG << 6) -#define GCFLAG_BACKUP_COPY (STM_FIRST_GCFLAG << 7) /* debugging */ -#define GCFLAG_STUB (STM_FIRST_GCFLAG << 8) /* debugging */ -#define GCFLAG_PRIVATE_FROM_PROTECTED (STM_FIRST_GCFLAG << 9) +static const revision_t GCFLAG_OLD = STM_FIRST_GCFLAG << 0; +static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1; +static const revision_t GCFLAG_PUBLIC = STM_FIRST_GCFLAG << 2; +static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3; +static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4; +static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5; +static const revision_t GCFLAG_NURSERY_MOVED = STM_FIRST_GCFLAG << 6; +static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7; +static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8; +static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9; /* this value must be reflected in PREBUILT_FLAGS in stmgc.h */ #define GCFLAG_PREBUILT (GCFLAG_VISITED | \ @@ -83,20 +83,6 @@ "PRIVATE_FROM_PROTECTED", \ NULL } -#define _DECLARE_FLAG(funcname, flagname) \ - static inline _Bool funcname(gcptr P) { \ - return (P->h_tid & flagname) != 0; } -_DECLARE_FLAG(gcflag_old, GCFLAG_OLD) -_DECLARE_FLAG(gcflag_visited, GCFLAG_VISITED) -_DECLARE_FLAG(gcflag_public, GCFLAG_PUBLIC) -_DECLARE_FLAG(gcflag_prebuilt_original, GCFLAG_PREBUILT_ORIGINAL) -_DECLARE_FLAG(gcflag_public_to_private, GCFLAG_PUBLIC_TO_PRIVATE) -_DECLARE_FLAG(gcflag_write_barrier, GCFLAG_WRITE_BARRIER) -_DECLARE_FLAG(gcflag_nursery_moved, GCFLAG_NURSERY_MOVED) -_DECLARE_FLAG(gcflag_backup_copy, 
GCFLAG_BACKUP_COPY) -_DECLARE_FLAG(gcflag_stub, GCFLAG_STUB) -_DECLARE_FLAG(gcflag_private_from_protected, GCFLAG_PRIVATE_FROM_PROTECTED) - /************************************************************/ #define ABRT_MANUAL 0 diff --git a/c4/nursery.c b/c4/nursery.c --- a/c4/nursery.c +++ b/c4/nursery.c @@ -26,8 +26,7 @@ stm_free(d->nursery_base, GC_NURSERY); gcptrlist_delete(&d->old_objects_to_trace); - gcptrlist_delete(&d->public_to_young); - gcptrlist_delete(&d->private_old_pointing_to_young); + gcptrlist_delete(&d->public_with_young_copy); } static char *collect_and_allocate_size(size_t size); /* forward */ @@ -67,11 +66,11 @@ static inline gcptr create_old_object_copy(gcptr obj) { - assert(!gcflag_nursery_moved(obj)); - assert(!gcflag_visited(obj)); - assert(!gcflag_write_barrier(obj)); - assert(!gcflag_prebuilt_original(obj)); - assert(!gcflag_old(obj)); + assert(!(obj->h_tid & GCFLAG_NURSERY_MOVED)); + assert(!(obj->h_tid & GCFLAG_VISITED)); + assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); + assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)); + assert(!(obj->h_tid & GCFLAG_OLD)); size_t size = stmcb_size(obj); gcptr fresh_old_copy = stm_malloc(size); @@ -119,27 +118,27 @@ static void mark_public_to_young(struct tx_descriptor *d) { - /* "public_to_young" contains ptrs to the public copies used as - key of "public_to_private", but only the ones that were added - since the last minor collection. Once the transaction commit, - they stay in "public_to_young", and so they become public - objects whose h_revision is a public stub, which itself points - (originally) to a protected young object. + /* "public_with_young_copy" lists the public copies that may have + a more recent (or in-progress) private or protected object that + is young. Note that public copies themselves are always old. - Be careful and accept more or less any object in the list, which - can show up because of aborted transactions. + The list should only contain public objects, but beyong that, be + careful and ignore any strange object: it can show up because of + aborted transactions (and then some different changes). */ - long i, size = d->public_to_young.size; - gcptr *items = d->public_to_young.items; + long i, size = d->public_with_young_copy.size; + gcptr *items = d->public_with_young_copy.items; for (i = 0; i < size; i++) { gcptr P = items[i]; - assert(gcflag_public(P)); + assert(P->h_tid & GCFLAG_PUBLIC); revision_t v = ACCESS_ONCE(P->h_revision); wlog_t *item; G2L_FIND(d->public_to_private, P, item, goto not_in_public_to_private); + /* found P in 'public_to_private' */ + if (!(v & 1)) { // "is a pointer" /* P is both a key in public_to_private and an outdated copy. 
We are in a case where we know the transaction will not @@ -153,6 +152,7 @@ fprintf(stderr, "public_to_young: %p -> %p in public_to_private\n", item->addr, item->val); + assert(_stm_is_private(item->val)); visit_if_young(&item->val); continue; @@ -199,14 +199,7 @@ S->h_revision = ((revision_t)L) | 2; } - gcptrlist_clear(&d->public_to_young); -} - -static void mark_private_old_pointing_to_young(struct tx_descriptor *d) -{ - /* trace the objects recorded earlier by stmgc_write_barrier() */ - gcptrlist_move(&d->old_objects_to_trace, - &d->private_old_pointing_to_young); + gcptrlist_clear(&d->public_with_young_copy); } static void visit_all_outside_objects(struct tx_descriptor *d) @@ -214,8 +207,8 @@ while (gcptrlist_size(&d->old_objects_to_trace) > 0) { gcptr obj = gcptrlist_pop(&d->old_objects_to_trace); - assert(gcflag_old(obj)); - assert(!gcflag_write_barrier(obj)); + assert(obj->h_tid & GCFLAG_OLD); + assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER)); obj->h_tid |= GCFLAG_WRITE_BARRIER; stmcb_trace(obj, &visit_if_young); @@ -225,8 +218,6 @@ static void setup_minor_collect(struct tx_descriptor *d) { spinlock_acquire(d->public_descriptor->collection_lock, 'M'); /*minor*/ - assert(gcptrlist_size(&d->old_objects_to_trace) == 0); - if (d->public_descriptor->stolen_objects.size != 0) stm_normalize_stolen_objects(d); } @@ -234,6 +225,7 @@ static void teardown_minor_collect(struct tx_descriptor *d) { assert(gcptrlist_size(&d->old_objects_to_trace) == 0); + assert(gcptrlist_size(&d->public_with_young_copy) == 0); assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0); spinlock_release(d->public_descriptor->collection_lock); @@ -255,8 +247,6 @@ mark_public_to_young(d); - mark_private_old_pointing_to_young(d); - visit_all_outside_objects(d); #if 0 fix_list_of_read_objects(d); @@ -292,8 +282,7 @@ if (d->nursery_current == d->nursery_base /*&& !g2l_any_entry(&d->young_objects_outside_nursery)*/ ) { /* there is no young object */ - assert(gcptrlist_size(&d->private_old_pointing_to_young) == 0); - assert(gcptrlist_size(&d->public_to_young) == 0); + assert(gcptrlist_size(&d->public_with_young_copy) == 0); return 0; } else { diff --git a/c4/nursery.h b/c4/nursery.h --- a/c4/nursery.h +++ b/c4/nursery.h @@ -11,8 +11,7 @@ char *nursery_end; \ char *nursery_base; \ struct GcPtrList old_objects_to_trace; \ - struct GcPtrList old_public_with_young_copy; \ - struct GcPtrList old_with_young_pointers_inside; + struct GcPtrList public_with_young_copy; struct tx_descriptor; /* from et.h */ diff --git a/c4/steal.c b/c4/steal.c --- a/c4/steal.c +++ b/c4/steal.c @@ -55,7 +55,7 @@ static void replace_ptr_to_protected_with_stub(gcptr *pobj) { gcptr stub, obj = *pobj; - if (obj == NULL || gcflag_public(obj)) + if (obj == NULL || (obj->h_tid & GCFLAG_PUBLIC) != 0) return; /* we use 'all_stubs', a dictionary, in order to try to avoid @@ -98,7 +98,7 @@ /* L might be a private_from_protected, or just a protected copy. To know which case it is, read GCFLAG_PRIVATE_FROM_PROTECTED. */ - if (gcflag_private_from_protected(L)) { + if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) { gcptr B = (gcptr)L->h_revision; /* the backup copy */ /* B is now a backup copy, i.e. 
a protected object, and we own @@ -107,9 +107,9 @@ */ B->h_tid &= ~GCFLAG_BACKUP_COPY; - if (gcflag_public_to_private(B)) { + if (B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE) { /* already stolen */ - assert(gcflag_public(B)); + assert(B->h_tid & GCFLAG_PUBLIC); fprintf(stderr, "already stolen: %p -> %p <-> %p\n", P, L, B); L = B; goto already_stolen; @@ -125,7 +125,7 @@ } } else { - if (gcflag_public(L)) { + if (L->h_tid & GCFLAG_PUBLIC) { /* already stolen */ fprintf(stderr, "already stolen: %p -> %p\n", P, L); goto already_stolen; @@ -139,7 +139,7 @@ thread's collection_lock, so we can read/write the flags. Change it from protected to public. */ - assert(!gcflag_public(L)); + assert(!(L->h_tid & GCFLAG_PUBLIC)); L->h_tid |= GCFLAG_PUBLIC; /* Note that all protected or backup copies have a h_revision that @@ -192,8 +192,8 @@ gcptr B = items[i]; gcptr L = items[i + 1]; - assert(gcflag_private_from_protected(L)); - assert(!gcflag_backup_copy(B)); /* already removed */ + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); + assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */ g2l_insert(&d->public_to_private, B, L); @@ -218,7 +218,7 @@ gcptr B = items[i]; gcptr L = items[i + 1]; - assert(gcflag_private_from_protected(L)); + assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED); if (B == obj) return L; } diff --git a/c4/test/support.py b/c4/test/support.py --- a/c4/test/support.py +++ b/c4/test/support.py @@ -502,14 +502,16 @@ lib.AbortTransaction(lib.ABRT_MANUAL) def classify(p): - private = (p.h_revision == lib.get_private_rev_num() or - (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0) + private_from_protected = (p.h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) != 0 + private_other = p.h_revision == lib.get_private_rev_num() public = (p.h_tid & GCFLAG_PUBLIC) != 0 backup = (p.h_tid & GCFLAG_BACKUP_COPY) != 0 stub = (p.h_tid & GCFLAG_STUB) != 0 - assert private + public + backup <= 1 + assert private_from_protected + private_other + public + backup <= 1 assert (public, stub) != (False, True) - if private: + if private_from_protected: + return "private_from_protected" + if private_other: return "private" if public: if stub: diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -52,7 +52,7 @@ assert classify(p) == "protected" p2 = lib.stm_write_barrier(p) assert p2 == p # does not move - assert classify(p) == "private" + assert classify(p) == "private_from_protected" pback = follow_revision(p) assert classify(pback) == "backup" assert list_of_private_from_protected() == [p] @@ -65,7 +65,7 @@ org_r = p.h_revision assert classify(p) == "protected" lib.setlong(p, 0, 927122) - assert classify(p) == "private" + assert classify(p) == "private_from_protected" pback = follow_revision(p) assert pback and pback != p assert pback.h_revision == org_r @@ -73,7 +73,7 @@ GCFLAG_BACKUP_COPY) assert lib.rawgetlong(pback, 0) == 78927812 assert lib.rawgetlong(p, 0) == 927122 - assert classify(p) == "private" + assert classify(p) == "private_from_protected" assert classify(pback) == "backup" def test_prebuilt_is_public(): @@ -263,13 +263,13 @@ lib.rawsetlong(p2, 0, -451112) pback = follow_revision(p1) pback_.append(pback) - assert classify(p1) == "private" + assert classify(p1) == "private_from_protected" assert classify(pback) == "backup" assert lib.stm_read_barrier(p) == p1 assert lib.stm_read_barrier(p1) == p1 assert pback.h_revision & 1 r.wait_while_in_parallel() - assert classify(p1) == "private" + assert classify(p1) == "private_from_protected" assert classify(pback) 
== "public" assert pback.h_tid & GCFLAG_PUBLIC_TO_PRIVATE assert lib.stm_read_barrier(p) == p1 @@ -322,7 +322,7 @@ if c == 0: lib.setlong(p, 0, -38383) assert lib.getlong(p, 0) == -38383 - assert classify(p) == "private" + assert classify(p) == "private_from_protected" abort_and_retry() perform_transaction(cb) diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -168,18 +168,29 @@ assert classify(p0) == "protected" assert lib.rawgetlong(p0, 0) == 81211 +def test_old_private_from_protected(): + p0 = oalloc(HDR + WORD) + assert classify(p0) == "protected" + lib.setlong(p0, 0, 29820298) + assert classify(p0) == "private_from_protected" + lib.stm_commit_transaction() + lib.stm_begin_inevitable_transaction() + assert classify(p0) == "protected" + assert lib.getlong(p0, 0) == 29820298 + assert classify(p0) == "protected" + def test_old_private_from_protected_to_young_private(): p0 = oalloc_refs(1) assert classify(p0) == "protected" p1 = nalloc(HDR) lib.setptr(p0, 0, p1) - assert classify(p0) == "private" # private_from_protected + assert classify(p0) == "private_from_protected" lib.stm_push_root(p0) minor_collect() p0b = lib.stm_pop_root() assert p0b == p0 check_nursery_free(p1) - assert classify(p0) == "private" # private_from_protected + assert classify(p0) == "private_from_protected" p2 = lib.getptr(p0, 0) assert not lib.in_nursery(p2) check_not_free(p2) From noreply at buildbot.pypy.org Fri Jun 14 21:21:01 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 21:21:01 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test passes Message-ID: <20130614192101.CED5D1C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r130:aa0f0a2a6f74 Date: 2013-06-14 20:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/aa0f0a2a6f74/ Log: Test passes diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -195,3 +195,24 @@ assert not lib.in_nursery(p2) check_not_free(p2) assert classify(p2) == "private" + +def test_new_version(): + p1 = oalloc(HDR) + assert lib.stm_write_barrier(p1) == p1 + lib.stm_push_root(p1) + transaction_break() + p1b = lib.stm_pop_root() + assert p1b == p1 + p2 = lib.stm_write_barrier(p1) + assert p2 == p1 + assert not lib.in_nursery(p2) + check_not_free(p1) + lib.stm_push_root(p1) + minor_collect() + p1b = lib.stm_pop_root() + assert p1b == p1 + check_not_free(p1) + p2 = lib.stm_read_barrier(p1) + assert p2 == p1 + assert not lib.in_nursery(p2) + assert classify(p2) == "private_from_protected" From noreply at buildbot.pypy.org Fri Jun 14 21:21:02 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 21:21:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test and bug fix Message-ID: <20130614192102.EDD831C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r131:69b1046ea32d Date: 2013-06-14 20:27 +0200 http://bitbucket.org/pypy/stmgc/changeset/69b1046ea32d/ Log: Test and bug fix diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -476,8 +476,8 @@ if (is_private(R)) { - record_write_barrier(P); - return P; + record_write_barrier(R); + return R; } struct tx_descriptor *d = thread_descriptor; @@ -498,6 +498,7 @@ assert(R->h_tid & GCFLAG_OLD); gcptrlist_insert(&d->public_with_young_copy, R); W = LocalizePublic(d, R); + assert(is_private(W)); } else { @@ -505,6 +506,7 @@ an old object that still has GCFLAG_WRITE_BARRIER, then we must also record it in the list 
'old_objects_to_trace'. */ W = LocalizeProtected(d, R); + assert(is_private(W)); record_write_barrier(W); } diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py --- a/c4/test/test_nursery.py +++ b/c4/test/test_nursery.py @@ -216,3 +216,19 @@ assert p2 == p1 assert not lib.in_nursery(p2) assert classify(p2) == "private_from_protected" + +def test_prebuilt_version(): + p1 = lib.pseudoprebuilt(HDR, 42 + HDR) + p2 = lib.stm_write_barrier(p1) + assert p2 != p1 + check_prebuilt(p1) + check_not_free(p2) + minor_collect() + check_prebuilt(p1) + check_nursery_free(p2) + p2 = lib.stm_read_barrier(p1) + assert classify(p2) == "private" + p3 = lib.stm_write_barrier(p1) + assert classify(p3) == "private" + assert p3 == p2 != p1 + assert not lib.in_nursery(p2) From noreply at buildbot.pypy.org Fri Jun 14 21:21:04 2013 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Jun 2013 21:21:04 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix test Message-ID: <20130614192104.0D9001C0134@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r132:c46d7e01ec15 Date: 2013-06-14 20:30 +0200 http://bitbucket.org/pypy/stmgc/changeset/c46d7e01ec15/ Log: Fix test diff --git a/c4/test/test_et.py b/c4/test/test_et.py --- a/c4/test/test_et.py +++ b/c4/test/test_et.py @@ -189,7 +189,7 @@ p2b = lib.stm_write_barrier(p) assert p2b == p2 assert classify(p) == "public" - assert classify(p2) == "private" + assert classify(p2) == "private_from_protected" assert list_of_read_objects() == [p2] p3 = lib.stm_read_barrier(p) assert p3 == p2 From noreply at buildbot.pypy.org Sat Jun 15 07:33:17 2013 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 15 Jun 2013 07:33:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a jit_merge_point to list.find Message-ID: <20130615053317.7CEA11C02BA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r64890:87bfc8884702 Date: 2013-06-15 13:32 +0800 http://bitbucket.org/pypy/pypy/changeset/87bfc8884702/ Log: Add a jit_merge_point to list.find diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -693,6 +693,7 @@ raise OperationError(space.w_ValueError, space.wrap("list modified during sort")) +find_jmp = jit.JitDriver(greens = [], reds = 'auto', name = 'list.find') class ListStrategy(object): sizehint = -1 @@ -717,6 +718,7 @@ i = start # needs to be safe against eq_w mutating stuff while i < stop and i < w_list.length(): + find_jmp.jit_merge_point() if space.eq_w(w_list.getitem(i), w_item): return i i += 1 From noreply at buildbot.pypy.org Sat Jun 15 13:36:29 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sat, 15 Jun 2013 13:36:29 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: use got_revision here Message-ID: <20130615113629.843391C1398@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r826:d0caac6ed8bf Date: 2013-06-15 13:36 +0200 http://bitbucket.org/pypy/buildbot/changeset/d0caac6ed8bf/ Log: use got_revision here diff --git a/bot2/pypybuildbot/test/test_builds.py b/bot2/pypybuildbot/test/test_builds.py --- a/bot2/pypybuildbot/test/test_builds.py +++ b/bot2/pypybuildbot/test/test_builds.py @@ -62,7 +62,7 @@ def test_pypy_upload(): pth = py.test.ensuretemp('buildbot') inst = builds.PyPyUpload(slavesrc='slavesrc', masterdest=str(pth.join('mstr')), - basename='base-%(final_file_name)s', workdir='.', + basename='base-%(got_revision)s', workdir='.', blocksize=100) factory = 
inst._getStepFactory().factory kw = inst._getStepFactory().kwargs @@ -73,7 +73,7 @@ rebuilt.start() assert pth.join('mstr').check(dir=True) assert rebuilt.masterdest == str(pth.join('mstr', 'trunk', - 'base-123-ea5ca8')) + 'base-123')) assert rebuilt.symlinkname == str(pth.join('mstr', 'trunk', 'base-latest')) From noreply at buildbot.pypy.org Sat Jun 15 13:36:28 2013 From: noreply at buildbot.pypy.org (bivab) Date: Sat, 15 Jun 2013 13:36:28 +0200 (CEST) Subject: [pypy-commit] buildbot buildbot-0.8.7: merge default Message-ID: <20130615113628.4E7DB1C0F1D@cobra.cs.uni-duesseldorf.de> Author: David Schneider Branch: buildbot-0.8.7 Changeset: r825:41d58f6cac9e Date: 2013-06-15 13:07 +0200 http://bitbucket.org/pypy/buildbot/changeset/41d58f6cac9e/ Log: merge default diff --git a/bbhook/hook.py b/bbhook/hook.py --- a/bbhook/hook.py +++ b/bbhook/hook.py @@ -3,6 +3,8 @@ import subprocess import sys import time +import thread, Queue +import traceback from .main import app from . import scm @@ -39,7 +41,24 @@ yield commit -def handle(payload, test=False): + +def _handle_thread(): + while True: + local_repo = payload = None + try: + local_repo, payload = queue.get() + _do_handle(local_repo, payload) + except: + traceback.print_exc() + print >> sys.stderr, 'payload:' + pprint.pprint(payload, sys.stderr) + print >> sys.stderr + +queue = Queue.Queue() +thread.start_new_thread(_handle_thread, ()) + + +def handle(payload, test=True): path = payload['repository']['absolute_url'] owner = payload['repository']['owner'] local_repo = app.config['LOCAL_REPOS'].join(path) @@ -47,6 +66,12 @@ if not check_for_local_repo(local_repo, remote_repo, owner): print >> sys.stderr, 'Ignoring unknown repo', path return + if test: + _do_handle(local_repo, payload, test) + else: + queue.put((local_repo, payload)) + +def _do_handle(local_repo, payload, test=False): scm.hg('pull', '-R', local_repo) for commit in get_commits(payload): for handler in HANDLERS: diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -23,8 +23,7 @@ translationArgs=crosstranslationargs + ['-O2'], platform='linux-armel', interpreter='pypy', - prefix=['schroot', '-c', 'armel'], - trigger='APPLVLLINUXARM_scheduler') + prefix=['schroot', '-c', 'armel']) pypyJITCrossTranslationFactoryARM = pypybuilds.NightlyBuild( translationArgs=(crosstranslationargs @@ -51,6 +50,14 @@ prefix=['schroot', '-c', 'raspbian'], trigger='JITLINUXARMHF_RASPBIAN_scheduler') +pypyJITCrossTranslationFactoryRaringHF = pypybuilds.NightlyBuild( + translationArgs=(crosstranslationargs + + jit_translation_args + + crosstranslationjitargs), + platform='linux-armhf-raring', + interpreter='pypy', + prefix=['schroot', '-c', 'raring']) + pypyARMJITTranslatedTestFactory = pypybuilds.TranslatedTests( translationArgs=(crosstranslationargs + jit_translation_args @@ -100,6 +107,7 @@ BUILDJITLINUXARM = "build-pypy-c-jit-linux-armel" BUILDLINUXARMHF_RASPBIAN = "build-pypy-c-linux-armhf-raspbian" BUILDJITLINUXARMHF_RASPBIAN = "build-pypy-c-jit-linux-armhf-raspbian" +BUILDJITLINUXARMHF_RARING = "build-pypy-c-jit-linux-armhf-raring" builderNames = [ APPLVLLINUXARM, @@ -121,12 +129,13 @@ Nightly("nighly-arm-0-00", [ BUILDJITLINUXARM, # on hhu-cross-armel, uses 1 core BUILDJITLINUXARMHF_RASPBIAN, # on hhu-cross-raspbianhf, uses 1 core + BUILDJITLINUXARMHF_RARING, # on hhu-cross-raring-armhf, uses 1 core BUILDLINUXARM, # on hhu-cross-armel, uses 1 core BUILDLINUXARMHF_RASPBIAN, # on 
hhu-cross-raspbianhf, uses 1 core JITBACKENDONLYLINUXARMEL, # on hhu-imx.53 - JITBACKENDONLYLINUXARMHF, # on hhu-raspberry-pi + JITBACKENDONLYLINUXARMHF, JITBACKENDONLYLINUXARMHF_v7, # on cubieboard-bob ], branch=None, hour=0, minute=0), @@ -138,12 +147,12 @@ JITLINUXARM, # triggered by BUILDJITLINUXARM, on hhu-beagleboard ]), Triggerable("APPLVLLINUXARMHF_RASPBIAN_scheduler", [ - APPLVLLINUXARMHF_RASPBIAN, # triggered by BUILDLINUXARMHF_RASPBIAN, on hhu-raspberry-pi + APPLVLLINUXARMHF_RASPBIAN, # triggered by BUILDLINUXARMHF_RASPBIAN APPLVLLINUXARMHF_v7, # triggered by BUILDLINUXARMHF_RASPBIAN, on cubieboard-bob ]), Triggerable("JITLINUXARMHF_RASPBIAN_scheduler", [ - JITLINUXARMHF_RASPBIAN, # triggered by BUILDJITLINUXARMHF_RASPBIAN, on hhu-raspberry-pi + JITLINUXARMHF_RASPBIAN, # triggered by BUILDJITLINUXARMHF_RASPBIAN JITLINUXARMHF_v7, # triggered by BUILDJITLINUXARMHF_RASPBIAN, on cubieboard-bob ]), ] @@ -162,7 +171,7 @@ # armhf ## armv6 {"name": JITBACKENDONLYLINUXARMHF, - "slavenames": ['hhu-raspberry-pi'], + "slavenames": ['hhu-raspberry-pi', 'hhu-pypy-pi', 'hhu-pypy-pi2'], "builddir": JITBACKENDONLYLINUXARMHF, "factory": pypyJitBackendOnlyOwnTestFactoryARM, "category": 'linux-armhf', @@ -194,14 +203,14 @@ }, ## armv6 hardfloat {"name": APPLVLLINUXARMHF_RASPBIAN, - "slavenames": ["hhu-raspberry-pi"], + "slavenames": ['hhu-raspberry-pi', 'hhu-pypy-pi', 'hhu-pypy-pi2'], "builddir": APPLVLLINUXARMHF_RASPBIAN, "factory": pypyARMHF_RASPBIAN_TranslatedAppLevelTestFactory, "category": "linux-armhf", "locks": [ARMBoardLock.access('counting')], }, {"name": JITLINUXARMHF_RASPBIAN, - "slavenames": ["hhu-raspberry-pi"], + "slavenames": ['hhu-raspberry-pi', 'hhu-pypy-pi', 'hhu-pypy-pi2'], 'builddir': JITLINUXARMHF_RASPBIAN, 'factory': pypyARMHF_RASPBIAN_JITTranslatedTestFactory, 'category': 'linux-armhf', @@ -251,4 +260,11 @@ "category": 'linux-armhf', "locks": [ARMCrossLock.access('counting')], }, + {"name": BUILDJITLINUXARMHF_RARING, + "slavenames": ['hhu-cross-raring'], + "builddir": BUILDJITLINUXARMHF_RARING, + "factory": pypyJITCrossTranslationFactoryRaringHF, + "category": 'linux-armhf', + "locks": [ARMCrossLock.access('counting')], + }, ] diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -181,9 +181,12 @@ CPYTHON_64 = "cpython-2-benchmark-x86-64" -extra_opts= {'xerxes': {'keepalive_interval': 15}, +extra_opts = {'xerxes': {'keepalive_interval': 15}, 'aurora': {'max_builds': 1}, 'salsa': {'max_builds': 1}, + 'hhu-raspberry-pi': {'max_builds': 1}, + 'hhu-pypy-pi': {'max_builds': 1}, + 'hhu-pypy-pi2': {'max_builds': 1}, } BuildmasterConfig = { @@ -204,7 +207,7 @@ APPLVLLINUX32, # on tannit32, uses 1 core APPLVLLINUX64, # on allegro64, uses 1 core # other platforms - MACOSX32, # on minime + #MACOSX32, # on minime JITWIN32, # on aurora JITFREEBSD764, # on headless JITFREEBSD864, # on ananke diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -28,6 +28,8 @@ 'linux64': 50, 'osx': 30, 'win32': 10, + 'linux_armhf_raspbian': 7, + 'linux_armhf_raring': 6, 'linux_armel': 5, } @@ -66,6 +68,8 @@ name = self.filename.replace(ext, '') # remove the dash from linux-armel, else the split does not work name = name.replace('-armel', '_armel') + name = name.replace('-libc2', '_libc2') + name = name.replace('-armhf-ra', '_armhf_ra') dashes = name.count('-') if dashes == 4: # svn based diff --git 
a/master/public_html/summary.css b/master/public_html/summary.css --- a/master/public_html/summary.css +++ b/master/public_html/summary.css @@ -73,6 +73,10 @@ border: 1px gray solid; } +div.header { + margin-top: 15px; +} + div.footer { font-size: 80%; } diff --git a/master/templates/summary.html b/master/templates/summary.html --- a/master/templates/summary.html +++ b/master/templates/summary.html @@ -1,7 +1,7 @@ {% extends "layout.html" %} {% block morehead %} -' +