[pypy-commit] pypy numpy-record-dtypes: merge default
fijal
noreply at buildbot.pypy.org
Wed Mar 7 06:40:02 CET 2012
Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: numpy-record-dtypes
Changeset: r53252:d662c9c283b2
Date: 2012-03-06 21:39 -0800
http://bitbucket.org/pypy/pypy/changeset/d662c9c283b2/
Log: merge default
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -2431,6 +2431,47 @@
assert isinstance(s.items[1], annmodel.SomeChar)
assert isinstance(s.items[2], annmodel.SomeChar)
+ def test_multiple_mixins_mro(self):
+ # an obscure situation, but it occurred in module/micronumpy/types.py
+ class A(object):
+ _mixin_ = True
+ def foo(self): return 1
+ class B(A):
+ _mixin_ = True
+ def foo(self): return 2
+ class C(A):
+ _mixin_ = True
+ class D(B, C):
+ _mixin_ = True
+ class Concrete(D):
+ pass
+ def f():
+ return Concrete().foo()
+
+ assert f() == 2
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [])
+ assert s.const == 2
+
+ def test_multiple_mixins_mro_2(self):
+ class A(object):
+ _mixin_ = True
+ def foo(self): return 1
+ class B(A):
+ _mixin_ = True
+ def foo(self): return 2
+ class C(A):
+ _mixin_ = True
+ class Concrete(C, B):
+ pass
+ def f():
+ return Concrete().foo()
+
+ assert f() == 2
+ a = self.RPythonAnnotator()
+ s = a.build_types(f, [])
+ assert s.const == 2
+
def test___class___attribute(self):
class Base(object): pass
class A(Base): pass
diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py
--- a/pypy/interpreter/buffer.py
+++ b/pypy/interpreter/buffer.py
@@ -20,6 +20,7 @@
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.error import OperationError
from pypy.rlib.objectmodel import compute_hash
+from pypy.rlib.rstring import StringBuilder
class Buffer(Wrappable):
@@ -152,12 +153,13 @@
if space.isinstance_w(w_object, space.w_unicode):
# unicode objects support the old buffer interface
# but not the new buffer interface (change in python 2.7)
- from pypy.rlib.rstruct.unichar import pack_unichar
- charlist = []
- for unich in space.unicode_w(w_object):
- pack_unichar(unich, charlist)
+ from pypy.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE
+ unistr = space.unicode_w(w_object)
+ builder = StringBuilder(len(unistr) * UNICODE_SIZE)
+ for unich in unistr:
+ pack_unichar(unich, builder)
from pypy.interpreter.buffer import StringBuffer
- w_buffer = space.wrap(StringBuffer(''.join(charlist)))
+ w_buffer = space.wrap(StringBuffer(builder.build()))
else:
w_buffer = space.buffer(w_object)
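
The buffer.py hunk above replaces the old list-of-chars plus ''.join() with a
size-hinted StringBuilder so the packed bytes go straight into one buffer. A
minimal plain-Python sketch of the same pattern; StringBuilderSketch,
pack_unistr and UNICODE_SIZE = 4 are illustrative stand-ins, not the rlib
classes:

    # Stand-in for pypy.rlib.rstring.StringBuilder: size-hinted accumulation
    # plus a single build() at the end, instead of list-append + ''.join().
    class StringBuilderSketch(object):
        def __init__(self, size_hint=0):
            self._parts = []      # the real builder preallocates size_hint bytes
        def append(self, s):
            self._parts.append(s)
        def build(self):
            return b''.join(self._parts)

    UNICODE_SIZE = 4              # assumption: 4 bytes per unichar (wide build)

    def pack_unistr(unistr):
        builder = StringBuilderSketch(len(unistr) * UNICODE_SIZE)
        for unich in unistr:
            builder.append(unich.encode('utf-32-le'))   # stands in for pack_unichar()
        return builder.build()

    assert len(pack_unistr(u'ab')) == 2 * UNICODE_SIZE
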
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -171,7 +171,7 @@
'unicodesetitem' : (('ref', 'int', 'int'), 'int'),
'cast_ptr_to_int' : (('ref',), 'int'),
'cast_int_to_ptr' : (('int',), 'ref'),
- 'debug_merge_point': (('ref', 'int'), None),
+ 'debug_merge_point': (('ref', 'int', 'int'), None),
'force_token' : ((), 'int'),
'call_may_force' : (('int', 'varargs'), 'intorptr'),
'guard_not_forced': ((), None),
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -604,7 +604,7 @@
[funcbox, BoxInt(arg1), BoxInt(arg2)],
'int', descr=calldescr)
assert res.getint() == f(arg1, arg2)
-
+
def test_call_stack_alignment(self):
# test stack alignment issues, notably for Mac OS/X.
# also test the ordering of the arguments.
@@ -1490,7 +1490,8 @@
def test_noops(self):
c_box = self.alloc_string("hi there").constbox()
c_nest = ConstInt(0)
- self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void')
+ c_id = ConstInt(0)
+ self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void')
self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest,
c_nest, c_nest], 'void')
@@ -3061,7 +3062,7 @@
ResOperation(rop.JUMP, [i2], None, descr=targettoken2),
]
self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken)
-
+
fail = self.cpu.execute_token(looptoken, 2)
assert fail.identifier == 3
res = self.cpu.get_latest_value_int(0)
@@ -3106,7 +3107,7 @@
assert len(mc) == len(ops)
for i in range(len(mc)):
assert mc[i].split("\t")[-1].startswith(ops[i])
-
+
data = ctypes.string_at(info.asmaddr, info.asmlen)
mc = list(machine_code_dump(data, info.asmaddr, cpuname))
lines = [line for line in mc if line.count('\t') == 2]
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -88,7 +88,6 @@
self._debug = False
self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i')
self.fail_boxes_count = 0
- self._current_depths_cache = (0, 0)
self.datablockwrapper = None
self.stack_check_slowpath = 0
self.propagate_exception_path = 0
@@ -442,10 +441,8 @@
looppos = self.mc.get_relative_pos()
looptoken._x86_loop_code = looppos
clt.frame_depth = -1 # temporarily
- clt.param_depth = -1 # temporarily
- frame_depth, param_depth = self._assemble(regalloc, operations)
+ frame_depth = self._assemble(regalloc, operations)
clt.frame_depth = frame_depth
- clt.param_depth = param_depth
#
size_excluding_failure_stuff = self.mc.get_relative_pos()
self.write_pending_failure_recoveries()
@@ -459,8 +456,7 @@
rawstart + size_excluding_failure_stuff,
rawstart))
debug_stop("jit-backend-addr")
- self._patch_stackadjust(rawstart + stackadjustpos,
- frame_depth + param_depth)
+ self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
self.patch_pending_failure_recoveries(rawstart)
#
ops_offset = self.mc.ops_offset
@@ -500,14 +496,13 @@
assert ([loc.assembler() for loc in arglocs] ==
[loc.assembler() for loc in faildescr._x86_debug_faillocs])
regalloc = RegAlloc(self, self.cpu.translate_support_code)
- fail_depths = faildescr._x86_current_depths
startpos = self.mc.get_relative_pos()
- operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs,
+ operations = regalloc.prepare_bridge(inputargs, arglocs,
operations,
self.current_clt.allgcrefs)
stackadjustpos = self._patchable_stackadjust()
- frame_depth, param_depth = self._assemble(regalloc, operations)
+ frame_depth = self._assemble(regalloc, operations)
codeendpos = self.mc.get_relative_pos()
self.write_pending_failure_recoveries()
fullsize = self.mc.get_relative_pos()
@@ -517,19 +512,16 @@
debug_print("bridge out of Guard %d has address %x to %x" %
(descr_number, rawstart, rawstart + codeendpos))
debug_stop("jit-backend-addr")
- self._patch_stackadjust(rawstart + stackadjustpos,
- frame_depth + param_depth)
+ self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
self.patch_pending_failure_recoveries(rawstart)
if not we_are_translated():
# for the benefit of tests
faildescr._x86_bridge_frame_depth = frame_depth
- faildescr._x86_bridge_param_depth = param_depth
# patch the jump from original guard
self.patch_jump_for_descr(faildescr, rawstart)
ops_offset = self.mc.ops_offset
self.fixup_target_tokens(rawstart)
self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth)
- self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth)
self.teardown()
# oprofile support
if self.cpu.profile_agent is not None:
@@ -700,15 +692,12 @@
regalloc.walk_operations(operations)
if we_are_translated() or self.cpu.dont_keepalive_stuff:
self._regalloc = None # else keep it around for debugging
- frame_depth = regalloc.fm.get_frame_depth()
- param_depth = regalloc.param_depth
+ frame_depth = regalloc.get_final_frame_depth()
jump_target_descr = regalloc.jump_target_descr
if jump_target_descr is not None:
target_frame_depth = jump_target_descr._x86_clt.frame_depth
- target_param_depth = jump_target_descr._x86_clt.param_depth
frame_depth = max(frame_depth, target_frame_depth)
- param_depth = max(param_depth, target_param_depth)
- return frame_depth, param_depth
+ return frame_depth
def _patchable_stackadjust(self):
# stack adjustment LEA
@@ -892,10 +881,9 @@
genop_math_list[oopspecindex](self, op, arglocs, resloc)
def regalloc_perform_with_guard(self, op, guard_op, faillocs,
- arglocs, resloc, current_depths):
+ arglocs, resloc):
faildescr = guard_op.getdescr()
assert isinstance(faildescr, AbstractFailDescr)
- faildescr._x86_current_depths = current_depths
failargs = guard_op.getfailargs()
guard_opnum = guard_op.getopnum()
guard_token = self.implement_guard_recovery(guard_opnum,
@@ -911,10 +899,9 @@
# must be added by the genop_guard_list[]()
assert guard_token is self.pending_guard_tokens[-1]
- def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc,
- current_depths):
+ def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc):
self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs,
- resloc, current_depths)
+ resloc)
def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0):
self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale))
@@ -1038,13 +1025,14 @@
self.mc.MOV(tmp, loc)
self.mc.MOV_sr(p, tmp.value)
p += loc.get_width()
- self._regalloc.reserve_param(p//WORD)
# x is a location
self.mc.CALL(x)
self.mark_gc_roots(force_index)
#
if callconv != FFI_DEFAULT_ABI:
self._fix_stdcall(callconv, p)
+ #
+ self._regalloc.needed_extra_stack_locations(p//WORD)
def _fix_stdcall(self, callconv, p):
from pypy.rlib.clibffi import FFI_STDCALL
@@ -1127,9 +1115,9 @@
x = r10
remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG)
- self._regalloc.reserve_param(len(pass_on_stack))
self.mc.CALL(x)
self.mark_gc_roots(force_index)
+ self._regalloc.needed_extra_stack_locations(len(pass_on_stack))
def call(self, addr, args, res):
force_index = self.write_new_force_index()
@@ -2136,7 +2124,6 @@
if reg in save_registers:
self.mc.MOV_sr(p, reg.value)
p += WORD
- self._regalloc.reserve_param(p//WORD)
#
if gcrootmap.is_shadow_stack:
args = []
@@ -2192,6 +2179,7 @@
if reg in save_registers:
self.mc.MOV_rs(reg.value, p)
p += WORD
+ self._regalloc.needed_extra_stack_locations(p//WORD)
def call_reacquire_gil(self, gcrootmap, save_loc):
# save the previous result (eax/xmm0) into the stack temporarily.
@@ -2199,7 +2187,6 @@
# to save xmm0 in this case.
if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
self.mc.MOV_sr(WORD, save_loc.value)
- self._regalloc.reserve_param(2)
# call the reopenstack() function (also reacquiring the GIL)
if gcrootmap.is_shadow_stack:
args = []
@@ -2219,6 +2206,7 @@
# restore the result from the stack
if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
self.mc.MOV_rs(save_loc.value, WORD)
+ self._regalloc.needed_extra_stack_locations(2)
def genop_guard_call_assembler(self, op, guard_op, guard_token,
arglocs, result_loc):
@@ -2495,11 +2483,6 @@
# copy of heap(nursery_free_adr), so that the final MOV below is
# a no-op.
- # reserve room for the argument to the real malloc and the
- # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
- # word)
- self._regalloc.reserve_param(1+16)
-
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack)
if not shadow_stack:
@@ -2510,6 +2493,11 @@
slowpath_addr2 = self.malloc_slowpath2
self.mc.CALL(imm(slowpath_addr2))
+ # reserve room for the argument to the real malloc and the
+ # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
+ # word)
+ self._regalloc.needed_extra_stack_locations(1+16)
+
offset = self.mc.get_relative_pos() - jmp_adr
assert 0 < offset <= 127
self.mc.overwrite(jmp_adr-1, chr(offset))
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -168,7 +168,7 @@
def _prepare(self, inputargs, operations, allgcrefs):
self.fm = X86FrameManager()
- self.param_depth = 0
+ self.min_frame_depth = 0
cpu = self.assembler.cpu
operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
allgcrefs)
@@ -193,11 +193,9 @@
self.min_bytes_before_label = 13
return operations
- def prepare_bridge(self, prev_depths, inputargs, arglocs, operations,
- allgcrefs):
+ def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs):
operations = self._prepare(inputargs, operations, allgcrefs)
self._update_bindings(arglocs, inputargs)
- self.param_depth = prev_depths[1]
self.min_bytes_before_label = 0
return operations
@@ -205,8 +203,15 @@
self.min_bytes_before_label = max(self.min_bytes_before_label,
at_least_position)
- def reserve_param(self, n):
- self.param_depth = max(self.param_depth, n)
+ def needed_extra_stack_locations(self, n):
+ # call *after* you needed extra stack locations: (%esp), (%esp+4)...
+ min_frame_depth = self.fm.get_frame_depth() + n
+ if min_frame_depth > self.min_frame_depth:
+ self.min_frame_depth = min_frame_depth
+
+ def get_final_frame_depth(self):
+ self.needed_extra_stack_locations(0) # update min_frame_depth
+ return self.min_frame_depth
def _set_initial_bindings(self, inputargs):
if IS_X86_64:
@@ -376,25 +381,12 @@
def locs_for_fail(self, guard_op):
return [self.loc(v) for v in guard_op.getfailargs()]
- def get_current_depth(self):
- # return (self.fm.frame_depth, self.param_depth), but trying to share
- # the resulting tuple among several calls
- arg0 = self.fm.get_frame_depth()
- arg1 = self.param_depth
- result = self.assembler._current_depths_cache
- if result[0] != arg0 or result[1] != arg1:
- result = (arg0, arg1)
- self.assembler._current_depths_cache = result
- return result
-
def perform_with_guard(self, op, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
self.rm.position += 1
self.xrm.position += 1
- current_depths = self.get_current_depth()
self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs,
- arglocs, result_loc,
- current_depths)
+ arglocs, result_loc)
if op.result is not None:
self.possibly_free_var(op.result)
self.possibly_free_vars(guard_op.getfailargs())
@@ -407,10 +399,8 @@
arglocs))
else:
self.assembler.dump('%s(%s)' % (guard_op, arglocs))
- current_depths = self.get_current_depth()
self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
- result_loc,
- current_depths)
+ result_loc)
self.possibly_free_vars(guard_op.getfailargs())
def PerformDiscard(self, op, arglocs):
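
The regalloc.py changes above fold the old param_depth into a single minimal
frame depth: needed_extra_stack_locations(n) is called after n extra stack
words below the spill area were used (for example outgoing call arguments),
and get_final_frame_depth() returns the largest depth ever needed. A
self-contained sketch of that bookkeeping; FakeFrameManager and
FrameDepthTracker are made-up stand-ins for X86FrameManager and RegAlloc:

    class FakeFrameManager(object):
        def __init__(self):
            self.frame_depth = 0          # spill slots allocated so far
        def get_frame_depth(self):
            return self.frame_depth

    class FrameDepthTracker(object):
        def __init__(self, fm):
            self.fm = fm
            self.min_frame_depth = 0
        def needed_extra_stack_locations(self, n):
            # call *after* using n extra locations: (%esp), (%esp+4), ...
            needed = self.fm.get_frame_depth() + n
            if needed > self.min_frame_depth:
                self.min_frame_depth = needed
        def get_final_frame_depth(self):
            self.needed_extra_stack_locations(0)   # fold in plain spill slots too
            return self.min_frame_depth

    fm = FakeFrameManager()
    tracker = FrameDepthTracker(fm)
    fm.frame_depth = 3
    tracker.needed_extra_stack_locations(4)       # a call needed 4 words of arguments
    fm.frame_depth = 5                            # more spills happen later
    assert tracker.get_final_frame_depth() == 7   # 3 spill slots + 4 call words dominate
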
diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py
--- a/pypy/jit/backend/x86/test/test_gc_integration.py
+++ b/pypy/jit/backend/x86/test/test_gc_integration.py
@@ -28,7 +28,7 @@
class MockGcRootMap(object):
is_shadow_stack = False
- def get_basic_shape(self, is_64_bit):
+ def get_basic_shape(self):
return ['shape']
def add_frame_offset(self, shape, offset):
shape.append(offset)
diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py
--- a/pypy/jit/backend/x86/test/test_recompilation.py
+++ b/pypy/jit/backend/x86/test/test_recompilation.py
@@ -34,7 +34,6 @@
'''
loop = self.interpret(ops, [0])
previous = loop._jitcelltoken.compiled_loop_token.frame_depth
- assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
assert self.getint(0) == 20
ops = '''
[i1]
@@ -51,7 +50,6 @@
bridge = self.attach_bridge(ops, loop, -2)
descr = loop.operations[3].getdescr()
new = descr._x86_bridge_frame_depth
- assert descr._x86_bridge_param_depth == 0
# the force_spill() forces the stack to grow
assert new > previous
fail = self.run(loop, 0)
@@ -116,10 +114,8 @@
loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth
bridge = self.attach_bridge(ops, loop, 6)
guard_op = loop.operations[6]
- assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
# the force_spill() forces the stack to grow
assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
- assert guard_op.getdescr()._x86_bridge_param_depth == 0
self.run(loop, 0, 0, 0, 0, 0, 0)
assert self.getint(0) == 1
assert self.getint(1) == 20
diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py
--- a/pypy/jit/backend/x86/test/test_regalloc.py
+++ b/pypy/jit/backend/x86/test/test_regalloc.py
@@ -606,23 +606,37 @@
assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1]
class TestRegAllocCallAndStackDepth(BaseTestRegalloc):
- def expected_param_depth(self, num_args):
+ def expected_frame_depth(self, num_call_args, num_pushed_input_args=0):
# Assumes the arguments are all non-float
if IS_X86_32:
- return num_args
+ extra_esp = num_call_args
+ return extra_esp
elif IS_X86_64:
- return max(num_args - 6, 0)
+ # 'num_pushed_input_args' is for X86_64 only
+ extra_esp = max(num_call_args - 6, 0)
+ return num_pushed_input_args + extra_esp
def test_one_call(self):
ops = '''
- [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9]
+ [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b]
i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
- finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9)
+ finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
'''
- loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
- assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9]
+ loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8])
+ assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
clt = loop._jitcelltoken.compiled_loop_token
- assert clt.param_depth == self.expected_param_depth(1)
+ assert clt.frame_depth == self.expected_frame_depth(1, 5)
+
+ def test_one_call_reverse(self):
+ ops = '''
+ [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0]
+ i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
+ finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
+ '''
+ loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4])
+ assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
+ clt = loop._jitcelltoken.compiled_loop_token
+ assert clt.frame_depth == self.expected_frame_depth(1, 6)
def test_two_calls(self):
ops = '''
@@ -634,7 +648,7 @@
loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9]
clt = loop._jitcelltoken.compiled_loop_token
- assert clt.param_depth == self.expected_param_depth(2)
+ assert clt.frame_depth == self.expected_frame_depth(2, 5)
def test_call_many_arguments(self):
# NB: The first and last arguments in the call are constants. This
@@ -648,25 +662,31 @@
loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9])
assert self.getint(0) == 55
clt = loop._jitcelltoken.compiled_loop_token
- assert clt.param_depth == self.expected_param_depth(10)
+ assert clt.frame_depth == self.expected_frame_depth(10)
def test_bridge_calls_1(self):
ops = '''
[i0, i1]
i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
- guard_value(i2, 0, descr=fdescr1) [i2, i1]
+ guard_value(i2, 0, descr=fdescr1) [i2, i0, i1]
finish(i1)
'''
loop = self.interpret(ops, [4, 7])
assert self.getint(0) == 5
+ clt = loop._jitcelltoken.compiled_loop_token
+ orgdepth = clt.frame_depth
+ assert orgdepth == self.expected_frame_depth(1, 2)
+
ops = '''
- [i2, i1]
+ [i2, i0, i1]
i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr)
- finish(i3, descr=fdescr2)
+ finish(i3, i0, descr=fdescr2)
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2))
+ assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \
+ self.expected_frame_depth(2, 2)
self.run(loop, 4, 7)
assert self.getint(0) == 5*7
@@ -676,10 +696,14 @@
[i0, i1]
i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr)
guard_value(i2, 0, descr=fdescr1) [i2]
- finish(i1)
+ finish(i2)
'''
loop = self.interpret(ops, [4, 7])
assert self.getint(0) == 4*7
+ clt = loop._jitcelltoken.compiled_loop_token
+ orgdepth = clt.frame_depth
+ assert orgdepth == self.expected_frame_depth(2)
+
ops = '''
[i2]
i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr)
@@ -687,7 +711,9 @@
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1))
+ assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \
+ self.expected_frame_depth(1)
self.run(loop, 4, 7)
assert self.getint(0) == 29
diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py
--- a/pypy/jit/backend/x86/test/test_runner.py
+++ b/pypy/jit/backend/x86/test/test_runner.py
@@ -371,7 +371,7 @@
operations = [
ResOperation(rop.LABEL, [i0], None, descr=targettoken),
- ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None),
+ ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None),
ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1),
ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2),
ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1),
@@ -390,7 +390,7 @@
bridge = [
ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3),
ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
- ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None),
+ ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None),
ResOperation(rop.JUMP, [i1b], None, descr=targettoken),
]
bridge[1].setfailargs([i1b])
@@ -531,12 +531,12 @@
loop = """
[i0]
label(i0, descr=preambletoken)
- debug_merge_point('xyz', 0)
+ debug_merge_point('xyz', 0, 0)
i1 = int_add(i0, 1)
i2 = int_ge(i1, 10)
guard_false(i2) []
label(i1, descr=targettoken)
- debug_merge_point('xyz', 0)
+ debug_merge_point('xyz', 0, 0)
i11 = int_add(i1, 1)
i12 = int_ge(i11, 10)
guard_false(i12) []
@@ -569,7 +569,7 @@
loop = """
[i0]
label(i0, descr=targettoken)
- debug_merge_point('xyz', 0)
+ debug_merge_point('xyz', 0, 0)
i1 = int_add(i0, 1)
i2 = int_ge(i1, 10)
guard_false(i2) []
diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py
--- a/pypy/jit/metainterp/graphpage.py
+++ b/pypy/jit/metainterp/graphpage.py
@@ -169,9 +169,9 @@
if op.getopnum() == rop.DEBUG_MERGE_POINT:
jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
if jd_sd._get_printable_location_ptr:
- s = jd_sd.warmstate.get_location_str(op.getarglist()[2:])
+ s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
s = s.replace(',', '.') # we use comma for argument splitting
- op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s)
+ op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s)
lines.append(op_repr)
if is_interesting_guard(op):
tgt = op.getdescr()._debug_suboperations[0]
diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py
--- a/pypy/jit/metainterp/logger.py
+++ b/pypy/jit/metainterp/logger.py
@@ -110,9 +110,9 @@
def repr_of_resop(self, op, ops_offset=None):
if op.getopnum() == rop.DEBUG_MERGE_POINT:
jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
- s = jd_sd.warmstate.get_location_str(op.getarglist()[2:])
+ s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
s = s.replace(',', '.') # we use comma for argument splitting
- return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s)
+ return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s)
if ops_offset is None:
offset = -1
else:
@@ -149,7 +149,7 @@
if target_token.exported_state:
for op in target_token.exported_state.inputarg_setup_ops:
debug_print(' ' + self.repr_of_resop(op))
-
+
def _log_operations(self, inputargs, operations, ops_offset):
if not have_debug_prints():
return
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -5031,6 +5031,42 @@
"""
self.optimize_loop(ops, expected)
+ def test_str_copy_virtual(self):
+ ops = """
+ [i0]
+ p0 = newstr(8)
+ strsetitem(p0, 0, i0)
+ strsetitem(p0, 1, i0)
+ strsetitem(p0, 2, i0)
+ strsetitem(p0, 3, i0)
+ strsetitem(p0, 4, i0)
+ strsetitem(p0, 5, i0)
+ strsetitem(p0, 6, i0)
+ strsetitem(p0, 7, i0)
+ p1 = newstr(12)
+ copystrcontent(p0, p1, 0, 0, 8)
+ strsetitem(p1, 8, 3)
+ strsetitem(p1, 9, 0)
+ strsetitem(p1, 10, 0)
+ strsetitem(p1, 11, 0)
+ finish(p1)
+ """
+ expected = """
+ [i0]
+ p1 = newstr(12)
+ strsetitem(p1, 0, i0)
+ strsetitem(p1, 1, i0)
+ strsetitem(p1, 2, i0)
+ strsetitem(p1, 3, i0)
+ strsetitem(p1, 4, i0)
+ strsetitem(p1, 5, i0)
+ strsetitem(p1, 6, i0)
+ strsetitem(p1, 7, i0)
+ strsetitem(p1, 8, 3)
+ finish(p1)
+ """
+ self.optimize_strunicode_loop(ops, expected)
+
class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
pass
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/vstring.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -505,14 +505,23 @@
if length.is_constant() and length.box.getint() == 0:
return
- copy_str_content(self,
- src.force_box(self),
- dst.force_box(self),
- srcstart.force_box(self),
- dststart.force_box(self),
- length.force_box(self),
- mode, need_next_offset=False
- )
+ elif (src.is_virtual() and dst.is_virtual() and srcstart.is_constant() and
+ dststart.is_constant() and length.is_constant()):
+
+ src_start = srcstart.force_box(self).getint()
+ dst_start = dststart.force_box(self).getint()
+ for index in range(length.force_box(self).getint()):
+ vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode)
+ dst.setitem(index + dst_start, vresult)
+ else:
+ copy_str_content(self,
+ src.force_box(self),
+ dst.force_box(self),
+ srcstart.force_box(self),
+ dststart.force_box(self),
+ length.force_box(self),
+ mode, need_next_offset=False
+ )
def optimize_CALL(self, op):
# dispatch based on 'oopspecindex' to a method that handles
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -974,9 +974,11 @@
any_operation = len(self.metainterp.history.operations) > 0
jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex]
self.verify_green_args(jitdriver_sd, greenboxes)
- self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth,
+ self.debug_merge_point(jitdriver_sd, jdindex,
+ self.metainterp.portal_call_depth,
+ self.metainterp.call_ids[-1],
greenboxes)
-
+
if self.metainterp.seen_loop_header_for_jdindex < 0:
if not any_operation:
return
@@ -1028,11 +1030,11 @@
assembler_call=True)
raise ChangeFrame
- def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey):
+ def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey):
# debugging: produce a DEBUG_MERGE_POINT operation
loc = jitdriver_sd.warmstate.get_location_str(greenkey)
debug_print(loc)
- args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey
+ args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey
self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None)
@arguments("box", "label")
@@ -1574,11 +1576,14 @@
self.call_pure_results = args_dict_box()
self.heapcache = HeapCache()
+ self.call_ids = []
+ self.current_call_id = 0
+
def retrace_needed(self, trace):
self.partial_trace = trace
self.retracing_from = len(self.history.operations) - 1
self.heapcache.reset()
-
+
def perform_call(self, jitcode, boxes, greenkey=None):
# causes the metainterp to enter the given subfunction
@@ -1592,6 +1597,8 @@
def newframe(self, jitcode, greenkey=None):
if jitcode.is_portal:
self.portal_call_depth += 1
+ self.call_ids.append(self.current_call_id)
+ self.current_call_id += 1
if greenkey is not None and self.is_main_jitcode(jitcode):
self.portal_trace_positions.append(
(greenkey, len(self.history.operations)))
@@ -1608,6 +1615,7 @@
jitcode = frame.jitcode
if jitcode.is_portal:
self.portal_call_depth -= 1
+ self.call_ids.pop()
if frame.greenkey is not None and self.is_main_jitcode(jitcode):
self.portal_trace_positions.append(
(None, len(self.history.operations)))
@@ -1976,7 +1984,7 @@
# Found! Compile it as a loop.
# raises in case it works -- which is the common case
if self.partial_trace:
- if start != self.retracing_from:
+ if start != self.retracing_from:
raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now
self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr)
# creation of the loop was cancelled!
@@ -2085,7 +2093,7 @@
if not token.target_tokens:
return None
return token
-
+
def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr):
num_green_args = self.jitdriver_sd.num_green_args
greenkey = original_boxes[:num_green_args]
diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py
--- a/pypy/jit/metainterp/test/test_logger.py
+++ b/pypy/jit/metainterp/test/test_logger.py
@@ -54,7 +54,7 @@
class FakeJitDriver(object):
class warmstate(object):
get_location_str = staticmethod(lambda args: "dupa")
-
+
class FakeMetaInterpSd:
cpu = AbstractCPU()
cpu.ts = self.ts
@@ -77,7 +77,7 @@
equaloplists(loop.operations, oloop.operations)
assert oloop.inputargs == loop.inputargs
return logger, loop, oloop
-
+
def test_simple(self):
inp = '''
[i0, i1, i2, p3, p4, p5]
@@ -116,12 +116,13 @@
def test_debug_merge_point(self):
inp = '''
[]
- debug_merge_point(0, 0)
+ debug_merge_point(0, 0, 0)
'''
_, loop, oloop = self.reparse(inp, check_equal=False)
assert loop.operations[0].getarg(1).getint() == 0
- assert oloop.operations[0].getarg(1)._get_str() == "dupa"
-
+ assert loop.operations[0].getarg(2).getint() == 0
+ assert oloop.operations[0].getarg(2)._get_str() == "dupa"
+
def test_floats(self):
inp = '''
[f0]
@@ -142,7 +143,7 @@
output = logger.log_loop(loop)
assert output.splitlines()[-1] == "jump(i0, descr=<Loop3>)"
pure_parse(output)
-
+
def test_guard_descr(self):
namespace = {'fdescr': BasicFailDescr()}
inp = '''
@@ -154,7 +155,7 @@
output = logger.log_loop(loop)
assert output.splitlines()[-1] == "guard_true(i0, descr=<Guard0>) [i0]"
pure_parse(output)
-
+
logger = Logger(self.make_metainterp_sd(), guard_number=False)
output = logger.log_loop(loop)
lastline = output.splitlines()[-1]
diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py
--- a/pypy/jit/metainterp/test/test_warmspot.py
+++ b/pypy/jit/metainterp/test/test_warmspot.py
@@ -13,7 +13,7 @@
class WarmspotTests(object):
-
+
def test_basic(self):
mydriver = JitDriver(reds=['a'],
greens=['i'])
@@ -77,16 +77,16 @@
self.meta_interp(f, [123, 10])
assert len(get_stats().locations) >= 4
for loc in get_stats().locations:
- assert loc == (0, 123)
+ assert loc == (0, 0, 123)
def test_set_param_enable_opts(self):
from pypy.rpython.annlowlevel import llstr, hlstr
-
+
myjitdriver = JitDriver(greens = [], reds = ['n'])
class A(object):
def m(self, n):
return n-1
-
+
def g(n):
while n > 0:
myjitdriver.can_enter_jit(n=n)
@@ -332,7 +332,7 @@
ts = llhelper
translate_support_code = False
stats = "stats"
-
+
def get_fail_descr_number(self, d):
return -1
@@ -352,7 +352,7 @@
return "not callable"
driver = JitDriver(reds = ['red'], greens = ['green'])
-
+
def f(green):
red = 0
while red < 10:
diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py
--- a/pypy/jit/tool/test/test_oparser.py
+++ b/pypy/jit/tool/test/test_oparser.py
@@ -146,16 +146,18 @@
def test_debug_merge_point(self):
x = '''
[]
- debug_merge_point(0, "info")
- debug_merge_point(0, 'info')
- debug_merge_point(1, '<some ('other.')> info')
- debug_merge_point(0, '(stuff) #1')
+ debug_merge_point(0, 0, "info")
+ debug_merge_point(0, 0, 'info')
+ debug_merge_point(1, 1, '<some ('other.')> info')
+ debug_merge_point(0, 0, '(stuff) #1')
'''
loop = self.parse(x)
- assert loop.operations[0].getarg(1)._get_str() == 'info'
- assert loop.operations[1].getarg(1)._get_str() == 'info'
- assert loop.operations[2].getarg(1)._get_str() == "<some ('other.')> info"
- assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1"
+ assert loop.operations[0].getarg(2)._get_str() == 'info'
+ assert loop.operations[0].getarg(1).value == 0
+ assert loop.operations[1].getarg(2)._get_str() == 'info'
+ assert loop.operations[2].getarg(2)._get_str() == "<some ('other.')> info"
+ assert loop.operations[2].getarg(1).value == 1
+ assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1"
def test_descr_with_obj_print(self):
diff --git a/pypy/module/_ffi/test/test__ffi.py b/pypy/module/_ffi/test/test__ffi.py
--- a/pypy/module/_ffi/test/test__ffi.py
+++ b/pypy/module/_ffi/test/test__ffi.py
@@ -100,7 +100,10 @@
from _ffi import CDLL, types
libm = CDLL(self.libm_name)
pow_addr = libm.getaddressindll('pow')
- assert pow_addr == self.pow_addr & (sys.maxint*2-1)
+ fff = sys.maxint*2-1
+ if sys.platform == 'win32':
+ fff = sys.maxint*2+1
+ assert pow_addr == self.pow_addr & fff
def test_func_fromaddr(self):
import sys
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -22,7 +22,7 @@
eci = ExternalCompilationInfo(
separate_module_files=[srcdir.join('profiling.c')],
export_symbols=['pypy_setup_profiling', 'pypy_teardown_profiling'])
-
+
c_setup_profiling = rffi.llexternal('pypy_setup_profiling',
[], lltype.Void,
compilation_info = eci)
@@ -228,7 +228,7 @@
if w_self.builtins:
key = create_spec(space, w_arg)
w_self._enter_builtin_call(key)
- elif event == 'c_return':
+ elif event == 'c_return' or event == 'c_exception':
if w_self.builtins:
key = create_spec(space, w_arg)
w_self._enter_builtin_return(key)
@@ -237,7 +237,7 @@
pass
class W_Profiler(Wrappable):
-
+
def __init__(self, space, w_callable, time_unit, subcalls, builtins):
self.subcalls = subcalls
self.builtins = builtins
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py
--- a/pypy/module/_lsprof/test/test_cprofile.py
+++ b/pypy/module/_lsprof/test/test_cprofile.py
@@ -117,6 +117,20 @@
assert 0.9 < subentry.totaltime < 2.9
#assert 0.9 < subentry.inlinetime < 2.9
+ def test_builtin_exception(self):
+ import math
+ import _lsprof
+
+ prof = _lsprof.Profiler()
+ prof.enable()
+ try:
+ math.sqrt("a")
+ except TypeError:
+ pass
+ prof.disable()
+ stats = prof.getstats()
+ assert len(stats) == 2
+
def test_use_cprofile(self):
import sys, os
# XXX this is evil trickery to walk around the fact that we don't
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -352,6 +352,9 @@
'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer',
'PyOS_getsig', 'PyOS_setsig',
+ 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value',
+ 'PyThread_get_key_value', 'PyThread_delete_key_value',
+ 'PyThread_ReInitTLS',
'PyStructSequence_InitType', 'PyStructSequence_New',
]
@@ -617,6 +620,10 @@
lambda space: init_pycobject(),
lambda space: init_capsule(),
])
+ from pypy.module.posix.interp_posix import add_fork_hook
+ reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void,
+ compilation_info=eci)
+ add_fork_hook('child', reinit_tls)
def init_function(func):
INIT_FUNCTIONS.append(func)
@@ -926,6 +933,7 @@
source_dir / "structseq.c",
source_dir / "capsule.c",
source_dir / "pysignals.c",
+ source_dir / "thread.c",
],
separate_module_sources=separate_module_sources,
export_symbols=export_symbols_eci,
diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h
--- a/pypy/module/cpyext/include/pythread.h
+++ b/pypy/module/cpyext/include/pythread.h
@@ -3,8 +3,26 @@
#define WITH_THREAD
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef void *PyThread_type_lock;
#define WAIT_LOCK 1
#define NOWAIT_LOCK 0
+/* Thread Local Storage (TLS) API */
+PyAPI_FUNC(int) PyThread_create_key(void);
+PyAPI_FUNC(void) PyThread_delete_key(int);
+PyAPI_FUNC(int) PyThread_set_key_value(int, void *);
+PyAPI_FUNC(void *) PyThread_get_key_value(int);
+PyAPI_FUNC(void) PyThread_delete_key_value(int key);
+
+/* Cleanup after a fork */
+PyAPI_FUNC(void) PyThread_ReInitTLS(void);
+
+#ifdef __cplusplus
+}
#endif
+
+#endif
diff --git a/pypy/module/cpyext/src/thread.c b/pypy/module/cpyext/src/thread.c
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/src/thread.c
@@ -0,0 +1,313 @@
+#include <Python.h>
+#include "pythread.h"
+
+/* ------------------------------------------------------------------------
+Per-thread data ("key") support.
+
+Use PyThread_create_key() to create a new key. This is typically shared
+across threads.
+
+Use PyThread_set_key_value(thekey, value) to associate void* value with
+thekey in the current thread. Each thread has a distinct mapping of thekey
+to a void* value. Caution: if the current thread already has a mapping
+for thekey, value is ignored.
+
+Use PyThread_get_key_value(thekey) to retrieve the void* value associated
+with thekey in the current thread. This returns NULL if no value is
+associated with thekey in the current thread.
+
+Use PyThread_delete_key_value(thekey) to forget the current thread's associated
+value for thekey. PyThread_delete_key(thekey) forgets the values associated
+with thekey across *all* threads.
+
+While some of these functions have error-return values, none set any
+Python exception.
+
+None of the functions does memory management on behalf of the void* values.
+You need to allocate and deallocate them yourself. If the void* values
+happen to be PyObject*, these functions don't do refcount operations on
+them either.
+
+The GIL does not need to be held when calling these functions; they supply
+their own locking. This isn't true of PyThread_create_key(), though (see
+next paragraph).
+
+There's a hidden assumption that PyThread_create_key() will be called before
+any of the other functions are called. There's also a hidden assumption
+that calls to PyThread_create_key() are serialized externally.
+------------------------------------------------------------------------ */
+
+#ifdef MS_WINDOWS
+#include <windows.h>
+
+/* use native Windows TLS functions */
+#define Py_HAVE_NATIVE_TLS
+
+int
+PyThread_create_key(void)
+{
+ return (int) TlsAlloc();
+}
+
+void
+PyThread_delete_key(int key)
+{
+ TlsFree(key);
+}
+
+/* We must be careful to emulate the strange semantics implemented in thread.c,
+ * where the value is only set if it hasn't been set before.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+ BOOL ok;
+ void *oldvalue;
+
+ assert(value != NULL);
+ oldvalue = TlsGetValue(key);
+ if (oldvalue != NULL)
+ /* ignore value if already set */
+ return 0;
+ ok = TlsSetValue(key, value);
+ if (!ok)
+ return -1;
+ return 0;
+}
+
+void *
+PyThread_get_key_value(int key)
+{
+ /* because TLS is used in the Py_END_ALLOW_THREAD macro,
+ * it is necessary to preserve the windows error state, because
+ * it is assumed to be preserved across the call to the macro.
+ * Ideally, the macro should be fixed, but it is simpler to
+ * do it here.
+ */
+ DWORD error = GetLastError();
+ void *result = TlsGetValue(key);
+ SetLastError(error);
+ return result;
+}
+
+void
+PyThread_delete_key_value(int key)
+{
+ /* NULL is used as "key missing", and it is also the default
+ * given by TlsGetValue() if nothing has been set yet.
+ */
+ TlsSetValue(key, NULL);
+}
+
+/* reinitialization of TLS is not necessary after fork when using
+ * the native TLS functions. And forking isn't supported on Windows either.
+ */
+void
+PyThread_ReInitTLS(void)
+{}
+
+#else /* MS_WINDOWS */
+
+/* A singly-linked list of struct key objects remembers all the key->value
+ * associations. File static keyhead heads the list. keymutex is used
+ * to enforce exclusion internally.
+ */
+struct key {
+ /* Next record in the list, or NULL if this is the last record. */
+ struct key *next;
+
+ /* The thread id, according to PyThread_get_thread_ident(). */
+ long id;
+
+ /* The key and its associated value. */
+ int key;
+ void *value;
+};
+
+static struct key *keyhead = NULL;
+static PyThread_type_lock keymutex = NULL;
+static int nkeys = 0; /* PyThread_create_key() hands out nkeys+1 next */
+
+/* Internal helper.
+ * If the current thread has a mapping for key, the appropriate struct key*
+ * is returned. NB: value is ignored in this case!
+ * If there is no mapping for key in the current thread, then:
+ * If value is NULL, NULL is returned.
+ * Else a mapping of key to value is created for the current thread,
+ * and a pointer to a new struct key* is returned; except that if
+ * malloc() can't find room for a new struct key*, NULL is returned.
+ * So when value==NULL, this acts like a pure lookup routine, and when
+ * value!=NULL, this acts like dict.setdefault(), returning an existing
+ * mapping if one exists, else creating a new mapping.
+ *
+ * Caution: this used to be too clever, trying to hold keymutex only
+ * around the "p->next = keyhead; keyhead = p" pair. That allowed
+ * another thread to mutate the list, via key deletion, concurrent with
+ * find_key() crawling over the list. Hilarity ensued. For example, when
+ * the for-loop here does "p = p->next", p could end up pointing at a
+ * record that PyThread_delete_key_value() was concurrently free()'ing.
+ * That could lead to anything, from failing to find a key that exists, to
+ * segfaults. Now we lock the whole routine.
+ */
+static struct key *
+find_key(int key, void *value)
+{
+ struct key *p, *prev_p;
+ long id = PyThread_get_thread_ident();
+
+ if (!keymutex)
+ return NULL;
+ PyThread_acquire_lock(keymutex, 1);
+ prev_p = NULL;
+ for (p = keyhead; p != NULL; p = p->next) {
+ if (p->id == id && p->key == key)
+ goto Done;
+ /* Sanity check. These states should never happen but if
+ * they do we must abort. Otherwise we'll end up spinning in
+ * in a tight loop with the lock held. A similar check is done
+ * in pystate.c tstate_delete_common(). */
+ if (p == prev_p)
+ Py_FatalError("tls find_key: small circular list(!)");
+ prev_p = p;
+ if (p->next == keyhead)
+ Py_FatalError("tls find_key: circular list(!)");
+ }
+ if (value == NULL) {
+ assert(p == NULL);
+ goto Done;
+ }
+ p = (struct key *)malloc(sizeof(struct key));
+ if (p != NULL) {
+ p->id = id;
+ p->key = key;
+ p->value = value;
+ p->next = keyhead;
+ keyhead = p;
+ }
+ Done:
+ PyThread_release_lock(keymutex);
+ return p;
+}
+
+/* Return a new key. This must be called before any other functions in
+ * this family, and callers must arrange to serialize calls to this
+ * function. No violations are detected.
+ */
+int
+PyThread_create_key(void)
+{
+ /* All parts of this function are wrong if it's called by multiple
+ * threads simultaneously.
+ */
+ if (keymutex == NULL)
+ keymutex = PyThread_allocate_lock();
+ return ++nkeys;
+}
+
+/* Forget the associations for key across *all* threads. */
+void
+PyThread_delete_key(int key)
+{
+ struct key *p, **q;
+
+ PyThread_acquire_lock(keymutex, 1);
+ q = &keyhead;
+ while ((p = *q) != NULL) {
+ if (p->key == key) {
+ *q = p->next;
+ free((void *)p);
+ /* NB This does *not* free p->value! */
+ }
+ else
+ q = &p->next;
+ }
+ PyThread_release_lock(keymutex);
+}
+
+/* Confusing: If the current thread has an association for key,
+ * value is ignored, and 0 is returned. Else an attempt is made to create
+ * an association of key to value for the current thread. 0 is returned
+ * if that succeeds, but -1 is returned if there's not enough memory
+ * to create the association. value must not be NULL.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+ struct key *p;
+
+ assert(value != NULL);
+ p = find_key(key, value);
+ if (p == NULL)
+ return -1;
+ else
+ return 0;
+}
+
+/* Retrieve the value associated with key in the current thread, or NULL
+ * if the current thread doesn't have an association for key.
+ */
+void *
+PyThread_get_key_value(int key)
+{
+ struct key *p = find_key(key, NULL);
+
+ if (p == NULL)
+ return NULL;
+ else
+ return p->value;
+}
+
+/* Forget the current thread's association for key, if any. */
+void
+PyThread_delete_key_value(int key)
+{
+ long id = PyThread_get_thread_ident();
+ struct key *p, **q;
+
+ PyThread_acquire_lock(keymutex, 1);
+ q = &keyhead;
+ while ((p = *q) != NULL) {
+ if (p->key == key && p->id == id) {
+ *q = p->next;
+ free((void *)p);
+ /* NB This does *not* free p->value! */
+ break;
+ }
+ else
+ q = &p->next;
+ }
+ PyThread_release_lock(keymutex);
+}
+
+/* Forget everything not associated with the current thread id.
+ * This function is called from PyOS_AfterFork(). It is necessary
+ * because other thread ids which were in use at the time of the fork
+ * may be reused for new threads created in the forked process.
+ */
+void
+PyThread_ReInitTLS(void)
+{
+ long id = PyThread_get_thread_ident();
+ struct key *p, **q;
+
+ if (!keymutex)
+ return;
+
+ /* As with interpreter_lock in PyEval_ReInitThreads()
+ we just create a new lock without freeing the old one */
+ keymutex = PyThread_allocate_lock();
+
+ /* Delete all keys which do not match the current thread id */
+ q = &keyhead;
+ while ((p = *q) != NULL) {
+ if (p->id != id) {
+ *q = p->next;
+ free((void *)p);
+ /* NB This does *not* free p->value! */
+ }
+ else
+ q = &p->next;
+ }
+}
+
+#endif /* !MS_WINDOWS */
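
The long comment at the top of the new thread.c documents the key/value
semantics: set_key_value() behaves like dict.setdefault() per thread,
get_key_value() returns NULL when nothing is set, delete_key_value() forgets
the current thread's entry, and delete_key() forgets the key in all threads.
A pure-Python model of those documented semantics, for illustration only
(TLSKeys is a made-up name, not part of cpyext):

    import threading

    class TLSKeys(object):
        def __init__(self):
            self._lock = threading.Lock()
            self._map = {}                 # (thread_id, key) -> value
            self._nkeys = 0

        def create_key(self):
            with self._lock:
                self._nkeys += 1
                return self._nkeys

        def set_key_value(self, key, value):
            # like dict.setdefault(): an existing association wins, value is ignored
            tid = threading.current_thread().ident
            with self._lock:
                self._map.setdefault((tid, key), value)
            return 0

        def get_key_value(self, key):
            tid = threading.current_thread().ident
            with self._lock:
                return self._map.get((tid, key))   # None plays the role of NULL

        def delete_key_value(self, key):
            tid = threading.current_thread().ident
            with self._lock:
                self._map.pop((tid, key), None)

        def delete_key(self, key):
            # forget the association for this key in *all* threads
            with self._lock:
                for k in [k for k in self._map if k[1] == key]:
                    del self._map[k]

    keys = TLSKeys()
    k = keys.create_key()
    keys.set_key_value(k, 123)
    keys.set_key_value(k, 456)             # ignored: the first value sticks
    assert keys.get_key_value(k) == 123
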
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -5,6 +5,7 @@
from pypy.module.thread.ll_thread import allocate_ll_lock
from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
class TestPyThread(BaseApiTest):
@@ -38,3 +39,51 @@
api.PyThread_release_lock(lock)
assert api.PyThread_acquire_lock(lock, 0) == 1
api.PyThread_free_lock(lock)
+
+
+class AppTestThread(AppTestCpythonExtensionBase):
+ def test_tls(self):
+ module = self.import_extension('foo', [
+ ("create_key", "METH_NOARGS",
+ """
+ return PyInt_FromLong(PyThread_create_key());
+ """),
+ ("test_key", "METH_O",
+ """
+ int key = PyInt_AsLong(args);
+ if (PyThread_get_key_value(key) != NULL) {
+ PyErr_SetNone(PyExc_ValueError);
+ return NULL;
+ }
+ if (PyThread_set_key_value(key, (void*)123) < 0) {
+ PyErr_SetNone(PyExc_ValueError);
+ return NULL;
+ }
+ if (PyThread_get_key_value(key) != (void*)123) {
+ PyErr_SetNone(PyExc_ValueError);
+ return NULL;
+ }
+ Py_RETURN_NONE;
+ """),
+ ])
+ key = module.create_key()
+ assert key > 0
+ # Test value in main thread.
+ module.test_key(key)
+ raises(ValueError, module.test_key, key)
+ # Same test, in another thread.
+ result = []
+ import thread, time
+ def in_thread():
+ try:
+ module.test_key(key)
+ raises(ValueError, module.test_key, key)
+ except Exception, e:
+ result.append(e)
+ else:
+ result.append(True)
+ thread.start_new_thread(in_thread, ())
+ while not result:
+ print "."
+ time.sleep(.5)
+ assert result == [True]
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -178,7 +178,7 @@
@cpython_api([], Py_UNICODE, error=CANNOT_FAIL)
def PyUnicode_GetMax(space):
"""Get the maximum ordinal for a Unicode character."""
- return unichr(runicode.MAXUNICODE)
+ return runicode.UNICHR(runicode.MAXUNICODE)
@cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL)
def PyUnicode_AS_DATA(space, ref):
diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py
--- a/pypy/module/pypyjit/interp_resop.py
+++ b/pypy/module/pypyjit/interp_resop.py
@@ -72,7 +72,7 @@
Set a compiling hook that will be called each time a loop is optimized,
but before assembler compilation. This allows to add additional
optimizations on Python level.
-
+
The hook will be called with the following signature:
hook(jitdriver_name, loop_type, greenkey or guard_number, operations)
@@ -121,13 +121,14 @@
ofs = ops_offset.get(op, 0)
if op.opnum == rop.DEBUG_MERGE_POINT:
jd_sd = jitdrivers_sd[op.getarg(0).getint()]
- greenkey = op.getarglist()[2:]
+ greenkey = op.getarglist()[3:]
repr = jd_sd.warmstate.get_location_str(greenkey)
w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr)
l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op),
logops.repr_of_resop(op),
jd_sd.jitdriver.name,
op.getarg(1).getint(),
+ op.getarg(2).getint(),
w_greenkey))
else:
l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs,
@@ -164,14 +165,16 @@
llres = res.llbox
return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr)
-@unwrap_spec(repr=str, jd_name=str, call_depth=int)
-def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, w_greenkey):
+@unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int)
+def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id,
+ w_greenkey):
+
args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in
space.listview(w_args)]
num = rop.DEBUG_MERGE_POINT
return DebugMergePoint(space,
jit_hooks.resop_new(num, args, jit_hooks.emptyval()),
- repr, jd_name, call_depth, w_greenkey)
+ repr, jd_name, call_depth, call_id, w_greenkey)
class WrappedOp(Wrappable):
""" A class representing a single ResOperation, wrapped nicely
@@ -206,10 +209,13 @@
jit_hooks.resop_setresult(self.op, box.llbox)
class DebugMergePoint(WrappedOp):
- def __init__(self, space, op, repr_of_resop, jd_name, call_depth, w_greenkey):
+ def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id,
+ w_greenkey):
+
WrappedOp.__init__(self, op, -1, repr_of_resop)
self.jd_name = jd_name
self.call_depth = call_depth
+ self.call_id = call_id
self.w_greenkey = w_greenkey
def get_pycode(self, space):
@@ -246,6 +252,7 @@
pycode = GetSetProperty(DebugMergePoint.get_pycode),
bytecode_no = GetSetProperty(DebugMergePoint.get_bytecode_no),
call_depth = interp_attrproperty("call_depth", cls=DebugMergePoint),
+ call_id = interp_attrproperty("call_id", cls=DebugMergePoint),
jitdriver_name = GetSetProperty(DebugMergePoint.get_jitdriver_name),
)
DebugMergePoint.acceptable_as_base_class = False
diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py
--- a/pypy/module/pypyjit/policy.py
+++ b/pypy/module/pypyjit/policy.py
@@ -127,7 +127,7 @@
'imp', 'sys', 'array', '_ffi', 'itertools', 'operator',
'posix', '_socket', '_sre', '_lsprof', '_weakref',
'__pypy__', 'cStringIO', '_collections', 'struct',
- 'mmap', 'marshal', '_codecs']:
+ 'mmap', 'marshal', '_codecs', 'rctime']:
if modname == 'pypyjit' and 'interp_resop' in rest:
return False
return True
diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py
--- a/pypy/module/pypyjit/test/test_jit_hook.py
+++ b/pypy/module/pypyjit/test/test_jit_hook.py
@@ -54,7 +54,7 @@
oplist = parse("""
[i1, i2, p2]
i3 = int_add(i1, i2)
- debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0))
+ debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0))
guard_nonnull(p2) []
guard_true(i3) []
""", namespace={'ptr0': code_gcref}).operations
@@ -87,7 +87,7 @@
def interp_on_abort():
pypy_hooks.on_abort(ABORT_TOO_LONG, pypyjitdriver, greenkey,
'blah')
-
+
cls.w_on_compile = space.wrap(interp2app(interp_on_compile))
cls.w_on_compile_bridge = space.wrap(interp2app(interp_on_compile_bridge))
cls.w_on_abort = space.wrap(interp2app(interp_on_abort))
@@ -105,7 +105,7 @@
def hook(name, looptype, tuple_or_guard_no, ops, asmstart, asmlen):
all.append((name, looptype, tuple_or_guard_no, ops))
-
+
self.on_compile()
pypyjit.set_compile_hook(hook)
assert not all
@@ -123,6 +123,7 @@
assert dmp.pycode is self.f.func_code
assert dmp.greenkey == (self.f.func_code, 0, False)
assert dmp.call_depth == 0
+ assert dmp.call_id == 0
assert int_add.name == 'int_add'
assert int_add.num == self.int_add_num
self.on_compile_bridge()
@@ -151,18 +152,18 @@
def test_non_reentrant(self):
import pypyjit
l = []
-
+
def hook(*args):
l.append(None)
self.on_compile()
self.on_compile_bridge()
-
+
pypyjit.set_compile_hook(hook)
self.on_compile()
assert len(l) == 1 # and did not crash
self.on_compile_bridge()
assert len(l) == 2 # and did not crash
-
+
def test_on_compile_types(self):
import pypyjit
l = []
@@ -182,7 +183,7 @@
def hook(jitdriver_name, greenkey, reason):
l.append((jitdriver_name, reason))
-
+
pypyjit.set_abort_hook(hook)
self.on_abort()
assert l == [('pypyjit', 'ABORT_TOO_LONG')]
@@ -224,13 +225,14 @@
def f():
pass
- op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, (f.func_code, 0, 0))
+ op = DebugMergePoint([Box(0)], 'repr', 'pypyjit', 2, 3, (f.func_code, 0, 0))
assert op.bytecode_no == 0
assert op.pycode is f.func_code
assert repr(op) == 'repr'
assert op.jitdriver_name == 'pypyjit'
assert op.num == self.dmp_num
assert op.call_depth == 2
- op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, ('str',))
+ assert op.call_id == 3
+ op = DebugMergePoint([Box(0)], 'repr', 'notmain', 5, 4, ('str',))
raises(AttributeError, 'op.pycode')
assert op.call_depth == 5
diff --git a/pypy/module/pypyjit/test/test_policy.py b/pypy/module/pypyjit/test/test_policy.py
--- a/pypy/module/pypyjit/test/test_policy.py
+++ b/pypy/module/pypyjit/test/test_policy.py
@@ -38,6 +38,10 @@
assert pypypolicy.look_inside_function(Local.getdict.im_func)
assert pypypolicy.look_inside_function(get_ident)
+def test_time():
+ from pypy.module.rctime.interp_time import time
+ assert pypypolicy.look_inside_function(time)
+
def test_pypy_module():
from pypy.module._collections.interp_deque import W_Deque
from pypy.module._random.interp_random import W_Random
diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py
--- a/pypy/module/rctime/interp_time.py
+++ b/pypy/module/rctime/interp_time.py
@@ -24,6 +24,7 @@
from pypy.module.thread import ll_thread as thread
eci = ExternalCompilationInfo(
+ post_include_bits = ["BOOL pypy_timemodule_setCtrlHandler(HANDLE event);"],
separate_module_sources=['''
#include <windows.h>
diff --git a/pypy/module/select/__init__.py b/pypy/module/select/__init__.py
--- a/pypy/module/select/__init__.py
+++ b/pypy/module/select/__init__.py
@@ -22,6 +22,13 @@
if value is not None:
interpleveldefs[symbol] = "space.wrap(%r)" % value
+ if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
+ interpleveldefs["kqueue"] = "interp_kqueue.W_Kqueue"
+ interpleveldefs["kevent"] = "interp_kqueue.W_Kevent"
+ from pypy.module.select.interp_kqueue import symbol_map
+ for symbol in symbol_map:
+ interpleveldefs[symbol] = "space.wrap(interp_kqueue.%s)" % symbol
+
def buildloaders(cls):
from pypy.rlib import rpoll
for name in rpoll.eventnames:
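
The new interp_kqueue module registered above exposes kqueue and kevent at
application level with the same interface as CPython's select module
(BSD and Mac OS X only). A short usage sketch of that app-level API; the
socket is only there to have a file descriptor to watch:

    import select, socket

    server = socket.socket()
    server.bind(('127.0.0.1', 0))
    server.listen(1)

    kq = select.kqueue()
    ev = select.kevent(server.fileno(),
                       filter=select.KQ_FILTER_READ,  # ready when accept() would not block
                       flags=select.KQ_EV_ADD)
    kq.control([ev], 0)                    # register only, do not wait for events

    events = kq.control(None, 1, 1.0)      # wait up to 1s for at most one event
    ready = [(e.ident, e.filter) for e in events]

    kq.close()
    server.close()
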
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -58,7 +58,7 @@
)
epoll_wait = rffi.llexternal(
"epoll_wait",
- [rffi.INT, lltype.Ptr(rffi.CArray(epoll_event)), rffi.INT, rffi.INT],
+ [rffi.INT, rffi.CArrayPtr(epoll_event), rffi.INT, rffi.INT],
rffi.INT,
compilation_info=eci,
)
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/select/interp_kqueue.py
@@ -0,0 +1,344 @@
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.error import OperationError, operationerrfmt, exception_from_errno
+from pypy.interpreter.gateway import interp2app, unwrap_spec
+from pypy.interpreter.typedef import TypeDef, generic_new_descr, GetSetProperty
+from pypy.rlib._rsocket_rffi import socketclose
+from pypy.rpython.lltypesystem import rffi, lltype
+from pypy.rpython.tool import rffi_platform
+from pypy.translator.tool.cbuild import ExternalCompilationInfo
+
+
+eci = ExternalCompilationInfo(
+ includes = ["sys/types.h",
+ "sys/event.h",
+ "sys/time.h"],
+)
+
+
+class CConfig:
+ _compilation_info_ = eci
+
+
+CConfig.kevent = rffi_platform.Struct("struct kevent", [
+ ("ident", rffi.UINTPTR_T),
+ ("filter", rffi.SHORT),
+ ("flags", rffi.USHORT),
+ ("fflags", rffi.UINT),
+ ("data", rffi.INTPTR_T),
+ ("udata", rffi.VOIDP),
+])
+
+
+CConfig.timespec = rffi_platform.Struct("struct timespec", [
+ ("tv_sec", rffi.TIME_T),
+ ("tv_nsec", rffi.LONG),
+])
+
+
+symbol_map = {
+ "KQ_FILTER_READ": "EVFILT_READ",
+ "KQ_FILTER_WRITE": "EVFILT_WRITE",
+ "KQ_FILTER_AIO": "EVFILT_AIO",
+ "KQ_FILTER_VNODE": "EVFILT_VNODE",
+ "KQ_FILTER_PROC": "EVFILT_PROC",
+# "KQ_FILTER_NETDEV": None, # deprecated on FreeBSD .. no longer defined
+ "KQ_FILTER_SIGNAL": "EVFILT_SIGNAL",
+ "KQ_FILTER_TIMER": "EVFILT_TIMER",
+ "KQ_EV_ADD": "EV_ADD",
+ "KQ_EV_DELETE": "EV_DELETE",
+ "KQ_EV_ENABLE": "EV_ENABLE",
+ "KQ_EV_DISABLE": "EV_DISABLE",
+ "KQ_EV_ONESHOT": "EV_ONESHOT",
+ "KQ_EV_CLEAR": "EV_CLEAR",
+# "KQ_EV_SYSFLAGS": None, # Python docs says "internal event" .. not defined on FreeBSD
+# "KQ_EV_FLAG1": None, # Python docs says "internal event" .. not defined on FreeBSD
+ "KQ_EV_EOF": "EV_EOF",
+ "KQ_EV_ERROR": "EV_ERROR"
+}
+
+for symbol in symbol_map.values():
+ setattr(CConfig, symbol, rffi_platform.DefinedConstantInteger(symbol))
+
+cconfig = rffi_platform.configure(CConfig)
+
+kevent = cconfig["kevent"]
+timespec = cconfig["timespec"]
+
+for symbol in symbol_map:
+ globals()[symbol] = cconfig[symbol_map[symbol]]
+
+
+syscall_kqueue = rffi.llexternal(
+ "kqueue",
+ [],
+ rffi.INT,
+ compilation_info=eci
+)
+
+syscall_kevent = rffi.llexternal(
+ "kevent",
+ [rffi.INT,
+ lltype.Ptr(rffi.CArray(kevent)),
+ rffi.INT,
+ lltype.Ptr(rffi.CArray(kevent)),
+ rffi.INT,
+ lltype.Ptr(timespec)
+ ],
+ rffi.INT,
+ compilation_info=eci
+)
+
+
+class W_Kqueue(Wrappable):
+ def __init__(self, space, kqfd):
+ self.kqfd = kqfd
+
+ def descr__new__(space, w_subtype):
+ kqfd = syscall_kqueue()
+ if kqfd < 0:
+ raise exception_from_errno(space, space.w_IOError)
+ return space.wrap(W_Kqueue(space, kqfd))
+
+ @unwrap_spec(fd=int)
+ def descr_fromfd(space, w_cls, fd):
+ return space.wrap(W_Kqueue(space, fd))
+
+ def __del__(self):
+ self.close()
+
+ def get_closed(self):
+ return self.kqfd < 0
+
+ def close(self):
+ if not self.get_closed():
+ kqfd = self.kqfd
+ self.kqfd = -1
+ socketclose(kqfd)
+
+ def check_closed(self, space):
+ if self.get_closed():
+ raise OperationError(space.w_ValueError, space.wrap("I/O operation on closed kqueue fd"))
+
+ def descr_get_closed(self, space):
+ return space.wrap(self.get_closed())
+
+ def descr_fileno(self, space):
+ self.check_closed(space)
+ return space.wrap(self.kqfd)
+
+ def descr_close(self, space):
+ self.close()
+
+ @unwrap_spec(max_events=int)
+ def descr_control(self, space, w_changelist, max_events, w_timeout=None):
+
+ self.check_closed(space)
+
+ if max_events < 0:
+ raise operationerrfmt(space.w_ValueError,
+ "Length of eventlist must be 0 or positive, got %d", max_events
+ )
+
+ if space.is_w(w_changelist, space.w_None):
+ changelist_len = 0
+ else:
+ changelist_len = space.len_w(w_changelist)
+
+ with lltype.scoped_alloc(rffi.CArray(kevent), changelist_len) as changelist, \
+ lltype.scoped_alloc(rffi.CArray(kevent), max_events) as eventlist, \
+ lltype.scoped_alloc(timespec) as timeout:
+
+ if not space.is_w(w_timeout, space.w_None):
+ _timeout = space.float_w(w_timeout)
+ if _timeout < 0:
+ raise operationerrfmt(space.w_ValueError,
+ "Timeout must be None or >= 0, got %s", str(_timeout)
+ )
+ sec = int(_timeout)
+ nsec = int(1e9 * (_timeout - sec))
+ rffi.setintfield(timeout, 'c_tv_sec', sec)
+ rffi.setintfield(timeout, 'c_tv_nsec', nsec)
+ ptimeout = timeout
+ else:
+ ptimeout = lltype.nullptr(timespec)
+
+ if not space.is_w(w_changelist, space.w_None):
+ i = 0
+ for w_ev in space.listview(w_changelist):
+ ev = space.interp_w(W_Kevent, w_ev)
+ changelist[i].c_ident = ev.event.c_ident
+ changelist[i].c_filter = ev.event.c_filter
+ changelist[i].c_flags = ev.event.c_flags
+ changelist[i].c_fflags = ev.event.c_fflags
+ changelist[i].c_data = ev.event.c_data
+ changelist[i].c_udata = ev.event.c_udata
+ i += 1
+ pchangelist = changelist
+ else:
+ pchangelist = lltype.nullptr(rffi.CArray(kevent))
+
+ nfds = syscall_kevent(self.kqfd,
+ pchangelist,
+ changelist_len,
+ eventlist,
+ max_events,
+ ptimeout)
+ if nfds < 0:
+ raise exception_from_errno(space, space.w_IOError)
+ else:
+ elist_w = [None] * nfds
+ for i in xrange(nfds):
+
+ evt = eventlist[i]
+
+ w_event = W_Kevent(space)
+ w_event.event = lltype.malloc(kevent, flavor="raw")
+ w_event.event.c_ident = evt.c_ident
+ w_event.event.c_filter = evt.c_filter
+ w_event.event.c_flags = evt.c_flags
+ w_event.event.c_fflags = evt.c_fflags
+ w_event.event.c_data = evt.c_data
+ w_event.event.c_udata = evt.c_udata
+
+ elist_w[i] = w_event
+
+ return space.newlist(elist_w)
+
+
+
+W_Kqueue.typedef = TypeDef("select.kqueue",
+ __new__ = interp2app(W_Kqueue.descr__new__.im_func),
+ fromfd = interp2app(W_Kqueue.descr_fromfd.im_func, as_classmethod=True),
+
+ closed = GetSetProperty(W_Kqueue.descr_get_closed),
+ fileno = interp2app(W_Kqueue.descr_fileno),
+
+ close = interp2app(W_Kqueue.descr_close),
+ control = interp2app(W_Kqueue.descr_control),
+)
+W_Kqueue.typedef.acceptable_as_base_class = False
+
+
+class W_Kevent(Wrappable):
+ def __init__(self, space):
+ self.event = lltype.nullptr(kevent)
+
+ def __del__(self):
+ if self.event:
+ lltype.free(self.event, flavor="raw")
+
+ @unwrap_spec(filter=int, flags=rffi.r_uint, fflags=rffi.r_uint, data=int, udata=rffi.r_uint)
+ def descr__init__(self, space, w_ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD, fflags=0, data=0, udata=0):
+ ident = space.c_filedescriptor_w(w_ident)
+
+ self.event = lltype.malloc(kevent, flavor="raw")
+ rffi.setintfield(self.event, "c_ident", ident)
+ rffi.setintfield(self.event, "c_filter", filter)
+ rffi.setintfield(self.event, "c_flags", flags)
+ rffi.setintfield(self.event, "c_fflags", fflags)
+ rffi.setintfield(self.event, "c_data", data)
+ self.event.c_udata = rffi.cast(rffi.VOIDP, udata)
+
+ def _compare_all_fields(self, other, op):
+ l_ident = self.event.c_ident
+ r_ident = other.event.c_ident
+ l_filter = rffi.cast(lltype.Signed, self.event.c_filter)
+ r_filter = rffi.cast(lltype.Signed, other.event.c_filter)
+ l_flags = rffi.cast(lltype.Unsigned, self.event.c_flags)
+ r_flags = rffi.cast(lltype.Unsigned, other.event.c_flags)
+ l_fflags = self.event.c_fflags
+ r_fflags = other.event.c_fflags
+ l_data = self.event.c_data
+ r_data = other.event.c_data
+ l_udata = rffi.cast(lltype.Unsigned, self.event.c_udata)
+ r_udata = rffi.cast(lltype.Unsigned, other.event.c_udata)
+
+ if op == "eq":
+ return l_ident == r_ident and \
+ l_filter == r_filter and \
+ l_flags == r_flags and \
+ l_fflags == r_fflags and \
+ l_data == r_data and \
+ l_udata == r_udata
+ elif op == "lt":
+ return (l_ident < r_ident) or \
+ (l_ident == r_ident and l_filter < r_filter) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags < r_flags) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags < r_fflags) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags == r_fflags and l_data < r_data) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags == r_fflags and l_data == r_data and l_udata < r_udata)
+ elif op == "gt":
+ return (l_ident > r_ident) or \
+ (l_ident == r_ident and l_filter > r_filter) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags > r_flags) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags > r_fflags) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags == r_fflags and l_data > r_data) or \
+ (l_ident == r_ident and l_filter == r_filter and l_flags == r_flags and l_fflags == r_fflags and l_data == r_data and l_udata > r_udata)
+ else:
+ assert False
+
+ def compare_all_fields(self, space, other, op):
+ if not space.interp_w(W_Kevent, other):
+ if op == "eq":
+ return False
+ elif op == "ne":
+ return True
+ else:
+ raise OperationError(space.w_TypeError, space.wrap('cannot compare kevent to incompatible type'))
+ return self._compare_all_fields(space.interp_w(W_Kevent, other), op)
+
+ def descr__eq__(self, space, w_other):
+ return space.wrap(self.compare_all_fields(space, w_other, "eq"))
+
+ def descr__ne__(self, space, w_other):
+ return space.wrap(not self.compare_all_fields(space, w_other, "eq"))
+
+ def descr__le__(self, space, w_other):
+ return space.wrap(not self.compare_all_fields(space, w_other, "gt"))
+
+ def descr__lt__(self, space, w_other):
+ return space.wrap(self.compare_all_fields(space, w_other, "lt"))
+
+ def descr__ge__(self, space, w_other):
+ return space.wrap(not self.compare_all_fields(space, w_other, "lt"))
+
+ def descr__gt__(self, space, w_other):
+ return space.wrap(self.compare_all_fields(space, w_other, "gt"))
+
+ def descr_get_ident(self, space):
+ return space.wrap(self.event.c_ident)
+
+ def descr_get_filter(self, space):
+ return space.wrap(self.event.c_filter)
+
+ def descr_get_flags(self, space):
+ return space.wrap(self.event.c_flags)
+
+ def descr_get_fflags(self, space):
+ return space.wrap(self.event.c_fflags)
+
+ def descr_get_data(self, space):
+ return space.wrap(self.event.c_data)
+
+ def descr_get_udata(self, space):
+ return space.wrap(rffi.cast(rffi.SIZE_T, self.event.c_udata))
+
+
+W_Kevent.typedef = TypeDef("select.kevent",
+ __new__ = generic_new_descr(W_Kevent),
+ __init__ = interp2app(W_Kevent.descr__init__),
+ __eq__ = interp2app(W_Kevent.descr__eq__),
+ __ne__ = interp2app(W_Kevent.descr__ne__),
+ __le__ = interp2app(W_Kevent.descr__le__),
+ __lt__ = interp2app(W_Kevent.descr__lt__),
+ __ge__ = interp2app(W_Kevent.descr__ge__),
+ __gt__ = interp2app(W_Kevent.descr__gt__),
+
+ ident = GetSetProperty(W_Kevent.descr_get_ident),
+ filter = GetSetProperty(W_Kevent.descr_get_filter),
+ flags = GetSetProperty(W_Kevent.descr_get_flags),
+ fflags = GetSetProperty(W_Kevent.descr_get_fflags),
+ data = GetSetProperty(W_Kevent.descr_get_data),
+ udata = GetSetProperty(W_Kevent.descr_get_udata),
+)
+W_Kevent.typedef.acceptable_as_base_class = False
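For orientation, here is a minimal sketch of how the new app-level interface is meant to be driven, mirroring CPython's select.kqueue API as exposed by the typedefs above; the socketpair setup is only illustrative:

    import select
    import socket

    a, b = socket.socketpair()
    kq = select.kqueue()
    ev = select.kevent(a.fileno(), select.KQ_FILTER_READ,
                       select.KQ_EV_ADD | select.KQ_EV_ENABLE)
    kq.control([ev], 0)               # register only: max_events=0 returns no events
    b.send('ping')                    # make the first socket readable
    ready = kq.control(None, 1, 1.0)  # wait for up to 1 event, 1 second timeout
    assert ready and ready[0].ident == a.fileno()
    kq.close()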
diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/select/test/test_kqueue.py
@@ -0,0 +1,190 @@
+# adapted from CPython: Lib/test/test_kqueue.py
+
+import py
+import sys
+
+from pypy.conftest import gettestobjspace
+
+
+class AppTestKqueue(object):
+ def setup_class(cls):
+ if 'bsd' not in sys.platform and \
+ not sys.platform.startswith('darwin'):
+ py.test.skip("test requires BSD")
+ cls.space = gettestobjspace(usemodules=["select", "_socket", "posix"])
+
+ def test_create(self):
+ import select
+
+ kq = select.kqueue()
+ assert kq.fileno() > 0
+ assert not kq.closed
+ kq.close()
+ assert kq.closed
+ raises(ValueError, kq.fileno)
+
+ def test_create_event(self):
+ import select
+ import sys
+
+ fd = sys.stderr.fileno()
+ ev = select.kevent(fd)
+ other = select.kevent(1000)
+ assert ev.ident == fd
+ assert ev.filter == select.KQ_FILTER_READ
+ assert ev.flags == select.KQ_EV_ADD
+ assert ev.fflags == 0
+ assert ev.data == 0
+ assert ev.udata == 0
+ assert ev == ev
+ assert ev != other
+ assert cmp(ev, other) == -1
+ assert ev < other
+ assert other >= ev
+ raises(TypeError, cmp, ev, None)
+ raises(TypeError, cmp, ev, 1)
+ raises(TypeError, cmp, ev, "ev")
+
+ ev = select.kevent(fd, select.KQ_FILTER_WRITE)
+ assert ev.ident == fd
+ assert ev.filter == select.KQ_FILTER_WRITE
+ assert ev.flags == select.KQ_EV_ADD
+ assert ev.fflags == 0
+ assert ev.data == 0
+ assert ev.udata == 0
+ assert ev == ev
+ assert ev != other
+
+ ev = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ONESHOT)
+ assert ev.ident == fd
+ assert ev.filter == select.KQ_FILTER_WRITE
+ assert ev.flags == select.KQ_EV_ONESHOT
+ assert ev.fflags == 0
+ assert ev.data == 0
+ assert ev.udata == 0
+ assert ev == ev
+ assert ev != other
+
+ ev = select.kevent(1, 2, 3, 4, 5, 6)
+ assert ev.ident == 1
+ assert ev.filter == 2
+ assert ev.flags == 3
+ assert ev.fflags == 4
+ assert ev.data == 5
+ assert ev.udata == 6
+ assert ev == ev
+ assert ev != other
+
+ bignum = sys.maxsize * 2 + 1
+ fd = sys.maxsize
+ ev = select.kevent(fd, 1, 2, bignum, sys.maxsize, bignum)
+ assert ev.ident == fd
+ assert ev.filter == 1
+ assert ev.flags == 2
+ assert ev.fflags == bignum
+ assert ev.data == sys.maxsize
+ assert ev.udata == bignum
+ assert ev == ev
+ assert ev != other
+
+ def test_queue_event(self):
+ import errno
+ import select
+ import socket
+ import sys
+ import time
+
+ server_socket = socket.socket()
+ server_socket.bind(("127.0.0.1", 0))
+ server_socket.listen(1)
+ client = socket.socket()
+ client.setblocking(False)
+ try:
+ client.connect(("127.0.0.1", server_socket.getsockname()[1]))
+ except socket.error as e:
+ if 'bsd' in sys.platform:
+ assert e.args[0] == errno.ENOENT
+ else:
+ assert e.args[0] == errno.EINPROGRESS
+ server, addr = server_socket.accept()
+
+ if sys.platform.startswith("darwin"):
+ flags = select.KQ_EV_ADD | select.KQ_EV_ENABLE
+ else:
+ flags = 0
+
+ kq1 = select.kqueue()
+ kq2 = select.kqueue.fromfd(kq1.fileno())
+
+ ev = select.kevent(server.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ kq1.control([ev], 0)
+ ev = select.kevent(server.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ kq1.control([ev], 0)
+ ev = select.kevent(client.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ kq2.control([ev], 0)
+ ev = select.kevent(client.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ kq2.control([ev], 0)
+
+ events = kq1.control(None, 4, 1)
+ events = [(e.ident, e.filter, e.flags) for e in events]
+ events.sort()
+ assert events == [
+ (client.fileno(), select.KQ_FILTER_WRITE, flags),
+ (server.fileno(), select.KQ_FILTER_WRITE, flags),
+ ]
+ client.send("Hello!")
+ server.send("world!!!")
+
+ for i in xrange(10):
+ events = kq1.control(None, 4, 1)
+ if len(events) == 4:
+ break
+ time.sleep(1.0)
+ else:
+ assert False, "timeout waiting for event notification"
+
+ events = [(e.ident, e.filter, e.flags) for e in events]
+ events.sort()
+ assert events == [
+ (client.fileno(), select.KQ_FILTER_WRITE, flags),
+ (client.fileno(), select.KQ_FILTER_READ, flags),
+ (server.fileno(), select.KQ_FILTER_WRITE, flags),
+ (server.fileno(), select.KQ_FILTER_READ, flags),
+ ]
+
+ ev = select.kevent(client.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
+ kq1.control([ev], 0)
+ ev = select.kevent(client.fileno(), select.KQ_FILTER_READ, select.KQ_EV_DELETE)
+ kq1.control([ev], 0)
+ ev = select.kevent(server.fileno(), select.KQ_FILTER_READ, select.KQ_EV_DELETE)
+ kq1.control([ev], 0, 0)
+
+ events = kq1.control([], 4, 0.99)
+ events = [(e.ident, e.filter, e.flags) for e in events]
+ events.sort()
+ assert events == [
+ (server.fileno(), select.KQ_FILTER_WRITE, flags),
+ ]
+
+ client.close()
+ server.close()
+ server_socket.close()
+
+ def test_pair(self):
+ import select
+ import socket
+
+ kq = select.kqueue()
+ a, b = socket.socketpair()
+
+ a.send('foo')
+ event1 = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ event2 = select.kevent(b, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+ r = kq.control([event1, event2], 1, 1)
+ assert r
+ assert r[0].flags & select.KQ_EV_ERROR == 0
+ data = b.recv(r[0].data)
+ assert data == 'foo'
+
+ a.close()
+ b.close()
+ kq.close()
diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py
--- a/pypy/module/struct/formatiterator.py
+++ b/pypy/module/struct/formatiterator.py
@@ -1,5 +1,6 @@
from pypy.rlib import jit
from pypy.rlib.objectmodel import specialize
+from pypy.rlib.rstring import StringBuilder
from pypy.rlib.rstruct.error import StructError
from pypy.rlib.rstruct.formatiterator import FormatIterator
from pypy.rlib.rstruct.standardfmttable import PACK_ACCEPTS_BROKEN_INPUT
@@ -8,11 +9,11 @@
class PackFormatIterator(FormatIterator):
- def __init__(self, space, args_w):
+ def __init__(self, space, args_w, size):
self.space = space
self.args_w = args_w
self.args_index = 0
- self.result = [] # list of characters
+ self.result = StringBuilder(size)
# This *should* be always unroll safe, the only way to get here is by
# unroll the interpret function, which means the fmt is const, and thus
@@ -29,9 +30,8 @@
@jit.unroll_safe
def align(self, mask):
- pad = (-len(self.result)) & mask
- for i in range(pad):
- self.result.append('\x00')
+ pad = (-self.result.getlength()) & mask
+ self.result.append_multiple_char('\x00', pad)
def finished(self):
if self.args_index != len(self.args_w):
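A side note on the padding arithmetic that align() keeps using: for a power-of-two boundary, (-n) & mask is exactly the number of zero bytes needed to reach the next multiple of mask + 1. A quick standalone check (pad_to is just an illustrative name):

    def pad_to(n, alignment):
        # alignment must be a power of two; mask = alignment - 1
        return (-n) & (alignment - 1)

    assert pad_to(5, 4) == 3    # 5 -> 8
    assert pad_to(8, 4) == 0    # already aligned
    assert pad_to(13, 8) == 3   # 13 -> 16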
diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py
--- a/pypy/module/struct/interp_struct.py
+++ b/pypy/module/struct/interp_struct.py
@@ -1,28 +1,34 @@
from pypy.interpreter.gateway import unwrap_spec
from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator
+from pypy.rlib import jit
from pypy.rlib.rstruct.error import StructError
from pypy.rlib.rstruct.formatiterator import CalcSizeFormatIterator
@unwrap_spec(format=str)
def calcsize(space, format):
+ return space.wrap(_calcsize(space, format))
+
+def _calcsize(space, format):
fmtiter = CalcSizeFormatIterator()
try:
fmtiter.interpret(format)
except StructError, e:
raise e.at_applevel(space)
- return space.wrap(fmtiter.totalsize)
-
+ return fmtiter.totalsize
@unwrap_spec(format=str)
def pack(space, format, args_w):
- fmtiter = PackFormatIterator(space, args_w)
+ if jit.isconstant(format):
+ size = _calcsize(space, format)
+ else:
+ size = 8
+ fmtiter = PackFormatIterator(space, args_w, size)
try:
fmtiter.interpret(format)
except StructError, e:
raise e.at_applevel(space)
- result = ''.join(fmtiter.result)
- return space.wrap(result)
+ return space.wrap(fmtiter.result.build())
@unwrap_spec(format=str, input='bufferstr')
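The point of threading a size into PackFormatIterator: when the JIT can treat the format string as a constant, the exact packed size is computed up front and the StringBuilder is allocated at its final length; otherwise a small default of 8 is used and the builder grows on demand. A rough pure-Python analogue of that decision, with struct.calcsize standing in for _calcsize and a made-up helper name:

    import struct

    def preallocation_size(fmt, format_is_constant):
        # exact size when the format is known at trace time, small default otherwise
        if format_is_constant:
            return struct.calcsize(fmt)
        return 8

    assert preallocation_size("d", True) == 8                # one C double
    assert preallocation_size("<not a format>", False) == 8  # never inspected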
diff --git a/pypy/module/struct/test/test_ztranslation.py b/pypy/module/struct/test/test_ztranslation.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/struct/test/test_ztranslation.py
@@ -0,0 +1,6 @@
+from pypy.objspace.fake.checkmodule import checkmodule
+
+
+def test_checkmodule():
+ checkmodule('struct')
+
diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py
--- a/pypy/objspace/std/marshal_impl.py
+++ b/pypy/objspace/std/marshal_impl.py
@@ -16,6 +16,7 @@
from pypy.interpreter.pycode import PyCode
from pypy.interpreter import gateway, unicodehelper
from pypy.rlib.rstruct import ieee
+from pypy.rlib.rstring import StringBuilder
from pypy.objspace.std.boolobject import W_BoolObject
from pypy.objspace.std.complexobject import W_ComplexObject
@@ -153,9 +154,9 @@
register(TYPE_INT64, unmarshal_Int64)
def pack_float(f):
- result = []
+ result = StringBuilder(8)
ieee.pack_float(result, f, 8, False)
- return ''.join(result)
+ return result.build()
def unpack_float(s):
return ieee.unpack_float(s, False)
diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py
--- a/pypy/objspace/std/ropeunicodeobject.py
+++ b/pypy/objspace/std/ropeunicodeobject.py
@@ -8,6 +8,7 @@
from pypy.objspace.std.ropeobject import W_RopeObject
from pypy.objspace.std.noneobject import W_NoneObject
from pypy.rlib import rope
+from pypy.rlib.rstring import StringBuilder
from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice
from pypy.objspace.std import unicodeobject, slicetype, iterobject
from pypy.objspace.std.tupleobject import W_TupleObject
@@ -946,15 +947,16 @@
return mod_format(space, w_format, w_values, do_unicode=True)
def buffer__RopeUnicode(space, w_unicode):
- from pypy.rlib.rstruct.unichar import pack_unichar
- charlist = []
+ from pypy.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE
node = w_unicode._node
iter = rope.ItemIterator(node)
- for idx in range(node.length()):
+ length = node.length()
+ builder = StringBuilder(length * UNICODE_SIZE)
+ for idx in range(length):
unich = unichr(iter.nextint())
- pack_unichar(unich, charlist)
+ pack_unichar(unich, builder)
from pypy.interpreter.buffer import StringBuffer
- return space.wrap(StringBuffer(''.join(charlist)))
+ return space.wrap(StringBuffer(builder.build()))
# methods of the iterator
diff --git a/pypy/rlib/rstruct/ieee.py b/pypy/rlib/rstruct/ieee.py
--- a/pypy/rlib/rstruct/ieee.py
+++ b/pypy/rlib/rstruct/ieee.py
@@ -4,7 +4,7 @@
import math
-from pypy.rlib import rarithmetic, rfloat, objectmodel
+from pypy.rlib import rarithmetic, rfloat, objectmodel, jit
from pypy.rlib.rarithmetic import r_ulonglong
@@ -135,14 +135,15 @@
return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant
+ at jit.unroll_safe
def pack_float(result, x, size, be):
- l = [] if be else result
+ l = []
unsigned = float_pack(x, size)
for i in range(size):
l.append(chr((unsigned >> (i * 8)) & 0xFF))
if be:
l.reverse()
- result.extend(l)
+ result.append("".join(l))
def unpack_float(s, be):
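In byte terms, the rewritten pack_float appends the low-order bytes of float_pack's integer encoding (one per byte of the requested size), reversed for big-endian, joined into a single string so the builder sees one append instead of many. A Python 2 style sanity sketch, with struct standing in for float_pack and only the 8-byte case exercised:

    import struct

    def pack_float_sketch(result, x, size, be):
        # stand-in for float_pack(x, 8): the IEEE-754 bits as an unsigned integer
        unsigned = struct.unpack("<Q", struct.pack("<d", x))[0]
        l = [chr((unsigned >> (i * 8)) & 0xFF) for i in range(size)]
        if be:
            l.reverse()
        result.append("".join(l))

    out = []
    pack_float_sketch(out, 1.5, 8, True)
    assert out[0] == struct.pack(">d", 1.5)   # big-endian bytes of 1.5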
diff --git a/pypy/rlib/rstruct/nativefmttable.py b/pypy/rlib/rstruct/nativefmttable.py
--- a/pypy/rlib/rstruct/nativefmttable.py
+++ b/pypy/rlib/rstruct/nativefmttable.py
@@ -3,6 +3,7 @@
The table 'native_fmttable' is also used by pypy.module.array.interp_array.
"""
import struct
+from pypy.rlib import jit
from pypy.rlib.rstruct import standardfmttable as std
from pypy.rlib.rstruct.error import StructError
from pypy.rpython.tool import rffi_platform
@@ -25,12 +26,15 @@
double_buf = lltype.malloc(rffi.DOUBLEP.TO, 1, flavor='raw', immortal=True)
float_buf = lltype.malloc(rffi.FLOATP.TO, 1, flavor='raw', immortal=True)
+ at jit.dont_look_inside
+def double_to_ccharp(doubleval):
+ double_buf[0] = doubleval
+ return rffi.cast(rffi.CCHARP, double_buf)
+
def pack_double(fmtiter):
doubleval = fmtiter.accept_float_arg()
- double_buf[0] = doubleval
- p = rffi.cast(rffi.CCHARP, double_buf)
- for i in range(sizeof_double):
- fmtiter.result.append(p[i])
+ p = double_to_ccharp(doubleval)
+ fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.DOUBLE))
@specialize.argtype(0)
def unpack_double(fmtiter):
@@ -41,13 +45,16 @@
doubleval = double_buf[0]
fmtiter.appendobj(doubleval)
+ at jit.dont_look_inside
+def float_to_ccharp(floatval):
+ float_buf[0] = floatval
+ return rffi.cast(rffi.CCHARP, float_buf)
+
def pack_float(fmtiter):
doubleval = fmtiter.accept_float_arg()
floatval = r_singlefloat(doubleval)
- float_buf[0] = floatval
- p = rffi.cast(rffi.CCHARP, float_buf)
- for i in range(sizeof_float):
- fmtiter.result.append(p[i])
+ p = float_to_ccharp(floatval)
+ fmtiter.result.append_charpsize(p, rffi.sizeof(rffi.FLOAT))
@specialize.argtype(0)
def unpack_float(fmtiter):
diff --git a/pypy/rlib/rstruct/standardfmttable.py b/pypy/rlib/rstruct/standardfmttable.py
--- a/pypy/rlib/rstruct/standardfmttable.py
+++ b/pypy/rlib/rstruct/standardfmttable.py
@@ -21,8 +21,7 @@
# ____________________________________________________________
def pack_pad(fmtiter, count):
- for i in range(count):
- fmtiter.result.append('\x00')
+ fmtiter.result.append_multiple_char('\x00', count)
def pack_char(fmtiter):
string = fmtiter.accept_str_arg()
@@ -38,11 +37,10 @@
def pack_string(fmtiter, count):
string = fmtiter.accept_str_arg()
if len(string) < count:
- fmtiter.result += string
- for i in range(len(string), count):
- fmtiter.result.append('\x00')
+ fmtiter.result.append(string)
+ fmtiter.result.append_multiple_char('\x00', count - len(string))
else:
- fmtiter.result += string[:count]
+ fmtiter.result.append_slice(string, 0, count)
def pack_pascal(fmtiter, count):
string = fmtiter.accept_str_arg()
@@ -56,9 +54,8 @@
else:
prefixchar = chr(prefix)
fmtiter.result.append(prefixchar)
- fmtiter.result += string[:prefix]
- for i in range(1 + prefix, count):
- fmtiter.result.append('\x00')
+ fmtiter.result.append_slice(string, 0, prefix)
+ fmtiter.result.append_multiple_char('\x00', count - (1 + prefix))
def make_float_packer(size):
def packer(fmtiter):
diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py
--- a/pypy/rpython/lltypesystem/rffi.py
+++ b/pypy/rpython/lltypesystem/rffi.py
@@ -433,7 +433,8 @@
TYPES.append(name)
TYPES += ['signed char', 'unsigned char',
'long long', 'unsigned long long',
- 'size_t', 'time_t', 'wchar_t']
+ 'size_t', 'time_t', 'wchar_t',
+ 'uintptr_t', 'intptr_t']
if os.name != 'nt':
TYPES.append('mode_t')
TYPES.append('pid_t')
@@ -617,8 +618,6 @@
# (use SIGNEDCHAR or UCHAR for the small integer types)
CHAR = lltype.Char
-INTPTR_T = SSIZE_T
-
# double
DOUBLE = lltype.Float
LONGDOUBLE = lltype.LongFloat
diff --git a/pypy/rpython/rclass.py b/pypy/rpython/rclass.py
--- a/pypy/rpython/rclass.py
+++ b/pypy/rpython/rclass.py
@@ -364,6 +364,8 @@
def get_ll_hash_function(self):
return ll_inst_hash
+ get_ll_fasthash_function = get_ll_hash_function
+
def rtype_type(self, hop):
raise NotImplementedError
diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py
--- a/pypy/rpython/test/test_rdict.py
+++ b/pypy/rpython/test/test_rdict.py
@@ -449,6 +449,21 @@
assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype
+ def test_identity_hash_is_fast(self):
+ class A(object):
+ pass
+
+ def f():
+ return {A(): 1}
+
+ t = TranslationContext()
+ s = t.buildannotator().build_types(f, [])
+ rtyper = t.buildrtyper()
+ rtyper.specialize()
+
+ r_dict = rtyper.getrepr(s)
+ assert not hasattr(r_dict.lowleveltype.TO.entries.TO.OF, "f_hash")
+
def test_tuple_dict(self):
def f(i):
d = {}
diff --git a/pypy/rpython/tool/rfficache.py b/pypy/rpython/tool/rfficache.py
--- a/pypy/rpython/tool/rfficache.py
+++ b/pypy/rpython/tool/rfficache.py
@@ -14,6 +14,8 @@
def ask_gcc(question, add_source=""):
includes = ['stdlib.h', 'stdio.h', 'sys/types.h']
+ if os.name != 'nt':
+ includes += ['inttypes.h']
include_string = "\n".join(["#include <%s>" % i for i in includes])
c_source = py.code.Source('''
// includes
diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -93,7 +93,7 @@
end_index += 1
op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)])
return loop
-
+
def _asm_disassemble(self, d, origin_addr, tp):
from pypy.jit.backend.x86.tool.viewcode import machine_code_dump
return list(machine_code_dump(d, tp, origin_addr))
@@ -109,7 +109,7 @@
if not argspec.strip():
return [], None
if opname == 'debug_merge_point':
- return argspec.split(", ", 1), None
+ return argspec.split(", ", 2), None
else:
args = argspec.split(', ')
descr = None
@@ -159,7 +159,7 @@
for op in operations:
if op.name == 'debug_merge_point':
self.inline_level = int(op.args[0])
- self.parse_code_data(op.args[1][1:-1])
+ self.parse_code_data(op.args[2][1:-1])
break
else:
self.inline_level = 0
@@ -417,7 +417,7 @@
part.descr = descrs[i]
part.comment = trace.comment
parts.append(part)
-
+
return parts
def parse_log_counts(input, loops):
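The reason for split(", ", 2) and for reading op.args[2]: debug_merge_point now carries two leading integers, so a maxsplit of 2 keeps the quoted code-object repr intact as the third field. A quick illustration on a made-up argspec in the new form:

    argspec = "0, 0, '<code object f. file 'x.py'. line 2> #15 COMPARE_OP'"
    args = argspec.split(", ", 2)
    assert args[0] == "0" and args[1] == "0"
    assert args[2] == "'<code object f. file 'x.py'. line 2> #15 COMPARE_OP'"
    # args[2][1:-1] then strips the surrounding quotes before parse_code_data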
diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py
--- a/pypy/tool/jitlogparser/test/test_parser.py
+++ b/pypy/tool/jitlogparser/test/test_parser.py
@@ -29,7 +29,7 @@
def test_parse_non_code():
ops = parse('''
[]
- debug_merge_point(0, "SomeRandomStuff")
+ debug_merge_point(0, 0, "SomeRandomStuff")
''')
res = Function.from_operations(ops.operations, LoopStorage())
assert len(res.chunks) == 1
@@ -39,10 +39,10 @@
ops = parse('''
[i0]
label()
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
i1 = int_add(i0, 1)
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
i2 = int_add(i1, 1)
''')
res = Function.from_operations(ops.operations, LoopStorage(), loopname='<loopname>')
@@ -57,12 +57,12 @@
def test_inlined_call():
ops = parse("""
[]
- debug_merge_point(0, '<code object inlined_call. file 'source.py'. line 12> #28 CALL_FUNCTION')
+ debug_merge_point(0, 0, '<code object inlined_call. file 'source.py'. line 12> #28 CALL_FUNCTION')
i18 = getfield_gc(p0, descr=<BoolFieldDescr pypy.interpreter.pyframe.PyFrame.inst_is_being_profiled 89>)
- debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #0 LOAD_FAST')
- debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #3 LOAD_CONST')
- debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #7 RETURN_VALUE')
- debug_merge_point(0, '<code object inlined_call. file 'source.py'. line 12> #31 STORE_FAST')
+ debug_merge_point(1, 1, '<code object inner. file 'source.py'. line 9> #0 LOAD_FAST')
+ debug_merge_point(1, 1, '<code object inner. file 'source.py'. line 9> #3 LOAD_CONST')
+ debug_merge_point(1, 1, '<code object inner. file 'source.py'. line 9> #7 RETURN_VALUE')
+ debug_merge_point(0, 0, '<code object inlined_call. file 'source.py'. line 12> #31 STORE_FAST')
""")
res = Function.from_operations(ops.operations, LoopStorage())
assert len(res.chunks) == 3 # two chunks + inlined call
@@ -75,10 +75,10 @@
def test_name():
ops = parse('''
[i0]
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
i1 = int_add(i0, 1)
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
i2 = int_add(i1, 1)
''')
res = Function.from_operations(ops.operations, LoopStorage())
@@ -92,10 +92,10 @@
ops = parse('''
[i0]
i3 = int_add(i0, 1)
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
i1 = int_add(i0, 1)
- debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
+ debug_merge_point(0, 0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
i2 = int_add(i1, 1)
''')
res = Function.from_operations(ops.operations, LoopStorage())
@@ -105,10 +105,10 @@
fname = str(py.path.local(__file__).join('..', 'x.py'))
ops = parse('''
[i0, i1]
- debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #0 LOAD_FAST")
- debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #3 LOAD_FAST")
- debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #6 BINARY_ADD")
- debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #7 RETURN_VALUE")
+ debug_merge_point(0, 0, "<code object f. file '%(fname)s'. line 2> #0 LOAD_FAST")
+ debug_merge_point(0, 0, "<code object f. file '%(fname)s'. line 2> #3 LOAD_FAST")
+ debug_merge_point(0, 0, "<code object f. file '%(fname)s'. line 2> #6 BINARY_ADD")
+ debug_merge_point(0, 0, "<code object f. file '%(fname)s'. line 2> #7 RETURN_VALUE")
''' % locals())
res = Function.from_operations(ops.operations, LoopStorage())
assert res.chunks[1].lineno == 3
@@ -119,11 +119,11 @@
fname = str(py.path.local(__file__).join('..', 'x.py'))
ops = parse('''
[i0, i1]
- debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #9 LOAD_FAST")
- debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #12 LOAD_CONST")
- debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #22 LOAD_CONST")
- debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #28 LOAD_CONST")
- debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #6 SETUP_LOOP")
+ debug_merge_point(0, 0, "<code object g. file '%(fname)s'. line 5> #9 LOAD_FAST")
+ debug_merge_point(0, 0, "<code object g. file '%(fname)s'. line 5> #12 LOAD_CONST")
+ debug_merge_point(0, 0, "<code object g. file '%(fname)s'. line 5> #22 LOAD_CONST")
+ debug_merge_point(0, 0, "<code object g. file '%(fname)s'. line 5> #28 LOAD_CONST")
+ debug_merge_point(0, 0, "<code object g. file '%(fname)s'. line 5> #6 SETUP_LOOP")
''' % locals())
res = Function.from_operations(ops.operations, LoopStorage())
assert res.linerange == (7, 9)
@@ -135,7 +135,7 @@
fname = str(py.path.local(__file__).join('..', 'x.py'))
ops = parse("""
[p6, p1]
- debug_merge_point(0, '<code object h. file '%(fname)s'. line 11> #17 FOR_ITER')
+ debug_merge_point(0, 0, '<code object h. file '%(fname)s'. line 11> #17 FOR_ITER')
guard_class(p6, 144264192, descr=<Guard2>)
p12 = getfield_gc(p6, descr=<GcPtrFieldDescr pypy.objspace.std.iterobject.W_AbstractSeqIterObject.inst_w_seq 12>)
""" % locals())
@@ -181,7 +181,7 @@
def test_parsing_strliteral():
loop = parse("""
- debug_merge_point(0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]')
+ debug_merge_point(0, 0, 'StrLiteralSearch at 11/51 [17, 8, 3, 1, 1, 1, 1, 51, 0, 19, 51, 1]')
""")
ops = Function.from_operations(loop.operations, LoopStorage())
chunk = ops.chunks[0]
@@ -193,12 +193,12 @@
loop = parse("""
# Loop 0 : loop with 19 ops
[p0, p1, p2, p3, i4]
- debug_merge_point(0, '<code object f. file 'x.py'. line 2> #15 COMPARE_OP')
+ debug_merge_point(0, 0, '<code object f. file 'x.py'. line 2> #15 COMPARE_OP')
+166: i6 = int_lt(i4, 10000)
guard_true(i6, descr=<Guard3>) [p1, p0, p2, p3, i4]
- debug_merge_point(0, '<code object f. file 'x.py'. line 2> #27 INPLACE_ADD')
+ debug_merge_point(0, 0, '<code object f. file 'x.py'. line 2> #27 INPLACE_ADD')
+179: i8 = int_add(i4, 1)
- debug_merge_point(0, '<code object f. file 'x.py'. line 2> #31 JUMP_ABSOLUTE')
+ debug_merge_point(0, 0, '<code object f. file 'x.py'. line 2> #31 JUMP_ABSOLUTE')
+183: i10 = getfield_raw(40564608, descr=<SignedFieldDescr pypysig_long_struct.c_value 0>)
+191: i12 = int_sub(i10, 1)
+195: setfield_raw(40564608, i12, descr=<SignedFieldDescr pypysig_long_struct.c_value 0>)
@@ -287,8 +287,8 @@
def test_parse_nonpython():
loop = parse("""
[]
- debug_merge_point(0, 'random')
- debug_merge_point(0, '<code object f. file 'x.py'. line 2> #15 COMPARE_OP')
+ debug_merge_point(0, 0, 'random')
+ debug_merge_point(0, 0, '<code object f. file 'x.py'. line 2> #15 COMPARE_OP')
""")
f = Function.from_operations(loop.operations, LoopStorage())
assert f.chunks[-1].filename == 'x.py'
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -60,7 +60,7 @@
if sys.platform == 'win32':
# Can't rename a DLL: it is always called 'libpypy-c.dll'
for extra in ['libpypy-c.dll',
- 'libexpat.dll', 'sqlite3.dll', 'msvcr90.dll',
+ 'libexpat.dll', 'sqlite3.dll', 'msvcr100.dll',
'libeay32.dll', 'ssleay32.dll']:
p = pypy_c.dirpath().join(extra)
if not p.check():
diff --git a/pypy/translator/c/src/libffi_msvc/ffi.c b/pypy/translator/c/src/libffi_msvc/ffi.c
--- a/pypy/translator/c/src/libffi_msvc/ffi.c
+++ b/pypy/translator/c/src/libffi_msvc/ffi.c
@@ -71,31 +71,31 @@
switch ((*p_arg)->type)
{
case FFI_TYPE_SINT8:
- *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv);
+ *(signed int *) argp = (signed int)*(ffi_SINT8 *)(* p_argv);
break;
case FFI_TYPE_UINT8:
- *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv);
+ *(unsigned int *) argp = (unsigned int)*(ffi_UINT8 *)(* p_argv);
break;
case FFI_TYPE_SINT16:
- *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv);
+ *(signed int *) argp = (signed int)*(ffi_SINT16 *)(* p_argv);
break;
case FFI_TYPE_UINT16:
- *(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv);
+ *(unsigned int *) argp = (unsigned int)*(ffi_UINT16 *)(* p_argv);
break;
case FFI_TYPE_SINT32:
- *(signed int *) argp = (signed int)*(SINT32 *)(* p_argv);
+ *(signed int *) argp = (signed int)*(ffi_SINT32 *)(* p_argv);
break;
case FFI_TYPE_UINT32:
- *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv);
break;
case FFI_TYPE_STRUCT:
- *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ *(unsigned int *) argp = (unsigned int)*(ffi_UINT32 *)(* p_argv);
break;
default:
diff --git a/pypy/translator/c/src/libffi_msvc/ffi_common.h b/pypy/translator/c/src/libffi_msvc/ffi_common.h
--- a/pypy/translator/c/src/libffi_msvc/ffi_common.h
+++ b/pypy/translator/c/src/libffi_msvc/ffi_common.h
@@ -56,16 +56,18 @@
} extended_cif;
/* Terse sized type definitions. */
-typedef unsigned int UINT8 __attribute__((__mode__(__QI__)));
-typedef signed int SINT8 __attribute__((__mode__(__QI__)));
-typedef unsigned int UINT16 __attribute__((__mode__(__HI__)));
-typedef signed int SINT16 __attribute__((__mode__(__HI__)));
-typedef unsigned int UINT32 __attribute__((__mode__(__SI__)));
-typedef signed int SINT32 __attribute__((__mode__(__SI__)));
-typedef unsigned int UINT64 __attribute__((__mode__(__DI__)));
-typedef signed int SINT64 __attribute__((__mode__(__DI__)));
+/* Fix for PyPy: these names are fine, but are bound to conflict with
+ * some other name from somewhere else :-( Added a 'ffi_' prefix. */
+typedef unsigned int ffi_UINT8 __attribute__((__mode__(__QI__)));
+typedef signed int ffi_SINT8 __attribute__((__mode__(__QI__)));
+typedef unsigned int ffi_UINT16 __attribute__((__mode__(__HI__)));
+typedef signed int ffi_SINT16 __attribute__((__mode__(__HI__)));
+typedef unsigned int ffi_UINT32 __attribute__((__mode__(__SI__)));
+typedef signed int ffi_SINT32 __attribute__((__mode__(__SI__)));
+typedef unsigned int ffi_UINT64 __attribute__((__mode__(__DI__)));
+typedef signed int ffi_SINT64 __attribute__((__mode__(__DI__)));
-typedef float FLOAT32;
+typedef float ffi_FLOAT32;
#ifdef __cplusplus
diff --git a/testrunner/runner.py b/testrunner/runner.py
--- a/testrunner/runner.py
+++ b/testrunner/runner.py
@@ -24,13 +24,16 @@
#Try to avoid opening a dialog box if one of the tests causes a system error
import ctypes
winapi = ctypes.windll.kernel32
+ SetErrorMode = winapi.SetErrorMode
+ SetErrorMode.argtypes=[ctypes.c_int]
+
SEM_FAILCRITICALERRORS = 1
SEM_NOGPFAULTERRORBOX = 2
SEM_NOOPENFILEERRORBOX = 0x8000
flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX
#Since there is no GetErrorMode, do a double Set
- old_mode = winapi.SetErrorMode(flags)
- winapi.SetErrorMode(old_mode | flags)
+ old_mode = SetErrorMode(flags)
+ SetErrorMode(old_mode | flags)
SIGKILL = SIGTERM = 0
READ_MODE = 'rU'