[pypy-commit] pypy reflex-support: merge default into branch
wlav
noreply at buildbot.pypy.org
Wed Jul 11 10:12:04 CEST 2012
Author: Wim Lavrijsen <WLavrijsen at lbl.gov>
Branch: reflex-support
Changeset: r56023:545e834cd1b6
Date: 2012-07-11 01:11 -0700
http://bitbucket.org/pypy/pypy/changeset/545e834cd1b6/
Log: merge default into branch
diff --git a/lib_pypy/disassembler.py b/lib_pypy/disassembler.py
--- a/lib_pypy/disassembler.py
+++ b/lib_pypy/disassembler.py
@@ -24,6 +24,11 @@
self.lineno = lineno
self.line_starts_here = False
+ def __str__(self):
+ if self.arg is None:
+ return "%s" % (self.__class__.__name__,)
+ return "%s (%s)" % (self.__class__.__name__, self.arg)
+
def __repr__(self):
if self.arg is None:
return "<%s at %d>" % (self.__class__.__name__, self.pos)
diff --git a/pypy/annotation/annrpython.py b/pypy/annotation/annrpython.py
--- a/pypy/annotation/annrpython.py
+++ b/pypy/annotation/annrpython.py
@@ -133,44 +133,6 @@
self.build_graph_types(graph, inputcells, complete_now=False)
self.complete_helpers(policy)
return graph
-
- def annotate_helper_method(self, _class, attr, args_s, policy=None):
- """ Warning! this method is meant to be used between
- annotation and rtyping
- """
- if policy is None:
- from pypy.annotation.policy import AnnotatorPolicy
- policy = AnnotatorPolicy()
-
- assert attr != '__class__'
- classdef = self.bookkeeper.getuniqueclassdef(_class)
- attrdef = classdef.find_attribute(attr)
- s_result = attrdef.getvalue()
- classdef.add_source_for_attribute(attr, classdef.classdesc)
- self.bookkeeper
- assert isinstance(s_result, annmodel.SomePBC)
- olddesc = s_result.any_description()
- desc = olddesc.bind_self(classdef)
- args = self.bookkeeper.build_args("simple_call", args_s[:])
- desc.consider_call_site(self.bookkeeper, desc.getcallfamily(), [desc],
- args, annmodel.s_ImpossibleValue, None)
- result = []
- def schedule(graph, inputcells):
- result.append((graph, inputcells))
- return annmodel.s_ImpossibleValue
-
- prevpolicy = self.policy
- self.policy = policy
- self.bookkeeper.enter(None)
- try:
- desc.pycall(schedule, args, annmodel.s_ImpossibleValue)
- finally:
- self.bookkeeper.leave()
- self.policy = prevpolicy
- [(graph, inputcells)] = result
- self.build_graph_types(graph, inputcells, complete_now=False)
- self.complete_helpers(policy)
- return graph
def complete_helpers(self, policy):
saved = self.policy, self.added_blocks
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
--- a/pypy/annotation/description.py
+++ b/pypy/annotation/description.py
@@ -514,9 +514,9 @@
continue
self.add_source_attribute(name, value, mixin=True)
- def add_sources_for_class(self, cls, mixin=False):
+ def add_sources_for_class(self, cls):
for name, value in cls.__dict__.items():
- self.add_source_attribute(name, value, mixin)
+ self.add_source_attribute(name, value)
def getallclassdefs(self):
return self._classdefs.values()
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -2747,20 +2747,6 @@
s = a.build_types(f, [])
assert s.knowntype == int
- def test_helper_method_annotator(self):
- def fun():
- return 21
-
- class A(object):
- def helper(self):
- return 42
-
- a = self.RPythonAnnotator()
- a.build_types(fun, [])
- a.annotate_helper_method(A, "helper", [])
- assert a.bookkeeper.getdesc(A.helper).getuniquegraph()
- assert a.bookkeeper.getdesc(A().helper).getuniquegraph()
-
def test_chr_out_of_bounds(self):
def g(n, max):
if n < max:
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -71,7 +71,7 @@
c = Config(descr)
for path in c.getpaths(include_groups=True):
fn = prefix + "." + path + ".txt"
- yield check_file_exists, fn
+ yield fn, check_file_exists, fn
def test__ffi_opt():
config = get_pypy_config(translating=True)
diff --git a/pypy/doc/config/objspace.usemodules.cppyy.txt b/pypy/doc/config/objspace.usemodules.cppyy.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.usemodules.cppyy.txt
@@ -0,0 +1,1 @@
+Use the 'cppyy' module
diff --git a/pypy/doc/image/agile-talk.jpg b/pypy/doc/image/agile-talk.jpg
deleted file mode 100644
Binary file pypy/doc/image/agile-talk.jpg has changed
diff --git a/pypy/doc/image/architecture-session.jpg b/pypy/doc/image/architecture-session.jpg
deleted file mode 100644
Binary file pypy/doc/image/architecture-session.jpg has changed
diff --git a/pypy/doc/image/bram.jpg b/pypy/doc/image/bram.jpg
deleted file mode 100644
Binary file pypy/doc/image/bram.jpg has changed
diff --git a/pypy/doc/image/coding-discussion.jpg b/pypy/doc/image/coding-discussion.jpg
deleted file mode 100644
Binary file pypy/doc/image/coding-discussion.jpg has changed
diff --git a/pypy/doc/image/guido.jpg b/pypy/doc/image/guido.jpg
deleted file mode 100644
Binary file pypy/doc/image/guido.jpg has changed
diff --git a/pypy/doc/image/interview-bobippolito.jpg b/pypy/doc/image/interview-bobippolito.jpg
deleted file mode 100644
Binary file pypy/doc/image/interview-bobippolito.jpg has changed
diff --git a/pypy/doc/image/interview-timpeters.jpg b/pypy/doc/image/interview-timpeters.jpg
deleted file mode 100644
Binary file pypy/doc/image/interview-timpeters.jpg has changed
diff --git a/pypy/doc/image/introductory-student-talk.jpg b/pypy/doc/image/introductory-student-talk.jpg
deleted file mode 100644
Binary file pypy/doc/image/introductory-student-talk.jpg has changed
diff --git a/pypy/doc/image/introductory-talk-pycon.jpg b/pypy/doc/image/introductory-talk-pycon.jpg
deleted file mode 100644
Binary file pypy/doc/image/introductory-talk-pycon.jpg has changed
diff --git a/pypy/doc/image/ironpython.jpg b/pypy/doc/image/ironpython.jpg
deleted file mode 100644
Binary file pypy/doc/image/ironpython.jpg has changed
diff --git a/pypy/doc/image/mallorca-trailer.jpg b/pypy/doc/image/mallorca-trailer.jpg
deleted file mode 100644
Binary file pypy/doc/image/mallorca-trailer.jpg has changed
diff --git a/pypy/doc/image/pycon-trailer.jpg b/pypy/doc/image/pycon-trailer.jpg
deleted file mode 100644
Binary file pypy/doc/image/pycon-trailer.jpg has changed
diff --git a/pypy/doc/image/sprint-tutorial.jpg b/pypy/doc/image/sprint-tutorial.jpg
deleted file mode 100644
Binary file pypy/doc/image/sprint-tutorial.jpg has changed
diff --git a/pypy/doc/video-index.rst b/pypy/doc/video-index.rst
--- a/pypy/doc/video-index.rst
+++ b/pypy/doc/video-index.rst
@@ -2,39 +2,11 @@
PyPy video documentation
=========================
-Requirements to download and view
----------------------------------
-
-In order to download the videos you need to point a
-BitTorrent client at the torrent files provided below.
-We do not provide any other download method at this
-time. Please get a BitTorrent client (such as bittorrent).
-For a list of clients please
-see http://en.wikipedia.org/wiki/Category:Free_BitTorrent_clients or
-http://en.wikipedia.org/wiki/Comparison_of_BitTorrent_clients.
-For more information about Bittorrent see
-http://en.wikipedia.org/wiki/Bittorrent.
-
-In order to view the downloaded movies you need to
-have a video player that supports DivX AVI files (DivX 5, mp3 audio)
-such as `mplayer`_, `xine`_, `vlc`_ or the windows media player.
-
-.. _`mplayer`: http://www.mplayerhq.hu/design7/dload.html
-.. _`xine`: http://www.xine-project.org
-.. _`vlc`: http://www.videolan.org/vlc/
-
-You can find the necessary codecs in the ffdshow-library:
-http://sourceforge.net/projects/ffdshow/
-
-or use the original divx codec (for Windows):
-http://www.divx.com/software/divx-plus
-
-
Copyrights and Licensing
----------------------------
-The following videos are copyrighted by merlinux gmbh and
-published under the Creative Commons Attribution License 2.0 Germany: http://creativecommons.org/licenses/by/2.0/de/
+The following videos are copyrighted by merlinux gmbh and available on
+YouTube.
If you need another license, don't hesitate to contact us.
@@ -42,255 +14,202 @@
Trailer: PyPy at the PyCon 2006
-------------------------------
-130mb: http://buildbot.pypy.org/misc/torrent/pycon-trailer.avi.torrent
+This trailer shows the PyPy team at the PyCon 2006, a behind-the-scenes at
+sprints, talks and everywhere else.
-71mb: http://buildbot.pypy.org/misc/torrent/pycon-trailer-medium.avi.torrent
+.. raw:: html
-50mb: http://buildbot.pypy.org/misc/torrent/pycon-trailer-320x240.avi.torrent
-
-.. image:: image/pycon-trailer.jpg
- :scale: 100
- :alt: Trailer PyPy at PyCon
- :align: left
-
-This trailer shows the PyPy team at the PyCon 2006, a behind-the-scenes at sprints, talks and everywhere else.
-
-PAL, 9 min, DivX AVI
-
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/WfGszrRUdtc?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Interview with Tim Peters
-------------------------
-440mb: http://buildbot.pypy.org/misc/torrent/interview-timpeters-v2.avi.torrent
+Interview with CPython core developer Tim Peters at PyCon 2006, Dallas,
+US. (2006-03-02)
-138mb: http://buildbot.pypy.org/misc/torrent/interview-timpeters-320x240.avi.torrent
+Tim Peters, a longtime CPython core developer talks about how he got into
+Python, what he thinks about the PyPy project and why he thinks it would have
+never been possible in the US.
-.. image:: image/interview-timpeters.jpg
- :scale: 100
- :alt: Interview with Tim Peters
- :align: left
+.. raw:: html
-Interview with CPython core developer Tim Peters at PyCon 2006, Dallas, US. (2006-03-02)
-
-PAL, 23 min, DivX AVI
-
-Tim Peters, a longtime CPython core developer talks about how he got into Python, what he thinks about the PyPy project and why he thinks it would have never been possible in the US.
-
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/1wAOy88WxmY?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Interview with Bob Ippolito
---------------------------
-155mb: http://buildbot.pypy.org/misc/torrent/interview-bobippolito-v2.avi.torrent
+What do you think about PyPy? Interview with American software developer Bob
+Ippolito at PyCon 2006, Dallas, US. (2006-03-01)
-50mb: http://buildbot.pypy.org/misc/torrent/interview-bobippolito-320x240.avi.torrent
+Bob Ippolito is an Open Source software developer from San Francisco and has
+been to two PyPy sprints. In this interview he is giving his opinion on the
+project.
-.. image:: image/interview-bobippolito.jpg
- :scale: 100
- :alt: Interview with Bob Ippolito
- :align: left
+.. raw:: html
-What do you think about PyPy? Interview with American software developer Bob Ippolito at tPyCon 2006, Dallas, US. (2006-03-01)
-
-PAL 8 min, DivX AVI
-
-Bob Ippolito is an Open Source software developer from San Francisco and has been to two PyPy sprints. In this interview he is giving his opinion on the project.
-
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/c5rq4Q03zgg?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Introductory talk on PyPy
-------------------------
-430mb: http://buildbot.pypy.org/misc/torrent/introductory-talk-pycon-v1.avi.torrent
-
-166mb: http://buildbot.pypy.org/misc/torrent/introductory-talk-pycon-320x240.avi.torrent
-
-.. image:: image/introductory-talk-pycon.jpg
- :scale: 100
- :alt: Introductory talk at PyCon 2006
- :align: left
-
-This introductory talk is given by core developers Michael Hudson and Christian Tismer at PyCon 2006, Dallas, US. (2006-02-26)
-
-PAL, 28 min, divx AVI
+This introductory talk is given by core developers Michael Hudson and
+Christian Tismer at PyCon 2006, Dallas, US. (2006-02-26)
Michael Hudson talks about the basic building blocks of Python, the currently
available back-ends, and the status of PyPy in general. Christian Tismer takes
-over to explain how co-routines can be used to implement things like
-Stackless and Greenlets in PyPy.
+over to explain how co-routines can be used to implement things like Stackless
+and Greenlets in PyPy.
+.. raw:: html
+
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/AWUhXW2pLDE?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Talk on Agile Open Source Methods in the PyPy project
-----------------------------------------------------
-395mb: http://buildbot.pypy.org/misc/torrent/agile-talk-v1.avi.torrent
-
-153mb: http://buildbot.pypy.org/misc/torrent/agile-talk-320x240.avi.torrent
-
-.. image:: image/agile-talk.jpg
- :scale: 100
- :alt: Agile talk
- :align: left
-
-Core developer Holger Krekel and project manager Beatrice During are giving a talk on the agile open source methods used in the PyPy project at PyCon 2006, Dallas, US. (2006-02-26)
-
-PAL, 26 min, divx AVI
+Core developer Holger Krekel and project manager Beatrice During are giving a
+talk on the agile open source methods used in the PyPy project at PyCon 2006,
+Dallas, US. (2006-02-26)
Holger Krekel explains more about the goals and history of PyPy, and the
structure and organization behind it. Bea During describes the intricacies of
driving a distributed community in an agile way, and how to combine that with
the formalities required for EU funding.
+.. raw:: html
+
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/ed-zAxZtGlY?rel=0"
+ frameborder="0" allowfullscreen></iframe>
PyPy Architecture session
-------------------------
-744mb: http://buildbot.pypy.org/misc/torrent/architecture-session-v1.avi.torrent
-
-288mb: http://buildbot.pypy.org/misc/torrent/architecture-session-320x240.avi.torrent
-
-.. image:: image/architecture-session.jpg
- :scale: 100
- :alt: Architecture session
- :align: left
-
-This architecture session is given by core developers Holger Krekel and Armin Rigo at PyCon 2006, Dallas, US. (2006-02-26)
-
-PAL, 48 min, divx AVI
+This architecture session is given by core developers Holger Krekel and Armin
+Rigo at PyCon 2006, Dallas, US. (2006-02-26)
Holger Krekel and Armin Rigo talk about the basic implementation,
-implementation level aspects and the RPython translation toolchain. This
-talk also gives an insight into how a developer works with these tools on
-a daily basis, and pays special attention to flow graphs.
+implementation level aspects and the RPython translation toolchain. This talk
+also gives an insight into how a developer works with these tools on a daily
+basis, and pays special attention to flow graphs.
+.. raw:: html
+
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/7opXGaQUUA4?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Sprint tutorial
---------------
-680mb: http://buildbot.pypy.org/misc/torrent/sprint-tutorial-v2.avi.torrent
+Sprint tutorial by core developer Michael Hudson at PyCon 2006, Dallas,
+US. (2006-02-27)
-263mb: http://buildbot.pypy.org/misc/torrent/sprint-tutorial-320x240.avi.torrent
+Michael Hudson gives an in-depth, very technical introduction to a PyPy
+sprint. The film provides a detailed and hands-on overview about the
+architecture of PyPy, especially the RPython translation toolchain.
-.. image:: image/sprint-tutorial.jpg
- :scale: 100
- :alt: Sprint Tutorial
- :align: left
+.. raw:: html
-Sprint tutorial by core developer Michael Hudson at PyCon 2006, Dallas, US. (2006-02-27)
-
-PAL, 44 min, divx AVI
-
-Michael Hudson gives an in-depth, very technical introduction to a PyPy sprint. The film provides a detailed and hands-on overview about the architecture of PyPy, especially the RPython translation toolchain.
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/1YV7J74xrMI?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Scripting .NET with IronPython by Jim Hugunin
---------------------------------------------
-372mb: http://buildbot.pypy.org/misc/torrent/ironpython-talk-v2.avi.torrent
+Talk by Jim Hugunin (Microsoft) on the IronPython implementation on the .NET
+framework at the PyCon 2006, Dallas, US.
-270mb: http://buildbot.pypy.org/misc/torrent/ironpython-talk-320x240.avi.torrent
+Jim Hugunin talks about regression tests, the code generation and the object
+layout, the new-style instance and gives a CLS interop demo.
-.. image:: image/ironpython.jpg
- :scale: 100
- :alt: Jim Hugunin on IronPython
- :align: left
+.. raw:: html
-Talk by Jim Hugunin (Microsoft) on the IronPython implementation on the .NET framework at this years PyCon, Dallas, US.
-
-PAL, 44 min, DivX AVI
-
-Jim Hugunin talks about regression tests, the code generation and the object layout, the new-style instance and gives a CLS interop demo.
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/bq9ZGN3-o80?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Bram Cohen, founder and developer of BitTorrent
-----------------------------------------------
-509mb: http://buildbot.pypy.org/misc/torrent/bram-cohen-interview-v1.avi.torrent
+Bram Cohen is interviewed by Steve Holden at the PyCon 2006, Dallas, US.
-370mb: http://buildbot.pypy.org/misc/torrent/bram-cohen-interview-320x240.avi.torrent
+.. raw:: html
-.. image:: image/bram.jpg
- :scale: 100
- :alt: Bram Cohen on BitTorrent
- :align: left
-
-Bram Cohen is interviewed by Steve Holden at this years PyCon, Dallas, US.
-
-PAL, 60 min, DivX AVI
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/EopmJWrLmWI?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Keynote speech by Guido van Rossum on the new Python 2.5 features
-----------------------------------------------------------------
-695mb: http://buildbot.pypy.org/misc/torrent/keynote-speech_guido-van-rossum_v1.avi.torrent
+Guido van Rossum explains the new Python 2.5 features at the PyCon 2006,
+Dallas, US.
-430mb: http://buildbot.pypy.org/misc/torrent/keynote-speech_guido-van-rossum_320x240.avi.torrent
+.. raw:: html
-.. image:: image/guido.jpg
- :scale: 100
- :alt: Guido van Rossum on Python 2.5
- :align: left
-
-Guido van Rossum explains the new Python 2.5 features at this years PyCon, Dallas, US.
-
-PAL, 70 min, DivX AVI
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/RR2sX8tFGsI?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Trailer: PyPy sprint at the University of Palma de Mallorca
-----------------------------------------------------------
-166mb: http://buildbot.pypy.org/misc/torrent/mallorca-trailer-v1.avi.torrent
+This trailer shows the PyPy team at the sprint in Mallorca, a
+behind-the-scenes of a typical PyPy coding sprint and talk as well as
+everything else.
-88mb: http://buildbot.pypy.org/misc/torrent/mallorca-trailer-medium.avi.torrent
+.. raw:: html
-64mb: http://buildbot.pypy.org/misc/torrent/mallorca-trailer-320x240.avi.torrent
-
-.. image:: image/mallorca-trailer.jpg
- :scale: 100
- :alt: Trailer PyPy sprint in Mallorca
- :align: left
-
-This trailer shows the PyPy team at the sprint in Mallorca, a behind-the-scenes of a typical PyPy coding sprint and talk as well as everything else.
-
-PAL, 11 min, DivX AVI
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/swsnRfj_cek?rel=0"
+ frameborder="0" allowfullscreen></iframe>
Coding discussion of core developers Armin Rigo and Samuele Pedroni
-------------------------------------------------------------------
-620mb: http://buildbot.pypy.org/misc/torrent/coding-discussion-v1.avi.torrent
+Coding discussion between Armin Rigo and Samuele Pedroni during the PyPy
+sprint at the University of Palma de Mallorca, Spain. 27.1.2006
-240mb: http://buildbot.pypy.org/misc/torrent/coding-discussion-320x240.avi.torrent
+.. raw:: html
-.. image:: image/coding-discussion.jpg
- :scale: 100
- :alt: Coding discussion
- :align: left
-
-Coding discussion between Armin Rigo and Samuele Pedroni during the PyPy sprint at the University of Palma de Mallorca, Spain. 27.1.2006
-
-PAL 40 min, DivX AVI
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/H_IgK9qmEss?rel=0"
+ frameborder="0" allowfullscreen></iframe>
PyPy technical talk at the University of Palma de Mallorca
----------------------------------------------------------
-865mb: http://buildbot.pypy.org/misc/torrent/introductory-student-talk-v2.avi.torrent
-
-437mb: http://buildbot.pypy.org/misc/torrent/introductory-student-talk-320x240.avi.torrent
-
-.. image:: image/introductory-student-talk.jpg
- :scale: 100
- :alt: Introductory student talk
- :align: left
-
Technical talk on the PyPy project at the University of Palma de Mallorca, Spain. 27.1.2006
-PAL 72 min, DivX AVI
+Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving
+an overview of the PyPy architecture, the standard interpreter, the RPython
+translation toolchain and the just-in-time compiler.
-Core developers Armin Rigo, Samuele Pedroni and Carl Friedrich Bolz are giving an overview of the PyPy architecture, the standard interpreter, the RPython translation toolchain and the just-in-time compiler.
+.. raw:: html
+ <iframe width="420" height="315"
+ src="http://www.youtube.com/embed/6dnUzVQaSlg?rel=0"
+ frameborder="0" allowfullscreen></iframe>
+
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -496,7 +496,12 @@
# apply kw_spec
for name, spec in kw_spec.items():
- unwrap_spec[argnames.index(name)] = spec
+ try:
+ unwrap_spec[argnames.index(name)] = spec
+ except ValueError:
+ raise ValueError("unwrap_spec() got a keyword %r but it is not "
+ "the name of an argument of the following "
+ "function" % (name,))
return unwrap_spec
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -9,7 +9,7 @@
from pypy.objspace.std.multimethod import FailedToImplement
from pypy.objspace.std.stdtypedef import SMM, StdTypeDef
from pypy.objspace.std.register_all import register_all
-from pypy.rlib.rarithmetic import ovfcheck
+from pypy.rlib.rarithmetic import ovfcheck, widen
from pypy.rlib.unroll import unrolling_iterable
from pypy.rlib.objectmodel import specialize, keepalive_until_here
from pypy.rpython.lltypesystem import lltype, rffi
@@ -227,20 +227,29 @@
# length
self.setlen(0)
- def setlen(self, size):
+ def setlen(self, size, zero=False, overallocate=True):
if size > 0:
if size > self.allocated or size < self.allocated / 2:
- if size < 9:
- some = 3
+ if overallocate:
+ if size < 9:
+ some = 3
+ else:
+ some = 6
+ some += size >> 3
else:
- some = 6
- some += size >> 3
+ some = 0
self.allocated = size + some
- new_buffer = lltype.malloc(mytype.arraytype,
- self.allocated, flavor='raw',
- add_memory_pressure=True)
- for i in range(min(size, self.len)):
- new_buffer[i] = self.buffer[i]
+ if zero:
+ new_buffer = lltype.malloc(mytype.arraytype,
+ self.allocated, flavor='raw',
+ add_memory_pressure=True,
+ zero=True)
+ else:
+ new_buffer = lltype.malloc(mytype.arraytype,
+ self.allocated, flavor='raw',
+ add_memory_pressure=True)
+ for i in range(min(size, self.len)):
+ new_buffer[i] = self.buffer[i]
else:
self.len = size
return
@@ -346,7 +355,7 @@
def getitem__Array_Slice(space, self, w_slice):
start, stop, step, size = space.decode_index4(w_slice, self.len)
w_a = mytype.w_class(self.space)
- w_a.setlen(size)
+ w_a.setlen(size, overallocate=False)
assert step != 0
j = 0
for i in range(start, stop, step):
@@ -368,26 +377,18 @@
def setitem__Array_Slice_Array(space, self, w_idx, w_item):
start, stop, step, size = self.space.decode_index4(w_idx, self.len)
assert step != 0
- if w_item.len != size:
+ if w_item.len != size or self is w_item:
+ # XXX this is a giant slow hack
w_lst = array_tolist__Array(space, self)
w_item = space.call_method(w_item, 'tolist')
space.setitem(w_lst, w_idx, w_item)
self.setlen(0)
self.fromsequence(w_lst)
else:
- if self is w_item:
- with lltype.scoped_alloc(mytype.arraytype, self.allocated) as new_buffer:
- for i in range(self.len):
- new_buffer[i] = w_item.buffer[i]
- j = 0
- for i in range(start, stop, step):
- self.buffer[i] = new_buffer[j]
- j += 1
- else:
- j = 0
- for i in range(start, stop, step):
- self.buffer[i] = w_item.buffer[j]
- j += 1
+ j = 0
+ for i in range(start, stop, step):
+ self.buffer[i] = w_item.buffer[j]
+ j += 1
def setslice__Array_ANY_ANY_ANY(space, self, w_i, w_j, w_x):
space.setitem(self, space.newslice(w_i, w_j, space.w_None), w_x)
@@ -459,6 +460,7 @@
self.buffer[i] = val
def delitem__Array_ANY(space, self, w_idx):
+ # XXX this is a giant slow hack
w_lst = array_tolist__Array(space, self)
space.delitem(w_lst, w_idx)
self.setlen(0)
@@ -471,7 +473,7 @@
def add__Array_Array(space, self, other):
a = mytype.w_class(space)
- a.setlen(self.len + other.len)
+ a.setlen(self.len + other.len, overallocate=False)
for i in range(self.len):
a.buffer[i] = self.buffer[i]
for i in range(other.len):
@@ -487,46 +489,58 @@
return self
def mul__Array_ANY(space, self, w_repeat):
+ return _mul_helper(space, self, w_repeat, False)
+
+ def mul__ANY_Array(space, w_repeat, self):
+ return _mul_helper(space, self, w_repeat, False)
+
+ def inplace_mul__Array_ANY(space, self, w_repeat):
+ return _mul_helper(space, self, w_repeat, True)
+
+ def _mul_helper(space, self, w_repeat, is_inplace):
try:
repeat = space.getindex_w(w_repeat, space.w_OverflowError)
except OperationError, e:
if e.match(space, space.w_TypeError):
raise FailedToImplement
raise
- a = mytype.w_class(space)
repeat = max(repeat, 0)
try:
newlen = ovfcheck(self.len * repeat)
except OverflowError:
raise MemoryError
- a.setlen(newlen)
- for r in range(repeat):
- for i in range(self.len):
- a.buffer[r * self.len + i] = self.buffer[i]
+ oldlen = self.len
+ if is_inplace:
+ a = self
+ start = 1
+ else:
+ a = mytype.w_class(space)
+ start = 0
+ # <a performance hack>
+ if oldlen == 1:
+ if mytype.unwrap == 'str_w' or mytype.unwrap == 'unicode_w':
+ zero = not ord(self.buffer[0])
+ elif mytype.unwrap == 'int_w' or mytype.unwrap == 'bigint_w':
+ zero = not widen(self.buffer[0])
+ #elif mytype.unwrap == 'float_w':
+ # value = ...float(self.buffer[0]) xxx handle the case of -0.0
+ else:
+ zero = False
+ if zero:
+ a.setlen(newlen, zero=True, overallocate=False)
+ return a
+ a.setlen(newlen, overallocate=False)
+ item = self.buffer[0]
+ for r in range(start, repeat):
+ a.buffer[r] = item
+ return a
+ # </a performance hack>
+ a.setlen(newlen, overallocate=False)
+ for r in range(start, repeat):
+ for i in range(oldlen):
+ a.buffer[r * oldlen + i] = self.buffer[i]
return a
- def mul__ANY_Array(space, w_repeat, self):
- return mul__Array_ANY(space, self, w_repeat)
-
- def inplace_mul__Array_ANY(space, self, w_repeat):
- try:
- repeat = space.getindex_w(w_repeat, space.w_OverflowError)
- except OperationError, e:
- if e.match(space, space.w_TypeError):
- raise FailedToImplement
- raise
- oldlen = self.len
- repeat = max(repeat, 0)
- try:
- newlen = ovfcheck(self.len * repeat)
- except OverflowError:
- raise MemoryError
- self.setlen(newlen)
- for r in range(1, repeat):
- for i in range(oldlen):
- self.buffer[r * oldlen + i] = self.buffer[i]
- return self
-
# Convertions
def array_tolist__Array(space, self):
@@ -602,6 +616,7 @@
# Compare methods
@specialize.arg(3)
def _cmp_impl(space, self, other, space_fn):
+ # XXX this is a giant slow hack
w_lst1 = array_tolist__Array(space, self)
w_lst2 = space.call_method(other, 'tolist')
return space_fn(w_lst1, w_lst2)
@@ -648,7 +663,7 @@
def array_copy__Array(space, self):
w_a = mytype.w_class(self.space)
- w_a.setlen(self.len)
+ w_a.setlen(self.len, overallocate=False)
rffi.c_memcpy(
rffi.cast(rffi.VOIDP, w_a.buffer),
rffi.cast(rffi.VOIDP, self.buffer),
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -890,6 +890,54 @@
a[::-1] = a
assert a == self.array('b', [3, 2, 1, 0])
+ def test_array_multiply(self):
+ a = self.array('b', [0])
+ b = a * 13
+ assert b[12] == 0
+ b = 13 * a
+ assert b[12] == 0
+ a *= 13
+ assert a[12] == 0
+ a = self.array('b', [1])
+ b = a * 13
+ assert b[12] == 1
+ b = 13 * a
+ assert b[12] == 1
+ a *= 13
+ assert a[12] == 1
+ a = self.array('i', [0])
+ b = a * 13
+ assert b[12] == 0
+ b = 13 * a
+ assert b[12] == 0
+ a *= 13
+ assert a[12] == 0
+ a = self.array('i', [1])
+ b = a * 13
+ assert b[12] == 1
+ b = 13 * a
+ assert b[12] == 1
+ a *= 13
+ assert a[12] == 1
+ a = self.array('i', [0, 0])
+ b = a * 13
+ assert len(b) == 26
+ assert b[22] == 0
+ b = 13 * a
+ assert len(b) == 26
+ assert b[22] == 0
+ a *= 13
+ assert a[22] == 0
+ assert len(a) == 26
+ a = self.array('f', [-0.0])
+ b = a * 13
+ assert len(b) == 13
+ assert str(b[12]) == "-0.0"
+ a = self.array('d', [-0.0])
+ b = a * 13
+ assert len(b) == 13
+ assert str(b[12]) == "-0.0"
+
class AppTestArrayBuiltinShortcut(AppTestArray):
OPTIONS = {'objspace.std.builtinshortcut': True}
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -38,12 +38,14 @@
PyObject_VAR_HEAD
} PyVarObject;
-#ifndef PYPY_DEBUG_REFCOUNT
+#ifdef PYPY_DEBUG_REFCOUNT
+/* Slow version, but useful for debugging */
#define Py_INCREF(ob) (Py_IncRef((PyObject *)ob))
#define Py_DECREF(ob) (Py_DecRef((PyObject *)ob))
#define Py_XINCREF(ob) (Py_IncRef((PyObject *)ob))
#define Py_XDECREF(ob) (Py_DecRef((PyObject *)ob))
#else
+/* Fast version */
#define Py_INCREF(ob) (((PyObject *)ob)->ob_refcnt++)
#define Py_DECREF(ob) ((((PyObject *)ob)->ob_refcnt > 1) ? \
((PyObject *)ob)->ob_refcnt-- : (Py_DecRef((PyObject *)ob)))
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -6,7 +6,7 @@
PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t)
from pypy.module.cpyext.pyobject import (
make_typedescr, track_reference, RefcountState, from_ref)
-from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST
+from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST, r_ulonglong
from pypy.objspace.std.intobject import W_IntObject
import sys
@@ -83,6 +83,20 @@
num = space.bigint_w(w_int)
return num.uintmask()
+ at cpython_api([PyObject], rffi.ULONGLONG, error=-1)
+def PyInt_AsUnsignedLongLongMask(space, w_obj):
+ """Will first attempt to cast the object to a PyIntObject or
+ PyLongObject, if it is not already one, and then return its value as
+ unsigned long long, without checking for overflow.
+ """
+ w_int = space.int(w_obj)
+ if space.is_true(space.isinstance(w_int, space.w_int)):
+ num = space.int_w(w_int)
+ return r_ulonglong(num)
+ else:
+ num = space.bigint_w(w_int)
+ return num.ulonglongmask()
+
@cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL)
def PyInt_AS_LONG(space, w_int):
"""Return the value of the object w_int. No error checking is performed."""
diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py
--- a/pypy/module/cpyext/test/test_intobject.py
+++ b/pypy/module/cpyext/test/test_intobject.py
@@ -34,6 +34,11 @@
assert (api.PyInt_AsUnsignedLongMask(space.wrap(10**30))
== 10**30 % ((sys.maxint + 1) * 2))
+ assert (api.PyInt_AsUnsignedLongLongMask(space.wrap(sys.maxint))
+ == sys.maxint)
+ assert (api.PyInt_AsUnsignedLongLongMask(space.wrap(10**30))
+ == 10**30 % (2**64))
+
def test_coerce(self, space, api):
class Coerce(object):
def __int__(self):
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -429,7 +429,12 @@
def find_in_path_hooks(space, w_modulename, w_pathitem):
w_importer = _getimporter(space, w_pathitem)
if w_importer is not None and space.is_true(w_importer):
- w_loader = space.call_method(w_importer, "find_module", w_modulename)
+ try:
+ w_loader = space.call_method(w_importer, "find_module", w_modulename)
+ except OperationError, e:
+ if e.match(space, space.w_ImportError):
+ return None
+ raise
if space.is_true(w_loader):
return w_loader
diff --git a/pypy/module/imp/test/hooktest.py b/pypy/module/imp/test/hooktest.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/imp/test/hooktest.py
@@ -0,0 +1,30 @@
+import sys, imp
+
+__path__ = [ ]
+
+class Loader(object):
+ def __init__(self, file, filename, stuff):
+ self.file = file
+ self.filename = filename
+ self.stuff = stuff
+
+ def load_module(self, fullname):
+ mod = imp.load_module(fullname, self.file, self.filename, self.stuff)
+ if self.file:
+ self.file.close()
+ mod.__loader__ = self # for introspection
+ return mod
+
+class Importer(object):
+ def __init__(self, path):
+ if path not in __path__:
+ raise ImportError
+
+ def find_module(self, fullname, path=None):
+ if not fullname.startswith('hooktest'):
+ return None
+
+ _, mod_name = fullname.rsplit('.',1)
+ found = imp.find_module(mod_name, path or __path__)
+
+ return Loader(*found)
diff --git a/pypy/module/imp/test/hooktest/foo.py b/pypy/module/imp/test/hooktest/foo.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/imp/test/hooktest/foo.py
@@ -0,0 +1,1 @@
+import errno # Any existing toplevel module
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -989,8 +989,22 @@
class AppTestImportHooks(object):
def setup_class(cls):
- cls.space = gettestobjspace(usemodules=('struct',))
-
+ space = cls.space = gettestobjspace(usemodules=('struct',))
+ mydir = os.path.dirname(__file__)
+ cls.w_hooktest = space.wrap(os.path.join(mydir, 'hooktest'))
+ space.appexec([space.wrap(mydir)], """
+ (mydir):
+ import sys
+ sys.path.append(mydir)
+ """)
+
+ def teardown_class(cls):
+ cls.space.appexec([], """
+ ():
+ import sys
+ sys.path.pop()
+ """)
+
def test_meta_path(self):
tried_imports = []
class Importer(object):
@@ -1127,6 +1141,23 @@
sys.meta_path.pop()
sys.path_hooks.pop()
+ def test_path_hooks_module(self):
+ "Verify that non-sibling imports from a module loaded by a path hook work"
+
+ import sys
+ import hooktest
+
+ hooktest.__path__.append(self.hooktest) # Avoid importing os at applevel
+
+ sys.path_hooks.append(hooktest.Importer)
+
+ try:
+ import hooktest.foo
+ def import_nonexisting():
+ import hooktest.errno
+ raises(ImportError, import_nonexisting)
+ finally:
+ sys.path_hooks.pop()
class AppTestPyPyExtension(object):
def setup_class(cls):
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -163,6 +163,8 @@
'sum': 'app_numpy.sum',
'min': 'app_numpy.min',
'identity': 'app_numpy.identity',
+ 'eye': 'app_numpy.eye',
'max': 'app_numpy.max',
'arange': 'app_numpy.arange',
+ 'count_nonzero': 'app_numpy.count_nonzero',
}
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -2,6 +2,10 @@
import _numpypy
+def count_nonzero(a):
+ if not hasattr(a, 'count_nonzero'):
+ a = _numpypy.array(a)
+ return a.count_nonzero()
def average(a):
# This implements a weighted average, for now we don't implement the
@@ -16,6 +20,26 @@
a[i][i] = 1
return a
+def eye(n, m=None, k=0, dtype=None):
+ if m is None:
+ m = n
+ a = _numpypy.zeros((n, m), dtype=dtype)
+ ni = 0
+ mi = 0
+
+ if k < 0:
+ p = n + k
+ ni = -k
+ else:
+ p = n - k
+ mi = k
+
+ while ni < n and mi < m:
+ a[ni][mi] = 1
+ ni += 1
+ mi += 1
+ return a
+
def sum(a,axis=None, out=None):
'''sum(a, axis=None)
Sum of array elements over a given axis.
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -35,7 +35,7 @@
pass
SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any",
- "unegative", "flat", "tostring"]
+ "unegative", "flat", "tostring","count_nonzero"]
TWO_ARG_FUNCTIONS = ["dot", 'take']
THREE_ARG_FUNCTIONS = ['where']
@@ -445,6 +445,8 @@
elif self.name == "tostring":
arr.descr_tostring(interp.space)
w_res = None
+ elif self.name == "count_nonzero":
+ w_res = arr.descr_count_nonzero(interp.space)
else:
assert False # unreachable code
elif self.name in TWO_ARG_FUNCTIONS:
@@ -478,6 +480,8 @@
return w_res
if isinstance(w_res, FloatObject):
dtype = get_dtype_cache(interp.space).w_float64dtype
+ elif isinstance(w_res, IntObject):
+ dtype = get_dtype_cache(interp.space).w_int64dtype
elif isinstance(w_res, BoolObject):
dtype = get_dtype_cache(interp.space).w_booldtype
elif isinstance(w_res, interp_boxes.W_GenericBox):
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -402,6 +402,11 @@
i += 1
return Chunks(result)
+ def descr_count_nonzero(self, space):
+ concr = self.get_concrete()
+ res = concr.count_all_true()
+ return space.wrap(res)
+
def count_all_true(self):
sig = self.find_sig()
frame = sig.create_frame(self)
@@ -1486,6 +1491,7 @@
take = interp2app(BaseArray.descr_take),
compress = interp2app(BaseArray.descr_compress),
repeat = interp2app(BaseArray.descr_repeat),
+ count_nonzero = interp2app(BaseArray.descr_count_nonzero),
)
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -1155,6 +1155,38 @@
assert d.shape == (3, 3)
assert d.dtype == dtype('int32')
assert (d == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).all()
+
+ def test_eye(self):
+ from _numpypy import eye, array
+ from _numpypy import int32, float64, dtype
+ a = eye(0)
+ assert len(a) == 0
+ assert a.dtype == dtype('float64')
+ assert a.shape == (0, 0)
+ b = eye(1, dtype=int32)
+ assert len(b) == 1
+ assert b[0][0] == 1
+ assert b.shape == (1, 1)
+ assert b.dtype == dtype('int32')
+ c = eye(2)
+ assert c.shape == (2, 2)
+ assert (c == [[1, 0], [0, 1]]).all()
+ d = eye(3, dtype='int32')
+ assert d.shape == (3, 3)
+ assert d.dtype == dtype('int32')
+ assert (d == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).all()
+ e = eye(3, 4)
+ assert e.shape == (3, 4)
+ assert (e == [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]).all()
+ f = eye(2, 4, k=3)
+ assert f.shape == (2, 4)
+ assert (f == [[0, 0, 0, 1], [0, 0, 0, 0]]).all()
+ g = eye(3, 4, k=-1)
+ assert g.shape == (3, 4)
+ assert (g == [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0]]).all()
+
+
+
def test_prod(self):
from _numpypy import array
@@ -2010,6 +2042,12 @@
raises(ValueError, "array(5).item(1)")
assert array([1]).item() == 1
+ def test_count_nonzero(self):
+ from _numpypy import array
+ a = array([1,0,5,0,10])
+ assert a.count_nonzero() == 3
+
+
class AppTestSupport(BaseNumpyAppTest):
def setup_class(cls):
import struct
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -640,6 +640,13 @@
raises(ValueError, count_reduce_items, a, -4)
raises(ValueError, count_reduce_items, a, (0, 2, -4))
+ def test_count_nonzero(self):
+ from _numpypy import where, count_nonzero, arange
+ a = arange(10)
+ assert count_nonzero(a) == 9
+ a[9] = 0
+ assert count_nonzero(a) == 8
+
def test_true_divide(self):
from _numpypy import arange, array, true_divide
assert (true_divide(arange(3), array([2, 2, 2])) == array([0, 0.5, 1])).all()
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -479,3 +479,22 @@
'int_sub': 3,
'jump': 1,
'setinteriorfield_raw': 1})
+
+ def define_count_nonzero():
+ return """
+ a = [[0, 2, 3, 4], [5, 6, 0, 8], [9, 10, 11, 0]]
+ count_nonzero(a)
+ """
+
+ def test_count_nonzero(self):
+ result = self.run("count_nonzero")
+ assert result == 9
+ self.check_simple_loop({'setfield_gc': 3,
+ 'getinteriorfield_raw': 1,
+ 'guard_false': 1,
+ 'jump': 1,
+ 'int_ge': 1,
+ 'new_with_vtable': 1,
+ 'int_add': 2,
+ 'float_ne': 1})
+
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -7,6 +7,7 @@
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rpython.tool import rffi_platform
from pypy.translator.tool.cbuild import ExternalCompilationInfo
+import sys
eci = ExternalCompilationInfo(
@@ -20,14 +21,26 @@
_compilation_info_ = eci
-CConfig.kevent = rffi_platform.Struct("struct kevent", [
- ("ident", rffi.UINTPTR_T),
- ("filter", rffi.SHORT),
- ("flags", rffi.USHORT),
- ("fflags", rffi.UINT),
- ("data", rffi.INTPTR_T),
- ("udata", rffi.VOIDP),
-])
+if "openbsd" in sys.platform:
+ IDENT_UINT = True
+ CConfig.kevent = rffi_platform.Struct("struct kevent", [
+ ("ident", rffi.UINT),
+ ("filter", rffi.SHORT),
+ ("flags", rffi.USHORT),
+ ("fflags", rffi.UINT),
+ ("data", rffi.INT),
+ ("udata", rffi.VOIDP),
+ ])
+else:
+ IDENT_UINT = False
+ CConfig.kevent = rffi_platform.Struct("struct kevent", [
+ ("ident", rffi.UINTPTR_T),
+ ("filter", rffi.SHORT),
+ ("flags", rffi.USHORT),
+ ("fflags", rffi.UINT),
+ ("data", rffi.INTPTR_T),
+ ("udata", rffi.VOIDP),
+ ])
CConfig.timespec = rffi_platform.Struct("struct timespec", [
@@ -243,16 +256,24 @@
self.event.c_udata = rffi.cast(rffi.VOIDP, udata)
def _compare_all_fields(self, other, op):
- l_ident = self.event.c_ident
- r_ident = other.event.c_ident
+ if IDENT_UINT:
+ l_ident = rffi.cast(lltype.Unsigned, self.event.c_ident)
+ r_ident = rffi.cast(lltype.Unsigned, other.event.c_ident)
+ else:
+ l_ident = self.event.c_ident
+ r_ident = other.event.c_ident
l_filter = rffi.cast(lltype.Signed, self.event.c_filter)
r_filter = rffi.cast(lltype.Signed, other.event.c_filter)
l_flags = rffi.cast(lltype.Unsigned, self.event.c_flags)
r_flags = rffi.cast(lltype.Unsigned, other.event.c_flags)
l_fflags = rffi.cast(lltype.Unsigned, self.event.c_fflags)
r_fflags = rffi.cast(lltype.Unsigned, other.event.c_fflags)
- l_data = self.event.c_data
- r_data = other.event.c_data
+ if IDENT_UINT:
+ l_data = rffi.cast(lltype.Signed, self.event.c_data)
+ r_data = rffi.cast(lltype.Signed, other.event.c_data)
+ else:
+ l_data = self.event.c_data
+ r_data = other.event.c_data
l_udata = rffi.cast(lltype.Unsigned, self.event.c_udata)
r_udata = rffi.cast(lltype.Unsigned, other.event.c_udata)
diff --git a/pypy/objspace/std/fake.py b/pypy/objspace/std/fake.py
--- a/pypy/objspace/std/fake.py
+++ b/pypy/objspace/std/fake.py
@@ -50,7 +50,7 @@
raise OperationError, OperationError(w_exc, w_value), tb
def fake_type(cpy_type):
- assert type(cpy_type) is type
+ assert isinstance(type(cpy_type), type)
try:
return _fake_type_cache[cpy_type]
except KeyError:
@@ -100,12 +100,19 @@
fake__new__.func_name = "fake__new__" + cpy_type.__name__
kw['__new__'] = gateway.interp2app(fake__new__)
- if cpy_type.__base__ is not object and not issubclass(cpy_type, Exception):
- assert cpy_type.__base__ is basestring, cpy_type
+ if cpy_type.__base__ is object or issubclass(cpy_type, Exception):
+ base = None
+ elif cpy_type.__base__ is basestring:
from pypy.objspace.std.basestringtype import basestring_typedef
base = basestring_typedef
+ elif cpy_type.__base__ is tuple:
+ from pypy.objspace.std.tupletype import tuple_typedef
+ base = tuple_typedef
+ elif cpy_type.__base__ is type:
+ from pypy.objspace.std.typetype import type_typedef
+ base = type_typedef
else:
- base = None
+ raise NotImplementedError(cpy_type, cpy_type.__base__)
class W_Fake(W_Object):
typedef = StdTypeDef(
cpy_type.__name__, base, **kw)
diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
--- a/pypy/rpython/lltypesystem/rlist.py
+++ b/pypy/rpython/lltypesystem/rlist.py
@@ -170,8 +170,8 @@
# adapted C code
- at enforceargs(None, int)
-def _ll_list_resize_really(l, newsize):
+ at enforceargs(None, int, None)
+def _ll_list_resize_really(l, newsize, overallocate):
"""
Ensure l.items has room for at least newsize elements, and set
l.length to newsize. Note that l.items may change, and even if
@@ -188,13 +188,15 @@
l.length = 0
l.items = _ll_new_empty_item_array(typeOf(l).TO)
return
- else:
+ elif overallocate:
if newsize < 9:
some = 3
else:
some = 6
some += newsize >> 3
new_allocated = newsize + some
+ else:
+ new_allocated = newsize
# new_allocated is a bit more than newsize, enough to ensure an amortized
# linear complexity for e.g. repeated usage of l.append(). In case
# it overflows sys.maxint, it is guaranteed negative, and the following
@@ -214,31 +216,36 @@
# this common case was factored out of _ll_list_resize
# to see if inlining it gives some speed-up.
+ at jit.dont_look_inside
def _ll_list_resize(l, newsize):
- # Bypass realloc() when a previous overallocation is large enough
- # to accommodate the newsize. If the newsize falls lower than half
- # the allocated size, then proceed with the realloc() to shrink the list.
- allocated = len(l.items)
- if allocated >= newsize and newsize >= ((allocated >> 1) - 5):
- l.length = newsize
- else:
- _ll_list_resize_really(l, newsize)
+ """Called only in special cases. Forces the allocated and actual size
+ of the list to be 'newsize'."""
+ _ll_list_resize_really(l, newsize, False)
@jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize))
@jit.oopspec("list._resize_ge(l, newsize)")
def _ll_list_resize_ge(l, newsize):
+ """This is called with 'newsize' larger than the current length of the
+ list. If the list storage doesn't have enough space, then really perform
+ a realloc(). In the common case where we already overallocated enough,
+ then this is a very fast operation.
+ """
if len(l.items) >= newsize:
l.length = newsize
else:
- _ll_list_resize_really(l, newsize)
+ _ll_list_resize_really(l, newsize, True)
@jit.look_inside_iff(lambda l, newsize: jit.isconstant(len(l.items)) and jit.isconstant(newsize))
@jit.oopspec("list._resize_le(l, newsize)")
def _ll_list_resize_le(l, newsize):
+ """This is called with 'newsize' smaller than the current length of the
+ list. If 'newsize' falls lower than half the allocated size, proceed
+ with the realloc() to shrink the list.
+ """
if newsize >= (len(l.items) >> 1) - 5:
l.length = newsize
else:
- _ll_list_resize_really(l, newsize)
+ _ll_list_resize_really(l, newsize, False)
def ll_append_noresize(l, newitem):
length = l.length
diff --git a/pypy/rpython/normalizecalls.py b/pypy/rpython/normalizecalls.py
--- a/pypy/rpython/normalizecalls.py
+++ b/pypy/rpython/normalizecalls.py
@@ -39,7 +39,8 @@
row)
if did_something:
assert not callfamily.normalized, "change in call family normalisation"
- assert nshapes == 1, "XXX call table too complex"
+ if nshapes != 1:
+ raise_call_table_too_complex_error(callfamily, annotator)
while True:
progress = False
for shape, table in callfamily.calltables.items():
@@ -50,6 +51,38 @@
return # done
assert not callfamily.normalized, "change in call family normalisation"
+def raise_call_table_too_complex_error(callfamily, annotator):
+ msg = []
+ items = callfamily.calltables.items()
+ for i, (shape1, table1) in enumerate(items):
+ for shape2, table2 in items[i + 1:]:
+ if shape1 == shape2:
+ continue
+ row1 = table1[0]
+ row2 = table2[0]
+ problematic_function_graphs = set(row1.values()).union(set(row2.values()))
+ pfg = [str(graph) for graph in problematic_function_graphs]
+ pfg.sort()
+ msg.append("the following functions:")
+ msg.append(" %s" % ("\n ".join(pfg), ))
+ msg.append("are called with inconsistent numbers of arguments")
+ if shape1[0] != shape2[0]:
+ msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
+ else:
+ pass # XXX better message in this case
+ callers = []
+ msg.append("the callers of these functions are:")
+ for tag, (caller, callee) in annotator.translator.callgraph.iteritems():
+ if callee not in problematic_function_graphs:
+ continue
+ if str(caller) in callers:
+ continue
+ callers.append(str(caller))
+ callers.sort()
+ for caller in callers:
+ msg.append(" %s" % (caller, ))
+ raise TyperError("\n".join(msg))
+
def normalize_calltable_row_signature(annotator, shape, row):
graphs = row.values()
assert graphs, "no graph??"
diff --git a/pypy/rpython/rlist.py b/pypy/rpython/rlist.py
--- a/pypy/rpython/rlist.py
+++ b/pypy/rpython/rlist.py
@@ -20,8 +20,11 @@
'll_setitem_fast': (['self', Signed, 'item'], Void),
})
ADTIList = ADTInterface(ADTIFixedList, {
+ # grow the length if needed, overallocating a bit
'_ll_resize_ge': (['self', Signed ], Void),
+ # shrink the length, keeping it overallocated if useful
'_ll_resize_le': (['self', Signed ], Void),
+ # resize to exactly the given size
'_ll_resize': (['self', Signed ], Void),
})
@@ -1018,6 +1021,8 @@
ll_delitem_nonneg(dum_nocheck, lst, index)
def ll_inplace_mul(l, factor):
+ if factor == 1:
+ return l
length = l.ll_length()
if factor < 0:
factor = 0
@@ -1027,7 +1032,6 @@
raise MemoryError
res = l
res._ll_resize(resultlen)
- #res._ll_resize_ge(resultlen)
j = length
while j < resultlen:
i = 0
diff --git a/pypy/rpython/rmodel.py b/pypy/rpython/rmodel.py
--- a/pypy/rpython/rmodel.py
+++ b/pypy/rpython/rmodel.py
@@ -339,7 +339,7 @@
def _get_opprefix(self):
if self._opprefix is None:
- raise TyperError("arithmetic not supported on %r, it's size is too small" %
+ raise TyperError("arithmetic not supported on %r, its size is too small" %
self.lowleveltype)
return self._opprefix
diff --git a/pypy/rpython/test/test_normalizecalls.py b/pypy/rpython/test/test_normalizecalls.py
--- a/pypy/rpython/test/test_normalizecalls.py
+++ b/pypy/rpython/test/test_normalizecalls.py
@@ -2,6 +2,7 @@
from pypy.annotation import model as annmodel
from pypy.translator.translator import TranslationContext, graphof
from pypy.rpython.llinterp import LLInterpreter
+from pypy.rpython.error import TyperError
from pypy.rpython.test.test_llinterp import interpret
from pypy.rpython.lltypesystem import lltype
from pypy.rpython.normalizecalls import TotalOrderSymbolic, MAX
@@ -158,6 +159,39 @@
res = llinterp.eval_graph(graphof(translator, dummyfn), [2])
assert res == -2
+ def test_methods_with_defaults(self):
+ class Base:
+ def fn(self):
+ raise NotImplementedError
+ class Sub1(Base):
+ def fn(self, x=1):
+ return 1 + x
+ class Sub2(Base):
+ def fn(self):
+ return -2
+ def otherfunc(x):
+ return x.fn()
+ def dummyfn(n):
+ if n == 1:
+ x = Sub1()
+ n = x.fn(2)
+ else:
+ x = Sub2()
+ return otherfunc(x) + x.fn()
+
+ excinfo = py.test.raises(TyperError, "self.rtype(dummyfn, [int], int)")
+ msg = """the following functions:
+ .+Base.fn
+ .+Sub1.fn
+ .+Sub2.fn
+are called with inconsistent numbers of arguments
+sometimes with 2 arguments, sometimes with 1
+the callers of these functions are:
+ .+otherfunc
+ .+dummyfn"""
+ import re
+ assert re.match(msg, excinfo.value.args[0])
+
class PBase:
def fn(self):
diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -5,6 +5,22 @@
from pypy.tool.logparser import parse_log_file, extract_category
from copy import copy
+def parse_code_data(arg):
+ name = None
+ lineno = 0
+ filename = None
+ bytecode_no = 0
+ bytecode_name = None
+ m = re.search('<code object ([<>\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)',
+ arg)
+ if m is None:
+ # a non-code loop, like StrLiteralSearch or something
+ if arg:
+ bytecode_name = arg
+ else:
+ name, filename, lineno, bytecode_no, bytecode_name = m.groups()
+ return name, bytecode_name, filename, int(lineno), int(bytecode_no)
+
class Op(object):
bridge = None
offset = None
@@ -132,38 +148,24 @@
pass
class TraceForOpcode(object):
- filename = None
- startlineno = 0
- name = None
code = None
- bytecode_no = 0
- bytecode_name = None
is_bytecode = True
inline_level = None
has_dmp = False
- def parse_code_data(self, arg):
- m = re.search('<code object ([<>\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)',
- arg)
- if m is None:
- # a non-code loop, like StrLiteralSearch or something
- if arg:
- self.bytecode_name = arg
- else:
- self.name, self.filename, lineno, bytecode_no, self.bytecode_name = m.groups()
- self.startlineno = int(lineno)
- self.bytecode_no = int(bytecode_no)
-
-
def __init__(self, operations, storage, loopname):
for op in operations:
if op.name == 'debug_merge_point':
self.inline_level = int(op.args[0])
- self.parse_code_data(op.args[2][1:-1])
+ parsed = parse_code_data(op.args[2][1:-1])
+ (self.name, self.bytecode_name, self.filename,
+ self.startlineno, self.bytecode_no) = parsed
break
else:
self.inline_level = 0
- self.parse_code_data(loopname)
+ parsed = parse_code_data(loopname)
+ (self.name, self.bytecode_name, self.filename,
+ self.startlineno, self.bytecode_no) = parsed
self.operations = operations
self.storage = storage
self.code = storage.disassemble_code(self.filename, self.startlineno,
More information about the pypy-commit
mailing list