[pypy-svn] r76290 - pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86

jcreigh at codespeak.net jcreigh at codespeak.net
Tue Jul 20 15:24:30 CEST 2010


Author: jcreigh
Date: Tue Jul 20 15:24:22 2010
New Revision: 76290

Modified:
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
Log:
add some comments, remove useless @specialize.arg

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	Tue Jul 20 15:24:22 2010
@@ -208,7 +208,9 @@
         float_constants = (float_constants + 15) & ~15    # align to 16 bytes
         addr = rffi.cast(rffi.CArrayPtr(lltype.Char), float_constants)
         qword_padding = '\x00\x00\x00\x00\x00\x00\x00\x00'
+        # 0x8000000000000000
         neg_const = '\x00\x00\x00\x00\x00\x00\x00\x80'
+        # 0x7FFFFFFFFFFFFFFF
         abs_const = '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F'
         data = neg_const + qword_padding + abs_const + qword_padding
         for i in range(len(data)):
@@ -1460,8 +1462,9 @@
         # returns in eax the fail_index
 
         # now we return from the complete frame, which starts from
-        # _assemble_bootstrap_code().  The LEA below throws away most
-        # of the frame, including all the PUSHes that we did just above.
+        # _assemble_bootstrap_code().  The LEA in _call_footer below throws
+        # away most of the frame, including all the PUSHes that we did just
+        # above.
 
         self._call_footer()
         self.mc.done()
@@ -1510,9 +1513,6 @@
         # exit function
         self._call_footer()
 
-    # FIXME: I changed the third argument to this method, but I don't know
-    # what to do with @specialize
-    @specialize.arg(2)
     def implement_guard(self, guard_token, condition=None):
         self.mc.reserve_bytes(guard_token.recovery_stub_size())
         self.pending_guard_tokens.append(guard_token)

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py	Tue Jul 20 15:24:22 2010
@@ -149,6 +149,19 @@
 eax, ecx, edx, ebx, esp, ebp, esi, edi, r8, r9, r10, r11, r12, r13, r14, r15 = REGLOCS
 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 = XMMREGLOCS
 
+# We use a scratch register to simulate having 64-bit immediates. When we
+# want to do something like:
+#     mov rax, [0xDEADBEEFDEADBEEF]
+# we actually do:
+#     mov r11, 0xDEADBEEFDEADBEEF
+#     mov rax, [r11]
+# 
+# NB: You can use the scratch register as a temporary register in
+# assembler.py, but great care must be taken when doing so. A call to a
+# method in LocationCodeBuilder could clobber the scratch register when
+# certain location types are passed in. In addition, if a new MC is
+# allocated, and it happens to be more than 32 bits away, the JMP to it
+# will also clobber the scratch register.
 X86_64_SCRATCH_REG = r11
 # XXX: a GPR scratch register is definitely needed, but we could probably do
 # without an xmm scratch reg.



More information about the Pypy-commit mailing list