x86_64: Fixups for x32
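
The x32 ABI (__ILP32__ on x86_64) uses 4-byte pointers, so the cif,
fun and user_data fields of ffi_closure do not sit at the LP64
offsets 24, 32 and 40 that the closure entry points hard-coded.
Express the offsets in terms of FFI_TRAMPOLINE_SIZE and add __ILP32__
paths that use 32-bit loads, in both ffi_closure_unix64 and
ffi_go_closure_unix64.  While at it, go through a char *tramp local
when filling in the trampoline in ffi_prep_closure_loc, and indent
the .text directive.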
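The offset arithmetic can be sanity-checked outside libffi.  The
following is a minimal sketch, not part of the patch, assuming the
ffi_closure field order from ffi.h (trampoline bytes, then cif, fun,
user_data) and a FFI_TRAMPOLINE_SIZE of 24, which is what the
replaced constants imply.  Built with -m64 it prints the old
hard-coded offsets 24/32/40; built with -mx32 it prints 24/28/32,
matching the new __ILP32__ loads.

#include <stddef.h>
#include <stdio.h>

#define TRAMP_SIZE 24   /* stand-in for unix64's FFI_TRAMPOLINE_SIZE */

struct closure_layout       /* mirrors ffi_closure's field order */
{
  char tramp[TRAMP_SIZE];   /* trampoline code + target address */
  void *cif;                /* ffi_cif * in the real struct */
  void *fun;                /* the user's handler */
  void *user_data;
};

int
main (void)
{
  /* LP64: 24/32/40, the offsets the old asm hard-coded.
     x32 (__ILP32__): 24/28/32, i.e. FFI_TRAMPOLINE_SIZE, +4, +8.  */
  printf ("cif=%zu fun=%zu user_data=%zu\n",
          offsetof (struct closure_layout, cif),
          offsetof (struct closure_layout, fun),
          offsetof (struct closure_layout, user_data));
  return 0;
}
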
diff --git a/src/x86/ffi64.c b/src/x86/ffi64.c
index 384a93a..65fb595 100644
--- a/src/x86/ffi64.c
+++ b/src/x86/ffi64.c
@@ -568,6 +568,7 @@ ffi_prep_closure_loc (ffi_closure* closure,
0x0f, 0x1f, 0x00
};
void (*dest)(void);
+ char *tramp = closure->tramp;
if (cif->abi != FFI_UNIX64)
return FFI_BAD_ABI;
@@ -577,8 +578,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
else
dest = ffi_closure_unix64;
- memcpy (closure->tramp, trampoline, sizeof(trampoline));
- *(UINT64 *)(closure->tramp + 16) = (uintptr_t)dest;
+ memcpy (tramp, trampoline, sizeof(trampoline));
+ *(UINT64 *)(tramp + 16) = (uintptr_t)dest;
closure->cif = cif;
closure->fun = fun;
diff --git a/src/x86/unix64.S b/src/x86/unix64.S
index 134cb3d..797b9d9 100644
--- a/src/x86/unix64.S
+++ b/src/x86/unix64.S
@@ -32,7 +32,7 @@
#include <ffi.h>
#include <ffi_cfi.h>
-.text
+ .text
/* ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
void *raddr, void (*fnaddr)(void));
@@ -272,9 +272,15 @@ ffi_closure_unix64:
movq %r8, ffi_closure_OFS_G+0x20(%rsp)
movq %r9, ffi_closure_OFS_G+0x28(%rsp)
- movq 24(%r10), %rdi /* Load cif */
- movq 32(%r10), %rsi /* Load fun */
- movq 40(%r10), %rdx /* Load user_data */
+#ifdef __ILP32__
+ movl FFI_TRAMPOLINE_SIZE(%r10), %edi /* Load cif */
+ movl FFI_TRAMPOLINE_SIZE+4(%r10), %esi /* Load fun */
+ movl FFI_TRAMPOLINE_SIZE+8(%r10), %edx /* Load user_data */
+#else
+ movq FFI_TRAMPOLINE_SIZE(%r10), %rdi /* Load cif */
+ movq FFI_TRAMPOLINE_SIZE+8(%r10), %rsi /* Load fun */
+ movq FFI_TRAMPOLINE_SIZE+16(%r10), %rdx /* Load user_data */
+#endif
.Ldo_closure:
leaq ffi_closure_OFS_RVALUE(%rsp), %rcx /* Load rvalue */
movq %rsp, %r8 /* Load reg_args */
@@ -407,9 +413,15 @@ ffi_go_closure_unix64:
movq %r8, ffi_closure_OFS_G+0x20(%rsp)
movq %r9, ffi_closure_OFS_G+0x28(%rsp)
+#ifdef __ILP32__
+ movl 4(%r10), %edi /* Load cif */
+ movl 8(%r10), %esi /* Load fun */
+ movl %r10d, %edx /* Load closure (user_data) */
+#else
movq 8(%r10), %rdi /* Load cif */
movq 16(%r10), %rsi /* Load fun */
movq %r10, %rdx /* Load closure (user_data) */
+#endif
jmp .Ldo_closure
cfi_endproc
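
For reference, a minimal closure round-trip that drives the patched
path end to end: ffi_prep_closure_loc writes the trampoline, and
ffi_closure_unix64 picks up cif, fun and user_data at the offsets
above.  The add_bias handler and the int-add signature are
illustrative only, not part of the patch; on x32, build against a
libffi configured for -mx32.

#include <ffi.h>
#include <stdio.h>

/* Closure handler: *ret = arg0 + *(int *)user_data.  An int return
   is widened to ffi_arg, per the libffi closure conventions.  */
static void
add_bias (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
}

int
main (void)
{
  ffi_cif cif;
  ffi_type *arg_types[1] = { &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  int bias = 42;
  int (*fn) (int);

  if (closure == NULL
      || ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1,
                       &ffi_type_sint, arg_types) != FFI_OK
      || ffi_prep_closure_loc (closure, &cif, add_bias,
                               &bias, code) != FFI_OK)
    return 1;

  fn = (int (*) (int)) code;
  printf ("%d\n", fn (1));      /* prints 43 */

  ffi_closure_free (closure);
  return 0;
}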