Commit a97cf1fae575d8bfd5259c5c422025ad43911326

Alan Modra 2013-11-16T06:40:13

This patch prepares for ELFv2, where the sizes of the stack frame areas used by the linux64 closure code (special reg save area, parm save area, retval area) change: the hard-coded frame offsets in linux64_closure.S are replaced with STACKFRAME, PARMSAVE and RETVAL defines. It also makes some minor changes to improve code efficiency, such as saving fprs before buying stack and tweaking the restore of r28 in ffi_call_LINUX64.
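
For reference, a minimal C sketch (mine, not part of the patch; all names are hypothetical) of how the ELFv1 frame constants introduced in linux64_closure.S are derived: 48 bytes of mandatory special reg save area, 64 bytes of parm save area, 16 bytes of retval area and 13*8 bytes of fpr save area, rounded up to a 16-byte multiple.

    #include <stdio.h>

    /* Hypothetical derivation of the ELFv1 constants the patch names
       in linux64_closure.S.  Under ELFv2 the special reg save area
       shrinks and the parm save area becomes optional, which is why
       the patch stops open-coding these numbers.  */
    int main (void)
    {
      int special  = 48;                  /* special reg save area   */
      int parmsave = special;             /* PARMSAVE = 48           */
      int parmsize = 8 * 8;               /* one doubleword per gpr  */
      int retval   = parmsave + parmsize; /* RETVAL = 112            */
      int retsize  = 16;                  /* two-doubleword retval   */
      int fprsave  = 13 * 8;              /* fprs f1..f13            */
      int frame    = (special + parmsize + retsize + fprsave + 15) & -16;

      /* prints PARMSAVE=48 RETVAL=112 STACKFRAME=240 */
      printf ("PARMSAVE=%d RETVAL=%d STACKFRAME=%d\n",
              parmsave, retval, frame);
      return 0;
    }

The "save fprs before buying stack" change relies on the 288 bytes of volatile storage the PPC64 ELF ABI makes available below the stack pointer: f1..f13 are stored at -104(%r1) upward before the stdu, and once the stdu buys the 240-byte frame those same bytes sit at offsets 136..239 of the new frame (-104 + 240 = 136, and 136 + 13*8 = 240), so %r6 can simply be set to the pre-stdu %r1 - 104.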

diff --git a/ChangeLog b/ChangeLog
index e7216c9..bca1211 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,14 @@
 2013-11-16  Alan Modra  <amodra@gmail.com>
 
+	* src/powerpc/linux64.S (ffi_call_LINUX64): Tweak restore of r28.
+	(.note.GNU-stack): Move inside outer #ifdef.
+	* src/powerpc/linux64_closure.S (STACKFRAME, PARMSAVE,
+	RETVAL): Define and use throughout.
+	(ffi_closure_LINUX64): Save fprs before buying stack.
+	(.note.GNU-stack): Move inside outer #ifdef.
+
+2013-11-16  Alan Modra  <amodra@gmail.com>
+
 	* src/powerpc/ffitarget.h (FFI_TARGET_SPECIFIC_VARIADIC): Define.
 	(FFI_EXTRA_CIF_FIELDS): Define.
 	* src/powerpc/ffi.c (ffi_prep_args64): Save fprs as per the
diff --git a/src/powerpc/linux64.S b/src/powerpc/linux64.S
index f28da81..7f89934 100644
--- a/src/powerpc/linux64.S
+++ b/src/powerpc/linux64.S
@@ -130,7 +130,7 @@ ffi_call_LINUX64:
 	/* Restore the registers we used and return.  */
 	mr	%r1, %r28
 	ld	%r0, 16(%r28)
-	ld	%r28, -32(%r1)
+	ld	%r28, -32(%r28)
 	mtlr	%r0
 	ld	%r29, -24(%r1)
 	ld	%r30, -16(%r1)
@@ -197,8 +197,8 @@ ffi_call_LINUX64:
 	.uleb128 0x4
 	.align 3
 .LEFDE1:
-#endif
 
-#if defined __ELF__ && defined __linux__
+# if defined __ELF__ && defined __linux__
 	.section	.note.GNU-stack,"",@progbits
+# endif
 #endif
diff --git a/src/powerpc/linux64_closure.S b/src/powerpc/linux64_closure.S
index ac4a226..bc677fc 100644
--- a/src/powerpc/linux64_closure.S
+++ b/src/powerpc/linux64_closure.S
@@ -50,53 +50,57 @@ ffi_closure_LINUX64:
 	.text
 .ffi_closure_LINUX64:
 #endif
+
+# 48 bytes special reg save area + 64 bytes parm save area
+# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16
+# define STACKFRAME 240
+# define PARMSAVE 48
+# define RETVAL PARMSAVE+64
+
 .LFB1:
-	# save general regs into parm save area
-	std	%r3, 48(%r1)
-	std	%r4, 56(%r1)
-	std	%r5, 64(%r1)
-	std	%r6, 72(%r1)
 	mflr	%r0
+	# save general regs into parm save area
+	# This is the parameter save area set up by our caller.
+	std	%r3, PARMSAVE+0(%r1)
+	std	%r4, PARMSAVE+8(%r1)
+	std	%r5, PARMSAVE+16(%r1)
+	std	%r6, PARMSAVE+24(%r1)
+	std	%r7, PARMSAVE+32(%r1)
+	std	%r8, PARMSAVE+40(%r1)
+	std	%r9, PARMSAVE+48(%r1)
+	std	%r10, PARMSAVE+56(%r1)
 
-	std	%r7, 80(%r1)
-	std	%r8, 88(%r1)
-	std	%r9, 96(%r1)
-	std	%r10, 104(%r1)
 	std	%r0, 16(%r1)
 
-	# mandatory 48 bytes special reg save area + 64 bytes parm save area
-	# + 16 bytes retval area + 13*8 bytes fpr save area + round to 16
-	stdu	%r1, -240(%r1)
-.LCFI0:
+	# load up the pointer to the parm save area
+	addi	%r5, %r1, PARMSAVE
 
 	# next save fpr 1 to fpr 13
-	stfd  %f1, 128+(0*8)(%r1)
-	stfd  %f2, 128+(1*8)(%r1)
-	stfd  %f3, 128+(2*8)(%r1)
-	stfd  %f4, 128+(3*8)(%r1)
-	stfd  %f5, 128+(4*8)(%r1)
-	stfd  %f6, 128+(5*8)(%r1)
-	stfd  %f7, 128+(6*8)(%r1)
-	stfd  %f8, 128+(7*8)(%r1)
-	stfd  %f9, 128+(8*8)(%r1)
-	stfd  %f10, 128+(9*8)(%r1)
-	stfd  %f11, 128+(10*8)(%r1)
-	stfd  %f12, 128+(11*8)(%r1)
-	stfd  %f13, 128+(12*8)(%r1)
+	stfd	%f1, -104+(0*8)(%r1)
+	stfd	%f2, -104+(1*8)(%r1)
+	stfd	%f3, -104+(2*8)(%r1)
+	stfd	%f4, -104+(3*8)(%r1)
+	stfd	%f5, -104+(4*8)(%r1)
+	stfd	%f6, -104+(5*8)(%r1)
+	stfd	%f7, -104+(6*8)(%r1)
+	stfd	%f8, -104+(7*8)(%r1)
+	stfd	%f9, -104+(8*8)(%r1)
+	stfd	%f10, -104+(9*8)(%r1)
+	stfd	%f11, -104+(10*8)(%r1)
+	stfd	%f12, -104+(11*8)(%r1)
+	stfd	%f13, -104+(12*8)(%r1)
 
-	# set up registers for the routine that actually does the work
-	# get the context pointer from the trampoline
-	mr %r3, %r11
+	# load up the pointer to the saved fpr registers
+	addi	%r6, %r1, -104
 
-	# now load up the pointer to the result storage
-	addi %r4, %r1, 112
+	# load up the pointer to the result storage
+	addi	%r4, %r1, -STACKFRAME+RETVAL
 
-	# now load up the pointer to the parameter save area
-	# in the previous frame
-	addi %r5, %r1, 240 + 48
+	stdu	%r1, -STACKFRAME(%r1)
+.LCFI0:
 
-	# now load up the pointer to the saved fpr registers */
-	addi %r6, %r1, 128
+	# get the context pointer from the trampoline
+	mr	%r3, %r11
 
 	# make the call
 #ifdef _CALL_LINUX
@@ -115,7 +119,7 @@ ffi_closure_LINUX64:
 	mflr %r4		# move address of .Lret to r4
 	sldi %r3, %r3, 4	# now multiply return type by 16
 	addi %r4, %r4, .Lret_type0 - .Lret
-	ld %r0, 240+16(%r1)
+	ld %r0, STACKFRAME+16(%r1)
 	add %r3, %r3, %r4	# add contents of table to table address
 	mtctr %r3
 	bctr			# jump to it
@@ -128,107 +132,107 @@ ffi_closure_LINUX64:
 .Lret_type0:
 # case FFI_TYPE_VOID
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 	nop
 # case FFI_TYPE_INT
-#ifdef __LITTLE_ENDIAN__
-	lwa %r3, 112+0(%r1)
-#else
-	lwa %r3, 112+4(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lwa %r3, RETVAL+0(%r1)
+# else
+	lwa %r3, RETVAL+4(%r1)
+# endif
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_FLOAT
-	lfs %f1, 112+0(%r1)
+	lfs %f1, RETVAL+0(%r1)
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_DOUBLE
-	lfd %f1, 112+0(%r1)
+	lfd %f1, RETVAL+0(%r1)
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_LONGDOUBLE
-	lfd %f1, 112+0(%r1)
+	lfd %f1, RETVAL+0(%r1)
 	mtlr %r0
-	lfd %f2, 112+8(%r1)
+	lfd %f2, RETVAL+8(%r1)
 	b .Lfinish
 # case FFI_TYPE_UINT8
-#ifdef __LITTLE_ENDIAN__
-	lbz %r3, 112+0(%r1)
-#else
-	lbz %r3, 112+7(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lbz %r3, RETVAL+0(%r1)
+# else
+	lbz %r3, RETVAL+7(%r1)
+# endif
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_SINT8
-#ifdef __LITTLE_ENDIAN__
-	lbz %r3, 112+0(%r1)
-#else
-	lbz %r3, 112+7(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lbz %r3, RETVAL+0(%r1)
+# else
+	lbz %r3, RETVAL+7(%r1)
+# endif
 	extsb %r3,%r3
 	mtlr %r0
 	b .Lfinish
 # case FFI_TYPE_UINT16
-#ifdef __LITTLE_ENDIAN__
-	lhz %r3, 112+0(%r1)
-#else
-	lhz %r3, 112+6(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lhz %r3, RETVAL+0(%r1)
+# else
+	lhz %r3, RETVAL+6(%r1)
+# endif
 	mtlr %r0
 .Lfinish:
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_SINT16
-#ifdef __LITTLE_ENDIAN__
-	lha %r3, 112+0(%r1)
-#else
-	lha %r3, 112+6(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lha %r3, RETVAL+0(%r1)
+# else
+	lha %r3, RETVAL+6(%r1)
+# endif
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_UINT32
-#ifdef __LITTLE_ENDIAN__
-	lwz %r3, 112+0(%r1)
-#else
-	lwz %r3, 112+4(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lwz %r3, RETVAL+0(%r1)
+# else
+	lwz %r3, RETVAL+4(%r1)
+# endif
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_SINT32
-#ifdef __LITTLE_ENDIAN__
-	lwa %r3, 112+0(%r1)
-#else
-	lwa %r3, 112+4(%r1)
-#endif
+# ifdef __LITTLE_ENDIAN__
+	lwa %r3, RETVAL+0(%r1)
+# else
+	lwa %r3, RETVAL+4(%r1)
+# endif
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_UINT64
-	ld %r3, 112+0(%r1)
+	ld %r3, RETVAL+0(%r1)
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_SINT64
-	ld %r3, 112+0(%r1)
+	ld %r3, RETVAL+0(%r1)
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # case FFI_TYPE_STRUCT
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 	nop
 # case FFI_TYPE_POINTER
-	ld %r3, 112+0(%r1)
+	ld %r3, RETVAL+0(%r1)
 	mtlr %r0
-	addi %r1, %r1, 240
+	addi %r1, %r1, STACKFRAME
 	blr
 # esac
 .LFE1:
@@ -267,14 +271,14 @@ ffi_closure_LINUX64:
 	.byte	0x2	 # DW_CFA_advance_loc1
 	.byte	.LCFI0-.LFB1
 	.byte	0xe	 # DW_CFA_def_cfa_offset
-	.uleb128 240
+	.uleb128 STACKFRAME
 	.byte	0x11	 # DW_CFA_offset_extended_sf
 	.uleb128 0x41
 	.sleb128 -2
 	.align 3
 .LEFDE1:
-#endif
 
-#if defined __ELF__ && defined __linux__
+# if defined __ELF__ && defined __linux__
 	.section	.note.GNU-stack,"",@progbits
+# endif
 #endif