Commit 4800832b1d4f2a87cfb1b7385995b066c4d8ee9b

Con Kolivas 2011-07-06T08:31:06

Import updated asm for CPU mining, based on the ufasoft assembly.
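
The new LAB_CALC code below computes the SHA-256 message schedule for four
nonces at once, one nonce per 32-bit lane of each xmm register, two W entries
per macro invocation. As a point of reference, here is a minimal scalar C
sketch of the same expansion, using the Rotr32 formulas quoted in the assembly
comments (the helper names here are illustrative, not part of the commit).
SSE2 has no vector rotate instruction, so the assembly builds each Rotr32 from
a pair of shifts and an XOR; that is why the psrld/pslld/pxor chains below
track cumulative shift counts such as 7 = 3 + 4 and 18 = 7 + 11.

    #include <stdint.h>

    /* Rotr32 built from two shifts, as the SSE2 code does: the two shifted
     * halves occupy disjoint bits, so XOR and OR are interchangeable. */
    static inline uint32_t Rotr32(uint32_t x, int n)
    {
        return (x >> n) ^ (x << (32 - n));
    }

    /* Expand W[16..63] from W[0..15]; lab_calc_blk does this for four
     * nonce lanes in parallel, two W entries per invocation. */
    static void sha256_expand(uint32_t w[64])
    {
        for (int i = 16; i < 64; i++) {
            uint32_t s0 = Rotr32(w[i-15], 7) ^ Rotr32(w[i-15], 18) ^ (w[i-15] >> 3);
            uint32_t s1 = Rotr32(w[i-2], 17) ^ Rotr32(w[i-2], 19) ^ (w[i-2] >> 10);
            w[i] = w[i-16] + s0 + w[i-7] + s1;
        }
    }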

diff --git a/x86_64/sha256_xmm_amd64.asm b/x86_64/sha256_xmm_amd64.asm
index 4fa0ea9..b5d5001 100644
--- a/x86_64/sha256_xmm_amd64.asm
+++ b/x86_64/sha256_xmm_amd64.asm
@@ -4,6 +4,10 @@
 ; Version 2011
 ; This software is Public Domain
 
+; Significant re-write/optimisation and reordering by:
+; Neil Kettle <mu-b@digit-labs.org>
+; ~18% performance improvement
+
 ; SHA-256 CPU SSE cruncher for Bitcoin Miner
 
 ALIGN 32
@@ -13,96 +17,133 @@ BITS 64
 %define data rsi
 %define init rdx
 
+; 0 = (1024 - 256) mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16): one LAB_CALC pass expands 256 bytes, dividing the 768-byte schedule exactly
+%define LAB_CALC_PARA	2
+%define LAB_CALC_UNROLL	8
+
+%define LAB_LOOP_UNROLL 8
+
 extern g_4sha256_k
 
-global CalcSha256_x64	
+global CalcSha256_x64
 ;	CalcSha256	hash(rdi), data(rsi), init(rdx)
-CalcSha256_x64:	
+CalcSha256_x64:
 
 	push	rbx
 
 LAB_NEXT_NONCE:
-	mov	r11, data
-;	mov	rax, pnonce
-;	mov	eax, [rax]
-;	mov	[rbx+3*16], eax
-;	inc	eax
-;	mov	[rbx+3*16+4], eax
-;	inc	eax
-;	mov	[rbx+3*16+8], eax
-;	inc	eax
-;	mov	[rbx+3*16+12], eax
-
-	mov	rcx, 64*4 ;rcx is # of SHA-2 rounds
-	mov	rax, 16*4 ;rax is where we expand to
+
+	mov	rcx, 64*4					; 256 - dword index just past W[63] (64 rounds in the main loop)
+	mov	rax, 16*4					; 64 - dword index of W[16], where expansion starts
 
 LAB_SHA:
 	push	rcx
-	lea	rcx, qword [r11+rcx*4]
-	lea	r11, qword [r11+rax*4]
-LAB_CALC:
-	movdqa	xmm0, [r11-15*16]
-	movdqa	xmm2, xmm0					; (Rotr32(w_15, 7) ^ Rotr32(w_15, 18) ^ (w_15 >> 3))
-	psrld	xmm0, 3
-	movdqa	xmm1, xmm0
-	pslld	xmm2, 14
-	psrld	xmm1, 4
-	pxor	xmm0, xmm1
-	pxor	xmm0, xmm2
-	pslld	xmm2, 11
-	psrld	xmm1, 11
-	pxor	xmm0, xmm1
-	pxor	xmm0, xmm2
+	lea	rcx, qword [data+rcx*4]				; rcx = data + 1024, end of the message schedule
+	lea	r11, qword [data+rax*4]				; r11 = data + 256, &W[16]
 
-	paddd	xmm0, [r11-16*16]
-
-	movdqa	xmm3, [r11-2*16]
-	movdqa	xmm2, xmm3					; (Rotr32(w_2, 17) ^ Rotr32(w_2, 19) ^ (w_2 >> 10))
-	psrld	xmm3, 10
-	movdqa	xmm1, xmm3
-	pslld	xmm2, 13
-	psrld	xmm1, 7
-	pxor	xmm3, xmm1
-	pxor	xmm3, xmm2
-	pslld	xmm2, 2
-	psrld	xmm1, 2
-	pxor	xmm3, xmm1
-	pxor	xmm3, xmm2
-	paddd	xmm0, xmm3
-	
-	paddd	xmm0, [r11-7*16]
-	movdqa	[r11], xmm0
-	add	r11, 16
+LAB_CALC:
+%macro	lab_calc_blk 1
+	movdqa	xmm0, [r11-(15-%1)*16]				; xmm0 = W[I-15]
+	movdqa	xmm4, [r11-(15-(%1+1))*16]			; xmm4 = W[I-15+1]
+	movdqa	xmm2, xmm0					; xmm2 = W[I-15]
+	movdqa	xmm6, xmm4					; xmm6 = W[I-15+1]
+	psrld	xmm0, 3						; xmm0 = W[I-15] >> 3
+	psrld	xmm4, 3						; xmm4 = W[I-15+1] >> 3
+	movdqa	xmm1, xmm0					; xmm1 = W[I-15] >> 3
+	movdqa	xmm5, xmm4					; xmm5 = W[I-15+1] >> 3
+	pslld	xmm2, 14					; xmm2 = W[I-15] << 14
+	pslld	xmm6, 14					; xmm6 = W[I-15+1] << 14
+	psrld	xmm1, 4						; xmm1 = W[I-15] >> 7
+	psrld	xmm5, 4						; xmm5 = W[I-15+1] >> 7
+	pxor	xmm0, xmm1					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
+	pxor	xmm4, xmm5					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
+	psrld	xmm1, 11					; xmm1 = W[I-15] >> 18
+	psrld	xmm5, 11					; xmm5 = W[I-15+1] >> 18
+	pxor	xmm0, xmm2					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
+	pxor	xmm4, xmm6					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
+	pslld	xmm2, 11					; xmm2 = W[I-15] << 25
+	pslld	xmm6, 11					; xmm6 = W[I-15+1] << 25
+	pxor	xmm0, xmm1					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
+	pxor	xmm4, xmm5					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
+	pxor	xmm0, xmm2					; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
+	pxor	xmm4, xmm6					; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
+
+	movdqa	xmm3, [r11-(2-%1)*16]				; xmm3 = W[I-2]
+	movdqa	xmm7, [r11-(2-(%1+1))*16]			; xmm7 = W[I-2+1]
+
+	paddd	xmm0, [r11-(16-%1)*16]				; xmm0 = s0(W[I-15]) + W[I-16]
+	paddd	xmm4, [r11-(16-(%1+1))*16]			; xmm4 = s0(W[I-15+1]) + W[I-16+1]
+
+;;;;;;;;;;;;;;;;;;
+
+	movdqa	xmm2, xmm3					; xmm2 = W[I-2]
+	movdqa	xmm6, xmm7					; xmm6 = W[I-2+1]
+	psrld	xmm3, 10					; xmm3 = W[I-2] >> 10
+	psrld	xmm7, 10					; xmm7 = W[I-2+1] >> 10
+	movdqa	xmm1, xmm3					; xmm1 = W[I-2] >> 10
+	movdqa	xmm5, xmm7					; xmm5 = W[I-2+1] >> 10
+
+	paddd	xmm0, [r11-(7-%1)*16]				; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
+
+	pslld	xmm2, 13					; xmm2 = W[I-2] << 13
+	pslld	xmm6, 13					; xmm6 = W[I-2+1] << 13
+	psrld	xmm1, 7						; xmm1 = W[I-2] >> 17
+	psrld	xmm5, 7						; xmm5 = W[I-2+1] >> 17
+
+	paddd	xmm4, [r11-(7-(%1+1))*16]			; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
+
+	pxor	xmm3, xmm1					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
+	pxor	xmm7, xmm5					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
+	psrld	xmm1, 2						; xmm1 = W[I-2] >> 19
+	psrld	xmm5, 2						; xmm5 = W[I-2+1] >> 19
+	pxor	xmm3, xmm2					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
+	pxor	xmm7, xmm6					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
+	pslld	xmm2, 2						; xmm2 = W[I-2] << 15
+	pslld	xmm6, 2						; xmm6 = W[I-2+1] << 15
+	pxor	xmm3, xmm1					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
+	pxor	xmm7, xmm5					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
+	pxor	xmm3, xmm2					; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
+	pxor	xmm7, xmm6					; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
+
+	paddd	xmm0, xmm3					; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
+	paddd	xmm4, xmm7					; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
+	movdqa	[r11+(%1*16)], xmm0
+	movdqa	[r11+((%1+1)*16)], xmm4
+%endmacro
+
+%assign i 0
+%rep    LAB_CALC_UNROLL
+        lab_calc_blk i
+%assign i i+LAB_CALC_PARA
+%endrep
+
+	add	r11, LAB_CALC_UNROLL*LAB_CALC_PARA*16
 	cmp	r11, rcx
 	jb	LAB_CALC
-	pop	rcx
 
-	mov rax, 0
+	pop	rcx
+	mov	rax, 0
 
 ; Load the initial hash values into the working registers.
 
-	movd	xmm0, dword [rdx+4*4]		; xmm0 == e
-	pshufd  xmm0, xmm0, 0
-	movd	xmm3, dword [rdx+3*4]		; xmm3 == d
-	pshufd  xmm3, xmm3, 0
-	movd	xmm4, dword [rdx+2*4]		; xmm4 == c
-	pshufd  xmm4, xmm4, 0
-	movd	xmm5, dword [rdx+1*4]		; xmm5 == b
-	pshufd  xmm5, xmm5, 0
-	movd	xmm7, dword [rdx+0*4]		; xmm7 == a
-	pshufd  xmm7, xmm7, 0
-	movd	xmm8, dword [rdx+5*4]		; xmm8 == f
-	pshufd  xmm8, xmm8, 0
-	movd	xmm9, dword [rdx+6*4]		; xmm9 == g
-	pshufd  xmm9, xmm9, 0
-	movd	xmm10, dword [rdx+7*4]		; xmm10 == h
-	pshufd  xmm10, xmm10, 0
+	movdqa	xmm7, [init]
+	pshufd	xmm5, xmm7, 0x55		; xmm5 == b
+	pshufd	xmm4, xmm7, 0xAA		; xmm4 == c
+	pshufd	xmm3, xmm7, 0xFF		; xmm3 == d
+	pshufd	xmm7, xmm7, 0			; xmm7 == a
+
+	movdqa	xmm0, [init+4*4]
+	pshufd	xmm8, xmm0, 0x55		; xmm8 == f
+	pshufd	xmm9, xmm0, 0xAA		; xmm9 == g
+	pshufd	xmm10, xmm0, 0xFF		; xmm10 == h
+	pshufd	xmm0, xmm0, 0			; xmm0 == e
 
 LAB_LOOP:
 
 ;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
 
-	movdqa	xmm6, [rsi+rax*4]
+%macro	lab_loop_blk 0
+	movdqa	xmm6, [data+rax*4]
 	paddd	xmm6, g_4sha256_k[rax*4]
 	add	rax, 4
 
@@ -151,10 +192,10 @@ LAB_LOOP:
 	movdqa	xmm5, xmm7	; b = a
 	pxor	xmm1, xmm2	; (a & c) ^ (a & d) ^ (c & d)
 	paddd	xmm6, xmm1	; t1 + ((a & c) ^ (a & d) ^ (c & d))
-		
+
 	movdqa	xmm2, xmm7
 	psrld	xmm7, 2
-	movdqa	xmm1, xmm7	
+	movdqa	xmm1, xmm7
 	pslld	xmm2, 10
 	psrld	xmm1, 11
 	pxor	xmm7, xmm2
@@ -165,54 +206,50 @@ LAB_LOOP:
 	pxor	xmm7, xmm1
 	pslld	xmm2, 11
 	pxor	xmm7, xmm2
-	paddd	xmm7, xmm6	; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d));	
+	paddd	xmm7, xmm6	; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d));
+%endmacro
+
+%assign i 0
+%rep    LAB_LOOP_UNROLL
+        lab_loop_blk
+%assign i i+1
+%endrep
 
 	cmp	rax, rcx
 	jb	LAB_LOOP
 
 ; Finished the 64 rounds; add the initial hash values back in and save the result
 
-	movd	xmm1, dword [rdx+0*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm7, xmm1
+	movdqa	xmm1, [init]
+	pshufd	xmm2, xmm1, 0x55		; xmm2 == init b
+	pshufd	xmm6, xmm1, 0xAA		; xmm6 == init c
+	pshufd	xmm11, xmm1, 0xFF		; xmm11 == init d
+	pshufd	xmm1, xmm1, 0			; xmm1 == init a
 
-	movd	xmm1, dword [rdx+1*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm5, xmm1
-
-	movd	xmm1, dword [rdx+2*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm4, xmm1
+	paddd	xmm5, xmm2
+	paddd	xmm4, xmm6
+	paddd	xmm3, xmm11
+	paddd	xmm7, xmm1
 
-	movd	xmm1, dword [rdx+3*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm3, xmm1
+	movdqa	xmm1, [init+4*4]
+	pshufd	xmm2, xmm1, 0x55		; xmm2 == init f
+	pshufd	xmm6, xmm1, 0xAA		; xmm6 == init g
+	pshufd	xmm11, xmm1, 0xFF		; xmm11 == init h
+	pshufd	xmm1, xmm1, 0			; xmm1 == init e
 
-	movd	xmm1, dword [rdx+4*4]
-	pshufd  xmm1, xmm1, 0
+	paddd	xmm8, xmm2
+	paddd	xmm9, xmm6
+	paddd	xmm10, xmm11
 	paddd	xmm0, xmm1
 
-	movd	xmm1, dword [rdx+5*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm8, xmm1
-
-	movd	xmm1, dword [rdx+6*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm9, xmm1
-
-	movd	xmm1, dword [rdx+7*4]
-	pshufd  xmm1, xmm1, 0
-	paddd	xmm10, xmm1
-
-debug_me:
-	movdqa	[rdi+0*16], xmm7	
-	movdqa	[rdi+1*16], xmm5	
-	movdqa	[rdi+2*16], xmm4
-	movdqa	[rdi+3*16], xmm3
-	movdqa	[rdi+4*16], xmm0
-	movdqa	[rdi+5*16], xmm8
-	movdqa	[rdi+6*16], xmm9	
-	movdqa	[rdi+7*16], xmm10
+	movdqa	[hash+0*16], xmm7
+	movdqa	[hash+1*16], xmm5
+	movdqa	[hash+2*16], xmm4
+	movdqa	[hash+3*16], xmm3
+	movdqa	[hash+4*16], xmm0
+	movdqa	[hash+5*16], xmm8
+	movdqa	[hash+6*16], xmm9
+	movdqa	[hash+7*16], xmm10
 
 LAB_RET:
 	pop	rbx
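
For reference, each lab_loop_blk expansion above performs one SHA-256 round
per nonce lane. A minimal scalar C sketch of one round, following the t1/t2
formulas quoted in the assembly comments and reusing the Rotr32 helper from
the sketch above (names illustrative, not part of the commit). Note the
assembly comments write the Maj term as (a & c) ^ (a & d) ^ (c & d) because
the register roles have already rotated by that point in the round; in the
standard per-round naming used here it is (a & b) ^ (a & c) ^ (b & c).

    /* One SHA-256 round: s holds the working variables a..h. */
    static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25))
                        + ((e & f) ^ (~e & g)) + k + w;      /* Ch(e,f,g)  */
        uint32_t t2 = (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22))
                        + ((a & b) ^ (a & c) ^ (b & c));     /* Maj(a,b,c) */

        s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
        s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }

Two design points worth noting in the rewrite: the initial hash values are now
loaded with one movdqa plus four pshufd broadcasts per half of the state
instead of eight movd/pshufd pairs, and g_4sha256_k is indexed by the same
dword counter as the message data (rax advances by 4 per round, i.e. 16 bytes),
which implies each round constant is stored replicated across all four lanes
of a 16-byte table entry.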