Import updated asm for CPU mining, based on the ufasoft assembly.
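For reference, the expansion loop this patch rewrites (LAB_CALC) computes the standard SHA-256 message schedule, four nonces at a time with one 32-bit lane per nonce. A scalar C sketch of that step follows; the names (rotr, s0, s1, sha256_expand) are mine for illustration, not identifiers from the source. Note the asm has no rotate instruction available for XMM registers, so it builds each rotation from its two shift halves and combines everything with XOR (equivalent to OR here, since the shifted bit ranges are disjoint), e.g. rotr(x, 7) == (x >> 7) ^ (x << 25).

#include <stdint.h>

/* Rotate right by n bits (0 < n < 32). */
static inline uint32_t rotr(uint32_t x, unsigned n)
{
    return (x >> n) | (x << (32 - n));
}

/* SHA-256 small sigma functions used by the W[] expansion. */
static inline uint32_t s0(uint32_t x)
{
    return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3);
}

static inline uint32_t s1(uint32_t x)
{
    return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10);
}

/* Expand W[16..63] from the 16 message words. Each lab_calc_blk
   invocation in the patch performs LAB_CALC_PARA (= 2) of these
   iterations, on four independent lanes at once. */
static void sha256_expand(uint32_t W[64])
{
    for (int i = 16; i < 64; i++)
        W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
}
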
diff --git a/x86_64/sha256_xmm_amd64.asm b/x86_64/sha256_xmm_amd64.asm
index 4fa0ea9..b5d5001 100644
--- a/x86_64/sha256_xmm_amd64.asm
+++ b/x86_64/sha256_xmm_amd64.asm
@@ -4,6 +4,10 @@
; Version 2011
; This software is Public Domain
+; Significant re-write/optimisation and reordering by,
+; Neil Kettle <mu-b@digit-labs.org>
+; ~18% performance improvement
+
; SHA-256 CPU SSE cruncher for Bitcoin Miner
ALIGN 32
@@ -13,96 +17,133 @@ BITS 64
%define data rsi
%define init rdx
+; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
+%define LAB_CALC_PARA 2
+%define LAB_CALC_UNROLL 8
+
+%define LAB_LOOP_UNROLL 8
+
extern g_4sha256_k
-global CalcSha256_x64
+global CalcSha256_x64
; CalcSha256 hash(rdi), data(rsi), init(rdx)
-CalcSha256_x64:
+CalcSha256_x64:
push rbx
LAB_NEXT_NONCE:
- mov r11, data
-; mov rax, pnonce
-; mov eax, [rax]
-; mov [rbx+3*16], eax
-; inc eax
-; mov [rbx+3*16+4], eax
-; inc eax
-; mov [rbx+3*16+8], eax
-; inc eax
-; mov [rbx+3*16+12], eax
-
- mov rcx, 64*4 ;rcx is # of SHA-2 rounds
- mov rax, 16*4 ;rax is where we expand to
+
+ mov rcx, 64*4 ; 256 - rcx is # of SHA-2 rounds
+ mov rax, 16*4 ; 64 - rax is where we expand to
LAB_SHA:
push rcx
- lea rcx, qword [r11+rcx*4]
- lea r11, qword [r11+rax*4]
-LAB_CALC:
- movdqa xmm0, [r11-15*16]
- movdqa xmm2, xmm0 ; (Rotr32(w_15, 7) ^ Rotr32(w_15, 18) ^ (w_15 >> 3))
- psrld xmm0, 3
- movdqa xmm1, xmm0
- pslld xmm2, 14
- psrld xmm1, 4
- pxor xmm0, xmm1
- pxor xmm0, xmm2
- pslld xmm2, 11
- psrld xmm1, 11
- pxor xmm0, xmm1
- pxor xmm0, xmm2
+ lea rcx, qword [data+rcx*4] ; + 1024
+ lea r11, qword [data+rax*4] ; + 256
- paddd xmm0, [r11-16*16]
-
- movdqa xmm3, [r11-2*16]
- movdqa xmm2, xmm3 ; (Rotr32(w_2, 17) ^ Rotr32(w_2, 19) ^ (w_2 >> 10))
- psrld xmm3, 10
- movdqa xmm1, xmm3
- pslld xmm2, 13
- psrld xmm1, 7
- pxor xmm3, xmm1
- pxor xmm3, xmm2
- pslld xmm2, 2
- psrld xmm1, 2
- pxor xmm3, xmm1
- pxor xmm3, xmm2
- paddd xmm0, xmm3
-
- paddd xmm0, [r11-7*16]
- movdqa [r11], xmm0
- add r11, 16
+LAB_CALC:
+%macro lab_calc_blk 1
+ movdqa xmm0, [r11-(15-%1)*16] ; xmm0 = W[I-15]
+ movdqa xmm4, [r11-(15-(%1+1))*16] ; xmm4 = W[I-15+1]
+ movdqa xmm2, xmm0 ; xmm2 = W[I-15]
+ movdqa xmm6, xmm4 ; xmm6 = W[I-15+1]
+ psrld xmm0, 3 ; xmm0 = W[I-15] >> 3
+ psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3
+ movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3
+ movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3
+ pslld xmm2, 14 ; xmm2 = W[I-15] << 14
+ pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14
+ psrld xmm1, 4 ; xmm1 = W[I-15] >> 7
+ psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7
+ pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
+ pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
+ psrld xmm1, 11 ; xmm1 = W[I-15] >> 18
+ psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18
+ pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
+ pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
+ pslld xmm2, 11 ; xmm2 = W[I-15] << 25
+ pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25
+ pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
+ pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
+ pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
+ pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
+
+ movdqa xmm3, [r11-(2-%1)*16] ; xmm3 = W[I-2]
+ movdqa xmm7, [r11-(2-(%1+1))*16] ; xmm7 = W[I-2+1]
+
+ paddd xmm0, [r11-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16]
+ paddd xmm4, [r11-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1]
+
+;;;;;;;;;;;;;;;;;;
+
+ movdqa xmm2, xmm3 ; xmm2 = W[I-2]
+ movdqa xmm6, xmm7 ; xmm6 = W[I-2+1]
+ psrld xmm3, 10 ; xmm3 = W[I-2] >> 10
+ psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10
+ movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10
+ movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10
+
+ paddd xmm0, [r11-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
+
+ pslld xmm2, 13 ; xmm2 = W[I-2] << 13
+ pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13
+ psrld xmm1, 7 ; xmm1 = W[I-2] >> 17
+ psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17
+
+ paddd xmm4, [r11-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
+
+ pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
+ pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
+ psrld xmm1, 2 ; xmm1 = W[I-2] >> 19
+ psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19
+ pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
+ pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
+ pslld xmm2, 2 ; xmm2 = W[I-2] << 15
+ pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15
+ pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
+ pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
+ pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
+ pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
+
+ paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
+ paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
+ movdqa [r11+(%1*16)], xmm0
+ movdqa [r11+((%1+1)*16)], xmm4
+%endmacro
+
+%assign i 0
+%rep LAB_CALC_UNROLL
+ lab_calc_blk i
+%assign i i+LAB_CALC_PARA
+%endrep
+
+ add r11, LAB_CALC_UNROLL*LAB_CALC_PARA*16
cmp r11, rcx
jb LAB_CALC
- pop rcx
- mov rax, 0
+ pop rcx
+ mov rax, 0
; Load the init values of the message into the hash.
- movd xmm0, dword [rdx+4*4] ; xmm0 == e
- pshufd xmm0, xmm0, 0
- movd xmm3, dword [rdx+3*4] ; xmm3 == d
- pshufd xmm3, xmm3, 0
- movd xmm4, dword [rdx+2*4] ; xmm4 == c
- pshufd xmm4, xmm4, 0
- movd xmm5, dword [rdx+1*4] ; xmm5 == b
- pshufd xmm5, xmm5, 0
- movd xmm7, dword [rdx+0*4] ; xmm7 == a
- pshufd xmm7, xmm7, 0
- movd xmm8, dword [rdx+5*4] ; xmm8 == f
- pshufd xmm8, xmm8, 0
- movd xmm9, dword [rdx+6*4] ; xmm9 == g
- pshufd xmm9, xmm9, 0
- movd xmm10, dword [rdx+7*4] ; xmm10 == h
- pshufd xmm10, xmm10, 0
+ movdqa xmm7, [init]
+ pshufd xmm5, xmm7, 0x55 ; xmm5 == b
+ pshufd xmm4, xmm7, 0xAA ; xmm4 == c
+ pshufd xmm3, xmm7, 0xFF ; xmm3 == d
+ pshufd xmm7, xmm7, 0 ; xmm7 == a
+
+ movdqa xmm0, [init+4*4]
+ pshufd xmm8, xmm0, 0x55 ; xmm8 == f
+ pshufd xmm9, xmm0, 0xAA ; xmm9 == g
+ pshufd xmm10, xmm0, 0xFF ; xmm10 == h
+ pshufd xmm0, xmm0, 0 ; xmm0 == e
LAB_LOOP:
;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32<T>(g_sha256_k[j]) + w[j]
- movdqa xmm6, [rsi+rax*4]
+%macro lab_loop_blk 0
+ movdqa xmm6, [data+rax*4]
paddd xmm6, g_4sha256_k[rax*4]
add rax, 4
@@ -151,10 +192,10 @@ LAB_LOOP:
movdqa xmm5, xmm7 ; b = a
pxor xmm1, xmm2 ; (a & c) ^ (a & d) ^ (c & d)
paddd xmm6, xmm1 ; t1 + ((a & c) ^ (a & d) ^ (c & d))
-
+
movdqa xmm2, xmm7
psrld xmm7, 2
- movdqa xmm1, xmm7
+ movdqa xmm1, xmm7
pslld xmm2, 10
psrld xmm1, 11
pxor xmm7, xmm2
@@ -165,54 +206,50 @@ LAB_LOOP:
pxor xmm7, xmm1
pslld xmm2, 11
pxor xmm7, xmm2
- paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d));
+ paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d));
+%endmacro
+
+%assign i 0
+%rep LAB_LOOP_UNROLL
+ lab_loop_blk
+%assign i i+1
+%endrep
cmp rax, rcx
jb LAB_LOOP
; Finished the 64 rounds, calculate hash and save
- movd xmm1, dword [rdx+0*4]
- pshufd xmm1, xmm1, 0
- paddd xmm7, xmm1
+ movdqa xmm1, [rdx]
+ pshufd xmm2, xmm1, 0x55
+ pshufd xmm6, xmm1, 0xAA
+ pshufd xmm11, xmm1, 0xFF
+ pshufd xmm1, xmm1, 0
- movd xmm1, dword [rdx+1*4]
- pshufd xmm1, xmm1, 0
- paddd xmm5, xmm1
-
- movd xmm1, dword [rdx+2*4]
- pshufd xmm1, xmm1, 0
- paddd xmm4, xmm1
+ paddd xmm5, xmm2
+ paddd xmm4, xmm6
+ paddd xmm3, xmm11
+ paddd xmm7, xmm1
- movd xmm1, dword [rdx+3*4]
- pshufd xmm1, xmm1, 0
- paddd xmm3, xmm1
+ movdqa xmm1, [rdx+4*4]
+ pshufd xmm2, xmm1, 0x55
+ pshufd xmm6, xmm1, 0xAA
+ pshufd xmm11, xmm1, 0xFF
+ pshufd xmm1, xmm1, 0
- movd xmm1, dword [rdx+4*4]
- pshufd xmm1, xmm1, 0
+ paddd xmm8, xmm2
+ paddd xmm9, xmm6
+ paddd xmm10, xmm11
paddd xmm0, xmm1
- movd xmm1, dword [rdx+5*4]
- pshufd xmm1, xmm1, 0
- paddd xmm8, xmm1
-
- movd xmm1, dword [rdx+6*4]
- pshufd xmm1, xmm1, 0
- paddd xmm9, xmm1
-
- movd xmm1, dword [rdx+7*4]
- pshufd xmm1, xmm1, 0
- paddd xmm10, xmm1
-
-debug_me:
- movdqa [rdi+0*16], xmm7
- movdqa [rdi+1*16], xmm5
- movdqa [rdi+2*16], xmm4
- movdqa [rdi+3*16], xmm3
- movdqa [rdi+4*16], xmm0
- movdqa [rdi+5*16], xmm8
- movdqa [rdi+6*16], xmm9
- movdqa [rdi+7*16], xmm10
+ movdqa [hash+0*16], xmm7
+ movdqa [hash+1*16], xmm5
+ movdqa [hash+2*16], xmm4
+ movdqa [hash+3*16], xmm3
+ movdqa [hash+4*16], xmm0
+ movdqa [hash+5*16], xmm8
+ movdqa [hash+6*16], xmm9
+ movdqa [hash+7*16], xmm10
LAB_RET:
pop rbx
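
The LAB_LOOP hunks keep the original per-round comments intact; in scalar C the round they describe is roughly the following (a sketch with names of my choosing; the asm runs four such rounds in parallel, one per SSE lane, and interleaves the register renames with the Maj computation, which is why its comment reads (a & c) ^ (a & d) ^ (c & d) rather than the textbook operands):

#include <stdint.h>

static inline uint32_t rotr(uint32_t x, unsigned n)
{
    return (x >> n) | (x << (32 - n));
}

/* One SHA-256 compression round; k is the round constant
   (one lane of g_4sha256_k), w the expanded message word. */
static void sha256_round(uint32_t s[8], uint32_t k, uint32_t w)
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

    uint32_t S1  = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25);
    uint32_t ch  = (e & f) ^ (~e & g);              /* AndNot(e, g) */
    uint32_t t1  = h + S1 + ch + k + w;

    uint32_t S0  = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22);
    uint32_t maj = (a & b) ^ (a & c) ^ (b & c);
    uint32_t t2  = S0 + maj;

    s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;    /* e = d + t1 */
    s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;   /* a = t1 + t2 */
}

The final hunk is the usual feed-forward: it broadcasts the eight init words with movdqa/pshufd instead of the old per-word movd/pshufd pairs, adds them to the working state, and stores the four result lanes to hash.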