sm3: refactoring

parent 76bafca3c4
commit edcba25490
@@ -43,14 +43,12 @@
#define a AX
#define b BX
#define c CX
#define d R8
#define e DX
#define d DX
#define e R8
#define f R9
#define g R10
#define h R11

#define SRND SI // SRND is same register as CTX

#define y0 R12
#define y1 R13
#define y2 R14
@@ -66,6 +64,12 @@
#define _INP _INP_END + INP_END_SIZE
#define STACK_SIZE _INP + INP_SIZE

#define P0(tt2, tmp, out) \
    RORXL $23, tt2, tmp; \
    RORXL $15, tt2, out; \
    XORL tmp, out; \
    XORL tt2, out
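
For reference, the new P0 macro above is SM3's P0 permutation, P0(X) = X ^ (X <<< 9) ^ (X <<< 17); RORXL $23 and RORXL $15 are just the right-rotate forms of the left-rotates by 9 and 17. A minimal pure-Go sketch of the same transform (the package and function names are illustrative, not part of this repository):

    package sm3sketch

    import "math/bits"

    // p0 mirrors the P0 macro: x ^ (x <<< 9) ^ (x <<< 17).
    // A right-rotate by 23 equals a left-rotate by 9 on 32-bit words,
    // and a right-rotate by 15 equals a left-rotate by 17.
    func p0(x uint32) uint32 {
        return x ^ bits.RotateLeft32(x, 9) ^ bits.RotateLeft32(x, 17)
    }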

// For rounds [0 - 16)
#define ROUND_AND_SCHED_N_0_0(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
    ; \ // ############################# RND N + 0 ############################//
@@ -77,9 +81,9 @@
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    VPSLLD $7, XTMP0, XTMP1; \ // XTMP1 = W[-13] << 7 = {w6<<7,w5<<7,w4<<7,w3<<7}
    ADDL (disp + 0*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 0*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + 0*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 0*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    VPSRLD $(32-7), XTMP0, XTMP0; \ // XTMP0 = W[-13] >> 25 = {w6>>25,w5>>25,w4>>25,w3>>25}
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, h; \
@@ -95,11 +99,8 @@
    VPXOR XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-6] ^ (W[-13] rol 7)
    ROLL $9, b; \
    ROLL $19, f; \
    RORXL $23, y2, y0; \
    VPALIGNR $12, XDWORD1, XDWORD2, XTMP1; \ // XTMP1 = W[-9] = {w10,w9,w8,w7}
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XDWORD0, XTMP1, XTMP1; \ // XTMP1 = W[-9] ^ W[-16]

#define ROUND_AND_SCHED_N_0_1(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -112,9 +113,9 @@
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-3] rol 15 {xBxA}
    ADDL (disp + 1*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 1*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + 1*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 1*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    VPXOR XTMP1, XTMP2, XTMP2; \ // XTMP2 = W[-9] ^ W[-16] ^ (W[-3] rol 15) {xxxA}
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, h; \
@@ -130,10 +131,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPSRLQ $9, XTMP2, XTMP4; \ // XTMP4 = XTMP2 rol 23 {xxxA}
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = XTMP2 ^ (XTMP2 rol 23 {xxxA})

#define ROUND_AND_SCHED_N_0_2(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -145,10 +143,10 @@
    VPXOR XTMP4, XTMP3, XTMP4; \ // XTMP4 = XTMP2 ^ (XTMP2 rol 15 {xxxA}) ^ (XTMP2 rol 23 {xxxA})
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + 2*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 2*4)(SP), y2; \ // y2 = SS1 + W
    VPXOR XTMP4, XTMP0, XTMP2; \ // XTMP2 = {..., ..., ..., W[0]}
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + 2*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 2*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    VPALIGNR $4, XDWORD3, XTMP2, XTMP3; \ // XTMP3 = {W[0], w15, w14, w13}
    MOVL a, h; \
@@ -164,10 +162,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPOR XTMP3, XTMP4, XTMP4; \ // XTMP4 = (W[-3] rol 15) {DCBA}
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP1, XTMP4, XTMP4; \ // XTMP4 = W[-9] ^ W[-16] ^ (W[-3] rol 15) {DCBA}

#define ROUND_AND_SCHED_N_0_3(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -180,9 +175,9 @@
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    VPSRLD $(32-15), XTMP4, XTMP3; \
    ADDL (disp + 3*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 3*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + 3*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 3*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    VPOR XTMP3, XTMP2, XTMP3; \ // XTMP3 = XTMP4 rol 15 {DCBA}
    MOVL a, h; \
@@ -198,10 +193,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPXOR XTMP3, XTMP1, XTMP1; \ // XTMP1 = XTMP4 ^ (XTMP4 rol 15 {DCBA}) ^ (XTMP4 rol 23 {DCBA})
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP1, XTMP0, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]}
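
The four ROUND_AND_SCHED_N_0_* macros interleave compression rounds with the vectorized message expansion: the VPALIGNR/VPSLLD/VPSRLD/VPXOR steps above evaluate, four words per iteration, the schedule recurrence from the SM3 specification. A hedged scalar sketch of that schedule (helper names p1 and expand are illustrative):

    package sm3sketch

    import "math/bits"

    // p1 is SM3's P1 permutation: x ^ (x <<< 15) ^ (x <<< 23).
    func p1(x uint32) uint32 {
        return x ^ bits.RotateLeft32(x, 15) ^ bits.RotateLeft32(x, 23)
    }

    // expand fills w[16..67] with the recurrence the vector code computes
    // four lanes at a time:
    //   W[j] = P1(W[j-16] ^ W[j-9] ^ (W[j-3] <<< 15)) ^ (W[j-13] <<< 7) ^ W[j-6]
    // The W' words stored 32 bytes above each W block on the stack are W[j] ^ W[j+4].
    func expand(w *[68]uint32) {
        for j := 16; j < 68; j++ {
            w[j] = p1(w[j-16]^w[j-9]^bits.RotateLeft32(w[j-3], 15)) ^
                bits.RotateLeft32(w[j-13], 7) ^ w[j-6]
        }
    }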

// For rounds [16 - 64)
@@ -215,9 +207,9 @@
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    VPSLLD $7, XTMP0, XTMP1; \ // XTMP1 = W[-13] << 7 = {w6<<7,w5<<7,w4<<7,w3<<7}
    ADDL (disp + 0*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 0*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + 0*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 0*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    VPSRLD $(32-7), XTMP0, XTMP0; \ // XTMP0 = W[-13] >> 25 = {w6>>25,w5>>25,w4>>25,w3>>25}
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, y1; \
@@ -238,10 +230,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPALIGNR $12, XDWORD1, XDWORD2, XTMP1; \ // XTMP1 = W[-9] = {w10,w9,w8,w7}
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XDWORD0, XTMP1, XTMP1; \ // XTMP1 = W[-9] ^ W[-16]

#define ROUND_AND_SCHED_N_1_1(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -253,10 +242,10 @@
    VPSHUFD $0xA5, XDWORD3, XTMP2; \ // XTMP2 = W[-3] {BBAA} {w14,w14,w13,w13}
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + 1*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 1*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    VPSRLQ $17, XTMP2, XTMP2; \ // XTMP2 = W[-3] rol 15 {xBxA}
    ADDL (disp + 1*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 1*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, y1; \
    ORL b, y1; \
@@ -276,10 +265,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPSRLQ $9, XTMP2, XTMP4; \ // XTMP4 = XTMP2 rol 23 {xxxA}
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 23 {xxxA})

#define ROUND_AND_SCHED_N_1_2(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -291,10 +277,10 @@
    VPXOR XTMP4, XTMP3, XTMP4; \ // XTMP4 = XTMP2 ^ (XTMP2 rol 15 {xxxA}) ^ (XTMP2 rol 23 {xxxA})
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + 2*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 2*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    VPXOR XTMP4, XTMP0, XTMP2; \ // XTMP2 = {..., ..., W[1], W[0]}
    ADDL (disp + 2*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 2*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, y1; \
    ORL b, y1; \
@@ -314,10 +300,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPOR XTMP3, XTMP4, XTMP4; \ // XTMP4 = (W[-3] rol 15) {DCBA}
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP1, XTMP4, XTMP4; \ // XTMP4 = W[-9] ^ W[-16] ^ (W[-3] rol 15) {DCBA}

#define ROUND_AND_SCHED_N_1_3(disp, const, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3) \
@@ -329,10 +312,10 @@
    VPSLLD $15, XTMP4, XTMP2; \
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + 3*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    ADDL (disp + 3*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    VPSRLD $(32-15), XTMP4, XTMP3; \
    ADDL (disp + 3*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + 3*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    MOVL a, y1; \
    ORL b, y1; \
@@ -352,24 +335,24 @@
    ROLL $9, b; \
    ROLL $19, f; \
    VPXOR XTMP3, XTMP1, XTMP1; \ // XTMP1 = XTMP4 ^ (XTMP4 rol 15 {DCBA}) ^ (XTMP4 rol 23 {DCBA})
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d); \
    VPXOR XTMP1, XTMP0, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]}

#define SS12(a, e, const, ss1, ss2) \
    RORXL $20, a, ss2; \
    MOVL e, ss1; \
    ADDL $const, ss1; \
    ADDL ss2, ss1; \
    ROLL $7, ss1; \ // ss1 = (a <<< 12 + e + T) <<< 7
    XORL ss1, ss2
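
The new SS12 macro factors out the SS1/SS2 pair that every round needs: SS1 = ((a <<< 12) + e + Tj) <<< 7 and SS2 = SS1 ^ (a <<< 12), with RORXL $20 producing a <<< 12 as a right-rotate by 20. A scalar Go sketch of the same computation (the function name ss12 is illustrative):

    package sm3sketch

    import "math/bits"

    // ss12 mirrors the SS12 macro. t is the round constant Tj, already
    // rotated for round j as in the T0..T63 constants used below.
    func ss12(a, e, t uint32) (ss1, ss2 uint32) {
        a12 := bits.RotateLeft32(a, 12)
        ss1 = bits.RotateLeft32(a12+e+t, 7) // (a <<< 12 + e + T) <<< 7
        ss2 = ss1 ^ a12
        return ss1, ss2
    }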

// For rounds [0 - 16)
#define DO_ROUND_N_0(disp, idx, const, a, b, c, d, e, f, g, h) \
    ; \ // ############################# RND N + 0 ############################//
    RORXL $20, a, y0; \ // y0 = a <<< 12
    MOVL e, y2; \
    ADDL $const, y2; \
    ADDL y0, y2; \ // y2 = a <<< 12 + e + T
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + idx*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    SS12(a, e, const, y2, y0); \
    ADDL (disp + idx*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + idx*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + idx*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    ; \
    MOVL a, h; \
@@ -385,23 +368,15 @@
    ROLL $9, b; \
    ROLL $19, f; \
    ; \
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d)
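
DO_ROUND_N_0 is the compression-only round for j in [0, 16), where SM3's FF and GG boolean functions both reduce to a three-way XOR. A hedged scalar sketch of one such round, reusing the p0 and ss12 helpers sketched earlier (all names illustrative; w and w1 stand for the W[j] and W'[j] words the assembly reads from the stack):

    package sm3sketch

    import "math/bits"

    // roundN0 performs one SM3 round for j < 16 on the working variables
    // v = [a, b, c, d, e, f, g, h]. For these rounds FF = GG = XOR.
    func roundN0(v *[8]uint32, t, w, w1 uint32) {
        a, b, c, d, e, f, g, h := v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]
        ss1, ss2 := ss12(a, e, t)
        tt1 := (a ^ b ^ c) + d + ss2 + w1 // w1 = W'[j] = W[j] ^ W[j+4]
        tt2 := (e ^ f ^ g) + h + ss1 + w
        v[3] = c
        v[2] = bits.RotateLeft32(b, 9)
        v[1] = a
        v[0] = tt1
        v[7] = g
        v[6] = bits.RotateLeft32(f, 19)
        v[5] = e
        v[4] = p0(tt2)
    }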

// For rounds [16 - 64)
#define DO_ROUND_N_1(disp, idx, const, a, b, c, d, e, f, g, h) \
    ; \ // ############################# RND N + 0 ############################//
    RORXL $20, a, y0; \ // y0 = a <<< 12
    MOVL e, y2; \
    ADDL $const, y2; \
    ADDL y0, y2; \ // y2 = a <<< 12 + e + T
    ROLL $7, y2; \ // y2 = SS1
    XORL y2, y0 \ // y0 = SS2
    ADDL (disp + idx*4)(SP)(SRND*1), y2; \ // y2 = SS1 + W
    SS12(a, e, const, y2, y0); \
    ADDL (disp + idx*4)(SP), y2; \ // y2 = SS1 + W
    ADDL h, y2; \ // y2 = h + SS1 + W
    ADDL (disp + idx*4 + 32)(SP)(SRND*1), y0;\ // y0 = SS2 + W'
    ADDL (disp + idx*4 + 32)(SP), y0; \ // y0 = SS2 + W'
    ADDL d, y0; \ // y0 = d + SS2 + W'
    ; \
    MOVL a, y1; \
@@ -421,10 +396,7 @@
    ROLL $9, b; \
    ROLL $19, f; \
    ; \
    RORXL $23, y2, y0; \
    RORXL $15, y2, d; \
    XORL y0, d; \
    XORL y2, d; \ // d = P(tt2)
    P0(y2, y0, d)

TEXT ·blockAVX2(SB), 0, $1048-32
    MOVQ dig+0(FP), CTX // d.h[8]
@@ -472,162 +444,154 @@ avx2_loop: // at each iteration works with one block (512 bit)
avx2_last_block_enter:
    ADDQ $64, INP
    MOVQ INP, _INP(SP)
    XORQ SRND, SRND

avx2_schedule_compress: // for w0 - w47
    // Do 4 rounds and scheduling
    VMOVDQU XDWORD0, (_XFER + 0*32)(SP)(SRND*1)
    VMOVDQU XDWORD0, (_XFER + 0*32)(SP)
    VPXOR XDWORD0, XDWORD1, XFER
    VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
    VMOVDQU XFER, (_XFER + 1*32)(SP)
    ROUND_AND_SCHED_N_0_0(_XFER + 0*32, T0, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_0_1(_XFER + 0*32, T1, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_0_2(_XFER + 0*32, T2, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_0_3(_XFER + 0*32, T3, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD1, (_XFER + 2*32)(SP)(SRND*1)
    VMOVDQU XDWORD1, (_XFER + 2*32)(SP)
    VPXOR XDWORD1, XDWORD2, XFER
    VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1)
    VMOVDQU XFER, (_XFER + 3*32)(SP)
    ROUND_AND_SCHED_N_0_0(_XFER + 2*32, T4, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_0_1(_XFER + 2*32, T5, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_0_2(_XFER + 2*32, T6, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_0_3(_XFER + 2*32, T7, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD2, (_XFER + 4*32)(SP)(SRND*1)
    VMOVDQU XDWORD2, (_XFER + 4*32)(SP)
    VPXOR XDWORD2, XDWORD3, XFER
    VMOVDQU XFER, (_XFER + 5*32)(SP)(SRND*1)
    VMOVDQU XFER, (_XFER + 5*32)(SP)
    ROUND_AND_SCHED_N_0_0(_XFER + 4*32, T8, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_0_1(_XFER + 4*32, T9, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_0_2(_XFER + 4*32, T10, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_0_3(_XFER + 4*32, T11, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD3, (_XFER + 6*32)(SP)(SRND*1)
    VMOVDQU XDWORD3, (_XFER + 6*32)(SP)
    VPXOR XDWORD3, XDWORD0, XFER
    VMOVDQU XFER, (_XFER + 7*32)(SP)(SRND*1)
    VMOVDQU XFER, (_XFER + 7*32)(SP)
    ROUND_AND_SCHED_N_0_0(_XFER + 6*32, T12, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_0_1(_XFER + 6*32, T13, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_0_2(_XFER + 6*32, T14, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_0_3(_XFER + 6*32, T15, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)

    ADDQ $8*32, SRND

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD0, (_XFER + 0*32)(SP)(SRND*1)
    VMOVDQU XDWORD0, (_XFER + 8*32)(SP)
    VPXOR XDWORD0, XDWORD1, XFER
    VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 0*32, T16, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 0*32, T17, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 0*32, T18, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 0*32, T19, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    VMOVDQU XFER, (_XFER + 9*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 8*32, T16, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 8*32, T17, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 8*32, T18, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 8*32, T19, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD1, (_XFER + 2*32)(SP)(SRND*1)
    VMOVDQU XDWORD1, (_XFER + 10*32)(SP)
    VPXOR XDWORD1, XDWORD2, XFER
    VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 2*32, T20, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_1(_XFER + 2*32, T21, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_2(_XFER + 2*32, T22, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_3(_XFER + 2*32, T23, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    VMOVDQU XFER, (_XFER + 11*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 10*32, T20, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_1(_XFER + 10*32, T21, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_2(_XFER + 10*32, T22, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_3(_XFER + 10*32, T23, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD2, (_XFER + 4*32)(SP)(SRND*1)
    VMOVDQU XDWORD2, (_XFER + 12*32)(SP)
    VPXOR XDWORD2, XDWORD3, XFER
    VMOVDQU XFER, (_XFER + 5*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 4*32, T24, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_1(_XFER + 4*32, T25, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_2(_XFER + 4*32, T26, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_3(_XFER + 4*32, T27, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    VMOVDQU XFER, (_XFER + 13*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 12*32, T24, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_1(_XFER + 12*32, T25, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_2(_XFER + 12*32, T26, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_3(_XFER + 12*32, T27, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD3, (_XFER + 6*32)(SP)(SRND*1)
    VMOVDQU XDWORD3, (_XFER + 14*32)(SP)
    VPXOR XDWORD3, XDWORD0, XFER
    VMOVDQU XFER, (_XFER + 7*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 6*32, T28, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_1(_XFER + 6*32, T29, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_2(_XFER + 6*32, T30, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_3(_XFER + 6*32, T31, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)

    ADDQ $8*32, SRND
    VMOVDQU XFER, (_XFER + 15*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 14*32, T28, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_1(_XFER + 14*32, T29, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_2(_XFER + 14*32, T30, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_3(_XFER + 14*32, T31, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD0, (_XFER + 0*32)(SP)(SRND*1)
    VMOVDQU XDWORD0, (_XFER + 16*32)(SP)
    VPXOR XDWORD0, XDWORD1, XFER
    VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 0*32, T32, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 0*32, T33, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 0*32, T34, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 0*32, T35, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    VMOVDQU XFER, (_XFER + 17*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 16*32, T32, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 16*32, T33, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 16*32, T34, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 16*32, T35, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD1, (_XFER + 2*32)(SP)(SRND*1)
    VMOVDQU XDWORD1, (_XFER + 18*32)(SP)
    VPXOR XDWORD1, XDWORD2, XFER
    VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 2*32, T36, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_1(_XFER + 2*32, T37, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_2(_XFER + 2*32, T38, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_3(_XFER + 2*32, T39, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    VMOVDQU XFER, (_XFER + 19*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 18*32, T36, e, f, g, h, a, b, c, d, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_1(_XFER + 18*32, T37, d, e, f, g, h, a, b, c, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_2(_XFER + 18*32, T38, c, d, e, f, g, h, a, b, XDWORD1, XDWORD2, XDWORD3, XDWORD0)
    ROUND_AND_SCHED_N_1_3(_XFER + 18*32, T39, b, c, d, e, f, g, h, a, XDWORD1, XDWORD2, XDWORD3, XDWORD0)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD2, (_XFER + 4*32)(SP)(SRND*1)
    VMOVDQU XDWORD2, (_XFER + 20*32)(SP)
    VPXOR XDWORD2, XDWORD3, XFER
    VMOVDQU XFER, (_XFER + 5*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 4*32, T40, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_1(_XFER + 4*32, T41, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_2(_XFER + 4*32, T42, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_3(_XFER + 4*32, T43, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    VMOVDQU XFER, (_XFER + 21*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 20*32, T40, a, b, c, d, e, f, g, h, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_1(_XFER + 20*32, T41, h, a, b, c, d, e, f, g, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_2(_XFER + 20*32, T42, g, h, a, b, c, d, e, f, XDWORD2, XDWORD3, XDWORD0, XDWORD1)
    ROUND_AND_SCHED_N_1_3(_XFER + 20*32, T43, f, g, h, a, b, c, d, e, XDWORD2, XDWORD3, XDWORD0, XDWORD1)

    // Do 4 rounds and scheduling
    VMOVDQU XDWORD3, (_XFER + 6*32)(SP)(SRND*1)
    VMOVDQU XDWORD3, (_XFER + 22*32)(SP)
    VPXOR XDWORD3, XDWORD0, XFER
    VMOVDQU XFER, (_XFER + 7*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 6*32, T44, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_1(_XFER + 6*32, T45, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_2(_XFER + 6*32, T46, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_3(_XFER + 6*32, T47, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)

    ADDQ $8*32, SRND
    VMOVDQU XFER, (_XFER + 23*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 22*32, T44, e, f, g, h, a, b, c, d, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_1(_XFER + 22*32, T45, d, e, f, g, h, a, b, c, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_2(_XFER + 22*32, T46, c, d, e, f, g, h, a, b, XDWORD3, XDWORD0, XDWORD1, XDWORD2)
    ROUND_AND_SCHED_N_1_3(_XFER + 22*32, T47, b, c, d, e, f, g, h, a, XDWORD3, XDWORD0, XDWORD1, XDWORD2)

    // w48 - w63 processed with only 4 rounds scheduling (last 16 rounds)
    // Do 4 rounds and scheduling
    VMOVDQU XDWORD0, (_XFER + 0*32)(SP)(SRND*1)
    VMOVDQU XDWORD0, (_XFER + 24*32)(SP)
    VPXOR XDWORD0, XDWORD1, XFER
    VMOVDQU XFER, (_XFER + 1*32)(SP)(SRND*1)
    ROUND_AND_SCHED_N_1_0(_XFER + 0*32, T48, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 0*32, T49, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 0*32, T50, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 0*32, T51, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    VMOVDQU XFER, (_XFER + 25*32)(SP)
    ROUND_AND_SCHED_N_1_0(_XFER + 24*32, T48, a, b, c, d, e, f, g, h, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_1(_XFER + 24*32, T49, h, a, b, c, d, e, f, g, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_2(_XFER + 24*32, T50, g, h, a, b, c, d, e, f, XDWORD0, XDWORD1, XDWORD2, XDWORD3)
    ROUND_AND_SCHED_N_1_3(_XFER + 24*32, T51, f, g, h, a, b, c, d, e, XDWORD0, XDWORD1, XDWORD2, XDWORD3)

    // w52 - w63 processed with no scheduling (last 12 rounds)
    // Do 4 rounds
    VMOVDQU XDWORD1, (_XFER + 2*32)(SP)(SRND*1)
    VMOVDQU XDWORD1, (_XFER + 26*32)(SP)
    VPXOR XDWORD1, XDWORD2, XFER
    VMOVDQU XFER, (_XFER + 3*32)(SP)(SRND*1)
    DO_ROUND_N_1(_XFER + 2*32, 0, T52, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 2*32, 1, T53, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 2*32, 2, T54, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 2*32, 3, T55, b, c, d, e, f, g, h, a)
    VMOVDQU XFER, (_XFER + 27*32)(SP)
    DO_ROUND_N_1(_XFER + 26*32, 0, T52, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 26*32, 1, T53, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 26*32, 2, T54, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 26*32, 3, T55, b, c, d, e, f, g, h, a)

    // Do 4 rounds
    VMOVDQU XDWORD2, (_XFER + 4*32)(SP)(SRND*1)
    VMOVDQU XDWORD2, (_XFER + 28*32)(SP)
    VPXOR XDWORD2, XDWORD3, XFER
    VMOVDQU XFER, (_XFER + 5*32)(SP)(SRND*1)
    DO_ROUND_N_1(_XFER + 4*32, 0, T56, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 4*32, 1, T57, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 4*32, 2, T58, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 4*32, 3, T59, f, g, h, a, b, c, d, e)
    VMOVDQU XFER, (_XFER + 29*32)(SP)
    DO_ROUND_N_1(_XFER + 28*32, 0, T56, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 28*32, 1, T57, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 28*32, 2, T58, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 28*32, 3, T59, f, g, h, a, b, c, d, e)

    // Do 4 rounds
    VMOVDQU XDWORD3, (_XFER + 6*32)(SP)(SRND*1)
    VMOVDQU XDWORD3, (_XFER + 30*32)(SP)
    VPXOR XDWORD3, XDWORD0, XFER
    VMOVDQU XFER, (_XFER + 7*32)(SP)(SRND*1)
    DO_ROUND_N_1(_XFER + 6*32, 0, T60, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 6*32, 1, T61, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 6*32, 2, T62, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 6*32, 3, T63, b, c, d, e, f, g, h, a)
    VMOVDQU XFER, (_XFER + 31*32)(SP)
    DO_ROUND_N_1(_XFER + 30*32, 0, T60, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 30*32, 1, T61, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 30*32, 2, T62, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 30*32, 3, T63, b, c, d, e, f, g, h, a)

    MOVQ dig+0(FP), CTX // d.h[8]
    MOVQ _INP(SP), INP

    xorm( 0(CTX), a)
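
The xorm() lines fold the working variables back into the chaining value; SM3's feed-forward is an XOR, V(i+1) = ABCDEFGH ^ V(i), rather than the addition used by SHA-2. A hedged sketch of that final step (the state layout shown is illustrative):

    package sm3sketch

    // feedForward mirrors the xorm() calls after the 64 rounds: each word of
    // the chaining state is XORed with the corresponding working variable.
    func feedForward(state *[8]uint32, v [8]uint32) {
        for i := range state {
            state[i] ^= v[i]
        }
    }
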
@@ -642,8 +606,6 @@ avx2_schedule_compress: // for w0 - w47
    CMPQ _INP_END(SP), INP
    JB done_hash

    XORQ SRND, SRND

avx2_compress: // Do second block using previously scheduled results
    DO_ROUND_N_0(_XFER + 0*32 + 16, 0, T0, a, b, c, d, e, f, g, h)
    DO_ROUND_N_0(_XFER + 0*32 + 16, 1, T1, h, a, b, c, d, e, f, g)
@@ -665,73 +627,66 @@ avx2_compress: // Do second block using previously scheduled results
    DO_ROUND_N_0(_XFER + 6*32 + 16, 2, T14, c, d, e, f, g, h, a, b)
    DO_ROUND_N_0(_XFER + 6*32 + 16, 3, T15, b, c, d, e, f, g, h, a)

    ADDQ $8*32, SRND
    DO_ROUND_N_1(_XFER + 8*32 + 16, 0, T16, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 8*32 + 16, 1, T17, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 8*32 + 16, 2, T18, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 8*32 + 16, 3, T19, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 0*32 + 16, 0, T16, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 1, T17, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 2, T18, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 3, T19, f, g, h, a, b, c, d, e)
    DO_ROUND_N_1(_XFER + 10*32 + 16, 0, T20, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 10*32 + 16, 1, T21, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 10*32 + 16, 2, T22, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 10*32 + 16, 3, T23, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 2*32 + 16, 0, T20, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 1, T21, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 2, T22, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 3, T23, b, c, d, e, f, g, h, a)
    DO_ROUND_N_1(_XFER + 12*32 + 16, 0, T24, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 12*32 + 16, 1, T25, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 12*32 + 16, 2, T26, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 12*32 + 16, 3, T27, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 4*32 + 16, 0, T24, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 1, T25, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 2, T26, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 3, T27, f, g, h, a, b, c, d, e)
    DO_ROUND_N_1(_XFER + 14*32 + 16, 0, T28, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 14*32 + 16, 1, T29, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 14*32 + 16, 2, T30, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 14*32 + 16, 3, T31, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 6*32 + 16, 0, T28, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 1, T29, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 2, T30, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 3, T31, b, c, d, e, f, g, h, a)
    DO_ROUND_N_1(_XFER + 16*32 + 16, 0, T32, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 16*32 + 16, 1, T33, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 16*32 + 16, 2, T34, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 16*32 + 16, 3, T35, f, g, h, a, b, c, d, e)

    ADDQ $8*32, SRND
    DO_ROUND_N_1(_XFER + 18*32 + 16, 0, T36, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 18*32 + 16, 1, T37, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 18*32 + 16, 2, T38, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 18*32 + 16, 3, T39, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 0*32 + 16, 0, T32, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 1, T33, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 2, T34, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 3, T35, f, g, h, a, b, c, d, e)
    DO_ROUND_N_1(_XFER + 20*32 + 16, 0, T40, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 20*32 + 16, 1, T41, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 20*32 + 16, 2, T42, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 20*32 + 16, 3, T43, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 2*32 + 16, 0, T36, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 1, T37, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 2, T38, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 3, T39, b, c, d, e, f, g, h, a)
    DO_ROUND_N_1(_XFER + 22*32 + 16, 0, T44, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 22*32 + 16, 1, T45, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 22*32 + 16, 2, T46, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 22*32 + 16, 3, T47, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 4*32 + 16, 0, T40, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 1, T41, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 2, T42, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 3, T43, f, g, h, a, b, c, d, e)
    DO_ROUND_N_1(_XFER + 24*32 + 16, 0, T48, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 24*32 + 16, 1, T49, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 24*32 + 16, 2, T50, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 24*32 + 16, 3, T51, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 6*32 + 16, 0, T44, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 1, T45, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 2, T46, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 3, T47, b, c, d, e, f, g, h, a)
    DO_ROUND_N_1(_XFER + 26*32 + 16, 0, T52, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 26*32 + 16, 1, T53, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 26*32 + 16, 2, T54, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 26*32 + 16, 3, T55, b, c, d, e, f, g, h, a)

    ADDQ $8*32, SRND
    DO_ROUND_N_1(_XFER + 28*32 + 16, 0, T56, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 28*32 + 16, 1, T57, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 28*32 + 16, 2, T58, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 28*32 + 16, 3, T59, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 0*32 + 16, 0, T48, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 1, T49, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 2, T50, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 0*32 + 16, 3, T51, f, g, h, a, b, c, d, e)
    DO_ROUND_N_1(_XFER + 30*32 + 16, 0, T60, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 30*32 + 16, 1, T61, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 30*32 + 16, 2, T62, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 30*32 + 16, 3, T63, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 2*32 + 16, 0, T52, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 1, T53, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 2, T54, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 2*32 + 16, 3, T55, b, c, d, e, f, g, h, a)

    DO_ROUND_N_1(_XFER + 4*32 + 16, 0, T56, a, b, c, d, e, f, g, h)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 1, T57, h, a, b, c, d, e, f, g)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 2, T58, g, h, a, b, c, d, e, f)
    DO_ROUND_N_1(_XFER + 4*32 + 16, 3, T59, f, g, h, a, b, c, d, e)

    DO_ROUND_N_1(_XFER + 6*32 + 16, 0, T60, e, f, g, h, a, b, c, d)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 1, T61, d, e, f, g, h, a, b, c)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 2, T62, c, d, e, f, g, h, a, b)
    DO_ROUND_N_1(_XFER + 6*32 + 16, 3, T63, b, c, d, e, f, g, h, a)

    MOVQ dig+0(FP), CTX // d.h[8]
    MOVQ _INP(SP), INP
    ADDQ $64, INP

@@ -696,8 +696,7 @@ sse_schedule_compress: // for w0 - w47
    xorm( 28(CTX), h)

    CMPQ _INP_END(SP), INP
    JB sse_done_hash
    JMP sse_loop
    JAE sse_loop

sse_done_hash:
    RET
@@ -881,8 +880,7 @@ avx_schedule_compress: // for w0 - w47
    xorm( 28(CTX), h)

    CMPQ _INP_END(SP), INP
    JB done_hash
    JMP avx_loop
    JAE avx_loop

done_hash:
    RET