sm3: arm64, do not use local storage #162

Sun Yimin 2023-09-18 08:21:32 +08:00 committed by GitHub
parent 77893f07f4
commit d56b3d1ed0
GPG Key ID: 4AEE18F83AFDEB23
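
What the change does: the old code staged each group of four schedule words in a 32-byte stack frame (VST1 ... (RSP)) and reloaded W and W' word by word with MOVW from RSP; the new code keeps W in the XWORDn vector registers, computes W' directly into a new Wt register (VEOR XWORDi, XWORD(i+1), Wt), and reads single 32-bit lanes with VMOV, so the frame shrinks from $32-32 to $0.

For orientation only, here is a minimal pure-Go sketch of the compression round that the ROUND_AND_SCHED_* / DO_ROUND_N_1 macros evaluate (rounds 16-63; rounds 0-15 use plain XOR for FF/GG). The package and function names below are illustrative, not part of this commit:

package sm3ref

import "math/bits"

// p0 is the SM3 permutation P0(x) = x ^ (x <<< 9) ^ (x <<< 17).
func p0(x uint32) uint32 { return x ^ bits.RotateLeft32(x, 9) ^ bits.RotateLeft32(x, 17) }

// round16to63 is one SM3 compression round for j in [16, 64).
// tj is the pre-rotated round constant T_j <<< j (the $const immediate in the macros),
// w is W[j] (a lane of an XWORDn register) and wt is W'[j] (a lane of Wt).
func round16to63(a, b, c, d, e, f, g, h, tj, w, wt uint32) (uint32, uint32, uint32, uint32, uint32, uint32, uint32, uint32) {
	a12 := bits.RotateLeft32(a, 12)                     // RORW $20, a  == a <<< 12
	ss1 := bits.RotateLeft32(a12+e+tj, 7)               // RORW $25, y1 == <<< 7
	ss2 := ss1 ^ a12                                    // EORW y2, y0
	tt1 := ((a & b) | (a & c) | (b & c)) + d + ss2 + wt // FF_j = MAJ for j >= 16
	tt2 := ((e & f) | (^e & g)) + h + ss1 + w           // GG_j = CH for j >= 16
	// new working variables (A, B, C, D, E, F, G, H)
	return tt1, a, bits.RotateLeft32(b, 9), c, p0(tt2), e, bits.RotateLeft32(f, 19), g
}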


@@ -14,6 +14,8 @@
 #define XTMP3 V7
 #define XTMP4 V8
+#define Wt V9
 #define a R0
 #define b R1
 #define c R2
@@ -42,18 +44,18 @@
 #define h1 R24
 // For rounds [0 - 16)
-#define ROUND_AND_SCHED_N_0_0(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_0_0(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VEXT $12, XWORD1.B16, XWORD0.B16, XTMP0.B16; \ // XTMP0 = W[-13] = {w6,w5,w4,w3}, Vm = XWORD1, Vn = XWORD0
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 0*4)(RSP), y1; \
+VMOV XWORD0.S[0], y1; \
 VSHL $7, XTMP0.S4, XTMP1.S4; \
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 0*4)(RSP), y1; \
+VMOV Wt.S[0], y1; \
 VSRI $25, XTMP0.S4, XTMP1.S4; \ // XTMP1 = W[-13] rol 7
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -79,18 +81,18 @@
 EORW y2, d; \ // d = P(tt2)
 VEXT $4, XWORD2.B16, XWORD3.B16, XTMP3.B16; \ // XTMP3 = W[-3] {w11,w15,w14,w13}
-#define ROUND_AND_SCHED_N_0_1(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_0_1(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VSHL $15, XTMP3.S4, XTMP2.S4; \
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 1*4)(RSP), y1; \
+VMOV XWORD0.S[1], y1; \
 VSRI $17, XTMP3.S4, XTMP2.S4; \ // XTMP2 = W[-3] rol 15 {xxBA}
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 1*4)(RSP), y1; \
+VMOV Wt.S[1], y1; \
 VEOR XTMP1.B16, XTMP2.B16, XTMP2.B16; \ // XTMP2 = W[-9] ^ W[-16] ^ (W[-3] rol 15) {xxBA}
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -116,18 +118,18 @@
 EORW y2, d; \ // d = P(tt2)
 VEOR XTMP2.B16, XTMP4.B16, XTMP4.B16; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA})
-#define ROUND_AND_SCHED_N_0_2(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_0_2(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VEOR XTMP4.B16, XTMP3.B16, XTMP4.B16; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA}) XOR (XTMP2 rol 23 {xxBA})
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 2*4)(RSP), y1; \
+VMOV XWORD0.S[2], y1; \
 VEOR XTMP4.B16, XTMP0.B16, XTMP2.B16; \ // XTMP2 = {..., ..., W[1], W[0]}
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 2*4)(RSP), y1; \
+VMOV Wt.S[2], y1; \
 VEXT $4, XTMP2.B16, XWORD3.B16, XTMP3.B16; \ // XTMP3 = W[-3] {W[0],w15, w14, w13}, Vm = XTMP2, Vn = XWORD3
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -153,18 +155,18 @@
 EORW y2, d; \ // d = P(tt2)
 VSRI $17, XTMP4.S4, XTMP3.S4; \ // XTMP3 = XTMP4 rol 15 {DCxx}
-#define ROUND_AND_SCHED_N_0_3(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_0_3(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 RORW $25, y1, y2; \ // y2 = SS1
 VSHL $8, XTMP3.S4, XTMP1.S4; \
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 3*4)(RSP), y1; \
+VMOV XWORD0.S[3], y1; \
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
 VSRI $24, XTMP3.S4, XTMP1.S4; \ // XTMP1 = XTMP4 rol 23 {DCxx}
-MOVW (disp + 16 + 3*4)(RSP), y1; \
+VMOV Wt.S[3], y1; \
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
 ; \
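
Aside (not part of the diff): the VEXT/VSHL/VSRI/VEOR steps threaded through ROUND_AND_SCHED_N_0_0 ... N_0_3 above compute the next four message words while the scalar rounds run. A pure-Go sketch of the scalar recurrence they implement, with illustrative names:

package sm3ref

import "math/bits"

// p1 is the SM3 permutation P1(x) = x ^ (x <<< 15) ^ (x <<< 23).
func p1(x uint32) uint32 { return x ^ bits.RotateLeft32(x, 15) ^ bits.RotateLeft32(x, 23) }

// expand is the message-schedule recurrence (j >= 16) that the interleaved
// vector code evaluates four words at a time into XWORD0.
func expand(w []uint32, j int) uint32 {
	return p1(w[j-16]^w[j-9]^bits.RotateLeft32(w[j-3], 15)) ^
		bits.RotateLeft32(w[j-13], 7) ^ w[j-6]
}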
@@ -190,18 +192,18 @@
 VEXT $8, XTMP3.B16, XTMP3.B16, XWORD0.B16; \ // XWORD0 = {W[3], W[2], W[1], W[0]}
 // For rounds [16 - 64)
-#define ROUND_AND_SCHED_N_1_0(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_1_0(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VEXT $12, XWORD1.B16, XWORD0.B16, XTMP0.B16; \ // XTMP0 = W[-13] = {w6,w5,w4,w3}, Vm = XWORD1, Vn = XWORD0
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 0*4)(RSP), y1; \
+VMOV XWORD0.S[0], y1; \
 VSHL $7, XTMP0.S4, XTMP1.S4; \
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 0*4)(RSP), y1; \
+VMOV Wt.S[0], y1; \
 VSRI $25, XTMP0.S4, XTMP1.S4; \ // XTMP1 = W[-13] rol 7
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -230,18 +232,18 @@
 EORW y2, d; \ // d = P(tt2)
 VEXT $4, XWORD2.B16, XWORD3.B16, XTMP3.B16; \ // XTMP3 = W[-3] {w11,w15,w14,w13}
-#define ROUND_AND_SCHED_N_1_1(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_1_1(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VSHL $15, XTMP3.S4, XTMP2.S4; \
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 1*4)(RSP), y1; \
+VMOV XWORD0.S[1], y1; \
 VSRI $17, XTMP3.S4, XTMP2.S4; \ // XTMP2 = W[-3] rol 15 {xxBA}
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 1*4)(RSP), y1; \
+VMOV Wt.S[1], y1; \
 VEOR XTMP1.B16, XTMP2.B16, XTMP2.B16; \ // XTMP2 = W[-9] ^ W[-16] ^ (W[-3] rol 15) {xxBA}
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -270,18 +272,18 @@
 EORW y2, d; \ // d = P(tt2)
 VEOR XTMP2.B16, XTMP4.B16, XTMP4.B16; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA})
-#define ROUND_AND_SCHED_N_1_2(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_1_2(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 VEOR XTMP4.B16, XTMP3.B16, XTMP4.B16; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA}) XOR (XTMP2 rol 23 {xxBA})
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 2*4)(RSP), y1; \
+VMOV XWORD0.S[2], y1; \
 VEOR XTMP4.B16, XTMP0.B16, XTMP2.B16; \ // XTMP2 = {..., ..., W[1], W[0]}
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + 2*4)(RSP), y1; \
+VMOV Wt.S[2], y1; \
 VEXT $4, XTMP2.B16, XWORD3.B16, XTMP3.B16; \ // XTMP3 = W[-3] {W[0],w15, w14, w13}, Vm = XTMP2, Vn = XWORD3
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
@@ -310,18 +312,18 @@
 EORW y2, d; \ // d = P(tt2)
 VSRI $17, XTMP4.S4, XTMP3.S4; \ // XTMP3 = XTMP4 rol 15 {DCxx}
-#define ROUND_AND_SCHED_N_1_3(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3) \
+#define ROUND_AND_SCHED_N_1_3(disp, const, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 RORW $25, y1, y2; \ // y2 = SS1
 VSHL $8, XTMP3.S4, XTMP1.S4; \
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + 3*4)(RSP), y1; \
+VMOV XWORD0.S[3], y1; \
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
 VSRI $24, XTMP3.S4, XTMP1.S4; \ // XTMP1 = XTMP4 rol 23 {DCxx}
-MOVW (disp + 16 + 3*4)(RSP), y1; \
+VMOV Wt.S[3], y1; \
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
 ; \
@@ -350,16 +352,16 @@
 VEXT $8, XTMP3.B16, XTMP3.B16, XWORD0.B16; \ // XWORD0 = {W[3], W[2], W[1], W[0]}
 // For rounds [16 - 64)
-#define DO_ROUND_N_1(disp, idx, const, a, b, c, d, e, f, g, h) \
+#define DO_ROUND_N_1(disp, idx, const, a, b, c, d, e, f, g, h, W, Wt) \
 RORW $20, a, y0; \ // y0 = a <<< 12
 ADDW $const, e, y1; \
 ADDW y0, y1; \ // y1 = a <<< 12 + e + T
 RORW $25, y1, y2; \ // y2 = SS1
 EORW y2, y0; \ // y0 = SS2
-MOVW (disp + idx*4)(RSP), y1; \
+VMOV W.S[idx], y1; \
 ADDW y1, y2; \ // y2 = SS1 + W
 ADDW h, y2; \ // y2 = h + SS1 + W
-MOVW (disp + 16 + idx*4)(RSP), y1; \
+VMOV Wt.S[idx], y1; \
 ADDW y1, y0; \ // y0 = SS2 + W'
 ADDW d, y0; \ // y0 = d + SS2 + W'
 ; \
@@ -383,7 +385,7 @@
 EORW y2, d; \ // d = P(tt2)
 // func blockARM64(dig *digest, p []byte)
-TEXT ·blockARM64(SB), 0, $32-32
+TEXT ·blockARM64(SB), NOSPLIT, $0
 MOVD dig+0(FP), CTX
 MOVD p_base+8(FP), INP
 MOVD p_len+16(FP), NUM_BYTES
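
With the local frame gone, W' is never staged in memory: each four-round group below starts with a single VEOR that forms W'[j..j+3] = W[j..j+3] ^ W[j+4..j+7] in the Wt register, replacing the old VMOV/VEOR/VST1 sequence. In scalar terms (illustrative Go, not part of the commit):

package sm3ref

// wPrime returns W'[j] = W[j] ^ W[j+4]; the new code produces four of these
// lanes at once with VEOR XWORDi.B16, XWORD(i+1).B16, Wt.B16.
func wPrime(w []uint32, j int) uint32 { return w[j] ^ w[j+4] }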
@@ -414,150 +416,118 @@ loop:
 schedule_compress: // for w0 - w47
 // Do 4 rounds and scheduling
-VMOV XWORD0.B16, XTMP0.B16
-VEOR XWORD0.B16, XWORD1.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_0_0(0*16, 0x79cc4519, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_0_1(0*16, 0xf3988a32, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_0_2(0*16, 0xe7311465, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_0_3(0*16, 0xce6228cb, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3)
+VEOR XWORD0.B16, XWORD1.B16, Wt.B16
+ROUND_AND_SCHED_N_0_0(0*16, 0x79cc4519, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_0_1(0*16, 0xf3988a32, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_0_2(0*16, 0xe7311465, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_0_3(0*16, 0xce6228cb, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD1.B16, XTMP0.B16
-VEOR XWORD1.B16, XWORD2.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_0_0(0*16, 0x9cc45197, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_0_1(0*16, 0x3988a32f, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_0_2(0*16, 0x7311465e, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_0_3(0*16, 0xe6228cbc, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0)
+VEOR XWORD1.B16, XWORD2.B16, Wt.B16
+ROUND_AND_SCHED_N_0_0(0*16, 0x9cc45197, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_0_1(0*16, 0x3988a32f, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_0_2(0*16, 0x7311465e, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_0_3(0*16, 0xe6228cbc, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD2.B16, XTMP0.B16
-VEOR XWORD2.B16, XWORD3.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_0_0(0*16, 0xcc451979, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_0_1(0*16, 0x988a32f3, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_0_2(0*16, 0x311465e7, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_0_3(0*16, 0x6228cbce, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1)
+VEOR XWORD2.B16, XWORD3.B16, Wt.B16
+ROUND_AND_SCHED_N_0_0(0*16, 0xcc451979, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_0_1(0*16, 0x988a32f3, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_0_2(0*16, 0x311465e7, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_0_3(0*16, 0x6228cbce, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD3.B16, XTMP0.B16
-VEOR XWORD3.B16, XWORD0.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_0_0(0*16, 0xc451979c, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_0_1(0*16, 0x88a32f39, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_0_2(0*16, 0x11465e73, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_0_3(0*16, 0x228cbce6, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2)
+VEOR XWORD3.B16, XWORD0.B16, Wt.B16
+ROUND_AND_SCHED_N_0_0(0*16, 0xc451979c, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_0_1(0*16, 0x88a32f39, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_0_2(0*16, 0x11465e73, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_0_3(0*16, 0x228cbce6, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD0.B16, XTMP0.B16
-VEOR XWORD0.B16, XWORD1.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x9d8a7a87, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_1(0*16, 0x3b14f50f, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_2(0*16, 0x7629ea1e, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_3(0*16, 0xec53d43c, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3)
+VEOR XWORD0.B16, XWORD1.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x9d8a7a87, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0x3b14f50f, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x7629ea1e, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0xec53d43c, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD1.B16, XTMP0.B16
-VEOR XWORD1.B16, XWORD2.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0xd8a7a879, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_1(0*16, 0xb14f50f3, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_2(0*16, 0x629ea1e7, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_3(0*16, 0xc53d43ce, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0)
+VEOR XWORD1.B16, XWORD2.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0xd8a7a879, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0xb14f50f3, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x629ea1e7, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0xc53d43ce, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD2.B16, XTMP0.B16
-VEOR XWORD2.B16, XWORD3.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x8a7a879d, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_1(0*16, 0x14f50f3b, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_2(0*16, 0x29ea1e76, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_3(0*16, 0x53d43cec, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1)
+VEOR XWORD2.B16, XWORD3.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x8a7a879d, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0x14f50f3b, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x29ea1e76, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0x53d43cec, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD3.B16, XTMP0.B16
-VEOR XWORD3.B16, XWORD0.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0xa7a879d8, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_1(0*16, 0x4f50f3b1, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_2(0*16, 0x9ea1e762, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_3(0*16, 0x3d43cec5, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2)
+VEOR XWORD3.B16, XWORD0.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0xa7a879d8, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0x4f50f3b1, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x9ea1e762, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0x3d43cec5, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD0.B16, XTMP0.B16
-VEOR XWORD0.B16, XWORD1.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x7a879d8a, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_1(0*16, 0xf50f3b14, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_2(0*16, 0xea1e7629, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_3(0*16, 0xd43cec53, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3)
+VEOR XWORD0.B16, XWORD1.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x7a879d8a, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0xf50f3b14, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0xea1e7629, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0xd43cec53, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD1.B16, XTMP0.B16
-VEOR XWORD1.B16, XWORD2.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0xa879d8a7, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_1(0*16, 0x50f3b14f, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_2(0*16, 0xa1e7629e, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0)
-ROUND_AND_SCHED_N_1_3(0*16, 0x43cec53d, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0)
+VEOR XWORD1.B16, XWORD2.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0xa879d8a7, e, f, g, h, a, b, c, d, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0x50f3b14f, d, e, f, g, h, a, b, c, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0xa1e7629e, c, d, e, f, g, h, a, b, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0x43cec53d, b, c, d, e, f, g, h, a, XWORD1, XWORD2, XWORD3, XWORD0, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD2.B16, XTMP0.B16
-VEOR XWORD2.B16, XWORD3.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x879d8a7a, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_1(0*16, 0xf3b14f5, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_2(0*16, 0x1e7629ea, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1)
-ROUND_AND_SCHED_N_1_3(0*16, 0x3cec53d4, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1)
+VEOR XWORD2.B16, XWORD3.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x879d8a7a, a, b, c, d, e, f, g, h, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0xf3b14f5, h, a, b, c, d, e, f, g, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x1e7629ea, g, h, a, b, c, d, e, f, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0x3cec53d4, f, g, h, a, b, c, d, e, XWORD2, XWORD3, XWORD0, XWORD1, Wt)
 // Do 4 rounds and scheduling
-VMOV XWORD3.B16, XTMP0.B16
-VEOR XWORD3.B16, XWORD0.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x79d8a7a8, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_1(0*16, 0xf3b14f50, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_2(0*16, 0xe7629ea1, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2)
-ROUND_AND_SCHED_N_1_3(0*16, 0xcec53d43, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2)
+VEOR XWORD3.B16, XWORD0.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x79d8a7a8, e, f, g, h, a, b, c, d, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0xf3b14f50, d, e, f, g, h, a, b, c, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0xe7629ea1, c, d, e, f, g, h, a, b, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0xcec53d43, b, c, d, e, f, g, h, a, XWORD3, XWORD0, XWORD1, XWORD2, Wt)
 // w48 - w63 processed with only 4 rounds scheduling (last 16 rounds)
 // Do 4 rounds and scheduling
-VMOV XWORD0.B16, XTMP0.B16
-VEOR XWORD0.B16, XWORD1.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-ROUND_AND_SCHED_N_1_0(0*16, 0x9d8a7a87, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_1(0*16, 0x3b14f50f, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_2(0*16, 0x7629ea1e, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3)
-ROUND_AND_SCHED_N_1_3(0*16, 0xec53d43c, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3)
+VEOR XWORD0.B16, XWORD1.B16, Wt.B16
+ROUND_AND_SCHED_N_1_0(0*16, 0x9d8a7a87, a, b, c, d, e, f, g, h, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_1(0*16, 0x3b14f50f, h, a, b, c, d, e, f, g, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_2(0*16, 0x7629ea1e, g, h, a, b, c, d, e, f, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
+ROUND_AND_SCHED_N_1_3(0*16, 0xec53d43c, f, g, h, a, b, c, d, e, XWORD0, XWORD1, XWORD2, XWORD3, Wt)
 // w52 - w63 processed with no scheduling (last 12 rounds)
 // Do 4 rounds
-VMOV XWORD1.B16, XTMP0.B16
-VEOR XWORD1.B16, XWORD2.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-DO_ROUND_N_1(0*16, 0, 0xd8a7a879, e, f, g, h, a, b, c, d)
-DO_ROUND_N_1(0*16, 1, 0xb14f50f3, d, e, f, g, h, a, b, c)
-DO_ROUND_N_1(0*16, 2, 0x629ea1e7, c, d, e, f, g, h, a, b)
-DO_ROUND_N_1(0*16, 3, 0xc53d43ce, b, c, d, e, f, g, h, a)
+VEOR XWORD1.B16, XWORD2.B16, Wt.B16
+DO_ROUND_N_1(0*16, 0, 0xd8a7a879, e, f, g, h, a, b, c, d, XWORD1, Wt)
+DO_ROUND_N_1(0*16, 1, 0xb14f50f3, d, e, f, g, h, a, b, c, XWORD1, Wt)
+DO_ROUND_N_1(0*16, 2, 0x629ea1e7, c, d, e, f, g, h, a, b, XWORD1, Wt)
+DO_ROUND_N_1(0*16, 3, 0xc53d43ce, b, c, d, e, f, g, h, a, XWORD1, Wt)
 // Do 4 rounds
-VMOV XWORD2.B16, XTMP0.B16
-VEOR XWORD2.B16, XWORD3.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-DO_ROUND_N_1(0*16, 0, 0x8a7a879d, a, b, c, d, e, f, g, h)
-DO_ROUND_N_1(0*16, 1, 0x14f50f3b, h, a, b, c, d, e, f, g)
-DO_ROUND_N_1(0*16, 2, 0x29ea1e76, g, h, a, b, c, d, e, f)
-DO_ROUND_N_1(0*16, 3, 0x53d43cec, f, g, h, a, b, c, d, e)
+VEOR XWORD2.B16, XWORD3.B16, Wt.B16
+DO_ROUND_N_1(0*16, 0, 0x8a7a879d, a, b, c, d, e, f, g, h, XWORD2, Wt)
+DO_ROUND_N_1(0*16, 1, 0x14f50f3b, h, a, b, c, d, e, f, g, XWORD2, Wt)
+DO_ROUND_N_1(0*16, 2, 0x29ea1e76, g, h, a, b, c, d, e, f, XWORD2, Wt)
+DO_ROUND_N_1(0*16, 3, 0x53d43cec, f, g, h, a, b, c, d, e, XWORD2, Wt)
 // Do 4 rounds
-VMOV XWORD3.B16, XTMP0.B16
-VEOR XWORD3.B16, XWORD0.B16, XTMP1.B16
-VST1 [XTMP0.B16, XTMP1.B16], (RSP)
-DO_ROUND_N_1(0*16, 0, 0xa7a879d8, e, f, g, h, a, b, c, d)
-DO_ROUND_N_1(0*16, 1, 0x4f50f3b1, d, e, f, g, h, a, b, c)
-DO_ROUND_N_1(0*16, 2, 0x9ea1e762, c, d, e, f, g, h, a, b)
-DO_ROUND_N_1(0*16, 3, 0x3d43cec5, b, c, d, e, f, g, h, a)
+VEOR XWORD3.B16, XWORD0.B16, Wt.B16
+DO_ROUND_N_1(0*16, 0, 0xa7a879d8, e, f, g, h, a, b, c, d, XWORD3, Wt)
+DO_ROUND_N_1(0*16, 1, 0x4f50f3b1, d, e, f, g, h, a, b, c, XWORD3, Wt)
+DO_ROUND_N_1(0*16, 2, 0x9ea1e762, c, d, e, f, g, h, a, b, XWORD3, Wt)
+DO_ROUND_N_1(0*16, 3, 0x3d43cec5, b, c, d, e, f, g, h, a, XWORD3, Wt)
 EORW a1, a // H0 = a XOR H0
 EORW b1, b // H1 = b XOR H1
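
The trailing EORW instructions are the block feedback: after the 64 rounds the working variables are XORed back into the previous chaining value. A minimal Go sketch of that step (illustrative names, not from this commit):

package sm3ref

// feedback folds the working variables into the chaining value, mirroring
// the EORW a1, a ... sequence: V[i+1] = (a, b, c, d, e, f, g, h) XOR V[i].
func feedback(dig *[8]uint32, a, b, c, d, e, f, g, h uint32) {
	for i, v := range [8]uint32{a, b, c, d, e, f, g, h} {
		dig[i] ^= v
	}
}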