[SM4] do not use VMOVQ to support golang 1.15.x

Emman 2022-01-13 13:15:35 +08:00
parent efedf2654a
commit c6292b9704
2 changed files with 185 additions and 47 deletions


@@ -20,6 +20,51 @@
#define XTMP6 V6
#define XTMP7 V7
//nibble mask
DATA nibble_mask<>+0x00(SB)/8, $0x0F0F0F0F0F0F0F0F
DATA nibble_mask<>+0x08(SB)/8, $0x0F0F0F0F0F0F0F0F
GLOBL nibble_mask<>(SB), (NOPTR+RODATA), $16
// inverse shift rows
DATA inverse_shift_rows<>+0x00(SB)/8, $0x0B0E0104070A0D00
DATA inverse_shift_rows<>+0x08(SB)/8, $0x0306090C0F020508
GLOBL inverse_shift_rows<>(SB), (NOPTR+RODATA), $16
// Affine transform 1 (low and high nibbles)
DATA m1_low<>+0x00(SB)/8, $0x9197E2E474720701
DATA m1_low<>+0x08(SB)/8, $0xC7C1B4B222245157
GLOBL m1_low<>(SB), (NOPTR+RODATA), $16
DATA m1_high<>+0x00(SB)/8, $0xE240AB09EB49A200
DATA m1_high<>+0x08(SB)/8, $0xF052B91BF95BB012
GLOBL m1_high<>(SB), (NOPTR+RODATA), $16
// Affine transform 2 (low and high nibbles)
DATA m2_low<>+0x00(SB)/8, $0x5B67F2CEA19D0834
DATA m2_low<>+0x08(SB)/8, $0xEDD14478172BBE82
GLOBL m2_low<>(SB), (NOPTR+RODATA), $16
DATA m2_high<>+0x00(SB)/8, $0xAE7201DD73AFDC00
DATA m2_high<>+0x08(SB)/8, $0x11CDBE62CC1063BF
GLOBL m2_high<>(SB), (NOPTR+RODATA), $16
// left rotations of 32-bit words by 8-bit increments
DATA r08_mask<>+0x00(SB)/8, $0x0605040702010003
DATA r08_mask<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
GLOBL r08_mask<>(SB), (NOPTR+RODATA), $16
DATA r16_mask<>+0x00(SB)/8, $0x0504070601000302
DATA r16_mask<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
GLOBL r16_mask<>(SB), (NOPTR+RODATA), $16
DATA r24_mask<>+0x00(SB)/8, $0x0407060500030201
DATA r24_mask<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
GLOBL r24_mask<>(SB), (NOPTR+RODATA), $16
DATA fk_mask<>+0x00(SB)/8, $0x56aa3350a3b1bac6
DATA fk_mask<>+0x08(SB)/8, $0xb27022dc677d9197
GLOBL fk_mask<>(SB), (NOPTR+RODATA), $16
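
The r08/r16/r24 masks above are VTBL byte-shuffle index tables: applied to a vector of 32-bit words they rotate every word left by 8, 16 and 24 bits respectively. A standalone Go sketch, not part of this diff (the table is transcribed from the r08_mask DATA directives above, assuming the little-endian byte order the loader uses), checks the 8-bit case against math/bits:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	// Byte-shuffle index table from r08_mask<>: little-endian bytes of
	// $0x0605040702010003 followed by $0x0E0D0C0F0A09080B.
	r08 := [16]byte{
		0x03, 0x00, 0x01, 0x02, 0x07, 0x04, 0x05, 0x06,
		0x0B, 0x08, 0x09, 0x0A, 0x0F, 0x0C, 0x0D, 0x0E,
	}
	words := [4]uint32{0x01234567, 0x89ABCDEF, 0xDEADBEEF, 0x00FF10E0}
	var src, dst [16]byte
	for i, w := range words {
		binary.LittleEndian.PutUint32(src[4*i:], w)
	}
	// VTBL semantics: dst[i] = src[table[i]].
	for i, idx := range r08 {
		dst[i] = src[idx]
	}
	for i, w := range words {
		got := binary.LittleEndian.Uint32(dst[4*i:])
		fmt.Printf("rotl8(%08x) = %08x, shuffle gives %08x\n",
			w, bits.RotateLeft32(w, 8), got)
	}
}
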
#define SM4_SBOX(x, y) \
; \ //############################# inner affine ############################//
VAND x.B16, NIBBLE_MASK.B16, XTMP7.B16; \
@@ -85,19 +130,39 @@
MOVW.P R2, -4(R11)
#define load_global_data_1() \
VMOVQ $0x0F0F0F0F0F0F0F0F, $0x0F0F0F0F0F0F0F0F, NIBBLE_MASK; \ // nibble mask
VMOVQ $0x9197E2E474720701, $0xC7C1B4B222245157, M1L; \
VMOVQ $0xE240AB09EB49A200, $0xF052B91BF95BB012, M1H; \
VMOVQ $0x5B67F2CEA19D0834, $0xEDD14478172BBE82, M2L; \
VMOVQ $0xAE7201DD73AFDC00, $0x11CDBE62CC1063BF, M2H; \
VMOVQ $0x56aa3350a3b1bac6, $0xb27022dc677d9197, FK_MASK; \
VMOVQ $0x0B0E0104070A0D00, $0x0306090C0F020508, INVERSE_SHIFT_ROWS
LDP nibble_mask<>(SB), (R0, R1) \
VMOV R0, NIBBLE_MASK.D[0] \
VMOV R1, NIBBLE_MASK.D[1] \
LDP m1_low<>(SB), (R0, R1) \
VMOV R0, M1L.D[0] \
VMOV R1, M1L.D[1] \
LDP m1_high<>(SB), (R0, R1) \
VMOV R0, M1H.D[0] \
VMOV R1, M1H.D[1] \
LDP m2_low<>(SB), (R0, R1) \
VMOV R0, M2L.D[0] \
VMOV R1, M2L.D[1] \
LDP m2_high<>(SB), (R0, R1) \
VMOV R0, M2H.D[0] \
VMOV R1, M2H.D[1] \
LDP fk_mask<>(SB), (R0, R1) \
VMOV R0, FK_MASK.D[0] \
VMOV R1, FK_MASK.D[1] \
LDP inverse_shift_rows<>(SB), (R0, R1) \
VMOV R0, INVERSE_SHIFT_ROWS.D[0] \
VMOV R1, INVERSE_SHIFT_ROWS.D[1]
#define load_global_data_2() \
load_global_data_1(); \
VMOVQ $0x0605040702010003, $0x0E0D0C0F0A09080B, R08_MASK; \
VMOVQ $0x0504070601000302, $0x0D0C0F0E09080B0A, R16_MASK; \
VMOVQ $0x0407060500030201, $0x0C0F0E0D080B0A09, R24_MASK
load_global_data_1() \
LDP r08_mask<>(SB), (R0, R1) \
VMOV R0, R08_MASK.D[0] \
VMOV R1, R08_MASK.D[1] \
LDP r16_mask<>(SB), (R0, R1) \
VMOV R0, R16_MASK.D[0] \
VMOV R1, R16_MASK.D[1] \
LDP r24_mask<>(SB), (R0, R1) \
VMOV R0, R24_MASK.D[0] \
VMOV R1, R24_MASK.D[1]
// func expandKeyAsm(key *byte, ck, enc, dec *uint32)
TEXT ·expandKeyAsm(SB),NOSPLIT,$0


@@ -1,5 +1,46 @@
#include "textflag.h"
//nibble mask
DATA nibble_mask<>+0x00(SB)/8, $0x0F0F0F0F0F0F0F0F
DATA nibble_mask<>+0x08(SB)/8, $0x0F0F0F0F0F0F0F0F
GLOBL nibble_mask<>(SB), (NOPTR+RODATA), $16
// inverse shift rows
DATA inverse_shift_rows<>+0x00(SB)/8, $0x0B0E0104070A0D00
DATA inverse_shift_rows<>+0x08(SB)/8, $0x0306090C0F020508
GLOBL inverse_shift_rows<>(SB), (NOPTR+RODATA), $16
// Affine transform 1 (low and high nibbles)
DATA m1_low<>+0x00(SB)/8, $0x9197E2E474720701
DATA m1_low<>+0x08(SB)/8, $0xC7C1B4B222245157
GLOBL m1_low<>(SB), (NOPTR+RODATA), $16
DATA m1_high<>+0x00(SB)/8, $0xE240AB09EB49A200
DATA m1_high<>+0x08(SB)/8, $0xF052B91BF95BB012
GLOBL m1_high<>(SB), (NOPTR+RODATA), $16
// Affine transform 2 (low and high nibbles)
DATA m2_low<>+0x00(SB)/8, $0x5B67F2CEA19D0834
DATA m2_low<>+0x08(SB)/8, $0xEDD14478172BBE82
GLOBL m2_low<>(SB), (NOPTR+RODATA), $16
DATA m2_high<>+0x00(SB)/8, $0xAE7201DD73AFDC00
DATA m2_high<>+0x08(SB)/8, $0x11CDBE62CC1063BF
GLOBL m2_high<>(SB), (NOPTR+RODATA), $16
// left rotations of 32-bit words by 8-bit increments
DATA r08_mask<>+0x00(SB)/8, $0x0605040702010003
DATA r08_mask<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
GLOBL r08_mask<>(SB), (NOPTR+RODATA), $16
DATA r16_mask<>+0x00(SB)/8, $0x0504070601000302
DATA r16_mask<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
GLOBL r16_mask<>(SB), (NOPTR+RODATA), $16
DATA r24_mask<>+0x00(SB)/8, $0x0407060500030201
DATA r24_mask<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
GLOBL r24_mask<>(SB), (NOPTR+RODATA), $16
#define B0 V0
#define B1 V1
#define B2 V2
@@ -37,6 +78,16 @@
#define K11 V30
#define KLAST V31
#define NIBBLE_MASK V23
#define INVERSE_SHIFT_ROWS V24
#define M1L V25
#define M1H V26
#define M2L V27
#define M2H V28
#define R08_MASK V29
#define R16_MASK V30
#define R24_MASK V31
#define reduce() \
VEOR ACC0.B16, ACCM.B16, ACCM.B16 \
VEOR ACC1.B16, ACCM.B16, ACCM.B16 \
@@ -106,51 +157,72 @@ TEXT ·gcmSm4Finish(SB),NOSPLIT,$0
#undef plen
#undef dlen
#define SM4_SBOX(x, y, z, z1, z2) \
VMOVQ $0x0F0F0F0F0F0F0F0F, $0x0F0F0F0F0F0F0F0F, z1; \ // nibble mask
VAND x.B16, z1.B16, z2.B16; \
VMOVQ $0x9197E2E474720701, $0xC7C1B4B222245157, z; \
VTBL z2.B16, [z.B16], y.B16; \
VUSHR $4, x.D2, x.D2; \
VAND x.B16, z1.B16, z2.B16; \
VMOVQ $0xE240AB09EB49A200, $0xF052B91BF95BB012, z; \
VTBL z2.B16, [z.B16], z2.B16; \
VEOR y.B16, z2.B16, x.B16; \
VMOVQ $0x0B0E0104070A0D00, $0x0306090C0F020508, z; \
VTBL z.B16, [x.B16], x.B16; \
AESE ZERO.B16, x.B16; \
VAND x.B16, z1.B16, z2.B16; \
VMOVQ $0x5B67F2CEA19D0834, $0xEDD14478172BBE82, z; \
VTBL z2.B16, [z.B16], y.B16; \
VUSHR $4, x.D2, x.D2; \
VAND x.B16, z1.B16, z2.B16; \
VMOVQ $0xAE7201DD73AFDC00, $0x11CDBE62CC1063BF, z; \
VTBL z2.B16, [z.B16], z2.B16; \
VEOR y.B16, z2.B16, x.B16
#define LOAD_SM4_AESNI_CONSTS() \
LDP nibble_mask<>(SB), (R20, R21) \
VMOV R20, NIBBLE_MASK.D[0] \
VMOV R21, NIBBLE_MASK.D[1] \
LDP m1_low<>(SB), (R20, R21) \
VMOV R20, M1L.D[0] \
VMOV R21, M1L.D[1] \
LDP m1_high<>(SB), (R20, R21) \
VMOV R20, M1H.D[0] \
VMOV R21, M1H.D[1] \
LDP m2_low<>(SB), (R20, R21) \
VMOV R20, M2L.D[0] \
VMOV R21, M2L.D[1] \
LDP m2_high<>(SB), (R20, R21) \
VMOV R20, M2H.D[0] \
VMOV R21, M2H.D[1] \
LDP inverse_shift_rows<>(SB), (R20, R21) \
VMOV R20, INVERSE_SHIFT_ROWS.D[0] \
VMOV R21, INVERSE_SHIFT_ROWS.D[1] \
LDP r08_mask<>(SB), (R20, R21) \
VMOV R20, R08_MASK.D[0] \
VMOV R21, R08_MASK.D[1] \
LDP r16_mask<>(SB), (R20, R21) \
VMOV R20, R16_MASK.D[0] \
VMOV R21, R16_MASK.D[1] \
LDP r24_mask<>(SB), (R20, R21) \
VMOV R20, R24_MASK.D[0] \
VMOV R21, R24_MASK.D[1]
#define SM4_TAO_L1(x, y, z, z1, z2) \
SM4_SBOX(x, y, z, z1, z2); \
VMOVQ $0x0605040702010003, $0x0E0D0C0F0A09080B, z; \
VTBL z.B16, [x.B16], y.B16; \
#define SM4_SBOX(x, y, z) \
; \
VAND x.B16, NIBBLE_MASK.B16, z.B16; \
VTBL z.B16, [M1L.B16], y.B16; \
VUSHR $4, x.D2, x.D2; \
VAND x.B16, NIBBLE_MASK.B16, z.B16; \
VTBL z.B16, [M1H.B16], z.B16; \
VEOR y.B16, z.B16, x.B16; \
VTBL INVERSE_SHIFT_ROWS.B16, [x.B16], x.B16; \
AESE ZERO.B16, x.B16; \
VAND x.B16, NIBBLE_MASK.B16, z.B16; \
VTBL z.B16, [M2L.B16], y.B16; \
VUSHR $4, x.D2, x.D2; \
VAND x.B16, NIBBLE_MASK.B16, z.B16; \
VTBL z.B16, [M2H.B16], z.B16; \
VEOR y.B16, z.B16, x.B16
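
The rewritten SM4_SBOX keeps the structure of the old one: each VTBL pair evaluates one of the affine transforms as two 4-bit table lookups (M1L/M1H before, M2L/M2H after the AES core), and AESE against a zero key supplies the AES S-box, with INVERSE_SHIFT_ROWS applied beforehand so the ShiftRows step inside AESE cancels out. The nibble-table trick works because the transforms are GF(2)-linear up to a constant that can be folded into one of the two tables: f(b) = f(b & 0x0F) ^ f(b & 0xF0). A small Go sketch of that identity, using an arbitrary stand-in linear map (a bit rotation, not the real M1/M2 matrices):

package main

import (
	"fmt"
	"math/bits"
)

// f is some GF(2)-linear byte map; a bit rotation stands in for the real
// affine transforms used by the assembly (illustration only).
func f(b byte) byte { return bits.RotateLeft8(b, 3) }

func main() {
	// Build the two 16-entry nibble tables, analogous to m1_low/m1_high.
	var lo, hi [16]byte
	for i := 0; i < 16; i++ {
		lo[i] = f(byte(i))      // contribution of the low nibble
		hi[i] = f(byte(i) << 4) // contribution of the high nibble
	}
	// For a linear map, f(b) = f(low nibble) ^ f(high nibble),
	// which is what the VTBL/VTBL/VEOR sequence computes.
	for b := 0; b < 256; b++ {
		if lo[b&0x0F]^hi[b>>4] != f(byte(b)) {
			fmt.Println("mismatch at", b)
			return
		}
	}
	fmt.Println("nibble-table lookup matches f for all 256 bytes")
}
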
#define SM4_TAO_L1(x, y, z) \
SM4_SBOX(x, y, z); \
VTBL R08_MASK.B16, [x.B16], y.B16; \
VEOR y.B16, x.B16, y.B16; \
VMOVQ $0x0504070601000302, $0x0D0C0F0E09080B0A, z; \
VTBL z.B16, [x.B16], z.B16; \
VTBL R16_MASK.B16, [x.B16], z.B16; \
VEOR z.B16, y.B16, y.B16; \
VSHL $2, y.S4, z.S4; \
VUSHR $30, y.S4, y.S4; \
VORR y.B16, z.B16, y.B16; \
VMOVQ $0x0407060500030201, $0x0C0F0E0D080B0A09, z; \
VTBL z.B16, [x.B16], z.B16; \
VTBL R24_MASK.B16, [x.B16], z.B16; \
VEOR z.B16, x.B16, x.B16; \
VEOR y.B16, x.B16, x.B16
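
SM4_TAO_L1 implements SM4's linear transform L(B) = B ^ (B<<<2) ^ (B<<<10) ^ (B<<<18) ^ (B<<<24) using only the three shuffle-based rotations plus one VSHL/VUSHR pair: it forms B ^ (B<<<8) ^ (B<<<16), rotates that by 2 to get (B<<<2) ^ (B<<<10) ^ (B<<<18), and folds in (B<<<24) and B at the end. A scalar Go sketch of the same decomposition (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"math/bits"
)

// sm4L is SM4's linear transform used after the S-box in the round function.
func sm4L(b uint32) uint32 {
	return b ^ bits.RotateLeft32(b, 2) ^ bits.RotateLeft32(b, 10) ^
		bits.RotateLeft32(b, 18) ^ bits.RotateLeft32(b, 24)
}

// sm4LFromMacro mirrors the SM4_TAO_L1 order of operations: the <<<8, <<<16
// and <<<24 rotations come from the VTBL shuffle masks, the <<<2 from
// VSHL $2 / VUSHR $30 / VORR.
func sm4LFromMacro(x uint32) uint32 {
	y := x ^ bits.RotateLeft32(x, 8) // VTBL r08 + VEOR
	y ^= bits.RotateLeft32(x, 16)    // VTBL r16 + VEOR
	y = bits.RotateLeft32(y, 2)      // VSHL/VUSHR/VORR
	x ^= bits.RotateLeft32(x, 24)    // VTBL r24 + VEOR
	return x ^ y                     // final VEOR
}

func main() {
	for _, w := range []uint32{0, 1, 0x01234567, 0xDEADBEEF, 0xFFFFFFFF} {
		fmt.Printf("%08x: L=%08x macro-order=%08x\n", w, sm4L(w), sm4LFromMacro(w))
	}
}
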
#define SM4_ROUND(RK, x, y, z, z1, z2, t0, t1, t2, t3) \
#define SM4_ROUND(RK, x, y, z, t0, t1, t2, t3) \
MOVW.P 4(RK), R19; \
VMOV R19, x.S4; \
VEOR t1.B16, x.B16, x.B16; \
VEOR t2.B16, x.B16, x.B16; \
VEOR t3.B16, x.B16, x.B16; \
SM4_TAO_L1(x, y, z, z1, z2); \
SM4_TAO_L1(x, y, z); \
VEOR x.B16, t0.B16, t0.B16
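
SM4_ROUND loads the next round-key word, broadcasts it, XORs in three of the four state vectors, runs the result through SM4_TAO_L1 and folds it into the fourth, i.e. t0 ^= T(rk ^ t1 ^ t2 ^ t3); the four consecutive calls in sm4InitEncLoop below rotate which word is updated. A plain-Go sketch of that round structure (reference-style only; the S-box is left as a placeholder so the sketch stays self-contained, and rk is a hypothetical zeroed key schedule):

package main

import (
	"fmt"
	"math/bits"
)

// sm4L is SM4's linear transform (see the sketch after SM4_TAO_L1).
func sm4L(b uint32) uint32 {
	return b ^ bits.RotateLeft32(b, 2) ^ bits.RotateLeft32(b, 10) ^
		bits.RotateLeft32(b, 18) ^ bits.RotateLeft32(b, 24)
}

// round mirrors SM4_ROUND for scalar state: t0 ^= L(S(rk ^ t1 ^ t2 ^ t3)).
// sbox32 stands in for the byte-wise SM4 S-box applied to all four bytes.
func round(sbox32 func(uint32) uint32, rk, t0, t1, t2, t3 uint32) uint32 {
	return t0 ^ sm4L(sbox32(rk^t1^t2^t3))
}

func main() {
	identity := func(x uint32) uint32 { return x } // placeholder S-box
	var rk [32]uint32                              // placeholder round keys
	t0, t1, t2, t3 := uint32(1), uint32(2), uint32(3), uint32(4)
	// Eight iterations of four rounds, matching the sm4InitEncLoop structure:
	// each group of four updates t0, t1, t2, t3 in turn.
	for i := 0; i < 32; i += 4 {
		t0 = round(identity, rk[i+0], t0, t1, t2, t3)
		t1 = round(identity, rk[i+1], t1, t2, t3, t0)
		t2 = round(identity, rk[i+2], t2, t3, t0, t1)
		t3 = round(identity, rk[i+3], t3, t0, t1, t2)
	}
	fmt.Printf("state after 32 rounds: %08x %08x %08x %08x\n", t0, t1, t2, t3)
}
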
// func gcmSm4Init(productTable *[256]byte, rk []uint32)
@@ -170,6 +242,7 @@ TEXT ·gcmSm4Init(SB),NOSPLIT,$0
VEOR ZERO.B16, ZERO.B16, ZERO.B16
// Encrypt block 0 with the SM4 keys to generate the hash key H
LOAD_SM4_AESNI_CONSTS()
VEOR B0.B16, B0.B16, B0.B16
VEOR B1.B16, B1.B16, B1.B16
VEOR B2.B16, B2.B16, B2.B16
@ -177,10 +250,10 @@ TEXT gcmSm4Init(SB),NOSPLIT,$0
EOR R3, R3
sm4InitEncLoop:
SM4_ROUND(RK, K0, K1, K2, K3, K4, B0, B1, B2, B3)
SM4_ROUND(RK, K0, K1, K2, K3, K4, B1, B2, B3, B0)
SM4_ROUND(RK, K0, K1, K2, K3, K4, B2, B3, B0, B1)
SM4_ROUND(RK, K0, K1, K2, K3, K4, B3, B0, B1, B2)
SM4_ROUND(RK, K0, K1, K2, B0, B1, B2, B3)
SM4_ROUND(RK, K0, K1, K2, B1, B2, B3, B0)
SM4_ROUND(RK, K0, K1, K2, B2, B3, B0, B1)
SM4_ROUND(RK, K0, K1, K2, B3, B0, B1, B2)
ADD $1, R3
CMP $8, R3