[SM4] arm64, use VMOVQ

Emman 2022-01-13 10:06:37 +08:00
parent bb20b52bd1
commit 7914d29bcd


@@ -20,51 +20,6 @@
 #define XTMP6 V6
 #define XTMP7 V7
-//nibble mask
-DATA nibble_mask<>+0x00(SB)/8, $0x0F0F0F0F0F0F0F0F
-DATA nibble_mask<>+0x08(SB)/8, $0x0F0F0F0F0F0F0F0F
-GLOBL nibble_mask<>(SB), (NOPTR+RODATA), $16
-// inverse shift rows
-DATA inverse_shift_rows<>+0x00(SB)/8, $0x0B0E0104070A0D00
-DATA inverse_shift_rows<>+0x08(SB)/8, $0x0306090C0F020508
-GLOBL inverse_shift_rows<>(SB), (NOPTR+RODATA), $16
-// Affine transform 1 (low and high hibbles)
-DATA m1_low<>+0x00(SB)/8, $0x9197E2E474720701
-DATA m1_low<>+0x08(SB)/8, $0xC7C1B4B222245157
-GLOBL m1_low<>(SB), (NOPTR+RODATA), $16
-DATA m1_high<>+0x00(SB)/8, $0xE240AB09EB49A200
-DATA m1_high<>+0x08(SB)/8, $0xF052B91BF95BB012
-GLOBL m1_high<>(SB), (NOPTR+RODATA), $16
-// Affine transform 2 (low and high hibbles)
-DATA m2_low<>+0x00(SB)/8, $0x5B67F2CEA19D0834
-DATA m2_low<>+0x08(SB)/8, $0xEDD14478172BBE82
-GLOBL m2_low<>(SB), (NOPTR+RODATA), $16
-DATA m2_high<>+0x00(SB)/8, $0xAE7201DD73AFDC00
-DATA m2_high<>+0x08(SB)/8, $0x11CDBE62CC1063BF
-GLOBL m2_high<>(SB), (NOPTR+RODATA), $16
-// left rotations of 32-bit words by 8-bit increments
-DATA r08_mask<>+0x00(SB)/8, $0x0605040702010003
-DATA r08_mask<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
-GLOBL r08_mask<>(SB), (NOPTR+RODATA), $16
-DATA r16_mask<>+0x00(SB)/8, $0x0504070601000302
-DATA r16_mask<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
-GLOBL r16_mask<>(SB), (NOPTR+RODATA), $16
-DATA r24_mask<>+0x00(SB)/8, $0x0407060500030201
-DATA r24_mask<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
-GLOBL r24_mask<>(SB), (NOPTR+RODATA), $16
-DATA fk_mask<>+0x00(SB)/8, $0x56aa3350a3b1bac6
-DATA fk_mask<>+0x08(SB)/8, $0xb27022dc677d9197
-GLOBL fk_mask<>(SB), (NOPTR+RODATA), $16
 #define SM4_SBOX(x, y) \
 ; \ //############################# inner affine ############################//
 VAND x.B16, NIBBLE_MASK.B16, XTMP7.B16; \
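
Note on the rotation tables removed above (their values reappear as VMOVQ immediates in the next hunk): r08_mask, r16_mask and r24_mask are byte-shuffle index tables that rotate each little-endian 32-bit lane left by 8, 16 and 24 bits. The stand-alone Go sketch below is illustrative only (the shuffle helper is not part of this package); it checks the r08_mask values against math/bits.RotateLeft32.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// shuffle applies TBL-style byte selection: out[i] = in[idx[i]].
func shuffle(in, idx [16]byte) (out [16]byte) {
	for i, j := range idx {
		out[i] = in[j]
	}
	return
}

func main() {
	// r08_mask as laid out in memory: the two 64-bit halves
	// $0x0605040702010003 and $0x0E0D0C0F0A09080B, little-endian.
	var r08 [16]byte
	binary.LittleEndian.PutUint64(r08[0:], 0x0605040702010003)
	binary.LittleEndian.PutUint64(r08[8:], 0x0E0D0C0F0A09080B)

	// Arbitrary test vector: four 32-bit words stored little-endian.
	var in [16]byte
	for i := range in {
		in[i] = byte(i * 17)
	}
	out := shuffle(in, r08)

	// Each shuffled word equals the original word rotated left by 8 bits.
	for i := 0; i < 4; i++ {
		w := binary.LittleEndian.Uint32(in[4*i:])
		got := binary.LittleEndian.Uint32(out[4*i:])
		fmt.Println(got == bits.RotateLeft32(w, 8)) // true
	}
}

The r16_mask and r24_mask values follow the same pattern with indices shifted by one and two more byte positions per lane.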
@@ -131,36 +86,18 @@ GLOBL fk_mask<>(SB), (NOPTR+RODATA), $16
 #define load_global_data_1() \
 VMOVQ $0x0F0F0F0F0F0F0F0F, $0x0F0F0F0F0F0F0F0F, NIBBLE_MASK; \ // nibble mask
-LDP m1_low<>(SB), (R0, R1) \
-VMOV R0, M1L.D[0] \
-VMOV R1, M1L.D[1] \
-LDP m1_high<>(SB), (R0, R1) \
-VMOV R0, M1H.D[0] \
-VMOV R1, M1H.D[1] \
-LDP m2_low<>(SB), (R0, R1) \
-VMOV R0, M2L.D[0] \
-VMOV R1, M2L.D[1] \
-LDP m2_high<>(SB), (R0, R1) \
-VMOV R0, M2H.D[0] \
-VMOV R1, M2H.D[1] \
-LDP fk_mask<>(SB), (R0, R1) \
-VMOV R0, FK_MASK.D[0] \
-VMOV R1, FK_MASK.D[1] \
-LDP inverse_shift_rows<>(SB), (R0, R1) \
-VMOV R0, INVERSE_SHIFT_ROWS.D[0] \
-VMOV R1, INVERSE_SHIFT_ROWS.D[1]
+VMOVQ $0xC7C1B4B222245157, $0x9197E2E474720701, M1L; \
+VMOVQ $0xF052B91BF95BB012, $0xE240AB09EB49A200, M1H; \
+VMOVQ $0xEDD14478172BBE82, $0x5B67F2CEA19D0834, M2L; \
+VMOVQ $0x11CDBE62CC1063BF, $0xAE7201DD73AFDC00, M2H; \
+VMOVQ $0xb27022dc677d9197, $0x56aa3350a3b1bac6, FK_MASK; \
+VMOVQ $0x0306090C0F020508, $0x0B0E0104070A0D00, INVERSE_SHIFT_ROWS
 #define load_global_data_2() \
-load_global_data_1() \
-LDP r08_mask<>(SB), (R0, R1) \
-VMOV R0, R08_MASK.D[0] \
-VMOV R1, R08_MASK.D[1] \
-LDP r16_mask<>(SB), (R0, R1) \
-VMOV R0, R16_MASK.D[0] \
-VMOV R1, R16_MASK.D[1] \
-LDP r24_mask<>(SB), (R0, R1) \
-VMOV R0, R24_MASK.D[0] \
-VMOV R1, R24_MASK.D[1]
+load_global_data_1(); \
+VMOVQ $0x0E0D0C0F0A09080B, $0x0605040702010003, R08_MASK; \
+VMOVQ $0x0D0C0F0E09080B0A, $0x0504070601000302, R16_MASK; \
+VMOVQ $0x0C0F0E0D080B0A09, $0x0407060500030201, R24_MASK
 // func expandKeyAsm(key *byte, ck, enc, dec *uint32)
 TEXT ·expandKeyAsm(SB),NOSPLIT,$0
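
The VMOVQ form used here takes two 64-bit immediates. Comparing the new operands with the removed DATA layout, the first immediate appears to fill the upper doubleword (D[1]) and the second the lower doubleword (D[0]); treat that operand order as inferred from this diff rather than stated by it. A stand-alone, illustrative Go sketch of that correspondence for the m1_low constant:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// m1_low as the removed DATA statements laid it out in memory:
	// +0x00: 0x9197E2E474720701, +0x08: 0xC7C1B4B222245157 (little-endian).
	var m1l [16]byte
	binary.LittleEndian.PutUint64(m1l[0:8], 0x9197E2E474720701)
	binary.LittleEndian.PutUint64(m1l[8:16], 0xC7C1B4B222245157)

	// Lane view after a 128-bit load into a NEON register.
	d0 := binary.LittleEndian.Uint64(m1l[0:8])  // V.D[0], lower doubleword
	d1 := binary.LittleEndian.Uint64(m1l[8:16]) // V.D[1], upper doubleword

	// The replacement instruction is
	//   VMOVQ $0xC7C1B4B222245157, $0x9197E2E474720701, M1L
	// so, assuming first operand = upper half and second = lower half,
	// it must reproduce the same register contents:
	fmt.Println(d1 == 0xC7C1B4B222245157) // true: first VMOVQ operand
	fmt.Println(d0 == 0x9197E2E474720701) // true: second VMOVQ operand
}

The same pairing holds for every constant in load_global_data_1 and load_global_data_2, which is why the RODATA tables and the LDP/VMOV sequences could be dropped.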