diff --git a/sm4/asm_arm64.s b/sm4/asm_arm64.s
index 24efc3b..282993a 100644
--- a/sm4/asm_arm64.s
+++ b/sm4/asm_arm64.s
@@ -20,51 +20,6 @@
 #define XTMP6 V6
 #define XTMP7 V7
 
-//nibble mask
-DATA nibble_mask<>+0x00(SB)/8, $0x0F0F0F0F0F0F0F0F
-DATA nibble_mask<>+0x08(SB)/8, $0x0F0F0F0F0F0F0F0F
-GLOBL nibble_mask<>(SB), (NOPTR+RODATA), $16
-
-// inverse shift rows
-DATA inverse_shift_rows<>+0x00(SB)/8, $0x0B0E0104070A0D00
-DATA inverse_shift_rows<>+0x08(SB)/8, $0x0306090C0F020508
-GLOBL inverse_shift_rows<>(SB), (NOPTR+RODATA), $16
-
-// Affine transform 1 (low and high hibbles)
-DATA m1_low<>+0x00(SB)/8, $0x9197E2E474720701
-DATA m1_low<>+0x08(SB)/8, $0xC7C1B4B222245157
-GLOBL m1_low<>(SB), (NOPTR+RODATA), $16
-
-DATA m1_high<>+0x00(SB)/8, $0xE240AB09EB49A200
-DATA m1_high<>+0x08(SB)/8, $0xF052B91BF95BB012
-GLOBL m1_high<>(SB), (NOPTR+RODATA), $16
-
-// Affine transform 2 (low and high hibbles)
-DATA m2_low<>+0x00(SB)/8, $0x5B67F2CEA19D0834
-DATA m2_low<>+0x08(SB)/8, $0xEDD14478172BBE82
-GLOBL m2_low<>(SB), (NOPTR+RODATA), $16
-
-DATA m2_high<>+0x00(SB)/8, $0xAE7201DD73AFDC00
-DATA m2_high<>+0x08(SB)/8, $0x11CDBE62CC1063BF
-GLOBL m2_high<>(SB), (NOPTR+RODATA), $16
-
-// left rotations of 32-bit words by 8-bit increments
-DATA r08_mask<>+0x00(SB)/8, $0x0605040702010003
-DATA r08_mask<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
-GLOBL r08_mask<>(SB), (NOPTR+RODATA), $16
-
-DATA r16_mask<>+0x00(SB)/8, $0x0504070601000302
-DATA r16_mask<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
-GLOBL r16_mask<>(SB), (NOPTR+RODATA), $16
-
-DATA r24_mask<>+0x00(SB)/8, $0x0407060500030201
-DATA r24_mask<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
-GLOBL r24_mask<>(SB), (NOPTR+RODATA), $16
-
-DATA fk_mask<>+0x00(SB)/8, $0x56aa3350a3b1bac6
-DATA fk_mask<>+0x08(SB)/8, $0xb27022dc677d9197
-GLOBL fk_mask<>(SB), (NOPTR+RODATA), $16
-
 #define SM4_SBOX(x, y) \
 	;                                        \ //############################# inner affine ############################//
 	VAND x.B16, NIBBLE_MASK.B16, XTMP7.B16;  \
@@ -131,36 +86,18 @@ GLOBL fk_mask<>(SB), (NOPTR+RODATA), $16
 
 #define load_global_data_1() \
 	VMOVQ $0x0F0F0F0F0F0F0F0F, $0x0F0F0F0F0F0F0F0F, NIBBLE_MASK; \ // nibble mask
-	LDP m1_low<>(SB), (R0, R1)                               \
-	VMOV R0, M1L.D[0]                                        \
-	VMOV R1, M1L.D[1]                                        \
-	LDP m1_high<>(SB), (R0, R1)                              \
-	VMOV R0, M1H.D[0]                                        \
-	VMOV R1, M1H.D[1]                                        \
-	LDP m2_low<>(SB), (R0, R1)                               \
-	VMOV R0, M2L.D[0]                                        \
-	VMOV R1, M2L.D[1]                                        \
-	LDP m2_high<>(SB), (R0, R1)                              \
-	VMOV R0, M2H.D[0]                                        \
-	VMOV R1, M2H.D[1]                                        \
-	LDP fk_mask<>(SB), (R0, R1)                              \
-	VMOV R0, FK_MASK.D[0]                                    \
-	VMOV R1, FK_MASK.D[1]                                    \
-	LDP inverse_shift_rows<>(SB), (R0, R1)                   \
-	VMOV R0, INVERSE_SHIFT_ROWS.D[0]                         \
-	VMOV R1, INVERSE_SHIFT_ROWS.D[1]
+	VMOVQ $0xC7C1B4B222245157, $0x9197E2E474720701, M1L;     \
+	VMOVQ $0xF052B91BF95BB012, $0xE240AB09EB49A200, M1H;     \
+	VMOVQ $0xEDD14478172BBE82, $0x5B67F2CEA19D0834, M2L;     \
+	VMOVQ $0x11CDBE62CC1063BF, $0xAE7201DD73AFDC00, M2H;     \
+	VMOVQ $0xb27022dc677d9197, $0x56aa3350a3b1bac6, FK_MASK; \
+	VMOVQ $0x0306090C0F020508, $0x0B0E0104070A0D00, INVERSE_SHIFT_ROWS
 
 #define load_global_data_2() \
-	load_global_data_1()                                     \
-	LDP r08_mask<>(SB), (R0, R1)                             \
-	VMOV R0, R08_MASK.D[0]                                   \
-	VMOV R1, R08_MASK.D[1]                                   \
-	LDP r16_mask<>(SB), (R0, R1)                             \
-	VMOV R0, R16_MASK.D[0]                                   \
-	VMOV R1, R16_MASK.D[1]                                   \
-	LDP r24_mask<>(SB), (R0, R1)                             \
-	VMOV R0, R24_MASK.D[0]                                   \
-	VMOV R1, R24_MASK.D[1]
+	load_global_data_1();                                    \
+	VMOVQ $0x0E0D0C0F0A09080B, $0x0605040702010003, R08_MASK; \
+	VMOVQ $0x0D0C0F0E09080B0A, $0x0504070601000302, R16_MASK; \
+	VMOVQ $0x0C0F0E0D080B0A09, $0x0407060500030201, R24_MASK
 
 // func expandKeyAsm(key *byte, ck, enc, dec *uint32)
 TEXT ·expandKeyAsm(SB),NOSPLIT,$0
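Reviewer note, not part of the patch: the inlined VMOVQ immediates can be cross-checked against the deleted DATA quadwords directly — in each VMOVQ the second immediate matches the old +0x00 half and the first matches the old +0x08 half. The rotation masks themselves are also easy to verify off-target. The standalone Go sketch below (tbl and fromU64Pair are hypothetical helpers for illustration, not code from this repo) models the VTBL byte shuffle and checks that r08_mask, r16_mask, and r24_mask implement left rotations of each 32-bit little-endian lane by 8, 16, and 24 bits, as the deleted "left rotations of 32-bit words by 8-bit increments" comment states.

```go
// Sanity check for the rotation shuffle masks (illustrative only).
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// tbl models the ARM64 VTBL instruction for one 16-byte register:
// output byte i is input byte mask[i].
func tbl(in, mask [16]byte) (out [16]byte) {
	for i, idx := range mask {
		out[i] = in[idx]
	}
	return
}

// fromU64Pair lays out two 64-bit halves the way the deleted DATA
// directives did: lo at +0x00, hi at +0x08, little-endian.
func fromU64Pair(lo, hi uint64) (r [16]byte) {
	binary.LittleEndian.PutUint64(r[:8], lo)
	binary.LittleEndian.PutUint64(r[8:], hi)
	return
}

func main() {
	masks := map[int][16]byte{
		8:  fromU64Pair(0x0605040702010003, 0x0E0D0C0F0A09080B), // r08_mask
		16: fromU64Pair(0x0504070601000302, 0x0D0C0F0E09080B0A), // r16_mask
		24: fromU64Pair(0x0407060500030201, 0x0C0F0E0D080B0A09), // r24_mask
	}
	in := fromU64Pair(0x0123456789ABCDEF, 0xFEDCBA9876543210) // arbitrary input
	for rot, mask := range masks {
		out := tbl(in, mask)
		for lane := 0; lane < 4; lane++ {
			x := binary.LittleEndian.Uint32(in[lane*4:])
			got := binary.LittleEndian.Uint32(out[lane*4:])
			want := bits.RotateLeft32(x, rot)
			fmt.Printf("rot=%-2d lane=%d got=%08X want=%08X ok=%v\n",
				rot, lane, got, want, got == want)
		}
	}
}
```

Every ok=true line confirms one 32-bit lane; a mismatch would point at a mistyped constant in the corresponding VMOVQ.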