mirror of https://github.com/emmansun/gmsm.git (synced 2025-05-12 12:06:18 +08:00)

ppc64x: sm4/zuc reduce VAND
parent 1924799006
commit 16219eef8a
@@ -86,8 +86,7 @@
 #define AFFINE_TRANSFORM(L, H, V_FOUR, x, y, z) \
 	VAND NIBBLE_MASK, x, z; \
 	VPERM L, L, z, y; \
-	VSRD x, V_FOUR, x; \
-	VAND NIBBLE_MASK, x, z; \
+	VSRB x, V_FOUR, z; \
 	VPERM H, H, z, x; \
 	VXOR y, x, x
 
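The change above is the VAND reduction named in the commit title: VSRD shifts the whole doubleword right, so the low bits of the neighbouring byte slide into each byte's high nibble and a follow-up VAND with NIBBLE_MASK is needed, while VSRB shifts every byte independently and already leaves a clean 4-bit index for the VPERM lookup. Below is a minimal Go sketch of that equivalence on a single 64-bit lane; the function names oldHighNibbles/newHighNibbles are illustrative only and are not part of the gmsm code base.

package main

import "fmt"

const nibbleMask = 0x0F0F0F0F0F0F0F0F

// oldHighNibbles models the removed sequence (VSRD + VAND): shift the
// whole doubleword right by 4, then mask every byte back to 4 bits,
// because the low bits of the neighbouring byte were shifted in.
func oldHighNibbles(x uint64) uint64 {
	return (x >> 4) & nibbleMask
}

// newHighNibbles models the replacement (VSRB): shift each byte
// independently by 4; the result is already a clean nibble, so the
// extra VAND is unnecessary.
func newHighNibbles(x uint64) uint64 {
	var out uint64
	for i := uint(0); i < 8; i++ {
		b := byte(x >> (8 * i))
		out |= uint64(b>>4) << (8 * i)
	}
	return out
}

func main() {
	x := uint64(0xF1E2D3C4B5A69788)
	fmt.Printf("%016x\n%016x\n", oldHighNibbles(x), newHighNibbles(x))
	fmt.Println(oldHighNibbles(x) == newHighNibbles(x)) // true for any x
}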
@@ -102,8 +101,7 @@
 	VNOR x, x, z; \ // z = NOT(x)
 	VAND NIBBLE_MASK, z, z; \
 	VPERM L, L, z, y; \
-	VSRD x, V_FOUR, x; \
-	VAND NIBBLE_MASK, x, z; \
+	VSRB x, V_FOUR, z; \
 	VPERM H, H, z, x; \
 	VXOR y, x, x
 
@@ -60,7 +60,7 @@ GLOBL ·rcon(SB), RODATA, $112
 // func expandKeyAsm(key *byte, ck, enc, dec *uint32, inst int)
 TEXT ·expandKeyAsm(SB),NOSPLIT,$0
 	// prepare/load constants
-	VSPLTISW $4, V_FOUR;
+	VSPLTISB $4, V_FOUR;
 #ifdef NEEDS_PERMW
 	MOVD $·rcon(SB), R4
 	LVX (R4), ESPERMW
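A note on the VSPLTISW to VSPLTISB switch that accompanies the VSRB change (my reading of the diff, not stated in it): VSRB takes its shift count from the low-order bits of each byte of V_FOUR, so every byte of the constant must hold 4. VSPLTISB $4 splats 0x04 into all 16 bytes, whereas VSPLTISW $4 produces 0x00000004 per 32-bit word, which would leave three of every four bytes with a shift count of 0; the old per-word splat was only adequate for the doubleword-granular VSRD.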
@@ -115,7 +115,7 @@ ksLoop:
 // func encryptBlockAsm(xk *uint32, dst, src *byte, inst int)
 TEXT ·encryptBlockAsm(SB),NOSPLIT,$0
 	// prepare/load constants
-	VSPLTISW $4, V_FOUR;
+	VSPLTISB $4, V_FOUR;
 #ifdef NEEDS_PERMW
 	MOVD $·rcon(SB), R4
 	LVX (R4), ESPERMW
@@ -156,7 +156,7 @@ encryptBlockLoop:
 // func encryptBlocksAsm(xk *uint32, dst, src []byte, inst int)
 TEXT ·encryptBlocksAsm(SB),NOSPLIT,$0
 	// prepare/load constants
-	VSPLTISW $4, V_FOUR;
+	VSPLTISB $4, V_FOUR;
 #ifdef NEEDS_PERMW
 	MOVD $·rcon(SB), R4
 	LVX (R4), ESPERMW
@@ -45,7 +45,7 @@ TEXT ·decryptBlocksChain(SB),NOSPLIT,$0
 #define rk R5
 #define srcLen R6
 	// prepare/load constants
-	VSPLTISW $4, V_FOUR;
+	VSPLTISB $4, V_FOUR;
 #ifdef NEEDS_PERMW
 	MOVD $·rcon(SB), R4
 	LVX (R4), ESPERMW
@@ -30,7 +30,7 @@ TEXT ·encryptSm4Ecb(SB),NOSPLIT,$0
 #define rk R5
 #define srcLen R6
 	// prepare/load constants
-	VSPLTISW $4, V_FOUR;
+	VSPLTISB $4, V_FOUR;
 #ifdef NEEDS_PERMW
 	MOVD $·rcon(SB), R4
 	LVX (R4), ESPERMW
@@ -42,7 +42,7 @@ GLOBL rcon<>(SB), RODATA, $160
 #define P3 V30
 
 #define LOAD_CONSTS \
-	VSPLTISW $4, V_FOUR \
+	VSPLTISB $4, V_FOUR \
 	MOVD $rcon<>+0x00(SB), R4 \
 	LXVD2X (R4)(R0), NIBBLE_MASK \
 	MOVD $0x10, R5 \
@@ -89,8 +89,7 @@ GLOBL rcon<>(SB), RODATA, $160
 #define AFFINE_TRANSFORM(L, H, V_FOUR, x, y, z) \
 	VAND NIBBLE_MASK, x, z; \
 	VPERM L, L, z, y; \
-	VSRD x, V_FOUR, x; \
-	VAND NIBBLE_MASK, x, z; \
+	VSRB x, V_FOUR, z; \
 	VPERM H, H, z, x; \
 	VXOR y, x, x
 