From a98463ee7f6e7eb653b76dd75d08b8e96de1161f Mon Sep 17 00:00:00 2001
From: Sun Yimin
Date: Mon, 11 Sep 2023 15:42:56 +0800
Subject: [PATCH] Updated SM3性能优化 (markdown)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 SM3性能优化.md | 47 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/SM3性能优化.md b/SM3性能优化.md
index 3c8a0d4..b2b6fa2 100644
--- a/SM3性能优化.md
+++ b/SM3性能优化.md
@@ -115,6 +115,53 @@ The first SM3 version is more complex than the SHA-256 one; it is unclear whether there is room for further optimization.
 	VPSHUFD $0x4E, XTMP3, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]}
 ```
+Second version (using VPSHUFB to optimize the rotate-left operations):
+```asm
+	// Wj ← P1(Wj−16 ⊕ Wj−9 ⊕ (Wj−3 ≪ 15)) ⊕ (Wj−13 ≪ 7) ⊕ Wj−6
+	// Transpose data into high/low parts
+	VPERM2I128 $0x20, XTMP2, XTMP0, XDWORD0 // XDWORD0 = {w3, w2, w1, w0}
+	VPERM2I128 $0x31, XTMP2, XTMP0, XDWORD1 // XDWORD1 = {w7, w6, w5, w4}
+	VPERM2I128 $0x20, XTMP3, XTMP1, XDWORD2 // XDWORD2 = {w11, w10, w9, w8}
+	VPERM2I128 $0x31, XTMP3, XTMP1, XDWORD3 // XDWORD3 = {w15, w14, w13, w12}
+
+	VPALIGNR $12, XDWORD0, XDWORD1, XTMP0; \ // XTMP0 = W[-13] = {w6, w5, w4, w3}
+	VPSLLD $7, XTMP0, XTMP1; \
+	VPSRLD $(32-7), XTMP0, XTMP0; \
+	VPOR XTMP0, XTMP1, XTMP1; \ // XTMP1 = W[-13] rol 7
+	VPALIGNR $8, XDWORD2, XDWORD3, XTMP0; \ // XTMP0 = W[-6] = {w13, w12, w11, w10}
+	VPXOR XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-6] XOR (W[-13] rol 7)
+	VPALIGNR $12, XDWORD1, XDWORD2, XTMP1; \ // XTMP1 = W[-9] = {w10, w9, w8, w7}
+	VPXOR XDWORD0, XTMP1, XTMP1; \ // XTMP1 = W[-9] XOR W[-16]
+
+	VPSHUFD $0xA5, XDWORD3, XTMP2; \ // XTMP2 = W[-3] {BBAA} = {w14, w14, w13, w13}
+	VPSLLQ $15, XTMP2, XTMP2; \ // XTMP2 = W[-3] rol 15 {BxAx}
+	VPSHUFB shuff_00BA<>(SB), XTMP2, XTMP2; \ // XTMP2 = W[-3] rol 15 {00BA}
+	VPXOR XTMP1, XTMP2, XTMP2; \ // XTMP2 = W[-9] XOR W[-16] XOR (W[-3] rol 15) {xxBA}
+	VPSLLD $15, XTMP2, XTMP3; \
+	VPSRLD $(32-15), XTMP2, XTMP4; \
+	VPOR XTMP3, XTMP4, XTMP4; \ // XTMP4 = XTMP2 rol 15 {xxBA}
+	VPSHUFB r08_mask<>(SB), XTMP4, XTMP3; \ // XTMP3 = XTMP2 rol 23 {xxBA}
+
+	VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA})
+	VPXOR XTMP4, XTMP3, XTMP4; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA}) XOR (XTMP2 rol 23 {xxBA})
+	VPXOR XTMP4, XTMP0, XTMP2; \ // XTMP2 = {..., ..., W[1], W[0]}
+	VPALIGNR $12, XDWORD3, XTMP2, XTMP3; \ // XTMP3 = {..., W[1], W[0], w15}
+	VPSHUFD $0x50, XTMP3, XTMP4; \ // XTMP4 = W[-3] {DDCC} = {W[0], W[0], w15, w15}
+	VPSLLQ $15, XTMP4, XTMP4; \ // XTMP4 = W[-3] rol 15 {DxCx}
+	VPSHUFB shuff_DC00<>(SB), XTMP4, XTMP4; \ // XTMP4 = W[-3] rol 15 {DC00}
+	VPXOR XTMP1, XTMP4, XTMP4; \ // XTMP4 = W[-9] XOR W[-16] XOR (W[-3] rol 15) {DCxx}
+
+	VPSLLD $15, XTMP4, XTMP5; \
+	VPSRLD $(32-15), XTMP4, XTMP3; \
+	VPOR XTMP3, XTMP5, XTMP3; \ // XTMP3 = XTMP4 rol 15 {DCxx}
+	VPSHUFB r08_mask<>(SB), XTMP3, XTMP1; \ // XTMP1 = XTMP4 rol 23 {DCxx}
+	VPXOR XTMP3, XTMP4, XTMP3; \ // XTMP3 = XTMP4 XOR (XTMP4 rol 15 {DCxx})
+	VPXOR XTMP3, XTMP1, XTMP1; \ // XTMP1 = XTMP4 XOR (XTMP4 rol 15 {DCxx}) XOR (XTMP4 rol 23 {DCxx})
+	VPXOR XTMP1, XTMP0, XTMP1; \ // XTMP1 = {W[3], W[2], ..., ...}
+	VPALIGNR $8, XTMP1, XTMP2, XTMP3; \ // XTMP3 = {W[1], W[0], W[3], W[2]}
+	VPSHUFD $0x4E, XTMP3, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]}
+```
+
 Since **52** DWORDs (W[16] through W[67]) have to be computed, the work is arranged as follows:
 
 | Integer Execution | SIMD Execution |
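A note on why VPSHUFB helps here: a rotation by a multiple of 8 bits only moves whole bytes within each 32-bit lane, so it can be done with a single byte shuffle instead of the usual VPSLLD/VPSRLD/VPOR triple. P1 already needs `X rol 15`, and `X rol 23` is just that intermediate value rotated by another 8 bits, which is what the `r08_mask` shuffle computes. A minimal standalone Go check of the identity (not part of the patch):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// rol 23 == (rol 15) then rol 8; the extra rol 8 only permutes
	// bytes inside the 32-bit word, so VPSHUFB can do it in one shot.
	x := uint32(0x12345678)
	viaByteShuffle := bits.RotateLeft32(bits.RotateLeft32(x, 15), 8)
	fmt.Println(viaByteShuffle == bits.RotateLeft32(x, 23)) // true
}
```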
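For cross-checking the vector code above, here is a plain-Go sketch of the same message expansion, computed one word at a time; the names `p1` and `expand` are illustrative and not taken from the patch:

```go
package main

import (
	"fmt"
	"math/bits"
)

// p1 implements the SM3 permutation P1(X) = X ^ (X <<< 15) ^ (X <<< 23).
func p1(x uint32) uint32 {
	return x ^ bits.RotateLeft32(x, 15) ^ bits.RotateLeft32(x, 23)
}

// expand fills W[16..67] from the 16 message words, i.e. the 52 DWORDs
// that the AVX2 code above produces four at a time.
func expand(w *[68]uint32) {
	for j := 16; j < 68; j++ {
		w[j] = p1(w[j-16]^w[j-9]^bits.RotateLeft32(w[j-3], 15)) ^
			bits.RotateLeft32(w[j-13], 7) ^ w[j-6]
	}
}

func main() {
	var w [68]uint32
	for i := range w[:16] {
		w[i] = uint32(i)
	}
	expand(&w)
	fmt.Printf("W[16] = %#08x\n", w[16])
}
```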