diff --git a/SM3性能优化.md b/SM3性能优化.md index 244bc75..51644e0 100644 --- a/SM3性能优化.md +++ b/SM3性能优化.md @@ -55,4 +55,49 @@ SM3的message scheduler有两个显著差别: 1. 比SHA256需要多算4个DWORDs。 Intel 指令参考: -https://software.intel.com/sites/landingpage/IntrinsicsGuide/ \ No newline at end of file +https://software.intel.com/sites/landingpage/IntrinsicsGuide/ + +SM3 message schedule的第一版实现,比SHA256复杂: + + // Wj ← P1(Wj−16 ⊕ Wj−9 ⊕ (Wj−3 ≪ 15)) ⊕ (Wj−13 ≪ 7) ⊕ Wj−6 + VPALIGNR $12, XDWORD0, XDWORD1, XTMP0; \ // XTMP0 = W[-13] = {w6,w5,w4,w3} + VPSLLD $7, XTMP0, XTMP1; \ + VPSRLD $(32-7), XTMP0, XTMP0; \ + VPOR XTMP0, XTMP1, XTMP1; \ // XTMP1 = W[-13] rol 7 + VPALIGNR $8, XDWORD2, XDWORD3, XTMP0; \ // XTMP0 = W[-6] = {w13,w12,w11,w10} + VPXOR XTMP1, XTMP0, XTMP0; \ // XTMP0 = W[-6] XOR (W[-13] rol 7) + VPALIGNR $12, XDWORD1, XDWORD2, XTMP1; \ // XTMP1 = W[-9] = {w10,w9,w8,w7} + VPXOR XDWORD0, XTMP1, XTMP1; \ // XTMP1 = W[-9] XOR W[-16] + VPSHUFD $0xA5, XDWORD3, XTMP2; \ // XTMP2 = W[-3] {BBAA} {w14,w14,w13,w13} + + VPSLLQ $15, XTMP2, XTMP2; \ // XTMP2 = W[-3] rol 15 {xBxA} + VPSHUFB shuff_00BA<>(SB), XTMP2, XTMP2;\ // XTMP2 = W[-3] rol 15 {00BA} + VPXOR XTMP1, XTMP2, XTMP2; \ // XTMP2 = W[-9] XOR W[-16] XOR (W[-3] rol 15) {xxBA} + VPSLLD $15, XTMP2, XTMP3; \ + VPSRLD $(32-15), XTMP2, XTMP4; \ + VPOR XTMP3, XTMP4, XTMP4; \ // XTMP4 = XTMP2 rol 15 {xxBA} + VPXOR XTMP2, XTMP4, XTMP4; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA}) + VPSLLD $23, XTMP2, XTMP3; \ + VPSRLD $(32-23), XTMP2, XTMP5; \ + + VPOR XTMP3, XTMP5, XTMP5; \ // XTMP5 = XTMP2 rol 23 {xxBA} + VPXOR XTMP4, XTMP5, XTMP4; \ // XTMP4 = XTMP2 XOR (XTMP2 rol 15 {xxBA}) XOR (XTMP2 rol 23 {xxBA}) + VPXOR XTMP4, XTMP0, XTMP2; \ // XTMP2 = {..., ..., W[1], W[0]} + VPALIGNR $12, XDWORD3, XTMP2, XTMP3; \ // XTMP3 = {..., W[1], W[0], w15} + VPSHUFD $80, XTMP3, XTMP4; \ // XTMP4 = W[-3] {DDCC} + VPSLLQ $15, XTMP4, XTMP4; \ // XTMP4 = W[-3] rol 15 {xDxC} + VPSHUFB shuff_DC00<>(SB), XTMP4, XTMP4;\ // XTMP4 = W[-3] rol 15 {DC00} + VPXOR XTMP1, 
XTMP4, XTMP4; \ // XTMP4 = W[-9] XOR W[-16] XOR (W[-3] rol 15) {DCxx} + VPSLLD $15, XTMP4, XTMP5; \ + + VPSRLD $(32-15), XTMP4, XTMP3; \ + VPOR XTMP3, XTMP5, XTMP3; \ // XTMP3 = XTMP4 rol 15 {DCxx} + VPXOR XTMP3, XTMP4, XTMP3; \ // XTMP3 = XTMP4 XOR (XTMP4 rol 15 {DCxx}) + VPSLLD $23, XTMP4, XTMP5; \ + VPSRLD $(32-23), XTMP4, XTMP1; \ + VPOR XTMP1, XTMP5, XTMP1; \ // XTMP1 = XTMP4 rol 23 {DCxx} + VPXOR XTMP3, XTMP1, XTMP1; \ // XTMP1 = XTMP4 XOR (XTMP4 rol 15 {DCxx}) XOR (XTMP4 rol 23 {DCxx}) + VPXOR XTMP1, XTMP0, XTMP1; \ // XTMP1 = {W[3], W[2], ..., ...} + VPALIGNR $8, XTMP1, XTMP2, XTMP3; \ // XTMP3 = {W[1], W[0], W[3], W[2]} + VPSHUFD $0x4E, XTMP3, XDWORD0; \ // XDWORD0 = {W[3], W[2], W[1], W[0]} +