package sm4

import (
	"crypto/cipher"
	"crypto/subtle"
	"encoding/binary"
	"errors"
)

// Assert that sm4CipherAsm implements the gcmAble interface.
var _ gcmAble = (*sm4CipherAsm)(nil)

// NewGCM returns the SM4 cipher wrapped in Galois Counter Mode. This is only
// called by crypto/cipher.NewGCM via the gcmAble interface.
func (c *sm4CipherAsm) NewGCM(nonceSize, tagSize int) (cipher.AEAD, error) {
	var key [gcmBlockSize]byte
	c.Encrypt(key[:], key[:])
	g := &gcm{cipher: c, nonceSize: nonceSize, tagSize: tagSize}
	// We precompute 16 multiples of |key|. However, when we do lookups
	// into this table we'll be using bits from a field element and
	// therefore the bits will be in the reverse order. So normally one
	// would expect, say, 4*key to be in index 4 of the table but due to
	// this bit ordering it will actually be in index 0010 (base 2) = 2.
	x := gcmFieldElement{
		binary.BigEndian.Uint64(key[:8]),
		binary.BigEndian.Uint64(key[8:]),
	}
	g.productTable[reverseBits(1)] = x

	for i := 2; i < 16; i += 2 {
		g.productTable[reverseBits(i)] = gcmDouble(&g.productTable[reverseBits(i/2)])
		g.productTable[reverseBits(i+1)] = gcmAdd(&g.productTable[reverseBits(i)], &x)
	}

	return g, nil
}

// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
// standard and make binary.BigEndian suitable for marshaling these values, the
// bits are stored in big endian order. For example:
//
//	the coefficient of x⁰ can be obtained by v.low >> 63.
//	the coefficient of x⁶³ can be obtained by v.low & 1.
//	the coefficient of x⁶⁴ can be obtained by v.high >> 63.
//	the coefficient of x¹²⁷ can be obtained by v.high & 1.
type gcmFieldElement struct {
	low, high uint64
}

// gcm represents a Galois Counter Mode with a specific key. See
// https://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
type gcm struct {
	cipher    *sm4CipherAsm
	nonceSize int
	tagSize   int
	// productTable contains the first sixteen multiples of the key, H.
	// However, they are in bit reversed order. See NewGCM.
	productTable [16]gcmFieldElement
}

const (
	gcmBlockSize         = 16
	gcmTagSize           = 16
	gcmMinimumTagSize    = 12 // NIST SP 800-38D recommends tags with 12 or more bytes.
	gcmStandardNonceSize = 12
)

func (g *gcm) NonceSize() int {
	return g.nonceSize
}

func (g *gcm) Overhead() int {
	return g.tagSize
}

func (g *gcm) Seal(dst, nonce, plaintext, data []byte) []byte {
	if len(nonce) != g.nonceSize {
		panic("crypto/cipher: incorrect nonce length given to GCM")
	}
	if uint64(len(plaintext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize()) {
		panic("crypto/cipher: message too large for GCM")
	}

	ret, out := sliceForAppend(dst, len(plaintext)+g.tagSize)
	if InexactOverlap(out, plaintext) {
		panic("crypto/cipher: invalid buffer overlap")
	}

	var counter, tagMask [gcmBlockSize]byte
	g.deriveCounter(&counter, nonce)

	g.cipher.Encrypt(tagMask[:], counter[:])
	gcmInc32(&counter)

	g.counterCrypt(out, plaintext, &counter)

	var tag [gcmTagSize]byte
	g.auth(tag[:], out[:len(plaintext)], data, &tagMask)
	copy(out[len(plaintext):], tag[:])

	return ret
}
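// For reference, Seal and Open follow the GCM composition from NIST SP
// 800-38D, section 7. With J0 the pre-counter block produced by deriveCounter:
//
//	C = CTR_K(inc32(J0), P)                 // counterCrypt
//	T = MSB_t(GHASH_H(A, C) ⊕ E_K(J0))      // auth, with tagMask = E_K(J0)
//
// Seal returns dst followed by C || T; Open recomputes T over the received
// ciphertext and compares it in constant time before decrypting.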
var errOpen = errors.New("cipher: message authentication failed")

func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
	if len(nonce) != g.nonceSize {
		panic("crypto/cipher: incorrect nonce length given to GCM")
	}
	// Sanity check to prevent the authentication from always succeeding if an
	// implementation leaves tagSize uninitialized, for example.
	if g.tagSize < gcmMinimumTagSize {
		panic("crypto/cipher: incorrect GCM tag size")
	}

	if len(ciphertext) < g.tagSize {
		return nil, errOpen
	}
	if uint64(len(ciphertext)) > ((1<<32)-2)*uint64(g.cipher.BlockSize())+uint64(g.tagSize) {
		return nil, errOpen
	}

	tag := ciphertext[len(ciphertext)-g.tagSize:]
	ciphertext = ciphertext[:len(ciphertext)-g.tagSize]

	var counter, tagMask [gcmBlockSize]byte
	g.deriveCounter(&counter, nonce)

	g.cipher.Encrypt(tagMask[:], counter[:])
	gcmInc32(&counter)

	var expectedTag [gcmTagSize]byte
	g.auth(expectedTag[:], ciphertext, data, &tagMask)

	ret, out := sliceForAppend(dst, len(ciphertext))
	if InexactOverlap(out, ciphertext) {
		panic("crypto/cipher: invalid buffer overlap")
	}

	if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
		// The AES-NI GCM code in the standard library decrypts and
		// authenticates concurrently, and so overwrites dst in the
		// event of a tag mismatch. That behavior is mimicked here in
		// order to be consistent across platforms.
		for i := range out {
			out[i] = 0
		}
		return nil, errOpen
	}

	g.counterCrypt(out, ciphertext, &counter)

	return ret, nil
}

// reverseBits reverses the order of the bits of the 4-bit number in i.
func reverseBits(i int) int {
	i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
	i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
	return i
}

// gcmAdd adds two elements of GF(2¹²⁸) and returns the sum.
func gcmAdd(x, y *gcmFieldElement) gcmFieldElement {
	// Addition in a characteristic 2 field is just XOR.
	return gcmFieldElement{x.low ^ y.low, x.high ^ y.high}
}

// gcmDouble returns the result of doubling an element of GF(2¹²⁸).
func gcmDouble(x *gcmFieldElement) (double gcmFieldElement) {
	msbSet := x.high&1 == 1

	// Because of the bit-ordering, doubling is actually a right shift.
	double.high = x.high >> 1
	double.high |= x.low << 63
	double.low = x.low >> 1

	// If the most-significant bit was set before shifting then it,
	// conceptually, becomes a term of x^128. This is greater than the
	// irreducible polynomial so the result has to be reduced. The
	// irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
	// eliminate the term at x^128 which also means subtracting the other
	// four terms. In characteristic 2 fields, subtraction == addition ==
	// XOR.
	if msbSet {
		double.low ^= 0xe100000000000000
	}

	return
}

var gcmReductionTable = []uint16{
	0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
	0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
}

// mul sets y to y*H, where H is the GCM key, fixed during NewGCM.
func (g *gcm) mul(y *gcmFieldElement) {
	var z gcmFieldElement

	for i := 0; i < 2; i++ {
		word := y.high
		if i == 1 {
			word = y.low
		}

		// Multiplication works by multiplying z by 16 and adding in
		// one of the precomputed multiples of H.
		for j := 0; j < 64; j += 4 {
			msw := z.high & 0xf
			z.high >>= 4
			z.high |= z.low << 60
			z.low >>= 4
			z.low ^= uint64(gcmReductionTable[msw]) << 48

			// the values in |productTable| are ordered for
			// little-endian bit positions. See the comment
			// in NewGCM.
			t := &g.productTable[word&0xf]

			z.low ^= t.low
			z.high ^= t.high
			word >>= 4
		}
	}

	*y = z
}

// updateBlocks extends y with more polynomial terms from blocks, based on
// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
func (g *gcm) updateBlocks(y *gcmFieldElement, blocks []byte) {
	for len(blocks) > 0 {
		y.low ^= binary.BigEndian.Uint64(blocks)
		y.high ^= binary.BigEndian.Uint64(blocks[8:])
		g.mul(y)
		blocks = blocks[gcmBlockSize:]
	}
}
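// As a small worked view of the above: with 16-byte blocks X_1..X_n treated as
// elements of GF(2¹²⁸), updateBlocks applies the Horner step
//
//	y ← (y ⊕ X_i) · H   for i = 1..n
//
// so GHASH costs one mul per block. Inside mul, the 4-bit index taken from the
// field element has its bits in reversed order, which is why the multiple i·H
// is stored at productTable[reverseBits(i)]; e.g. reverseBits(1) = 8 and
// reverseBits(4) = 2, matching the "index 0010 (base 2) = 2" example in NewGCM.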
// update extends y with more polynomial terms from data. If data is not a
// multiple of gcmBlockSize bytes long then the remainder is zero padded.
func (g *gcm) update(y *gcmFieldElement, data []byte) {
	fullBlocks := (len(data) >> 4) << 4
	g.updateBlocks(y, data[:fullBlocks])

	if len(data) != fullBlocks {
		var partialBlock [gcmBlockSize]byte
		copy(partialBlock[:], data[fullBlocks:])
		g.updateBlocks(y, partialBlock[:])
	}
}

// gcmInc32 treats the final four bytes of counterBlock as a big-endian value
// and increments it.
func gcmInc32(counterBlock *[16]byte) {
	ctr := counterBlock[len(counterBlock)-4:]
	binary.BigEndian.PutUint32(ctr, binary.BigEndian.Uint32(ctr)+1)
}

// sliceForAppend takes a slice and a requested number of bytes. It returns a
// slice with the contents of the given slice followed by that many bytes and a
// second slice that aliases into it and contains only the extra bytes. If the
// original slice has sufficient capacity then no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}

// counterCrypt crypts in to out using g.cipher in counter mode.
func (g *gcm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
	var mask [FourBlocksSize]byte
	var counters [FourBlocksSize]byte

	for len(in) >= FourBlocksSize {
		copy(counters[:], counter[:])
		gcmInc32(counter)
		copy(counters[gcmBlockSize:], counter[:])
		gcmInc32(counter)
		copy(counters[2*gcmBlockSize:], counter[:])
		gcmInc32(counter)
		copy(counters[3*gcmBlockSize:], counter[:])
		encryptBlocksAsm(&g.cipher.enc[0], &mask[0], &counters[0])
		gcmInc32(counter)
		xorWords(out, in, mask[:])
		out = out[FourBlocksSize:]
		in = in[FourBlocksSize:]
	}

	if len(in) > 0 {
		blocks := (len(in) + gcmBlockSize - 1) / gcmBlockSize
		for i := 0; i < blocks; i++ {
			copy(counters[i*gcmBlockSize:], counter[:])
			gcmInc32(counter)
		}
		encryptBlocksAsm(&g.cipher.enc[0], &mask[0], &counters[0])
		xorBytes(out, in, mask[:blocks*gcmBlockSize])
	}
}

// deriveCounter computes the initial GCM counter state from the given nonce.
// See NIST SP 800-38D, section 7.1. This assumes that counter is filled with
// zeros on entry.
func (g *gcm) deriveCounter(counter *[gcmBlockSize]byte, nonce []byte) {
	// GCM has two modes of operation with respect to the initial counter
	// state: a "fast path" for 96-bit (12-byte) nonces, and a "slow path"
	// for nonces of other lengths. For a 96-bit nonce, the nonce, along
	// with a four-byte big-endian counter starting at one, is used
	// directly as the starting counter. For other nonce sizes, the counter
	// is computed by passing it through the GHASH function.
	if len(nonce) == gcmStandardNonceSize {
		copy(counter[:], nonce)
		counter[gcmBlockSize-1] = 1
	} else {
		var y gcmFieldElement
		g.update(&y, nonce)
		y.high ^= uint64(len(nonce)) * 8
		g.mul(&y)
		binary.BigEndian.PutUint64(counter[:8], y.low)
		binary.BigEndian.PutUint64(counter[8:], y.high)
	}
}

// auth calculates GHASH(ciphertext, additionalData), masks the result with
// tagMask and writes the result to out.
func (g *gcm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmTagSize]byte) {
	var y gcmFieldElement
	g.update(&y, additionalData)
	g.update(&y, ciphertext)

	y.low ^= uint64(len(additionalData)) * 8
	y.high ^= uint64(len(ciphertext)) * 8

	g.mul(&y)

	binary.BigEndian.PutUint64(out, y.low)
	binary.BigEndian.PutUint64(out[8:], y.high)

	xorWords(out, out, tagMask[:])
}
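// Usage sketch (illustrative only): it assumes the package exposes a
// block-cipher constructor, called NewCipher here as a hypothetical name, that
// returns a cipher.Block backed by sm4CipherAsm. crypto/cipher.NewGCM then
// reaches the NewGCM method above through the gcmAble interface:
//
//	block, err := NewCipher(key) // SM4 keys are 16 bytes
//	if err != nil {
//		// handle error
//	}
//	aead, err := cipher.NewGCM(block)
//	if err != nil {
//		// handle error
//	}
//	nonce := make([]byte, aead.NonceSize()) // must be unique per message
//	ct := aead.Seal(nil, nonce, plaintext, additionalData)
//	pt, err := aead.Open(nil, nonce, ct, additionalData)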