Mirror of https://github.com/emmansun/gmsm.git (synced 2025-10-13 23:00:47 +08:00)
Release v0.34.0
* build(deps): bump github/codeql-action from 3.29.11 to 3.30.0 (#361)
* build(deps): bump codecov/codecov-action from 5.5.0 to 5.5.1 (#362)
* build(deps): bump actions/setup-go from 5.5.0 to 6.0.0 (#363)
* build(deps): bump github/codeql-action from 3.30.0 to 3.30.1 (#364)
* build(deps): bump step-security/harden-runner from 2.13.0 to 2.13.1 (#367)
* build(deps): bump github/codeql-action from 3.30.1 to 3.30.2 (#368)
* feat(mlkem): initialize mlkem from the Go standard library
* chore(mlkem): refactoring, reduce allocations
* build(deps): bump github/codeql-action from 3.30.2 to 3.30.3 (#369)
* doc(README): include MLKEM
* mldsa: refactor the implementation of key and sign/verify
* mldsa,slhdsa: crypto.Signer assertion
* fix(slhdsa): GenerateKey slice issue #72
* fix(slhdsa): copy/paste issue
* slhdsa: supplement the package-level documentation
* internal/zuc: eea supports the encoding.BinaryMarshaler & encoding.BinaryUnmarshaler interfaces
* mlkem: use the clear built-in
* build(deps): bump github/codeql-action from 3.30.3 to 3.30.4 (#376)
* cipher: initial support for the GXM & MUR modes
* cipher: update comments
* build(deps): bump github/codeql-action from 3.30.4 to 3.30.5 (#377)
* drbg: add a method to destroy the DRBG internal state (#378)
  * add a Destroy method that clears the DRBG internal state
  * unify prefixes
  * adjust the random data length
  * grouping and comments
  * fix an incorrect function description
  * zuc: expose methods to support encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
  * drbg: align the comment style
  * internal/zuc: support fast forward
  * internal/zuc: supplement comments

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sun Yimin <emmansun@users.noreply.github.com>
Co-authored-by: Guanyu Quan <quanguanyu@qq.com>
Parent commit: 9e364cb1d8
Commit: d57142dda1
.github/workflows/codeql-analysis.yml (vendored, 6 lines changed):

@@ -37,12 +37,12 @@ jobs:
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
+        uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5
         with:
           languages: go
 
       - name: Autobuild
-        uses: github/codeql-action/autobuild@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
+        uses: github/codeql-action/autobuild@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5
 
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
+        uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5
.github/workflows/scorecard.yml (vendored, 2 lines changed):

@@ -78,6 +78,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard (optional).
       # Commenting out will disable upload of results to your repo's Code Scanning dashboard
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@192325c86100d080feab897ff886c34abd4c83a3 # v3.29.5
+        uses: github/codeql-action/upload-sarif@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3.29.5
         with:
           sarif_file: results.sarif
cipher/ghash.go (new file, 130 lines):

// Copyright 2025 Sun Yimin. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cipher

import "github.com/emmansun/gmsm/internal/byteorder"

const (
	ghashBlockSize = 16
)

// ghashFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
// standard and make binary.BigEndian suitable for marshaling these values, the
// bits are stored in big endian order. For example:
//
//	the coefficient of x⁰ can be obtained by v.low >> 63.
//	the coefficient of x⁶³ can be obtained by v.low & 1.
//	the coefficient of x⁶⁴ can be obtained by v.high >> 63.
//	the coefficient of x¹²⁷ can be obtained by v.high & 1.
type ghashFieldElement struct {
	low, high uint64
}

// reverseBits reverses the order of the bits of 4-bit number in i.
func reverseBits(i int) int {
	i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
	i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
	return i
}

// ghashAdd adds two elements of GF(2¹²⁸) and returns the sum.
func ghashAdd(x, y *ghashFieldElement) ghashFieldElement {
	// Addition in a characteristic 2 field is just XOR.
	return ghashFieldElement{x.low ^ y.low, x.high ^ y.high}
}

// ghashDouble returns the result of doubling an element of GF(2¹²⁸).
func ghashDouble(x *ghashFieldElement) (double ghashFieldElement) {
	msbSet := x.high&1 == 1

	// Because of the bit-ordering, doubling is actually a right shift.
	double.high = x.high >> 1
	double.high |= x.low << 63
	double.low = x.low >> 1

	// If the most-significant bit was set before shifting then it,
	// conceptually, becomes a term of x^128. This is greater than the
	// irreducible polynomial so the result has to be reduced. The
	// irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
	// eliminate the term at x^128 which also means subtracting the other
	// four terms. In characteristic 2 fields, subtraction == addition ==
	// XOR.
	if msbSet {
		double.low ^= 0xe100000000000000
	}

	return
}

// ghashReductionTable stores the irreducible polynomial's double & add precomputed results.
//	0000 - 0
//	0001 - irreducible polynomial >> 3
//	0010 - irreducible polynomial >> 2
//	0011 - (irreducible polynomial >> 3 xor irreducible polynomial >> 2)
//	...
//	1000 - just the irreducible polynomial
var ghashReductionTable = []uint16{
	0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
	0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
}

// ghashMul sets y to y*H, where H is the GHASH key, fixed during New.
func ghashMul(productTable *[16]ghashFieldElement, y *ghashFieldElement) {
	var z ghashFieldElement

	// Eliminate bounds checks in the loop.
	_ = ghashReductionTable[0xf]

	for i := 0; i < 2; i++ {
		word := y.high
		if i == 1 {
			word = y.low
		}

		// Multiplication works by multiplying z by 16 and adding in
		// one of the precomputed multiples of the hash key.
		for j := 0; j < 64; j += 4 {
			msw := z.high & 0xf
			z.high >>= 4
			z.high |= z.low << 60
			z.low >>= 4
			z.low ^= uint64(ghashReductionTable[msw]) << 48

			// the values in |table| are ordered for
			// little-endian bit positions.
			t := &productTable[word&0xf]

			z.low ^= t.low
			z.high ^= t.high
			word >>= 4
		}
	}

	*y = z
}

// updateBlocks extends y with more polynomial terms from blocks, based on
// Horner's rule. There must be a multiple of ghashBlockSize bytes in blocks.
func updateBlocks(productTable *[16]ghashFieldElement, y *ghashFieldElement, blocks []byte) {
	for len(blocks) > 0 {
		y.low ^= byteorder.BEUint64(blocks)
		y.high ^= byteorder.BEUint64(blocks[8:])
		ghashMul(productTable, y)
		blocks = blocks[blockSize:]
	}
}

// ghashUpdate extends y with more polynomial terms from data. If data is not a
// multiple of ghashBlockSize bytes long then the remainder is zero padded.
func ghashUpdate(productTable *[16]ghashFieldElement, y *ghashFieldElement, data []byte) {
	fullBlocks := (len(data) >> 4) << 4
	updateBlocks(productTable, y, data[:fullBlocks])

	if len(data) != fullBlocks {
		var partialBlock [blockSize]byte
		copy(partialBlock[:], data[fullBlocks:])
		updateBlocks(productTable, y, partialBlock[:])
	}
}
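Taken together, these helpers implement the GHASH-style universal hash that GXM, MUR and HCTR share: the 16-byte hash key is expanded once into a bit-reversed 16-entry product table, and ghashUpdate then folds zero-padded message blocks into an accumulator by Horner's rule. The sketch below is illustrative only and is not part of this commit; it is an in-package helper (it relies on the unexported names above, so it would only compile inside the cipher package) that mirrors what NewGXM and gxmAuth do.

// exampleGhash is a hypothetical in-package sketch: hash data under hkey
// using the helpers defined in ghash.go.
func exampleGhash(hkey, data []byte) [ghashBlockSize]byte {
	// Build the bit-reversed product table from the hash key, exactly as
	// NewGXM/NewHCTR do.
	var productTable [16]ghashFieldElement
	x := ghashFieldElement{
		byteorder.BEUint64(hkey[:8]),
		byteorder.BEUint64(hkey[8:ghashBlockSize]),
	}
	productTable[reverseBits(1)] = x
	for i := 2; i < 16; i += 2 {
		productTable[reverseBits(i)] = ghashDouble(&productTable[reverseBits(i/2)])
		productTable[reverseBits(i+1)] = ghashAdd(&productTable[reverseBits(i)], &x)
	}

	// Horner-style accumulation of the (zero-padded) data blocks.
	var y ghashFieldElement
	ghashUpdate(&productTable, &y, data)

	var out [ghashBlockSize]byte
	byteorder.BEPutUint64(out[:], y.low)
	byteorder.BEPutUint64(out[8:], y.high)
	return out
}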
cipher/gxm.go (new file, 149 lines):

// Copyright 2025 Sun Yimin. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cipher

import (
	"crypto/cipher"
	"crypto/subtle"
	"errors"

	"github.com/emmansun/gmsm/internal/alias"
	"github.com/emmansun/gmsm/internal/byteorder"
)

type gxm struct {
	stream  cipher.Stream
	tagSize int
	tagMask [ghashBlockSize]byte
	// productTable contains the first sixteen powers of the hash key.
	// However, they are in bit reversed order.
	productTable [16]ghashFieldElement
}

// NewGXM creates a new GXM instance using the provided cipher stream and hash key.
// It uses the default tag size of 16 bytes.
//
// Due to the nature of GXM, the same stream cipher instance should not be reused.
func NewGXM(stream cipher.Stream, hkey []byte) (*gxm, error) {
	return NewGXMWithTagSize(stream, hkey, 16)
}

// NewGXMWithTagSize creates a new instance of GXM (Galois XOR Mode) with a specified tag size.
//
// Due to the nature of GXM, the same stream cipher instance should not be reused.
func NewGXMWithTagSize(stream cipher.Stream, hkey []byte, tagSize int) (*gxm, error) {
	if len(hkey) != ghashBlockSize {
		return nil, errors.New("cipher: invalid hash key length")
	}
	if tagSize < 8 || tagSize > 16 {
		return nil, errors.New("cipher: invalid tag size")
	}
	c := &gxm{}
	c.stream = stream
	c.tagSize = tagSize
	// We precompute 16 multiples of |key|. However, when we do lookups
	// into this table we'll be using bits from a field element and
	// therefore the bits will be in the reverse order. So normally one
	// would expect, say, 4*key to be in index 4 of the table but due to
	// this bit ordering it will actually be in index 0010 (base 2) = 2.
	x := ghashFieldElement{
		byteorder.BEUint64(hkey[:8]),
		byteorder.BEUint64(hkey[8:blockSize]),
	}
	c.productTable[reverseBits(1)] = x

	for i := 2; i < 16; i += 2 {
		c.productTable[reverseBits(i)] = ghashDouble(&c.productTable[reverseBits(i/2)])
		c.productTable[reverseBits(i+1)] = ghashAdd(&c.productTable[reverseBits(i)], &x)
	}

	// encrypt zero block to get the tag mask
	stream.XORKeyStream(c.tagMask[:tagSize], c.tagMask[:tagSize])

	return c, nil
}

// Overhead returns the maximum difference between the lengths of a
// plaintext and its ciphertext.
func (g *gxm) Overhead() int {
	return g.tagSize
}

// Seal encrypts and authenticates plaintext, authenticates the
// additional data and appends the result to dst, returning the updated
// slice.
//
// To reuse plaintext's storage for the encrypted output, use plaintext[:0]
// as dst. Otherwise, the remaining capacity of dst must not overlap plaintext.
// dst and additionalData may not overlap.
func (g *gxm) Seal(dst, plaintext, additionalData []byte) []byte {
	ret, out := alias.SliceForAppend(dst, len(plaintext)+g.tagSize)
	if alias.InexactOverlap(out, plaintext) {
		panic("cipher: invalid buffer overlap of output and input")
	}
	if alias.AnyOverlap(out, additionalData) {
		panic("cipher: invalid buffer overlap of output and additional data")
	}

	g.stream.XORKeyStream(out, plaintext)
	g.gxmAuth(out[len(plaintext):], out[:len(plaintext)], additionalData)
	return ret
}

// Open decrypts and authenticates ciphertext, authenticates the
// additional data and, if successful, appends the resulting plaintext
// to dst, returning the updated slice. The additional data must match the
// value passed to Seal.
//
// To reuse ciphertext's storage for the decrypted output, use ciphertext[:0]
// as dst. Otherwise, the remaining capacity of dst must not overlap ciphertext.
// dst and additionalData may not overlap.
//
// Even if the function fails, the contents of dst, up to its capacity,
// may be overwritten.
func (g *gxm) Open(dst, ciphertext, additionalData []byte) ([]byte, error) {
	if len(ciphertext) < g.tagSize {
		return nil, errOpen
	}
	ret, out := alias.SliceForAppend(dst, len(ciphertext)-g.tagSize)
	if alias.InexactOverlap(out, ciphertext) {
		panic("cipher: invalid buffer overlap of output and input")
	}
	if alias.AnyOverlap(out, additionalData) {
		panic("cipher: invalid buffer overlap of output and additional data")
	}
	tag := ciphertext[len(ciphertext)-g.tagSize:]
	ciphertext = ciphertext[:len(ciphertext)-g.tagSize]

	var expectedTag [blockSize]byte
	g.gxmAuth(expectedTag[:], ciphertext, additionalData)

	// Use subtle.ConstantTimeCompare to avoid leaking timing information.
	if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
		// We sometimes decrypt and authenticate concurrently, so we overwrite
		// dst in the event of a tag mismatch. To be consistent across platforms
		// and to avoid releasing unauthenticated plaintext, we clear the buffer
		// in the event of an error.
		clear(out)
		return nil, errOpen
	}
	g.stream.XORKeyStream(out, ciphertext)
	return ret, nil
}

func (g *gxm) gxmAuth(out, ciphertext, additionalData []byte) {
	var tag [ghashBlockSize]byte
	tagField := ghashFieldElement{}
	ghashUpdate(&g.productTable, &tagField, additionalData)
	ghashUpdate(&g.productTable, &tagField, ciphertext)
	lenBlock := make([]byte, 16)
	byteorder.BEPutUint64(lenBlock[:8], uint64(len(additionalData))*8)
	byteorder.BEPutUint64(lenBlock[8:], uint64(len(ciphertext))*8)
	ghashUpdate(&g.productTable, &tagField, lenBlock)
	byteorder.BEPutUint64(tag[:], tagField.low)
	byteorder.BEPutUint64(tag[8:], tagField.high)
	subtle.XORBytes(tag[:], tag[:], g.tagMask[:])
	copy(out, tag[:g.tagSize])
}
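For orientation, here is a minimal usage sketch of ZUC-GXM; it is not part of the diff, and the all-zero key, IV and hash key values are made up for illustration (the real GM/T 0001.4-2024 test vectors appear in cipher/zuc_gxm_test.go below). Each message needs its own ZUC stream instance, because NewGXM consumes the first keystream bytes to form the tag mask.

package main

import (
	"fmt"

	"github.com/emmansun/gmsm/cipher"
	"github.com/emmansun/gmsm/zuc"
)

func main() {
	key := make([]byte, 16)  // ZUC-128 key (illustration only)
	iv := make([]byte, 16)   // ZUC-128 IV
	hkey := make([]byte, 16) // GHASH key for the GXM layer

	// Seal: wrap a fresh ZUC stream in GXM and authenticate+encrypt.
	eea, err := zuc.NewCipher(key, iv)
	if err != nil {
		panic(err)
	}
	g, err := cipher.NewGXM(eea, hkey)
	if err != nil {
		panic(err)
	}
	sealed := g.Seal(nil, []byte("hello"), []byte("header"))

	// Open: recreate the same stream (same key/IV) and a fresh GXM instance.
	eea2, err := zuc.NewCipher(key, iv)
	if err != nil {
		panic(err)
	}
	g2, err := cipher.NewGXM(eea2, hkey)
	if err != nil {
		panic(err)
	}
	plain, err := g2.Open(nil, sealed, []byte("header"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}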
cipher/hctr.go (111 lines changed):

@@ -1,3 +1,7 @@
+// Copyright 2024 Sun Yimin. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
 package cipher
 
 import (
@@ -40,66 +44,6 @@ type LengthPreservingMode interface {
 	BlockSize() int
 }
-
-// hctrFieldElement represents a value in GF(2¹²⁸). In order to reflect the HCTR
-// standard and make binary.BigEndian suitable for marshaling these values, the
-// bits are stored in big endian order. For example:
-//
-//	the coefficient of x⁰ can be obtained by v.low >> 63.
-//	the coefficient of x⁶³ can be obtained by v.low & 1.
-//	the coefficient of x⁶⁴ can be obtained by v.high >> 63.
-//	the coefficient of x¹²⁷ can be obtained by v.high & 1.
-type hctrFieldElement struct {
-	low, high uint64
-}
-
-// reverseBits reverses the order of the bits of 4-bit number in i.
-func reverseBits(i int) int {
-	i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
-	i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
-	return i
-}
-
-// hctrAdd adds two elements of GF(2¹²⁸) and returns the sum.
-func hctrAdd(x, y *hctrFieldElement) hctrFieldElement {
-	// Addition in a characteristic 2 field is just XOR.
-	return hctrFieldElement{x.low ^ y.low, x.high ^ y.high}
-}
-
-// hctrDouble returns the result of doubling an element of GF(2¹²⁸).
-func hctrDouble(x *hctrFieldElement) (double hctrFieldElement) {
-	msbSet := x.high&1 == 1
-
-	// Because of the bit-ordering, doubling is actually a right shift.
-	double.high = x.high >> 1
-	double.high |= x.low << 63
-	double.low = x.low >> 1
-
-	// If the most-significant bit was set before shifting then it,
-	// conceptually, becomes a term of x^128. This is greater than the
-	// irreducible polynomial so the result has to be reduced. The
-	// irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
-	// eliminate the term at x^128 which also means subtracting the other
-	// four terms. In characteristic 2 fields, subtraction == addition ==
-	// XOR.
-	if msbSet {
-		double.low ^= 0xe100000000000000
-	}
-
-	return
-}
-
-// hctrReductionTable is stored irreducible polynomial's double & add precomputed results.
-//	0000 - 0
-//	0001 - irreducible polynomial >> 3
-//	0010 - irreducible polynomial >> 2
-//	0011 - (irreducible polynomial >> 3 xor irreducible polynomial >> 2)
-//	...
-//	1000 - just the irreducible polynomial
-var hctrReductionTable = []uint16{
-	0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
-	0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
-}
-
 // hctr represents a Variable-Input-Length enciphering mode with a specific block cipher,
 // and specific tweak and a hash key. See
 // https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.470.5288
@@ -109,7 +53,7 @@ type hctr struct {
 	tweak [blockSize]byte
 	// productTable contains the first sixteen powers of the hash key.
 	// However, they are in bit reversed order.
-	productTable [16]hctrFieldElement
+	productTable [16]ghashFieldElement
 }
 
 func (h *hctr) BlockSize() int {
@@ -130,56 +74,25 @@ func NewHCTR(cipher cipher.Block, tweak, hkey []byte) (LengthPreservingMode, err
 	// therefore the bits will be in the reverse order. So normally one
 	// would expect, say, 4*key to be in index 4 of the table but due to
 	// this bit ordering it will actually be in index 0010 (base 2) = 2.
-	x := hctrFieldElement{
+	x := ghashFieldElement{
 		byteorder.BEUint64(hkey[:8]),
 		byteorder.BEUint64(hkey[8:blockSize]),
 	}
 	c.productTable[reverseBits(1)] = x
 
 	for i := 2; i < 16; i += 2 {
-		c.productTable[reverseBits(i)] = hctrDouble(&c.productTable[reverseBits(i/2)])
-		c.productTable[reverseBits(i+1)] = hctrAdd(&c.productTable[reverseBits(i)], &x)
+		c.productTable[reverseBits(i)] = ghashDouble(&c.productTable[reverseBits(i/2)])
+		c.productTable[reverseBits(i+1)] = ghashAdd(&c.productTable[reverseBits(i)], &x)
 	}
 	return c, nil
 }
 
 // mul sets y to y*H, where H is the GCM key, fixed during NewHCTR.
-func (h *hctr) mul(y *hctrFieldElement) {
-	var z hctrFieldElement
+func (h *hctr) mul(y *ghashFieldElement) {
+	ghashMul(&h.productTable, y)
-
-	// Eliminate bounds checks in the loop.
-	_ = hctrReductionTable[0xf]
-
-	for i := 0; i < 2; i++ {
-		word := y.high
-		if i == 1 {
-			word = y.low
-		}
-
-		// Multiplication works by multiplying z by 16 and adding in
-		// one of the precomputed multiples of hash key.
-		for j := 0; j < 64; j += 4 {
-			msw := z.high & 0xf
-			z.high >>= 4
-			z.high |= z.low << 60
-			z.low >>= 4
-			z.low ^= uint64(hctrReductionTable[msw]) << 48
-
-			// the values in |table| are ordered for
-			// little-endian bit positions. See the comment
-			// in NewHCTR.
-			t := &h.productTable[word&0xf]
-
-			z.low ^= t.low
-			z.high ^= t.high
-			word >>= 4
-		}
-	}
-
-	*y = z
 }
 
-func (h *hctr) updateBlock(block []byte, y *hctrFieldElement) {
+func (h *hctr) updateBlock(block []byte, y *ghashFieldElement) {
 	y.low ^= byteorder.BEUint64(block)
 	y.high ^= byteorder.BEUint64(block[8:])
 	h.mul(y)
@@ -188,7 +101,7 @@ func (h *hctr) updateBlock(block []byte, y *hctrFieldElement) {
 // Universal Hash Function.
 // Chapter 3.3 in https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.470.5288.
 func (h *hctr) uhash(m []byte, out *[blockSize]byte) {
-	var y hctrFieldElement
+	var y ghashFieldElement
 	msg := m
 	// update blocks
 	for len(msg) >= blockSize {
cipher/mur.go (new file, 187 lines):

// Copyright 2025 Sun Yimin. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cipher

import (
	"crypto/cipher"
	"crypto/subtle"
	"errors"

	"github.com/emmansun/gmsm/internal/alias"
	"github.com/emmansun/gmsm/internal/byteorder"
)

type StreamCipherCreator func(key, iv []byte) (cipher.Stream, error)

const (
	maxIVSize  = 32
	maxTagSize = 16
)

type mur struct {
	streamCipherCreator StreamCipherCreator

	tagSize int
	// productTable contains the first sixteen powers of the hash key.
	// However, they are in bit reversed order.
	productTable [16]ghashFieldElement
}

// NewMUR creates a new MUR (misuse-resistant AEAD mode) instance with a default tag size of 16 bytes.
// It takes a StreamCipherCreator function for generating the underlying stream cipher and a ghash key.
func NewMUR(streamCipherCreator StreamCipherCreator, hkey []byte) (*mur, error) {
	return NewMURWithTagSize(streamCipherCreator, hkey, 16)
}

// NewMURWithTagSize creates a new MUR (misuse-resistant AEAD mode) instance with the specified tag size.
func NewMURWithTagSize(streamCipherCreator StreamCipherCreator, hkey []byte, tagSize int) (*mur, error) {
	if len(hkey) != ghashBlockSize {
		return nil, errors.New("cipher: invalid hash key length")
	}
	if tagSize < 8 || tagSize > 16 {
		return nil, errors.New("cipher: invalid tag size")
	}

	c := &mur{}
	c.streamCipherCreator = streamCipherCreator
	c.tagSize = tagSize
	// We precompute 16 multiples of |key|. However, when we do lookups
	// into this table we'll be using bits from a field element and
	// therefore the bits will be in the reverse order. So normally one
	// would expect, say, 4*key to be in index 4 of the table but due to
	// this bit ordering it will actually be in index 0010 (base 2) = 2.
	x := ghashFieldElement{
		byteorder.BEUint64(hkey[:8]),
		byteorder.BEUint64(hkey[8:ghashBlockSize]),
	}
	c.productTable[reverseBits(1)] = x

	for i := 2; i < 16; i += 2 {
		c.productTable[reverseBits(i)] = ghashDouble(&c.productTable[reverseBits(i/2)])
		c.productTable[reverseBits(i+1)] = ghashAdd(&c.productTable[reverseBits(i)], &x)
	}

	return c, nil
}

// Overhead returns the maximum difference between the lengths of a
// plaintext and its ciphertext.
func (g *mur) Overhead() int {
	return g.tagSize
}

// Seal encrypts and authenticates plaintext, authenticates the
// additional data and appends the result to dst, returning the updated
// slice.
//
// To reuse plaintext's storage for the encrypted output, use plaintext[:0]
// as dst. Otherwise, the remaining capacity of dst must not overlap plaintext.
// dst and additionalData may not overlap.
func (g *mur) Seal(iv, dataKey, tagKey, dst, plaintext, additionalData []byte) ([]byte, error) {
	ret, out := alias.SliceForAppend(dst, len(plaintext)+g.tagSize)
	if alias.InexactOverlap(out, plaintext) {
		panic("cipher: invalid buffer overlap")
	}

	var (
		tmpIV [maxIVSize]byte
		tag   [maxTagSize]byte
		ivLen = len(iv)
	)

	if ivLen > maxIVSize {
		panic("cipher: iv too large")
	}

	copy(tmpIV[:], iv)
	g.murAuth(tmpIV[:], plaintext, additionalData)
	subtle.XORBytes(tmpIV[:], tmpIV[:], iv)
	tagStream, err := g.streamCipherCreator(tagKey, tmpIV[:ivLen])
	if err != nil {
		return nil, err
	}
	tagStream.XORKeyStream(tag[:g.tagSize], tag[:g.tagSize])

	clear(tmpIV[:])
	subtle.XORBytes(tmpIV[:], iv, tag[:])
	dataStream, err := g.streamCipherCreator(dataKey, tmpIV[:ivLen])
	if err != nil {
		return nil, err
	}
	dataStream.XORKeyStream(out, plaintext)
	copy(out[len(plaintext):], tag[:g.tagSize])
	return ret, nil
}

// Open decrypts and authenticates ciphertext, authenticates the
// additional data and, if successful, appends the resulting plaintext
// to dst, returning the updated slice. The iv, dataKey, tagKey
// and the additional data must match the value passed to Seal.
//
// To reuse ciphertext's storage for the decrypted output, use ciphertext[:0]
// as dst. Otherwise, the remaining capacity of dst must not overlap ciphertext.
// dst and additionalData may not overlap.
//
// Even if the function fails, the contents of dst, up to its capacity,
// may be overwritten.
func (g *mur) Open(iv, dataKey, tagKey, dst, ciphertext, additionalData []byte) ([]byte, error) {
	if len(ciphertext) < g.tagSize {
		return nil, errOpen
	}
	ret, out := alias.SliceForAppend(dst, len(ciphertext)-g.tagSize)
	if alias.InexactOverlap(out, ciphertext) {
		panic("cipher: invalid buffer overlap of output and input")
	}
	if alias.AnyOverlap(out, additionalData) {
		panic("cipher: invalid buffer overlap of output and additional data")
	}
	tag := ciphertext[len(ciphertext)-g.tagSize:]
	ciphertext = ciphertext[:len(ciphertext)-g.tagSize]

	var (
		tmpIV  [maxIVSize]byte
		calTag [maxTagSize]byte
		ivLen  = len(iv)
	)
	if ivLen > maxIVSize {
		panic("cipher: iv too large")
	}
	copy(tmpIV[:], tag)
	subtle.XORBytes(tmpIV[:], iv, tmpIV[:])
	dataStream, err := g.streamCipherCreator(dataKey, tmpIV[:ivLen])
	if err != nil {
		return nil, err
	}
	dataStream.XORKeyStream(out, ciphertext)

	clear(tmpIV[:])
	g.murAuth(tmpIV[:], out, additionalData)
	subtle.XORBytes(tmpIV[:], tmpIV[:], iv)
	tagStream, err := g.streamCipherCreator(tagKey, tmpIV[:ivLen])
	if err != nil {
		return nil, err
	}
	tagStream.XORKeyStream(calTag[:g.tagSize], calTag[:g.tagSize])

	if subtle.ConstantTimeCompare(tag, calTag[:g.tagSize]) != 1 {
		clear(out)
		return nil, errOpen
	}
	return ret, nil
}

func (g *mur) murAuth(out []byte, plaintext, additionalData []byte) {
	var tag [ghashBlockSize]byte
	tagField := ghashFieldElement{}
	ghashUpdate(&g.productTable, &tagField, additionalData)
	ghashUpdate(&g.productTable, &tagField, plaintext)
	lenBlock := make([]byte, 16)
	byteorder.BEPutUint64(lenBlock[:8], uint64(len(additionalData))*8)
	byteorder.BEPutUint64(lenBlock[8:], uint64(len(plaintext))*8)
	ghashUpdate(&g.productTable, &tagField, lenBlock)
	byteorder.BEPutUint64(tag[:], tagField.low)
	byteorder.BEPutUint64(tag[8:], tagField.high)
	copy(out, tag[:])
}
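Again for orientation, a minimal usage sketch of ZUC-MUR; it is not part of the diff, and the all-zero key, IV and hash key values are illustrative only (the GM/T 0001.4-2024 Appendix C.3 vectors are in cipher/zuc_mur_test.go below). Unlike GXM, a mur instance can be reused across messages because it creates fresh streams per call from the supplied StreamCipherCreator, binding the tag into the data-encryption IV for misuse resistance.

package main

import (
	_cipher "crypto/cipher"
	"fmt"

	"github.com/emmansun/gmsm/cipher"
	"github.com/emmansun/gmsm/zuc"
)

func main() {
	newZUC := func(key, iv []byte) (_cipher.Stream, error) {
		return zuc.NewCipher(key, iv)
	}

	hkey := make([]byte, 16)    // GHASH key (illustration only)
	dataKey := make([]byte, 16) // key for the data-encryption stream
	tagKey := make([]byte, 16)  // key for the tag-encryption stream
	iv := make([]byte, 16)      // ZUC-128 IV size

	m, err := cipher.NewMUR(newZUC, hkey)
	if err != nil {
		panic(err)
	}
	sealed, err := m.Seal(iv, dataKey, tagKey, nil, []byte("hello"), []byte("header"))
	if err != nil {
		panic(err)
	}
	plain, err := m.Open(iv, dataKey, tagKey, nil, sealed, []byte("header"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plain)
}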
cipher/zuc_gxm_test.go (new file, 122 lines):

// Copyright 2025 Sun Yimin. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cipher_test

import (
	"bytes"
	"encoding/hex"
	"testing"

	"github.com/emmansun/gmsm/cipher"
	"github.com/emmansun/gmsm/zuc"
)

// GM/T 0001.4 - 2024 Appendix C.2
var gxmTestCases = []struct {
	iv      string
	h       string
	k       string
	a       string
	p       string
	result  string
	tagSize int
}{
	{
		iv:      "b3a6db3c870c3e99245e0d1c06b747de",
		h:       "6db45e4f9572f4e6fe0d91acda6801d5",
		k:       "edbe06afed8075576aad04afdec91d32",
		a:       "9de18b1fdab0ca9902b9729d492c807ec599d5",
		p:       "",
		result:  "2a14afaeb6e5ecc784fad24ddeb457d2",
		tagSize: 16,
	},
	{
		iv:      "2923be84e16cd6ae529049f1f1bbe9eb",
		h:       "27bede74018082da87d4e5b69f18bf66",
		k:       "32070e0f39b7b692b4673edc3184a48e",
		a:       "",
		p:       "",
		result:  "5d8a045ac89a681a4bc910380bbadccf",
		tagSize: 16,
	},
	{
		iv:      "2d2086832cc2fe3fd18cb51d6c5e99a5",
		h:       "9d6cb51623fd847f2e45d7f52f900db8",
		k:       "56131c03e457f6226b5477633b873984",
		a:       "",
		p:       "ffffffffffffffffffffffffffffff",
		result:  "b78e2f30cf70252d58767997f1b086efb30febbfe0c88a1e77b1dde9d45525",
		tagSize: 16,
	},
	{
		iv:      "bb8b76cfe5f0d9335029008b2a3b2b21",
		h:       "ee767d503bb3d5d1b585f57a0418c673",
		k:       "e4b5c1f8578034ce6424f58c675597ac",
		a:       "fcdd4cb97995da30efd957194eac4d2a8610470f99c88657f462f68dff7561a5",
		p:       "5fee5517627f17b22a96caf97b77ec7f667cc47d13c34923be2441300066a6c150b24d66c947ca7b2e708eb62bb352",
		result:  "b56da5c99238b04a45e3d9d96f12f3dc052e428fa5a5817292ee23dbdad9782cf66f55c846e55dc68f47eaf8378e7051c7aedd9e1c7d74c38059f5e7e3a742",
		tagSize: 16,
	},
	{
		iv:      "3615df810cc677f15080faa1dd44aad3",
		h:       "fdfaddc476785c25906fe42ba63a93b7",
		k:       "f405d652b6362e70f8362bd383b7298b",
		a:       "5fee5517627f17b22a96caf97b77ec7f667cc47d13c34923be2441300066a6c150b24d66c947ca7b2e708eb62bb352fc",
		p:       "dd4cb97995da30efd957194eac4d2a8610470f99c88657f462f68dff7561a5f3",
		result:  "1134ffc119ad163e914989474be6c072fd5867f3989d8b15899ebd10a4a248c98829aaa4f9891822",
		tagSize: 8,
	},
}

func TestGXMSeal(t *testing.T) {
	for i, tc := range gxmTestCases {
		key, _ := hex.DecodeString(tc.k)
		iv, _ := hex.DecodeString(tc.iv)
		h, _ := hex.DecodeString(tc.h)
		a, _ := hex.DecodeString(tc.a)
		p, _ := hex.DecodeString(tc.p)
		expected, _ := hex.DecodeString(tc.result)

		eea, err := zuc.NewCipher(key, iv)
		if err != nil {
			t.Fatalf("case %d: NewCipher error: %s", i, err)
		}
		c, err := cipher.NewGXMWithTagSize(eea, h, tc.tagSize)
		if err != nil {
			t.Fatalf("case %d: NewGXM error: %s", i, err)
		}
		out := c.Seal(nil, p, a)
		if !bytes.Equal(out, expected) {
			t.Errorf("case %d: incorrect ciphertext\n got: %x\nwant: %x", i, out, expected)
		}
	}
}

func TestGXMOpen(t *testing.T) {
	for i, tc := range gxmTestCases {
		key, _ := hex.DecodeString(tc.k)
		iv, _ := hex.DecodeString(tc.iv)
		h, _ := hex.DecodeString(tc.h)
		a, _ := hex.DecodeString(tc.a)
		p, _ := hex.DecodeString(tc.p)
		expected, _ := hex.DecodeString(tc.result)

		eea, err := zuc.NewCipher(key, iv)
		if err != nil {
			t.Fatalf("case %d: NewCipher error: %s", i, err)
		}
		c, err := cipher.NewGXMWithTagSize(eea, h, tc.tagSize)
		if err != nil {
			t.Fatalf("case %d: NewGXM error: %s", i, err)
		}
		out, err := c.Open(nil, expected, a)
		if err != nil {
			t.Fatalf("case %d: Open error: %s", i, err)
		}
		if !bytes.Equal(out, p) {
			t.Errorf("case %d: incorrect plaintext\n got: %x\nwant: %x", i, out, p)
		}
	}
}
cipher/zuc_mur_test.go (new file, 138 lines):

// Copyright 2025 Sun Yimin. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cipher_test

import (
	"bytes"
	_cipher "crypto/cipher"
	"encoding/hex"
	"testing"

	"github.com/emmansun/gmsm/cipher"
	"github.com/emmansun/gmsm/zuc"
)

var murTestCases = []struct {
	iv      string
	h       string
	k1      string
	k2      string
	a       string
	p       string
	result  string
	tagSize int
}{
	// GM/T 0001.4 - 2024 Appendix C.3
	{
		iv:      "bb8b76cfe5f0d9335029008b2a3b2b21",
		h:       "ee767d503bb3d5d1b585f57a0418c673",
		k1:      "e4b5c1f8578034ce6424f58c675597ac",
		k2:      "608053f6af9efda562d95dc013bea6b5",
		a:       "fcdd4cb97995da30efd957194eac4d2a8610470f99c88657f462f68dff7561a5",
		p:       "5fee5517627f17b22a96caf97b77ec7f667cc47d13c34923be2441300066a6c150b24d66c947ca7b2e708eb62bb352",
		result:  "cf5594bd30c0da0fb41fa6054e534d0494c9d6c4f132fc85771a473458b09583b825c662bfd82278178a845e281e5415c5d1a78a42c4dcd67db05fa1a640a0",
		tagSize: 16,
	},
	{
		iv:      "2923be84e16cd6ae529049f1f1bbe9eb",
		h:       "27bede74018082da87d4e5b69f18bf66",
		k1:      "32070e0f39b7b692b4673edc3184a48e",
		k2:      "27636f4414510d62cc15cfe194ec4f6d",
		a:       "",
		p:       "",
		result:  "c0016e0772c9983d0fd9fd8c1b012845",
		tagSize: 16,
	},
	{
		iv:      "2d2086832cc2fe3fd18cb51d6c5e99a5",
		h:       "9d6cb51623fd847f2e45d7f52f900db8",
		k1:      "56131c03e457f6226b5477633b873984",
		k2:      "a88981534db331a386de3e52fb46029b",
		a:       "",
		p:       "ffffffffffffffffffffffffffffff",
		result:  "234c2d51eaa582da9be3cc3828aa670a7afb7d817efa0777826f1e33a53cf3",
		tagSize: 16,
	},
	{
		iv:      "b3a6db3c870c3e99245e0d1c06b747de",
		h:       "6db45e4f9572f4e6fe0d91acda6801d5",
		k1:      "edbe06afed8075576aad04afdec91d32",
		k2:      "61d4fca6b2c2bb48b4b1172531333620",
		a:       "9de18b1fdab0ca9902b9729d492c807ec599d5",
		p:       "",
		result:  "8213c29606d02bba10f13ffad1d26a42",
		tagSize: 16,
	},
	{
		iv:      "b3a6db3c870c3e99245e0d1c06b747de",
		h:       "6db45e4f9572f4e6fe0d91acda6801d5",
		k1:      "edbe06afed8075576aad04afdec91d32",
		k2:      "61d4fca6b2c2bb48b4b1172531333620",
		a:       "9de18b1fdab0ca9902b9729d492c807ec599d5e980b2eac9cc53bf67d6bf14d67e2ddc8e6683ef574961ff698f61cdd1",
		p:       "b3124dc843bb8ba61f035a7d0938251f5dd4cbfc96f5453b130d890a1cdbae32",
		result:  "dabbbe23d8f0ea42e31a9bdd9706a4275d8aacd2cf27c4a4c0d0ba6fb8f31da7a276827b74509357",
		tagSize: 8,
	},
}

func TestMurSeal(t *testing.T) {
	zucCipherCreator := func(key, iv []byte) (_cipher.Stream, error) {
		return zuc.NewCipher(key, iv)
	}
	for i, tc := range murTestCases {
		iv, _ := hex.DecodeString(tc.iv)
		h, _ := hex.DecodeString(tc.h)
		k1, _ := hex.DecodeString(tc.k1)
		k2, _ := hex.DecodeString(tc.k2)
		a, _ := hex.DecodeString(tc.a)
		p, _ := hex.DecodeString(tc.p)
		result, _ := hex.DecodeString(tc.result)

		g, err := cipher.NewMURWithTagSize(zucCipherCreator, h, tc.tagSize)
		if err != nil {
			t.Errorf("case %d: NewMURWithTagSize error: %s", i, err)
			continue
		}
		c, err := g.Seal(iv, k1, k2, nil, p, a)
		if err != nil {
			t.Errorf("case %d: Seal error: %s", i, err)
			continue
		}
		if !bytes.Equal(c, result) {
			t.Errorf("case %d: Seal mismatch\ngot: %x\nwant: %x", i, c, result)
			continue
		}
	}
}

func TestMurOpen(t *testing.T) {
	zucCipherCreator := func(key, iv []byte) (_cipher.Stream, error) {
		return zuc.NewCipher(key, iv)
	}
	for i, tc := range murTestCases {
		iv, _ := hex.DecodeString(tc.iv)
		h, _ := hex.DecodeString(tc.h)
		k1, _ := hex.DecodeString(tc.k1)
		k2, _ := hex.DecodeString(tc.k2)
		a, _ := hex.DecodeString(tc.a)
		p, _ := hex.DecodeString(tc.p)
		result, _ := hex.DecodeString(tc.result)

		g, err := cipher.NewMURWithTagSize(zucCipherCreator, h, tc.tagSize)
		if err != nil {
			t.Errorf("case %d: NewMURWithTagSize error: %s", i, err)
			continue
		}
		out, err := g.Open(iv, k1, k2, nil, result, a)
		if err != nil {
			t.Errorf("case %d: Open error: %s", i, err)
			continue
		}
		if !bytes.Equal(out, p) {
			t.Errorf("case %d: Open mismatch\ngot: %x\nwant: %x", i, out, p)
			continue
		}
	}
}
@@ -7,6 +7,8 @@ import (
 	"errors"
 	"hash"
 	"io"
+	"runtime"
+	"sync/atomic"
 	"time"
 
 	"github.com/emmansun/gmsm/sm3"
@@ -226,6 +228,8 @@ type DRBG interface {
 	Generate(b, additional []byte) error
 	// MaxBytesPerRequest return max bytes per request
 	MaxBytesPerRequest() int
+	// Destroy internal state
+	Destroy()
 }
 
 type BaseDrbg struct {
@@ -258,6 +262,26 @@ func (hd *BaseDrbg) setSecurityLevel(securityLevel SecurityLevel) {
 	}
 }
 
+// Destroy securely clears all internal state data of the DRBG instance.
+//
+// This method should be called when the DRBG instance is no longer needed to
+// ensure sensitive data is removed from memory.
+//
+// References:
+//   - GM/T 0105-2021 B.2, E.2: Specifies that internal states must be cleared when no longer needed.
+//   - NIST SP 800-90A Rev.1: Recommends securely erasing sensitive data to prevent leakage.
+func (hd *BaseDrbg) Destroy() {
+	setZero(hd.v)
+	hd.seedLength = 0
+	atomic.StoreUint64(&hd.reseedCounter, 0xFFFFFFFFFFFFFFFF)
+	atomic.StoreUint64(&hd.reseedCounter, 0x00)
+	atomic.StoreUint64(&hd.reseedIntervalInCounter, 0xFFFFFFFFFFFFFFFF)
+	atomic.StoreUint64(&hd.reseedIntervalInCounter, 0x00)
+	hd.reseedTime = time.Time{}
+	atomic.StoreInt64((*int64)(&hd.reseedIntervalInTime), int64(1<<63-1))
+	atomic.StoreInt64((*int64)(&hd.reseedIntervalInTime), int64(0))
+}
+
 // Set security_strength to the lowest security strength greater than or equal to
 // requested_instantiation_security_strength from the set {112, 128, 192, 256}.
 func selectSecurityStrength(requested int) int {
@@ -292,3 +316,32 @@ func addOne(data []byte, len int) {
 		temp >>= 8
 	}
 }
+
+// setZero securely erases the content of a byte slice by overwriting it multiple times.
+// It follows a secure erasure pattern by first writing 0xFF and then 0x00 to each byte
+// three times in succession. Memory barriers (via runtime.KeepAlive) are used between
+// operations to ensure write completion and prevent compiler optimizations from eliminating
+// the seemingly redundant writes.
+//
+// This function is used to clear sensitive data (like cryptographic keys or passwords)
+// from memory to minimize the risk of data exposure in memory dumps or through
+// side-channel attacks.
+//
+// If the provided slice is nil, the function returns immediately without action.
+func setZero(data []byte) {
+	if data == nil {
+		return
+	}
+	for range 3 {
+		for i := range data {
+			data[i] = 0xFF
		}
+		runtime.KeepAlive(data)
+
+		clear(data)
+		// This should keep buf's backing array live and thus prevent dead store
+		// elimination, according to discussion at
+		// https://github.com/golang/go/issues/33325 .
+		runtime.KeepAlive(data)
+	}
+}
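A minimal lifecycle sketch for the new Destroy support follows; it is illustrative and not part of the diff. The constructor arguments mirror the package tests shown later in this commit (hash function, security level, GM flag, entropy, nonce, personalization), and the import path "github.com/emmansun/gmsm/drbg" is assumed from the repository layout.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/emmansun/gmsm/drbg" // assumed import path for this package
	"github.com/emmansun/gmsm/sm3"
)

func main() {
	entropy := make([]byte, 48)
	if _, err := rand.Read(entropy); err != nil {
		panic(err)
	}

	// Instantiate an SM3-based Hash DRBG, as the package tests do.
	hd, err := drbg.NewHashDrbg(sm3.New, drbg.SECURITY_LEVEL_ONE, true, entropy[:32], entropy[32:48], nil)
	if err != nil {
		panic(err)
	}

	out := make([]byte, 32)
	if err := hd.Generate(out, nil); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)

	// Clear V, C and the reseed bookkeeping once the instance is no longer needed.
	hd.Destroy()
}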
@@ -95,7 +95,6 @@ func TestNistHashDrbgPrng(t *testing.T) {
 	}
 }
-
 
 func TestNistHmacDrbgPrng(t *testing.T) {
 	prng, err := NewNistHmacDrbgPrng(sha256.New, nil, 32, SECURITY_LEVEL_TEST, nil)
 	if err != nil {
@@ -121,3 +120,24 @@ func TestGMSecurityStrengthValidation(t *testing.T) {
 		t.Fatalf("expected error here")
 	}
 }
+
+func Test_setZero(t *testing.T) {
+
+	cases := []struct {
+		name string
+		args []byte
+	}{
+		{"nil", nil},
+		{"empty", []byte{}},
+		{"normal", []byte{1, 2, 3, 4, 5}},
+		{"large", bytes.Repeat([]byte{1, 2, 3, 4, 5}, 100)},
+	}
+	for _, tt := range cases {
+		t.Run(tt.name, func(t *testing.T) {
+			setZero(tt.args)
+			if !bytes.Equal(tt.args, make([]byte, len(tt.args))) {
+				t.Errorf("setZero() = %v, want %v", tt.args, make([]byte, len(tt.args)))
+			}
+		})
+	}
+}
|
|||||||
v := make([]byte, outlen)
|
v := make([]byte, outlen)
|
||||||
output := make([]byte, outlen)
|
output := make([]byte, outlen)
|
||||||
copy(v, cd.v)
|
copy(v, cd.v)
|
||||||
for i := range (cd.seedLength+outlen-1)/outlen {
|
for i := range (cd.seedLength + outlen - 1) / outlen {
|
||||||
// V = (V + 1) mod 2^outlen
|
// V = (V + 1) mod 2^outlen
|
||||||
addOne(v, outlen)
|
addOne(v, outlen)
|
||||||
// output_block = Encrypt(Key, V)
|
// output_block = Encrypt(Key, V)
|
||||||
@ -222,3 +222,10 @@ func (cd *CtrDrbg) bcc(block cipher.Block, data []byte) []byte {
|
|||||||
}
|
}
|
||||||
return chainingValue
|
return chainingValue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Destroy destroys the internal state of DRBG instance
|
||||||
|
// working_state = {V, Key, reseed_counter, last_reseed_time,reseed_interval_in_counter, reseed_interval_in_time}
|
||||||
|
func (cd *CtrDrbg) Destroy() {
|
||||||
|
cd.BaseDrbg.Destroy()
|
||||||
|
setZero(cd.key)
|
||||||
|
}
|
||||||
|
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"crypto/aes"
 	"crypto/cipher"
+	"crypto/rand"
 	"encoding/hex"
 	"testing"
 
@@ -303,3 +304,19 @@ func TestGmCtrDRBG_Validation(t *testing.T) {
 		t.Fatalf("expected error here")
 	}
 }
+
+func TestCtrDrbg_Destroy(t *testing.T) {
+	entropyInput := make([]byte, 64)
+	_, _ = rand.Reader.Read(entropyInput)
+	cd, err := NewCtrDrbg(sm4.NewCipher, 16, SECURITY_LEVEL_ONE, true, entropyInput[:32], entropyInput[32:64], nil)
+	if err != nil {
+		t.Fatalf("NewCtrDrbg failed: %v", err)
+	}
+	cd.Destroy()
+	if !bytes.Equal(cd.key, make([]byte, len(cd.key))) {
+		t.Errorf("Destroy failed: key not zeroed")
+	}
+	if !bytes.Equal(cd.v, make([]byte, len(cd.v))) {
+		t.Errorf("Destroy failed: v not zeroed")
+	}
+}
@@ -222,3 +222,10 @@ func (hd *HashDrbg) derive(seedMaterial []byte, len int) []byte {
 	}
 	return k
 }
+
+// Destroy destroys the internal state of DRBG instance
+// working_state = {V, C, reseed_counter, last_reseed_time, reseed_interval_in_counter, reseed_interval_in_time}
+func (hd *HashDrbg) Destroy() {
+	hd.BaseDrbg.Destroy()
+	setZero(hd.c)
+}
@@ -2,6 +2,7 @@ package drbg
 
 import (
 	"bytes"
+	"crypto/rand"
 	"crypto/sha1"
 	"crypto/sha256"
 	"crypto/sha512"
@@ -249,3 +250,19 @@ func TestGmHashDRBG_Validation(t *testing.T) {
 		t.Fatalf("expected error here")
 	}
 }
+
+func TestHashDrbg_Destroy(t *testing.T) {
+	entropyInput := make([]byte, 64)
+	_, _ = rand.Reader.Read(entropyInput)
+	hd, err := NewHashDrbg(sm3.New, SECURITY_LEVEL_ONE, true, entropyInput[:32], entropyInput[32:48], nil)
+	if err != nil {
+		t.Fatalf("NewHashDrbg failed: %v", err)
+	}
+	hd.Destroy()
+	if !bytes.Equal(hd.c, make([]byte, len(hd.c))) {
+		t.Errorf("Destroy failed: c not zeroed")
+	}
+	if !bytes.Equal(hd.v, make([]byte, len(hd.v))) {
+		t.Errorf("Destroy failed: v not zeroed")
+	}
+}
@@ -153,3 +153,10 @@ func (hd *HmacDrbg) update(byteSlices ...[]byte) error {
 	hd.v = md.Sum(hd.v[:0])
 	return nil
 }
+
+// Destroy destroys the internal state of DRBG instance
+// working_state = {V, Key, reseed_counter, last_reseed_time, reseed_interval_in_counter, reseed_interval_in_time}
+func (hd *HmacDrbg) Destroy() {
+	hd.BaseDrbg.Destroy()
+	setZero(hd.key)
+}
@@ -2,12 +2,15 @@ package drbg
 
 import (
	"bytes"
+	"crypto/rand"
 	"crypto/sha1"
 	"crypto/sha256"
 	"crypto/sha512"
 	"encoding/hex"
 	"hash"
 	"testing"
+
+	"github.com/emmansun/gmsm/sm3"
 )
 
 var hmactests = []struct {
@@ -802,3 +805,19 @@ func TestHmacDRBG(t *testing.T) {
 		}
 	}
 }
+
+func TestHmacDrbg_Destroy(t *testing.T) {
+	entropyInput := make([]byte, 64)
+	_, _ = rand.Reader.Read(entropyInput)
+	hd, err := NewHmacDrbg(sm3.New, SECURITY_LEVEL_ONE, true, entropyInput[:32], entropyInput[32:48], nil)
+	if err != nil {
+		t.Fatalf("NewHmacDrbg failed: %v", err)
+	}
+	hd.Destroy()
+	if !bytes.Equal(hd.key, make([]byte, len(hd.key))) {
+		t.Errorf("Destroy failed: key not zeroed")
+	}
+	if !bytes.Equal(hd.v, make([]byte, len(hd.v))) {
+		t.Errorf("Destroy failed: v not zeroed")
+	}
+}
@@ -2,6 +2,7 @@ package zuc
 
 import (
 	"crypto/subtle"
+	"errors"
 
 	"github.com/emmansun/gmsm/internal/alias"
 	"github.com/emmansun/gmsm/internal/byteorder"
@@ -26,6 +27,20 @@ type eea struct
 	bucketSize int // size of the state bucket, 0 means no bucket
 }
 
+const (
+	magic            = "zuceea"
+	stateSize        = (16 + 6) * 4 // zucState32 size in bytes
+	minMarshaledSize = len(magic) + stateSize + 8 + 4*3
+)
+
+// NewEmptyCipher creates and returns a new empty ZUC-EEA cipher instance.
+// This function initializes an empty eea struct that can be used for
+// unmarshaling a previously saved state using the UnmarshalBinary method.
+// The returned cipher instance is not ready for encryption or decryption.
+func NewEmptyCipher() *eea {
+	return new(eea)
+}
+
 // NewCipher creates a stream cipher based on key and iv aguments.
 // The key must be 16 bytes long and iv must be 16 bytes long for zuc 128;
 // or the key must be 32 bytes long and iv must be 23 bytes long for zuc 256;
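The new constants encode the serialized layout used by AppendBinary further down: a 6-byte magic, an 88-byte zucState32 (16 LFSR words plus r1, r2, x0..x3), then xLen (4 bytes), used (8 bytes), stateIndex (4 bytes) and bucketSize (4 bytes), 114 bytes minimum. A throwaway snippet that just re-checks that arithmetic (plain Go, no gmsm dependency):

package main

import "fmt"

func main() {
	const magic = "zuceea"
	const stateSize = (16 + 6) * 4 // 16 LFSR words + r1, r2, x0, x1, x2, x3
	const minMarshaledSize = len(magic) + stateSize + 8 + 4*3

	fmt.Println(stateSize)        // 88
	fmt.Println(minMarshaledSize) // 6 + 88 + 8 + 12 = 114
}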
@@ -57,6 +72,114 @@ func NewCipherWithBucketSize(key, iv []byte, bucketSize int) (*eea, error) {
 	return c, nil
 }
 
+func appendState(b []byte, e *zucState32) []byte {
+	for i := range 16 {
+		b = byteorder.BEAppendUint32(b, e.lfsr[i])
+	}
+	b = byteorder.BEAppendUint32(b, e.r1)
+	b = byteorder.BEAppendUint32(b, e.r2)
+	b = byteorder.BEAppendUint32(b, e.x0)
+	b = byteorder.BEAppendUint32(b, e.x1)
+	b = byteorder.BEAppendUint32(b, e.x2)
+	b = byteorder.BEAppendUint32(b, e.x3)
+
+	return b
+}
+
+func (e *eea) MarshalBinary() ([]byte, error) {
+	return e.AppendBinary(make([]byte, 0, minMarshaledSize))
+}
+
+func (e *eea) AppendBinary(b []byte) ([]byte, error) {
+	b = append(b, magic...)
+	b = appendState(b, &e.zucState32)
+	b = byteorder.BEAppendUint32(b, uint32(e.xLen))
+	b = byteorder.BEAppendUint64(b, e.used)
+	b = byteorder.BEAppendUint32(b, uint32(e.stateIndex))
+	b = byteorder.BEAppendUint32(b, uint32(e.bucketSize))
+	if e.xLen > 0 {
+		b = append(b, e.x[:e.xLen]...)
+	}
+	for _, state := range e.states {
+		b = appendState(b, state)
+	}
+	return b, nil
+}
+
+func unmarshalState(b []byte, e *zucState32) []byte {
+	for i := range 16 {
+		b, e.lfsr[i] = consumeUint32(b)
+	}
+	b, e.r1 = consumeUint32(b)
+	b, e.r2 = consumeUint32(b)
+	b, e.x0 = consumeUint32(b)
+	b, e.x1 = consumeUint32(b)
+	b, e.x2 = consumeUint32(b)
+	b, e.x3 = consumeUint32(b)
+	return b
+}
+
+func UnmarshalCipher(b []byte) (*eea, error) {
+	var e eea
+	if err := e.UnmarshalBinary(b); err != nil {
+		return nil, err
+	}
+	return &e, nil
+}
+
+func (e *eea) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || (string(b[:len(magic)]) != magic) {
+		return errors.New("zuc: invalid eea state identifier")
+	}
+	if len(b) < minMarshaledSize {
+		return errors.New("zuc: invalid eea state size")
+	}
+	b = b[len(magic):]
+	b = unmarshalState(b, &e.zucState32)
+	var tmpUint32 uint32
+	b, tmpUint32 = consumeUint32(b)
+	e.xLen = int(tmpUint32)
+	b, e.used = consumeUint64(b)
+	b, tmpUint32 = consumeUint32(b)
+	e.stateIndex = int(tmpUint32)
+	b, tmpUint32 = consumeUint32(b)
+	e.bucketSize = int(tmpUint32)
+	if e.xLen < 0 || e.xLen > RoundBytes {
+		return errors.New("zuc: invalid eea remaining bytes length")
+	}
+	if e.xLen > 0 {
+		if len(b) < e.xLen {
+			return errors.New("zuc: invalid eea remaining bytes")
+		}
+		copy(e.x[:e.xLen], b[:e.xLen])
+		b = b[e.xLen:]
+	}
+	statesCount := len(b) / stateSize
+	if len(b)%stateSize != 0 {
+		return errors.New("zuc: invalid eea states size")
+	}
+
+	for range statesCount {
+		var state zucState32
+		b = unmarshalState(b, &state)
+		e.states = append(e.states, &state)
+	}
+
+	if e.stateIndex >= len(e.states) {
+		return errors.New("zuc: invalid eea state index")
+	}
+
+	return nil
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	return b[8:], byteorder.BEUint64(b)
+}
+
+func consumeUint32(b []byte) ([]byte, uint32) {
+	return b[4:], byteorder.BEUint32(b)
+}
+
 // reference GB/T 33133.2-2021 A.2
 func construcIV4EEA(count, bearer, direction uint32) []byte {
 	iv := make([]byte, 16)
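Read together, AppendBinary and UnmarshalBinary pin down the wire format: magic, zucState32, xLen, used, stateIndex, bucketSize (all integers big-endian), followed by the xLen buffered key bytes and any bucket states. The sketch below peeks at the fixed header of such a blob from outside the package; the offsets are re-derived from the write order above and the helper name is made up for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// dumpEEAHeader prints the fixed header fields of a blob produced by the
// MarshalBinary shown above, following AppendBinary's write order:
// magic | zucState32 | xLen | used | stateIndex | bucketSize | x[:xLen] | states...
func dumpEEAHeader(data []byte) error {
	const magicLen, stateSize = 6, (16 + 6) * 4
	const headerLen = magicLen + stateSize + 4 + 8 + 4 + 4
	if len(data) < headerLen || string(data[:magicLen]) != "zuceea" {
		return fmt.Errorf("not a marshaled ZUC-EEA state")
	}
	off := magicLen + stateSize
	xLen := binary.BigEndian.Uint32(data[off:])
	used := binary.BigEndian.Uint64(data[off+4:])
	stateIndex := binary.BigEndian.Uint32(data[off+12:])
	bucketSize := binary.BigEndian.Uint32(data[off+16:])
	fmt.Printf("xLen=%d used=%d stateIndex=%d bucketSize=%d\n", xLen, used, stateIndex, bucketSize)
	return nil
}

func main() {
	// data would normally come from MarshalBinary; nil just exercises the check.
	if err := dumpEEAHeader(nil); err != nil {
		fmt.Println(err)
	}
}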
@@ -153,20 +276,46 @@ func (c *eea) reset(offset uint64) {
 	c.used = n * uint64(c.bucketSize)
 }
 
-// seek sets the offset for the next XORKeyStream operation.
-//
-// If the offset is less than the current offset, the state will be reset to the initial state.
-// If the offset is equal to the current offset, the function behaves the same as XORKeyStream.
-// If the offset is greater than the current offset, the function will forward the state to the offset.
-// Note: This method is not thread-safe.
+// fastForward advances the ZUC cipher state to handle a given offset
+// without having to process each intermediate byte. This optimization
+// leverages precomputed states stored in buckets to move the cipher
+// state forward efficiently.
+func (c *eea) fastForward(offset uint64) {
+	// fast forward, check and adjust state if needed
+	var n uint64
+	if c.bucketSize > 0 {
+		n = offset / uint64(c.bucketSize)
+		expectedStateIndex := int(n)
+		if expectedStateIndex > c.stateIndex && expectedStateIndex < len(c.states) {
+			c.stateIndex = int(n)
+			c.zucState32 = *c.states[n]
+			c.xLen = 0
+			c.used = n * uint64(c.bucketSize)
+		}
+	}
+}
+
+// seek advances the internal state of the ZUC stream cipher to a given offset in the
+// key stream. It efficiently positions the cipher state to allow encryption or decryption
+// starting from the specified byte offset.
 func (c *eea) seek(offset uint64) {
+	// 1. fast forward to the nearest precomputed state
+	c.fastForward(offset)
+
+	// 2. check if need to reset and backward, regardless of bucketSize
 	if offset < c.used {
 		c.reset(offset)
 	}
+
+	// 3. if offset equals to c.used, nothing to do
 	if offset == c.used {
 		return
 	}
+
+	// 4. offset > used, need to forward
 	gap := offset - c.used
+
+	// 5. gap <= c.xLen, consume remaining key bytes, adjust buffer and return
 	if gap <= uint64(c.xLen) {
 		// offset is within the remaining key bytes
 		c.xLen -= int(gap)
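A worked example of the fast-forward arithmetic: with bucketSize 128, an offset of 513 gives n = 513/128 = 4, so the cipher restores states[4], sets used to 4*128 = 512 and leaves seek only one byte to generate. This is also why the test change further down expects stateIndex == 4 after XORKeyStreamAt at offset 513. A standalone sketch of just that index calculation (the helper is illustrative, not part of the package):

package main

import "fmt"

// bucketFor mirrors fastForward's index calculation: which precomputed state
// to restore for a given key-stream offset, and the used counter after the jump.
func bucketFor(offset, bucketSize uint64) (index, used uint64) {
	if bucketSize == 0 {
		return 0, 0 // no buckets configured: fastForward does nothing
	}
	n := offset / bucketSize
	return n, n * bucketSize
}

func main() {
	idx, used := bucketFor(513, 128)
	fmt.Println(idx, used) // 4 512
}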
@@ -177,14 +326,15 @@ func (c *eea) seek(offset uint64) {
 		}
 		return
 	}
-	// consumed all remaining key bytes first
+
+	// 6. gap > c.xLen, consume remaining key bytes first
 	if c.xLen > 0 {
 		c.used += uint64(c.xLen)
 		gap -= uint64(c.xLen)
 		c.xLen = 0
 	}
 
-	// forward the state to the offset
+	// 7. for the remaining gap, generate and discard key bytes in chunks
 	nextBucketOffset := c.bucketSize * len(c.states)
 	stepLen := uint64(RoundBytes)
 	var keyStream [RoundWords]uint32
@@ -198,6 +348,8 @@ func (c *eea) seek(offset uint64) {
 		}
 	}
 
+	// 8. finally consume remaining gap < RoundBytes
+	// and save remaining key bytes if any
 	if gap > 0 {
 		var keyBytes [RoundBytes]byte
 		genKeyStreamRev32(keyBytes[:], &c.zucState32)
@@ -3,6 +3,7 @@ package zuc
 import (
 	"bytes"
 	"crypto/cipher"
+	"encoding"
 	"encoding/hex"
 	"testing"
 
@@ -113,9 +114,11 @@ func TestXORStreamAt(t *testing.T) {
 			t.Errorf("expected=%x, result=%x\n", expected[32:64], dst[32:64])
 		}
 	}
+	data, _ := c.MarshalBinary()
+	c2, _ := UnmarshalCipher(data)
 	for i := 1; i < 4; i++ {
 		c.XORKeyStreamAt(dst[:i], src[:i], 0)
-		c.XORKeyStreamAt(dst[32:64], src[32:64], 32)
+		c2.XORKeyStreamAt(dst[32:64], src[32:64], 32)
 		if !bytes.Equal(dst[32:64], expected[32:64]) {
 			t.Errorf("expected=%x, result=%x\n", expected[32:64], dst[32:64])
 		}
@@ -128,8 +131,10 @@
 	if !bytes.Equal(dst[3:16], expected[3:16]) {
 		t.Errorf("expected=%x, result=%x\n", expected[3:16], dst[3:16])
 	}
+	data, _ := c.MarshalBinary()
+	c2, _ := UnmarshalCipher(data)
 	c.XORKeyStreamAt(dst[:1], src[:1], 0)
-	c.XORKeyStreamAt(dst[4:16], src[4:16], 4)
+	c2.XORKeyStreamAt(dst[4:16], src[4:16], 4)
 	if !bytes.Equal(dst[4:16], expected[4:16]) {
 		t.Errorf("expected=%x, result=%x\n", expected[3:16], dst[3:16])
 	}
@@ -215,7 +220,7 @@ func TestEEAXORKeyStreamAtWithBucketSize(t *testing.T) {
 	src := make([]byte, 10000)
 	expected := make([]byte, 10000)
 	dst := make([]byte, 10000)
-	stateCount := 1 + (10000 + RoundBytes -1) / RoundBytes
+	stateCount := 1 + (10000+RoundBytes-1)/RoundBytes
 	noBucketCipher.XORKeyStream(expected, src)
 
 	t.Run("Make sure the cached states are used once backward", func(t *testing.T) {
@@ -270,7 +275,7 @@ func TestEEAXORKeyStreamAtWithBucketSize(t *testing.T) {
 		}
 		clear(dst)
 		bucketCipher.XORKeyStreamAt(dst[513:768], src[513:768], 513)
-		if bucketCipher.stateIndex != 0 {
+		if bucketCipher.stateIndex != 4 {
 			t.Fatalf("expected=%d, result=%d\n", 0, bucketCipher.stateIndex)
 		}
 		if len(bucketCipher.states) != 7 {
@@ -291,6 +296,134 @@ func TestEEAXORKeyStreamAtWithBucketSize(t *testing.T) {
 			t.Fatalf("expected=%x, result=%x\n", expected[512:768], dst[512:768])
 		}
 	})
+
+	t.Run("Rotate end to start, end to start", func(t *testing.T) {
+		bucketCipher, err := NewEEACipherWithBucketSize(key, zucEEATests[0].count, zucEEATests[0].bearer, zucEEATests[0].direction, 128)
+		if err != nil {
+			t.Error(err)
+		}
+		clear(dst)
+		for i := len(src) - RoundBytes; i >= 0; i -= RoundBytes {
+			offset := i
+			bucketCipher.XORKeyStreamAt(dst[offset:offset+RoundBytes], src[offset:offset+RoundBytes], uint64(offset))
+			if !bytes.Equal(expected[offset:offset+RoundBytes], dst[offset:offset+RoundBytes]) {
+				t.Fatalf("at %d, expected=%x, result=%x\n", offset, expected[offset:offset+RoundBytes], dst[offset:offset+RoundBytes])
+			}
+		}
+		clear(dst)
+		for i := len(src) - RoundBytes; i >= 0; i -= RoundBytes {
+			offset := i
+			bucketCipher.XORKeyStreamAt(dst[offset:offset+RoundBytes], src[offset:offset+RoundBytes], uint64(offset))
+			if !bytes.Equal(expected[offset:offset+RoundBytes], dst[offset:offset+RoundBytes]) {
+				t.Fatalf("at %d, expected=%x, result=%x\n", offset, expected[offset:offset+RoundBytes], dst[offset:offset+RoundBytes])
+			}
+		}
+	})
+}
+
+func TestMarshalUnmarshalBinary(t *testing.T) {
+	key := bytes.Repeat([]byte{0x11}, 16)
+	iv := bytes.Repeat([]byte{0x22}, 16)
+	c, err := NewCipher(key, iv)
+	if err != nil {
+		t.Fatalf("NewCipher failed: %v", err)
+	}
+
+	// Marshal and Unmarshal should round-trip
+	data, err := c.MarshalBinary()
+	if err != nil {
+		t.Fatalf("MarshalBinary failed: %v", err)
+	}
+
+	var c2 encoding.BinaryMarshaler
+	if c2, err = UnmarshalCipher(data); err != nil {
+		t.Fatalf("UnmarshalBinary failed: %v", err)
+	}
+
+	// Marshal again and compare
+	data2, err := c2.MarshalBinary()
+	if err != nil {
+		t.Fatalf("MarshalBinary (after unmarshal) failed: %v", err)
+	}
+	if !bytes.Equal(data, data2) {
+		t.Errorf("MarshalBinary output mismatch after round-trip")
+	}
+}
+
+func TestUnmarshalBinary_InvalidMagic(t *testing.T) {
+	key := bytes.Repeat([]byte{0x11}, 16)
+	iv := bytes.Repeat([]byte{0x22}, 16)
+	c, _ := NewCipher(key, iv)
+	data, _ := c.MarshalBinary()
+	data[0] ^= 0xFF // corrupt magic
+
+	_, err := UnmarshalCipher(data)
+	if err == nil || err.Error() != "zuc: invalid eea state identifier" {
+		t.Errorf("expected invalid eea state identifier error, got %v", err)
+	}
+}
+
+func TestUnmarshalBinary_ShortData(t *testing.T) {
+	_, err := UnmarshalCipher([]byte("zuceea"))
+	if err == nil || err.Error() != "zuc: invalid eea state size" {
+		t.Errorf("expected invalid eea state size error, got %v", err)
+	}
+}
+
+func TestUnmarshalBinary_InvalidXLen(t *testing.T) {
+	key := bytes.Repeat([]byte{0x11}, 16)
+	iv := bytes.Repeat([]byte{0x22}, 16)
+	c, _ := NewCipher(key, iv)
+	data, _ := c.MarshalBinary()
+	// corrupt xLen to an invalid value (e.g. 9999)
+	xLenOffset := len(magic) + stateSize
+	copy(data[xLenOffset:], bytes.Repeat([]byte{0xFF}, 4))
+	_, err := UnmarshalCipher(data)
+	if err == nil || err.Error() != "zuc: invalid eea remaining bytes length" {
+		t.Errorf("expected invalid eea remaining bytes length error, got %v", err)
+	}
+}
+
+func TestUnmarshalBinary_InvalidStatesSize(t *testing.T) {
+	key := bytes.Repeat([]byte{0x11}, 16)
+	iv := bytes.Repeat([]byte{0x22}, 16)
+	c, _ := NewCipher(key, iv)
+	data, _ := c.MarshalBinary()
+	// Append a stray byte so the states size is not a multiple of stateSize
+	data = append(data, 0x00)
+	_, err := UnmarshalCipher(data)
+	if err == nil || err.Error() != "zuc: invalid eea states size" {
+		t.Errorf("expected invalid eea states size error, got %v", err)
+	}
+}
+
+func TestUnmarshalBinary_InvalidRemainingBytes(t *testing.T) {
+	key := bytes.Repeat([]byte{0x11}, 16)
+	iv := bytes.Repeat([]byte{0x22}, 16)
+	c, err := NewCipher(key, iv)
+	if err != nil {
+		t.Fatalf("NewCipher failed: %v", err)
+	}
+	data, err := c.MarshalBinary()
+	if err != nil {
+		t.Fatalf("MarshalBinary failed: %v", err)
+	}
+
+	// Modify xLen to a valid value > 0
+	xLenOffset := len(magic) + stateSize
+	data[xLenOffset+0] = 0
+	data[xLenOffset+1] = 0
+	data[xLenOffset+2] = 0
+	data[xLenOffset+3] = 8 // xLen = 8
+
+	// Truncate data so remaining bytes < xLen
+	truncated := data[:minMarshaledSize+4]
+
+	c2 := NewEmptyCipher()
+	err = c2.UnmarshalBinary(truncated)
+	if err == nil || err.Error() != "zuc: invalid eea remaining bytes" {
+		t.Errorf("expected error 'zuc: invalid eea remaining bytes', got %v", err)
+	}
 }
 
 func benchmarkStream(b *testing.B, buf []byte) {
@@ -206,9 +206,7 @@ func sliceForAppend(in []byte, n int) (head, tail []byte) {
 // followed by ByteEncode₁, according to FIPS 203, Algorithm 5.
 func ringCompressAndEncode1(s []byte, f ringElement) []byte {
 	s, b := sliceForAppend(s, encodingSize1)
-	for i := range b {
-		b[i] = 0
-	}
+	clear(b)
 	for i := range f {
 		b[i/8] |= uint8(compress(f[i], 1) << (i % 8))
 	}
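The ML-KEM change swaps a manual zeroing loop for the clear builtin (Go 1.21+), which zeroes every element of the slice in place; a tiny equivalence check:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := []byte{1, 2, 3, 4}
	b := []byte{1, 2, 3, 4}

	for i := range a { // old form: explicit loop
		a[i] = 0
	}
	clear(b) // new form: builtin clear zeroes the slice elements in place

	fmt.Println(bytes.Equal(a, b)) // true
}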
@@ -2,6 +2,11 @@
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
+// Package slhdsa implements the quantum-resistant stateless hash-based digital signature standard
+// SLH-DSA (based on SPHINCS+), as specified in [NIST FIPS 205].
+//
+// [NIST FIPS 205]: https://doi.org/10.6028/NIST.FIPS.205
+//
 //go:build go1.24
 
 package slhdsa
zuc/eea.go (+15 lines)
@@ -53,3 +53,18 @@ func NewCipherWithBucketSize(key, iv []byte, bucketSize int) (cipher.SeekableStr
 func NewEEACipherWithBucketSize(key []byte, count, bearer, direction uint32, bucketSize int) (cipher.SeekableStream, error) {
 	return zuc.NewEEACipherWithBucketSize(key, count, bearer, direction, bucketSize)
 }
+
+// NewEmptyEEACipher creates and returns a new empty ZUC-EEA cipher instance.
+// This function initializes an empty eea struct that can be used for
+// unmarshaling a previously saved state using the UnmarshalBinary method.
+// The returned cipher instance is not ready for encryption or decryption.
+func NewEmptyEEACipher() cipher.SeekableStream {
+	return zuc.NewEmptyCipher()
+}
+
+// UnmarshalEEACipher reconstructs a ZUC cipher instance from a serialized byte slice.
+// It attempts to deserialize the provided data into a seekable stream cipher
+// that can be used for encryption/decryption operations.
+func UnmarshalEEACipher(data []byte) (cipher.SeekableStream, error) {
+	return zuc.UnmarshalCipher(data)
+}
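Together with the internal marshaling support, these wrappers let a caller checkpoint a ZUC-EEA keystream position and resume it later. A hedged sketch, assuming the wrapper package path github.com/emmansun/gmsm/zuc and that the returned cipher implements encoding.BinaryMarshaler, as the tests below rely on:

package main

import (
	"encoding"
	"fmt"

	"github.com/emmansun/gmsm/zuc" // assumed import path of the wrapper package
)

func main() {
	key := make([]byte, 16) // demo only; use real key material in practice
	iv := make([]byte, 16)

	c, err := zuc.NewCipherWithBucketSize(key, iv, 128)
	if err != nil {
		panic(err)
	}
	buf := make([]byte, 256)
	c.XORKeyStreamAt(buf, buf, 0) // process the first 256 bytes

	// Checkpoint the cipher state.
	state, err := c.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Later, possibly elsewhere: restore and continue at offset 256.
	c2, err := zuc.UnmarshalEEACipher(state)
	if err != nil {
		panic(err)
	}
	c2.XORKeyStreamAt(buf, buf, 256)
	fmt.Println("resumed keystream at offset 256")
}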
@@ -3,6 +3,7 @@ package zuc
 import (
 	"bytes"
 	"crypto/cipher"
+	"encoding"
 	"encoding/hex"
 	"testing"
 
@@ -113,9 +114,12 @@ func TestXORStreamAt(t *testing.T) {
 			t.Errorf("expected=%x, result=%x\n", expected[32:64], dst[32:64])
 		}
 	}
+	data, _ := c.(encoding.BinaryMarshaler).MarshalBinary()
+	c2 := NewEmptyEEACipher()
+	c2.(encoding.BinaryUnmarshaler).UnmarshalBinary(data)
 	for i := 1; i < 4; i++ {
 		c.XORKeyStreamAt(dst[:i], src[:i], 0)
-		c.XORKeyStreamAt(dst[32:64], src[32:64], 32)
+		c2.XORKeyStreamAt(dst[32:64], src[32:64], 32)
 		if !bytes.Equal(dst[32:64], expected[32:64]) {
 			t.Errorf("expected=%x, result=%x\n", expected[32:64], dst[32:64])
 		}
@@ -128,8 +132,10 @@
 	if !bytes.Equal(dst[3:16], expected[3:16]) {
 		t.Errorf("expected=%x, result=%x\n", expected[3:16], dst[3:16])
 	}
+	data, _ := c.(encoding.BinaryMarshaler).MarshalBinary()
+	c2, _ := UnmarshalEEACipher(data)
 	c.XORKeyStreamAt(dst[:1], src[:1], 0)
-	c.XORKeyStreamAt(dst[4:16], src[4:16], 4)
+	c2.XORKeyStreamAt(dst[4:16], src[4:16], 4)
 	if !bytes.Equal(dst[4:16], expected[4:16]) {
 		t.Errorf("expected=%x, result=%x\n", expected[3:16], dst[3:16])
 	}