all: golint

This commit is contained in:
Sun Yimin 2025-06-19 16:37:53 +08:00 committed by GitHub
parent 4593cdb30b
commit fca59d1a92
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
20 changed files with 106 additions and 104 deletions

View File

@ -75,7 +75,8 @@ func NewCCMWithTagSize(cipher cipher.Block, tagSize int) (cipher.AEAD, error) {
return NewCCMWithNonceAndTagSize(cipher, ccmStandardNonceSize, tagSize)
}
// https://tools.ietf.org/html/rfc3610
// NewCCMWithNonceAndTagSize creates a new Counter with CBC-MAC (CCM) mode AEAD
// with the given nonce size and tag size.
func NewCCMWithNonceAndTagSize(cipher cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
if tagSize < ccmMinimumTagSize || tagSize > ccmBlockSize || tagSize&1 != 0 {
return nil, errors.New("cipher: incorrect tag size given to CCM")

View File

@ -41,7 +41,6 @@ func TestCCM(t *testing.T) {
continue
}
//func (c *ccm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error)
pt, err := sm4ccm.Open(nil, nonce, ct, ad)
if err != nil {
t.Fatal(err)

View File

@ -277,7 +277,7 @@ func (h *hctr) ctr(dst, src []byte, baseCtr *[blockSize]byte) {
if concCipher, ok := h.cipher.(concurrentBlocks); ok {
batchSize := concCipher.Concurrency() * blockSize
if len(src) >= batchSize {
var ctrs []byte = make([]byte, batchSize)
var ctrs = make([]byte, batchSize)
for len(src) >= batchSize {
for j := 0; j < concCipher.Concurrency(); j++ {
// (i)₂

View File

@ -13,16 +13,18 @@ import (
"github.com/emmansun/gmsm/sm4"
)
const DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST uint64 = 8
const DRBG_RESEED_COUNTER_INTERVAL_LEVEL2 uint64 = 1 << 10
const DRBG_RESEED_COUNTER_INTERVAL_LEVEL1 uint64 = 1 << 20
const (
reseedCounterIntervalLevelTest = uint64(8)
reseedCounterIntervalLevel2 = 1 << 10
reseedCounterIntervalLevel1 = 1 << 20
const DRBG_RESEED_TIME_INTERVAL_LEVEL_TEST = time.Duration(6) * time.Second
const DRBG_RESEED_TIME_INTERVAL_LEVEL2 = time.Duration(60) * time.Second
const DRBG_RESEED_TIME_INTERVAL_LEVEL1 = time.Duration(600) * time.Second
reseedTimeIntervalLevelTest = time.Duration(6) * time.Second
reseedTimeIntervalLevel2 = time.Duration(60) * time.Second
reseedTimeIntervalLevel1 = time.Duration(600) * time.Second
const MAX_BYTES = 1 << 27
const MAX_BYTES_PER_GENERATE = 1 << 11
maxBytes = 1 << 27
maxBytesPerGenerate = 1 << 11
)
var ErrReseedRequired = errors.New("drbg: reseed reuqired")
@ -245,14 +247,14 @@ func (hd *BaseDrbg) setSecurityLevel(securityLevel SecurityLevel) {
hd.securityLevel = securityLevel
switch securityLevel {
case SECURITY_LEVEL_TWO:
hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL2
hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL2
hd.reseedIntervalInCounter = reseedCounterIntervalLevel2
hd.reseedIntervalInTime = reseedTimeIntervalLevel2
case SECURITY_LEVEL_TEST:
hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST
hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL_TEST
hd.reseedIntervalInCounter = reseedCounterIntervalLevelTest
hd.reseedIntervalInTime = reseedTimeIntervalLevelTest
default:
hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL1
hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL1
hd.reseedIntervalInCounter = reseedCounterIntervalLevel1
hd.reseedIntervalInTime = reseedTimeIntervalLevel1
}
}

View File

@ -13,7 +13,7 @@ func TestGmCtrDrbgPrng(t *testing.T) {
t.Fatal(err)
}
data := make([]byte, 33)
for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
n, err := prng.Read(data)
if err != nil {
t.Fatal(err)
@ -31,7 +31,7 @@ func TestGmCtrDrbgPrngReseedCase(t *testing.T) {
}
data := make([]byte, 64)
for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
for j := 0; j < 64; j++ {
data[j] = 0
}
@ -53,12 +53,12 @@ func TestNistCtrDrbgPrng(t *testing.T) {
if err != nil {
t.Fatal(err)
}
data := make([]byte, MAX_BYTES_PER_GENERATE+1)
data := make([]byte, maxBytesPerGenerate+1)
n, err := prng.Read(data)
if err != nil {
t.Fatal(err)
}
if n != MAX_BYTES_PER_GENERATE+1 {
if n != maxBytesPerGenerate+1 {
t.Errorf("not got enough random bytes")
}
}
@ -69,7 +69,7 @@ func TestGmHashDrbgPrng(t *testing.T) {
t.Fatal(err)
}
data := make([]byte, 33)
for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
n, err := prng.Read(data)
if err != nil {
t.Fatal(err)
@ -85,12 +85,12 @@ func TestNistHashDrbgPrng(t *testing.T) {
if err != nil {
t.Fatal(err)
}
data := make([]byte, MAX_BYTES_PER_GENERATE+1)
data := make([]byte, maxBytesPerGenerate+1)
n, err := prng.Read(data)
if err != nil {
t.Fatal(err)
}
if n != MAX_BYTES_PER_GENERATE+1 {
if n != maxBytesPerGenerate+1 {
t.Errorf("not got enough random bytes")
}
}
@ -101,12 +101,12 @@ func TestNistHmacDrbgPrng(t *testing.T) {
if err != nil {
t.Fatal(err)
}
data := make([]byte, MAX_BYTES_PER_GENERATE+1)
data := make([]byte, maxBytesPerGenerate+1)
n, err := prng.Read(data)
if err != nil {
t.Fatal(err)
}
if n != MAX_BYTES_PER_GENERATE+1 {
if n != maxBytesPerGenerate+1 {
t.Errorf("not got enough random bytes")
}
}

View File

@ -26,16 +26,16 @@ func NewCtrDrbg(cipherProvider func(key []byte) (cipher.Block, error), keyLen in
hd.setSecurityLevel(securityLevel)
// here for the min length, we just check <=0 now
if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= maxBytes {
return nil, errors.New("drbg: invalid entropy length")
}
// here for the min length, we just check <=0 now
if len(nonce) == 0 || (hd.gm && len(nonce) < 16) || len(nonce) >= MAX_BYTES>>1 {
if len(nonce) == 0 || (hd.gm && len(nonce) < 16) || len(nonce) >= maxBytes>>1 {
return nil, errors.New("drbg: invalid nonce length")
}
if len(personalization) >= MAX_BYTES {
if len(personalization) >= maxBytes {
return nil, errors.New("drbg: personalization is too long")
}
@ -75,13 +75,13 @@ func NewGMCtrDrbg(securityLevel SecurityLevel, entropy, nonce, personalization [
return NewCtrDrbg(sm4.NewCipher, 16, securityLevel, true, entropy, nonce, personalization)
}
func (hd *CtrDrbg) Reseed(entropy, additional []byte) error {
func (cd *CtrDrbg) Reseed(entropy, additional []byte) error {
// here for the min length, we just check <=0 now
if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || (cd.gm && len(entropy) < 32) || len(entropy) >= maxBytes {
return errors.New("drbg: invalid entropy length")
}
if len(additional) >= MAX_BYTES {
if len(additional) >= maxBytes {
return errors.New("drbg: additional input too long")
}
@ -95,37 +95,37 @@ func (hd *CtrDrbg) Reseed(entropy, additional []byte) error {
copy(seedMaterial[len(entropy):], additional)
}
// seed_material = Block_Cipher_df(seed_material, seed_length)
seedMaterial = hd.derive(seedMaterial, hd.seedLength)
seedMaterial = cd.derive(seedMaterial, cd.seedLength)
// CTR_DRBG_Update(seed_material, Key, V)
hd.update(seedMaterial)
cd.update(seedMaterial)
hd.reseedCounter = 1
hd.reseedTime = time.Now()
cd.reseedCounter = 1
cd.reseedTime = time.Now()
return nil
}
func (hd *CtrDrbg) newBlockCipher(key []byte) cipher.Block {
block, err := hd.cipherProvider(key)
func (cd *CtrDrbg) newBlockCipher(key []byte) cipher.Block {
block, err := cd.cipherProvider(key)
if err != nil {
panic(err)
}
return block
}
func (hd *CtrDrbg) MaxBytesPerRequest() int {
if hd.gm {
return len(hd.v)
func (cd *CtrDrbg) MaxBytesPerRequest() int {
if cd.gm {
return len(cd.v)
}
return MAX_BYTES_PER_GENERATE
return maxBytesPerGenerate
}
// Generate CTR DRBG pseudorandom bits generate process.
func (hd *CtrDrbg) Generate(out, additional []byte) error {
if hd.NeedReseed() {
func (cd *CtrDrbg) Generate(out, additional []byte) error {
if cd.NeedReseed() {
return ErrReseedRequired
}
outlen := len(hd.v)
if (hd.gm && len(out) > outlen) || (!hd.gm && len(out) > MAX_BYTES_PER_GENERATE) {
outlen := len(cd.v)
if (cd.gm && len(out) > outlen) || (!cd.gm && len(out) > maxBytesPerGenerate) {
return errors.New("drbg: too many bytes requested")
}
@ -133,24 +133,24 @@ func (hd *CtrDrbg) Generate(out, additional []byte) error {
// additional_input = Block_Cipher_df(additional_input, seed_length)
// CTR_DRBG_Update(additional_input, Key, V)
if len(additional) > 0 {
additional = hd.derive(additional, hd.seedLength)
hd.update(additional)
additional = cd.derive(additional, cd.seedLength)
cd.update(additional)
}
block := hd.newBlockCipher(hd.key)
block := cd.newBlockCipher(cd.key)
temp := make([]byte, outlen)
m := len(out)
limit := uint64(m+outlen-1) / uint64(outlen)
for i := range int(limit) {
// V = (V + 1) mod 2^outlen)
addOne(hd.v, outlen)
addOne(cd.v, outlen)
// output_block = Encrypt(Key, V)
block.Encrypt(temp, hd.v)
block.Encrypt(temp, cd.v)
copy(out[i*outlen:], temp)
}
hd.update(additional)
hd.reseedCounter++
cd.update(additional)
cd.reseedCounter++
return nil
}

View File

@ -32,16 +32,16 @@ func NewHashDrbg(newHash func() hash.Hash, securityLevel SecurityLevel, gm bool,
hd.hashSize = md.Size()
// here for the min length, we just check <=0 now
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
return nil, errors.New("drbg: invalid entropy length")
}
// here for the min length, we just check <=0 now
if len(nonce) == 0 || (hd.gm && len(nonce) < hd.hashSize/2) || len(nonce) >= MAX_BYTES>>1 {
if len(nonce) == 0 || (hd.gm && len(nonce) < hd.hashSize/2) || len(nonce) >= maxBytes>>1 {
return nil, errors.New("drbg: invalid nonce length")
}
if len(personalization) >= MAX_BYTES {
if len(personalization) >= maxBytes {
return nil, errors.New("drbg: personalization is too long")
}
@ -91,11 +91,11 @@ func NewGMHashDrbg(securityLevel SecurityLevel, entropy, nonce, personalization
// Reseed hash DRBG reseed process. GM/T 0105-2021 has a little different with NIST.
func (hd *HashDrbg) Reseed(entropy, additional []byte) error {
// here for the min length, we just check <=0 now
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
return errors.New("drbg: invalid entropy length")
}
if len(additional) >= MAX_BYTES {
if len(additional) >= maxBytes {
return errors.New("drbg: additional input too long")
}
seedMaterial := make([]byte, len(entropy)+hd.seedLength+len(additional)+1)
@ -154,7 +154,7 @@ func (hd *HashDrbg) MaxBytesPerRequest() int {
if hd.gm {
return hd.hashSize
}
return MAX_BYTES_PER_GENERATE
return maxBytesPerGenerate
}
// Generate hash DRBG pseudorandom bits process. GM/T 0105-2021 has a little different with NIST.
@ -163,7 +163,7 @@ func (hd *HashDrbg) Generate(b, additional []byte) error {
if hd.NeedReseed() {
return ErrReseedRequired
}
if (hd.gm && len(b) > hd.hashSize) || (!hd.gm && len(b) > MAX_BYTES_PER_GENERATE) {
if (hd.gm && len(b) > hd.hashSize) || (!hd.gm && len(b) > maxBytesPerGenerate) {
return errors.New("drbg: too many bytes requested")
}
md := hd.newHash()

View File

@ -29,16 +29,16 @@ func NewHmacDrbg(newHash func() hash.Hash, securityLevel SecurityLevel, gm bool,
hd.hashSize = md.Size()
// here for the min length, we just check <=0 now
if len(entropy) == 0 || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || len(entropy) >= maxBytes {
return nil, errors.New("drbg: invalid entropy length")
}
// here for the min length, we just check <=0 now
if len(nonce) == 0 || len(nonce) >= MAX_BYTES>>1 {
if len(nonce) == 0 || len(nonce) >= maxBytes>>1 {
return nil, errors.New("drbg: invalid nonce length")
}
if len(personalization) >= MAX_BYTES {
if len(personalization) >= maxBytes {
return nil, errors.New("drbg: personalization is too long")
}
@ -95,11 +95,11 @@ func (hd *HmacDrbg) Generate(output, additional []byte) error {
// reference to NIST.SP.800-90Ar1.pdf section 10.1.2.4
func (hd *HmacDrbg) Reseed(entropy, additional []byte) error {
// here for the min length, we just check <=0 now
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
return errors.New("drbg: invalid entropy length")
}
if len(additional) >= MAX_BYTES {
if len(additional) >= maxBytes {
return errors.New("drbg: additional input too long")
}
hd.update(entropy, additional)
@ -109,7 +109,7 @@ func (hd *HmacDrbg) Reseed(entropy, additional []byte) error {
}
func (hd *HmacDrbg) MaxBytesPerRequest() int {
return MAX_BYTES_PER_GENERATE
return maxBytesPerGenerate
}
// The HMAC_DRBG_Update function updates the internal state of

View File

@ -135,7 +135,7 @@ func (c *xtsEncrypter) CryptBlocks(ciphertext, plaintext []byte) {
if concCipher, ok := c.b.(concurrentBlocks); ok {
batchSize := concCipher.Concurrency() * blockSize
var tweaks []byte = make([]byte, batchSize)
var tweaks = make([]byte, batchSize)
for len(plaintext) >= batchSize {
doubleTweaks(&c.tweak, tweaks, c.isGB)
subtle.XORBytes(ciphertext, plaintext, tweaks)
@ -194,7 +194,7 @@ func (c *xtsDecrypter) CryptBlocks(plaintext, ciphertext []byte) {
if concCipher, ok := c.b.(concurrentBlocks); ok {
batchSize := concCipher.Concurrency() * blockSize
var tweaks []byte = make([]byte, batchSize)
var tweaks = make([]byte, batchSize)
for len(ciphertext) >= batchSize {
doubleTweaks(&c.tweak, tweaks, c.isGB)

View File

@ -21,7 +21,7 @@ func p256OrdSqr(res, in *p256OrdElement, n int)
// into the Montgomery domain.
var RR = &p256OrdElement{0x901192af7c114f20, 0x3464504ade6fa2fa, 0x620fc84c3affe0d4, 0x1eb5e412a22b3d3b}
// P256OrdInverse, sets out to in⁻¹ mod org(G). If in is zero, out will be zero.
// P256OrdInverse sets out to in⁻¹ mod org(G). If in is zero, out will be zero.
// n-2 =
// 1111111111111111111111111111111011111111111111111111111111111111
// 1111111111111111111111111111111111111111111111111111111111111111

View File

@ -35,7 +35,7 @@ var p256Zero = p256Element{}
var p256P = p256Element{0xffffffffffffffff, 0xffffffff00000000,
0xffffffffffffffff, 0xfffffffeffffffff}
// P256Point is a P-256 point. The zero value should not be assumed to be valid
// SM2P256Point is a SM2 P-256 point. The zero value should not be assumed to be valid
// (although it is in this implementation).
type SM2P256Point struct {
// (X:Y:Z) are Jacobian coordinates where x = X/Z² and y = Y/Z³. The point

View File

@ -19,7 +19,7 @@ import (
"github.com/emmansun/gmsm/internal/byteorder"
)
// The rc2 block size in bytes
// BlockSize is the rc2 block size in bytes
const BlockSize = 8
type rc2Cipher struct {

View File

@ -64,7 +64,7 @@ var (
ErrPBEDecryption = errors.New("pbes: decryption error, please verify the password and try again")
)
// PBKDF2Opts contains algorithm identifiers and related parameters for PBKDF2 key derivation function.
// PBES2Params contains algorithm identifiers and related parameters for PBKDF2 key derivation function.
//
// PBES2-params ::= SEQUENCE {
// keyDerivationFunc AlgorithmIdentifier {{PBES2-KDFs}},
@ -95,7 +95,7 @@ var DefaultOpts = &PBES2Opts{
pbesOID: oidPBES2,
}
// NewPBES2Encrypter returns a new PBES2Encrypter with the given cipher and KDF options.
// NewPBESEncrypter returns a new PBESEncrypter with the given cipher and KDF options.
func NewPBESEncrypter(cipher Cipher, kdfOpts KDFOpts) PBESEncrypter {
return &PBES2Opts{
Cipher: cipher,

View File

@ -23,11 +23,11 @@ func (sk *PrivateKey) Sign(message, context, addRand []byte) ([]byte, error) {
return nil, errors.New("slhdsa: addrnd should be nil (deterministic variant) or of length n")
}
ctxLen := len(context)
if ctxLen > MAX_CONTEXT_LEN {
if ctxLen > maxContextLen {
return nil, errors.New("slhdsa: context too long")
}
var mPrefix [MAX_CONTEXT_LEN + 2]byte
var mPrefix [maxContextLen + 2]byte
mPrefix[1] = byte(ctxLen)
if ctxLen > 0 {
@ -50,7 +50,7 @@ func (sk *PrivateKey) signInternal(msgPrefix, message, addRand []byte) ([]byte,
signature := signatureHead[sk.params.n:]
// compute message digest
var digest [MAX_M]byte
var digest [maxM]byte
sk.h.hMsg(&sk.PublicKey, R, msgPrefix, message, digest[:])
// Grab the first mdLen() bytes of digest to use in fors_sign()
mdLen := sk.params.mdLen()
@ -73,7 +73,7 @@ func (sk *PrivateKey) signInternal(msgPrefix, message, addRand []byte) ([]byte,
// generate the FORS signature and append it to the SLH-DSA signature
sk.forsSign(md, adrs, signature)
var pkFors [MAX_N]byte
var pkFors [maxN]byte
// calculate the FORS public key using the generated FORS signature
signature = sk.forsPkFromSig(md, signature, adrs, pkFors[:])
// generate ht signature and append to the SLH-DSA signature
@ -89,12 +89,12 @@ func (pk *PublicKey) Verify(signature, message, context []byte) bool {
if len(message) == 0 {
return false
}
if len(context) > MAX_CONTEXT_LEN {
if len(context) > maxContextLen {
return false
}
ctxLen := len(context)
var msgPrefix [MAX_CONTEXT_LEN + 2]byte
var msgPrefix [maxContextLen + 2]byte
msgPrefix[1] = byte(ctxLen)
if ctxLen > 0 {
copy(msgPrefix[2:], context)
@ -112,7 +112,7 @@ func (pk *PublicKey) verifyInternal(signature []byte, msgPrefix []byte, message
signature = signature[pk.params.n:]
// compute message digest
var digest [MAX_M]byte
var digest [maxM]byte
pk.h.hMsg(pk, R, msgPrefix, message, digest[:])
// Grab the first mdLen() bytes of digest to use in fors_sign()
mdLen := pk.params.mdLen()
@ -130,7 +130,7 @@ func (pk *PublicKey) verifyInternal(signature []byte, msgPrefix []byte, message
adrs.setTypeAndClear(AddressTypeFORSTree)
adrs.setKeyPairAddress(leafIdx)
var pkFors [MAX_N]byte
var pkFors [maxN]byte
// calculate the FORS public key using the given FORS signature
signature = pk.forsPkFromSig(md, signature, adrs, pkFors[:])

View File

@ -12,7 +12,7 @@ package slhdsa
//
// See FIPS 205 Algorithm 16 fors_sign
func (sk *PrivateKey) forsSign(md []byte, adrs adrsOperations, sigFors []byte) {
var indices [MAX_K]uint32
var indices [maxK]uint32
// split md into k a-bit values, each of which is interpreted as an integer between 0 and 2^a-1.
base2b(md, sk.params.a, indices[:sk.params.k])
@ -42,7 +42,7 @@ func (sk *PrivateKey) forsSign(md []byte, adrs adrsOperations, sigFors []byte) {
//
// See FIPS 205 Algorithm 17 fors_pkFromSig
func (pk *PublicKey) forsPkFromSig(md, signature []byte, adrs adrsOperations, out []byte) []byte {
var indices [MAX_K]uint32
var indices [maxK]uint32
base2b(md, pk.params.a, indices[:pk.params.k])
twoPowerA := uint32(1 << pk.params.a)
@ -101,7 +101,7 @@ func (sk *PrivateKey) forsNode(nodeID, layer uint32, adrs adrsOperations, out []
} else {
// otherwise, it computes the roots of the left subtree and right subtree
// and hashes them together.
var lnode, rnode [MAX_N]byte
var lnode, rnode [maxN]byte
sk.forsNode(nodeID*2, layer-1, adrs, lnode[:])
sk.forsNode(nodeID*2+1, layer-1, adrs, rnode[:])
adrs.setTreeHeight(layer)

View File

@ -17,7 +17,7 @@ func (sk *PrivateKey) htSign(pkFors []byte, treeIdx uint64, leafIdx uint32, sign
sigLenPerLayer := (sk.params.hm + sk.params.len) * sk.params.n
mask := sk.params.leafIdxMask()
var rootBuf [MAX_N]byte
var rootBuf [maxN]byte
root := rootBuf[:sk.params.n]
copy(root, pkFors)
tmpBuf := make([]byte, sk.params.n*sk.params.len)
@ -46,7 +46,7 @@ func (pk *PublicKey) htVerify(pkFors []byte, signature []byte, treeIdx uint64, l
sigLenPerLayer := (pk.params.hm + pk.params.len) * pk.params.n
mask := pk.params.leafIdxMask()
var rootBuf [MAX_N]byte
var rootBuf [maxN]byte
root := rootBuf[:pk.params.n]
copy(root, pkFors)
tmpBuf := make([]byte, pk.params.n*pk.params.len)

View File

@ -19,8 +19,8 @@ import (
)
type PublicKey struct {
seed [MAX_N]byte
root [MAX_N]byte
seed [maxN]byte
root [maxN]byte
params *params
md hash.Hash
mdBig hash.Hash
@ -32,14 +32,14 @@ type PublicKey struct {
type PrivateKey struct {
PublicKey
seed [MAX_N]byte
prf [MAX_N]byte
seed [maxN]byte
prf [maxN]byte
}
// Bytes returns the byte representation of the PublicKey.
// It combines the seed and root fields of the PublicKey.
func (pk *PublicKey) Bytes() []byte {
var key [2 * MAX_N]byte
var key [2 * maxN]byte
copy(key[:], pk.seed[:pk.params.n])
copy(key[pk.params.n:], pk.root[:pk.params.n])
return key[:2*pk.params.n]
@ -56,7 +56,7 @@ func (pk *PublicKey) Equal(x any) bool {
// Bytes serializes the PrivateKey into a byte slice.
func (sk *PrivateKey) Bytes() []byte {
var key [4 * MAX_N]byte
var key [4 * maxN]byte
keySlice := key[:]
copy(keySlice, sk.seed[:sk.params.n])
keySlice = keySlice[sk.params.n:]
@ -68,7 +68,7 @@ func (sk *PrivateKey) Bytes() []byte {
return key[:4*sk.params.n]
}
// PublicKey returns the public key of the private key.
// Public returns the public key of the private key.
func (sk *PrivateKey) Public() *PublicKey {
return &sk.PublicKey
}

View File

@ -9,14 +9,14 @@ package slhdsa
import "io"
const (
MAX_N = 32
MAX_M = 49
MAX_K = 35
MAX_A = 9
MAX_K_TIMES_A = MAX_K * MAX_A
MAX_WOTS_LEN = 2*MAX_N + 3
maxN = 32
maxM = 49
maxK = 35
maxA = 9
maxKTimesA = maxK * maxA
maxWotsLen = 2*maxN + 3
MAX_CONTEXT_LEN = 255
maxContextLen = 255
)
type params struct {

View File

@ -48,7 +48,7 @@ func (sk *PrivateKey) wotsPkGen(out, tmpBuf []byte, addr adrsOperations) {
//
// See FIPS 205 Algorithm 10 wots_sign
func (sk *PrivateKey) wotsSign(msg []byte, adrs adrsOperations, sigWots []byte) {
var msgAndCsum [MAX_WOTS_LEN]byte
var msgAndCsum [maxWotsLen]byte
// convert message to base w=16
bytes2nibbles(msg, msgAndCsum[:])
// compute checksum
@ -84,7 +84,7 @@ func (sk *PrivateKey) wotsSign(msg []byte, adrs adrsOperations, sigWots []byte)
//
// See FIPS 205 Algorithm 8 wots_pkFromSig
func (pk *PublicKey) wotsPkFromSig(signature, msg, tmpBuf []byte, adrs adrsOperations, out []byte) {
var msgAndCsum [MAX_WOTS_LEN]byte
var msgAndCsum [maxWotsLen]byte
// convert message to base w=16
bytes2nibbles(msg, msgAndCsum[:])
// compute checksum

View File

@ -17,7 +17,7 @@ func (sk *PrivateKey) xmssNode(out, tmpBuf []byte, i, z uint32, adrs adrsOperati
sk.wotsPkGen(out, tmpBuf, adrs)
} else {
// otherwise, it computes the root of the subtree by hashing the two child nodes
var lnode, rnode [MAX_N]byte
var lnode, rnode [maxN]byte
sk.xmssNode(lnode[:], tmpBuf, 2*i, z-1, adrs)
sk.xmssNode(rnode[:], tmpBuf, 2*i+1, z-1, adrs)
adrs.setTypeAndClear(AddressTypeTree)