mirror of https://github.com/emmansun/gmsm.git (synced 2025-06-28 00:13:26 +08:00)

all: golint

This commit is contained in:
parent 4593cdb30b
commit fca59d1a92
@@ -75,7 +75,8 @@ func NewCCMWithTagSize(cipher cipher.Block, tagSize int) (cipher.AEAD, error) {
 return NewCCMWithNonceAndTagSize(cipher, ccmStandardNonceSize, tagSize)
 }
 
-// https://tools.ietf.org/html/rfc3610
+// NewCCMWithNonceAndTagSize creates a new Counter with CBC-MAC (CCM) mode AEAD
+// with the given nonce size and tag size.
 func NewCCMWithNonceAndTagSize(cipher cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
 if tagSize < ccmMinimumTagSize || tagSize > ccmBlockSize || tagSize&1 != 0 {
 return nil, errors.New("cipher: incorrect tag size given to CCM")
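Aside: the comment change in this hunk follows the golint convention that a doc comment on an exported identifier should begin with that identifier's name. A minimal sketch with a hypothetical function, not code from this commit:

    // Sum returns the arithmetic sum of a and b. golint expects the doc
    // comment to start with "Sum", the name of the exported function;
    // a bare reference link on its own line would be flagged instead.
    func Sum(a, b int) int {
        return a + b
    }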
@@ -41,7 +41,6 @@ func TestCCM(t *testing.T) {
 continue
 }
 
-//func (c *ccm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error)
 pt, err := sm4ccm.Open(nil, nonce, ct, ad)
 if err != nil {
 t.Fatal(err)
@@ -277,7 +277,7 @@ func (h *hctr) ctr(dst, src []byte, baseCtr *[blockSize]byte) {
 if concCipher, ok := h.cipher.(concurrentBlocks); ok {
 batchSize := concCipher.Concurrency() * blockSize
 if len(src) >= batchSize {
-var ctrs []byte = make([]byte, batchSize)
+var ctrs = make([]byte, batchSize)
 for len(src) >= batchSize {
 for j := 0; j < concCipher.Concurrency(); j++ {
 // (i)₂
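Aside: golint flags "var ctrs []byte = make([]byte, batchSize)" because the declared type just repeats what make already returns. A minimal sketch of the equivalent forms, using hypothetical buffer names:

    var buf []byte = make([]byte, 64) // flagged: should omit type []byte from declaration
    var buf2 = make([]byte, 64)       // type inferred, the form adopted in this commit
    buf3 := make([]byte, 64)          // short declaration, also idiomatic inside a function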
@@ -13,16 +13,18 @@ import (
 "github.com/emmansun/gmsm/sm4"
 )
 
-const DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST uint64 = 8
-const DRBG_RESEED_COUNTER_INTERVAL_LEVEL2 uint64 = 1 << 10
-const DRBG_RESEED_COUNTER_INTERVAL_LEVEL1 uint64 = 1 << 20
+const (
+reseedCounterIntervalLevelTest = uint64(8)
+reseedCounterIntervalLevel2 = 1 << 10
+reseedCounterIntervalLevel1 = 1 << 20
 
-const DRBG_RESEED_TIME_INTERVAL_LEVEL_TEST = time.Duration(6) * time.Second
-const DRBG_RESEED_TIME_INTERVAL_LEVEL2 = time.Duration(60) * time.Second
-const DRBG_RESEED_TIME_INTERVAL_LEVEL1 = time.Duration(600) * time.Second
+reseedTimeIntervalLevelTest = time.Duration(6) * time.Second
+reseedTimeIntervalLevel2 = time.Duration(60) * time.Second
+reseedTimeIntervalLevel1 = time.Duration(600) * time.Second
 
-const MAX_BYTES = 1 << 27
-const MAX_BYTES_PER_GENERATE = 1 << 11
+maxBytes = 1 << 27
+maxBytesPerGenerate = 1 << 11
+)
 
 var ErrReseedRequired = errors.New("drbg: reseed reuqired")
 
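Aside: the renames above apply golint's advice against ALL_CAPS and underscores in Go names; unexported constants use mixedCaps, and the commit also folds them into one grouped const block. A minimal sketch with hypothetical names:

    // Before: const MAX_RETRY_COUNT = 3 (golint: don't use underscores in Go names)
    const (
        maxRetryCount = 3       // unexported, mixedCaps
        maxBufferSize = 1 << 20 // related constants grouped in one block
    )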
@@ -245,18 +247,18 @@ func (hd *BaseDrbg) setSecurityLevel(securityLevel SecurityLevel) {
 hd.securityLevel = securityLevel
 switch securityLevel {
 case SECURITY_LEVEL_TWO:
-hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL2
-hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL2
+hd.reseedIntervalInCounter = reseedCounterIntervalLevel2
+hd.reseedIntervalInTime = reseedTimeIntervalLevel2
 case SECURITY_LEVEL_TEST:
-hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST
-hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL_TEST
+hd.reseedIntervalInCounter = reseedCounterIntervalLevelTest
+hd.reseedIntervalInTime = reseedTimeIntervalLevelTest
 default:
-hd.reseedIntervalInCounter = DRBG_RESEED_COUNTER_INTERVAL_LEVEL1
-hd.reseedIntervalInTime = DRBG_RESEED_TIME_INTERVAL_LEVEL1
+hd.reseedIntervalInCounter = reseedCounterIntervalLevel1
+hd.reseedIntervalInTime = reseedTimeIntervalLevel1
 }
 }
 
 // Set security_strength to the lowest security strength greater than or equal to
 // requested_instantiation_security_strength from the set {112, 128, 192, 256}.
 func selectSecurityStrength(requested int) int {
 switch {
@@ -13,7 +13,7 @@ func TestGmCtrDrbgPrng(t *testing.T) {
 t.Fatal(err)
 }
 data := make([]byte, 33)
-for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
+for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
 n, err := prng.Read(data)
 if err != nil {
 t.Fatal(err)
@@ -31,7 +31,7 @@ func TestGmCtrDrbgPrngReseedCase(t *testing.T) {
 }
 
 data := make([]byte, 64)
-for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
+for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
 for j := 0; j < 64; j++ {
 data[j] = 0
 }
@@ -53,12 +53,12 @@ func TestNistCtrDrbgPrng(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-data := make([]byte, MAX_BYTES_PER_GENERATE+1)
+data := make([]byte, maxBytesPerGenerate+1)
 n, err := prng.Read(data)
 if err != nil {
 t.Fatal(err)
 }
-if n != MAX_BYTES_PER_GENERATE+1 {
+if n != maxBytesPerGenerate+1 {
 t.Errorf("not got enough random bytes")
 }
 }
@@ -69,7 +69,7 @@ func TestGmHashDrbgPrng(t *testing.T) {
 t.Fatal(err)
 }
 data := make([]byte, 33)
-for i := 0; i < int(DRBG_RESEED_COUNTER_INTERVAL_LEVEL_TEST+1); i++ {
+for i := 0; i < int(reseedCounterIntervalLevelTest+1); i++ {
 n, err := prng.Read(data)
 if err != nil {
 t.Fatal(err)
@@ -85,12 +85,12 @@ func TestNistHashDrbgPrng(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-data := make([]byte, MAX_BYTES_PER_GENERATE+1)
+data := make([]byte, maxBytesPerGenerate+1)
 n, err := prng.Read(data)
 if err != nil {
 t.Fatal(err)
 }
-if n != MAX_BYTES_PER_GENERATE+1 {
+if n != maxBytesPerGenerate+1 {
 t.Errorf("not got enough random bytes")
 }
 }
@@ -101,12 +101,12 @@ func TestNistHmacDrbgPrng(t *testing.T) {
 if err != nil {
 t.Fatal(err)
 }
-data := make([]byte, MAX_BYTES_PER_GENERATE+1)
+data := make([]byte, maxBytesPerGenerate+1)
 n, err := prng.Read(data)
 if err != nil {
 t.Fatal(err)
 }
-if n != MAX_BYTES_PER_GENERATE+1 {
+if n != maxBytesPerGenerate+1 {
 t.Errorf("not got enough random bytes")
 }
 }
@@ -26,16 +26,16 @@ func NewCtrDrbg(cipherProvider func(key []byte) (cipher.Block, error), keyLen in
 hd.setSecurityLevel(securityLevel)
 
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= maxBytes {
 return nil, errors.New("drbg: invalid entropy length")
 }
 
 // here for the min length, we just check <=0 now
-if len(nonce) == 0 || (hd.gm && len(nonce) < 16) || len(nonce) >= MAX_BYTES>>1 {
+if len(nonce) == 0 || (hd.gm && len(nonce) < 16) || len(nonce) >= maxBytes>>1 {
 return nil, errors.New("drbg: invalid nonce length")
 }
 
-if len(personalization) >= MAX_BYTES {
+if len(personalization) >= maxBytes {
 return nil, errors.New("drbg: personalization is too long")
 }
 
@@ -75,13 +75,13 @@ func NewGMCtrDrbg(securityLevel SecurityLevel, entropy, nonce, personalization [
 return NewCtrDrbg(sm4.NewCipher, 16, securityLevel, true, entropy, nonce, personalization)
 }
 
-func (hd *CtrDrbg) Reseed(entropy, additional []byte) error {
+func (cd *CtrDrbg) Reseed(entropy, additional []byte) error {
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || (hd.gm && len(entropy) < 32) || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || (cd.gm && len(entropy) < 32) || len(entropy) >= maxBytes {
 return errors.New("drbg: invalid entropy length")
 }
 
-if len(additional) >= MAX_BYTES {
+if len(additional) >= maxBytes {
 return errors.New("drbg: additional input too long")
 }
 
@@ -95,37 +95,37 @@ func (hd *CtrDrbg) Reseed(entropy, additional []byte) error {
 copy(seedMaterial[len(entropy):], additional)
 }
 // seed_material = Block_Cipher_df(seed_material, seed_length)
-seedMaterial = hd.derive(seedMaterial, hd.seedLength)
+seedMaterial = cd.derive(seedMaterial, cd.seedLength)
 // CTR_DRBG_Updae(seed_material, Key, V)
-hd.update(seedMaterial)
+cd.update(seedMaterial)
 
-hd.reseedCounter = 1
-hd.reseedTime = time.Now()
+cd.reseedCounter = 1
+cd.reseedTime = time.Now()
 return nil
 }
 
-func (hd *CtrDrbg) newBlockCipher(key []byte) cipher.Block {
-block, err := hd.cipherProvider(key)
+func (cd *CtrDrbg) newBlockCipher(key []byte) cipher.Block {
+block, err := cd.cipherProvider(key)
 if err != nil {
 panic(err)
 }
 return block
 }
 
-func (hd *CtrDrbg) MaxBytesPerRequest() int {
-if hd.gm {
-return len(hd.v)
+func (cd *CtrDrbg) MaxBytesPerRequest() int {
+if cd.gm {
+return len(cd.v)
 }
-return MAX_BYTES_PER_GENERATE
+return maxBytesPerGenerate
 }
 
 // Generate CTR DRBG pseudorandom bits generate process.
-func (hd *CtrDrbg) Generate(out, additional []byte) error {
-if hd.NeedReseed() {
+func (cd *CtrDrbg) Generate(out, additional []byte) error {
+if cd.NeedReseed() {
 return ErrReseedRequired
 }
-outlen := len(hd.v)
-if (hd.gm && len(out) > outlen) || (!hd.gm && len(out) > MAX_BYTES_PER_GENERATE) {
+outlen := len(cd.v)
+if (cd.gm && len(out) > outlen) || (!cd.gm && len(out) > maxBytesPerGenerate) {
 return errors.New("drbg: too many bytes requested")
 }
 
@@ -133,24 +133,24 @@ func (hd *CtrDrbg) Generate(out, additional []byte) error {
 // additional_input = Block_Cipher_df(additional_input, seed_length)
 // CTR_DRBG_Update(additional_input, Key, V)
 if len(additional) > 0 {
-additional = hd.derive(additional, hd.seedLength)
-hd.update(additional)
+additional = cd.derive(additional, cd.seedLength)
+cd.update(additional)
 }
 
-block := hd.newBlockCipher(hd.key)
+block := cd.newBlockCipher(cd.key)
 temp := make([]byte, outlen)
 
 m := len(out)
 limit := uint64(m+outlen-1) / uint64(outlen)
 for i := range int(limit) {
 // V = (V + 1) mod 2^outlen)
-addOne(hd.v, outlen)
+addOne(cd.v, outlen)
 // output_block = Encrypt(Key, V)
-block.Encrypt(temp, hd.v)
+block.Encrypt(temp, cd.v)
 copy(out[i*outlen:], temp)
 }
-hd.update(additional)
-hd.reseedCounter++
+cd.update(additional)
+cd.reseedCounter++
 return nil
 }
 
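Aside: the hd-to-cd receiver rename in the CtrDrbg hunks above reflects golint's guidance that a method receiver name should be a short abbreviation of its own type and be used consistently on every method; "hd" on *CtrDrbg read like the HashDrbg receiver. A minimal sketch with a hypothetical type:

    type counterDrbg struct{ reseeds uint64 }

    // The receiver is "cd" on all methods, derived from the type name.
    func (cd *counterDrbg) bump()         { cd.reseeds++ }
    func (cd *counterDrbg) count() uint64 { return cd.reseeds }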
@@ -32,16 +32,16 @@ func NewHashDrbg(newHash func() hash.Hash, securityLevel SecurityLevel, gm bool,
 hd.hashSize = md.Size()
 
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
 return nil, errors.New("drbg: invalid entropy length")
 }
 
 // here for the min length, we just check <=0 now
-if len(nonce) == 0 || (hd.gm && len(nonce) < hd.hashSize/2) || len(nonce) >= MAX_BYTES>>1 {
+if len(nonce) == 0 || (hd.gm && len(nonce) < hd.hashSize/2) || len(nonce) >= maxBytes>>1 {
 return nil, errors.New("drbg: invalid nonce length")
 }
 
-if len(personalization) >= MAX_BYTES {
+if len(personalization) >= maxBytes {
 return nil, errors.New("drbg: personalization is too long")
 }
 
@@ -91,11 +91,11 @@ func NewGMHashDrbg(securityLevel SecurityLevel, entropy, nonce, personalization
 // Reseed hash DRBG reseed process. GM/T 0105-2021 has a little different with NIST.
 func (hd *HashDrbg) Reseed(entropy, additional []byte) error {
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
 return errors.New("drbg: invalid entropy length")
 }
 
-if len(additional) >= MAX_BYTES {
+if len(additional) >= maxBytes {
 return errors.New("drbg: additional input too long")
 }
 seedMaterial := make([]byte, len(entropy)+hd.seedLength+len(additional)+1)
@@ -154,7 +154,7 @@ func (hd *HashDrbg) MaxBytesPerRequest() int {
 if hd.gm {
 return hd.hashSize
 }
-return MAX_BYTES_PER_GENERATE
+return maxBytesPerGenerate
 }
 
 // Generate hash DRBG pseudorandom bits process. GM/T 0105-2021 has a little different with NIST.
@@ -163,7 +163,7 @@ func (hd *HashDrbg) Generate(b, additional []byte) error {
 if hd.NeedReseed() {
 return ErrReseedRequired
 }
-if (hd.gm && len(b) > hd.hashSize) || (!hd.gm && len(b) > MAX_BYTES_PER_GENERATE) {
+if (hd.gm && len(b) > hd.hashSize) || (!hd.gm && len(b) > maxBytesPerGenerate) {
 return errors.New("drbg: too many bytes requested")
 }
 md := hd.newHash()
@@ -29,16 +29,16 @@ func NewHmacDrbg(newHash func() hash.Hash, securityLevel SecurityLevel, gm bool,
 hd.hashSize = md.Size()
 
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || len(entropy) >= maxBytes {
 return nil, errors.New("drbg: invalid entropy length")
 }
 
 // here for the min length, we just check <=0 now
-if len(nonce) == 0 || len(nonce) >= MAX_BYTES>>1 {
+if len(nonce) == 0 || len(nonce) >= maxBytes>>1 {
 return nil, errors.New("drbg: invalid nonce length")
 }
 
-if len(personalization) >= MAX_BYTES {
+if len(personalization) >= maxBytes {
 return nil, errors.New("drbg: personalization is too long")
 }
 
@@ -95,11 +95,11 @@ func (hd *HmacDrbg) Generate(output, additional []byte) error {
 // reference to NIST.SP.800-90Ar1.pdf section 10.1.2.4
 func (hd *HmacDrbg) Reseed(entropy, additional []byte) error {
 // here for the min length, we just check <=0 now
-if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= MAX_BYTES {
+if len(entropy) == 0 || (hd.gm && len(entropy) < hd.hashSize) || len(entropy) >= maxBytes {
 return errors.New("drbg: invalid entropy length")
 }
 
-if len(additional) >= MAX_BYTES {
+if len(additional) >= maxBytes {
 return errors.New("drbg: additional input too long")
 }
 hd.update(entropy, additional)
@@ -109,7 +109,7 @@ func (hd *HmacDrbg) Reseed(entropy, additional []byte) error {
 }
 
 func (hd *HmacDrbg) MaxBytesPerRequest() int {
-return MAX_BYTES_PER_GENERATE
+return maxBytesPerGenerate
 }
 
 // The HMAC_DRBG_Update function updates the internal state of
@@ -135,7 +135,7 @@ func (c *xtsEncrypter) CryptBlocks(ciphertext, plaintext []byte) {
 
 if concCipher, ok := c.b.(concurrentBlocks); ok {
 batchSize := concCipher.Concurrency() * blockSize
-var tweaks []byte = make([]byte, batchSize)
+var tweaks = make([]byte, batchSize)
 for len(plaintext) >= batchSize {
 doubleTweaks(&c.tweak, tweaks, c.isGB)
 subtle.XORBytes(ciphertext, plaintext, tweaks)
@@ -194,7 +194,7 @@ func (c *xtsDecrypter) CryptBlocks(plaintext, ciphertext []byte) {
 
 if concCipher, ok := c.b.(concurrentBlocks); ok {
 batchSize := concCipher.Concurrency() * blockSize
-var tweaks []byte = make([]byte, batchSize)
+var tweaks = make([]byte, batchSize)
 
 for len(ciphertext) >= batchSize {
 doubleTweaks(&c.tweak, tweaks, c.isGB)
@@ -21,7 +21,7 @@ func p256OrdSqr(res, in *p256OrdElement, n int)
 // into the Montgomery domain.
 var RR = &p256OrdElement{0x901192af7c114f20, 0x3464504ade6fa2fa, 0x620fc84c3affe0d4, 0x1eb5e412a22b3d3b}
 
-// P256OrdInverse, sets out to in⁻¹ mod org(G). If in is zero, out will be zero.
+// P256OrdInverse sets out to in⁻¹ mod org(G). If in is zero, out will be zero.
 // n-2 =
 // 1111111111111111111111111111111011111111111111111111111111111111
 // 1111111111111111111111111111111111111111111111111111111111111111
@@ -35,7 +35,7 @@ var p256Zero = p256Element{}
 var p256P = p256Element{0xffffffffffffffff, 0xffffffff00000000,
 0xffffffffffffffff, 0xfffffffeffffffff}
 
-// P256Point is a P-256 point. The zero value should not be assumed to be valid
+// SM2P256Point is a SM2 P-256 point. The zero value should not be assumed to be valid
 // (although it is in this implementation).
 type SM2P256Point struct {
 // (X:Y:Z) are Jacobian coordinates where x = X/Z² and y = Y/Z³. The point
@@ -19,7 +19,7 @@ import (
 "github.com/emmansun/gmsm/internal/byteorder"
 )
 
-// The rc2 block size in bytes
+// BlockSize is the rc2 block size in bytes
 const BlockSize = 8
 
 type rc2Cipher struct {
@@ -64,7 +64,7 @@ var (
 ErrPBEDecryption = errors.New("pbes: decryption error, please verify the password and try again")
 )
 
-// PBKDF2Opts contains algorithm identifiers and related parameters for PBKDF2 key derivation function.
+// PBES2Params contains algorithm identifiers and related parameters for PBKDF2 key derivation function.
 //
 // PBES2-params ::= SEQUENCE {
 // keyDerivationFunc AlgorithmIdentifier {{PBES2-KDFs}},
@@ -95,7 +95,7 @@ var DefaultOpts = &PBES2Opts{
 pbesOID: oidPBES2,
 }
 
-// NewPBES2Encrypter returns a new PBES2Encrypter with the given cipher and KDF options.
+// NewPBESEncrypter returns a new PBESEncrypter with the given cipher and KDF options.
 func NewPBESEncrypter(cipher Cipher, kdfOpts KDFOpts) PBESEncrypter {
 return &PBES2Opts{
 Cipher: cipher,
@@ -23,11 +23,11 @@ func (sk *PrivateKey) Sign(message, context, addRand []byte) ([]byte, error) {
 return nil, errors.New("slhdsa: addrnd should be nil (deterministic variant) or of length n")
 }
 ctxLen := len(context)
-if ctxLen > MAX_CONTEXT_LEN {
+if ctxLen > maxContextLen {
 return nil, errors.New("slhdsa: context too long")
 }
 
-var mPrefix [MAX_CONTEXT_LEN + 2]byte
+var mPrefix [maxContextLen + 2]byte
 
 mPrefix[1] = byte(ctxLen)
 if ctxLen > 0 {
@@ -50,7 +50,7 @@ func (sk *PrivateKey) signInternal(msgPrefix, message, addRand []byte) ([]byte,
 signature := signatureHead[sk.params.n:]
 
 // compute message digest
-var digest [MAX_M]byte
+var digest [maxM]byte
 sk.h.hMsg(&sk.PublicKey, R, msgPrefix, message, digest[:])
 // Grab the first mdLen() bytes of digest to use in fors_sign()
 mdLen := sk.params.mdLen()
@@ -73,7 +73,7 @@ func (sk *PrivateKey) signInternal(msgPrefix, message, addRand []byte) ([]byte,
 // generate the FORS signature and append it to the SLH-DSA signature
 sk.forsSign(md, adrs, signature)
 
-var pkFors [MAX_N]byte
+var pkFors [maxN]byte
 // calculate the FORS public key using the generated FORS signature
 signature = sk.forsPkFromSig(md, signature, adrs, pkFors[:])
 // generate ht signature and append to the SLH-DSA signature
@@ -89,12 +89,12 @@ func (pk *PublicKey) Verify(signature, message, context []byte) bool {
 if len(message) == 0 {
 return false
 }
-if len(context) > MAX_CONTEXT_LEN {
+if len(context) > maxContextLen {
 return false
 }
 
 ctxLen := len(context)
-var msgPrefix [MAX_CONTEXT_LEN + 2]byte
+var msgPrefix [maxContextLen + 2]byte
 msgPrefix[1] = byte(ctxLen)
 if ctxLen > 0 {
 copy(msgPrefix[2:], context)
@@ -112,7 +112,7 @@ func (pk *PublicKey) verifyInternal(signature []byte, msgPrefix []byte, message
 signature = signature[pk.params.n:]
 
 // compute message digest
-var digest [MAX_M]byte
+var digest [maxM]byte
 pk.h.hMsg(pk, R, msgPrefix, message, digest[:])
 // Grab the first mdLen() bytes of digest to use in fors_sign()
 mdLen := pk.params.mdLen()
@@ -130,7 +130,7 @@ func (pk *PublicKey) verifyInternal(signature []byte, msgPrefix []byte, message
 adrs.setTypeAndClear(AddressTypeFORSTree)
 adrs.setKeyPairAddress(leafIdx)
 
-var pkFors [MAX_N]byte
+var pkFors [maxN]byte
 // calculate the FORS public key using the given FORS signature
 signature = pk.forsPkFromSig(md, signature, adrs, pkFors[:])
 
@@ -12,7 +12,7 @@ package slhdsa
 //
 // See FIPS 205 Algorithm 16 fors_sign
 func (sk *PrivateKey) forsSign(md []byte, adrs adrsOperations, sigFors []byte) {
-var indices [MAX_K]uint32
+var indices [maxK]uint32
 // split md into k a-bits values, eatch of which is interpreted as an integer between 0 and 2^a-1.
 base2b(md, sk.params.a, indices[:sk.params.k])
 
@@ -42,7 +42,7 @@ func (sk *PrivateKey) forsSign(md []byte, adrs adrsOperations, sigFors []byte) {
 //
 // See FIPS 205 Algorithm 17 fors_pkFromSig
 func (pk *PublicKey) forsPkFromSig(md, signature []byte, adrs adrsOperations, out []byte) []byte {
-var indices [MAX_K]uint32
+var indices [maxK]uint32
 base2b(md, pk.params.a, indices[:pk.params.k])
 
 twoPowerA := uint32(1 << pk.params.a)
@@ -101,7 +101,7 @@ func (sk *PrivateKey) forsNode(nodeID, layer uint32, adrs adrsOperations, out []
 } else {
 // otherwise, it computes the roots of the left subtree and right subtree
 // and hashs them togeter.
-var lnode, rnode [MAX_N]byte
+var lnode, rnode [maxN]byte
 sk.forsNode(nodeID*2, layer-1, adrs, lnode[:])
 sk.forsNode(nodeID*2+1, layer-1, adrs, rnode[:])
 adrs.setTreeHeight(layer)
@@ -17,7 +17,7 @@ func (sk *PrivateKey) htSign(pkFors []byte, treeIdx uint64, leafIdx uint32, sign
 sigLenPerLayer := (sk.params.hm + sk.params.len) * sk.params.n
 mask := sk.params.leafIdxMask()
 
-var rootBuf [MAX_N]byte
+var rootBuf [maxN]byte
 root := rootBuf[:sk.params.n]
 copy(root, pkFors)
 tmpBuf := make([]byte, sk.params.n*sk.params.len)
@@ -46,7 +46,7 @@ func (pk *PublicKey) htVerify(pkFors []byte, signature []byte, treeIdx uint64, l
 sigLenPerLayer := (pk.params.hm + pk.params.len) * pk.params.n
 mask := pk.params.leafIdxMask()
 
-var rootBuf [MAX_N]byte
+var rootBuf [maxN]byte
 root := rootBuf[:pk.params.n]
 copy(root, pkFors)
 tmpBuf := make([]byte, pk.params.n*pk.params.len)
@@ -19,8 +19,8 @@ import (
 )
 
 type PublicKey struct {
-seed [MAX_N]byte
-root [MAX_N]byte
+seed [maxN]byte
+root [maxN]byte
 params *params
 md hash.Hash
 mdBig hash.Hash
@@ -32,14 +32,14 @@ type PublicKey struct {
 
 type PrivateKey struct {
 PublicKey
-seed [MAX_N]byte
-prf [MAX_N]byte
+seed [maxN]byte
+prf [maxN]byte
 }
 
 // Bytes returns the byte representation of the PublicKey.
 // It combines the seed and root fields of the PublicKey.
 func (pk *PublicKey) Bytes() []byte {
-var key [2 * MAX_N]byte
+var key [2 * maxN]byte
 copy(key[:], pk.seed[:pk.params.n])
 copy(key[pk.params.n:], pk.root[:pk.params.n])
 return key[:2*pk.params.n]
@@ -56,7 +56,7 @@ func (pk *PublicKey) Equal(x any) bool {
 
 // Bytes serializes the PrivateKey into a byte slice.
 func (sk *PrivateKey) Bytes() []byte {
-var key [4 * MAX_N]byte
+var key [4 * maxN]byte
 keySlice := key[:]
 copy(keySlice, sk.seed[:sk.params.n])
 keySlice = keySlice[sk.params.n:]
@@ -68,7 +68,7 @@ func (sk *PrivateKey) Bytes() []byte {
 return key[:4*sk.params.n]
 }
 
-// PublicKey returns the public key of the private key.
+// Public returns the public key of the private key.
 func (sk *PrivateKey) Public() *PublicKey {
 return &sk.PublicKey
 }
@@ -9,14 +9,14 @@ package slhdsa
 import "io"
 
 const (
-MAX_N = 32
-MAX_M = 49
-MAX_K = 35
-MAX_A = 9
-MAX_K_TIMES_A = MAX_K * MAX_A
-MAX_WOTS_LEN = 2*MAX_N + 3
+maxN = 32
+maxM = 49
+maxK = 35
+maxA = 9
+maxKTimesA = maxK * maxA
+maxWotsLen = 2*maxN + 3
 
-MAX_CONTEXT_LEN = 255
+maxContextLen = 255
 )
 
 type params struct {
@@ -48,7 +48,7 @@ func (sk *PrivateKey) wotsPkGen(out, tmpBuf []byte, addr adrsOperations) {
 //
 // See FIPS 205 Algorithm 10 wots_sign
 func (sk *PrivateKey) wotsSign(msg []byte, adrs adrsOperations, sigWots []byte) {
-var msgAndCsum [MAX_WOTS_LEN]byte
+var msgAndCsum [maxWotsLen]byte
 // convert message to base w=16
 bytes2nibbles(msg, msgAndCsum[:])
 // compute checksum
@@ -84,7 +84,7 @@ func (sk *PrivateKey) wotsSign(msg []byte, adrs adrsOperations, sigWots []byte)
 //
 // See FIPS 205 Algorithm 8 wots_pkFromSig
 func (pk *PublicKey) wotsPkFromSig(signature, msg, tmpBuf []byte, adrs adrsOperations, out []byte) {
-var msgAndCsum [MAX_WOTS_LEN]byte
+var msgAndCsum [maxWotsLen]byte
 // convert message to base w=16
 bytes2nibbles(msg, msgAndCsum[:])
 // compute checksum
@@ -17,7 +17,7 @@ func (sk *PrivateKey) xmssNode(out, tmpBuf []byte, i, z uint32, adrs adrsOperati
 sk.wotsPkGen(out, tmpBuf, adrs)
 } else {
 // otherwise, it computes the root of the subtree by hashing the two child nodes
-var lnode, rnode [MAX_N]byte
+var lnode, rnode [maxN]byte
 sk.xmssNode(lnode[:], tmpBuf, 2*i, z-1, adrs)
 sk.xmssNode(rnode[:], tmpBuf, 2*i+1, z-1, adrs)
 adrs.setTypeAndClear(AddressTypeTree)