
Commit e8ea5aa

fjl authored and karalabe committed
trie: reduce hasher allocations (#16896)
* trie: reduce hasher allocations

  name    old time/op    new time/op    delta
  Hash-8  4.05µs ±12%    3.56µs ± 9%    -12.13%  (p=0.000 n=20+19)

  name    old alloc/op   new alloc/op   delta
  Hash-8  1.30kB ± 0%    0.66kB ± 0%    -49.15%  (p=0.000 n=20+20)

  name    old allocs/op  new allocs/op  delta
  Hash-8  11.0 ± 0%      8.0 ± 0%       -27.27%  (p=0.000 n=20+20)

* trie: bump initial buffer cap in hasher
1 parent 5bee5d6 commit e8ea5aa
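The figures in the commit message are benchstat comparisons of the trie package's Hash benchmark before and after the change: roughly half the bytes and three fewer allocations per hash. Below is a minimal, standalone sketch of the pattern being measured, reusing a pooled, append-based scratch slice instead of allocating a fresh buffer per operation. The package and benchmark names are made up for the illustration and are not part of go-ethereum.

package bufreuse

import (
	"sync"
	"testing"
)

// pool hands out reusable scratch slices, mirroring hasherPool in the diff below.
var pool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 550) // same initial cap the hasher now uses
		return &b
	},
}

// BenchmarkPooledScratch reports allocs/op for the reuse pattern; run it with
// `go test -bench PooledScratch` and compare runs with benchstat.
func BenchmarkPooledScratch(b *testing.B) {
	payload := make([]byte, 200) // stand-in for an RLP-encoded trie node
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		buf := pool.Get().(*[]byte)
		*buf = append((*buf)[:0], payload...) // "Reset" + "Write" without reallocating
		pool.Put(buf)
	}
}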

File tree

1 file changed (+38, −12 lines)


trie/hasher.go

Lines changed: 38 additions & 12 deletions
@@ -17,7 +17,6 @@
 package trie

 import (
-	"bytes"
 	"hash"
 	"sync"

@@ -27,17 +26,39 @@ import (
 )

 type hasher struct {
-	tmp        *bytes.Buffer
-	sha        hash.Hash
+	tmp        sliceBuffer
+	sha        keccakState
 	cachegen   uint16
 	cachelimit uint16
 	onleaf     LeafCallback
 }

+// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
+// Read to get a variable amount of data from the hash state. Read is faster than Sum
+// because it doesn't copy the internal state, but also modifies the internal state.
+type keccakState interface {
+	hash.Hash
+	Read([]byte) (int, error)
+}
+
+type sliceBuffer []byte
+
+func (b *sliceBuffer) Write(data []byte) (n int, err error) {
+	*b = append(*b, data...)
+	return len(data), nil
+}
+
+func (b *sliceBuffer) Reset() {
+	*b = (*b)[:0]
+}
+
 // hashers live in a global db.
 var hasherPool = sync.Pool{
 	New: func() interface{} {
-		return &hasher{tmp: new(bytes.Buffer), sha: sha3.NewKeccak256()}
+		return &hasher{
+			tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
+			sha: sha3.NewKeccak256().(keccakState),
+		}
 	},
 }
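The new sliceBuffer is just an append-based []byte with the two methods the hasher needs: Write, so it satisfies io.Writer for rlp.Encode, and Reset, which truncates the length but keeps the 550-byte backing array, so repeated encodings by the same pooled hasher reuse one allocation. A small standalone usage sketch follows, with the type copied from the hunk above so it compiles on its own; the payload string is made up.

package main

import "fmt"

type sliceBuffer []byte

func (b *sliceBuffer) Write(data []byte) (n int, err error) {
	*b = append(*b, data...)
	return len(data), nil
}

func (b *sliceBuffer) Reset() {
	*b = (*b)[:0]
}

func main() {
	buf := make(sliceBuffer, 0, 550)
	for i := 0; i < 3; i++ {
		buf.Reset()                                 // len -> 0, cap stays 550
		buf.Write([]byte("rlp-encoded node bytes")) // appends into the same backing array
		fmt.Println(len(buf), cap(buf))             // e.g. 22 550 on every iteration
	}
}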

@@ -157,26 +178,23 @@ func (h *hasher) store(n node, db *Database, force bool) (node, error) {
 	}
 	// Generate the RLP encoding of the node
 	h.tmp.Reset()
-	if err := rlp.Encode(h.tmp, n); err != nil {
+	if err := rlp.Encode(&h.tmp, n); err != nil {
 		panic("encode error: " + err.Error())
 	}
-	if h.tmp.Len() < 32 && !force {
+	if len(h.tmp) < 32 && !force {
 		return n, nil // Nodes smaller than 32 bytes are stored inside their parent
 	}
 	// Larger nodes are replaced by their hash and stored in the database.
 	hash, _ := n.cache()
 	if hash == nil {
-		h.sha.Reset()
-		h.sha.Write(h.tmp.Bytes())
-		hash = hashNode(h.sha.Sum(nil))
+		hash = h.makeHashNode(h.tmp)
 	}
+
 	if db != nil {
 		// We are pooling the trie nodes into an intermediate memory cache
 		db.lock.Lock()
-
 		hash := common.BytesToHash(hash)
-		db.insert(hash, h.tmp.Bytes())
-
+		db.insert(hash, h.tmp)
 		// Track all direct parent->child node references
 		switch n := n.(type) {
 		case *shortNode:
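One detail behind the rlp.Encode(&h.tmp, n) change in this hunk: sliceBuffer's Write is declared on the pointer receiver, so only *sliceBuffer implements io.Writer; the value h.tmp on its own would not. A throwaway sketch of the rule, where appendBuf is a hypothetical stand-in rather than a geth type:

package main

import "io"

// appendBuf is a stand-in for sliceBuffer: Write has a pointer receiver.
type appendBuf []byte

func (b *appendBuf) Write(p []byte) (int, error) {
	*b = append(*b, p...)
	return len(p), nil
}

func main() {
	var buf appendBuf
	var w io.Writer = &buf // ok: *appendBuf has Write in its method set
	// var w io.Writer = buf // would not compile: the value type lacks Write
	w.Write([]byte("node encoding"))
}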
@@ -210,3 +228,11 @@ func (h *hasher) store(n node, db *Database, force bool) (node, error) {
 	}
 	return hash, nil
 }
+
+func (h *hasher) makeHashNode(data []byte) hashNode {
+	n := make(hashNode, h.sha.Size())
+	h.sha.Reset()
+	h.sha.Write(data)
+	h.sha.Read(n)
+	return n
+}
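makeHashNode is where the Read-over-Sum comment on keccakState pays off: Sum clones the sponge state and allocates a fresh digest slice, while Read squeezes the existing state into a caller-supplied buffer, at the cost of leaving the state unusable until the next Reset. Below is a standalone sketch of the same call pattern; it uses golang.org/x/crypto/sha3 under the assumption that its Keccak-256 state exposes Read, as the sha3 package vendored in this repository does. If that assertion ever fails, Sum remains the fallback.

package main

import (
	"fmt"
	"hash"

	"golang.org/x/crypto/sha3"
)

// keccakState mirrors the interface from the diff: hash.Hash plus Read.
type keccakState interface {
	hash.Hash
	Read([]byte) (int, error)
}

func main() {
	h := sha3.NewLegacyKeccak256().(keccakState)
	h.Write([]byte("rlp-encoded node bytes")) // stand-in payload

	digest := make([]byte, h.Size()) // caller-owned buffer, like makeHashNode's hashNode
	h.Read(digest)                   // squeezes in place; Reset before hashing again
	fmt.Printf("%x\n", digest)
}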
