@@ -202,9 +202,8 @@ type storageResponse struct {
 	accounts []common.Hash // Account hashes requested, may be only partially filled
 	roots    []common.Hash // Storage roots requested, may be only partially filled

-	hashes [][]common.Hash       // Storage slot hashes in the returned range
-	slots  [][][]byte            // Storage slot values in the returned range
-	nodes  []ethdb.KeyValueStore // Database containing the reconstructed trie nodes
+	hashes [][]common.Hash // Storage slot hashes in the returned range
+	slots  [][][]byte      // Storage slot values in the returned range

 	cont bool // Whether the last storage range has a continuation
 }
@@ -680,12 +679,22 @@ func (s *Syncer) loadSyncStatus() {
 			}
 			s.tasks = progress.Tasks
 			for _, task := range s.tasks {
-				task.genBatch = s.db.NewBatch()
+				task.genBatch = ethdb.HookedBatch{
+					Batch: s.db.NewBatch(),
+					OnPut: func(key []byte, value []byte) {
+						s.accountBytes += common.StorageSize(len(key) + len(value))
+					},
+				}
 				task.genTrie = trie.NewStackTrie(task.genBatch)

 				for _, subtasks := range task.SubTasks {
 					for _, subtask := range subtasks {
-						subtask.genBatch = s.db.NewBatch()
+						subtask.genBatch = ethdb.HookedBatch{
+							Batch: s.db.NewBatch(),
+							OnPut: func(key []byte, value []byte) {
+								s.storageBytes += common.StorageSize(len(key) + len(value))
+							},
+						}
 						subtask.genTrie = trie.NewStackTrie(task.genBatch)
 					}
 				}
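
Note: the core of this change is that every write batch is wrapped in an ethdb.HookedBatch whose OnPut callback meters bytes as they are written, replacing the old pattern of summing KeyCount()/ValueSize() after the batch has been filled. A minimal, self-contained sketch of that pattern (illustrative only; it assumes the HookedBatch helper and the memorydb backend shipped in this go-ethereum tree, and is not part of the change itself):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/ethdb/memorydb"
    )

    func main() {
        db := memorydb.New()

        // Meter every Put as it happens instead of reconstructing the volume
        // from key counts once the batch is flushed.
        var written common.StorageSize
        batch := ethdb.HookedBatch{
            Batch: db.NewBatch(),
            OnPut: func(key []byte, value []byte) {
                written += common.StorageSize(len(key) + len(value))
            },
        }
        batch.Put([]byte("alpha"), []byte("one"))
        batch.Put([]byte("beta"), []byte("two"))

        if err := batch.Write(); err != nil {
            panic(err)
        }
        fmt.Println("bytes queued and written:", written)
    }
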
@@ -729,7 +738,12 @@ func (s *Syncer) loadSyncStatus() {
 			// Make sure we don't overflow if the step is not a proper divisor
 			last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
 		}
-		batch := s.db.NewBatch()
+		batch := ethdb.HookedBatch{
+			Batch: s.db.NewBatch(),
+			OnPut: func(key []byte, value []byte) {
+				s.accountBytes += common.StorageSize(len(key) + len(value))
+			},
+		}
 		s.tasks = append(s.tasks, &accountTask{
 			Next: next,
 			Last: last,
@@ -746,19 +760,14 @@ func (s *Syncer) loadSyncStatus() {
 func (s *Syncer) saveSyncStatus() {
 	// Serialize any partial progress to disk before spinning down
 	for _, task := range s.tasks {
-		keys, bytes := task.genBatch.KeyCount(), task.genBatch.ValueSize()
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist account slots", "err", err)
 		}
-		s.accountBytes += common.StorageSize(keys*common.HashLength + bytes)
-
 		for _, subtasks := range task.SubTasks {
 			for _, subtask := range subtasks {
-				keys, bytes := subtask.genBatch.KeyCount(), subtask.genBatch.ValueSize()
 				if err := subtask.genBatch.Write(); err != nil {
 					log.Error("Failed to persist storage slots", "err", err)
 				}
-				s.accountBytes += common.StorageSize(keys*common.HashLength + bytes)
 			}
 		}
 	}
@@ -1763,12 +1772,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 	if res.subTask != nil {
 		res.subTask.req = nil
 	}
-	batch := s.db.NewBatch()
-
+	batch := ethdb.HookedBatch{
+		Batch: s.db.NewBatch(),
+		OnPut: func(key []byte, value []byte) {
+			s.storageBytes += common.StorageSize(len(key) + len(value))
+		},
+	}
 	var (
-		slots int
-		nodes int
-		bytes common.StorageSize
+		slots           int
+		oldStorageBytes = s.storageBytes
 	)
 	// Iterate over all the accounts and reconstruct their storage tries from the
 	// delivered slots
@@ -1829,7 +1841,12 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 					r := newHashRange(lastKey, chunks)

 					// Our first task is the one that was just filled by this response.
-					batch := s.db.NewBatch()
+					batch := ethdb.HookedBatch{
+						Batch: s.db.NewBatch(),
+						OnPut: func(key []byte, value []byte) {
+							s.storageBytes += common.StorageSize(len(key) + len(value))
+						},
+					}
 					tasks = append(tasks, &storageTask{
 						Next: common.Hash{},
 						Last: r.End(),
@@ -1838,7 +1855,12 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 						genTrie: trie.NewStackTrie(batch),
 					})
 					for r.Next() {
-						batch := s.db.NewBatch()
+						batch := ethdb.HookedBatch{
+							Batch: s.db.NewBatch(),
+							OnPut: func(key []byte, value []byte) {
+								s.storageBytes += common.StorageSize(len(key) + len(value))
+							},
+						}
 						tasks = append(tasks, &storageTask{
 							Next: r.Start(),
 							Last: r.End(),
@@ -1883,27 +1905,23 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				}
 			}
 		}
-		// Iterate over all the reconstructed trie nodes and push them to disk
-		// if the contract is fully delivered. If it's chunked, the trie nodes
-		// will be reconstructed later.
+		// Iterate over all the complete contracts, reconstruct the trie nodes and
+		// push them to disk. If the contract is chunked, the trie nodes will be
+		// reconstructed later.
 		slots += len(res.hashes[i])

 		if i < len(res.hashes)-1 || res.subTask == nil {
-			it := res.nodes[i].NewIterator(nil, nil)
-			for it.Next() {
-				batch.Put(it.Key(), it.Value())
-
-				bytes += common.StorageSize(common.HashLength + len(it.Value()))
-				nodes++
+			tr := trie.NewStackTrie(batch)
+			for j := 0; j < len(res.hashes[i]); j++ {
+				tr.Update(res.hashes[i][j][:], res.slots[i][j])
 			}
-			it.Release()
+			tr.Commit()
 		}
 		// Persist the received storage segements. These flat state maybe
 		// outdated during the sync, but it can be fixed later during the
 		// snapshot generation.
 		for j := 0; j < len(res.hashes[i]); j++ {
 			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
-			bytes += common.StorageSize(1 + 2*common.HashLength + len(res.slots[i][j]))

 			// If we're storing large contracts, generate the trie nodes
 			// on the fly to not trash the gluing points
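
Note: with the nodes field gone from storageResponse, the trie nodes of a fully delivered contract are regenerated locally from the proven slot data instead of being carried around as a reconstructed node database. A rough sketch of that regeneration step, assuming hashes/slots below are the sorted key/value pairs of one account exactly as they arrive in a snap response (the helper name is illustrative, not part of the change):

    package snapsketch

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    // rebuildStorageTrie feeds the verified (hash, slot) pairs of one contract
    // into a stack trie, which emits finished trie nodes straight into the
    // supplied write batch as subtries complete.
    func rebuildStorageTrie(batch ethdb.KeyValueWriter, hashes []common.Hash, slots [][]byte) {
        tr := trie.NewStackTrie(batch)
        for j := 0; j < len(hashes); j++ {
            tr.Update(hashes[j][:], slots[j])
        }
        tr.Commit() // flush whatever nodes are still buffered
    }
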
@@ -1926,25 +1944,20 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				}
 			}
 		}
-		if data := res.subTask.genBatch.ValueSize(); data > ethdb.IdealBatchSize || res.subTask.done {
-			keys := res.subTask.genBatch.KeyCount()
+		if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
 			if err := res.subTask.genBatch.Write(); err != nil {
 				log.Error("Failed to persist stack slots", "err", err)
 			}
 			res.subTask.genBatch.Reset()
-
-			bytes += common.StorageSize(keys*common.HashLength + data)
-			nodes += keys
 		}
 	}
 	// Flush anything written just now and update the stats
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to persist storage slots", "err", err)
 	}
 	s.storageSynced += uint64(slots)
-	s.storageBytes += bytes

-	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "nodes", nodes, "bytes", bytes)
+	log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "bytes", s.storageBytes-oldStorageBytes)

 	// If this delivery completed the last pending task, forward the account task
 	// to the next chunk
@@ -2042,18 +2055,20 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 	// Persist the received account segements. These flat state maybe
 	// outdated during the sync, but it can be fixed later during the
 	// snapshot generation.
-	var (
-		nodes int
-		bytes common.StorageSize
-	)
-	batch := s.db.NewBatch()
+	oldAccountBytes := s.accountBytes
+
+	batch := ethdb.HookedBatch{
+		Batch: s.db.NewBatch(),
+		OnPut: func(key []byte, value []byte) {
+			s.accountBytes += common.StorageSize(len(key) + len(value))
+		},
+	}
 	for i, hash := range res.hashes {
 		if task.needCode[i] || task.needState[i] {
 			break
 		}
 		slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
 		rawdb.WriteAccountSnapshot(batch, hash, slim)
-		bytes += common.StorageSize(1 + common.HashLength + len(slim))

 		// If the task is complete, drop it into the stack trie to generate
 		// account trie nodes for it
@@ -2069,7 +2084,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to persist accounts", "err", err)
 	}
-	s.accountBytes += bytes
 	s.accountSynced += uint64(len(res.accounts))

 	// Task filling persisted, push it the chunk marker forward to the first
@@ -2091,17 +2105,13 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 			log.Error("Failed to commit stack account", "err", err)
 		}
 	}
-	if data := task.genBatch.ValueSize(); data > ethdb.IdealBatchSize || task.done {
-		keys := task.genBatch.KeyCount()
+	if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist stack account", "err", err)
 		}
 		task.genBatch.Reset()
-
-		nodes += keys
-		bytes += common.StorageSize(keys*common.HashLength + data)
 	}
-	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "nodes", nodes, "bytes", bytes)
+	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
 }

 // OnAccounts is a callback method to invoke when a range of accounts are
@@ -2176,7 +2186,7 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco
 	if len(keys) > 0 {
 		end = keys[len(keys)-1]
 	}
-	_, cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
+	cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)
 	if err != nil {
 		logger.Warn("Account range failed proof", "err", err)
 		// Signal this request as failed, and ready for rescheduling
@@ -2393,10 +2403,8 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 	s.lock.Unlock()

 	// Reconstruct the partial tries from the response and verify them
-	var (
-		dbs  = make([]ethdb.KeyValueStore, len(hashes))
-		cont bool
-	)
+	var cont bool
+
 	for i := 0; i < len(hashes); i++ {
 		// Convert the keys and proofs into an internal format
 		keys := make([][]byte, len(hashes[i]))
@@ -2413,7 +2421,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 		if len(nodes) == 0 {
 			// No proof has been attached, the response must cover the entire key
 			// space and hash to the origin root.
-			dbs[i], _, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
+			_, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
 			if err != nil {
 				s.scheduleRevertStorageRequest(req) // reschedule request
 				logger.Warn("Storage slots failed proof", "err", err)
@@ -2428,7 +2436,7 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 			if len(keys) > 0 {
 				end = keys[len(keys)-1]
 			}
-			dbs[i], cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
+			cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
 			if err != nil {
 				s.scheduleRevertStorageRequest(req) // reschedule request
 				logger.Warn("Storage range failed proof", "err", err)
@@ -2444,7 +2452,6 @@ func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slo
 		roots:    req.roots,
 		hashes:   hashes,
 		slots:    slots,
-		nodes:    dbs,
 		cont:     cont,
 	}
 	select {