-
Notifications
You must be signed in to change notification settings - Fork 136
Nimbus Verified Proxy #3100
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Nimbus Verified Proxy #3100
Changes from all commits
91c9839
55b3c15
9f68995
cf579fe
5f696a0
d8584d8
1d5a061
06f9abc
8f62f39
e61f2e3
1604592
1d4adde
1502a10
d873e21
03f5c34
e0fab22
c139f21
2c62fc8
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
This file was deleted.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,141 @@ | ||
# nimbus_verified_proxy | ||
# Copyright (c) 2022-2025 Status Research & Development GmbH | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How is this file from 2022-2024? It pops into existence in this PR. |
||
# Licensed and distributed under either of | ||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). | ||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). | ||
# at your option. This file may not be copied, modified, or distributed except according to those terms. | ||
|
||
{.push raises: [].} | ||
|
||
import | ||
eth/common/hashes, | ||
eth/common/headers, | ||
web3/eth_api_types, | ||
std/tables, | ||
beacon_chain/spec/beaconstate, | ||
beacon_chain/spec/datatypes/[phase0, altair, bellatrix], | ||
beacon_chain/[light_client, nimbus_binary_common], | ||
beacon_chain/el/engine_api_conversions, | ||
minilru, | ||
results | ||
|
||
type HeaderStore* = ref object
  ## Holds a bounded window of recent execution-layer headers derived from
  ## light-client updates, keyed by execution block hash.
  # NOTE(review): the LRU is used mainly for its automatic eviction of the
  # oldest entries; lookups below go through `peek`, so recency order is
  # effectively insertion order — TODO confirm against minilru semantics.
  headers: LruCache[Hash32, Header]
  # Secondary index: block number -> execution block hash, used by the
  # number-based `get`.
  # NOTE(review): entries here are never pruned when `headers` evicts, so
  # this table grows without bound — verify/bound it (see PR discussion).
  hashes: Table[base.BlockNumber, Hash32]
||
|
||
func convHeader(lcHeader: ForkedLightClientHeader): Header =
  ## Converts a forked light-client header into an execution-layer `Header`.
  ##
  ## Fields introduced by later forks (withdrawals root, blob gas, parent
  ## beacon block root) are `Opt.none` for earlier forks. For Altair (and
  ## older) data a default `Header` is returned — running the verified
  ## proxy against pre-Merge data doesn't make sense.
  withForkyHeader(lcHeader):
    template p(): auto =
      forkyHeader.execution

    when lcDataFork >= LightClientDataFork.Capella:
      let withdrawalsRoot = Opt.some(p.withdrawals_root.asBlockHash)
    else:
      let withdrawalsRoot = Opt.none(Hash32)

    when lcDataFork >= LightClientDataFork.Deneb:
      let
        blobGasUsed = Opt.some(p.blob_gas_used)
        excessBlobGas = Opt.some(p.excess_blob_gas)
        parentBeaconBlockRoot = Opt.some(forkyHeader.beacon.parent_root.asBlockHash)
    else:
      let
        blobGasUsed = Opt.none(uint64)
        excessBlobGas = Opt.none(uint64)
        parentBeaconBlockRoot = Opt.none(Hash32)

    # TODO: the light-client header gives no visibility of the execution
    # requests hash (Electra), so it is empty for every fork. The original
    # `when lcDataFork >= Electra` here had byte-identical arms and has
    # been collapsed into a single binding.
    let requestsHash = Opt.none(Hash32)

    when lcDataFork > LightClientDataFork.Altair:
      let h = Header(
        parentHash: p.parent_hash.asBlockHash,
        ommersHash: EMPTY_UNCLE_HASH,
        coinbase: addresses.Address(p.fee_recipient.data),
        stateRoot: p.state_root.asBlockHash,
        transactionsRoot: p.transactions_root.asBlockHash,
        receiptsRoot: p.receipts_root.asBlockHash,
        logsBloom: FixedBytes[BYTES_PER_LOGS_BLOOM](p.logs_bloom.data),
        # Post-merge headers always carry zero difficulty.
        difficulty: DifficultyInt(0.u256),
        number: base.BlockNumber(p.block_number),
        gasLimit: GasInt(p.gas_limit),
        gasUsed: GasInt(p.gas_used),
        timestamp: EthTime(p.timestamp),
        extraData: seq[byte](p.extra_data),
        mixHash: p.prev_randao.data.to(Bytes32),
        nonce: default(Bytes8),
        baseFeePerGas: Opt.some(p.base_fee_per_gas),
        withdrawalsRoot: withdrawalsRoot,
        blobGasUsed: blobGasUsed,
        excessBlobGas: excessBlobGas,
        parentBeaconBlockRoot: parentBeaconBlockRoot,
        requestsHash: requestsHash,
      )
    else:
      # INFO: should never be reached — running the verified proxy for
      # altair doesn't make sense.
      let h = Header()
    return h
|
||
proc new*(T: type HeaderStore, max: int): T =
  ## Creates a header store that retains at most `max` headers; the oldest
  ## entry is evicted once the limit is reached.
  T(headers: LruCache[Hash32, Header].init(max))
|
||
func len*(self: HeaderStore): int =
  ## Number of headers currently held by the store.
  self.headers.len
|
||
func isEmpty*(self: HeaderStore): bool =
  ## True when the store holds no headers at all.
  self.headers.len == 0
|
||
proc add*(self: HeaderStore, header: ForkedLightClientHeader) =
  ## Stores `header` unless its execution block hash is already present —
  ## `latest` relies on insertion happening only for new headers.
  ## Altair (and older) headers are ignored entirely.
  let converted = convHeader(header)
  withForkyHeader(header):
    when lcDataFork > LightClientDataFork.Altair:
      let key = forkyHeader.execution.block_hash.asBlockHash

      # Guard clause: never overwrite an existing entry, so LRU recency
      # order keeps matching insertion order.
      if key in self.headers:
        return

      self.headers.put(key, converted)
      # NOTE(review): this index is not pruned on LRU eviction — TODO.
      self.hashes[converted.number] = key
|
||
proc latest*(self: HeaderStore): Opt[Header] =
  ## Most recently added header, or none when the store is empty.
  # Takes the first value the LRU yields — assumes minilru iterates
  # newest-first; `add` never touches recency of existing entries, so
  # this is the last insertion. TODO confirm iteration order.
  for header in self.headers.values:
    return Opt.some(header)
  Opt.none(Header)
|
||
proc latestHash*(self: HeaderStore): Opt[Hash32] =
  ## Execution block hash of the most recently added header, or none when
  ## the store is empty or the number index has no entry for it.
  for h in self.headers.values:
    let hash =
      try:
        self.hashes[h.number]
      except KeyError:
        # Narrowed from a bare `except:`, which would also have swallowed
        # Defects (index/assertion failures) — only a missing key is a
        # legitimate "no hash" outcome here.
        return Opt.none(Hash32)
    return Opt.some(hash)

  Opt.none(Hash32)
|
||
proc earliest*(self: HeaderStore): Opt[Header] =
  ## Oldest header still held by the LRU, or none when the store is empty.
  # NOTE(review): walks every key to reach the last one, so this is O(n)
  # per call — caching the earliest number/hash would avoid the scan.
  if self.isEmpty():
    return Opt.none(Header)

  var oldestKey: Hash32
  for key in self.headers.keys:
    oldestKey = key

  # `peek` avoids promoting the entry in the LRU recency order.
  self.headers.peek(oldestKey)
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why not just store the earliest and latest block number (or hash) and then use them to return the earliest and latest headers. |
||
|
||
proc get*(self: HeaderStore, number: base.BlockNumber): Opt[Header] =
  ## Looks up a header by block number via the number -> hash index, then
  ## fetches it from the LRU without changing recency order.
  let hash =
    try:
      self.hashes[number]
    except KeyError:
      # Narrowed from a bare `except:` (which would also have caught
      # Defects); an unknown block number simply yields none.
      return Opt.none(Header)

  return self.headers.peek(hash)
|
||
proc get*(self: HeaderStore, hash: Hash32): Opt[Header] =
  ## Looks up a header by execution block hash; `peek` leaves the LRU
  ## recency order untouched.
  return self.headers.peek(hash)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
You shouldn't need to pass in a CommonRef directly. I'm not sure if this could lead to problems if the in memory database used by the evm isn't empty. I did provide the NetworkId parameter in the above init proc which is the one you should use.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I did not want to initialize two instances of `CommonRef`. More so because the other instance is merely used for gas price APIs (which internally use `CommonRef` for fork calculation, I assume).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We probably don't need an instance of `CommonRef` in the NVP just for determining a fork. After looking at that area of code, it appears that you should be able to do something like this to get the fork without creating an instance of `CommonRef`:
I haven't tried this but it should be possible even with some small changes to Nimbus code if needed.