diff --git a/latest-coreth-commit.txt b/latest-coreth-commit.txt index 264a78d891..dc93b49f30 100644 --- a/latest-coreth-commit.txt +++ b/latest-coreth-commit.txt @@ -1,3 +1,6 @@ # This is the latest commit of coreth that is synced with the subnet-evm -8222fcf9d9fe0630538318c6e90235d8e78ecccd \ No newline at end of file +d09d7cb8610be5ee5b8e9de771b4076ab194bf8f + +# Notes: +- Coreth PR 1034 has already been synced (ahead of schedule) - see https://github.com/ava-labs/subnet-evm/pull/1622 \ No newline at end of file diff --git a/params/extras/config.go b/params/extras/config.go index 531d20e625..901a3c017d 100644 --- a/params/extras/config.go +++ b/params/extras/config.go @@ -38,7 +38,6 @@ var ( } TestChainConfig = &ChainConfig{ - AvalancheContext: AvalancheContext{SnowCtx: utils.TestSnowContext()}, FeeConfig: DefaultFeeConfig, NetworkUpgrades: GetNetworkUpgrades(upgrade.GetConfig(constants.UnitTestID)), // This can be changed to correct network (local, test) via VM. GenesisPrecompiles: Precompiles{}, diff --git a/params/extras/precompile_config_test.go b/params/extras/precompile_config_test.go index ef84db1b5a..d73ff13c19 100644 --- a/params/extras/precompile_config_test.go +++ b/params/extras/precompile_config_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/subnet-evm/precompile/contracts/rewardmanager" "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist" "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" ) @@ -23,6 +24,7 @@ func TestVerifyWithChainConfig(t *testing.T) { admins := []common.Address{{1}} copy := *TestChainConfig config := © + config.SnowCtx = utilstest.NewTestSnowContext(t) config.GenesisPrecompiles = Precompiles{ txallowlist.ConfigKey: txallowlist.NewConfig(utils.NewUint64(2), nil, nil, nil), } @@ -68,6 +70,7 @@ func 
TestVerifyWithChainConfigAtNilTimestamp(t *testing.T) { admins := []common.Address{{0}} copy := *TestChainConfig config := © + config.SnowCtx = utilstest.NewTestSnowContext(t) config.PrecompileUpgrades = []PrecompileUpgrade{ // this does NOT enable the precompile, so it should be upgradeable. {Config: txallowlist.NewConfig(nil, nil, nil, nil)}, @@ -188,6 +191,7 @@ func TestVerifyPrecompileUpgrades(t *testing.T) { require := require.New(t) copy := *TestChainConfig config := © + config.SnowCtx = utilstest.NewTestSnowContext(t) config.PrecompileUpgrades = tt.upgrades err := config.Verify() @@ -232,6 +236,7 @@ func TestVerifyPrecompiles(t *testing.T) { require := require.New(t) copy := *TestChainConfig config := © + config.SnowCtx = utilstest.NewTestSnowContext(t) config.GenesisPrecompiles = tt.precompiles err := config.Verify() @@ -248,6 +253,9 @@ func TestVerifyRequiresSortedTimestamps(t *testing.T) { admins := []common.Address{{1}} config := &ChainConfig{ FeeConfig: DefaultFeeConfig, + AvalancheContext: AvalancheContext{ + SnowCtx: utilstest.NewTestSnowContext(t), + }, } config.PrecompileUpgrades = []PrecompileUpgrade{ { diff --git a/params/extras/state_upgrade_test.go b/params/extras/state_upgrade_test.go index 1bb20a76c9..abeaaf2937 100644 --- a/params/extras/state_upgrade_test.go +++ b/params/extras/state_upgrade_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/math" "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" ) @@ -61,6 +62,7 @@ func TestVerifyStateUpgrades(t *testing.T) { require := require.New(t) copy := *TestChainConfig config := © + config.SnowCtx = utilstest.NewTestSnowContext(t) config.StateUpgrades = tt.upgrades err := config.Verify() diff --git a/plugin/evm/gossiper_eth_gossiping_test.go 
b/plugin/evm/gossiper_eth_gossiping_test.go index 6a7b1f454e..28825d9380 100644 --- a/plugin/evm/gossiper_eth_gossiping_test.go +++ b/plugin/evm/gossiper_eth_gossiping_test.go @@ -83,25 +83,28 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { genesisJSON, err := fundAddressByGenesis([]common.Address{addr}) assert.NoError(err) - vm, _, sender := GenesisVM(t, true, genesisJSON, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSON, + }) + defer func() { - err := vm.Shutdown(context.Background()) + err := tvm.vm.Shutdown(context.Background()) assert.NoError(err) }() - vm.txPool.SetGasTip(common.Big1) - vm.txPool.SetMinFee(common.Big0) + tvm.vm.txPool.SetGasTip(common.Big1) + tvm.vm.txPool.SetMinFee(common.Big0) var ( wg sync.WaitGroup txRequested bool ) - sender.CantSendAppGossip = false - sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { + tvm.appSender.CantSendAppGossip = false + tvm.appSender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { txRequested = true return nil } wg.Add(1) - sender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { + tvm.appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { wg.Done() return nil } @@ -111,7 +114,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { // Txs must be submitted over the API to be included in push gossip. 
// (i.e., txs received via p2p are not included in push gossip) - err = vm.eth.APIBackend.SendTx(context.Background(), tx) + err = tvm.vm.eth.APIBackend.SendTx(context.Background(), tx) assert.NoError(err) assert.False(txRequested, "tx should not be requested") diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index ea699bb70d..a232ba8515 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -23,6 +23,7 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/libevm/common" @@ -140,7 +141,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, []byte(stateSyncDisabledConfigJSON), []*commonEng.Fx{}, @@ -204,7 +205,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, []byte(configJSON), []*commonEng.Fx{}, @@ -273,12 +274,15 @@ func TestVMShutdownWhileSyncing(t *testing.T) { func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *syncVMSetup { require := require.New(t) // configure [serverVM] - serverVM, _, serverAppSender := GenesisVM(t, true, genesisJSONLatest, "", "") + serverVM := newVM(t, testVMConfig{ + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Latest]), + }) + t.Cleanup(func() { log.Info("Shutting down server VM") - require.NoError(serverVM.Shutdown(context.Background())) + 
require.NoError(serverVM.vm.Shutdown(context.Background())) }) - generateAndAcceptBlocks(t, serverVM, numBlocks, func(i int, gen *core.BlockGen) { + generateAndAcceptBlocks(t, serverVM.vm, numBlocks, func(i int, gen *core.BlockGen) { b, err := predicate.NewResults().Bytes() if err != nil { t.Fatal(err) @@ -286,47 +290,52 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s gen.AppendExtra(b) tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, ethparams.TxGas, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) gen.AddTx(signedTx) }, nil) // make some accounts - trieDB := triedb.NewDatabase(serverVM.chaindb, nil) + trieDB := triedb.NewDatabase(serverVM.vm.chaindb, nil) root, accounts := statesync.FillAccountsWithOverlappingStorage(t, trieDB, types.EmptyRootHash, 1000, 16) // patch serverVM's lastAcceptedBlock to have the new root // and update the vm's state so the trie with accounts will // be returned by StateSyncGetLastSummary - lastAccepted := serverVM.blockChain.LastAcceptedBlock() - patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb) + lastAccepted := serverVM.vm.blockChain.LastAcceptedBlock() + patchedBlock := patchBlock(lastAccepted, root, serverVM.vm.chaindb) blockBytes, err := rlp.EncodeToBytes(patchedBlock) require.NoError(err) - internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes) + internalBlock, err := serverVM.vm.parseBlock(context.Background(), blockBytes) require.NoError(err) - require.NoError(serverVM.State.SetLastAcceptedBlock(internalBlock)) + require.NoError(serverVM.vm.State.SetLastAcceptedBlock(internalBlock)) // patch syncableInterval for test - serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = 
test.syncableInterval + serverVM.vm.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval // initialise [syncerVM] with blank genesis state stateSyncEnabledJSON := fmt.Sprintf(`{"state-sync-enabled":true, "state-sync-min-blocks": %d, "tx-lookup-limit": %d}`, test.stateSyncMinBlocks, 4) - syncerVM, syncerDB, syncerAppSender := GenesisVM(t, false, genesisJSONLatest, stateSyncEnabledJSON, "") - shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM} + syncerVM := newVM(t, testVMConfig{ + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Latest]), + configJSON: stateSyncEnabledJSON, + isSyncing: true, + }) + + shutdownOnceSyncerVM := &shutdownOnceVM{VM: syncerVM.vm} t.Cleanup(func() { require.NoError(shutdownOnceSyncerVM.Shutdown(context.Background())) }) - require.NoError(syncerVM.SetState(context.Background(), snow.StateSyncing)) - enabled, err := syncerVM.StateSyncEnabled(context.Background()) + require.NoError(syncerVM.vm.SetState(context.Background(), snow.StateSyncing)) + enabled, err := syncerVM.vm.StateSyncEnabled(context.Background()) require.NoError(err) require.True(enabled) // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + serverVM.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { - go syncerVM.AppResponse(ctx, nodeID, requestID, response) + go syncerVM.vm.AppResponse(ctx, nodeID, requestID, response) } else { - go test.responseIntercept(syncerVM, nodeID, requestID, response) + go test.responseIntercept(syncerVM.vm, nodeID, requestID, response) } return nil @@ -334,28 +343,28 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s // connect peer to [syncerVM] require.NoError( - syncerVM.Connected( + syncerVM.vm.Connected( 
context.Background(), - serverVM.ctx.NodeID, + serverVM.vm.ctx.NodeID, statesyncclient.StateSyncVersion, ), ) // override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM] - syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { + syncerVM.appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() require.True(hasItem, "expected nodeSet to contain at least 1 nodeID") - err := serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) + err := serverVM.vm.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) require.NoError(err) return nil } return &syncVMSetup{ - serverVM: serverVM, - serverAppSender: serverAppSender, + serverVM: serverVM.vm, + serverAppSender: serverVM.appSender, fundedAccounts: accounts, - syncerVM: syncerVM, - syncerDB: syncerDB, + syncerVM: syncerVM.vm, + syncerDB: syncerVM.db, shutdownOnceSyncerVM: shutdownOnceSyncerVM, } } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 0754aefd04..0d72e0ab13 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -28,14 +29,14 @@ import ( "google.golang.org/protobuf/proto" "github.com/ava-labs/libevm/core/types" - "github.com/ava-labs/subnet-evm/utils" + 
"github.com/ava-labs/subnet-evm/utils/utilstest" ) func TestEthTxGossip(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := utils.TestSnowContext() - validatorState := utils.NewTestValidatorState() + snowCtx := utilstest.NewTestSnowContext(t) + validatorState := utilstest.NewTestValidatorState() snowCtx.ValidatorState = validatorState responseSender := &enginetest.SenderStub{ @@ -49,7 +50,7 @@ func TestEthTxGossip(t *testing.T) { ctx, snowCtx, memdb.New(), - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, nil, nil, @@ -114,9 +115,8 @@ func TestEthTxGossip(t *testing.T) { // Issue a tx to the VM address := testEthAddrs[0] - key := testKeys[0] tx := types.NewTransaction(0, address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) errs := vm.txPool.Add([]*types.Transaction{signedTx}, true, true) @@ -151,7 +151,7 @@ func TestEthTxGossip(t *testing.T) { func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := utils.TestSnowContext() + snowCtx := utilstest.NewTestSnowContext(t) sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } @@ -164,7 +164,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { ctx, snowCtx, memdb.New(), - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, nil, nil, @@ -177,9 +177,8 @@ func TestEthTxPushGossipOutbound(t *testing.T) { }() address := testEthAddrs[0] - key := testKeys[0] tx := types.NewTransaction(0, address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) // issue a tx @@ -205,7 +204,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { func TestEthTxPushGossipInbound(t *testing.T) { require := require.New(t) ctx := context.Background() - snowCtx := utils.TestSnowContext() + snowCtx := utilstest.NewTestSnowContext(t) sender := &enginetest.Sender{} vm := &VM{ @@ -216,7 +215,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { ctx, snowCtx, memdb.New(), - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, nil, nil, @@ -229,9 +228,8 @@ func TestEthTxPushGossipInbound(t *testing.T) { }() address := testEthAddrs[0] - key := testKeys[0] tx := types.NewTransaction(0, address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) marshaller := GossipEthTxMarshaller{} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 056db593a5..425be29219 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -5,7 +5,6 @@ package evm import ( "context" - "crypto/ecdsa" "encoding/json" "errors" "fmt" @@ -19,12 +18,13 @@ import ( "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/core/types" - "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/log" + "github.com/ava-labs/libevm/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" 
"github.com/ava-labs/avalanchego/database/memdb" @@ -35,10 +35,10 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/libevm/trie" "github.com/ava-labs/subnet-evm/commontype" "github.com/ava-labs/subnet-evm/constants" "github.com/ava-labs/subnet-evm/core" @@ -58,6 +58,7 @@ import ( "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist" "github.com/ava-labs/subnet-evm/rpc" "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" avagoconstants "github.com/ava-labs/avalanchego/utils/constants" ) @@ -65,13 +66,13 @@ import ( var ( testNetworkID uint32 = avagoconstants.UnitTestID - testMinGasPrice int64 = 225_000_000_000 - testKeys []*ecdsa.PrivateKey + testMinGasPrice int64 = 225_000_000_000 + testKeys = secp256k1.TestKeys()[:3] testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] firstTxAmount = new(big.Int).Mul(big.NewInt(testMinGasPrice), big.NewInt(21000*100)) - genesisJSON = func(cfg *params.ChainConfig) string { + toGenesisJSON = func(cfg *params.ChainConfig) string { g := new(core.Genesis) g.Difficulty = big.NewInt(0) g.GasLimit = 8000000 @@ -83,8 +84,15 @@ var ( cpy.ChainID = big.NewInt(43111) g.Config = &cpy - allocStr := `{"0x71562b71999873DB5b286dF957af199Ec94617F7": {"balance":"0x4192927743b88000"}, 
"0x703c4b2bD70c169f5717101CaeE543299Fc946C7": {"balance":"0x4192927743b88000"}}` - json.Unmarshal([]byte(allocStr), &g.Alloc) + // Create allocation for the test addresses + g.Alloc = make(types.GenesisAlloc) + for _, addr := range testEthAddrs { + balance := new(big.Int) + balance.SetString("0x4192927743b88000", 0) + g.Alloc[addr] = types.Account{ + Balance: balance, + } + } b, err := json.Marshal(g) if err != nil { @@ -93,38 +101,56 @@ var ( return string(b) } - genesisJSONPreSubnetEVM = genesisJSON(params.TestPreSubnetEVMChainConfig) - genesisJSONSubnetEVM = genesisJSON(params.TestSubnetEVMChainConfig) - genesisJSONDurango = genesisJSON(params.TestDurangoChainConfig) - genesisJSONEtna = genesisJSON(params.TestEtnaChainConfig) - // genesisJSONFortuna = genesisJSON(params.TestFortunaChainConfig) - genesisJSONGranite = genesisJSON(params.TestGraniteChainConfig) - genesisJSONLatest = genesisJSONGranite + // forkToChainConfig maps a fork to a chain config + forkToChainConfig = map[upgradetest.Fork]*params.ChainConfig{ + upgradetest.Durango: params.TestDurangoChainConfig, + upgradetest.Etna: params.TestEtnaChainConfig, + // upgradetest.Fortuna: params.TestFortunaChainConfig, + upgradetest.Granite: params.TestGraniteChainConfig, + } + + // These will be initialized after init() runs + genesisJSONPreSubnetEVM string + genesisJSONSubnetEVM string ) func init() { - key1, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - testKeys = append(testKeys, key1, key2) - addr1 := crypto.PubkeyToAddress(key1.PublicKey) - addr2 := crypto.PubkeyToAddress(key2.PublicKey) - testEthAddrs = append(testEthAddrs, addr1, addr2) + for _, key := range testKeys { + testEthAddrs = append(testEthAddrs, key.EthAddress()) + } + + genesisJSONPreSubnetEVM = toGenesisJSON(params.TestPreSubnetEVMChainConfig) + genesisJSONSubnetEVM = 
toGenesisJSON(params.TestSubnetEVMChainConfig) } -// setupGenesis sets up the genesis -// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func setupGenesis( - t *testing.T, - genesisJSON string, -) (*snow.Context, - database.Database, - []byte, - *atomic.Memory, -) { - if len(genesisJSON) == 0 { - genesisJSON = genesisJSONLatest +type testVMConfig struct { + isSyncing bool + fork *upgradetest.Fork + // If genesisJSON is empty, defaults to the genesis corresponding to the + // fork. + genesisJSON string + upgradeJSON string + configJSON string +} + +type testVM struct { + vm *VM + db *prefixdb.Database + atomicMemory *atomic.Memory + appSender *enginetest.Sender +} + +func newVM(t *testing.T, config testVMConfig) *testVM { + ctx := utilstest.NewTestSnowContext(t) + fork := upgradetest.Latest + if config.fork != nil { + fork = *config.fork + } + ctx.NetworkUpgrades = upgradetest.GetConfig(fork) + + if len(config.genesisJSON) == 0 { + config.genesisJSON = toGenesisJSON(forkToChainConfig[fork]) } - ctx := utils.TestSnowContext() baseDB := memdb.New() @@ -132,45 +158,78 @@ func setupGenesis( atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) + // NB: this lock is intentionally left locked when this function returns. + // The caller of this function is responsible for unlocking. + ctx.Lock.Lock() + prefixedDB := prefixdb.New([]byte{1}, baseDB) - return ctx, prefixedDB, []byte(genesisJSON), atomicMemory -} -// GenesisVM creates a VM instance with the genesis test bytes and returns -// the channel use to send messages to the engine, the VM, database manager, -// and sender. 
-// If [genesisJSON] is empty, defaults to using [genesisJSONLatest] -func GenesisVM(t *testing.T, finishBootstrapping bool, genesisJSON string, configJSON string, upgradeJSON string) (*VM, database.Database, *enginetest.Sender) { vm := &VM{} - ctx, dbManager, genesisBytes, _ := setupGenesis(t, genesisJSON) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + err := vm.Initialize( context.Background(), ctx, - dbManager, - genesisBytes, - []byte(upgradeJSON), - []byte(configJSON), + prefixedDB, + []byte(config.genesisJSON), + []byte(config.upgradeJSON), + []byte(config.configJSON), []*commonEng.Fx{}, appSender, ) - require.NoError(t, err, "error initializing GenesisVM") + require.NoError(t, err, "error initializing vm") - if finishBootstrapping { + if !config.isSyncing { require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) } - return vm, dbManager, appSender + return &testVM{ + vm: vm, + db: prefixedDB, + atomicMemory: atomicMemory, + appSender: appSender, + } +} + +// setupGenesis sets up the genesis +func setupGenesis( + t *testing.T, + fork upgradetest.Fork, +) (*snow.Context, + *prefixdb.Database, + []byte, + *atomic.Memory, +) { + ctx := utilstest.NewTestSnowContext(t) + + genesisJSON := toGenesisJSON(forkToChainConfig[fork]) + ctx.NetworkUpgrades = upgradetest.GetConfig(fork) + + baseDB := memdb.New() + + // initialize the atomic memory + atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) + ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) + + // NB: this lock is intentionally left locked when this function returns. + // The caller of this function is responsible for unlocking. 
+ ctx.Lock.Lock() + + prefixedDB := prefixdb.New([]byte{1}, baseDB) + + return ctx, prefixedDB, []byte(genesisJSON), atomicMemory } func TestVMConfig(t *testing.T) { txFeeCap := float64(11) enabledEthAPIs := []string{"debug"} - configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - vm, _, _ := GenesisVM(t, false, "", configJSON, "") + vm := newVM(t, testVMConfig{ + configJSON: fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])), + }).vm + require.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") require.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") require.NoError(t, vm.Shutdown(context.Background())) @@ -179,8 +238,9 @@ func TestVMConfig(t *testing.T) { func TestVMConfigDefaults(t *testing.T) { txFeeCap := float64(11) enabledEthAPIs := []string{"debug"} - configJSON := fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])) - vm, _, _ := GenesisVM(t, false, "", configJSON, "") + vm := newVM(t, testVMConfig{ + configJSON: fmt.Sprintf(`{"rpc-tx-fee-cap": %g,"eth-apis": %s}`, txFeeCap, fmt.Sprintf("[%q]", enabledEthAPIs[0])), + }).vm var vmConfig config.Config vmConfig.SetDefaults(defaultTxPoolConfig) @@ -191,7 +251,7 @@ func TestVMConfigDefaults(t *testing.T) { } func TestVMNilConfig(t *testing.T) { - vm, _, _ := GenesisVM(t, false, "", "", "") + vm := newVM(t, testVMConfig{}).vm // VM Config should match defaults if no config is passed in var vmConfig config.Config @@ -203,8 +263,10 @@ func TestVMNilConfig(t *testing.T) { func TestVMContinuousProfiler(t *testing.T) { profilerDir := t.TempDir() profilerFrequency := 500 * time.Millisecond - configJSON := fmt.Sprintf(`{"continuous-profiler-dir": %q,"continuous-profiler-frequency": "500ms"}`, profilerDir) - vm, _, _ := GenesisVM(t, false, "", configJSON, "") + vm := newVM(t, testVMConfig{ + configJSON: 
fmt.Sprintf(`{"continuous-profiler-dir": %q,"continuous-profiler-frequency": "500ms"}`, profilerDir), + }).vm + require.Equal(t, vm.config.ContinuousProfilerDir, profilerDir, "profiler dir should be set") require.Equal(t, vm.config.ContinuousProfilerFrequency.Duration, profilerFrequency, "profiler frequency should be set") @@ -222,17 +284,17 @@ func TestVMContinuousProfiler(t *testing.T) { func TestVMUpgrades(t *testing.T) { genesisTests := []struct { name string - genesis string + genesisJSON string expectedGasPrice *big.Int }{ { name: "Subnet EVM", - genesis: genesisJSONSubnetEVM, + genesisJSON: genesisJSONSubnetEVM, expectedGasPrice: big.NewInt(0), }, { name: "Durango", - genesis: genesisJSONDurango, + genesisJSON: toGenesisJSON(params.TestDurangoChainConfig), expectedGasPrice: big.NewInt(0), }, } @@ -240,7 +302,10 @@ func TestVMUpgrades(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - vm, _, _ := GenesisVM(t, true, test.genesis, "", "") + vm := newVM(t, testVMConfig{ + genesisJSON: test.genesisJSON, + }).vm + defer func() { require.NoError(vm.Shutdown(context.Background())) }() @@ -291,32 +356,35 @@ func issueAndAccept(t *testing.T, vm *VM) snowman.Block { func TestBuildEthTxBlock(t *testing.T) { // reduce block gas cost - vm, dbManager, _ := GenesisVM(t, true, genesisJSONSubnetEVM, `{"pruning-enabled":true}`, "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + configJSON: `{"pruning-enabled":true}`, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) key := testutils.NewKey(t) tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - blk1 := issueAndAccept(t, vm) + blk1 := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk1.ID()) { t.Fatalf("Expected new block to match") @@ -325,27 +393,27 @@ func TestBuildEthTxBlock(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), key.Address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key.PrivateKey) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), key.PrivateKey) if err != nil { t.Fatal(err) } txs[i] = signedTx } - errs = vm.txPool.AddRemotesSync(txs) + errs = tvm.vm.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - blk2 := issueAndAccept(t, vm) + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) + blk2 := issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk2.ID()) { t.Fatalf("Expected new block to match") } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := tvm.vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -354,19 +422,19 @@ func TestBuildEthTxBlock(t *testing.T) { } ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*Block).ethBlock - if ethBlk1Root := ethBlk1.Root(); !vm.blockChain.HasState(ethBlk1Root) { + if ethBlk1Root := 
ethBlk1.Root(); !tvm.vm.blockChain.HasState(ethBlk1Root) { t.Fatalf("Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") } // Clear the cache and ensure that GetBlock returns internal blocks with the correct status - vm.State.Flush() - blk2Refreshed, err := vm.GetBlockInternal(context.Background(), blk2.ID()) + tvm.vm.State.Flush() + blk2Refreshed, err := tvm.vm.GetBlockInternal(context.Background(), blk2.ID()) if err != nil { t.Fatal(err) } blk1RefreshedID := blk2Refreshed.Parent() - blk1Refreshed, err := vm.GetBlockInternal(context.Background(), blk1RefreshedID) + blk1Refreshed, err := tvm.vm.GetBlockInternal(context.Background(), blk1RefreshedID) if err != nil { t.Fatal(err) } @@ -376,11 +444,14 @@ func TestBuildEthTxBlock(t *testing.T) { } restartedVM := &VM{} + newCTX := utilstest.NewTestSnowContext(t) + // Use the network upgrades from the existing VM's context to ensure consistency + newCTX.NetworkUpgrades = tvm.vm.ctx.NetworkUpgrades if err := restartedVM.Initialize( context.Background(), - utils.TestSnowContext(), - dbManager, + newCTX, + tvm.db, []byte(genesisJSONSubnetEVM), []byte(""), []byte(`{"pruning-enabled":true}`), @@ -415,8 +486,15 @@ func TestBuildEthTxBlock(t *testing.T) { func TestSetPreferenceRace(t *testing.T) { // Create two VMs which will agree on block A and then // build the two distinct preferred chains above - vm1, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, `{"pruning-enabled":true}`, "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, `{"pruning-enabled":true}`, "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + configJSON: `{"pruning-enabled":false}`, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -434,7 +512,7 @@ func TestSetPreferenceRace(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := 
types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -495,7 +573,7 @@ func TestSetPreferenceRace(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -656,8 +734,15 @@ func TestSetPreferenceRace(t *testing.T) { // accept block C, which should be an orphaned block at this point and // get rejected. func TestReorgProtection(t *testing.T) { - vm1, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, `{"pruning-enabled":false}`, "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, `{"pruning-enabled":false}`, "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + configJSON: `{"pruning-enabled":false}`, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -675,7 +760,7 @@ func TestReorgProtection(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -736,7 +821,7 @@ func TestReorgProtection(t *testing.T) { txs := 
make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -827,8 +912,14 @@ func TestReorgProtection(t *testing.T) { // / \ // B C func TestNonCanonicalAccept(t *testing.T) { - vm1, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -846,7 +937,7 @@ func TestNonCanonicalAccept(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -924,7 +1015,7 @@ func TestNonCanonicalAccept(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1023,8 +1114,14 @@ func TestNonCanonicalAccept(t *testing.T) { // | // D func TestStickyPreference(t *testing.T) { - vm1, _, _ := GenesisVM(t, true, 
genesisJSONSubnetEVM, "", "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -1042,7 +1139,7 @@ func TestStickyPreference(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1103,7 +1200,7 @@ func TestStickyPreference(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1286,8 +1383,14 @@ func TestStickyPreference(t *testing.T) { // | // D func TestUncleBlock(t *testing.T) { - vm1, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -1304,7 +1407,7 @@ func TestUncleBlock(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := 
types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1363,7 +1466,7 @@ func TestUncleBlock(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1470,32 +1573,34 @@ func TestUncleBlock(t *testing.T) { // Regression test to ensure that a VM that is not able to parse a block that // contains no transactions. func TestEmptyBlock(t *testing.T) { - vm, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) - blk, err := 
vm.BuildBlock(context.Background()) + blk, err := tvm.vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -1511,9 +1616,9 @@ func TestEmptyBlock(t *testing.T) { new(trie.Trie), ) - emptyBlock := vm.newBlock(emptyEthBlock) + emptyBlock := tvm.vm.newBlock(emptyEthBlock) - if _, err := vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { + if _, err := tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) } if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { @@ -1530,8 +1635,14 @@ func TestEmptyBlock(t *testing.T) { // | // D func TestAcceptReorg(t *testing.T) { - vm1, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") - vm2, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvmConfig := testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + } + tvm1 := newVM(t, tvmConfig) + tvm2 := newVM(t, tvmConfig) + + vm1 := tvm1.vm + vm2 := tvm2.vm defer func() { if err := vm1.Shutdown(context.Background()); err != nil { @@ -1549,7 +1660,7 @@ func TestAcceptReorg(t *testing.T) { vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1610,7 +1721,7 @@ func TestAcceptReorg(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1]) + signedTx, err 
:= types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } @@ -1733,32 +1844,34 @@ func TestAcceptReorg(t *testing.T) { } func TestFutureBlock(t *testing.T) { - vm, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) - blkA, err := vm.BuildBlock(context.Background()) + blkA, err := tvm.vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -1767,7 +1880,7 @@ func TestFutureBlock(t *testing.T) { internalBlkA := blkA.(*chain.BlockWrapper).Block.(*Block) modifiedHeader := types.CopyHeader(internalBlkA.ethBlock.Header()) // Set the VM's clock to the time of the produced block - vm.clock.Set(time.Unix(int64(modifiedHeader.Time), 0)) + tvm.vm.clock.Set(time.Unix(int64(modifiedHeader.Time), 0)) // Set the modified time to exceed the allowed future time modifiedTime := modifiedHeader.Time + uint64(maxFutureBlockTime.Seconds()+1) modifiedHeader.Time = 
modifiedTime @@ -1779,7 +1892,7 @@ func TestFutureBlock(t *testing.T) { trie.NewStackTrie(nil), ) - futureBlock := vm.newBlock(modifiedBlock) + futureBlock := tvm.vm.newBlock(modifiedBlock) if err := futureBlock.Verify(context.Background()); err == nil { t.Fatal("Future block should have failed verification due to block timestamp too far in the future") @@ -1789,32 +1902,34 @@ func TestFutureBlock(t *testing.T) { } func TestLastAcceptedBlockNumberAllow(t *testing.T) { - vm, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) - blk, err := vm.BuildBlock(context.Background()) + blk, err := tvm.vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -1823,17 +1938,17 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("Block failed verification on VM: %s", err) } - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { + if err := tvm.vm.SetPreference(context.Background(), 
blk.ID()); err != nil { t.Fatal(err) } blkHeight := blk.Height() blkHash := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) + tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(true) ctx := context.Background() - b, err := vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) + b, err := tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if err != nil { t.Fatal(err) } @@ -1841,9 +1956,9 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } - vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) + tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) - _, err = vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) + _, err = tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if !errors.Is(err, eth.ErrUnfinalizedData) { t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) } @@ -1852,7 +1967,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("VM failed to accept block: %s", err) } - if b := vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { + if b := tvm.vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } } @@ -1860,31 +1975,33 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { // Regression test to ensure we can build blocks if we are starting with the // Subnet EVM ruleset in genesis. 
func TestBuildSubnetEVMBlock(t *testing.T) { - vm, _, _ := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") @@ -1893,25 +2010,25 @@ func TestBuildSubnetEVMBlock(t *testing.T) { txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs := tvm.vm.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index 
%d: %s", i, err) } } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) ethBlk := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock if customtypes.BlockGasCost(ethBlk) == nil || customtypes.BlockGasCost(ethBlk).Cmp(big.NewInt(100)) < 0 { t.Fatalf("expected blockGasCost to be at least 100 but got %d", customtypes.BlockGasCost(ethBlk)) } - chainConfig := params.GetExtra(vm.chainConfig) + chainConfig := params.GetExtra(tvm.vm.chainConfig) minRequiredTip, err := header.EstimateRequiredTip(chainConfig, ethBlk.Header()) if err != nil { t.Fatal(err) @@ -1920,7 +2037,7 @@ func TestBuildSubnetEVMBlock(t *testing.T) { t.Fatalf("expected minRequiredTip to be at least 0.05 gwei but got %d", minRequiredTip) } - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := tvm.vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -1929,7 +2046,7 @@ func TestBuildSubnetEVMBlock(t *testing.T) { } // Confirm all txs are present - ethBlkTxs := vm.blockChain.GetBlockByNumber(2).Transactions() + ethBlkTxs := tvm.vm.blockChain.GetBlockByNumber(2).Transactions() for i, tx := range txs { if len(ethBlkTxs) <= i { t.Fatalf("missing transactions expected: %d but found: %d", len(txs), len(ethBlkTxs)) @@ -1953,18 +2070,20 @@ func TestBuildAllowListActivationBlock(t *testing.T) { if err != nil { t.Fatal(err) } - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - genesisState, err := vm.blockChain.StateAt(vm.blockChain.Genesis().Root()) + genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) if err != nil 
{ t.Fatal(err) } @@ -1975,26 +2094,26 @@ func TestBuildAllowListActivationBlock(t *testing.T) { // Send basic transaction to construct a simple block and confirm that the precompile state configuration in the worker behaves correctly. tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") } // Verify that the allow list config activation was handled correctly in the first block. 
- blkState, err := vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Root()) + blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Root()) if err != nil { t.Fatal(err) } @@ -2010,7 +2129,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { managerKey := testKeys[1] managerAddress := testEthAddrs[1] genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONDurango)); err != nil { + if err := genesis.UnmarshalJSON([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango]))); err != nil { t.Fatal(err) } // this manager role should not be activated because DurangoTimestamp is in the future @@ -2040,18 +2159,22 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { } upgradeBytesJSON, err := json.Marshal(upgradeConfig) require.NoError(t, err) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", string(upgradeBytesJSON)) + + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + upgradeJSON: string(upgradeBytesJSON), + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - genesisState, err := vm.blockChain.StateAt(vm.blockChain.Genesis().Root()) + genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) if err != nil { t.Fatal(err) } @@ -2071,35 +2194,35 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { // Submit a successful transaction tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) 
require.NoError(t, err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) } // Submit a rejected transaction, should throw an error because manager is not activated tx2 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(vm.chainConfig.ChainID), managerKey) + signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), managerKey.ToECDSA()) require.NoError(t, err) - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed) - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) @@ -2114,23 +2237,23 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.Equal(t, signedTx0.Hash(), txs[0].Hash()) - vm.clock.Set(reenableAllowlistTime.Add(time.Hour)) + tvm.vm.clock.Set(reenableAllowlistTime.Add(time.Hour)) // Re-Submit a successful 
transaction tx0 = types.NewTransaction(uint64(1), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx0, err = types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err = types.SignTx(tx0, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) require.NoError(t, errs[0]) // accept block to trigger upgrade - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - blkState, err := vm.blockChain.StateAt(block.Root()) + blkState, err := tvm.vm.blockChain.StateAt(block.Root()) require.NoError(t, err) // Check that address 0 is admin and address 1 is manager @@ -2139,17 +2262,17 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { role = txallowlist.GetTxAllowListStatus(blkState, managerAddress) require.Equal(t, allowlist.ManagerRole, role) - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust // Submit a successful transaction, should not throw an error because manager is activated tx3 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx3, err := types.SignTx(tx3, types.NewEIP155Signer(vm.chainConfig.ChainID), managerKey) + signedTx3, err := types.SignTx(tx3, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), managerKey.ToECDSA()) require.NoError(t, err) - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx3}) + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // 
add 2 seconds for gas fee to adjust + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx3}) require.NoError(t, errs[0]) - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) @@ -2163,7 +2286,8 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { func TestVerifyManagerConfig(t *testing.T) { genesis := &core.Genesis{} - require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONDurango))) + ctx, dbManager, genesisBytes, _ := setupGenesis(t, upgradetest.Durango) + require.NoError(t, genesis.UnmarshalJSON(genesisBytes)) durangoTimestamp := time.Now().Add(10 * time.Hour) params.GetExtra(genesis.Config).DurangoTimestamp = utils.TimeToNewUint64(durangoTimestamp) @@ -2176,12 +2300,11 @@ func TestVerifyManagerConfig(t *testing.T) { require.NoError(t, err) vm := &VM{} - ctx, dbManager, genesisBytes, _ := setupGenesis(t, string(genesisJSON)) err = vm.Initialize( context.Background(), ctx, dbManager, - genesisBytes, + genesisJSON, // Manually set genesis bytes due to custom genesis []byte(""), []byte(""), []*commonEng.Fx{}, @@ -2190,7 +2313,7 @@ func TestVerifyManagerConfig(t *testing.T) { require.ErrorIs(t, err, allowlist.ErrCannotAddManagersBeforeDurango) genesis = &core.Genesis{} - require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONDurango))) + require.NoError(t, genesis.UnmarshalJSON([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango])))) params.GetExtra(genesis.Config).DurangoTimestamp = utils.TimeToNewUint64(durangoTimestamp) genesisJSON, err = genesis.MarshalJSON() require.NoError(t, err) @@ -2206,12 +2329,12 @@ func TestVerifyManagerConfig(t *testing.T) { require.NoError(t, err) vm = &VM{} - ctx, dbManager, genesisBytes, _ = setupGenesis(t, string(genesisJSON)) + ctx, dbManager, _, _ = setupGenesis(t, upgradetest.Latest) err = vm.Initialize( context.Background(), ctx, dbManager, - genesisBytes, + genesisJSON, // Manually set genesis bytes 
due to custom genesis upgradeBytesJSON, []byte(""), []*commonEng.Fx{}, @@ -2253,20 +2376,23 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { } `, disableAllowListTimestamp.Unix()) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", upgradeConfig) + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + upgradeJSON: upgradeConfig, + }) - vm.clock.Set(disableAllowListTimestamp) // upgrade takes effect after a block is issued, so we can set vm's clock here. + tvm.vm.clock.Set(disableAllowListTimestamp) // upgrade takes effect after a block is issued, so we can set vm's clock here. defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - genesisState, err := vm.blockChain.StateAt(vm.blockChain.Genesis().Root()) + genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) if err != nil { t.Fatal(err) } @@ -2283,27 +2409,27 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Submit a successful transaction tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx1, err 
:= types.SignTx(tx1, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2319,13 +2445,13 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { <-newTxPoolHeadChan // wait for new head in tx pool // retry the rejected Tx, which should now succeed - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust - blk = issueAndAccept(t, vm) + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust + blk = issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2367,18 +2493,20 @@ func TestFeeManagerChangeFee(t *testing.T) { if err != nil { t.Fatal(err) } - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + 
tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - genesisState, err := vm.blockChain.StateAt(vm.blockChain.Genesis().Root()) + genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) if err != nil { t.Fatal(err) } @@ -2393,10 +2521,10 @@ func TestFeeManagerChangeFee(t *testing.T) { t.Fatalf("Expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) } // Contract is initialized but no preconfig is given, reader should return genesis fee config - feeConfig, lastChangedAt, err := vm.blockChain.GetFeeConfigAt(vm.blockChain.Genesis().Header()) + feeConfig, lastChangedAt, err := tvm.vm.blockChain.GetFeeConfigAt(tvm.vm.blockChain.Genesis().Header()) require.NoError(t, err) require.EqualValues(t, feeConfig, testLowFeeConfig) - require.Zero(t, vm.blockChain.CurrentBlock().Number.Cmp(lastChangedAt)) + require.Zero(t, tvm.vm.blockChain.CurrentBlock().Number.Cmp(lastChangedAt)) // set a different fee config now testHighFeeConfig := testLowFeeConfig @@ -2416,17 +2544,17 @@ func TestFeeManagerChangeFee(t *testing.T) { Data: data, }) - signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") @@ -2434,10 +2562,10 @@ func TestFeeManagerChangeFee(t *testing.T) { block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - feeConfig, lastChangedAt, err = vm.blockChain.GetFeeConfigAt(block.Header()) + feeConfig, lastChangedAt, err = 
tvm.vm.blockChain.GetFeeConfigAt(block.Header()) require.NoError(t, err) require.EqualValues(t, testHighFeeConfig, feeConfig) - require.EqualValues(t, vm.blockChain.CurrentBlock().Number, lastChangedAt) + require.EqualValues(t, tvm.vm.blockChain.CurrentBlock().Number, lastChangedAt) // should fail, with same params since fee is higher now tx2 := types.NewTx(&types.DynamicFeeTx{ @@ -2451,12 +2579,12 @@ func TestFeeManagerChangeFee(t *testing.T) { Data: data, }) - signedTx2, err := types.SignTx(tx2, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx2, err := types.SignTx(tx2, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - err = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0] + err = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0] require.ErrorIs(t, err, txpool.ErrUnderpriced) } @@ -2471,36 +2599,38 @@ func TestAllowFeeRecipientDisabled(t *testing.T) { if err != nil { t.Fatal(err) } - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) - vm.miner.SetEtherbase(common.HexToAddress("0x0123456789")) // set non-blackhole address by force + tvm.vm.miner.SetEtherbase(common.HexToAddress("0x0123456789")) // set non-blackhole address by force defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if 
err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) - blk, err := vm.BuildBlock(context.Background()) + blk, err := tvm.vm.BuildBlock(context.Background()) require.NoError(t, err) // this won't return an error since miner will set the etherbase to blackhole address ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2518,7 +2648,7 @@ func TestAllowFeeRecipientDisabled(t *testing.T) { trie.NewStackTrie(nil), ) - modifiedBlk := vm.newBlock(modifiedBlock) + modifiedBlk := tvm.vm.newBlock(modifiedBlock) require.ErrorIs(t, modifiedBlk.Verify(context.Background()), vmerrors.ErrInvalidCoinbase) } @@ -2542,31 +2672,34 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { if err != nil { t.Fatal(err) } - vm, _, _ := GenesisVM(t, true, string(genesisJSON), string(configJSON), "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + configJSON: string(configJSON), + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil 
{ t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") @@ -2574,7 +2707,7 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // Verify that etherBase has received fees - blkState, err := vm.blockChain.StateAt(ethBlock.Root()) + blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) if err != nil { t.Fatal(err) } @@ -2619,15 +2752,19 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { } `, disableTime.Unix()) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), string(configJSON), upgradeConfig) + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + configJSON: string(configJSON), + upgradeJSON: upgradeConfig, + }) defer func() { - err := vm.Shutdown(context.Background()) + err := tvm.vm.Shutdown(context.Background()) require.NoError(t, err) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) testAddr := common.HexToAddress("0x9999991111") data, err := rewardmanager.PackSetRewardAddress(testAddr) @@ -2637,36 +2774,36 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { tx := types.NewTransaction(uint64(0), rewardmanager.ContractAddress, big.NewInt(1), gas, big.NewInt(testMinGasPrice), data) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), 
testKeys[0].ToECDSA()) require.NoError(t, err) - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for _, err := range txErrors { require.NoError(t, err) } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address is activated at this block so this is fine tx1 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, testAddr, ethBlock.Coinbase()) // reward address was activated at previous block // Verify that etherBase has received fees - blkState, err := vm.blockChain.StateAt(ethBlock.Root()) + blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) require.NoError(t, err) balance := blkState.GetBalance(testAddr) @@ -2677,17 +2814,17 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { previousBalance := blkState.GetBalance(etherBase) // issue a new block to trigger the upgrade - vm.clock.Set(disableTime) // upgrade takes effect after a block is issued, so we can set vm's clock here. 
+ tvm.vm.clock.Set(disableTime) // upgrade takes effect after a block is issued, so we can set vm's clock here. tx2 := types.NewTransaction(uint64(1), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2697,18 +2834,18 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { require.Equal(t, testAddr, ethBlock.Coinbase()) require.GreaterOrEqual(t, int64(ethBlock.Time()), disableTime.Unix()) - vm.clock.Set(vm.clock.Time().Add(3 * time.Hour)) // let time pass to decrease gas price + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(3 * time.Hour)) // let time pass to decrease gas price // issue another block to verify that the reward manager is disabled tx2 = types.NewTransaction(uint64(2), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx2, err = types.SignTx(tx2, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx2, err = types.SignTx(tx2, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, 
newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2718,7 +2855,7 @@ func TestRewardManagerPrecompileSetRewardAddress(t *testing.T) { require.GreaterOrEqual(t, int64(ethBlock.Time()), disableTime.Unix()) // Verify that Blackhole has received fees - blkState, err = vm.blockChain.StateAt(ethBlock.Root()) + blkState, err = tvm.vm.blockChain.StateAt(ethBlock.Root()) require.NoError(t, err) balance = blkState.GetBalance(etherBase) @@ -2759,14 +2896,18 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { ] } `, disableTime.Unix()) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), string(configJSON), upgradeConfig) + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + configJSON: string(configJSON), + upgradeJSON: upgradeConfig, + }) defer func() { - require.NoError(t, vm.Shutdown(context.Background())) + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) data, err := rewardmanager.PackAllowFeeRecipients() require.NoError(t, err) @@ -2775,36 +2916,36 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { tx := types.NewTransaction(uint64(0), rewardmanager.ContractAddress, big.NewInt(1), gas, big.NewInt(testMinGasPrice), data) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for _, err := range txErrors { require.NoError(t, err) } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), 
common.Hash(blk.ID())) ethBlock := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, constants.BlackholeAddr, ethBlock.Coinbase()) // reward address is activated at this block so this is fine tx1 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice*3), nil) - signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address was activated at previous block // Verify that etherBase has received fees - blkState, err := vm.blockChain.StateAt(ethBlock.Root()) + blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) require.NoError(t, err) balance := blkState.GetBalance(etherBase) @@ -2814,34 +2955,34 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { // This should revert back to burning fees previousBalance := blkState.GetBalance(constants.BlackholeAddr) - vm.clock.Set(disableTime) // upgrade takes effect after a block is issued, so we can set vm's clock here. + tvm.vm.clock.Set(disableTime) // upgrade takes effect after a block is issued, so we can set vm's clock here. 
tx2 := types.NewTransaction(uint64(1), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx2, err := types.SignTx(tx2, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // reward address was activated at previous block require.GreaterOrEqual(t, int64(ethBlock.Time()), disableTime.Unix()) - vm.clock.Set(vm.clock.Time().Add(3 * time.Hour)) // let time pass so that gas price is reduced + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(3 * time.Hour)) // let time pass so that gas price is reduced tx2 = types.NewTransaction(uint64(2), testEthAddrs[0], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx2, err = types.SignTx(tx2, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx2, err = types.SignTx(tx2, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - txErrors = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) + txErrors = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) for _, err := range txErrors { require.NoError(t, err) } - blk = issueAndAccept(t, vm) + blk = issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) ethBlock = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -2849,7 +2990,7 @@ func TestRewardManagerPrecompileAllowFeeRecipients(t *testing.T) { 
require.Greater(t, int64(ethBlock.Time()), disableTime.Unix()) // Verify that Blackhole has received fees - blkState, err = vm.blockChain.StateAt(ethBlock.Root()) + blkState, err = tvm.vm.blockChain.StateAt(ethBlock.Root()) require.NoError(t, err) balance = blkState.GetBalance(constants.BlackholeAddr) @@ -2862,32 +3003,35 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // TODO update this test when there is a future network upgrade that can be skipped in the config. t.Skip("no skippable upgrades") - vm, dbManager, appSender := GenesisVM(t, true, genesisJSONPreSubnetEVM, `{"pruning-enabled":true}`, "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONPreSubnetEVM, + configJSON: `{"pruning-enabled":true}`, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) key := testutils.NewKey(t) tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { t.Fatalf("Expected new block to match") @@ -2897,24 +3041,24 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { // use the block's timestamp instead of 0 since rewind 
to genesis // is hardcoded to be allowed in core/genesis.go. genesisWithUpgrade := &core.Genesis{} - require.NoError(t, json.Unmarshal([]byte(genesisJSONDurango), genesisWithUpgrade)) + require.NoError(t, json.Unmarshal([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango])), genesisWithUpgrade)) params.GetExtra(genesisWithUpgrade.Config).EtnaTimestamp = utils.TimeToNewUint64(blk.Timestamp()) genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) require.NoError(t, err) // Reset metrics to allow re-initialization - vm.ctx.Metrics = metrics.NewPrefixGatherer() + tvm.vm.ctx.Metrics = metrics.NewPrefixGatherer() // this will not be allowed - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, []*commonEng.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), tvm.vm.ctx, tvm.db, genesisWithUpgradeBytes, []byte{}, []byte{}, []*commonEng.Fx{}, tvm.appSender) require.ErrorContains(t, err, "mismatching Cancun fork timestamp in database") // Reset metrics to allow re-initialization - vm.ctx.Metrics = metrics.NewPrefixGatherer() + tvm.vm.ctx.Metrics = metrics.NewPrefixGatherer() // try again with skip-upgrade-check config := []byte(`{"skip-upgrade-check": true}`) - err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, []*commonEng.Fx{}, appSender) + err = reinitVM.Initialize(context.Background(), tvm.vm.ctx, tvm.db, genesisWithUpgradeBytes, []byte{}, config, []*commonEng.Fx{}, tvm.appSender) require.NoError(t, err) require.NoError(t, reinitVM.Shutdown(context.Background())) } @@ -2929,39 +3073,39 @@ func TestParentBeaconRootBlock(t *testing.T) { }{ { name: "non-empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Durango]), beaconRoot: &common.Hash{0x01}, expectedError: true, // err string wont work because it will also fail with blob gas is non-empty 
(zeroed) }, { name: "empty parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Durango]), beaconRoot: &common.Hash{}, expectedError: true, }, { name: "nil parent beacon root in Durango", - genesisJSON: genesisJSONDurango, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Durango]), beaconRoot: nil, expectedError: false, }, { name: "non-empty parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Etna]), beaconRoot: &common.Hash{0x01}, expectedError: true, errString: "expected empty hash", }, { name: "empty parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Etna]), beaconRoot: &common.Hash{}, expectedError: false, }, { name: "nil parent beacon root in E-Upgrade (Cancun)", - genesisJSON: genesisJSONEtna, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Etna]), beaconRoot: nil, expectedError: true, errString: "header is missing parentBeaconRoot", @@ -2970,32 +3114,34 @@ func TestParentBeaconRootBlock(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - vm, _, _ := GenesisVM(t, true, test.genesisJSON, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: test.genesisJSON, + }) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) if err != nil { t.Fatal(err) } - txErrors := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + txErrors := 
tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) } } - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) - blk, err := vm.BuildBlock(context.Background()) + blk, err := tvm.vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -3006,7 +3152,7 @@ func TestParentBeaconRootBlock(t *testing.T) { header.ParentBeaconRoot = test.beaconRoot parentBeaconEthBlock := ethBlock.WithSeal(header) - parentBeaconBlock := vm.newBlock(parentBeaconEthBlock) + parentBeaconBlock := tvm.vm.newBlock(parentBeaconEthBlock) errCheck := func(err error) { if test.expectedError { @@ -3020,7 +3166,7 @@ func TestParentBeaconRootBlock(t *testing.T) { } } - _, err = vm.ParseBlock(context.Background(), parentBeaconBlock.Bytes()) + _, err = tvm.vm.ParseBlock(context.Background(), parentBeaconBlock.Bytes()) errCheck(err) err = parentBeaconBlock.Verify(context.Background()) errCheck(err) @@ -3030,7 +3176,7 @@ func TestParentBeaconRootBlock(t *testing.T) { func TestStandaloneDB(t *testing.T) { vm := &VM{} - ctx := utils.TestSnowContext() + ctx := utilstest.NewTestSnowContext(t) baseDB := memdb.New() atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, baseDB)) ctx.SharedMemory = atomicMemory.NewSharedMemory(ctx.ChainID) @@ -3054,7 +3200,7 @@ func TestStandaloneDB(t *testing.T) { context.Background(), ctx, sharedDB, - []byte(genesisJSONLatest), + []byte(toGenesisJSON(forkToChainConfig[upgradetest.Latest])), nil, []byte(configJSON), []*commonEng.Fx{}, @@ -3069,7 +3215,7 @@ func TestStandaloneDB(t *testing.T) { acceptedBlockEvent := make(chan core.ChainEvent, 1) vm.blockChain.SubscribeChainAcceptedEvent(acceptedBlockEvent) tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, 
big.NewInt(testMinGasPrice), nil) - signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) require.NoError(t, errs[0]) @@ -3117,7 +3263,9 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { if err != nil { t.Fatal(err) } - vm, sharedDB, appSender := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) // tx pool min base fee should be the high fee config tx := types.NewTx(&types.DynamicFeeTx{ @@ -3130,15 +3278,15 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { GasTipCap: common.Big0, Data: nil, }) - signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) require.NoError(t, err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) require.Len(t, errs, 1) require.ErrorIs(t, errs[0], txpool.ErrUnderpriced) // should fail because mempool expects higher fee // restart vm and try again - restartedVM, err := restartVM(vm, sharedDB, genesisJSON, appSender, true) + restartedVM, err := restartVM(tvm.vm, tvm.db, genesisJSON, tvm.appSender, true) require.NoError(t, err) // it still should fail @@ -3151,7 +3299,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { restartedVM.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) restartedVM.clock.Set(utils.Uint64ToTime(precompileActivationTime).Add(time.Second * 10)) tx = types.NewTransaction(uint64(0), testEthAddrs[0], common.Big0, 21000, big.NewInt(testHighFeeConfig.MinBaseFee.Int64()), nil) - signedTx, err = types.SignTx(tx, 
types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) require.NoError(t, err) errs = restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx}) require.NoError(t, errs[0]) @@ -3159,10 +3307,13 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { newHead := <-newTxPoolHeadChan require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) // Contract is initialized but no preconfig is given, reader should return genesis fee config - feeConfig, lastChangedAt, err := vm.blockChain.GetFeeConfigAt(vm.blockChain.Genesis().Header()) + // We must query the current block header here (not genesis) because the FeeManager precompile + // is only activated at precompileActivationTime, not at genesis. Querying the genesis header would + // return the chain config fee config and lastChangedAt as zero, which is not correct after activation. + feeConfig, lastChangedAt, err := restartedVM.blockChain.GetFeeConfigAt(restartedVM.blockChain.CurrentBlock()) require.NoError(t, err) require.EqualValues(t, feeConfig, testHighFeeConfig) - require.Zero(t, vm.blockChain.CurrentBlock().Number.Cmp(lastChangedAt)) + require.EqualValues(t, restartedVM.blockChain.CurrentBlock().Number, lastChangedAt) // set a lower fee config now through feemanager testLowFeeConfig := testHighFeeConfig @@ -3180,7 +3331,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { }) // let some time pass for block gas cost restartedVM.clock.Set(restartedVM.clock.Time().Add(time.Second * 10)) - signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) require.NoError(t, err) errs = restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx}) require.NoError(t, errs[0]) @@ -3197,7 +3348,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { // send 
another tx with low fee tx = types.NewTransaction(uint64(2), testEthAddrs[0], common.Big0, 21000, big.NewInt(testLowFeeConfig.MinBaseFee.Int64()), nil) - signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) require.NoError(t, err) errs = restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx}) require.NoError(t, errs[0]) @@ -3208,13 +3359,13 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { require.Equal(t, newHead.Head.Hash(), common.Hash(blk.ID())) // Regression: Mempool should see the new config after restart - restartedVM, err = restartVM(restartedVM, sharedDB, genesisJSON, appSender, true) + restartedVM, err = restartVM(restartedVM, tvm.db, genesisJSON, tvm.appSender, true) require.NoError(t, err) newTxPoolHeadChan = make(chan core.NewTxPoolReorgEvent, 1) restartedVM.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) // send a tx with low fee tx = types.NewTransaction(uint64(3), testEthAddrs[0], common.Big0, 21000, big.NewInt(testLowFeeConfig.MinBaseFee.Int64()), nil) - signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0]) + signedTx, err = types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) require.NoError(t, err) errs = restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx}) require.NoError(t, errs[0]) @@ -3284,7 +3435,7 @@ func TestWaitForEvent(t *testing.T) { }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) for _, err := range vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) { @@ -3298,7 +3449,7 @@ func TestWaitForEvent(t *testing.T) { name: 
"WaitForEvent doesn't return once a block is built and accepted", testCase: func(t *testing.T, vm *VM) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) for _, err := range vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) { @@ -3335,7 +3486,7 @@ func TestWaitForEvent(t *testing.T) { time.Sleep(time.Second * 2) // sleep some time to let the gas capacity to refill tx = types.NewTransaction(uint64(1), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err = types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err = types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) for _, err := range vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) { @@ -3368,7 +3519,7 @@ func TestWaitForEvent(t *testing.T) { name: "WaitForEvent waits some time after a block is built", testCase: func(t *testing.T, vm *VM) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) for _, err := range vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) { @@ -3387,7 +3538,7 @@ func TestWaitForEvent(t *testing.T) { require.NoError(t, blk.Accept(context.Background())) tx = types.NewTransaction(uint64(1), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) - signedTx, err = types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx, err = 
types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) for _, err := range vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) { @@ -3414,9 +3565,12 @@ func TestWaitForEvent(t *testing.T) { require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) genesisJSON, err := genesis.MarshalJSON() require.NoError(t, err) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") - testCase.testCase(t, vm) - vm.Shutdown(context.Background()) + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }).vm + + testCase.testCase(t, tvm) + tvm.Shutdown(context.Background()) }) } } diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 8900b40e9d..ab19603b16 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -17,6 +17,7 @@ import ( commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/libevm/common" "github.com/ava-labs/libevm/common/hexutil" @@ -51,37 +52,41 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { } // initialize the VM with these upgrade bytes - vm, dbManager, appSender := GenesisVM(t, true, genesisJSONSubnetEVM, "", string(upgradeBytesJSON)) - vm.clock.Set(enableAllowListTimestamp) + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + upgradeJSON: string(upgradeBytesJSON), + }) + + tvm.vm.clock.Set(enableAllowListTimestamp) // Submit a successful transaction tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx0, err := types.SignTx(tx0, 
types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) assert.NoError(t, err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) - signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) if err != nil { t.Fatal(err) } - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) } // shutdown the vm - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } // prepare the new upgrade bytes to disable the TxAllowList - disableAllowListTimestamp := vm.clock.Time().Add(10 * time.Hour) // arbitrary choice + disableAllowListTimestamp := tvm.vm.clock.Time().Add(10 * time.Hour) // arbitrary choice upgradeConfig.PrecompileUpgrades = append( upgradeConfig.PrecompileUpgrades, extras.PrecompileUpgrade{ @@ -96,43 +101,43 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // restart the vm // Reset metrics to allow re-initialization - vm.ctx.Metrics = metrics.NewPrefixGatherer() + tvm.vm.ctx.Metrics = metrics.NewPrefixGatherer() - if err := vm.Initialize( - context.Background(), vm.ctx, dbManager, []byte(genesisJSONSubnetEVM), upgradeBytesJSON, []byte{}, []*commonEng.Fx{}, appSender, + 
if err := tvm.vm.Initialize( + context.Background(), tvm.vm.ctx, tvm.db, []byte(genesisJSONSubnetEVM), upgradeBytesJSON, []byte{}, []*commonEng.Fx{}, tvm.appSender, ); err != nil { t.Fatal(err) } defer func() { - if err := vm.Shutdown(context.Background()); err != nil { + if err := tvm.vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() // Set the VM's state to NormalOp to initialize the tx pool. - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { + if err := tvm.vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { + if err := tvm.vm.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) - vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - vm.clock.Set(disableAllowListTimestamp) + tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + tvm.vm.clock.Set(disableAllowListTimestamp) // Make a block, previous rules still apply (TxAllowList is active) // Submit a successful transaction - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } // Submit a rejected transaction, should throw an error - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) } - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -148,13 +153,13 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { <-newTxPoolHeadChan // wait for new head in tx pool 
// retry the rejected Tx, which should now succeed - errs = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) + errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; err != nil { t.Fatalf("Failed to add tx at index: %s", err) } - vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust - blk = issueAndAccept(t, vm) + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust + blk = issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*Block).ethBlock @@ -183,7 +188,7 @@ func TestNetworkUpgradesOverriden(t *testing.T) { }` vm := &VM{} - ctx, dbManager, genesisBytes, _ := setupGenesis(t, string(genesisBytes)) + ctx, dbManager, _, _ := setupGenesis(t, upgradetest.Latest) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } @@ -278,32 +283,36 @@ func TestVMStateUpgrade(t *testing.T) { require.Contains(t, upgradeBytesJSON, upgradedCodeStr) // initialize the VM with these upgrade bytes - vm, _, _ := GenesisVM(t, true, genesisStr, "", upgradeBytesJSON) - defer func() { require.NoError(t, vm.Shutdown(context.Background())) }() + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisStr, + upgradeJSON: upgradeBytesJSON, + }) + + defer func() { require.NoError(t, tvm.vm.Shutdown(context.Background())) }() // Verify the new account doesn't exist yet - genesisState, err := vm.blockChain.State() + genesisState, err := tvm.vm.blockChain.State() require.NoError(t, err) require.Equal(t, common.U2560, genesisState.GetBalance(newAccount)) // Advance the chain to the upgrade time - vm.clock.Set(upgradeTimestamp) + tvm.vm.clock.Set(upgradeTimestamp) // Submit a successful (unrelated) transaction, so we can build a block // in this tx, testEthAddrs[1] sends 1 wei to 
itself. tx0 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) - signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(vm.chainConfig.ChainID), testKeys[1]) + signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) require.NoError(t, err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) require.NoError(t, errs[0], "Failed to add tx") - blk := issueAndAccept(t, vm) + blk := issueAndAccept(t, tvm.vm) require.NotNil(t, blk) require.EqualValues(t, 1, blk.Height()) // Verify the state upgrade was applied - state, err := vm.blockChain.State() + state, err := tvm.vm.blockChain.State() require.NoError(t, err) // Existing account @@ -337,14 +346,14 @@ func TestVMEupgradeActivatesCancun(t *testing.T) { }{ { name: "Etna activates Cancun", - genesisJSON: genesisJSONEtna, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Etna]), check: func(t *testing.T, vm *VM) { require.True(t, vm.chainConfig.IsCancun(common.Big0, DefaultEtnaTime)) }, }, { name: "Later Etna activates Cancun", - genesisJSON: genesisJSONDurango, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Durango]), upgradeJSON: func() string { upgrade := &extras.UpgradeConfig{ NetworkUpgradeOverrides: &extras.NetworkUpgrades{ @@ -362,7 +371,7 @@ func TestVMEupgradeActivatesCancun(t *testing.T) { }, { name: "Changed Etna changes Cancun", - genesisJSON: genesisJSONEtna, + genesisJSON: toGenesisJSON(forkToChainConfig[upgradetest.Etna]), upgradeJSON: func() string { upgrade := &extras.UpgradeConfig{ NetworkUpgradeOverrides: &extras.NetworkUpgrades{ @@ -381,9 +390,13 @@ func TestVMEupgradeActivatesCancun(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - vm, _, _ := GenesisVM(t, true, test.genesisJSON, "", test.upgradeJSON) - defer func() { require.NoError(t, 
vm.Shutdown(context.Background())) }() - test.check(t, vm) + tvm := newVM(t, testVMConfig{ + genesisJSON: test.genesisJSON, + upgradeJSON: test.upgradeJSON, + }) + + defer func() { require.NoError(t, tvm.vm.Shutdown(context.Background())) }() + test.check(t, tvm.vm) }) } } diff --git a/plugin/evm/vm_validators_test.go b/plugin/evm/vm_validators_test.go index eb8779d18c..93a2345fcd 100644 --- a/plugin/evm/vm_validators_test.go +++ b/plugin/evm/vm_validators_test.go @@ -14,22 +14,19 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/enginetest" avagovalidators "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" - "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" "github.com/ava-labs/subnet-evm/plugin/evm/validators" - "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestValidatorState(t *testing.T) { require := require.New(t) - genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) - genesisJSON, err := genesis.MarshalJSON() - require.NoError(err) + ctx, dbManager, genesisBytes, _ := setupGenesis(t, upgradetest.Latest) vm := &VM{} - ctx, dbManager, genesisBytes, _ := setupGenesis(t, string(genesisJSON)) + appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true testNodeIDs := []ids.NodeID{ @@ -64,7 +61,7 @@ func TestValidatorState(t *testing.T) { }, } appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - err = vm.Initialize( + err := vm.Initialize( context.Background(), ctx, dbManager, @@ -98,7 +95,7 @@ func TestValidatorState(t *testing.T) { vm = 
&VM{} err = vm.Initialize( context.Background(), - utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database + utilstest.NewTestSnowContext(t), // this context does not have validators state, making VM to source it from the database dbManager, genesisBytes, []byte(""), diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 86ae71fe25..5979bae143 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" avagoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -70,20 +71,22 @@ const ( func TestSendWarpMessage(t *testing.T) { require := require.New(t) genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONDurango))) + require.NoError(genesis.UnmarshalJSON([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango])))) params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ warpcontract.ConfigKey: warpcontract.NewDefaultConfig(utils.TimeToNewUint64(upgrade.InitiallyActiveTime)), } genesisJSON, err := genesis.MarshalJSON() require.NoError(err) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(tvm.vm.Shutdown(context.Background())) }() acceptedLogsChan := make(chan []*types.Log, 10) - logsSub := vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) + logsSub := 
tvm.vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) defer logsSub.Unsubscribe() payloadData := avagoUtils.RandomBytes(100) @@ -96,25 +99,25 @@ func TestSendWarpMessage(t *testing.T) { ) require.NoError(err) expectedUnsignedMessage, err := avalancheWarp.NewUnsignedMessage( - vm.ctx.NetworkID, - vm.ctx.ChainID, + tvm.vm.ctx.NetworkID, + tvm.vm.ctx.ChainID, addressedPayload.Bytes(), ) require.NoError(err) // Submit a transaction to trigger sending a warp message tx0 := types.NewTransaction(uint64(0), warpcontract.ContractAddress, big.NewInt(1), 100_000, big.NewInt(testMinGasPrice), warpSendMessageInput) - signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(vm.chainConfig.ChainID), testKeys[0]) + signedTx0, err := types.SignTx(tx0, types.LatestSignerForChainID(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) require.NoError(errs[0]) - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(err) require.Equal(commonEng.PendingTxs, msg) - blk, err := vm.BuildBlock(context.Background()) + blk, err := tvm.vm.BuildBlock(context.Background()) require.NoError(err) require.NoError(blk.Verify(context.Background())) @@ -122,7 +125,7 @@ func TestSendWarpMessage(t *testing.T) { // Verify that the constructed block contains the expected log with an unsigned warp message in the log data ethBlock1 := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock require.Len(ethBlock1.Transactions(), 1) - receipts := rawdb.ReadReceipts(vm.chaindb, ethBlock1.Hash(), ethBlock1.NumberU64(), ethBlock1.Time(), vm.chainConfig) + receipts := rawdb.ReadReceipts(tvm.vm.chaindb, ethBlock1.Hash(), ethBlock1.NumberU64(), ethBlock1.Time(), tvm.vm.chainConfig) require.Len(receipts, 1) require.Len(receipts[0].Logs, 1) @@ -137,17 +140,17 @@ func 
TestSendWarpMessage(t *testing.T) { require.NoError(err) // Verify the signature cannot be fetched before the block is accepted - _, err = vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) + _, err = tvm.vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) require.Error(err) - _, err = vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) + _, err = tvm.vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) require.Error(err) - require.NoError(vm.SetPreference(context.Background(), blk.ID())) + require.NoError(tvm.vm.SetPreference(context.Background(), blk.ID())) require.NoError(blk.Accept(context.Background())) - vm.blockChain.DrainAcceptorQueue() + tvm.vm.blockChain.DrainAcceptorQueue() // Verify the message signature after accepting the block. - rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) + rawSignatureBytes, err := tvm.vm.warpBackend.GetMessageSignature(context.TODO(), unsignedMessage) require.NoError(err) blsSignature, err := bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) @@ -161,21 +164,21 @@ func TestSendWarpMessage(t *testing.T) { } // Verify the produced message signature is valid - require.True(bls.Verify(vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) + require.True(bls.Verify(tvm.vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) // Verify the blockID will now be signed by the backend and produces a valid signature. 
- rawSignatureBytes, err = vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) + rawSignatureBytes, err = tvm.vm.warpBackend.GetBlockSignature(context.TODO(), blk.ID()) require.NoError(err) blsSignature, err = bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) blockHashPayload, err := payload.NewHash(blk.ID()) require.NoError(err) - unsignedMessage, err = avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, blockHashPayload.Bytes()) + unsignedMessage, err = avalancheWarp.NewUnsignedMessage(tvm.vm.ctx.NetworkID, tvm.vm.ctx.ChainID, blockHashPayload.Bytes()) require.NoError(err) // Verify the produced message signature is valid - require.True(bls.Verify(vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) + require.True(bls.Verify(tvm.vm.ctx.PublicKey, blsSignature, unsignedMessage.Bytes())) } func TestValidateWarpMessage(t *testing.T) { @@ -270,20 +273,22 @@ func TestValidateInvalidWarpBlockHash(t *testing.T) { func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.UnsignedMessage, validSignature bool, txPayload []byte) { require := require.New(t) genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONDurango))) + require.NoError(genesis.UnmarshalJSON([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango])))) params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ warpcontract.ConfigKey: warpcontract.NewDefaultConfig(utils.TimeToNewUint64(upgrade.InitiallyActiveTime)), } genesisJSON, err := genesis.MarshalJSON() require.NoError(err) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + }) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(tvm.vm.Shutdown(context.Background())) }() acceptedLogsChan := make(chan []*types.Log, 10) - logsSub := vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) + logsSub := 
tvm.vm.eth.APIBackend.SubscribeAcceptedLogsEvent(acceptedLogsChan) defer logsSub.Unsubscribe() nodeID1 := ids.GenerateTestNodeID() @@ -306,7 +311,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned minimumValidPChainHeight := uint64(10) getValidatorSetTestErr := errors.New("can't get validator set test error") - vm.ctx.ValidatorState = &validatorstest.State{ + tvm.vm.ctx.ValidatorState = &validatorstest.State{ // TODO: test both Primary Network / C-Chain and non-Primary Network GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return ids.Empty, nil @@ -349,15 +354,15 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned createTx, err := types.SignTx( types.NewContractCreation(0, common.Big0, 7_000_000, big.NewInt(225*utils.GWei), common.Hex2Bytes(exampleWarpBin)), - types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0], + types.LatestSignerForChainID(tvm.vm.chainConfig.ChainID), + testKeys[0].ToECDSA(), ) require.NoError(err) exampleWarpAddress := crypto.CreateAddress(testEthAddrs[0], 0) tx, err := types.SignTx( predicate.NewPredicateTx( - vm.chainConfig.ChainID, + tvm.vm.chainConfig.ChainID, 1, &exampleWarpAddress, 1_000_000, @@ -369,11 +374,11 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned warpcontract.ContractAddress, signedMessage.Bytes(), ), - types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0], + types.LatestSignerForChainID(tvm.vm.chainConfig.ChainID), + testKeys[0].ToECDSA(), ) require.NoError(err) - errs := vm.txPool.AddRemotesSync([]*types.Transaction{createTx, tx}) + errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{createTx, tx}) for i, err := range errs { require.NoError(err, "failed to add tx at index %d", i) } @@ -385,13 +390,13 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned if validSignature { blockCtx.PChainHeight = minimumValidPChainHeight } - 
vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) + tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) - msg, err := vm.WaitForEvent(context.Background()) + msg, err := tvm.vm.WaitForEvent(context.Background()) require.NoError(err) require.Equal(commonEng.PendingTxs, msg) - warpBlock, err := vm.BuildBlockWithContext(context.Background(), blockCtx) + warpBlock, err := tvm.vm.BuildBlockWithContext(context.Background(), blockCtx) require.NoError(err) warpBlockVerifyWithCtx, ok := warpBlock.(block.WithVerifyContext) @@ -400,18 +405,18 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned require.NoError(err) require.True(shouldVerifyWithCtx) require.NoError(warpBlockVerifyWithCtx.VerifyWithContext(context.Background(), blockCtx)) - require.NoError(vm.SetPreference(context.Background(), warpBlock.ID())) + require.NoError(tvm.vm.SetPreference(context.Background(), warpBlock.ID())) require.NoError(warpBlock.Accept(context.Background())) - vm.blockChain.DrainAcceptorQueue() + tvm.vm.blockChain.DrainAcceptorQueue() ethBlock := warpBlock.(*chain.BlockWrapper).Block.(*Block).ethBlock - verifiedMessageReceipts := vm.blockChain.GetReceiptsByHash(ethBlock.Hash()) + verifiedMessageReceipts := tvm.vm.blockChain.GetReceiptsByHash(ethBlock.Hash()) require.Len(verifiedMessageReceipts, 2) for i, receipt := range verifiedMessageReceipts { require.Equal(types.ReceiptStatusSuccessful, receipt.Status, "index: %d", i) } - tracerAPI := tracers.NewAPI(vm.eth.APIBackend) + tracerAPI := tracers.NewAPI(tvm.vm.eth.APIBackend) txTraceResults, err := tracerAPI.TraceBlockByHash(context.Background(), ethBlock.Hash(), nil) require.NoError(err) require.Len(txTraceResults, 2) @@ -431,7 +436,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned func TestReceiveWarpMessage(t *testing.T) { require := require.New(t) genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONDurango))) + 
require.NoError(genesis.UnmarshalJSON([]byte(toGenesisJSON(forkToChainConfig[upgradetest.Durango])))) params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ // Note that warp is enabled without RequirePrimaryNetworkSigners // by default in the genesis configuration. @@ -461,10 +466,13 @@ func TestReceiveWarpMessage(t *testing.T) { upgradeBytes, err := json.Marshal(upgradeConfig) require.NoError(err) - vm, _, _ := GenesisVM(t, true, string(genesisJSON), "", string(upgradeBytes)) + tvm := newVM(t, testVMConfig{ + genesisJSON: string(genesisJSON), + upgradeJSON: string(upgradeBytes), + }) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(tvm.vm.Shutdown(context.Background())) }() type test struct { @@ -479,7 +487,7 @@ func TestReceiveWarpMessage(t *testing.T) { tests := []test{ { name: "subnet message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: vm.ctx.ChainID, + sourceChainID: tvm.vm.ctx.ChainID, msgFrom: fromSubnet, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime, @@ -493,7 +501,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: vm.ctx.CChainID, + sourceChainID: tvm.vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime.Add(2 * blockGap), @@ -502,7 +510,7 @@ func TestReceiveWarpMessage(t *testing.T) { // by using reEnableTime. 
{ name: "subnet message should be signed by subnet with RequirePrimaryNetworkSigners (unimpacted)", - sourceChainID: vm.ctx.ChainID, + sourceChainID: tvm.vm.ctx.ChainID, msgFrom: fromSubnet, useSigners: signersSubnet, blockTime: reEnableTime, @@ -516,7 +524,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by primary with RequirePrimaryNetworkSigners (impacted)", - sourceChainID: vm.ctx.CChainID, + sourceChainID: tvm.vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersPrimary, blockTime: reEnableTime.Add(2 * blockGap), @@ -526,7 +534,7 @@ func TestReceiveWarpMessage(t *testing.T) { // time and cannot, eg be run in parallel or a separate golang test. for _, test := range tests { testReceiveWarpMessage( - t, vm, test.sourceChainID, test.msgFrom, test.useSigners, test.blockTime, + t, tvm.vm, test.sourceChainID, test.msgFrom, test.useSigners, test.blockTime, ) } } @@ -662,7 +670,7 @@ func testReceiveWarpMessage( signedMessage.Bytes(), ), types.LatestSignerForChainID(vm.chainConfig.ChainID), - testKeys[0], + testKeys[0].ToECDSA(), ) require.NoError(err) errs := vm.txPool.AddRemotesSync([]*types.Transaction{getVerifiedWarpMessageTx}) @@ -754,21 +762,23 @@ func testReceiveWarpMessage( } func TestMessageSignatureRequestsToVM(t *testing.T) { - vm, _, appSender := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - err := vm.Shutdown(context.Background()) + err := tvm.vm.Shutdown(context.Background()) require.NoError(t, err) }() // Generate a new warp unsigned message and add to warp backend - warpMessage, err := avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, []byte{1, 2, 3}) + warpMessage, err := avalancheWarp.NewUnsignedMessage(tvm.vm.ctx.NetworkID, tvm.vm.ctx.ChainID, []byte{1, 2, 3}) require.NoError(t, err) // Add the known message and get its signature to confirm. 
- err = vm.warpBackend.AddMessage(warpMessage) + err = tvm.vm.warpBackend.AddMessage(warpMessage) require.NoError(t, err) - signature, err := vm.warpBackend.GetMessageSignature(context.TODO(), warpMessage) + signature, err := tvm.vm.warpBackend.GetMessageSignature(context.TODO(), warpMessage) require.NoError(t, err) var knownSignature [bls.SignatureLen]byte copy(knownSignature[:], signature) @@ -789,7 +799,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { for name, test := range tests { calledSendAppResponseFn := false - appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + tvm.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { calledSendAppResponseFn = true var response message.SignatureResponse _, err := message.Codec.Unmarshal(responseBytes, &response) @@ -808,24 +818,26 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { // Send the app request and make sure we called SendAppResponseFn deadline := time.Now().Add(60 * time.Second) - require.NoError(t, vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), peertest.TestPeerRequestID, deadline, requestBytes)) + require.NoError(t, tvm.vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), peertest.TestPeerRequestID, deadline, requestBytes)) require.True(t, calledSendAppResponseFn) }) } } func TestBlockSignatureRequestsToVM(t *testing.T) { - vm, _, appSender := GenesisVM(t, true, genesisJSONSubnetEVM, "", "") + tvm := newVM(t, testVMConfig{ + genesisJSON: genesisJSONSubnetEVM, + }) defer func() { - err := vm.Shutdown(context.Background()) + err := tvm.vm.Shutdown(context.Background()) require.NoError(t, err) }() - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := tvm.vm.LastAccepted(context.Background()) require.NoError(t, err) - signature, err := vm.warpBackend.GetBlockSignature(context.TODO(), 
lastAcceptedID) + signature, err := tvm.vm.warpBackend.GetBlockSignature(context.TODO(), lastAcceptedID) require.NoError(t, err) var knownSignature [bls.SignatureLen]byte copy(knownSignature[:], signature) @@ -846,7 +858,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { for name, test := range tests { calledSendAppResponseFn := false - appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { + tvm.appSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, responseBytes []byte) error { calledSendAppResponseFn = true var response message.SignatureResponse _, err := message.Codec.Unmarshal(responseBytes, &response) @@ -865,14 +877,14 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { // Send the app request and make sure we called SendAppResponseFn deadline := time.Now().Add(60 * time.Second) - require.NoError(t, vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), peertest.TestPeerRequestID, deadline, requestBytes)) + require.NoError(t, tvm.vm.Network.AppRequest(context.Background(), ids.GenerateTestNodeID(), peertest.TestPeerRequestID, deadline, requestBytes)) require.True(t, calledSendAppResponseFn) }) } } func TestClearWarpDB(t *testing.T) { - ctx, db, genesisBytes, _ := setupGenesis(t, genesisJSONLatest) + ctx, db, genesisBytes, _ := setupGenesis(t, upgradetest.Latest) vm := &VM{} err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) @@ -898,7 +910,7 @@ func TestClearWarpDB(t *testing.T) { // Restart VM with the same database default should not prune the warp db vm = &VM{} // we need new context since the previous one has registered metrics. 
- ctx, _, _, _ = setupGenesis(t, genesisJSONLatest) + ctx, _, _, _ = setupGenesis(t, upgradetest.Latest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) @@ -914,7 +926,7 @@ func TestClearWarpDB(t *testing.T) { // restart the VM with pruning enabled vm = &VM{} config := `{"prune-warp-db-enabled": true}` - ctx, _, _, _ = setupGenesis(t, genesisJSONLatest) + ctx, _, _, _ = setupGenesis(t, upgradetest.Latest) err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), []*commonEng.Fx{}, &enginetest.Sender{}) require.NoError(t, err) diff --git a/precompile/contracts/warp/contract_test.go b/precompile/contracts/warp/contract_test.go index 300ced1e13..0799b9f8b0 100644 --- a/precompile/contracts/warp/contract_test.go +++ b/precompile/contracts/warp/contract_test.go @@ -19,14 +19,14 @@ import ( "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/precompile/testutils" "github.com/ava-labs/subnet-evm/predicate" - "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" ) func TestGetBlockchainID(t *testing.T) { callerAddr := common.HexToAddress("0x0123") - defaultSnowCtx := utils.TestSnowContext() + defaultSnowCtx := utilstest.NewTestSnowContext(t) blockchainID := defaultSnowCtx.ChainID tests := map[string]testutils.PrecompileTest{ @@ -84,7 +84,7 @@ func TestGetBlockchainID(t *testing.T) { func TestSendWarpMessage(t *testing.T) { callerAddr := common.HexToAddress("0x0123") - defaultSnowCtx := utils.TestSnowContext() + defaultSnowCtx := utilstest.NewTestSnowContext(t) blockchainID := defaultSnowCtx.ChainID sendWarpMessagePayload := agoUtils.RandomBytes(100) diff --git a/precompile/contracts/warp/predicate_test.go 
b/precompile/contracts/warp/predicate_test.go index 460dd4be2c..a5fa8602cb 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/subnet-evm/precompile/testutils" "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" ) @@ -34,7 +35,6 @@ var ( _ agoUtils.Sortable[*testValidator] = (*testValidator)(nil) errTest = errors.New("non-nil error") - networkID = uint32(54321) sourceChainID = ids.GenerateTestID() sourceSubnetID = ids.GenerateTestID() @@ -49,8 +49,6 @@ var ( numTestVdrs = 10_000 testVdrs []*testValidator vdrs map[ids.NodeID]*validators.GetValidatorOutput - - predicateTests = make(map[string]testutils.PredicateTest) ) func init() { @@ -88,7 +86,7 @@ func init() { panic(err) } addressedPayloadBytes = addressedPayload.Bytes() - unsignedMsg, err = avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, addressedPayload.Bytes()) + unsignedMsg, err = avalancheWarp.NewUnsignedMessage(constants.UnitTestID, sourceChainID, addressedPayload.Bytes()) if err != nil { panic(err) } @@ -100,8 +98,6 @@ func init() { } blsSignatures = append(blsSignatures, blsSignature) } - - initWarpPredicateTests() } type testValidator struct { @@ -174,7 +170,7 @@ type validatorRange struct { } // createSnowCtx creates a snow.Context instance with a validator state specified by the given validatorRanges -func createSnowCtx(validatorRanges []validatorRange) *snow.Context { +func createSnowCtx(tb testing.TB, validatorRanges []validatorRange) *snow.Context { getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) for _, validatorRange := range validatorRanges { @@ -190,7 +186,7 @@ func createSnowCtx(validatorRanges []validatorRange) *snow.Context { } } - snowCtx := 
utils.TestSnowContext() + snowCtx := utilstest.NewTestSnowContext(tb) state := &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return sourceSubnetID, nil @@ -200,7 +196,6 @@ func createSnowCtx(validatorRanges []validatorRange) *snow.Context { }, } snowCtx.ValidatorState = state - snowCtx.NetworkID = networkID return snowCtx } @@ -232,7 +227,7 @@ func testWarpMessageFromPrimaryNetwork(t *testing.T, requirePrimaryNetworkSigner cChainID := ids.GenerateTestID() addressedCall, err := payload.NewAddressedCall(agoUtils.RandomBytes(20), agoUtils.RandomBytes(100)) require.NoError(err) - unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, cChainID, addressedCall.Bytes()) + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(constants.UnitTestID, cChainID, addressedCall.Bytes()) require.NoError(err) getValidatorsOutput := make(map[ids.NodeID]*validators.GetValidatorOutput) @@ -264,11 +259,10 @@ func testWarpMessageFromPrimaryNetwork(t *testing.T, requirePrimaryNetworkSigner predicateBytes := predicate.PackPredicate(warpMsg.Bytes()) - snowCtx := utils.TestSnowContext() + snowCtx := utilstest.NewTestSnowContext(t) snowCtx.SubnetID = ids.GenerateTestID() snowCtx.ChainID = ids.GenerateTestID() snowCtx.CChainID = cChainID - snowCtx.NetworkID = networkID snowCtx.ValidatorState = &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { require.Equal(chainID, cChainID) @@ -303,7 +297,7 @@ func testWarpMessageFromPrimaryNetwork(t *testing.T, requirePrimaryNetworkSigner func TestInvalidPredicatePacking(t *testing.T) { numKeys := 1 - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: numKeys, @@ -332,7 +326,7 @@ func TestInvalidPredicatePacking(t *testing.T) { func TestInvalidWarpMessage(t *testing.T) { numKeys := 1 - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: 
numKeys, @@ -363,7 +357,7 @@ func TestInvalidWarpMessage(t *testing.T) { func TestInvalidAddressedPayload(t *testing.T) { numKeys := 1 - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: numKeys, @@ -382,7 +376,7 @@ func TestInvalidAddressedPayload(t *testing.T) { } copy(warpSignature.Signature[:], bls.SignatureToBytes(aggregateSignature)) // Create an unsigned message with an invalid addressed payload - unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, []byte{1, 2, 3}) + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(constants.UnitTestID, sourceChainID, []byte{1, 2, 3}) require.NoError(t, err) warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) require.NoError(t, err) @@ -409,7 +403,7 @@ func TestInvalidBitSet(t *testing.T) { addressedCall, err := payload.NewAddressedCall(agoUtils.RandomBytes(20), agoUtils.RandomBytes(100)) require.NoError(t, err) unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedCall.Bytes(), ) @@ -425,7 +419,7 @@ func TestInvalidBitSet(t *testing.T) { require.NoError(t, err) numKeys := 1 - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: numKeys, @@ -451,7 +445,7 @@ func TestInvalidBitSet(t *testing.T) { } func TestWarpSignatureWeightsDefaultQuorumNumerator(t *testing.T) { - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: 100, @@ -498,7 +492,7 @@ func TestWarpSignatureWeightsDefaultQuorumNumerator(t *testing.T) { // multiple messages all correct, multiple messages all incorrect, mixed bag func TestWarpMultiplePredicates(t *testing.T) { - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: 100, @@ -556,7 +550,7 @@ func TestWarpMultiplePredicates(t *testing.T) { } func 
TestWarpSignatureWeightsNonDefaultQuorumNumerator(t *testing.T) { - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(t, []validatorRange{ { start: 0, end: 100, @@ -599,12 +593,13 @@ func TestWarpSignatureWeightsNonDefaultQuorumNumerator(t *testing.T) { testutils.RunPredicateTests(t, tests) } -func initWarpPredicateTests() { +func makeWarpPredicateTests(tb testing.TB) map[string]testutils.PredicateTest { + predicateTests := make(map[string]testutils.PredicateTest) for _, totalNodes := range []int{10, 100, 1_000, 10_000} { testName := fmt.Sprintf("%d signers/%d validators", totalNodes, totalNodes) predicateBytes := createPredicate(totalNodes) - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(tb, []validatorRange{ { start: 0, end: totalNodes, @@ -620,7 +615,7 @@ func initWarpPredicateTests() { testName := fmt.Sprintf("%d signers (heavily weighted)/%d validators", numSigners, totalNodes) predicateBytes := createPredicate(numSigners) - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(tb, []validatorRange{ { start: 0, end: numSigners, @@ -641,7 +636,7 @@ func initWarpPredicateTests() { testName := fmt.Sprintf("%d signers (heavily weighted)/%d validators (non-signers without registered PublicKey)", numSigners, totalNodes) predicateBytes := createPredicate(numSigners) - snowCtx := createSnowCtx([]validatorRange{ + snowCtx := createSnowCtx(tb, []validatorRange{ { start: 0, end: numSigners, @@ -671,8 +666,8 @@ func initWarpPredicateTests() { } } - snowCtx := utils.TestSnowContext() - snowCtx.NetworkID = networkID + snowCtx := utilstest.NewTestSnowContext(tb) + state := &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return sourceSubnetID, nil @@ -685,12 +680,15 @@ func initWarpPredicateTests() { predicateTests[testName] = createValidPredicateTest(snowCtx, uint64(numSigners), predicateBytes) } + return predicateTests } func TestWarpPredicate(t *testing.T) { + 
predicateTests := makeWarpPredicateTests(t) testutils.RunPredicateTests(t, predicateTests) } func BenchmarkWarpPredicate(b *testing.B) { + predicateTests := makeWarpPredicateTests(b) testutils.RunPredicateBenchmarks(b, predicateTests) } diff --git a/precompile/contracts/warp/signature_verification_test.go b/precompile/contracts/warp/signature_verification_test.go index e3b33e2d4d..5751cdffbf 100644 --- a/precompile/contracts/warp/signature_verification_test.go +++ b/precompile/contracts/warp/signature_verification_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorsmock" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" @@ -43,7 +44,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -70,7 +71,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -108,7 +109,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -137,7 +138,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require 
*require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -167,7 +168,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -200,7 +201,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 1, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -246,7 +247,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -280,7 +281,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -316,7 +317,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -362,7 +363,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -403,7 +404,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 5, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, 
err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -451,7 +452,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 2, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -497,7 +498,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -559,7 +560,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -622,7 +623,7 @@ func TestSignatureVerification(t *testing.T) { quorumDen: 3, msgF: func(require *require.Assertions) *avalancheWarp.Message { unsignedMsg, err := avalancheWarp.NewUnsignedMessage( - networkID, + constants.UnitTestID, sourceChainID, addressedPayloadBytes, ) @@ -677,7 +678,7 @@ func TestSignatureVerification(t *testing.T) { } err = msg.Signature.Verify( &msg.UnsignedMessage, - networkID, + constants.UnitTestID, validatorSet, tt.quorumNum, tt.quorumDen, diff --git a/precompile/testutils/test_precompile.go b/precompile/testutils/test_precompile.go index d345f9ada8..8af64afc52 100644 --- a/precompile/testutils/test_precompile.go +++ b/precompile/testutils/test_precompile.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/precompile/modules" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" - "github.com/ava-labs/subnet-evm/utils" + 
"github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) @@ -108,7 +108,7 @@ func (test PrecompileTest) setup(t testing.TB, module modules.Module, state cont blockContext.EXPECT().Number().Return(big.NewInt(0)).AnyTimes() blockContext.EXPECT().Timestamp().Return(uint64(time.Now().Unix())).AnyTimes() } - snowContext := utils.TestSnowContext() + snowContext := utilstest.NewTestSnowContext(t) accessibleState := contract.NewMockAccessibleState(ctrl) accessibleState.EXPECT().GetStateDB().Return(state).AnyTimes() diff --git a/utils/snow.go b/utils/snow.go deleted file mode 100644 index f89b9cffdf..0000000000 --- a/utils/snow.go +++ /dev/null @@ -1,86 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package utils - -import ( - "context" - "errors" - - "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" - "github.com/ava-labs/avalanchego/upgrade/upgradetest" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/platformvm/warp" -) - -var ( - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} -) - -func TestSnowContext() *snow.Context { - sk, err := localsigner.New() - if err != nil { - panic(err) - } - pk := sk.PublicKey() - networkID := 
constants.UnitTestID - chainID := testChainID - - ctx := &snow.Context{ - NetworkID: networkID, - SubnetID: ids.Empty, - ChainID: chainID, - NodeID: ids.GenerateTestNodeID(), - XChainID: testXChainID, - CChainID: testCChainID, - NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), - PublicKey: pk, - WarpSigner: warp.NewSigner(sk, networkID, chainID), - Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), - Metrics: metrics.NewPrefixGatherer(), - ChainDataDir: "", - ValidatorState: NewTestValidatorState(), - } - - aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(testCChainID, "C") - _ = aliaser.Alias(testCChainID, testCChainID.String()) - _ = aliaser.Alias(testXChainID, "X") - _ = aliaser.Alias(testXChainID, testXChainID.String()) - - return ctx -} - -func NewTestValidatorState() *validatorstest.State { - return &validatorstest.State{ - GetCurrentHeightF: func(context.Context) (uint64, error) { - return 0, nil - }, - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - testXChainID: constants.PrimaryNetworkID, - testCChainID: constants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{}, nil - }, - GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { - return map[ids.ID]*validators.GetCurrentValidatorOutput{}, 0, nil - }, - } -} diff --git a/utils/utilstest/context.go b/utils/utilstest/context.go new file mode 100644 index 0000000000..44b3026cd4 --- /dev/null +++ b/utils/utilstest/context.go @@ -0,0 +1,74 @@ +package utilstest + +import ( + "context" + "errors" + "testing" + + "github.com/ava-labs/avalanchego/ids" 
+ "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/avalanchego/utils/constants" +) + +// SubnetEVMTestChainID is a subnet-evm specific chain ID for testing +var SubnetEVMTestChainID = ids.GenerateTestID() + +// @TODO: This should eventually be replaced by a more robust solution, or alternatively, the presence of nil +// validator states shouldn't be depended upon by tests +func NewTestValidatorState() *validatorstest.State { + return &validatorstest.State{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + snowtest.XChainID: constants.PrimaryNetworkID, + snowtest.CChainID: constants.PrimaryNetworkID, + SubnetEVMTestChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errors.New("unknown chain") + } + return subnetID, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{}, nil + }, + GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*validators.GetCurrentValidatorOutput{}, 0, nil + }, + } +} + +// NewTestSnowContext returns a snow.Context with validator state properly configured for testing. +// This wraps snowtest.Context and sets the validator state to avoid the missing GetValidatorSetF issue. 
+// +// Usage example: +// +//	// Instead of: +//	// snowCtx := snowtest.Context(t, snowtest.CChainID) +//	// validatorState := utils.NewTestValidatorState() +//	// snowCtx.ValidatorState = validatorState +// +//	// Use: +//	snowCtx := utilstest.NewTestSnowContext(t) +// +// This function ensures that the snow context has a properly configured validator state +// that includes the GetValidatorSetF function, which is required by many tests. +func NewTestSnowContext(t testing.TB) *snow.Context { +	snowCtx := snowtest.Context(t, SubnetEVMTestChainID) +	snowCtx.ValidatorState = NewTestValidatorState() +	return snowCtx +} + +// NewTestSnowContextWithChainID returns a snow.Context with validator state properly configured for testing +// with a specific chain ID. This is provided for backward compatibility when a specific chain ID is needed. +func NewTestSnowContextWithChainID(t testing.TB, chainID ids.ID) *snow.Context { +	snowCtx := snowtest.Context(t, chainID) +	snowCtx.ValidatorState = NewTestValidatorState() +	return snowCtx +} diff --git a/utils/utilstest/context_test.go b/utils/utilstest/context_test.go new file mode 100644 index 0000000000..378fb07b6c --- /dev/null +++ b/utils/utilstest/context_test.go @@ -0,0 +1,33 @@ +// (c) 2025 Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
+ +package utilstest + +import ( + "context" + "testing" + + "github.com/ava-labs/avalanchego/ids" + "github.com/stretchr/testify/require" +) + +func TestNewTestSnowContext(t *testing.T) { + // Test that NewTestSnowContext creates a context with validator state + snowCtx := NewTestSnowContext(t) + require.NotNil(t, snowCtx.ValidatorState) + + // Test that the validator state has the required functions + validatorState := snowCtx.ValidatorState + require.NotNil(t, validatorState) + + // Test that we can call GetValidatorSetF without panicking + validators, err := validatorState.GetValidatorSet(context.TODO(), 0, ids.Empty) + require.NoError(t, err) + require.NotNil(t, validators) + + // Test that we can call GetCurrentValidatorSetF without panicking + currentValidators, height, err := validatorState.GetCurrentValidatorSet(context.TODO(), ids.Empty) + require.NoError(t, err) + require.NotNil(t, currentValidators) + require.Equal(t, uint64(0), height) +} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index fa12b96937..b47e0be3c1 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -11,12 +11,11 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/internal/testutils" "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" 
"github.com/ava-labs/subnet-evm/warp" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" @@ -28,10 +27,7 @@ func TestMessageSignatureHandler(t *testing.T) { testutils.WithMetrics(t) database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := localsigner.New() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + snowCtx := utilstest.NewTestSnowContext(t) addressedPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) require.NoError(t, err) @@ -39,7 +35,16 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageSignatureCache := lru.NewCache[ids.ID, []byte](100) - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) + backend, err := warp.NewBackend( + snowCtx.NetworkID, + snowCtx.ChainID, + snowCtx.WarpSigner, + warptest.EmptyBlockClient, + nil, + database, + messageSignatureCache, + [][]byte{offchainMessage.Bytes()}, + ) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -134,18 +139,15 @@ func TestBlockSignatureHandler(t *testing.T) { testutils.WithMetrics(t) database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := localsigner.New() - require.NoError(t, err) + snowCtx := utilstest.NewTestSnowContext(t) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) blkID := ids.GenerateTestID() blockClient := warptest.MakeBlockClient(blkID) messageSignatureCache := lru.NewCache[ids.ID, []byte](100) backend, err := warp.NewBackend( snowCtx.NetworkID, snowCtx.ChainID, - warpSigner, + snowCtx.WarpSigner, blockClient, warptest.NoOpValidatorReader{}, database, diff --git 
a/warp/validators/state_test.go b/warp/validators/state_test.go index 2a67415865..b80cce563b 100644 --- a/warp/validators/state_test.go +++ b/warp/validators/state_test.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorsmock" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) @@ -24,7 +24,7 @@ func TestGetValidatorSetPrimaryNetwork(t *testing.T) { otherSubnetID := ids.GenerateTestID() mockState := validatorsmock.NewState(ctrl) - snowCtx := utils.TestSnowContext() + snowCtx := utilstest.NewTestSnowContext(t) snowCtx.SubnetID = mySubnetID snowCtx.ValidatorState = mockState state := NewState(snowCtx.ValidatorState, snowCtx.SubnetID, snowCtx.ChainID, false) diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index d75d4668a7..c8669e5b06 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -16,14 +16,13 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls/signer/localsigner" "github.com/ava-labs/avalanchego/utils/timer/mockable" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/internal/testutils" "github.com/ava-labs/subnet-evm/plugin/evm/validators" stateinterfaces 
"github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" - "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/utils/utilstest" "github.com/ava-labs/subnet-evm/warp/messages" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" @@ -34,16 +33,13 @@ func TestAddressedCallSignatures(t *testing.T) { testutils.WithMetrics(t) database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := localsigner.New() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + snowCtx := utilstest.NewTestSnowContext(t) offChainPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) require.NoError(t, err) offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offChainPayload.Bytes()) require.NoError(t, err) - offchainSignature, err := warpSigner.Sign(offchainMessage) + offchainSignature, err := snowCtx.WarpSigner.Sign(offchainMessage) require.NoError(t, err) tests := map[string]struct { @@ -57,7 +53,7 @@ func TestAddressedCallSignatures(t *testing.T) { require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, knownPayload.Bytes()) require.NoError(t, err) - signature, err := warpSigner.Sign(msg) + signature, err := snowCtx.WarpSigner.Sign(msg) require.NoError(t, err) backend.AddMessage(msg) @@ -107,9 +103,18 @@ func TestAddressedCallSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, database, sigCache, [][]byte{offchainMessage.Bytes()}) + warpBackend, err := NewBackend( + snowCtx.NetworkID, + snowCtx.ChainID, + snowCtx.WarpSigner, + warptest.EmptyBlockClient, + 
nil, + database, + sigCache, + [][]byte{offchainMessage.Bytes()}, + ) require.NoError(t, err) - handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + handler := acp118.NewCachedHandler(sigCache, warpBackend, snowCtx.WarpSigner) requestBytes, expectedResponse := test.setup(warpBackend) protoMsg := &sdk.SignatureRequest{Message: requestBytes} @@ -150,11 +155,8 @@ func TestBlockSignatures(t *testing.T) { testutils.WithMetrics(t) database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := localsigner.New() - require.NoError(t, err) + snowCtx := utilstest.NewTestSnowContext(t) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) knownBlkID := ids.GenerateTestID() blockClient := warptest.MakeBlockClient(knownBlkID) @@ -183,7 +185,7 @@ func TestBlockSignatures(t *testing.T) { require.NoError(t, err) unsignedMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, hashPayload.Bytes()) require.NoError(t, err) - signature, err := warpSigner.Sign(unsignedMessage) + signature, err := snowCtx.WarpSigner.Sign(unsignedMessage) require.NoError(t, err) return toMessageBytes(knownBlkID), signature[:] }, @@ -222,7 +224,7 @@ func TestBlockSignatures(t *testing.T) { warpBackend, err := NewBackend( snowCtx.NetworkID, snowCtx.ChainID, - warpSigner, + snowCtx.WarpSigner, blockClient, warptest.NoOpValidatorReader{}, database, @@ -230,7 +232,7 @@ func TestBlockSignatures(t *testing.T) { nil, ) require.NoError(t, err) - handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + handler := acp118.NewCachedHandler(sigCache, warpBackend, snowCtx.WarpSigner) requestBytes, expectedResponse := test.setup() protoMsg := &sdk.SignatureRequest{Message: requestBytes} @@ -268,10 +270,7 @@ func TestBlockSignatures(t *testing.T) { func TestUptimeSignatures(t *testing.T) { database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := localsigner.New() - 
require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + snowCtx := utilstest.NewTestSnowContext(t) getUptimeMessageBytes := func(sourceAddress []byte, vID ids.ID, totalUptime uint64) ([]byte, *avalancheWarp.UnsignedMessage) { uptimePayload, err := messages.NewValidatorUptime(vID, 80) @@ -294,16 +293,25 @@ func TestUptimeSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - chainCtx := utils.TestSnowContext() + chainCtx := utilstest.NewTestSnowContext(t) clk := &mockable.Clock{} validatorsManager, err := validators.NewManager(chainCtx, memdb.New(), clk) require.NoError(t, err) lock := &sync.RWMutex{} newLockedValidatorManager := validators.NewLockedValidatorReader(validatorsManager, lock) validatorsManager.StartTracking([]ids.NodeID{}) - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, newLockedValidatorManager, database, sigCache, nil) + warpBackend, err := NewBackend( + snowCtx.NetworkID, + snowCtx.ChainID, + snowCtx.WarpSigner, + warptest.EmptyBlockClient, + newLockedValidatorManager, + database, + sigCache, + nil, + ) require.NoError(t, err) - handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + handler := acp118.NewCachedHandler(sigCache, warpBackend, snowCtx.WarpSigner) // sourceAddress nonZero protoBytes, _ := getUptimeMessageBytes([]byte{1, 2, 3}, ids.GenerateTestID(), 80) @@ -347,7 +355,7 @@ func TestUptimeSignatures(t *testing.T) { protoBytes, msg := getUptimeMessageBytes([]byte{}, validationID, 80) responseBytes, appErr := handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) require.Nil(t, appErr) - expectedSignature, err := warpSigner.Sign(msg) + expectedSignature, err := snowCtx.WarpSigner.Sign(msg) require.NoError(t, err) response := &sdk.SignatureResponse{} require.NoError(t, proto.Unmarshal(responseBytes, response))