diff --git a/.avalanche-golangci.yml b/.avalanche-golangci.yml index e9966716cd..12650b5146 100644 --- a/.avalanche-golangci.yml +++ b/.avalanche-golangci.yml @@ -57,7 +57,7 @@ linters: # - depguard # - errcheck - errorlint - # - forbidigo + - forbidigo - goconst - gocritic - goprintffuncname @@ -105,12 +105,12 @@ linters: forbidigo: # Forbid the following identifiers (list of regexp). forbid: - - pattern: require\.Error$(# ErrorIs should be used instead)? - - pattern: require\.ErrorContains$(# ErrorIs should be used instead)? - - pattern: require\.EqualValues$(# Equal should be used instead)? - - pattern: require\.NotEqualValues$(# NotEqual should be used instead)? + # - pattern: require\.Error$(# ErrorIs should be used instead)? + # - pattern: require\.ErrorContains$(# ErrorIs should be used instead)? + # - pattern: require\.EqualValues$(# Equal should be used instead)? + # - pattern: require\.NotEqualValues$(# NotEqual should be used instead)? - pattern: ^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)? - - pattern: ^sort\.(Slice|Strings)$(# the slices package should be used instead)? + # - pattern: ^sort\.(Slice|Strings)$(# the slices package should be used instead)? # Exclude godoc examples from forbidigo checks. 
exclude-godoc-examples: false gosec: diff --git a/accounts/abi/bind/precompilebind/precompile_bind_test.go b/accounts/abi/bind/precompilebind/precompile_bind_test.go index 28a1b9a858..90967d6ba1 100644 --- a/accounts/abi/bind/precompilebind/precompile_bind_test.go +++ b/accounts/abi/bind/precompilebind/precompile_bind_test.go @@ -628,9 +628,7 @@ func TestPrecompileBind(t *testing.T) { ws := t.TempDir() pkg := filepath.Join(ws, "precompilebindtest") - if err := os.MkdirAll(pkg, 0o700); err != nil { - t.Fatalf("failed to create package: %v", err) - } + require.NoError(t, os.MkdirAll(pkg, 0o700), "failed to create package") // Generate the test suite for all the contracts for i, tt := range bindTests { t.Run(tt.name, func(t *testing.T) { @@ -642,14 +640,10 @@ func TestPrecompileBind(t *testing.T) { require.ErrorContains(t, err, tt.errMsg) return } - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } + require.NoError(t, err, "test %d: failed to generate binding: %v", i, err) precompilePath := filepath.Join(pkg, tt.name) - if err := os.MkdirAll(precompilePath, 0o700); err != nil { - t.Fatalf("failed to create package: %v", err) - } + require.NoError(t, os.MkdirAll(precompilePath, 0o700), "failed to create package") for _, file := range bindedFiles { switch file.FileName { case ContractFileName: @@ -663,13 +657,9 @@ func TestPrecompileBind(t *testing.T) { // change address to a suitable one for testing file.Content = strings.Replace(file.Content, `common.HexToAddress("{ASUITABLEHEXADDRESS}")`, `common.HexToAddress("0x03000000000000000000000000000000000000ff")`, 1) } - if err = os.WriteFile(filepath.Join(precompilePath, file.FileName), []byte(file.Content), 0o600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - } - if err = os.WriteFile(filepath.Join(precompilePath, "contract.abi"), []byte(tt.abi), 0o600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) + require.NoError(t, 
os.WriteFile(filepath.Join(precompilePath, file.FileName), []byte(file.Content), 0o600), "test %d: failed to write binding", i) } + require.NoError(t, os.WriteFile(filepath.Join(precompilePath, "contract.abi"), []byte(tt.abi), 0o600), "test %d: failed to write binding", i) // Generate the test file with the injected test code code := fmt.Sprintf(` @@ -684,32 +674,29 @@ func TestPrecompileBind(t *testing.T) { %s } `, tt.name, tt.imports, tt.name, tt.tester) - if err := os.WriteFile(filepath.Join(precompilePath, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0o600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } + require.NoError(t, os.WriteFile(filepath.Join(precompilePath, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0o600), "test %d: failed to write tests", i) }) } moder := exec.Command(gocmd, "mod", "init", "precompilebindtest") moder.Dir = pkg - if out, err := moder.CombinedOutput(); err != nil { - t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out) - } + out, err := moder.CombinedOutput() + require.NoError(t, err, "failed to convert binding test to modules: %v\n%s", err, out) + pwd, _ := os.Getwd() replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/subnet-evm@v0.0.0", "-replace", "github.com/ava-labs/subnet-evm="+filepath.Join(pwd, "..", "..", "..", "..")) // Repo root replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) - } + out, err = replacer.CombinedOutput() + require.NoError(t, err, "failed to replace binding test dependency to current source tree: %v\n%s", err, out) + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.24") tidier.Dir = pkg - if out, err := tidier.CombinedOutput(); err != nil { - t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) - } + out, err = tidier.CombinedOutput() + require.NoError(t, err, "failed to tidy Go 
module file: %v\n%s", err, out) + // Test the entire package and report any failures cmd := exec.Command(gocmd, "test", "./...", "-v", "-count", "1") cmd.Dir = pkg - if out, err := cmd.CombinedOutput(); err != nil { - t.Fatalf("failed to run binding test: %v\n%s", err, out) - } + out, err = cmd.CombinedOutput() + require.NoError(t, err, "failed to run binding test: %v\n%s", err, out) } diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 15842b7fc2..d2be424eff 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -179,85 +179,60 @@ func checkBlockChainState( checkState func(sdb *state.StateDB) error, ) (*BlockChain, *BlockChain) { var ( + require = require.New(t) lastAcceptedBlock = bc.LastConsensusAcceptedBlock() newDB = rawdb.NewMemoryDatabase() ) acceptedState, err := bc.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for original blockchain due to: %s", err) - } + require.NoError(err) + require.NoError(checkState(acceptedState), "Check state failed for original blockchain") oldChainDataDir := bc.CacheConfig().ChainDataDir // cacheConfig uses same reference in most tests newBlockChain, err := create(newDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatalf("Failed to create new blockchain instance: %s", err) - } + require.NoError(err, "Failed to create new blockchain instance") defer newBlockChain.Stop() for i := uint64(1); i <= lastAcceptedBlock.NumberU64(); i++ { block := bc.GetBlockByNumber(i) - if block == nil { - t.Fatalf("Failed to retrieve block by number %d from original chain", i) - } - if err := newBlockChain.InsertBlock(block); err != nil { - t.Fatalf("Failed to insert block %s:%d due to %s", block.Hash().Hex(), block.NumberU64(), err) - } - if err := newBlockChain.Accept(block); err != nil { - t.Fatalf("Failed to accept block %s:%d due to %s", block.Hash().Hex(), block.NumberU64(), err) 
- } + require.NotNilf(block, "Failed to retrieve block by number %d from original chain", i) + require.NoErrorf(newBlockChain.InsertBlock(block), "Failed to insert block %s:%d", block.Hash().Hex(), block.NumberU64()) + require.NoErrorf(newBlockChain.Accept(block), "Failed to accept block %s:%d", block.Hash().Hex(), block.NumberU64()) } newBlockChain.DrainAcceptorQueue() newLastAcceptedBlock := newBlockChain.LastConsensusAcceptedBlock() - if newLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected new blockchain to have last accepted block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), newLastAcceptedBlock.Hash().Hex(), newLastAcceptedBlock.NumberU64()) - } + require.Equal(lastAcceptedBlock.Hash(), newLastAcceptedBlock.Hash()) // Check that the state of [newBlockChain] passes the check acceptedState, err = newBlockChain.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for newly generated blockchain due to: %s", err) - } + require.NoError(err) + require.NoErrorf(checkState(acceptedState), "Check state failed for newly generated blockchain") // Copy the database over to prevent any issues when re-using [originalDB] after this call. 
originalDB, err = copyMemDB(originalDB) - if err != nil { - t.Fatal(err) - } + require.NoError(err) newChainDataDir := copyFlatDir(t, oldChainDataDir) restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), newChainDataDir) - if err != nil { - t.Fatal(err) - } + require.NoError(err) defer restartedChain.Stop() - if currentBlock := restartedChain.CurrentBlock(); currentBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - if restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock(); restartedLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), restartedLastAcceptedBlock.Hash().Hex(), restartedLastAcceptedBlock.NumberU64()) - } + + currentBlock := restartedChain.CurrentBlock() + require.Equal(lastAcceptedBlock.Hash(), currentBlock.Hash(), "Restarted chain's current block does not match last accepted block") + restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock() + require.Equal(lastAcceptedBlock.Hash(), restartedLastAcceptedBlock.Hash(), "Restarted chain's last accepted block does not match last accepted block") // Check that the state of [restartedChain] passes the check acceptedState, err = restartedChain.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for restarted blockchain due to: %s", err) - } + require.NoError(err) + require.NoError(checkState(acceptedState), "Check state failed for restarted blockchain") return newBlockChain, restartedChain } func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = 
crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -272,10 +247,8 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { Alloc: types.GenesisAlloc{addr1: {Balance: genesisBalance}}, } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} @@ -283,17 +256,12 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) + require.NoError(blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -328,6 +296,7 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { func InsertLongForkedChain(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -343,10 +312,8 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 129 signer := types.HomesteadSigner{} @@ -355,9 +322,7 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Generate the forked chain to be longer than the original chain to check for a regression where // a longer chain can trigger a reorg. 
_, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks+1, 10, func(_ int, gen *BlockGen) { @@ -365,103 +330,74 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1, got, "incorrect snapshot layer count") } // Insert both chains. - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1+len(chain1), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1+len(chain1), got, "incorrect snapshot layer count") } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain2) + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1+len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1+len(chain1)+len(chain2), got, "incorrect snapshot layer count") } currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + 
require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.ValidateCanonicalChain()) // Accept the first block in [chain1], reject all blocks in [chain2] to // mimic the order that the consensus engine will call Accept/Reject in // and then Accept the rest of the blocks in [chain1]. - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() if blockchain.snaps != nil { - // Snap layer count should be 1 fewer - if want, got := len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + // Snap layer count should match chain length + require.Equal(len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } for i := 0; i < len(chain2); i++ { - if err := blockchain.Reject(chain2[i]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(chain2[i])) if blockchain.snaps != nil { // Snap layer count should decrease by 1 per Reject - if want, got := len(chain1)+len(chain2)-i-1, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1)+len(chain2)-i-1, blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } } if blockchain.snaps != nil { - if want, got := len(chain1), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1), blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } for i := 1; i < len(chain1); i++ { - if err := blockchain.Accept(chain1[i]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[i])) blockchain.DrainAcceptorQueue() if blockchain.snaps != nil { // Snap layer 
count should decrease by 1 per Accept - if want, got := len(chain1)-i, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1)-i, blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[len(chain1)-1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // check the state of the last accepted block checkState := func(sdb *state.StateDB) error { @@ -492,6 +428,7 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -507,10 +444,8 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 3 signer := types.HomesteadSigner{} @@ -519,58 +454,40 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - 
t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(_ int, gen *BlockGen) { // Generate a transaction with a different amount to create a chain of blocks different from [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first. - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.ValidateCanonicalChain()) // Accept the first block in [chain2], reject all blocks in [chain1] to // mimic the order that the consensus engine will call Accept/Reject in. 
- if err := blockchain.Accept(chain2[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain2[0])) blockchain.DrainAcceptorQueue() for i := 0; i < len(chain1); i++ { - if err := blockchain.Reject(chain1[i]); err != nil { - t.Fatal(err) - } - require.False(t, blockchain.HasBlock(chain1[i].Hash(), chain1[i].NumberU64())) + require.NoError(blockchain.Reject(chain1[i])) + require.False(blockchain.HasBlock(chain1[i].Hash(), chain1[i].NumberU64())) } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain2[0] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // check the state of the last accepted block checkState := func(sdb *state.StateDB) error { @@ -601,6 +518,7 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { func SetPreferenceRewind(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -616,10 +534,8 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 3 signer := types.HomesteadSigner{} @@ -628,45 +544,30 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { tx, _ := 
types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain[len(chain)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // SetPreference to an ancestor of the currently preferred block. Test that this unlikely, but possible behavior // is handled correctly. 
- if err := blockchain.SetPreference(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.SetPreference(chain[0])) currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain[0] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := blockchain.Genesis() - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) + // check the state of the last accepted block checkGenesisState := func(sdb *state.StateDB) error { nonce1 := sdb.GetNonce(addr1) @@ -690,19 +591,14 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { } checkBlockChainState(t, blockchain, gspec, chainDB, create, checkGenesisState) - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() lastAcceptedBlock = blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock = chain[0] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := 
blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) + checkUpdatedState := func(sdb *state.StateDB) error { nonce := sdb.GetNonce(addr1) if nonce != 1 { @@ -733,6 +629,7 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { func BuildOnVariousStages(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") @@ -753,10 +650,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} @@ -770,9 +665,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Build second chain forked off of the 10th block in [chain1] chain2, _, err := GenerateChain(gspec.Config, chain1[9], blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { // Send all funds back and forth between the two accounts @@ -784,9 +678,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Build third chain forked off of the 5th block in [chain1]. // The parent of this chain will be accepted before this fork // is inserted. 
@@ -800,62 +693,45 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert first 10 blocks from [chain1] - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) // Accept the first 5 blocks for _, block := range chain1[0:5] { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() // Insert the forked chain [chain2] which starts at the 10th // block in [chain1] ie. a block that is still in processing. - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain2) + require.NoError(err) // Insert another forked chain starting at the last accepted // block from [chain1]. - if _, err := blockchain.InsertChain(chain3); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain3) + require.NoError(err) // Accept the next block in [chain1] and then reject all // of the blocks in [chain3], which would then be rejected. - if err := blockchain.Accept(chain1[5]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[5])) blockchain.DrainAcceptorQueue() for _, block := range chain3 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the rest of the blocks in [chain1] for _, block := range chain1[6:10] { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() // Accept the first block in [chain2] and reject the // subsequent blocks in [chain1] which would then be rejected. 
- if err := blockchain.Accept(chain2[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain2[0])) blockchain.DrainAcceptorQueue() for _, block := range chain1[10:] { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // check the state of the last accepted block @@ -899,32 +775,25 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { } func EmptyBlocks(t *testing.T, create createFunc) { + require := require.New(t) chainDB := rawdb.NewMemoryDatabase() - gspec := &Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{}, } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(int, *BlockGen) {}) - if err != nil { - t.Fatal(err) - } + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(_ int, _ *BlockGen) {}) + require.NoError(err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) for _, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() @@ -938,6 +807,7 @@ func EmptyBlocks(t *testing.T, create createFunc) { func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -953,10 +823,8 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 5, 10, func(i int, gen *BlockGen) { if i == 3 { @@ -965,17 +833,12 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) for _, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() @@ -1008,6 +871,7 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { func ReorgReInsert(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = 
crypto.PubkeyToAddress(key1.PublicKey) @@ -1023,10 +887,8 @@ func ReorgReInsert(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} numBlocks := 3 @@ -1035,41 +897,24 @@ func ReorgReInsert(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert and accept first block - if err := blockchain.InsertBlock(chain[0]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + + require.NoError(blockchain.InsertBlock(chain[0])) + require.NoError(blockchain.Accept(chain[0])) // Insert block and then set preference back (rewind) to last accepted blck - if err := blockchain.InsertBlock(chain[1]); err != nil { - t.Fatal(err) - } - if err := blockchain.SetPreference(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain[1])) + require.NoError(blockchain.SetPreference(chain[0])) // Re-insert and accept block - if err := blockchain.InsertBlock(chain[1]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[1]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain[1])) + require.NoError(blockchain.Accept(chain[1])) // Build on top of the re-inserted block and accept - if err := blockchain.InsertBlock(chain[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[2]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain[2])) + require.NoError(blockchain.Accept(chain[2])) blockchain.DrainAcceptorQueue() // Nothing to assert about the state @@ -1114,6 +959,7 @@ func ReorgReInsert(t *testing.T, create createFunc) { 
//nolint:goimports func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1129,10 +975,8 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { @@ -1143,9 +987,7 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } // Allow the third block to be empty. }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. @@ -1157,63 +999,43 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Assert that the block root of the second block in both chains is identical - if chain1[1].Root() != chain2[1].Root() { - t.Fatalf("Expected the latter block in both chain1 and chain2 to have identical state root, but found %s and %s", chain1[1].Root(), chain2[1].Root()) - } + require.Equal(chain1[1].Root(), chain2[1].Root()) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. 
- if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) // Accept the first block in [chain1] and reject all of [chain2] - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() for _, block := range chain2 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the last two blocks in [chain1]. This is a regression test to ensure // that we do not discard a snapshot difflayer that is still in use by a // processing block, when a different block with the same root is rejected. 
- if err := blockchain.Accept(chain1[1]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[1])) blockchain.DrainAcceptorQueue() lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) - if err := blockchain.InsertBlock(chain1[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain1[2]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain1[2])) + require.NoError(blockchain.Accept(chain1[2])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -1258,6 +1080,7 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { //nolint:goimports func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1273,9 +1096,8 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { @@ -1286,9 +1108,7 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } // Allow the third block to be empty. 
}) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. @@ -1300,88 +1120,61 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Assert that the block root of the second block in both chains is identical - if chain1[1].Root() != chain2[1].Root() { - t.Fatalf("Expected the latter block in both chain1 and chain2 to have identical state root, but found %s and %s", chain1[1].Root(), chain2[1].Root()) - } + require.Equal(chain1[1].Root(), chain2[1].Root()) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. 
- if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) blockchain.Stop() chainDB = rawdb.NewMemoryDatabase() blockchain, err = create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. 
- if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equalf(expectedCurrentBlock.Hash(), currentBlock.Hash(), "block hash mismatch for expected height %d, actual height %d", expectedCurrentBlock.NumberU64(), currentBlock.Number.Uint64()) // Accept the first block in [chain1] and reject all of [chain2] - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() for _, block := range chain2 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the last two blocks in [chain1]. This is a regression test to ensure // that we do not discard a snapshot difflayer that is still in use by a // processing block, when a different block with the same root is rejected. 
- if err := blockchain.Accept(chain1[1]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[1])) blockchain.DrainAcceptorQueue() lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) - if err := blockchain.InsertBlock(chain1[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain1[2]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain1[2])) + require.NoError(blockchain.Accept(chain1[2])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -1602,9 +1395,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer blockchain.Stop() signer := types.LatestSigner(params.TestChainConfig) @@ -1634,9 +1425,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { addTx: func(gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) input, err := allowlist.PackModifyAllowList(addr2, allowlist.AdminRole) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1649,9 +1438,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }) signedTx, err := types.SignTx(tx, signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) gen.AddTx(signedTx) }, verifyState: func(sdb *state.StateDB) error { @@ -1667,22 +1454,16 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }, 
verifyGenesis: func(sdb *state.StateDB) { res := deployerallowlist.GetContractDeployerAllowListStatus(sdb, addr1) - if allowlist.AdminRole != res { - t.Fatalf("unexpected allow list status for addr1 %s, expected %s", res, allowlist.AdminRole) - } + require.Equal(t, allowlist.AdminRole, res, "unexpected allow list status for addr1 %s, expected %s", res, allowlist.AdminRole) res = deployerallowlist.GetContractDeployerAllowListStatus(sdb, addr2) - if allowlist.NoRole != res { - t.Fatalf("unexpected allow list status for addr2 %s, expected %s", res, allowlist.NoRole) - } + require.Equal(t, allowlist.NoRole, res, "unexpected allow list status for addr2 %s, expected %s", res, allowlist.NoRole) }, }, "fee manager set config": { addTx: func(gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) input, err := feemanager.PackSetFeeConfig(testFeeConfig) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1695,9 +1476,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }) signedTx, err := types.SignTx(tx, signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) gen.AddTx(signedTx) }, verifyState: func(sdb *state.StateDB) error { @@ -1730,23 +1509,16 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { test.addTx(gen) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(t, err) + require.NoError(t, blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() genesisState, err := blockchain.StateAt(blockchain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, test := range tests { if test.verifyGenesis == nil { continue @@ -1770,6 +1542,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { func ReexecBlocks(t *testing.T, create ReexecTestFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1785,10 +1558,8 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir(), 4096) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 10 blocks. signer := types.HomesteadSigner{} @@ -1796,21 +1567,16 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) foundTxs := []common.Hash{} missingTxs := []common.Hash{} for i, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) if i == 3 { // At height 3, kill the async accepted block processor to force an @@ -1836,15 +1602,11 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { // async worker shutdown cannot be found. for _, tx := range foundTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } for _, tx := range missingTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup != nil { - t.Fatalf("transaction should be missing: %v", tx) - } + require.Nilf(txLookup, "transaction should be missing: %v", tx) } // check the state of the last accepted block @@ -1884,22 +1646,21 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { allTxs := slices.Concat(foundTxs, missingTxs) for _, bc := range []*BlockChain{newChain, restartedChain} { // We should confirm that snapshots were properly initialized - if bc.snaps == nil && bc.cacheConfig.SnapshotLimit > 0 { - t.Fatal("snapshot initialization failed") + if bc.cacheConfig.SnapshotLimit > 0 { + require.NotNil(bc.snaps, "snapshot initialization failed") } // We should confirm all transactions can now be queried for _, tx := range allTxs { txLookup, _, _ := bc.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } } } func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = 
crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1915,10 +1676,8 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir(), 4096) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // Check that we are generating enough blocks to test the reexec functionality. genNumBlocks := 20 @@ -1930,21 +1689,16 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) foundTxs := []common.Hash{} missingTxs := []common.Hash{} for i, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) if i == numAcceptedBlocks { // kill the async accepted block processor to force an @@ -1970,15 +1724,11 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { // async worker shutdown cannot be found. 
for _, tx := range foundTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } for _, tx := range missingTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup != nil { - t.Fatalf("transaction should be missing: %v", tx) - } + require.Nilf(txLookup, "transaction should be missing: %v", tx) } // check the state of the last accepted block @@ -2016,16 +1766,14 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { allTxs := slices.Concat(foundTxs, missingTxs) for _, bc := range []*BlockChain{newChain, restartedChain} { // We should confirm that snapshots were properly initialized - if bc.snaps == nil && bc.cacheConfig.SnapshotLimit > 0 { - t.Fatal("snapshot initialization failed") + if bc.cacheConfig.SnapshotLimit > 0 { + require.NotNil(bc.snaps, "snapshot initialization failed") } // We should confirm all transactions can now be queried for _, tx := range allTxs { txLookup, _, _ := bc.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } } } @@ -2048,9 +1796,7 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, tempDir, 4096) - if err != nil { - t.Fatalf("failed to create blockchain: %v", err) - } + require.NoError(t, err) // Check that we are generating enough blocks to test the reexec functionality. signer := types.HomesteadSigner{} @@ -2058,18 +1804,14 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(t, err) + // Accept only the first block. - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(t, blockchain.Accept(chain[0])) // Simulate a crash by updating the acceptor tip require.NoError(t, blockchain.writeBlockAcceptedIndices(chain[1])) @@ -2083,12 +1825,8 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { // We should be able to accept the remaining blocks for _, block := range chain[2:] { - if err := restartedBlockchain.InsertBlock(block); err != nil { - t.Fatalf("failed to insert block %d: %v", block.NumberU64(), err) - } - if err := restartedBlockchain.Accept(block); err != nil { - t.Fatalf("failed to accept block %d: %v", block.NumberU64(), err) - } + require.NoErrorf(t, restartedBlockchain.InsertBlock(block), "inserting block %d", block.NumberU64()) + require.NoErrorf(t, restartedBlockchain.Accept(block), "accepting block %d", block.NumberU64()) } // check the state of the last accepted block diff --git a/core/extstate/database_test.go b/core/extstate/database_test.go index e52db05c61..902aeae468 100644 --- a/core/extstate/database_test.go +++ b/core/extstate/database_test.go @@ -352,7 +352,7 @@ func FuzzTree(f *testing.F) { fuzzState.deleteStorage(rand.Intn(len(fuzzState.currentAddrs)), rand.Uint64()) } default: - t.Fatalf("unknown step: %d", step) + require.Failf(t, "unknown step", "got: %d", step) } } }) diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go index fde2e77f6b..d312b98d4d 100644 --- a/core/state/trie_prefetcher_extra_test.go +++ b/core/state/trie_prefetcher_extra_test.go @@ -123,11 +123,8 @@ func BenchmarkPrefetcherDatabase(b *testing.B) { commit(levelDB, snaps, db) b.Logf("Root: %v, kvs: %d, block: %d (committed)", root, count, block) } - if previous != root { - 
require.NoError(db.TrieDB().Dereference(previous)) - } else { - b.Fatal("root did not change") - } + require.NotEqual(root, previous, "root and previous should not be equal") + require.NoError(db.TrieDB().Dereference(previous)) } require.NoError(levelDB.Close()) b.Log("Starting benchmarks") diff --git a/core/state_processor_ext_test.go b/core/state_processor_ext_test.go index 5e40a1fa47..f101287eba 100644 --- a/core/state_processor_ext_test.go +++ b/core/state_processor_ext_test.go @@ -12,10 +12,12 @@ import ( "github.com/ava-labs/libevm/core/types" "github.com/ava-labs/libevm/core/vm" "github.com/ava-labs/libevm/crypto" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/vmerrors" "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist" "github.com/ava-labs/subnet-evm/utils" @@ -87,22 +89,17 @@ func TestBadTxAllowListBlock(t *testing.T) { defer blockchain.Stop() for i, tt := range []struct { txs []*types.Transaction - want string + want error }{ { // Nonwhitelisted address txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, ethparams.TxGas, big.NewInt(0), big.NewInt(225000000000)), }, - want: "could not apply tx 0 [0xc5725e8baac950b2925dd4fea446ccddead1cc0affdae18b31a7d910629d9225]: cannot issue transaction from non-allow listed address: 0x71562b71999873DB5b286dF957af199Ec94617F7", + want: vmerrors.ErrSenderAddressNotAllowListed, }, } { block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } - if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) - } + require.ErrorIs(t, err, tt.want, "test %d", i) } } diff --git a/eth/tracers/api_extra_test.go 
b/eth/tracers/api_extra_test.go index 08fba7e252..d4be414037 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -132,25 +132,12 @@ func testTraceBlockPrecompileActivation(t *testing.T, scheme string) { } for i, tc := range testSuite { result, err := api.TraceBlockByNumber(t.Context(), tc.blockNumber, tc.config) + require.ErrorIs(t, err, tc.expectErr, "test %d", i) if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d, want error %v", i, tc.expectErr) - continue - } - if !reflect.DeepEqual(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d, want no error, have %v", i, err) continue } have, _ := json.Marshal(result) - want := tc.want - if string(have) != want { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want) - } + require.Equal(t, tc.want, string(have), "test %d", i) } } @@ -305,28 +292,19 @@ func testTraceChainPrecompileActivation(t *testing.T, scheme string) { next := c.start + 1 for result := range resCh { - if have, want := uint64(result.Block), next; have != want { - t.Fatalf("unexpected tracing block, have %d want %d", have, want) - } - if have, want := len(result.Traces), int(next); have != want { - t.Fatalf("unexpected result length, have %d want %d", have, want) - } + require.Equal(t, next, uint64(result.Block), "unexpected tracing block") + require.Len(t, result.Traces, int(next), "unexpected result length") for _, trace := range result.Traces { trace.TxHash = common.Hash{} blob, _ := json.Marshal(trace) - if have, want := string(blob), single; have != want { - t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want) - } + require.Equal(t, single, string(blob), "unexpected tracing result") } next += 1 } - if next != c.end+1 { - t.Error("Missing tracing block") - } + require.Equal(t, c.end+1, next, "Missing tracing block") - if nref, nrel := ref.Load(), rel.Load(); 
nref != nrel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel) - } + nref, nrel := ref.Load(), rel.Load() + require.Equal(t, nrel, nref, "Ref and deref actions are not equal") } } @@ -443,25 +421,14 @@ func testTraceCallWithOverridesStateUpgrade(t *testing.T, scheme string) { } for i, testspec := range testSuite { result, err := api.TraceCall(t.Context(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) - if testspec.expectErr != nil { - require.ErrorIs(t, err, testspec.expectErr, "test %d", i) + require.ErrorIs(t, err, testspec.expectErr, "test %d", i) + if err != nil { continue - } else { - if err != nil { - t.Errorf("test %d: expect no error, got %v", i, err) - continue - } - var have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - var want *logger.ExecutionResult - if err := json.Unmarshal([]byte(testspec.expect), &want); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("test %d: result mismatch, want %v, got %v", i, testspec.expect, string(result.(json.RawMessage))) - } } + var have *logger.ExecutionResult + require.NoError(t, json.Unmarshal(result.(json.RawMessage), &have), "test %d: failed to unmarshal result", i) + var want *logger.ExecutionResult + require.NoError(t, json.Unmarshal([]byte(testspec.expect), &want), "test %d: failed to unmarshal expected result", i) + require.Equal(t, want, have, "test %d: result mismatch", i) } } diff --git a/network/network_test.go b/network/network_test.go index ac1a7b8b5c..1608ca30ef 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -241,9 +241,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { senderWg.Wait() require.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) for _, nodeID := range nodes { - if _, exists := 
contactedNodes[nodeID]; !exists { - t.Fatalf("expected nodeID %s to be contacted but was not", nodeID) - } + require.Contains(t, contactedNodes, nodeID, "node %s was not contacted", nodeID) } // ensure empty nodeID is not allowed @@ -438,9 +436,8 @@ func TestRequestMinVersion(t *testing.T) { require.NoError(t, err) var response TestMessage - if _, err = codecManager.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("unexpected error during unmarshal", err) - } + _, err = codecManager.Unmarshal(responseBytes, &response) + require.NoError(t, err) require.Equal(t, "this is a response", response.Message) } diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index 140611318c..8e00e75022 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,9 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. 
(only check the result on the last apply) if i != len(tt.configs)-1 { - if err != nil { - t.Fatalf("expecting checkConfigCompatible call %d to return nil, got %s", i+1, err) - } + require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } @@ -298,7 +296,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { if tt.expectedErrorString != "" { require.ErrorContains(t, err, tt.expectedErrorString) } else { - require.Nil(t, err) + require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) } } } diff --git a/plugin/evm/customheader/dynamic_fee_windower_test.go b/plugin/evm/customheader/dynamic_fee_windower_test.go index c3bc8dc2a7..7386c975ca 100644 --- a/plugin/evm/customheader/dynamic_fee_windower_test.go +++ b/plugin/evm/customheader/dynamic_fee_windower_test.go @@ -6,6 +6,8 @@ package customheader import ( "math/big" "testing" + + "github.com/stretchr/testify/require" ) func TestSelectBigWithinBounds(t *testing.T) { @@ -49,9 +51,7 @@ func TestSelectBigWithinBounds(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { v := selectBigWithinBounds(test.lower, test.value, test.upper) - if v.Cmp(test.expected) != 0 { - t.Fatalf("Expected (%d), found (%d)", test.expected, v) - } + require.Zerof(t, v.Cmp(test.expected), "Expected (%d), found (%d)", test.expected, v) }) } } diff --git a/plugin/evm/customtypes/block_ext_test.go b/plugin/evm/customtypes/block_ext_test.go index 0985d2ead6..a0082a76eb 100644 --- a/plugin/evm/customtypes/block_ext_test.go +++ b/plugin/evm/customtypes/block_ext_test.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/vms/evm/acp226" "github.com/ava-labs/libevm/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/internal/blocktest" "github.com/ava-labs/subnet-evm/utils" @@ -139,7 +140,7 @@ func exportedFieldsPointToDifferentMemory[T 
interface { case []uint8: assertDifferentPointers(t, unsafe.SliceData(f), unsafe.SliceData(fieldCp.([]uint8))) default: - t.Errorf("field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f) + require.Failf(t, "invalid type", "field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f) } }) } @@ -149,14 +150,9 @@ func exportedFieldsPointToDifferentMemory[T interface { // pointers pointing to different memory locations. func assertDifferentPointers[T any](t *testing.T, a *T, b any) { t.Helper() - switch { - case a == nil: - t.Errorf("a (%T) cannot be nil", a) - case b == nil: - t.Errorf("b (%T) cannot be nil", b) - case a == b: - t.Errorf("pointers to same memory") - } + require.NotNil(t, a, "a (%T) cannot be nil", a) + require.NotNil(t, b, "b (%T) cannot be nil", b) + require.NotSame(t, a, b, "pointers to same memory") // Note: no need to check `b` is of the same type as `a`, otherwise // the memory address would be different as well. } diff --git a/plugin/evm/customtypes/header_ext_test.go b/plugin/evm/customtypes/header_ext_test.go index 2a77264faf..8652a4709c 100644 --- a/plugin/evm/customtypes/header_ext_test.go +++ b/plugin/evm/customtypes/header_ext_test.go @@ -175,7 +175,7 @@ func allFieldsSet[T interface { case []uint8, []*Header, Transactions, []*Transaction, Withdrawals, []*Withdrawal: assert.NotEmpty(t, f) default: - t.Errorf("Field %q has unsupported type %T", field.Name, f) + assert.Failf(t, "invalid type", "Field %q has unsupported type %T", field.Name, f) } }) } @@ -186,10 +186,7 @@ func assertNonZero[T interface { *big.Int | *common.Hash | *uint64 | *[]uint8 | *Header | *acp226.DelayExcess }](t *testing.T, v T) { t.Helper() - var zero T - if v == zero { - t.Errorf("must not be zero value for %T", v) - } + require.NotZero(t, v) } // Note [TestCopyHeader] tests the [HeaderExtra.PostCopy] method. 
diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 5d1f8e5d2e..228f4ef70d 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -102,12 +102,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(t.Context(), nodeID, requestID, commonEng.ErrTimeout); err != nil { - panic(err) - } - if err := syncerVM.Client.Shutdown(); err != nil { - panic(err) - } + require.NoError(t, syncerVM.AppRequestFailed(t.Context(), nodeID, requestID, commonEng.ErrTimeout)) + require.NoError(t, syncerVM.Client.Shutdown()) } else { syncerVM.AppResponse(t.Context(), nodeID, requestID, response) } @@ -128,16 +124,14 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() - if !hasItem { - t.Fatal("expected nodeSet to contain at least 1 nodeID") - } + require.True(t, hasItem, "expected nodeSet to contain at least 1 nodeID") go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } // Reset metrics to allow re-initialization vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` - if err := syncDisabledVM.Initialize( + require.NoError(t, syncDisabledVM.Initialize( t.Context(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, @@ -146,19 +140,14 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { []byte(stateSyncDisabledConfigJSON), []*commonEng.Fx{}, appSender, - ); err != nil { - t.Fatal(err) - } + )) defer func() { - if err := syncDisabledVM.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, 
syncDisabledVM.Shutdown(t.Context())) }() - if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { - t.Fatalf("Unexpected last accepted height: %d", height) - } + height := syncDisabledVM.LastAcceptedBlockInternal().Height() + require.Zero(t, height, "Unexpected last accepted height: %d", height) enabled, err := syncDisabledVM.StateSyncEnabled(t.Context()) require.NoError(t, err) @@ -167,29 +156,17 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { // Process the first 10 blocks from the serverVM for i := uint64(1); i < 10; i++ { ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i) - if ethBlock == nil { - t.Fatalf("VM Server did not have a block available at height %d", i) - } + require.NotNil(t, ethBlock, "VM Server did not have a block available at height %d", i) b, err := rlp.EncodeToBytes(ethBlock) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) blk, err := syncDisabledVM.ParseBlock(t.Context(), b) - if err != nil { - t.Fatal(err) - } - if err := blk.Verify(t.Context()); err != nil { - t.Fatal(err) - } - if err := blk.Accept(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, blk.Verify(t.Context())) + require.NoError(t, blk.Accept(t.Context())) } // Verify the snapshot disk layer matches the last block root lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root - if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil { - t.Fatal(err) - } + require.NoError(t, syncDisabledVM.blockChain.Snapshots().Verify(lastRoot)) syncDisabledVM.blockChain.DrainAcceptorQueue() // Create a new VM from the same database with state sync enabled. 
@@ -201,7 +178,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { ) // Reset metrics to allow re-initialization vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() - if err := syncReEnabledVM.Initialize( + require.NoError(t, syncReEnabledVM.Initialize( t.Context(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, @@ -210,9 +187,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { []byte(configJSON), []*commonEng.Fx{}, appSender, - ); err != nil { - t.Fatal(err) - } + )) // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { @@ -290,14 +265,12 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s generateAndAcceptBlocks(t, serverVM.vm, numBlocks, func(_ int, gen *core.BlockGen) { br := predicate.BlockResults{} b, err := br.Bytes() - if err != nil { - t.Fatal(err) - } + require.NoError(err) gen.AppendExtra(b) tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, ethparams.TxGas, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - require.NoError(err) + require.NoError(err, "failed to sign transaction") gen.AddTx(signedTx) }, nil) @@ -477,15 +450,13 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { br := predicate.BlockResults{} b, err := br.Bytes() - if err != nil { - t.Fatal(err) - } + require.NoError(err) gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) + 
require.NoError(err, "failed to sign transaction") gen.AddTx(signedTx) i++ if i >= txsPerBlock { @@ -567,19 +538,11 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, // generated by GenerateChain acceptExternalBlock := func(block *types.Block) { bytes, err := rlp.EncodeToBytes(block) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vmBlock, err := vm.ParseBlock(t.Context(), bytes) - if err != nil { - t.Fatal(err) - } - if err := vmBlock.Verify(t.Context()); err != nil { - t.Fatal(err) - } - if err := vmBlock.Accept(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, vmBlock.Verify(t.Context())) + require.NoError(t, vmBlock.Accept(t.Context())) if accepted != nil { accepted(block) @@ -598,9 +561,7 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, gen(i, g) }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vm.blockChain.DrainAcceptorQueue() } diff --git a/plugin/evm/version_test.go b/plugin/evm/version_test.go index 8347ac8359..c795b738c5 100644 --- a/plugin/evm/version_test.go +++ b/plugin/evm/version_test.go @@ -28,12 +28,9 @@ func TestCompatibility(t *testing.T) { require.NoError(t, err, "json decoding compatibility file") rpcChainVMVersion, valueInJSON := parsedCompat.RPCChainVMProtocolVersion[Version] - if !valueInJSON { - t.Fatalf("%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", - filepath.Base(compatibilityFile), Version) - } - if rpcChainVMVersion != version.RPCChainVMProtocol { - t.Fatalf("%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", - filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) - } + require.Truef(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", + filepath.Base(compatibilityFile), Version) + require.Equalf(t, 
version.RPCChainVMProtocol, rpcChainVMVersion, + "%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", + filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index a99359cd91..2679d4c047 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -12,7 +12,6 @@ import ( "math/big" "os" "path/filepath" - "strings" "testing" "time" @@ -346,21 +345,13 @@ func issueAndAccept(t *testing.T, vm *VM) snowman.Block { require.Equal(t, commonEng.PendingTxs, msg) blk, err := vm.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := blk.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, blk.Verify(t.Context())) - if err := vm.SetPreference(t.Context(), blk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm.SetPreference(t.Context(), blk.ID())) - if err := blk.Accept(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, blk.Accept(t.Context())) return blk } @@ -388,86 +379,61 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk1 := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk1.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk1.ID()), newHead.Head.Hash(), "Expected new block to match") txs := 
make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), key.Address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), key.PrivateKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } errs = tvm.vm.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) blk2 := issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk2.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk2.ID()), newHead.Head.Hash(), "Expected new block to match") lastAcceptedID, err := tvm.vm.LastAccepted(t.Context()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk2.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) - } + require.NoError(t, err) + require.Equal(t, blk2.ID(), lastAcceptedID, "Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock - if ethBlk1Root := ethBlk1.Root(); !tvm.vm.blockChain.HasState(ethBlk1Root) { - t.Fatalf("Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") - } + ethBlk1Root := ethBlk1.Root() + require.True(t, tvm.vm.blockChain.HasState(ethBlk1Root), "Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") // Clear the cache and ensure that GetBlock returns internal blocks with the correct status tvm.vm.State.Flush() blk2Refreshed, err := tvm.vm.GetBlockInternal(t.Context(), blk2.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) 
blk1RefreshedID := blk2Refreshed.Parent() blk1Refreshed, err := tvm.vm.GetBlockInternal(t.Context(), blk1RefreshedID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if blk1Refreshed.ID() != blk1.ID() { - t.Fatalf("Found unexpected blkID for parent of blk2") - } + require.Equal(t, blk1.ID(), blk1Refreshed.ID(), "Found unexpected blkID for parent of blk2") // Close the vm and all databases - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) restartedVM := &VM{} newCTX := snowtest.Context(t, snowtest.CChainID) newCTX.NetworkUpgrades = upgradetest.GetConfig(fork) newCTX.ChainDataDir = tvm.vm.ctx.ChainDataDir conf := getConfig(scheme, "") - if err := restartedVM.Initialize( + require.NoError(t, restartedVM.Initialize( t.Context(), newCTX, tvm.db, @@ -476,25 +442,16 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { []byte(conf), []*commonEng.Fx{}, nil, - ); err != nil { - t.Fatal(err) - } + )) // State root should not have been committed and discarded on restart - if ethBlk1Root := ethBlk1.Root(); restartedVM.blockChain.HasState(ethBlk1Root) { - t.Fatalf("Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") - } + require.False(t, restartedVM.blockChain.HasState(ethBlk1Root), "Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") // State root should be committed when accepted tip on shutdown - ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock - if ethBlk2Root := ethBlk2.Root(); !restartedVM.blockChain.HasState(ethBlk2Root) { - t.Fatalf("Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") - } + require.True(t, restartedVM.blockChain.HasState(blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Root()), "Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") // Shutdown the newest VM 
- if err := restartedVM.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, restartedVM.Shutdown(t.Context())) } // Regression test to ensure that after accepting block A @@ -529,13 +486,9 @@ func testSetPreferenceRace(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -545,15 +498,11 @@ func testSetPreferenceRace(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -561,44 +510,24 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), 
vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -606,9 +535,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -617,9 +544,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { // Add the remote 
transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -627,26 +552,18 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if err := vm1.SetPreference(t.Context(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block // Block C errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -654,46 +571,30 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(t.Context()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(t.Context()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(t.Context(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != 
common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") // Block D errs = vm2.txPool.AddRemotesSync(txs[5:10]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") - if err := vm2BlkD.Verify(t.Context()); err != nil { - t.Fatalf("BlkD failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkD.Verify(t.Context()), "BlkD failed verification on VM2") - if err := vm2.SetPreference(t.Context(), vm2BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkD.ID())) // VM1 receives blkC and blkD from VM1 // and happens to call SetPreference on blkD without ever calling SetPreference @@ -702,54 +603,32 @@ func testSetPreferenceRace(t *testing.T, scheme string) { // back to the last accepted block as would typically be the case in the consensus // engine vm1BlkD, err := vm1.ParseBlock(t.Context(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("VM1 errored parsing blkD: %s", err) - } + require.NoError(t, err, "VM1 errored parsing blkD") vm1BlkC, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("VM1 errored parsing blkC: %s", err) - } + require.NoError(t, err, "VM1 errored parsing blkC") // The blocks must be verified in order. This invariant is maintained // in the consensus engine. 
- if err := vm1BlkC.Verify(t.Context()); err != nil { - t.Fatalf("VM1 BlkC failed verification: %s", err) - } - if err := vm1BlkD.Verify(t.Context()); err != nil { - t.Fatalf("VM1 BlkD failed verification: %s", err) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "VM1 BlkC failed verification") + require.NoError(t, vm1BlkD.Verify(t.Context()), "VM1 BlkD failed verification") // Set VM1's preference to blockD, skipping blockC - if err := vm1.SetPreference(t.Context(), vm1BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkD.ID())) // Accept the longer chain on both VMs and ensure there are no errors // VM1 Accepts the blocks in order - if err := vm1BlkC.Accept(t.Context()); err != nil { - t.Fatalf("VM1 BlkC failed on accept: %s", err) - } - if err := vm1BlkD.Accept(t.Context()); err != nil { - t.Fatalf("VM1 BlkC failed on accept: %s", err) - } + require.NoError(t, vm1BlkC.Accept(t.Context()), "VM1 BlkC failed on accept") + require.NoError(t, vm1BlkD.Accept(t.Context()), "VM1 BlkD failed on accept") // VM2 Accepts the blocks in order - if err := vm2BlkC.Accept(t.Context()); err != nil { - t.Fatalf("VM2 BlkC failed on accept: %s", err) - } - if err := vm2BlkD.Accept(t.Context()); err != nil { - t.Fatalf("VM2 BlkC failed on accept: %s", err) - } + require.NoError(t, vm2BlkC.Accept(t.Context()), "VM2 BlkC failed on accept") + require.NoError(t, vm2BlkD.Accept(t.Context()), "VM2 BlkD failed on accept") log.Info("Validating canonical chain") // Verify the Canonical Chain for Both VMs - if err := vm2.blockChain.ValidateCanonicalChain(); err != nil { - t.Fatalf("VM2 failed canonical chain verification due to: %s", err) - } + require.NoError(t, vm2.blockChain.ValidateCanonicalChain(), "VM2 failed canonical chain verification due to") - if err := vm1.blockChain.ValidateCanonicalChain(); err != nil { - t.Fatalf("VM1 failed canonical chain verification due to: %s", err) - } + require.NoError(t, 
vm1.blockChain.ValidateCanonicalChain(), "VM1 failed canonical chain verification due to") } // Regression test to ensure that a VM that accepts block A and B @@ -785,13 +664,9 @@ func testReorgProtection(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -801,15 +676,11 @@ func testReorgProtection(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -817,44 +688,24 @@ func testReorgProtection(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), vm1BlkA.Bytes()) - if err != nil { - 
t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -862,9 +713,7 @@ func testReorgProtection(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -873,9 +722,7 @@ func testReorgProtection(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's 
preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -883,26 +730,18 @@ func testReorgProtection(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if err := vm1.SetPreference(t.Context(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block // Block C errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -910,39 +749,27 @@ func testReorgProtection(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(t.Context()), "Block failed verification on VM2") vm1BlkC, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(t.Context()); err != 
nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "Block failed verification on VM1") // Accept B, such that block C should get Rejected. - if err := vm1BlkB.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkB.Accept(t.Context()), "VM1 failed to accept block") // The below (setting preference blocks that have a common ancestor // with the preferred chain lower than the last finalized block) // should NEVER happen. However, the VM defends against this // just in case. - if err := vm1.SetPreference(t.Context(), vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { - t.Fatalf("Unexpected error when setting preference that would trigger reorg: %s", err) - } + err = vm1.SetPreference(t.Context(), vm1BlkC.ID()) + require.ErrorContains(t, err, "cannot orphan finalized block", "Expected error when setting preference that would orphan finalized block") - if err := vm1BlkC.Accept(t.Context()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - t.Fatalf("Unexpected error when setting block at finalized height: %s", err) - } + err = vm1BlkC.Accept(t.Context()) + require.ErrorContains(t, err, "expected accepted block to have parent", "Expected error when accepting orphaned block") } // Regression test to ensure that a VM that accepts block C while preferring @@ -971,13 +798,9 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -987,15 +810,11 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], 
firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -1003,61 +822,36 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if _, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkA.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) - } + _, err = vm1.GetBlockIDAtHeight(t.Context(), vm1BlkA.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if _, err := vm2.GetBlockIDAtHeight(t.Context(), vm2BlkA.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be 
indexed by height, but found %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } - - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if blkID, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkA.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) - } else if blkID != vm1BlkA.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } - if blkID, err := vm2.GetBlockIDAtHeight(t.Context(), vm2BlkA.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) - } else if blkID != vm2BlkA.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + _, err = vm2.GetBlockIDAtHeight(t.Context(), vm2BlkA.Height()) + require.ErrorIs(t, err, database.ErrNotFound) + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) + + require.NoError(t, vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + blkID, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkA.Height()) + require.NoError(t, err, "Height lookup failed on accepted block") + require.Equal(t, vm1BlkA.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) + + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") + blkID, err = vm2.GetBlockIDAtHeight(t.Context(), vm2BlkA.Height()) + require.NoError(t, err, "Height lookup failed on accepted block") + require.Equal(t, vm2BlkA.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - 
t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -1065,9 +859,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1076,9 +868,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -1086,33 +876,22 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if _, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkB.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) - } + _, err = vm1.GetBlockIDAtHeight(t.Context(), vm1BlkB.Height()) + require.ErrorIs(t, err, 
database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) - if err := vm1.SetPreference(t.Context(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1120,37 +899,24 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") vm1BlkC, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "Block failed verification on VM1") - if _, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkC.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) - } + _, err = 
vm1.GetBlockIDAtHeight(t.Context(), vm1BlkC.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) - if err := vm1BlkC.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkC.Accept(t.Context()), "VM1 failed to accept block") - if blkID, err := vm1.GetBlockIDAtHeight(t.Context(), vm1BlkC.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) - } else if blkID != vm1BlkC.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) - } + blkID, err = vm1.GetBlockIDAtHeight(t.Context(), vm1BlkC.Height()) + require.NoError(t, err, "Height lookup failed on accepted block") + require.Equal(t, vm1BlkC.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) } // Regression test to ensure that a VM that verifies block B, C, then @@ -1182,13 +948,9 @@ func testStickyPreference(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1198,15 +960,11 @@ func testStickyPreference(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], 
firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -1214,44 +972,24 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + 
require.NoError(t, vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -1259,9 +997,7 @@ func testStickyPreference(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1270,9 +1006,7 @@ func testStickyPreference(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -1280,29 +1014,20 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if err := vm1.SetPreference(t.Context(), 
vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) - } + foundBlkBHash := vm1.blockChain.GetBlockByNumber(blkBHeight).Hash() + require.Equal(t, blkBHash, foundBlkBHash, "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1310,28 +1035,18 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(t.Context()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(t.Context()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(t.Context(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) 
- } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1339,96 +1054,50 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Parse blocks produced in vm2 vm1BlkC, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() vm1BlkD, err := vm1.ParseBlock(t.Context(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") blkDHeight := vm1BlkD.Height() blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() // Should be no-ops - if err := vm1BlkC.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - t.Fatalf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex()) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(t.Context()), "Block failed 
verification on VM1") + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) + require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should still be no-ops on re-verify - if err := vm1BlkC.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - t.Fatalf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex()) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(t.Context()), "Block failed verification on VM1") + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) + require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should be 
queryable after setting preference to side chain - if err := vm1.SetPreference(t.Context(), vm1BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkD.ID())) - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash(), "expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Attempt to accept out of order - if err := vm1BlkD.Accept(t.Context()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - t.Fatalf("unexpected error when accepting out of order block: %s", err) - } + require.ErrorContains(t, vm1BlkD.Accept(t.Context()), "expected accepted block to have parent", "unexpected error when accepting out of order block") // Accept in order - if err := vm1BlkC.Accept(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Accept(t.Context()); err != nil { - t.Fatalf("Block failed acceptance on VM1: %s", err) - } + 
require.NoError(t, vm1BlkC.Accept(t.Context()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Accept(t.Context()), "Block failed acceptance on VM1") // Ensure queryable after accepting - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash(), "expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) } // Regression test to ensure that a VM that prefers block B is able to parse @@ -1460,12 +1129,8 @@ func testUncleBlock(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1475,15 +1140,11 @@ func testUncleBlock(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, 
big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -1491,52 +1152,30 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } - - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) + + require.NoError(t, 
vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1544,9 +1183,7 @@ func testUncleBlock(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -1554,23 +1191,15 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if err := vm1.SetPreference(t.Context(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + 
require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1578,28 +1207,18 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(t.Context()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(t.Context()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(t.Context(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") errs = vm2.txPool.AddRemotesSync(txs[5:10]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1607,9 +1226,7 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Create uncle block from blkD blkDEthBlock := vm2BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -1626,12 +1243,10 @@ func testUncleBlock(t *testing.T, scheme string) { ) uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm) - if err := uncleBlock.Verify(t.Context()); !errors.Is(err, errUnclesUnsupported) { - t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, 
err.Error()) - } - if _, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()); err != nil { - t.Fatalf("VM1 errored parsing blkC: %s", err) - } + verifyErr := uncleBlock.Verify(t.Context()) + require.ErrorIs(t, verifyErr, errUnclesUnsupported) + _, err = vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) + require.NoError(t, err, "VM1 errored parsing blkC") _, err = vm1.ParseBlock(t.Context(), uncleBlock.Bytes()) require.ErrorIs(t, err, errUnclesUnsupported) } @@ -1653,22 +1268,16 @@ func testEmptyBlock(t *testing.T, scheme string) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -1676,9 +1285,7 @@ func testEmptyBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") // Create empty block from blkA ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -1692,16 +1299,12 @@ func testEmptyBlock(t *testing.T, scheme string) { ) emptyBlock, err := wrapBlock(emptyEthBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if _, err := tvm.vm.ParseBlock(t.Context(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("VM 
should have failed with errEmptyBlock but got %s", err.Error()) - } - if err := emptyBlock.Verify(t.Context()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) - } + _, err = tvm.vm.ParseBlock(t.Context(), emptyBlock.Bytes()) + require.ErrorIs(t, err, errEmptyBlock) + verifyErr := emptyBlock.Verify(t.Context()) + require.ErrorIs(t, verifyErr, errEmptyBlock) } // Regression test to ensure that a VM that verifies block B, C, then @@ -1732,13 +1335,9 @@ func testAcceptReorg(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(t.Context())) - if err := vm2.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(t.Context())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1748,15 +1347,11 @@ func testAcceptReorg(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(t.Context()) @@ -1764,44 +1359,24 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: 
%s", err) - } + require.NoError(t, vm1BlkA.Verify(t.Context()), "Block failed verification on VM1") - if err := vm1.SetPreference(t.Context(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(t.Context(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(t.Context(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(t.Context()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(t.Context(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(t.Context()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(t.Context()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(t.Context()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -1809,9 +1384,7 @@ func testAcceptReorg(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) 
signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1819,9 +1392,7 @@ func testAcceptReorg(t *testing.T, scheme string) { // for block B errs := vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(t.Context()) @@ -1829,23 +1400,15 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(t.Context()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(t.Context())) - if err := vm1.SetPreference(t.Context(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(t.Context(), vm1BlkB.ID())) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1853,28 +1416,18 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(t.Context()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(t.Context()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(t.Context(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(t.Context(), 
vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(t.Context()) @@ -1882,52 +1435,30 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Parse blocks produced in vm2 vm1BlkC, err := vm1.ParseBlock(t.Context(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") vm1BlkD, err := vm1.ParseBlock(t.Context(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(t.Context()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(t.Context()), "Block failed verification on VM1") blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkBHash, 
vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) - if err := vm1BlkC.Accept(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkC.Accept(t.Context())) blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { - t.Fatalf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex()) - } - if err := vm1BlkB.Reject(t.Context()); err != nil { - t.Fatal(err) - } + require.Equal(t, blkCHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkCHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) + require.NoError(t, vm1BlkB.Reject(t.Context())) - if err := vm1BlkD.Accept(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkD.Accept(t.Context())) blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) - } + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) } func TestTimeSemanticVerify(t *testing.T) { @@ -1989,22 +1520,16 @@ func TestTimeSemanticVerify(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.LatestSigner(tvm.vm.chainConfig), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range 
txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -2012,13 +1537,8 @@ func TestTimeSemanticVerify(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } - - if err := blk.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") + require.NoError(t, blk.Verify(t.Context()), "Block failed verification on VM") // Create empty block from blkA ethBlk := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -2075,23 +1595,17 @@ func TestBuildTimeMilliseconds(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tvm.vm.clock.Set(buildTime) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -2099,9 +1613,7 @@ func TestBuildTimeMilliseconds(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") require.NoError(t, err) ethBlk := 
blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock require.Equal(t, test.expectedTimeMilliseconds, customtypes.BlockTimeMilliseconds(ethBlk)) @@ -2124,22 +1636,16 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -2147,17 +1653,11 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := blk.Verify(t.Context()); err != nil { - t.Fatalf("Block failed verification on VM: %s", err) - } + require.NoError(t, blk.Verify(t.Context()), "Block failed verification on VM") - if err := tvm.vm.SetPreference(t.Context(), blk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.SetPreference(t.Context(), blk.ID())) blkHeight := blk.Height() blkHash := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() @@ -2166,27 +1666,17 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { ctx := t.Context() b, err := tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if err != nil { - t.Fatal(err) - } - if 
b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) - } + require.NoError(t, err) + require.Equal(t, blkHash, b.Hash(), "expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) _, err = tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if !errors.Is(err, eth.ErrUnfinalizedData) { - t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) - } + require.ErrorIs(t, err, eth.ErrUnfinalizedData, "expected ErrUnfinalizedData but got %s", err) + require.NoError(t, blk.Accept(t.Context()), "VM failed to accept block") - if err := blk.Accept(t.Context()); err != nil { - t.Fatalf("VM failed to accept block: %s", err) - } - - if b := tvm.vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) - } + b = tvm.vm.blockChain.GetBlockByNumber(blkHeight) + require.Equal(t, blkHash, b.Hash(), "expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } func TestBuildAllowListActivationBlock(t *testing.T) { @@ -2199,69 +1689,49 @@ func TestBuildAllowListActivationBlock(t *testing.T) { func testBuildAllowListActivationBlock(t *testing.T, scheme string) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ deployerallowlist.ConfigKey: deployerallowlist.NewConfig(utils.TimeToNewUint64(time.Now()), testEthAddrs, nil, nil), } genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), configJSON: getConfig(scheme, ""), }) defer 
func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) role := deployerallowlist.GetContractDeployerAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) - } + require.Equal(t, allowlist.NoRole, role, "Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Send basic transaction to construct a simple block and confirm that the precompile state configuration in the worker behaves correctly. tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") // Verify that the allow list config activation was handled correctly in the first block. 
blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) role = deployerallowlist.GetContractDeployerAllowListStatus(blkState, testEthAddrs[0]) - if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role) - } + require.Equal(t, allowlist.AdminRole, role, "Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role) } // Test that the tx allow list allows whitelisted transactions and blocks non-whitelisted addresses @@ -2270,9 +1740,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { managerKey := testKeys[1] managerAddress := testEthAddrs[1] genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Durango]))); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Durango])))) // this manager role should not be activated because DurangoTimestamp is in the future params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ txallowlist.ConfigKey: txallowlist.NewConfig(utils.NewUint64(0), testEthAddrs[0:1], nil, nil), @@ -2280,9 +1748,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { durangoTime := time.Now().Add(10 * time.Hour) params.GetExtra(genesis.Config).DurangoTimestamp = utils.TimeToNewUint64(durangoTime) genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // prepare the new upgrade bytes to disable the TxAllowList disableAllowListTime := durangoTime.Add(10 * time.Hour) @@ -2309,28 +1775,20 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan 
core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) - } + require.Equal(t, allowlist.AdminRole, role, "Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) - } + require.Equal(t, allowlist.NoRole, role, "Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Should not be a manager role because Durango has not activated yet role = txallowlist.GetTxAllowListStatus(genesisState, managerAddress) require.Equal(t, allowlist.NoRole, role) @@ -2341,21 +1799,15 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - 
t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed) // Submit a rejected transaction, should throw an error because manager is not activated tx2 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -2374,9 +1826,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { txs := block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) @@ -2491,17 +1941,13 @@ func TestVerifyManagerConfig(t *testing.T) { func TestTxAllowListDisablePrecompile(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Latest]))); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Latest])))) enableAllowListTimestamp := upgrade.InitiallyActiveTime // enable at initially active time params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ txallowlist.ConfigKey: txallowlist.NewConfig(utils.TimeToNewUint64(enableAllowListTimestamp), testEthAddrs[0:1], nil, nil), } genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // arbitrary choice ahead of enableAllowListTimestamp disableAllowListTimestamp := enableAllowListTimestamp.Add(10 * time.Hour) @@ -2527,28 +1973,20 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { tvm.vm.clock.Set(disableAllowListTimestamp) // upgrade takes effect after a block is issued, so we can set vm's clock here. 
defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) - } + require.Equal(t, allowlist.AdminRole, role, "expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) - } + require.Equal(t, allowlist.NoRole, role, "expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Submit a successful transaction tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) @@ -2556,30 +1994,22 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = 
tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed, errs[0]) blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade @@ -2589,9 +2019,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // retry the rejected Tx, which should now succeed errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust blk = issueAndAccept(t, tvm.vm) @@ -2599,9 +2027,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } @@ -2609,9 +2035,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { func TestFeeManagerChangeFee(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if err := 
genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) configExtra := params.GetExtra(genesis.Config) configExtra.GenesisPrecompiles = extras.Precompiles{ feemanager.ConfigKey: feemanager.NewConfig(utils.NewUint64(0), testEthAddrs[0:1], nil, nil, nil), @@ -2633,36 +2057,26 @@ func TestFeeManagerChangeFee(t *testing.T) { configExtra.FeeConfig = testLowFeeConfig genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - t.Fatalf("Expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) - } + require.Equal(t, allowlist.AdminRole, role, "expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - t.Fatalf("Expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) - } + require.Equal(t, allowlist.NoRole, role, "expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Contract is initialized but no preconfig is given, reader should return genesis fee config feeConfig, lastChangedAt, err := 
tvm.vm.blockChain.GetFeeConfigAt(tvm.vm.blockChain.Genesis().Header()) require.NoError(t, err) @@ -2688,20 +2102,14 @@ func TestFeeManagerChangeFee(t *testing.T) { }) signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -2723,9 +2131,7 @@ func TestFeeManagerChangeFee(t *testing.T) { }) signedTx2, err := types.SignTx(tx2, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0] require.ErrorIs(t, err, txpool.ErrUnderpriced) @@ -2742,14 +2148,10 @@ func TestAllowFeeRecipientDisabled(t *testing.T) { func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).AllowFeeRecipients = false // set to false initially genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), configJSON: getConfig(scheme, ""), @@ -2757,9 +2159,7 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { tvm.vm.miner.SetEtherbase(common.HexToAddress("0x0123456789")) // set 
non-blackhole address by force defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -2767,15 +2167,11 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -2801,9 +2197,7 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { ) modifiedBlk, err := wrapBlock(modifiedBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = modifiedBlk.Verify(t.Context()) require.ErrorIs(t, err, vmerrors.ErrInvalidCoinbase) @@ -2811,31 +2205,23 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { func TestAllowFeeRecipientEnabled(t *testing.T) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).AllowFeeRecipients = true genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) etherBase := common.HexToAddress("0x0123456789") c := config.NewDefaultConfig() c.FeeRecipient = etherBase.String() configJSON, err := json.Marshal(c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: 
string(genesisJSON), configJSON: string(configJSON), }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -2843,29 +2229,21 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // Verify that etherBase has received fees blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) balance := blkState.GetBalance(etherBase) require.Equal(t, 1, balance.Cmp(common.U2560)) @@ -3161,9 +2539,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -3173,21 +2549,15 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), 
nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") reinitVM := &VM{} // use the block's timestamp instead of 0 since rewind to genesis @@ -3269,22 +2639,16 @@ func TestParentBeaconRootBlock(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(t.Context()) @@ -3292,9 +2656,7 @@ func TestParentBeaconRootBlock(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(t.Context()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") // Modify the block to have a parent beacon root ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock 
@@ -3303,9 +2665,7 @@ func TestParentBeaconRootBlock(t *testing.T) { parentBeaconEthBlock := ethBlock.WithSeal(header) parentBeaconBlock, err := wrapBlock(parentBeaconEthBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errCheck := func(err error) { if test.expectedError { @@ -3388,9 +2748,7 @@ func TestStandaloneDB(t *testing.T) { func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) precompileActivationTime := utils.NewUint64(genesis.Timestamp + 5) // 5 seconds after genesis configExtra := params.GetExtra(genesis.Config) configExtra.GenesisPrecompiles = extras.Precompiles{ @@ -3413,9 +2771,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { configExtra.FeeConfig = testHighFeeConfig genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), }) diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 67f563b4a6..fd656fbe3b 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -5,7 +5,6 @@ package evm import ( "encoding/json" - "errors" "fmt" "math/big" "testing" @@ -22,7 +21,6 @@ import ( "github.com/ava-labs/libevm/core/types" "github.com/ava-labs/libevm/crypto" "github.com/holiman/uint256" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/core" @@ -47,9 +45,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { }, } upgradeBytesJSON, err := json.Marshal(upgradeConfig) - if err != nil { - t.Fatalf("could not marshal upgradeConfig to json: %s", err) - } + require.NoError(t, err, "could not marshal upgradeConfig to json") // initialize the VM 
with these upgrade bytes tvm := newVM(t, testVMConfig{ @@ -57,9 +53,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { upgradeJSON: string(upgradeBytesJSON), }) defer func() { - if err := tvm.vm.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(t.Context())) }() tvm.vm.clock.Set(enableAllowListTimestamp) @@ -70,20 +64,14 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "expected ErrSenderAddressNotAllowListed") // prepare the new upgrade bytes to disable the TxAllowList disableAllowListTimestamp := tvm.vm.clock.Time().Add(10 * time.Hour) // arbitrary choice @@ -94,32 +82,22 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { }, ) upgradeBytesJSON, err = json.Marshal(upgradeConfig) - if err != nil { - t.Fatalf("could not marshal upgradeConfig to json: %s", err) - } + require.NoError(t, err, "could not marshal upgradeConfig to json") // Reset metrics to allow re-initialization tvm.vm.ctx.Metrics = metrics.NewPrefixGatherer() // restart the vm with the same stateful params newVM := &VM{} - if err := newVM.Initialize( + require.NoError(t, newVM.Initialize( 
t.Context(), tvm.vm.ctx, tvm.db, []byte(genesisJSONSubnetEVM), upgradeBytesJSON, []byte{}, []*commonEng.Fx{}, tvm.appSender, - ); err != nil { - t.Fatal(err) - } + )) defer func() { - if err := newVM.Shutdown(t.Context()); err != nil { - t.Fatal(err) - } + require.NoError(t, newVM.Shutdown(t.Context())) }() // Set the VM's state to NormalOp to initialize the tx pool. - if err := newVM.SetState(t.Context(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - if err := newVM.SetState(t.Context(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(t, newVM.SetState(t.Context(), snow.Bootstrapping)) + require.NoError(t, newVM.SetState(t.Context(), snow.NormalOp)) newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) newVM.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) newVM.clock.Set(disableAllowListTimestamp) @@ -127,36 +105,28 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Make a block, previous rules still apply (TxAllowList is active) // Submit a successful transaction errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "expected ErrSenderAddressNotAllowListed") blk := issueAndAccept(t, newVM) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } - assert.Equal(t, signedTx0.Hash(), txs[0].Hash()) + require.Len(t, txs, 1, "Expected 
number of txs to be %d, but found %d", 1, txs.Len()) + require.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade - assert.GreaterOrEqual(t, int64(block.Time()), disableAllowListTimestamp.Unix()) + require.GreaterOrEqual(t, int64(block.Time()), disableAllowListTimestamp.Unix()) <-newTxPoolHeadChan // wait for new head in tx pool // retry the rejected Tx, which should now succeed errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") newVM.clock.Set(newVM.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust blk = issueAndAccept(t, newVM) @@ -164,10 +134,8 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } - assert.Equal(t, signedTx1.Hash(), txs[0].Hash()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } func TestNetworkUpgradesOverridden(t *testing.T) { @@ -221,13 +189,11 @@ func TestNetworkUpgradesOverridden(t *testing.T) { signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(restartedVM.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) errs := restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") blk := issueAndAccept(t, restartedVM) require.NotNil(t, blk) - require.EqualValues(t, 1, blk.Height()) + require.Equal(t, uint64(1), blk.Height()) // verify upgrade overrides require.True(t, 
restartedVM.currentRules().IsDurango) @@ -335,7 +301,7 @@ func TestVMStateUpgrade(t *testing.T) { blk := issueAndAccept(t, tvm.vm) require.NotNil(t, blk) - require.EqualValues(t, 1, blk.Height()) + require.Equal(t, uint64(1), blk.Height()) // Verify the state upgrade was applied state, err := tvm.vm.blockChain.State() diff --git a/precompile/allowlist/allowlisttest/test_allowlist_config.go b/precompile/allowlist/allowlisttest/test_allowlist_config.go index dc4e0e7beb..8738db0d40 100644 --- a/precompile/allowlist/allowlisttest/test_allowlist_config.go +++ b/precompile/allowlist/allowlisttest/test_allowlist_config.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/subnet-evm/precompile/allowlist" @@ -211,9 +212,7 @@ func VerifyPrecompileWithAllowListTests(t *testing.T, module modules.Module, ver tests := AllowListConfigVerifyTests(t, module) // Add the contract specific tests to the map of tests to run. for name, test := range verifyTests { - if _, exists := tests[name]; exists { - t.Fatalf("duplicate test name: %s", name) - } + require.NotContains(t, tests, name, "duplicate test name: %s", name) tests[name] = test } @@ -225,9 +224,7 @@ func EqualPrecompileWithAllowListTests(t *testing.T, module modules.Module, equa tests := AllowListConfigEqualTests(t, module) // Add the contract specific tests to the map of tests to run. 
for name, test := range equalTests { - if _, exists := tests[name]; exists { - t.Fatalf("duplicate test name: %s", name) - } + require.NotContains(t, tests, name, "duplicate test name: %s", name) tests[name] = test } diff --git a/scripts/upstream_files.txt b/scripts/upstream_files.txt index 350cafb134..595c60d727 100644 --- a/scripts/upstream_files.txt +++ b/scripts/upstream_files.txt @@ -1,3 +1,29 @@ +accounts/* +cmd/* +consensus/* +core/* +eth/* +ethclient/* +internal/* +log/* +miner/* +node/* +params/config.go +params/config_test.go +params/denomination.go +params/network_params.go +params/version.go +plugin/evm/customtypes/block_test.go +plugin/evm/customtypes/hashing_test.go +plugin/evm/customtypes/rlp_fuzzer_test.go +plugin/evm/customtypes/types_test.go +rpc/* +signer/* +tests/init.go +tests/rlp_test_util.go +tests/state_test_util.go +triedb/* + !accounts/abi/abi_extra_test.go !accounts/abi/bind/bind_extra.go !accounts/abi/bind/precompilebind/* @@ -25,31 +51,4 @@ !ethclient/client_interface_test.go !internal/ethapi/api_extra.go !internal/ethapi/api_extra_test.go -!plugin/evm/customtypes/* !triedb/firewood/* - -accounts/* -cmd/* -consensus/* -core/* -eth/* -ethclient/* -internal/* -log/* -miner/* -node/* -params/config.go -params/config_test.go -params/denomination.go -params/network_params.go -params/version.go -plugin/evm/customtypes/block_test.go -plugin/evm/customtypes/hashing_test.go -plugin/evm/customtypes/rlp_fuzzer_test.go -plugin/evm/customtypes/types_test.go -rpc/* -signer/* -tests/init.go -tests/rlp_test_util.go -tests/state_test_util.go -triedb/* diff --git a/sync/client/client_test.go b/sync/client/client_test.go index c060b22d3a..2a89c7d895 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -107,9 +107,7 @@ func TestGetCode(t *testing.T) { codeHashes, res, expectedCode := test.setupRequest() responseBytes, err := message.Codec.Marshal(message.Version, res) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // 
Dirty hack required because the client will re-request if it encounters // an error. attempted := false @@ -152,9 +150,7 @@ func TestGetBlocks(t *testing.T) { engine := dummy.NewETHFaker() numBlocks := 110 blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, numBlocks, 0, func(_ int, _ *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + require.NoError(t, err) require.Len(t, blocks, numBlocks) // Construct client @@ -175,9 +171,7 @@ func TestGetBlocks(t *testing.T) { blockBytes := make([][]byte, 0, len(blocks)) for i := len(blocks) - 1; i >= 0; i-- { buf := new(bytes.Buffer) - if err := blocks[i].EncodeRLP(buf); err != nil { - t.Fatalf("failed to generate expected response %s", err) - } + require.NoError(t, blocks[i].EncodeRLP(buf)) blockBytes = append(blockBytes, buf.Bytes()) } @@ -197,13 +191,8 @@ func TestGetBlocks(t *testing.T) { }, getResponse: func(t *testing.T, request message.BlockRequest) []byte { response, err := blocksRequestHandler.OnBlockRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response, "Failed to generate valid response") return response }, @@ -220,13 +209,8 @@ func TestGetBlocks(t *testing.T) { getResponse: func(t *testing.T, request message.BlockRequest) []byte { request.Parents -= 5 response, err := blocksRequestHandler.OnBlockRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -254,19 +238,14 @@ func TestGetBlocks(t *testing.T) { }, getResponse: func(t *testing.T, request message.BlockRequest) []byte { response, err := blocksRequestHandler.OnBlockRequest(t.Context(), 
ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatalf("failed to get block response: %s", err) - } + require.NoError(t, err) var blockResponse message.BlockResponse - if _, err = message.Codec.Unmarshal(response, &blockResponse); err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + _, err = message.Codec.Unmarshal(response, &blockResponse) + require.NoError(t, err) // Replace middle value with garbage data blockResponse.Blocks[10] = []byte("invalid value replacing block bytes") responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -284,13 +263,8 @@ func TestGetBlocks(t *testing.T) { Height: 99, Parents: 16, }) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -313,9 +287,7 @@ func TestGetBlocks(t *testing.T) { Blocks: blockBytes, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -332,9 +304,7 @@ func TestGetBlocks(t *testing.T) { Blocks: nil, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -353,9 +323,7 @@ func TestGetBlocks(t *testing.T) { Blocks: blockBytes, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -382,15 +350,10 @@ func TestGetBlocks(t *testing.T) { blockResponse, err := stateSyncClient.GetBlocks(ctx, test.request.Hash, test.request.Height, 
test.request.Parents) if len(test.expectedErr) != 0 { - if err == nil { - t.Fatalf("Expected error: %s, but found no error", test.expectedErr) - } require.ErrorContains(t, err, test.expectedErr) return } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test.assertResponse(t, blockResponse) }) @@ -442,12 +405,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -468,12 +427,8 @@ func TestGetLeafs(t *testing.T) { modifiedRequest := request modifiedRequest.Limit = leafsLimit response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, modifiedRequest) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -488,12 +443,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -512,13 +463,9 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } 
+ require.NoError(t, err) + require.NotEmpty(t, response) - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } return response }, requireResponse: func(t *testing.T, response message.LeafsResponse) { @@ -536,12 +483,9 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) + return response }, requireResponse: func(t *testing.T, response message.LeafsResponse) { @@ -559,12 +503,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -583,23 +523,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) leafResponse.Keys = leafResponse.Keys[1:] leafResponse.Vals = leafResponse.Vals[1:] modifiedResponse, err := 
message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -613,22 +547,15 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) modifiedRequest := request modifiedRequest.Start = leafResponse.Keys[1] modifiedResponse, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 2, modifiedRequest) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -642,23 +569,16 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } - leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-2] - leafResponse.Vals = leafResponse.Vals[:len(leafResponse.Vals)-2] + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, 
err) + leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-1] + leafResponse.Vals = leafResponse.Vals[:len(leafResponse.Vals)-1] modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -672,24 +592,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove middle key-value pair response leafResponse.Keys = append(leafResponse.Keys[:100], leafResponse.Keys[101:]...) leafResponse.Vals = append(leafResponse.Vals[:100], leafResponse.Vals[101:]...) 
modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -703,23 +616,16 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove middle key-value pair response leafResponse.Vals[100] = []byte("garbage value data") modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -733,24 +639,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove the proof leafResponse.ProofVals = nil modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -766,10 +665,8 @@ func TestGetLeafs(t *testing.T) { return } - leafsResponse, ok := response.(message.LeafsResponse) - if !ok { - t.Fatalf("parseLeafsResponse returned incorrect type %T", response) - } + leafsResponse := response.(message.LeafsResponse) + require.IsType(t, message.LeafsResponse{}, leafsResponse, "parseLeafsResponse returned incorrect type %T", leafsResponse) test.requireResponse(t, leafsResponse) }) } @@ -806,9 +703,7 @@ func TestGetLeafsRetries(t *testing.T) { mockNetClient.mockResponse(1, nil, goodResponse) res, err := client.GetLeafs(ctx, request) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.Len(t, res.Keys, 1024) require.Len(t, res.Vals, 1024) @@ -817,9 +712,7 @@ func TestGetLeafsRetries(t *testing.T) { mockNetClient.mockResponses(nil, invalidResponse, invalidResponse, goodResponse) res, err = client.GetLeafs(ctx, request) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.Len(t, res.Keys, 1024) require.Len(t, res.Vals, 1024) diff --git a/sync/handlers/block_request_test.go b/sync/handlers/block_request_test.go index 5612464d95..5a680633a2 100644 --- a/sync/handlers/block_request_test.go +++ b/sync/handlers/block_request_test.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/triedb" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core" @@ -78,9 +79,7 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type blockRequest.Parents = test.requestedParents responseBytes, err := blockRequestHandler.OnBlockRequest(t.Context(), ids.GenerateTestNodeID(), 1, blockRequest) - if err != nil { - t.Fatal("unexpected error during block request", err) - } + require.NoError(t, err) if test.assertResponse != nil { test.assertResponse(t, mockHandlerStats, 
responseBytes) } @@ -93,16 +92,13 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) assert.Len(t, response.Blocks, test.expectedBlocks) for _, blockBytes := range response.Blocks { block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", err) - } + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) assert.GreaterOrEqual(t, test.startBlockIndex, 0) assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) test.startBlockIndex-- @@ -118,10 +114,8 @@ func TestBlockRequestHandler(t *testing.T) { tdb := triedb.NewDatabase(memdb, nil) genesis := gspec.MustCommit(memdb, tdb) engine := dummy.NewETHFaker() - blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(int, *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) assert.Len(t, blocks, 96) tests := []blockRequestTest{ @@ -185,14 +179,10 @@ func TestBlockRequestHandlerLargeBlocks(t *testing.T) { data = make([]byte, units.MiB/16) } tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), addr1, big.NewInt(10000), 4_215_304, nil, data), signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b.AddTx(tx) }) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + require.NoError(t, err) assert.Len(t, blocks, 96) tests := []blockRequestTest{ @@ -230,10 +220,8 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { tdb := 
triedb.NewDatabase(memdb, nil) genesis := gspec.MustCommit(memdb, tdb) engine := dummy.NewETHFaker() - blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(int, *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) assert.Len(t, blocks, 11) @@ -268,23 +256,18 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { Height: blocks[10].NumberU64(), Parents: uint16(8), }) - if err != nil { - t.Fatal("unexpected error from BlockRequestHandler", err) - } + require.NoError(t, err) assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) // requested 8 blocks, received cancelAfterNumRequests because of timeout assert.Len(t, response.Blocks, cancelAfterNumRequests) for i, blockBytes := range response.Blocks { block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", err) - } + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) assert.Equal(t, blocks[len(blocks)-i-1].Hash(), block.Hash()) } } diff --git a/sync/handlers/code_request_test.go b/sync/handlers/code_request_test.go index eee04dde6f..81a47e81b2 100644 --- a/sync/handlers/code_request_test.go +++ b/sync/handlers/code_request_test.go @@ -100,12 +100,9 @@ func TestCodeRequestHandler(t *testing.T) { return } var response message.CodeResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling CodeResponse", err) - } - if len(expectedResponse) != len(response.Data) { - t.Fatalf("Unexpected length of code data expected %d != %d", 
len(expectedResponse), len(response.Data)) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) + require.Len(t, response.Data, len(expectedResponse)) for i, code := range expectedResponse { require.Equal(t, code, response.Data[i], "code bytes mismatch at index %d", i) } diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 174808c499..93ad901998 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -33,9 +33,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { corruptedTrieRoot, _, _ := statesynctest.GenerateTrie(t, r, trieDB, 100, common.HashLength) tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Corrupt [corruptedTrieRoot] statesynctest.CorruptTrie(t, memdb, tr, 5) @@ -441,9 +439,8 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { require.NoError(t, err) var leafsResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafsResponse); err != nil { - t.Fatalf("unexpected error when unmarshalling LeafsResponse: %v", err) - } + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) require.Len(t, leafsResponse.Keys, 500) require.Len(t, leafsResponse.Vals, 500) @@ -456,9 +453,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap return t.Context(), message.LeafsRequest{ Root: accountTrieRoot, @@ -483,9 +478,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "partial account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := 
snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskAccountIterator(common.Hash{}) defer it.Release() @@ -498,9 +491,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { // modify one entry of 1 in 4 segments if i%(segmentLen*4) == 0 { acc, err := types.FullAccount(it.Account()) - if err != nil { - t.Fatalf("could not parse snapshot account: %v", err) - } + require.NoError(t, err) acc.Nonce++ bytes := types.SlimAccountRLP(*acc) rawdb.WriteAccountSnapshot(memdb, it.Hash(), bytes) @@ -536,9 +527,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap return t.Context(), message.LeafsRequest{ Root: largeTrieRoot, @@ -564,9 +553,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "partial storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(largeStorageAccount, common.Hash{}) defer it.Release() @@ -615,9 +602,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "last snapshot key removed": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) defer it.Release() @@ -651,9 +636,7 @@ func 
TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "request last key when removed from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) defer it.Release() @@ -717,9 +700,7 @@ func requireRangeProofIsValid(t *testing.T, request *message.LeafsRequest, respo defer proof.Close() for _, proofVal := range response.ProofVals { proofKey := crypto.Keccak256(proofVal) - if err := proof.Put(proofKey, proofVal); err != nil { - t.Fatal(err) - } + require.NoError(t, proof.Put(proofKey, proofVal)) } } diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 206f0cba70..d3ebdb4b8b 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/libevm/core/rawdb" "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/ethdb/memorydb" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" @@ -61,27 +60,20 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(t.Context()) for _, codeHashes := range test.codeRequestHashes { - if err := codeSyncer.addCode(codeHashes); err != nil { - require.ErrorIs(t, err, test.err) - } + require.NoError(t, codeSyncer.addCode(codeHashes)) } codeSyncer.notifyAccountTrieCompleted() err := <-codeSyncer.Done() + require.ErrorIs(t, err, test.err) if test.err != nil { - if err == nil { - t.Fatal(t, "expected non-nil error: %s", test.err) - } - assert.ErrorIs(t, err, test.err) return - } else if err != nil { - t.Fatal(err) } // Assert that the client synced the code correctly. 
for i, codeHash := range codeHashes { codeBytes := rawdb.ReadCode(clientDB, codeHash) - assert.Equal(t, test.codeByteSlices[i], codeBytes) + require.Equal(t, test.codeByteSlices[i], codeBytes) } } diff --git a/sync/statesync/statesynctest/test_sync.go b/sync/statesync/statesynctest/test_sync.go index 646e6d1c0a..3594207de1 100644 --- a/sync/statesync/statesynctest/test_sync.go +++ b/sync/statesync/statesynctest/test_sync.go @@ -35,9 +35,7 @@ func AssertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } numSnapshotAccounts++ } - if err := accountIt.Error(); err != nil { - t.Fatal(err) - } + require.NoError(t, accountIt.Error()) trieAccountLeaves := 0 AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { @@ -94,9 +92,7 @@ func FillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database newRoot, _ := FillAccounts(t, r, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := r.Read(codeBytes) - if err != nil { - t.Fatalf("error reading random code bytes: %v", err) - } + require.NoError(t, err, "error reading random code bytes") codeHash := crypto.Keccak256Hash(codeBytes) rawdb.WriteCode(serverDB, codeHash, codeBytes) diff --git a/sync/statesync/statesynctest/test_trie.go b/sync/statesync/statesynctest/test_trie.go index 68557d05a0..c2f180f581 100644 --- a/sync/statesync/statesynctest/test_trie.go +++ b/sync/statesync/statesynctest/test_trie.go @@ -25,10 +25,9 @@ import ( // GenerateTrie creates a trie with [numKeys] random key-value pairs inside of [trieDB]. // Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical // order, and the slice of corresponding values. 
+// GenerateTrie reads randomness from [r]; for deterministic results the caller should construct it from a fixed seed, e.g. rand.New(rand.NewSource(n)) func GenerateTrie(t *testing.T, r *rand.Rand, trieDB *triedb.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { - if keySize < wrappers.LongLen+1 { - t.Fatal("key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") - } + require.GreaterOrEqual(t, keySize, wrappers.LongLen+1, "key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") return FillTrie(t, r, 0, numKeys, keySize, trieDB, types.EmptyRootHash) } @@ -36,9 +35,7 @@ func GenerateTrie(t *testing.T, r *rand.Rand, trieDB *triedb.Database, numKeys i // returns inserted keys and values func FillTrie(t *testing.T, r *rand.Rand, start, numKeys int, keySize int, trieDB *triedb.Database, root common.Hash) (common.Hash, [][]byte, [][]byte) { testTrie, err := trie.New(trie.TrieID(root), trieDB) - if err != nil { - t.Fatalf("error creating trie: %v", err) - } + require.NoError(t, err) keys := make([][]byte, 0, numKeys) values := make([][]byte, 0, numKeys) @@ -73,33 +70,24 @@ func FillTrie(t *testing.T, r *rand.Rand, start, numKeys int, keySize int, trieD // non-empty trie at [root].
(all key/value pairs must be equal) func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *triedb.Database, onLeaf func(key, val []byte) error) { trieA, err := trie.New(trie.TrieID(root), a) - if err != nil { - t.Fatalf("error creating trieA, root=%s, err=%v", root, err) - } + require.NoError(t, err) trieB, err := trie.New(trie.TrieID(root), b) - if err != nil { - t.Fatalf("error creating trieB, root=%s, err=%v", root, err) - } + require.NoError(t, err) nodeItA, err := trieA.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err) - } + require.NoError(t, err) nodeItB, err := trieB.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err) - } + require.NoError(t, err) itA := trie.NewIterator(nodeItA) itB := trie.NewIterator(nodeItB) + count := 0 for itA.Next() && itB.Next() { count++ require.Equal(t, itA.Key, itB.Key) require.Equal(t, itA.Value, itB.Value) if onLeaf != nil { - if err := onLeaf(itA.Key, itA.Value); err != nil { - t.Fatalf("error in onLeaf callback: %v", err) - } + require.NoError(t, onLeaf(itA.Key, itA.Value)) } } require.NoError(t, itA.Err) @@ -115,25 +103,16 @@ func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { // Delete some trie nodes batch := diskdb.NewBatch() nodeIt, err := tr.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) count := 0 for nodeIt.Next(true) { count++ if count%n == 0 && nodeIt.Hash() != (common.Hash{}) { - if err := batch.Delete(nodeIt.Hash().Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(t, batch.Delete(nodeIt.Hash().Bytes())) } } - if err := nodeIt.Error(); err != nil { - t.Fatal(err) - } - - if err := batch.Write(); err != nil { - t.Fatal(err) - } + require.NoError(t, nodeIt.Error()) + require.NoError(t, batch.Write()) } // FillAccounts adds [numAccounts] randomly generated accounts to the secure trie at [root] and commits it to 
[trieDB]. @@ -151,9 +130,7 @@ func FillAccounts( ) tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) - if err != nil { - t.Fatalf("error opening trie: %v", err) - } + require.NoError(t, err) for i := 0; i < numAccounts; i++ { acc := types.StateAccount{ @@ -167,9 +144,7 @@ func FillAccounts( } accBytes, err := rlp.EncodeToBytes(&acc) - if err != nil { - t.Fatalf("failed to rlp encode account: %v", err) - } + require.NoError(t, err) key := utilstest.NewKey(t) tr.MustUpdate(key.Address[:], accBytes) @@ -177,14 +152,8 @@ func FillAccounts( } newRoot, nodes, err := tr.Commit(false) - if err != nil { - t.Fatalf("error committing trie: %v", err) - } - if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { - t.Fatalf("error updating trieDB: %v", err) - } - if err := trieDB.Commit(newRoot, false); err != nil { - t.Fatalf("error committing trieDB: %v", err) - } + require.NoError(t, err) + require.NoError(t, trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil)) + require.NoError(t, trieDB.Commit(newRoot, false)) return newRoot, accounts } diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 355fd69e26..6e8317b1de 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -100,7 +100,7 @@ func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context) pprof.Lookup("goroutine").WriteTo(&stackBuf, 2) t.Log(stackBuf.String()) // fail the test - t.Fatal("unexpected timeout waiting for sync result") + require.Fail(t, "unexpected timeout waiting for sync result") } require.ErrorIs(t, err, expected, "result of sync did not match expected error") @@ -548,9 +548,7 @@ func TestDifferentWaitContext(t *testing.T) { MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, RequestSize: 1024, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create two different contexts startCtx := t.Context() // Never cancelled diff --git 
a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index d3571be059..97d679cc76 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -159,15 +159,9 @@ func TestBlockSignatures(t *testing.T) { toMessageBytes := func(id ids.ID) []byte { idPayload, err := payload.NewHash(id) - if err != nil { - panic(err) - } - + require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes()) - if err != nil { - panic(err) - } - + require.NoError(t, err) return msg.Bytes() }