
implement forkid changes for time-based forks #180

Merged: 8 commits (Mar 11, 2025)
Changes from 5 commits
2 changes: 1 addition & 1 deletion cmd/devp2p/internal/ethtest/chain.go
@@ -77,7 +77,7 @@ func (c *Chain) RootAt(height int) common.Hash {

// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
}

// Shorten returns a copy chain of a desired height from the imported
123 changes: 82 additions & 41 deletions core/forkid/forkid.go
@@ -24,6 +24,7 @@ import (
"math"
"math/big"
"reflect"
"slices"
"strings"

"github.com/morph-l2/go-ethereum/common"
@@ -44,6 +45,12 @@ var (
ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
)

// timestampThreshold is a cutoff timestamp used to differentiate whether a
// forkid.Next field is a block number or a timestamp. Whilst very hacky,
// something's needed to split the validation during the transition period
// (block forks -> time forks).
const timestampThreshold = 1729490400

// Blockchain defines all necessary methods to build a forkID.
type Blockchain interface {
// Config retrieves the chain's fork configuration.
@@ -65,31 +72,41 @@ type ID struct {
// Filter is a fork id filter to validate a remotely advertised ID.
type Filter func(id ID) error

// NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
// Calculate the starting checksum from the genesis hash
hash := crc32.ChecksumIEEE(genesis[:])

// Calculate the current fork checksum and the next fork block
var next uint64
for _, fork := range gatherForks(config) {
forksByBlock, forksByTime := gatherForks(config)
for _, fork := range forksByBlock {
if fork <= head {
// Fork already passed, checksum the previous hash and the fork number
hash = checksumUpdate(hash, fork)
continue
}
next = fork
break
return ID{Hash: checksumToBytes(hash), Next: fork}
}
for _, fork := range forksByTime {
if fork <= time {
// Fork already passed, checksum the previous hash and fork timestamp
hash = checksumUpdate(hash, fork)
continue
}
return ID{Hash: checksumToBytes(hash), Next: fork}
}
return ID{Hash: checksumToBytes(hash), Next: next}
return ID{Hash: checksumToBytes(hash), Next: 0}
}

// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
head := chain.CurrentHeader()

return NewID(
chain.Config(),
chain.Genesis().Hash(),
chain.CurrentHeader().Number.Uint64(),
head.Number.Uint64(),
head.Time,
)
}

@@ -99,26 +116,28 @@ func NewFilter(chain Blockchain) Filter {
return newFilter(
chain.Config(),
chain.Genesis().Hash(),
func() uint64 {
return chain.CurrentHeader().Number.Uint64()
func() (uint64, uint64) {
head := chain.CurrentHeader()
return head.Number.Uint64(), head.Time
},
)
}

// NewStaticFilter creates a filter at block zero.
func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
head := func() uint64 { return 0 }
head := func() (uint64, uint64) { return 0, 0 }
return newFilter(config, genesis, head)
}

// newFilter is the internal version of NewFilter, taking closures as its arguments
// instead of a chain. The reason is to allow testing it without having to simulate
// an entire blockchain.
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
// Calculate all the valid fork hash and fork next combos
var (
forks = gatherForks(config)
sums = make([][4]byte, len(forks)+1) // 0th is the genesis
forksByBlock, forksByTime = gatherForks(config)
forks = append(append([]uint64{}, forksByBlock...), forksByTime...)
sums = make([][4]byte, len(forks)+1) // 0th is the genesis
)
hash := crc32.ChecksumIEEE(genesis[:])
sums[0] = checksumToBytes(hash)
@@ -129,7 +148,10 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// Add two sentries to simplify the fork checks and don't require special
// casing the last one.
forks = append(forks, math.MaxUint64) // Last fork will never be passed

if len(forksByTime) == 0 {
// In purely block based forks, avoid the sentry spilling into timestamp territory
forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed
}
// Create a validator that will filter out incompatible chains
return func(id ID) error {
// Run the fork checksum validation ruleset:
@@ -151,8 +173,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
// the remote, but at this current point in time we don't have enough
// information.
// 4. Reject in all other cases.
head := headfn()
block, time := headfn()
for i, fork := range forks {
// Pick the head comparison based on fork progression
head := block
if i >= len(forksByBlock) {
head = time
}
// If our head is beyond this fork, continue to the next (we have a dummy
// fork of maxuint64 as the last item to always fail this check eventually).
if head >= fork {
@@ -163,7 +190,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
if sums[i] == id.Hash {
// Fork checksum matched, check if a remote future fork block already passed
// locally without the local node being aware of it (rule #1a).
if id.Next > 0 && head >= id.Next {
if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) {
return ErrLocalIncompatibleOrStale
}
// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
@@ -211,46 +238,60 @@ func checksumToBytes(hash uint32) [4]byte {
return blob
}

// gatherForks gathers all the known forks and creates a sorted list out of them.
func gatherForks(config *params.ChainConfig) []uint64 {
// gatherForks gathers all the known forks and creates two sorted lists out of
// them, one for the block number based forks and the second for the timestamps.
func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
// Gather all the fork block numbers via reflection
kind := reflect.TypeOf(params.ChainConfig{})
conf := reflect.ValueOf(config).Elem()

var forks []uint64
x := uint64(0)
var (
forksByBlock []uint64
forksByTime []uint64
)
for i := 0; i < kind.NumField(); i++ {
// Fetch the next field and skip non-fork rules
field := kind.Field(i)
if !strings.HasSuffix(field.Name, "Block") {

time := strings.HasSuffix(field.Name, "Time")
if !time && !strings.HasSuffix(field.Name, "Block") {
continue
}
if field.Type != reflect.TypeOf(new(big.Int)) {
continue

Comment on lines +256 to +260
🛠️ Refactor suggestion

Improve fork type detection logic

The current approach to detect timestamp-based forks using string suffix matching is brittle and could lead to incorrect classification if field names follow unexpected patterns.

Consider implementing a more robust method for determining fork types. Instead of relying on naming conventions, you could:

  1. Use explicit type annotations in the ChainConfig struct
  2. Define dedicated lists of which fields are timestamp-based vs block-based
  3. Implement a more sophisticated reflection-based approach that checks field documentation or tags

This would make the code more maintainable and less prone to errors if new fork types are added in the future.
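
As a rough sketch of the second suggestion (dedicated field lists), outside the scope of this PR: the field names below are illustrative assumptions, not the actual params.ChainConfig contents, and the helper is hypothetical.

package forkid

import (
	"math/big"
	"reflect"
	"slices"

	"github.com/morph-l2/go-ethereum/params"
)

// Hypothetical allow-lists; these would need to mirror the real fork fields.
var (
	blockForkFieldNames = []string{"HomesteadBlock", "ByzantiumBlock", "LondonBlock"}
	timeForkFieldNames  = []string{"Morph203Time"}
)

// gatherForksExplicit classifies forks via explicit lists instead of
// name-suffix matching, so a misnamed field cannot be silently misclassified.
func gatherForksExplicit(config *params.ChainConfig) (byBlock, byTime []uint64) {
	conf := reflect.ValueOf(config).Elem()
	for _, name := range blockForkFieldNames {
		field := conf.FieldByName(name)
		if !field.IsValid() {
			continue // field absent in this ChainConfig version
		}
		if rule, ok := field.Interface().(*big.Int); ok && rule != nil {
			byBlock = append(byBlock, rule.Uint64())
		}
	}
	for _, name := range timeForkFieldNames {
		field := conf.FieldByName(name)
		if !field.IsValid() {
			continue
		}
		if rule, ok := field.Interface().(*uint64); ok && rule != nil {
			byTime = append(byTime, *rule)
		}
	}
	slices.Sort(byBlock)
	slices.Sort(byTime)
	return byBlock, byTime
}
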

// Extract the fork rule block number or timestamp and aggregate it
if field.Type == reflect.TypeOf(&x) {
if rule := conf.Field(i).Interface().(*uint64); rule != nil {
forksByTime = append(forksByTime, *rule)
}
}
// Extract the fork rule block number and aggregate it
rule := conf.Field(i).Interface().(*big.Int)
if rule != nil {
forks = append(forks, rule.Uint64())
if field.Type == reflect.TypeOf(new(big.Int)) {
if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
forksByBlock = append(forksByBlock, rule.Uint64())
}
}
}
// Sort the fork block numbers to permit chronological XOR
for i := 0; i < len(forks); i++ {
for j := i + 1; j < len(forks); j++ {
if forks[i] > forks[j] {
forks[i], forks[j] = forks[j], forks[i]
}
slices.Sort(forksByBlock)
slices.Sort(forksByTime)

// Deduplicate fork identifiers applying multiple forks
for i := 1; i < len(forksByBlock); i++ {
if forksByBlock[i] == forksByBlock[i-1] {
forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
i--
}
}
// Deduplicate block numbers applying multiple forks
for i := 1; i < len(forks); i++ {
if forks[i] == forks[i-1] {
forks = append(forks[:i], forks[i+1:]...)
for i := 1; i < len(forksByTime); i++ {
if forksByTime[i] == forksByTime[i-1] {
forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
i--
}
}
// Skip any forks in block 0, that's the genesis ruleset
if len(forks) > 0 && forks[0] == 0 {
forks = forks[1:]
if len(forksByBlock) > 0 && forksByBlock[0] == 0 {
forksByBlock = forksByBlock[1:]
}
if len(forksByTime) > 0 && forksByTime[0] == 0 {
forksByTime = forksByTime[1:]
}
return forks
return forksByBlock, forksByTime
}
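
For orientation, a minimal usage sketch based only on the signatures in this diff (the head number and timestamp below are made-up values): NewID now takes both the head block number and the head time, so time-scheduled forks are folded into the checksum once their timestamp has passed, and the validator treats a remote's Next value above timestampThreshold as a timestamp rather than a block number.

package main

import (
	"fmt"

	"github.com/morph-l2/go-ethereum/core/forkid"
	"github.com/morph-l2/go-ethereum/params"
)

func main() {
	// Illustrative head values only: a block number and a post-Morph203 timestamp.
	head, time := uint64(6_656_942), uint64(1_741_579_201)

	id := forkid.NewID(params.MorphMainnetChainConfig, params.MorphMainnetGenesisHash, head, time)
	fmt.Printf("fork id: hash=%x next=%d\n", id.Hash, id.Next)
}
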
57 changes: 55 additions & 2 deletions core/forkid/forkid_test.go
@@ -136,7 +136,7 @@ func TestCreation(t *testing.T) {
}
for i, tt := range tests {
for j, ttt := range tt.cases {
if have := NewID(tt.config, tt.genesis, ttt.head); have != ttt.want {
if have := NewID(tt.config, tt.genesis, ttt.head, 0); have != ttt.want {
t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
}
}
@@ -218,7 +218,7 @@ func TestValidation(t *testing.T) {
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
}
for i, tt := range tests {
filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, 0 })
if err := filter(tt.id); err != tt.err {
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
}
@@ -247,3 +247,56 @@ func TestEncoding(t *testing.T) {
}
}
}

func TestTimeBasedForkInGenesis(t *testing.T) {
// Config that does not have the timestamp fork enabled
legacyConfig := *params.MorphMainnetChainConfig
legacyConfig.Morph203Time = nil

morphMainnetConfig := *params.MorphMainnetChainConfig
morph203Time := uint64(1741579200)
morphMainnetConfig.Morph203Time = params.NewUint64(morph203Time) // 2025-03-10 04:00:00 UTC

tests := []struct {
config *params.ChainConfig
head uint64
time uint64
id ID
err error
}{
//------------------
// Block based tests
//------------------

// Local is mainnet, remote announces the same. No future fork is announced.
{&legacyConfig, 0, 0, ID{Hash: checksumToBytes(0xb0709522), Next: 0}, nil},

{&legacyConfig, 0, morph203Time + 1, ID{Hash: checksumToBytes(0xb0709522), Next: morph203Time}, ErrLocalIncompatibleOrStale},

//------------------
// Timestamp based tests
//------------------

// unpassed fork
{&morphMainnetConfig, 6656942, morph203Time - 1, ID{Hash: checksumToBytes(0xb0709522), Next: 0}, nil},

// passed fork
{&morphMainnetConfig, 6656942, morph203Time + 1, ID{Hash: checksumToBytes(0xb0709522), Next: 0}, ErrRemoteStale},

// unpassed fork
{&morphMainnetConfig, 6656942, morph203Time - 1, ID{Hash: checksumToBytes(0xb0709522), Next: morph203Time}, nil},

// passed fork
{&morphMainnetConfig, 6656942, morph203Time + 1, ID{Hash: checksumToBytes(0xb0709522), Next: morph203Time}, nil},

// subset fork
{&morphMainnetConfig, 6656942, morph203Time + 1, ID{Hash: checksumToBytes(0xb0709522), Next: morph203Time - 1}, ErrRemoteStale},
}

for i, tt := range tests {
filter := newFilter(tt.config, params.MorphMainnetGenesisHash, func() (uint64, uint64) { return tt.head, tt.time })
if err := filter(tt.id); err != tt.err {
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
}
}
}
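
The new cases can be exercised in isolation with: go test ./core/forkid -run TestTimeBasedForkInGenesis -v
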
31 changes: 23 additions & 8 deletions core/vm/contracts.go
@@ -35,7 +35,8 @@ import (
)

var (
errPrecompileDisabled = errors.New("sha256, ripemd160, blake2f precompiles temporarily disabled")
errModexpUnsupportedInput = errors.New("modexp temporarily only accepts inputs of 32 bytes (256 bits) or less")
errPrecompileDisabled = errors.New("sha256, ripemd160, blake2f precompiles temporarily disabled")
)

// PrecompiledContract is the basic interface for native Go contracts. The implementation
@@ -131,10 +132,10 @@ var PrecompiledContractsMorph203 = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{2}): &sha256hash{},
common.BytesToAddress([]byte{3}): &ripemd160hash{},
common.BytesToAddress([]byte{4}): &dataCopy{},
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true, morph203: true},
common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{},
common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{morph203: true},
common.BytesToAddress([]byte{9}): &blake2F{},
}

@@ -324,7 +325,8 @@ func (c *dataCopy) Run(in []byte) ([]byte, error) {

// bigModExp implements a native big integer exponential modular operation.
type bigModExp struct {
eip2565 bool
eip2565 bool
morph203 bool
}

var (
@@ -456,6 +458,14 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
expLen = expLenBigInt.Uint64()
modLen = modLenBigInt.Uint64()
)
// Check that all inputs are `u256` (32 bytes) or less, revert otherwise
if !c.morph203 {
var lenLimit = new(big.Int).SetInt64(32)
if baseLenBigInt.Cmp(lenLimit) > 0 || expLenBigInt.Cmp(lenLimit) > 0 || modLenBigInt.Cmp(lenLimit) > 0 {
return nil, errModexpUnsupportedInput
}
}

if len(input) > 96 {
input = input[96:]
} else {
@@ -591,7 +601,12 @@ var (

// runBn256Pairing implements the Bn256Pairing precompile, referenced by both
// Byzantium and Istanbul operations.
func runBn256Pairing(input []byte) ([]byte, error) {
func runBn256Pairing(input []byte, morph203 bool) ([]byte, error) {
// Allow at most 4 inputs
if !morph203 && len(input) > 4*192 {
return nil, errBadPairingInput
}

// Handle some corner cases cheaply
if len(input)%192 > 0 {
return nil, errBadPairingInput
@@ -622,15 +637,15 @@ func runBn256Pairing(input []byte) ([]byte, error) {

// bn256PairingIstanbul implements a pairing pre-compile for the bn256 curve
// conforming to Istanbul consensus rules.
type bn256PairingIstanbul struct{}
type bn256PairingIstanbul struct{ morph203 bool }

// RequiredGas returns the gas required to execute the pre-compiled contract.
func (c *bn256PairingIstanbul) RequiredGas(input []byte) uint64 {
return params.Bn256PairingBaseGasIstanbul + uint64(len(input)/192)*params.Bn256PairingPerPointGasIstanbul
}

func (c *bn256PairingIstanbul) Run(input []byte) ([]byte, error) {
return runBn256Pairing(input)
return runBn256Pairing(input, c.morph203)
}

// bn256PairingByzantium implements a pairing pre-compile for the bn256 curve
@@ -643,7 +658,7 @@ func (c *bn256PairingByzantium) RequiredGas(input []byte) uint64 {
}

func (c *bn256PairingByzantium) Run(input []byte) ([]byte, error) {
return runBn256Pairing(input)
return runBn256Pairing(input, false)
}

type blake2F struct{}
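
For context, a standalone sketch (not code from this PR; error messages simplified) of the two pre-Morph203 limits introduced above: a modexp input starts with three 32-byte lengths per EIP-198, each of which must be 32 or less before Morph203, and the bn256 pairing input is capped at 4 point pairs of 192 bytes each.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// modexpAllowedPreMorph203 mirrors the pre-Morph203 check: each declared
// length in the 96-byte EIP-198 header must be 32 bytes or less.
func modexpAllowedPreMorph203(input []byte) error {
	header := make([]byte, 96)
	copy(header, input) // short inputs are treated as zero-padded
	baseLen := new(big.Int).SetBytes(header[0:32])
	expLen := new(big.Int).SetBytes(header[32:64])
	modLen := new(big.Int).SetBytes(header[64:96])
	limit := big.NewInt(32)
	if baseLen.Cmp(limit) > 0 || expLen.Cmp(limit) > 0 || modLen.Cmp(limit) > 0 {
		return errors.New("modexp input lengths above 32 bytes are rejected pre-Morph203")
	}
	return nil
}

// bn256PairingAllowedPreMorph203 mirrors the pre-Morph203 pairing cap:
// at most 4 point pairs, each encoded in 192 bytes.
func bn256PairingAllowedPreMorph203(input []byte) error {
	if len(input) > 4*192 {
		return errors.New("more than 4 point pairs are rejected pre-Morph203")
	}
	return nil
}

func main() {
	fmt.Println(modexpAllowedPreMorph203(make([]byte, 96)))          // <nil>: all lengths are zero
	fmt.Println(bn256PairingAllowedPreMorph203(make([]byte, 5*192))) // rejected: five pairs
}
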