@@ -24,6 +24,7 @@ import (
2424 "math"
2525 "math/big"
2626 "reflect"
27+ "slices"
2728 "strings"
2829
2930 "github.com/morph-l2/go-ethereum/common"
4445 ErrLocalIncompatibleOrStale = errors .New ("local incompatible or needs update" )
4546)
4647
48+ // timestampThreshold is the Morph mainnet genesis timestamp. It is used to
49+ // differentiate if a forkid.next field is a block number or a timestamp. Whilst
50+ // very hacky, something's needed to split the validation during the transition
51+ // period (block forks -> time forks).
52+ const timestampThreshold = 1729490400
53+
4754// Blockchain defines all necessary method to build a forkID.
4855type Blockchain interface {
4956 // Config retrieves the chain's fork configuration.
@@ -65,31 +72,41 @@ type ID struct {
6572// Filter is a fork id filter to validate a remotely advertised ID.
6673type Filter func (id ID ) error
6774
68- // NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head .
69- func NewID (config * params.ChainConfig , genesis common.Hash , head uint64 ) ID {
75+ // NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time .
76+ func NewID (config * params.ChainConfig , genesis common.Hash , head , time uint64 ) ID {
7077 // Calculate the starting checksum from the genesis hash
7178 hash := crc32 .ChecksumIEEE (genesis [:])
7279
7380 // Calculate the current fork checksum and the next fork block
74- var next uint64
75- for _ , fork := range gatherForks ( config ) {
81+ forksByBlock , forksByTime := gatherForks ( config )
82+ for _ , fork := range forksByBlock {
7683 if fork <= head {
7784 // Fork already passed, checksum the previous hash and the fork number
7885 hash = checksumUpdate (hash , fork )
7986 continue
8087 }
81- next = fork
82- break
88+ return ID {Hash : checksumToBytes (hash ), Next : fork }
89+ }
90+ for _ , fork := range forksByTime {
91+ if fork <= time {
92+ // Fork already passed, checksum the previous hash and fork timestamp
93+ hash = checksumUpdate (hash , fork )
94+ continue
95+ }
96+ return ID {Hash : checksumToBytes (hash ), Next : fork }
8397 }
84- return ID {Hash : checksumToBytes (hash ), Next : next }
98+ return ID {Hash : checksumToBytes (hash ), Next : 0 }
8599}
86100
87101// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
88102func NewIDWithChain (chain Blockchain ) ID {
103+ head := chain .CurrentHeader ()
104+
89105 return NewID (
90106 chain .Config (),
91107 chain .Genesis ().Hash (),
92- chain .CurrentHeader ().Number .Uint64 (),
108+ head .Number .Uint64 (),
109+ head .Time ,
93110 )
94111}
95112
@@ -99,26 +116,28 @@ func NewFilter(chain Blockchain) Filter {
99116 return newFilter (
100117 chain .Config (),
101118 chain .Genesis ().Hash (),
102- func () uint64 {
103- return chain .CurrentHeader ().Number .Uint64 ()
119+ func () (uint64 , uint64 ) {
120+ head := chain .CurrentHeader ()
121+ return head .Number .Uint64 (), head .Time
104122 },
105123 )
106124}
107125
108126// NewStaticFilter creates a filter at block zero.
109127func NewStaticFilter (config * params.ChainConfig , genesis common.Hash ) Filter {
110- head := func () uint64 { return 0 }
128+ head := func () ( uint64 , uint64 ) { return 0 , 0 }
111129 return newFilter (config , genesis , head )
112130}
113131
114132// newFilter is the internal version of NewFilter, taking closures as its arguments
115133// instead of a chain. The reason is to allow testing it without having to simulate
116134// an entire blockchain.
117- func newFilter (config * params.ChainConfig , genesis common.Hash , headfn func () uint64 ) Filter {
135+ func newFilter (config * params.ChainConfig , genesis common.Hash , headfn func () ( uint64 , uint64 ) ) Filter {
118136 // Calculate the all the valid fork hash and fork next combos
119137 var (
120- forks = gatherForks (config )
121- sums = make ([][4 ]byte , len (forks )+ 1 ) // 0th is the genesis
138+ forksByBlock , forksByTime = gatherForks (config )
139+ forks = append (append ([]uint64 {}, forksByBlock ... ), forksByTime ... )
140+ sums = make ([][4 ]byte , len (forks )+ 1 ) // 0th is the genesis
122141 )
123142 hash := crc32 .ChecksumIEEE (genesis [:])
124143 sums [0 ] = checksumToBytes (hash )
@@ -129,7 +148,10 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
129148 // Add two sentries to simplify the fork checks and don't require special
130149 // casing the last one.
131150 forks = append (forks , math .MaxUint64 ) // Last fork will never be passed
132-
151+ if len (forksByTime ) == 0 {
152+ // In purely block-based forks, avoid the sentry spilling into timestamp territory
153+ forksByBlock = append (forksByBlock , math .MaxUint64 ) // Last fork will never be passed
154+ }
133155 // Create a validator that will filter out incompatible chains
134156 return func (id ID ) error {
135157 // Run the fork checksum validation ruleset:
@@ -151,8 +173,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
151173 // the remote, but at this current point in time we don't have enough
152174 // information.
153175 // 4. Reject in all other cases.
154- head := headfn ()
176+ block , time := headfn ()
155177 for i , fork := range forks {
178+ // Pick the head comparison based on fork progression
179+ head := block
180+ if i >= len (forksByBlock ) {
181+ head = time
182+ }
156183 // If our head is beyond this fork, continue to the next (we have a dummy
157184 // fork of maxuint64 as the last item to always fail this check eventually).
158185 if head >= fork {
@@ -163,7 +190,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
163190 if sums [i ] == id .Hash {
164191 // Fork checksum matched, check if a remote future fork block already passed
165192 // locally without the local node being aware of it (rule #1a).
166- if id .Next > 0 && head >= id .Next {
193+ if id .Next > 0 && ( head >= id .Next || ( id . Next > timestampThreshold && time >= id . Next )) {
167194 return ErrLocalIncompatibleOrStale
168195 }
169196 // Haven't passed locally a remote-only fork, accept the connection (rule #1b).
@@ -211,46 +238,60 @@ func checksumToBytes(hash uint32) [4]byte {
211238 return blob
212239}
213240
214- // gatherForks gathers all the known forks and creates a sorted list out of them.
215- func gatherForks (config * params.ChainConfig ) []uint64 {
241+ // gatherForks gathers all the known forks and creates two sorted lists out of
242+ // them, one for the block number based forks and the second for the timestamps.
243+ func gatherForks (config * params.ChainConfig ) ([]uint64 , []uint64 ) {
216244 // Gather all the fork block numbers via reflection
217245 kind := reflect .TypeOf (params.ChainConfig {})
218246 conf := reflect .ValueOf (config ).Elem ()
219-
220- var forks []uint64
247+ x := uint64 (0 )
248+ var (
249+ forksByBlock []uint64
250+ forksByTime []uint64
251+ )
221252 for i := 0 ; i < kind .NumField (); i ++ {
222253 // Fetch the next field and skip non-fork rules
223254 field := kind .Field (i )
224- if ! strings .HasSuffix (field .Name , "Block" ) {
255+
256+ time := strings .HasSuffix (field .Name , "Time" )
257+ if ! time && ! strings .HasSuffix (field .Name , "Block" ) {
225258 continue
226259 }
227- if field .Type != reflect .TypeOf (new (big.Int )) {
228- continue
260+
261+ // Extract the fork rule block number or timestamp and aggregate it
262+ if field .Type == reflect .TypeOf (& x ) {
263+ if rule := conf .Field (i ).Interface ().(* uint64 ); rule != nil {
264+ forksByTime = append (forksByTime , * rule )
265+ }
229266 }
230- // Extract the fork rule block number and aggregate it
231- rule := conf .Field (i ).Interface ().(* big.Int )
232- if rule != nil {
233- forks = append ( forks , rule . Uint64 ())
267+ if field . Type == reflect . TypeOf ( new (big. Int )) {
268+ if rule := conf .Field (i ).Interface ().(* big.Int ); rule != nil {
269+ forksByBlock = append ( forksByBlock , rule . Uint64 ())
270+ }
234271 }
235272 }
236- // Sort the fork block numbers to permit chronological XOR
237- for i := 0 ; i < len (forks ); i ++ {
238- for j := i + 1 ; j < len (forks ); j ++ {
239- if forks [i ] > forks [j ] {
240- forks [i ], forks [j ] = forks [j ], forks [i ]
241- }
273+ slices .Sort (forksByBlock )
274+ slices .Sort (forksByTime )
275+
276+ // Deduplicate fork identifiers applying multiple forks
277+ for i := 1 ; i < len (forksByBlock ); i ++ {
278+ if forksByBlock [i ] == forksByBlock [i - 1 ] {
279+ forksByBlock = append (forksByBlock [:i ], forksByBlock [i + 1 :]... )
280+ i --
242281 }
243282 }
244- // Deduplicate block numbers applying multiple forks
245- for i := 1 ; i < len (forks ); i ++ {
246- if forks [i ] == forks [i - 1 ] {
247- forks = append (forks [:i ], forks [i + 1 :]... )
283+ for i := 1 ; i < len (forksByTime ); i ++ {
284+ if forksByTime [i ] == forksByTime [i - 1 ] {
285+ forksByTime = append (forksByTime [:i ], forksByTime [i + 1 :]... )
248286 i --
249287 }
250288 }
251289 // Skip any forks in block 0, that's the genesis ruleset
252- if len (forks ) > 0 && forks [0 ] == 0 {
253- forks = forks [1 :]
290+ if len (forksByBlock ) > 0 && forksByBlock [0 ] == 0 {
291+ forksByBlock = forksByBlock [1 :]
292+ }
293+ if len (forksByTime ) > 0 && forksByTime [0 ] == 0 {
294+ forksByTime = forksByTime [1 :]
254295 }
255- return forks
296+ return forksByBlock , forksByTime
256297}
0 commit comments