Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
15 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion consensus/ethash/algorithm.go
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,7 @@ func generateDataset(dest []uint32, epoch uint64, epochLength uint64, cache []ui
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
logFn("Generated ethash verification dataset", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed))
}()

// Figure out whether the bytes need to be swapped for the machine
Expand Down
89 changes: 73 additions & 16 deletions consensus/ethash/ethash.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,31 +226,37 @@ func (lru *lru) get(epoch uint64, epochLength uint64, ecip1099FBlock *uint64) (i
lru.mu.Lock()
defer lru.mu.Unlock()

cacheKey := fmt.Sprintf("%d-%d", epoch, epochLength)
// Get or create the item for the requested epoch.
item, ok := lru.cache.Get(epoch)
item, ok := lru.cache.Get(cacheKey)
if !ok {
if lru.future > 0 && lru.future == epoch {
item = lru.futureItem
} else {
log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
item = lru.new(epoch, epochLength)
}
lru.cache.Add(epoch, item)
lru.cache.Add(cacheKey, item)
}

// Ensure pre-generation handles ecip-1099 changeover correctly
var nextEpoch = epoch + 1
var nextEpochLength = epochLength
if ecip1099FBlock != nil {
nextEpochBlock := nextEpoch * epochLength
// Note that == demands that the ECIP1099 activation block is situated
// at the beginning of an epoch.
// https://github.com/ethereumclassic/ECIPs/blob/master/_specs/ecip-1099.md#implementation
if nextEpochBlock == *ecip1099FBlock && epochLength == epochLengthDefault {
nextEpoch = nextEpoch / 2
nextEpochLength = epochLengthECIP1099
}
}

// Update the 'future item' if epoch is larger than previously seen.
if epoch < maxEpoch-1 && lru.future < nextEpoch {
// Last conditional clause ('lru.future > nextEpoch') handles the ECIP1099 case where
// the next epoch is expected to be LESSER THAN that of the previous state's future epoch number.
if epoch < maxEpoch-1 && lru.future != nextEpoch {
log.Trace("Requiring new future ethash "+lru.what, "epoch", nextEpoch)
future = lru.new(nextEpoch, nextEpochLength)
lru.future = nextEpoch
Expand Down Expand Up @@ -337,8 +343,13 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) {
if !isLittleEndian() {
endian = ".be"
}
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
logger := log.New("epoch", c.epoch)
// The file path naming scheme was changed to include epoch values in the filename,
// which enables a filepath glob with scan to identify out-of-bounds caches and remove them.
// The legacy path declaration is provided below as a comment for reference.
//
// path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) // LEGACY
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%d-%x%s", algorithmRevision, c.epoch, seed[:8], endian)) // CURRENT
logger := log.New("epoch", c.epoch, "epochLength", c.epochLength)

// We're about to mmap the file, ensure that the mapping is cleaned up when the
// cache becomes unused.
Expand Down Expand Up @@ -367,11 +378,34 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) {
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, c.epochLength, seed)
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep), c.epochLength)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
os.Remove(path)

// Iterate over all cache file instances, deleting any out of bounds (where epoch is below lower limit, or above upper limit).
matches, _ := filepath.Glob(filepath.Join(dir, fmt.Sprintf("cache-R%d*", algorithmRevision)))
for _, file := range matches {
var ar int // algorithm revision
var e uint64 // epoch
var s string // seed
if _, err := fmt.Sscanf(filepath.Base(file), "cache-R%d-%d-%s"+endian, &ar, &e, &s); err != nil {
// There is an unrecognized file in this directory.
// See if the name matches the expected pattern of the legacy naming scheme.
if _, err := fmt.Sscanf(filepath.Base(file), "cache-R%d-%s"+endian, &ar, &s); err == nil {
// This file matches the previous generation naming pattern (sans epoch).
if err := os.Remove(file); err != nil {
logger.Error("Failed to remove legacy ethash cache file", "file", file, "err", err)
} else {
logger.Warn("Deleted legacy ethash cache file", "path", file)
}
}
// Else the file is unrecognized (unknown name format), leave it alone.
continue
}
if e <= c.epoch-uint64(limit) || e > c.epoch+1 {
if err := os.Remove(file); err == nil {
logger.Debug("Deleted ethash cache file", "target.epoch", e, "file", file)
} else {
logger.Error("Failed to delete ethash cache file", "target.epoch", e, "file", file, "err", err)
}
}
}
})
}
Expand Down Expand Up @@ -429,7 +463,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
if !isLittleEndian() {
endian = ".be"
}
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%d-%x%s", algorithmRevision, d.epoch, seed[:8], endian))
logger := log.New("epoch", d.epoch)

// We're about to mmap the file, ensure that the mapping is cleaned up when the
Expand Down Expand Up @@ -465,11 +499,34 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, d.epochLength, cache)
}
// Iterate over all previous instances and delete old ones
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep), d.epochLength)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
os.Remove(path)

// Iterate over all full file instances, deleting any out of bounds (where epoch is below lower limit, or above upper limit).
matches, _ := filepath.Glob(filepath.Join(dir, fmt.Sprintf("full-R%d*", algorithmRevision)))
for _, file := range matches {
var ar int // algorithm revision
var e uint64 // epoch
var s string // seed
if _, err := fmt.Sscanf(filepath.Base(file), "full-R%d-%d-%s"+endian, &ar, &e, &s); err != nil {
// There is an unrecognized file in this directory.
// See if the name matches the expected pattern of the legacy naming scheme.
if _, err := fmt.Sscanf(filepath.Base(file), "full-R%d-%s"+endian, &ar, &s); err == nil {
// This file matches the previous generation naming pattern (sans epoch).
if err := os.Remove(file); err != nil {
logger.Error("Failed to remove legacy ethash full file", "file", file, "err", err)
} else {
logger.Warn("Deleted legacy ethash full file", "path", file)
}
}
// Else the file is unrecognized (unknown name format), leave it alone.
continue
}
if e <= d.epoch-uint64(limit) || e > d.epoch+1 {
if err := os.Remove(file); err == nil {
logger.Debug("Deleted ethash full file", "target.epoch", e, "file", file)
} else {
logger.Error("Failed to delete ethash full file", "target.epoch", e, "file", file, "err", err)
}
}
}
})
}
Expand Down
224 changes: 224 additions & 0 deletions consensus/ethash/ethash_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,19 +17,243 @@
package ethash

import (
"fmt"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)

// verboseLogging routes all log output to stdout at maximum verbosity,
// which is handy when debugging these cache/dataset tests.
func verboseLogging() {
	handler := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
	handler.Verbosity(log.Lvl(99))
	log.Root().SetHandler(handler)
}

// TestEthashECIP1099UniqueSeedHashes walks a long range of block numbers
// across the ECIP-1099 epoch-length changeover and asserts that every
// distinct epoch produces a unique seed hash.
func TestEthashECIP1099UniqueSeedHashes(t *testing.T) {
	// Use some "big" arbitrary multiple to make sure that simulate real life adequately.
	const iterMultiple = 6
	activationBlock := uint64(epochLengthDefault * 3 * iterMultiple)

	// Record every seed hash produced, keyed by its hex encoding, so that any
	// collision between epochs can be detected and reported with its origin.
	type seedOrigin struct {
		epoch       uint64
		epochLength uint64
	}
	seen := make(map[string]seedOrigin)

	maxBlock := activationBlock * uint64(iterMultiple) * 42
	lastEpoch := uint64(math.MaxInt64)
	for bn := uint64(0); bn < maxBlock; bn += epochLengthDefault / 2 {
		// Derive the epoch number independently to use for logging and debugging.
		length := epochLengthDefault
		if bn >= activationBlock {
			length = epochLengthECIP1099
		}
		ep := calcEpoch(bn, uint64(length))
		epl := calcEpochLength(bn, &activationBlock)

		// Only inspect each epoch once; consecutive blocks share an epoch.
		if ep == lastEpoch {
			continue
		}
		lastEpoch = ep

		seed := seedHash(ep, epl)
		seedHex := hexutil.Encode(seed[:])[2:]
		prev, dup := seen[seedHex]
		if !dup {
			seen[seedHex] = seedOrigin{epoch: ep, epochLength: epl}
			continue
		}
		t.Logf("block=%d epoch=%d epoch.len=%d ECIP1099=/%d (%0.1f%%) RANGE=/%d (%0.1f%%)",
			bn,
			ep, epl,
			activationBlock, float64(bn)/float64(activationBlock)*100,
			maxBlock, float64(bn)/float64(maxBlock)*100,
		)
		t.Errorf("duplicate seed hash: %s a.epoch=%d a.epochLength=%d b.epoch=%d b.epochLength=%d",
			seedHex, prev.epoch, prev.epochLength, ep, epl)
	}
}

// TestEthashCaches exercises ethash.cache across a long range of block numbers
// spanning the ECIP-1099 epoch-length changeover, verifying that each returned
// cache has the expected epoch length and that the number of on-disk cache
// files stays within the configured limit.
func TestEthashCaches(t *testing.T) {
	verboseLogging()

	// Make a copy of the default config.
	conf := Config{
		CacheDir:         filepath.Join(os.TempDir(), "ethash-cache-test-cachedir"),
		CachesInMem:      2,
		CachesOnDisk:     3,
		CachesLockMmap:   false,
		DatasetsInMem:    1,
		DatasetsOnDisk:   2,
		DatasetsLockMmap: false,
		DatasetDir:       filepath.Join(os.TempDir(), "ethash-cache-test-datadir"),
		PowMode:          ModeNormal,
	}

	// Clean up ahead of ourselves.
	os.RemoveAll(conf.CacheDir)
	os.RemoveAll(conf.DatasetDir)

	// And after ourselves.
	defer os.RemoveAll(conf.CacheDir)
	defer os.RemoveAll(conf.DatasetDir)

	// Use some "big" arbitrary multiple to make sure that we simulate real life adequately.
	testIterationMultiple := 6
	ecip1099Block := uint64(epochLengthDefault * conf.CachesInMem * testIterationMultiple)
	conf.ECIP1099Block = &ecip1099Block

	// Construct our Ethash
	e := New(conf, nil, false)

	trialMax := ecip1099Block * uint64(testIterationMultiple) * 2
	latestIteratedEpoch := uint64(math.MaxInt64)
	for n := uint64(0); n < trialMax; n += epochLengthDefault / 300 {
		// Calculate the epoch number independently to use for logging and debugging.
		epochLength := epochLengthDefault
		if n >= ecip1099Block {
			epochLength = epochLengthECIP1099
		}
		ep := calcEpoch(n, uint64(epochLength))
		epl := calcEpochLength(n, conf.ECIP1099Block)

		if ep != latestIteratedEpoch {
			t.Logf("block=%d epoch=%d epoch.len=%d ECIP1099=/%d (%0.1f%%) RANGE=/%d (%0.1f%%)",
				n,
				ep, epl,
				ecip1099Block, float64(n)/float64(ecip1099Block)*100,
				trialMax, float64(n)/float64(trialMax)*100,
			)
			latestIteratedEpoch = ep
		}

		// This is the tested function.
		c := e.cache(n)

		// Do we get the right epoch length?
		if c.epochLength != epl {
			// Give the future epoch routine a chance to finish.
			time.Sleep(1 * time.Second)

			// current status
			t.Logf("block=%d epoch=%d epoch.len=%d ECIP1099=/%d (%0.1f%%) RANGE=/%d (%0.1f%%)",
				n,
				ep, epl,
				ecip1099Block, float64(n)/float64(ecip1099Block)*100,
				trialMax, float64(n)/float64(trialMax)*100,
			)

			// ls -l /tmp/ethash-cache-test-cachedir
			entries, _ := os.ReadDir(conf.CacheDir)
			t.Log("cachedir", conf.CacheDir)
			for _, entry := range entries {
				// Fixed: the original used a raw string ` - %s\n`, printing a
				// literal backslash-n; match the format used below instead.
				t.Logf(` - %s`, entry.Name())
			}

			t.Fatalf("Unexpected epoch length: %d", c.epochLength)
		}

		entries, _ := os.ReadDir(conf.CacheDir)
		// We add +1 to CachesOnDisk because the future epoch cache is also created and can still
		// be in-progress generating as a goroutine.
		if len(entries) > conf.CachesOnDisk+1 {
			for _, entry := range entries {
				t.Logf(` - %s`, entry.Name())
			}
			t.Fatalf("Too many cache files: %d", len(entries))
		}
	}
}

// TestEthashCacheFileEviction verifies the on-disk cache file housekeeping:
// a cache file named with the legacy (pre-epoch) naming scheme is deleted
// when the cache is generated, while an unrecognized file in the cache
// directory is left untouched.
func TestEthashCacheFileEviction(t *testing.T) {
	verboseLogging()

	// Make a copy of the default config.
	conf := Config{
		CacheDir:         filepath.Join(os.TempDir(), "ethash-cache-test-cachedir"),
		CachesInMem:      2,
		CachesOnDisk:     3,
		CachesLockMmap:   false,
		DatasetsInMem:    1,
		DatasetsOnDisk:   2,
		DatasetsLockMmap: false,
		DatasetDir:       filepath.Join(os.TempDir(), "ethash-cache-test-datadir"),
		PowMode:          ModeNormal,
	}

	// Clean up ahead of ourselves.
	os.RemoveAll(conf.CacheDir)
	os.RemoveAll(conf.DatasetDir)

	// And after ourselves.
	defer os.RemoveAll(conf.CacheDir)
	defer os.RemoveAll(conf.DatasetDir)

	// Use some "big" arbitrary multiple to make sure that we simulate real life adequately.
	testIterationMultiple := 6
	ecip1099Block := uint64(epochLengthDefault * conf.CachesInMem * testIterationMultiple)
	conf.ECIP1099Block = &ecip1099Block

	// Construct our Ethash
	e := New(conf, nil, false)

	bn := uint64(12_345_678)

	el := calcEpochLength(bn, conf.ECIP1099Block)
	ep := calcEpoch(bn, el)
	seed := seedHash(ep, el)

	// Fixed: the MkdirAll error was previously ignored; fail fast if the
	// cache directory cannot be created, since every step below depends on it.
	if err := os.MkdirAll(conf.CacheDir, 0700); err != nil {
		t.Fatal(err)
	}

	// Create a legacy cache file.
	// This should get removed.
	legacyCacheFileBasePath := fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed[:8])
	legacyCacheFilePath := filepath.Join(conf.CacheDir, legacyCacheFileBasePath)
	if err := os.WriteFile(legacyCacheFilePath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Create an unknown file in the cache dir.
	// This should not get removed.
	unknownCacheFilePath := filepath.Join(conf.CacheDir, "unexpected-file")
	if err := os.WriteFile(unknownCacheFilePath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}

	// Print entries before ethash.cache method called.
	entries, _ := os.ReadDir(conf.CacheDir)
	for _, entry := range entries {
		t.Logf(` - %s`, entry.Name())
	}

	// Call the cache method, which will clean up the cache dir after generating the cache.
	e.cache(bn)

	entries, _ = os.ReadDir(conf.CacheDir)
	for _, entry := range entries {
		t.Logf(` - %s`, entry.Name())
	}

	if _, err := os.Stat(legacyCacheFilePath); !os.IsNotExist(err) {
		t.Fatalf("legacy cache file %s not removed", legacyCacheFilePath)
	}
	if _, err := os.Stat(unknownCacheFilePath); err != nil {
		t.Fatalf("unknown cache file %s removed", unknownCacheFilePath)
	}
}

// Tests caches get sets correct future
func TestCachesGet(t *testing.T) {
ethashA := NewTester(nil, false)
Expand Down