Skip to content

Commit

Permalink
Address review
Browse files Browse the repository at this point in the history
  • Loading branch information
magik6k committed Aug 7, 2024
1 parent 325cab5 commit d1688ad
Show file tree
Hide file tree
Showing 13 changed files with 155 additions and 260 deletions.
162 changes: 36 additions & 126 deletions cmd/curio/calc.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,11 @@ var calcCmd = &cli.Command{

var calcBatchCpuCmd = &cli.Command{
Name: "batch-cpu",
Usage: "See layout of batch sealer threads",
Usage: "Analyze and display the layout of batch sealer threads",
Description: `Analyze and display the layout of batch sealer threads on your CPU.
It provides detailed information about CPU utilization for batch sealing operations, including core allocation and
thread distribution for different batch sizes.`,
Flags: []cli.Flag{
&cli.BoolFlag{Name: "dual-hashers", Value: true},
},
Expand Down Expand Up @@ -61,157 +65,59 @@ var calcBatchCpuCmd = &cli.Command{
printForBatchSize := func(batchSize int) {
fmt.Printf("Batch Size: %s sectors\n", color.CyanString("%d", batchSize))
fmt.Println()
fmt.Printf("Required Threads: %d\n", batchSize/sectorsPerThread)
requiredCCX := (batchSize + sectorsPerCCX - 1) / sectorsPerCCX
fmt.Printf("Required CCX: %d\n", requiredCCX)

requiredCores := requiredCCX + batchSize/sectorsPerThread/info.ThreadsPerCore
fmt.Printf("Required Cores: %d hasher (+4 minimum for non-hashers)\n", requiredCores)
config, err := sealsupra.GenerateSupraSealConfig(*info, cctx.Bool("dual-hashers"), batchSize, nil)
if err != nil {
fmt.Printf("Error generating config: %s\n", err)
return
}

enoughCores := requiredCores <= info.CoreCount
fmt.Printf("Required Threads: %d\n", config.RequiredThreads)
fmt.Printf("Required CCX: %d\n", config.RequiredCCX)
fmt.Printf("Required Cores: %d hasher (+4 minimum for non-hashers)\n", config.RequiredCores)

enoughCores := config.RequiredCores <= info.CoreCount
if enoughCores {
fmt.Printf("Enough cores available for hashers %s\n", color.GreenString("✔"))
} else {
fmt.Printf("Not enough cores available for hashers %s\n", color.RedString("✘"))
return
}

coresLeftover := info.CoreCount - requiredCores
fmt.Printf("Non-hasher cores: %d\n", coresLeftover)

const minOverheadCores = 4

type CoreNum = int // core number, 0-based

var (
// core assignments for non-hasher work
// defaults are the absolutely worst case of just 4 cores available

pc1writer CoreNum = 1
pc1reader CoreNum = 2
pc1orchestrator CoreNum = 3

pc2reader CoreNum = 0
pc2hasher CoreNum = 1
pc2hasher_cpu CoreNum = 0
pc2writer CoreNum = 0

c1reader CoreNum = 0

pc2writer_cores int = 1
)

if coresLeftover < minOverheadCores {
fmt.Printf("Not enough cores for coordination %s\n", color.RedString("✘"))
return
} else {
fmt.Printf("Enough cores for coordination %s\n", color.GreenString("✔"))
}

nextFreeCore := minOverheadCores
fmt.Printf("Non-hasher cores: %d\n", info.CoreCount-config.RequiredCores)

// first move pc2 to individual cores
if coresLeftover > nextFreeCore {
pc2writer = nextFreeCore
nextFreeCore++
} else {
if config.P2WrRdOverlap {
color.Yellow("! P2 writer will share a core with P2 reader, performance may be impacted")
}

if coresLeftover > nextFreeCore {
pc2hasher = nextFreeCore
nextFreeCore++
} else {
if config.P2HsP1WrOverlap {
color.Yellow("! P2 hasher will share a core with P1 writer, performance may be impacted")
}

if coresLeftover > nextFreeCore {
pc2hasher_cpu = nextFreeCore
nextFreeCore++
} else {
if config.P2HcP2RdOverlap {
color.Yellow("! P2 hasher_cpu will share a core with P2 reader, performance may be impacted")
}

if coresLeftover > nextFreeCore {
// might be fine to sit on core0, but let's not do that
pc2reader = nextFreeCore
c1reader = nextFreeCore
nextFreeCore++
}

// add p2 writer cores, up to 8 total
if coresLeftover > nextFreeCore {
// swap pc2reader with pc2writer
pc2writer, pc2reader = pc2reader, pc2writer

for i := 0; i < 7; i++ {
if coresLeftover > nextFreeCore {
pc2writer_cores++
nextFreeCore++
}
}
}

fmt.Println()
fmt.Printf("pc1 writer: %d\n", pc1writer)
fmt.Printf("pc1 reader: %d\n", pc1reader)
fmt.Printf("pc1 orchestrator: %d\n", pc1orchestrator)
fmt.Printf("pc1 writer: %d\n", config.Topology.PC1Writer)
fmt.Printf("pc1 reader: %d\n", config.Topology.PC1Reader)
fmt.Printf("pc1 orchestrator: %d\n", config.Topology.PC1Orchestrator)
fmt.Println()
fmt.Printf("pc2 reader: %d\n", pc2reader)
fmt.Printf("pc2 hasher: %d\n", pc2hasher)
fmt.Printf("pc2 hasher_cpu: %d\n", pc2hasher_cpu)
fmt.Printf("pc2 writer: %d\n", pc2writer)
fmt.Printf("pc2 writer_cores: %d\n", pc2writer_cores)
fmt.Printf("pc2 reader: %d\n", config.Topology.PC2Reader)
fmt.Printf("pc2 hasher: %d\n", config.Topology.PC2Hasher)
fmt.Printf("pc2 hasher_cpu: %d\n", config.Topology.PC2HasherCPU)
fmt.Printf("pc2 writer: %d\n", config.Topology.PC2Writer)
fmt.Printf("pc2 writer_cores: %d\n", config.Topology.PC2WriterCores)
fmt.Println()
fmt.Printf("c1 reader: %d\n", c1reader)
fmt.Printf("c1 reader: %d\n", config.Topology.C1Reader)
fmt.Println()

unoccupiedCores := coresLeftover - nextFreeCore
fmt.Printf("Unoccupied Cores: %d\n\n", unoccupiedCores)

var ccxCores []CoreNum // first core in each CCX
for i := 0; i < info.CoreCount; i += info.CoresPerL3 {
ccxCores = append(ccxCores, i)
}

type sectorCoreConfig struct {
core CoreNum // coordinator core
hashers CoreNum // number of hasher cores
}
var coreConfigs []sectorCoreConfig

for i := requiredCores; i > 0; {
firstCCXCoreNum := ccxCores[len(ccxCores)-1]
toAssign := min(i, info.CoresPerL3)

// shift up the first core if possible so that cores on the right are used first
coreNum := firstCCXCoreNum + info.CoresPerL3 - toAssign

coreConfigs = append(coreConfigs, sectorCoreConfig{
core: coreNum,
hashers: (toAssign - 1) * info.ThreadsPerCore,
})

i -= toAssign
if toAssign == info.CoresPerL3 {
ccxCores = ccxCores[:len(ccxCores)-1]
if len(ccxCores) == 0 {
break
}
}
}

// reverse the order
for i, j := 0, len(coreConfigs)-1; i < j; i, j = i+1, j-1 {
coreConfigs[i], coreConfigs[j] = coreConfigs[j], coreConfigs[i]
}
fmt.Printf("Unoccupied Cores: %d\n\n", config.UnoccupiedCores)

fmt.Println("{")
fmt.Printf(" sectors = %d;\n", batchSize)
fmt.Println(" coordinators = (")
for i, config := range coreConfigs {
fmt.Printf(" { core = %d;\n hashers = %d; }", config.core, config.hashers)
if i < len(coreConfigs)-1 {
for i, coord := range config.Topology.SectorConfigs[0].Coordinators {
fmt.Printf(" { core = %d;\n hashers = %d; }", coord.Core, coord.Hashers)
if i < len(config.Topology.SectorConfigs[0].Coordinators)-1 {
fmt.Println(",")
} else {
fmt.Println()
Expand All @@ -235,6 +141,10 @@ var calcBatchCpuCmd = &cli.Command{
var calcSuprasealConfigCmd = &cli.Command{
Name: "supraseal-config",
Usage: "Generate a supra_seal configuration",
Description: `Generate a supra_seal configuration for a given batch size.
This command outputs a configuration expected by SupraSeal. The main purpose of this command is debugging and testing.
The config can be used directly with SupraSeal binaries to test it without involving Curio.`,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "dual-hashers",
Expand Down
Binary file removed commit-phase1-output
Binary file not shown.
4 changes: 3 additions & 1 deletion deps/config/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -478,11 +478,13 @@ type CurioSealConfig struct {
// Set to false for older CPUs (Zen 2 and before).
SingleHasherPerThread bool

// LayerNVMEDevices is a list of pcie device addresses that should be used for layer storage.
// LayerNVMEDevices is a list of pcie device addresses that should be used for SDR layer storage.
// The required storage is 11 * BatchSealBatchSize * BatchSealSectorSize * BatchSealPipelines
// Total Read IOPS for optimal performance should be 10M+.
// The devices MUST be NVMe devices, not used for anything else. Any data on the devices will be lost!
//
// It's recommended to define these settings in a per-machine layer, as the devices are machine-specific.
//
// Example: ["0000:01:00.0", "0000:01:00.1"]
LayerNVMEDevices []string
}
Expand Down
4 changes: 4 additions & 0 deletions lib/proof/porep_vproof_bin_decode.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@ import (
"io"
)

// This file contains a bincode decoder for Commit1OutRaw.
// This is the format output by the C++ supraseal C1 implementation.
// bincode - https://github.com/bincode-org/bincode

func ReadLE[T any](r io.Reader) (T, error) {
var out T
err := binary.Read(r, binary.LittleEndian, &out)
Expand Down
24 changes: 22 additions & 2 deletions lib/proof/porep_vproof_bin_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,42 @@ package proof

import (
"bytes"
"compress/gzip"
"encoding/json"
"io"
"os"
"testing"

"github.com/filecoin-project/filecoin-ffi/cgo"
)

func TestDecode(t *testing.T) {
if os.Getenv("EXPENSIVE_TESTS") == "" {
t.Skip()
}

//binFile := "../../extern/supra_seal/demos/c2-test/resources/test/commit-phase1-output"
binFile := "../../commit-phase1-output"
binFile := "../../commit-phase1-output.gz"

rawData, err := os.ReadFile(binFile)
gzData, err := os.ReadFile(binFile)
if err != nil {
t.Fatal(err)
}

gzReader, err := gzip.NewReader(bytes.NewReader(gzData))
if err != nil {
t.Fatal(err)
}

rawData, err := io.ReadAll(gzReader)
if err != nil {
t.Fatal(err)
}

if err := gzReader.Close(); err != nil {
t.Fatal(err)
}

dec, err := DecodeCommit1OutRaw(bytes.NewReader(rawData))
if err != nil {
t.Fatal(err)
Expand Down
3 changes: 3 additions & 0 deletions lib/proof/porep_vproof_challenges.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@ import (
"github.com/minio/sha256-simd"
)

// TODO: This file is a placeholder with links to the original implementation in Rust. Eventually we want to
// have our own implementation for generating PoRep vanilla proofs in Go.

// https://github.com/filecoin-project/rust-fil-proofs/blob/8f5bd86be36a55e33b9b293ba22ea13ca1f28163/storage-proofs-porep/src/stacked/vanilla/challenges.rs#L21

func DeriveInteractiveChallenges(
Expand Down
3 changes: 2 additions & 1 deletion lib/proof/porep_vproof_types.go
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
package proof

// This file contains some type definitions from
// This file contains PoRep vanilla proof type definitions from
// - https://github.com/filecoin-project/rust-fil-proofs/tree/master/storage-proofs-core/src/merkle
// - https://github.com/filecoin-project/rust-fil-proofs/tree/master/storage-proofs-porep/src/stacked/vanilla
// - https://github.com/filecoin-project/rust-filecoin-proofs-api/tree/master/src
// The json representation of those matches the representation expected by rust-fil-proofs.

// core

Expand Down
3 changes: 3 additions & 0 deletions lib/proof/porep_vproof_vanilla.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
package proof

// TODO: This file is a placeholder with links to the original implementation in Rust. Eventually we want to
// have our own implementation for generating PoRep vanilla proofs in Go.

// https://github.com/filecoin-project/rust-fil-proofs/blob/8f5bd86be36a55e33b9b293ba22ea13ca1f28163/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs#L60
func ProveAllPartitions() {

Expand Down
Loading

0 comments on commit d1688ad

Please sign in to comment.