diff --git a/Makefile b/Makefile index 89242c264..c84112517 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,22 @@ BUILD_DEPS+=ffi-version-check .PHONY: ffi-version-check +## BLST (from supraseal, but needed in curio) + +BLST_PATH:=extern/supra_seal/ +BLST_DEPS:=.install-blst +BLST_DEPS:=$(addprefix $(BLST_PATH),$(BLST_DEPS)) + +$(BLST_DEPS): build/.blst-install ; + +build/.blst-install: $(BLST_PATH) + bash scripts/build-blst.sh + @touch $@ + +MODULES+=$(BLST_PATH) +BUILD_DEPS+=build/.blst-install +CLEAN+=build/.blst-install + ## SUPRA-FFI ifeq ($(shell uname),Linux) @@ -37,7 +53,7 @@ build/.supraseal-install: $(SUPRA_FFI_PATH) cd $(SUPRA_FFI_PATH) && ./build.sh @touch $@ -MODULES+=$(SUPRA_FFI_PATH) +# MODULES+=$(SUPRA_FFI_PATH) -- already included in BLST_PATH CLEAN+=build/.supraseal-install endif diff --git a/alertmanager/task_alert.go b/alertmanager/task_alert.go index c99221d65..9042ef2cb 100644 --- a/alertmanager/task_alert.go +++ b/alertmanager/task_alert.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" @@ -161,7 +162,7 @@ func (a *AlertTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task func (a *AlertTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "AlertManager", Cost: resources.Resources{ Cpu: 1, diff --git a/cmd/curio/main.go b/cmd/curio/main.go index 38f8c8604..24ed5d1bb 100644 --- a/cmd/curio/main.go +++ b/cmd/curio/main.go @@ -64,6 +64,7 @@ func main() { webCmd, guidedsetup.GuidedsetupCmd, sealCmd, + unsealCmd, marketCmd, fetchParamCmd, ffiCmd, diff --git a/cmd/curio/pipeline.go b/cmd/curio/seal.go similarity index 100% rename from cmd/curio/pipeline.go rename to cmd/curio/seal.go diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go index 03c07ba2b..abd5f0c33 100644 --- a/cmd/curio/tasks/tasks.go +++ b/cmd/curio/tasks/tasks.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/chainsched" "github.com/filecoin-project/curio/lib/curiochain" "github.com/filecoin-project/curio/lib/fastparamfetch" @@ -35,9 +36,11 @@ import ( "github.com/filecoin-project/curio/tasks/message" "github.com/filecoin-project/curio/tasks/metadata" piece2 "github.com/filecoin-project/curio/tasks/piece" + "github.com/filecoin-project/curio/tasks/scrub" "github.com/filecoin-project/curio/tasks/seal" "github.com/filecoin-project/curio/tasks/sealsupra" "github.com/filecoin-project/curio/tasks/snap" + "github.com/filecoin-project/curio/tasks/unseal" window2 "github.com/filecoin-project/curio/tasks/window" "github.com/filecoin-project/curio/tasks/winning" @@ -250,6 +253,11 @@ func addSealingTasks( var addFinalize bool // NOTE: Tasks with the LEAST priority are at the top + if cfg.Subsystems.EnableCommP { + scrubUnsealedTask := scrub.NewCommDCheckTask(db, slr) + activeTasks = append(activeTasks, scrubUnsealedTask) + } + if cfg.Subsystems.EnableBatchSeal { slotMgr = slotmgr.NewSlotMgr() @@ -268,8 +276,12 @@ func addSealingTasks( } if cfg.Subsystems.EnableSealSDR { - sdrTask := seal.NewSDRTask(full, db, 
sp, slr, cfg.Subsystems.SealSDRMaxTasks, cfg.Subsystems.SealSDRMinTasks) - activeTasks = append(activeTasks, sdrTask) + sdrMax := taskhelp.Max(cfg.Subsystems.SealSDRMaxTasks) + + sdrTask := seal.NewSDRTask(full, db, sp, slr, sdrMax, cfg.Subsystems.SealSDRMinTasks) + keyTask := unseal.NewTaskUnsealSDR(slr, db, sdrMax, full) + + activeTasks = append(activeTasks, sdrTask, keyTask) } if cfg.Subsystems.EnableSealSDRTrees { treeDTask := seal.NewTreeDTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks) @@ -295,6 +307,11 @@ func addSealingTasks( moveStorageTask := seal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks) moveStorageSnapTask := snap.NewMoveStorageTask(slr, db, cfg.Subsystems.MoveStorageMaxTasks) activeTasks = append(activeTasks, moveStorageTask, moveStorageSnapTask) + + if !cfg.Subsystems.NoUnsealedDecode { + unsealTask := unseal.NewTaskUnsealDecode(slr, db, cfg.Subsystems.MoveStorageMaxTasks, full) + activeTasks = append(activeTasks, unsealTask) + } } if cfg.Subsystems.EnableSendCommitMsg { commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg) diff --git a/cmd/curio/unseal.go b/cmd/curio/unseal.go new file mode 100644 index 000000000..7c962b183 --- /dev/null +++ b/cmd/curio/unseal.go @@ -0,0 +1,572 @@ +package main + +import ( + "database/sql" + "encoding/csv" + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + "github.com/samber/lo" + "github.com/snadrus/must" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/curio/deps" + "github.com/filecoin-project/curio/lib/dealdata" + "github.com/filecoin-project/curio/lib/paths" + "github.com/filecoin-project/curio/lib/reqcontext" + "github.com/filecoin-project/curio/lib/storiface" +) + +var unsealCmd = &cli.Command{ + Name: "unseal", + Usage: "Manage unsealed data", + Subcommands: []*cli.Command{ + unsealInfoCmd, + listUnsealPipelineCmd, + setTargetUnsealStateCmd, + unsealCheckCmd, + }, +} + +var unsealInfoCmd = &cli.Command{ + Name: "info", + Usage: "Get information about unsealed data", + ArgsUsage: "[minerAddress] [sectorNumber]", + + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return cli.ShowCommandHelp(cctx, "info") + } + minerAddress := cctx.Args().Get(0) + sectorNumber := cctx.Args().Get(1) + + maddr, err := address.NewFromString(minerAddress) + if err != nil { + return xerrors.Errorf("failed to parse miner address: %w", err) + } + + minerId, err := address.IDFromAddress(maddr) + if err != nil { + return xerrors.Errorf("failed to get miner id: %w", err) + } + + sectorNumberInt, err := strconv.Atoi(sectorNumber) + if err != nil { + return xerrors.Errorf("failed to parse sector number: %w", err) + } + + ctx := reqcontext.ReqContext(cctx) + dep, err := deps.GetDepsCLI(ctx, cctx) + if err != nil { + return err + } + + type fmeta struct { + FileType int64 `db:"sector_filetype"` + StorageID string `db:"storage_id"` + URLs string `db:"urls"` + CanSeal bool `db:"can_seal"` + CanStore bool `db:"can_store"` + } + + var fileMeta []fmeta + + err = dep.DB.Select(ctx, &fileMeta, ` + SELECT sector_filetype, sl.storage_id, sp.urls, sp.can_seal, sp.can_store FROM sector_location sl + LEFT JOIN storage_path sp ON sl.storage_id = sp.storage_id + WHERE miner_id = $1 AND sector_num = $2; + `, minerId, sectorNumberInt) + if err != nil { + return xerrors.Errorf("failed to query sector location: %w", err) + } + + matchType := func(t storiface.SectorFileType) func(item fmeta) 
bool { + return func(item fmeta) bool { + return storiface.SectorFileType(item.FileType) == t + } + } + + sealStoreStr := func(canSeal, canStore bool) string { + switch { + case canSeal && canStore: + return color.CyanString("seal/store") + case canSeal: + return color.YellowString("seal") + case canStore: + return color.GreenString("store") + default: + return color.BlueString("none") + } + } + + simpleUrls := func(urls string) (string, error) { + // urls are , separated, we only want the host:port parts + // paths.URLSeparator + + var out []string + for _, urlStr := range paths.UrlsFromString(urls) { + u, err := url.Parse(urlStr) + if err != nil { + return "", err + } + + out = append(out, u.Host) + } + + return strings.Join(out, ","), nil + } + + printMetaFor := func(fileType storiface.SectorFileType) { + paths := lo.Filter(fileMeta, filterPred(matchType(fileType))) + for _, path := range paths { + idSuffix := ".." + path.StorageID[len(path.StorageID)-8:] + + fmt.Printf(" - %s (%s) %s\n", idSuffix, sealStoreStr(path.CanSeal, path.CanStore), must.One(simpleUrls(path.URLs))) + } + } + + fmt.Println("** On Disk:") + + if _, ok := lo.Find(fileMeta, matchType(storiface.FTUnsealed)); ok { + fmt.Printf("Unsealed: %s\n", color.GreenString("✔")) + printMetaFor(storiface.FTUnsealed) + } else { + fmt.Printf("Unsealed: %s\n", color.RedString("✘")) + } + + _, ok := lo.Find(fileMeta, matchType(storiface.FTSealed)) + _, okSnap := lo.Find(fileMeta, matchType(storiface.FTUpdate)) + ok = ok || okSnap + if okSnap { + fmt.Printf("Sealed: %s %s\n", color.GreenString("✔"), color.YellowString("snap")) + printMetaFor(storiface.FTUpdate) + } else if ok { + fmt.Printf("Sealed: %s\n", color.GreenString("✔")) + printMetaFor(storiface.FTSealed) + } else { + fmt.Printf("Sealed: %s\n", color.RedString("✘")) + } + + var meta []struct { + TicketValue []byte `db:"ticket_value"` + TargetUnsealState *bool `db:"target_unseal_state"` + } + + err = dep.DB.Select(ctx, &meta, ` + SELECT ticket_value, target_unseal_state FROM sectors_meta WHERE sp_id = $1 AND sector_num = $2 + `, minerId, sectorNumberInt) + if err != nil { + return xerrors.Errorf("failed to query sector meta: %w", err) + } + + fmt.Println() + + if len(meta) > 0 { + if meta[0].TargetUnsealState == nil { + fmt.Printf("Target Unsealed State: %s\n", color.YellowString("keep as is")) + } else if *meta[0].TargetUnsealState { + fmt.Printf("Target Unsealed State: %s\n", color.GreenString("ensure unsealed")) + } else { + fmt.Printf("Target Unsealed State: %s\n", color.CyanString("ensure no unsealed")) + } + + if len(meta[0].TicketValue) > 0 { + fmt.Printf("Ticket: %s\n", color.GreenString("✔")) + } else { + fmt.Printf("Ticket: %s (unseal not possible)\n", color.RedString("✘")) + } + } + + var pipeline []struct { + CreateTime time.Time `db:"create_time"` + TaskIDUnsealSDR *int64 `db:"task_id_unseal_sdr"` + AfterUnsealSDR bool `db:"after_unseal_sdr"` + TaskIDDecodeSector *int64 `db:"task_id_decode_sector"` + AfterDecodeSector bool `db:"after_decode_sector"` + } + + err = dep.DB.Select(ctx, &pipeline, ` + SELECT create_time, task_id_unseal_sdr, after_unseal_sdr, task_id_decode_sector, after_decode_sector FROM sectors_unseal_pipeline WHERE sp_id = $1 AND sector_number = $2 + `, minerId, sectorNumberInt) + if err != nil { + return xerrors.Errorf("failed to query sector pipeline: %w", err) + } + + fmt.Println() + + if len(pipeline) > 0 { + fmt.Printf("Unseal Pipeline:\n") + fmt.Printf(" - Created: %s\n", pipeline[0].CreateTime) + + if pipeline[0].TaskIDUnsealSDR != nil { + 
fmt.Printf(" - Unseal SDR: %s running (task %d)\n", color.YellowString("⧖"), *pipeline[0].TaskIDUnsealSDR) + } else { + if pipeline[0].AfterUnsealSDR { + fmt.Printf(" - Unseal SDR: %s done\n", color.GreenString("✔")) + } else { + fmt.Printf(" - Unseal SDR: %s not done\n", color.RedString("✘")) + } + } + + if pipeline[0].TaskIDDecodeSector != nil { + fmt.Printf(" - Decode Sector: %s running (task %d)\n", color.YellowString("⧖"), *pipeline[0].TaskIDDecodeSector) + } else { + if pipeline[0].AfterDecodeSector { + fmt.Printf(" - Decode Sector: %s done\n", color.GreenString("✔")) + } else { + fmt.Printf(" - Decode Sector: %s not done\n", color.RedString("✘")) + } + } + } else { + fmt.Printf("Unseal Pipeline: %s no entry\n", color.YellowString("✘")) + } + + var scrubEntry []struct { + CreateTime time.Time `db:"create_time"` + Ok *bool `db:"ok"` + Message *string `db:"message"` + } + + err = dep.DB.Select(ctx, &scrubEntry, ` + SELECT create_time, ok, message FROM scrub_unseal_commd_check WHERE sp_id = $1 AND sector_number = $2 + ORDER BY create_time DESC LIMIT 1`, minerId, sectorNumberInt) + if err != nil { + return xerrors.Errorf("failed to query scrub check: %w", err) + } + + fmt.Println() + fmt.Printf("Integrity Check:\n") + + if len(scrubEntry) == 0 { + fmt.Printf(" - No checks yet %s\n", color.YellowString("✘")) + } else { + fmt.Printf(" - Created: %s\n", scrubEntry[0].CreateTime) + if scrubEntry[0].Ok != nil { + if *scrubEntry[0].Ok { + fmt.Printf(" - Result: %s\n", color.GreenString("✔")) + } else { + fmt.Printf(" - Result: %s\n", color.RedString("✘")) + fmt.Printf(" - Message: %s\n", *scrubEntry[0].Message) + } + } else { + fmt.Printf(" - In progress\n") + } + } + + return nil + }, +} + +func filterPred[T any](pred func(T) bool) func(T, int) bool { + return func(item T, _ int) bool { + return pred(item) + } +} + +var listUnsealPipelineCmd = &cli.Command{ + Name: "list-sectors", + Usage: "List data from the sectors_unseal_pipeline and sectors_meta tables", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "sp-id", + Aliases: []string{"s"}, + Usage: "Filter by storage provider ID", + }, + &cli.StringFlag{ + Name: "output", + Aliases: []string{"o"}, + Usage: "Output file path (default: stdout)", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := reqcontext.ReqContext(cctx) + dep, err := deps.GetDepsCLI(ctx, cctx) + if err != nil { + return err + } + + rows, err := dep.DB.Query(ctx, ` + SELECT + sm.sp_id, + sm.sector_num, + sm.reg_seal_proof, + sm.target_unseal_state, + sm.is_cc, + sup.create_time as create_time, + sup.task_id_unseal_sdr, + sup.after_unseal_sdr, + sup.task_id_decode_sector, + sup.after_decode_sector + FROM + sectors_meta sm + LEFT JOIN + sectors_unseal_pipeline sup + ON + sm.sp_id = sup.sp_id AND sm.sector_num = sup.sector_number + WHERE + ($1 = 0 OR sm.sp_id = $1) + ORDER BY + sm.sp_id, sm.sector_num DESC + `, cctx.Int64("sp-id")) + if err != nil { + return xerrors.Errorf("failed to query sectors data: %w", err) + } + defer rows.Close() + + writer := csv.NewWriter(os.Stdout) + if output := cctx.String("output"); output != "" { + file, err := os.Create(output) + if err != nil { + return xerrors.Errorf("failed to create output file: %w", err) + } + defer file.Close() + writer = csv.NewWriter(file) + } + defer writer.Flush() + + // Write header + if err := writer.Write([]string{ + "SP ID", "Sector Number", "Reg Seal Proof", "Target Unseal State", "Is CC", + "Create Time", "Task ID Unseal SDR", "After Unseal SDR", + "Task ID Decode Sector", "After Decode Sector", + }); err 
!= nil { + return xerrors.Errorf("failed to write CSV header: %w", err) + } + + // Write data + for rows.Next() { + var spID, sectorNumber, regSealProof int64 + var targetUnsealState, isCC *bool + var createTime *time.Time + var taskIDUnsealSDR, taskIDDecodeSector *int64 + var afterUnsealSDR, afterDecodeSector *bool + + err := rows.Scan( + &spID, §orNumber, ®SealProof, &targetUnsealState, &isCC, + &createTime, &taskIDUnsealSDR, &afterUnsealSDR, + &taskIDDecodeSector, &afterDecodeSector, + ) + if err != nil { + return xerrors.Errorf("failed to scan row: %w", err) + } + + var cts string + if createTime != nil { + cts = createTime.Format(time.RFC3339) + } + + row := []string{ + must.One(address.NewIDAddress(uint64(spID))).String(), + strconv.FormatInt(sectorNumber, 10), + strconv.FormatInt(regSealProof, 10), + formatNullableBool(targetUnsealState), + formatNullableBool(isCC), + cts, + formatNullableInt64(taskIDUnsealSDR), + formatNullableBool(afterUnsealSDR), + formatNullableInt64(taskIDDecodeSector), + formatNullableBool(afterDecodeSector), + } + + if err := writer.Write(row); err != nil { + return xerrors.Errorf("failed to write CSV row: %w", err) + } + } + + if err := rows.Err(); err != nil { + return xerrors.Errorf("error iterating rows: %w", err) + } + + fmt.Println("Data exported successfully.") + return nil + }, +} + +var setTargetUnsealStateCmd = &cli.Command{ + Name: "set-target-state", + Usage: "Set the target unseal state for a sector", + ArgsUsage: " ", + Description: `Set the target unseal state for a specific sector. + : The storage provider ID + : The sector number + : The target state (true, false, or none) + + The unseal target state indicates to curio how an unsealed copy of the sector should be maintained. + If the target state is true, curio will ensure that the sector is unsealed. + If the target state is false, curio will ensure that there is no unsealed copy of the sector. + If the target state is none, curio will not change the current state of the sector. + + Currently when the curio will only start new unseal processes when the target state changes from another state to true. + + When the target state is false, and an unsealed sector file exists, the GC mark step will create a removal mark + for the unsealed sector file. The file will only be removed after the removal mark is accepted. 
+`, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 3 { + return cli.ShowSubcommandHelp(cctx) + } + + sp, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("invalid storage provider address: %w", err) + } + + spID, err := address.IDFromAddress(sp) + if err != nil { + return xerrors.Errorf("failed to get storage provider id: %w", err) + } + + sectorNum, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("invalid sector-number: %w", err) + } + + targetStateStr := strings.ToLower(cctx.Args().Get(2)) + var targetState *bool + switch targetStateStr { + case "true": + trueVal := true + targetState = &trueVal + case "false": + falseVal := false + targetState = &falseVal + case "none": + targetState = nil + default: + return xerrors.Errorf("invalid target-state: must be true, false, or none") + } + + ctx := reqcontext.ReqContext(cctx) + dep, err := deps.GetDepsCLI(ctx, cctx) + if err != nil { + return err + } + + _, err = dep.DB.Exec(ctx, ` + UPDATE sectors_meta + SET target_unseal_state = $1 + WHERE sp_id = $2 AND sector_num = $3 + `, targetState, spID, sectorNum) + if err != nil { + return xerrors.Errorf("failed to update target unseal state: %w", err) + } + + fmt.Printf("Successfully set target unseal state to %v for SP %d, sector %d\n", targetStateStr, spID, sectorNum) + return nil + }, +} + +func formatNullableInt64(v *int64) string { + if v == nil { + return "" + } + return strconv.FormatInt(*v, 10) +} + +func formatNullableBool(v *bool) string { + if v == nil { + return "" + } + return strconv.FormatBool(*v) +} + +var unsealCheckCmd = &cli.Command{ + Name: "check", + Usage: "Check data integrity in unsealed sector files", + ArgsUsage: " ", + Description: `Create a check task for a specific sector, wait for its completion, and output the result. 
+ : The storage provider ID + : The sector number`, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 2 { + return cli.ShowSubcommandHelp(cctx) + } + + sp, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("invalid storage provider address: %w", err) + } + + spID, err := address.IDFromAddress(sp) + if err != nil { + return xerrors.Errorf("failed to get storage provider id: %w", err) + } + + sectorNum, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("invalid sector-number: %w", err) + } + + ctx := reqcontext.ReqContext(cctx) + dep, err := deps.GetDepsCLI(ctx, cctx) + if err != nil { + return err + } + + // Figure out the expected unsealed CID + + unsealedCid, err := dealdata.UnsealedCidFromPieces(ctx, dep.DB, int64(spID), sectorNum) + if err != nil { + return xerrors.Errorf("getting deal data CID: %w", err) + } + fmt.Printf("Expected unsealed CID: %s\n", unsealedCid) + + // Create the check task + var checkID int64 + err = dep.DB.QueryRow(ctx, ` + INSERT INTO scrub_unseal_commd_check (sp_id, sector_number, expected_unsealed_cid) + VALUES ($1, $2, $3) + RETURNING check_id + `, spID, sectorNum, unsealedCid.String()).Scan(&checkID) + if err != nil { + return xerrors.Errorf("failed to create check task: %w", err) + } + + _, _ = fmt.Fprintf(os.Stderr, "Created check task with ID %d\n", checkID) + + // Poll for completion + dots := 0 + for { + var ok sql.NullBool + var actualUnsealedCID, message sql.NullString + + err := dep.DB.QueryRow(ctx, ` + SELECT ok, actual_unsealed_cid, message + FROM scrub_unseal_commd_check + WHERE check_id = $1 + `, checkID).Scan(&ok, &actualUnsealedCID, &message) + + if err != nil { + return xerrors.Errorf("failed to query check task status: %w", err) + } + + if ok.Valid { + // Task completed + _, _ = fmt.Fprintf(os.Stderr, "\n") // Move to the next line after the dots + if ok.Bool { + fmt.Printf("Check task completed successfully %s\n", color.GreenString("✔")) + fmt.Printf("Actual unsealed CID: %s\n", actualUnsealedCID.String) + } else { + fmt.Printf("Check task failed %s\n", color.RedString("✘")) + fmt.Printf("Error message: %s\n", message.String) + fmt.Printf("Actual unsealed CID: %s\n", actualUnsealedCID.String) + } + return nil + } + + // Update progress indicator + dots = (dots + 1) % 4 + _, _ = fmt.Fprintf(os.Stderr, "Check task still in progress%s \r", "."+strings.Repeat(".", dots)) + + time.Sleep(2 * time.Second) + } + }, +} diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go index 3776faf02..6e0a1d2ce 100644 --- a/deps/config/doc_gen.go +++ b/deps/config/doc_gen.go @@ -620,6 +620,14 @@ This tasks should only be enabled on nodes with long-term storage. The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the SDRTrees machine into long-term storage. This task runs after the Finalize task.`, + }, + { + Name: "NoUnsealedDecode", + Type: "bool", + + Comment: `NoUnsealedDecode disables the decoding sector data on this node. Normally data encoding is enabled by default on +storage nodes with the MoveStorage task enabled. 
Setting this option to true means that unsealed data for sectors +will not be stored on this node`, }, { Name: "MoveStorageMaxTasks", @@ -662,6 +670,13 @@ This step submits the generated proofs to the chain.`, Comment: `UpdateProveMaxTasks sets the maximum number of concurrent SnapDeal proving tasks that can run on this instance.`, }, + { + Name: "EnableCommP", + Type: "bool", + + Comment: `EnableCommP enabled the commP task on te node. CommP is calculated before sending PublishDealMessage for a Mk12 +deal, and when checking sector data with 'curio unseal check'.`, + }, { Name: "BoostAdapters", Type: "[]string", diff --git a/deps/config/types.go b/deps/config/types.go index 18386ae84..059862015 100644 --- a/deps/config/types.go +++ b/deps/config/types.go @@ -211,6 +211,11 @@ type CurioSubsystemsConfig struct { // SDRTrees machine into long-term storage. This task runs after the Finalize task. EnableMoveStorage bool + // NoUnsealedDecode disables the decoding sector data on this node. Normally data encoding is enabled by default on + // storage nodes with the MoveStorage task enabled. Setting this option to true means that unsealed data for sectors + // will not be stored on this node + NoUnsealedDecode bool + // The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will // also be bounded by resources available on the machine. It is recommended that this value is set to a number which // uses all available network (or disk) bandwidth on the machine without causing bottlenecks. @@ -234,6 +239,10 @@ type CurioSubsystemsConfig struct { // UpdateProveMaxTasks sets the maximum number of concurrent SnapDeal proving tasks that can run on this instance. UpdateProveMaxTasks int + // EnableCommP enabled the commP task on te node. CommP is calculated before sending PublishDealMessage for a Mk12 + // deal, and when checking sector data with 'curio unseal check'. + EnableCommP bool + // BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. // This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. // Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. diff --git a/documentation/en/configuration/default-curio-configuration.md b/documentation/en/configuration/default-curio-configuration.md index 24aab3ad5..32f642c5d 100644 --- a/documentation/en/configuration/default-curio-configuration.md +++ b/documentation/en/configuration/default-curio-configuration.md @@ -159,6 +159,13 @@ description: The default curio configuration # type: bool #EnableMoveStorage = false + # NoUnsealedDecode disables the decoding sector data on this node. Normally data encoding is enabled by default on + # storage nodes with the MoveStorage task enabled. Setting this option to true means that unsealed data for sectors + # will not be stored on this node + # + # type: bool + #NoUnsealedDecode = false + # The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will # also be bounded by resources available on the machine. It is recommended that this value is set to a number which # uses all available network (or disk) bandwidth on the machine without causing bottlenecks. @@ -194,6 +201,12 @@ description: The default curio configuration # type: int #UpdateProveMaxTasks = 0 + # EnableCommP enabled the commP task on te node. 
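Taken together, NoUnsealedDecode and EnableCommP control whether a storage node keeps and verifies unsealed copies. A hedged Go sketch of a subsystems configuration for such a node, using only fields added or referenced in this patch (illustrative values; in practice these are set through the TOML layers stored in harmony_config):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/curio/deps/config"
)

func main() {
	// Hypothetical long-term storage node profile: it runs MoveStorage,
	// keeps unsealed copies (NoUnsealedDecode left false), and can run the
	// CommP task used by Mk12 deals and 'curio unseal check'.
	subs := config.CurioSubsystemsConfig{
		EnableMoveStorage: true,
		NoUnsealedDecode:  false,
		EnableCommP:       true,
	}
	fmt.Printf("%+v\n", subs)
}
```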
CommP is calculated before sending PublishDealMessage for a Mk12 + # deal, and when checking sector data with 'curio unseal check'. + # + # type: bool + #EnableCommP = false + # BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests. # This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations. # Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP. diff --git a/documentation/en/curio-cli/curio.md b/documentation/en/curio-cli/curio.md index f8e86d4bb..18a4c4de7 100644 --- a/documentation/en/curio-cli/curio.md +++ b/documentation/en/curio-cli/curio.md @@ -17,6 +17,7 @@ COMMANDS: web Start Curio web interface guided-setup Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner seal Manage the sealing pipeline + unseal Manage unsealed data market fetch-params Fetch proving parameters calc Math Utils @@ -538,6 +539,97 @@ OPTIONS: --help, -h show help ``` +## curio unseal +``` +NAME: + curio unseal - Manage unsealed data + +USAGE: + curio unseal command [command options] [arguments...] + +COMMANDS: + info Get information about unsealed data + list-sectors List data from the sectors_unseal_pipeline and sectors_meta tables + set-target-state Set the target unseal state for a sector + check Check data integrity in unsealed sector files + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### curio unseal info +``` +NAME: + curio unseal info - Get information about unsealed data + +USAGE: + curio unseal info [command options] [minerAddress] [sectorNumber] + +OPTIONS: + --help, -h show help +``` + +### curio unseal list-sectors +``` +NAME: + curio unseal list-sectors - List data from the sectors_unseal_pipeline and sectors_meta tables + +USAGE: + curio unseal list-sectors [command options] [arguments...] + +OPTIONS: + --sp-id value, -s value Filter by storage provider ID (default: 0) + --output value, -o value Output file path (default: stdout) + --help, -h show help +``` + +### curio unseal set-target-state +``` +NAME: + curio unseal set-target-state - Set the target unseal state for a sector + +USAGE: + curio unseal set-target-state [command options] + +DESCRIPTION: + Set the target unseal state for a specific sector. + : The storage provider ID + : The sector number + : The target state (true, false, or none) + + The unseal target state indicates to curio how an unsealed copy of the sector should be maintained. + If the target state is true, curio will ensure that the sector is unsealed. + If the target state is false, curio will ensure that there is no unsealed copy of the sector. + If the target state is none, curio will not change the current state of the sector. + + Currently when the curio will only start new unseal processes when the target state changes from another state to true. + + When the target state is false, and an unsealed sector file exists, the GC mark step will create a removal mark + for the unsealed sector file. The file will only be removed after the removal mark is accepted. + + +OPTIONS: + --help, -h show help +``` + +### curio unseal check +``` +NAME: + curio unseal check - Check data integrity in unsealed sector files + +USAGE: + curio unseal check [command options] + +DESCRIPTION: + Create a check task for a specific sector, wait for its completion, and output the result. 
+ : The storage provider ID + : The sector number + +OPTIONS: + --help, -h show help +``` + ## curio market ``` NAME: diff --git a/go.mod b/go.mod index 41ac3822e..752bb39f2 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/charmbracelet/lipgloss v0.10.0 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe + github.com/consensys/gnark-crypto v0.12.1 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 @@ -21,12 +22,14 @@ require ( github.com/filecoin-project/go-cbor-util v0.0.1 github.com/filecoin-project/go-commp-utils v0.1.4 github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8 + github.com/filecoin-project/go-commp-utils/v2 v2.1.0 github.com/filecoin-project/go-fil-commcid v0.1.0 + github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 github.com/filecoin-project/go-jsonrpc v0.6.1-0.20240820160949-2cfe810e5d2f github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-state-types v0.14.0 github.com/filecoin-project/go-statestore v0.2.0 - github.com/filecoin-project/lotus v1.28.2-0.20240729072329-792eecc4ea3f + github.com/filecoin-project/lotus v1.28.2-0.20240902200914-dde3cd9a0306 github.com/filecoin-project/specs-actors/v2 v2.3.6 github.com/filecoin-project/specs-actors/v5 v5.0.6 github.com/filecoin-project/specs-actors/v6 v6.0.2 @@ -65,6 +68,7 @@ require ( github.com/sirupsen/logrus v1.9.2 github.com/snadrus/must v0.0.0-20240605044437-98cedd57f8eb github.com/stretchr/testify v1.9.0 + github.com/triplewz/poseidon v0.0.2-0.20240407130934-5265fab9d889 github.com/urfave/cli/v2 v2.25.5 github.com/whyrusleeping/cbor-gen v0.1.2 github.com/yugabyte/pgx/v5 v5.5.3-yb-2 @@ -96,6 +100,7 @@ require ( github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -281,7 +286,6 @@ require ( github.com/shirou/gopsutil v2.18.12+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.0.1 // indirect github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba // indirect diff --git a/go.sum b/go.sum index 1fe11f3ee..4e4e7e863 100644 --- a/go.sum +++ b/go.sum @@ -103,6 +103,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= +github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= 
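The go-fil-commp-hashhash and go-commp-utils/v2 additions back the piece-commitment work behind EnableCommP and the scrub CommD check. As a rough illustration of streaming CommP calculation, a sketch that assumes the commp.Calc writer API and the go-fil-commcid conversion helper (neither call appears in this patch):

```go
package main

import (
	"fmt"
	"io"
	"os"

	commcid "github.com/filecoin-project/go-fil-commcid"
	commp "github.com/filecoin-project/go-fil-commp-hashhash"
)

func main() {
	f, err := os.Open("piece.bin") // hypothetical unpadded piece data
	if err != nil {
		panic(err)
	}
	defer f.Close()

	cp := new(commp.Calc) // streaming CommP accumulator (assumed API)
	if _, err := io.Copy(cp, f); err != nil {
		panic(err)
	}

	raw, paddedSize, err := cp.Digest() // raw commitment + padded piece size
	if err != nil {
		panic(err)
	}

	pieceCID, err := commcid.DataCommitmentV1ToCID(raw) // CommP as a CID
	if err != nil {
		panic(err)
	}
	fmt.Println(pieceCID, paddedSize)
}
```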
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -162,6 +164,8 @@ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+Bu github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe h1:69JI97HlzP+PH5Mi1thcGlDoBr6PS2Oe+l3mNmAkbs4= github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -269,6 +273,8 @@ github.com/filecoin-project/go-commp-utils v0.1.4 h1:/WSsrAb0xupo+aRWRyD80lRUXAX github.com/filecoin-project/go-commp-utils v0.1.4/go.mod h1:Sekocu5q9b4ECAUFu853GFUbm8I7upAluummHFe2kFo= github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8 h1:jAG2g1Fs/qoDSSaI8JaP/KmqR+QQ8IVQ6k9xKONa72M= github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20240802040721-2a04ffc8ffe8/go.mod h1:kU2KuSPLB+Xz4FEbVE0abzSN4l6irZ8tqgcYWPVDftU= +github.com/filecoin-project/go-commp-utils/v2 v2.1.0 h1:KWNRalUp2bhN1SW7STsJS2AHs9mnfGKk9LnQgzDe+gI= +github.com/filecoin-project/go-commp-utils/v2 v2.1.0/go.mod h1:NbxJYlhxtWaNhlVCj/gysLNu26kYII83IV5iNrAO9iI= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.1.0 h1:Pob2MphoipMbe/ksxZOMcQvmBHAd3sI/WEqcbpIsGI0= github.com/filecoin-project/go-crypto v0.1.0/go.mod h1:K9UFXvvoyAVvB+0Le7oGlKiT9mgA5FHOJdYQXEE8IhI= @@ -276,6 +282,8 @@ github.com/filecoin-project/go-f3 v0.2.0 h1:Gis44+hOrDjSUEw3IDmU7CudNILi5e+bb1pg github.com/filecoin-project/go-f3 v0.2.0/go.mod h1:43fBLX0iX0+Nnw4Z91wSrdfDYAd6YEDexy7GcLnIJtk= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-commp-hashhash v0.2.0 h1:HYIUugzjq78YvV3vC6rL95+SfC/aSTVSnZSZiDV5pCk= +github.com/filecoin-project/go-fil-commp-hashhash v0.2.0/go.mod h1:VH3fAFOru4yyWar4626IoS5+VGE8SfZiBODJLUigEo4= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -304,8 +312,8 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= -github.com/filecoin-project/lotus v1.28.2-0.20240729072329-792eecc4ea3f h1:HgTbO7Aib+aHeJxXZxWEScrl8DpG/BT7NPD1VxMO0kw= -github.com/filecoin-project/lotus 
v1.28.2-0.20240729072329-792eecc4ea3f/go.mod h1:2u+GoTcI1SLZo+N9Alonlegj1Y+hZpQ5TLfxRhlJIew= +github.com/filecoin-project/lotus v1.28.2-0.20240902200914-dde3cd9a0306 h1:ifh5Q3bYQkrdG3qpvSIOxOAhzz7r64mroz2STxlFXV4= +github.com/filecoin-project/lotus v1.28.2-0.20240902200914-dde3cd9a0306/go.mod h1:j/t2uL7mcE+ZC3FvTbui2Md4XCqAPNp1rh3SIwzTx5w= github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM= github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= @@ -1137,8 +1145,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1238,8 +1246,8 @@ github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU= -github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= +github.com/triplewz/poseidon v0.0.2-0.20240407130934-5265fab9d889 h1:cbYPZOEknyV/Gyud82ebTPiciOnVSv6tiMCQi5Y+mAs= +github.com/triplewz/poseidon v0.0.2-0.20240407130934-5265fab9d889/go.mod h1:fmoxtMcbtMUjlSJmpuS3Wk/oKSvdJpIp9YWRbsOu3T0= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= @@ -1601,7 +1609,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/harmony/harmonydb/sql/20240212-common-layers.sql b/harmony/harmonydb/sql/20240212-common-layers.sql index cf72e1750..3cc89c42e 100644 --- a/harmony/harmonydb/sql/20240212-common-layers.sql +++ b/harmony/harmonydb/sql/20240212-common-layers.sql @@ -18,13 +18,13 @@ INSERT INTO harmony_config (title, config) VALUES EnablePoRepProof = true EnableSendCommitMsg = true EnableMoveStorage = true - '), + '), -- 20240904-scrub-unseal-check.sql adds EnableScrubUnsealed = true ('seal-gpu', ' [Subsystems] EnableSealSDRTrees = true EnableSendPrecommitMsg = true - '), + '), -- 20240904-scrub-unseal-check.sql adds EnableScrubUnsealed = true ('seal-snark', ' [Subsystems] EnablePoRepProof = true diff --git a/harmony/harmonydb/sql/20240425-sector_meta.sql b/harmony/harmonydb/sql/20240425-sector_meta.sql index f4396acfc..9a4a7f0c6 100644 --- a/harmony/harmonydb/sql/20240425-sector_meta.sql +++ b/harmony/harmonydb/sql/20240425-sector_meta.sql @@ -27,6 +27,9 @@ CREATE TABLE sectors_meta ( -- deadline BIGINT, (null = not crawled) -- partition BIGINT, (null = not crawled) + -- Added in 20240903-unseal-pipeline.sql + -- target_unseal_state BOOLEAN, (null = either way, true - ensure unsealed, false - ensure sealed only) + PRIMARY KEY (sp_id, sector_num) ); diff --git a/harmony/harmonydb/sql/20240903-unseal-pipeline.sql b/harmony/harmonydb/sql/20240903-unseal-pipeline.sql new file mode 100644 index 000000000..c1d7d668a --- /dev/null +++ b/harmony/harmonydb/sql/20240903-unseal-pipeline.sql @@ -0,0 +1,98 @@ +CREATE TABLE sectors_unseal_pipeline ( + sp_id BIGINT NOT NULL, + sector_number BIGINT NOT NULL, + reg_seal_proof BIGINT NOT NULL, + + create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + + task_id_unseal_sdr BIGINT, -- builds unseal cache + after_unseal_sdr bool NOT NULL DEFAULT FALSE, + + task_id_decode_sector BIGINT, -- makes the "unsealed" copy (runs in target long-term storage) + after_decode_sector bool NOT NULL DEFAULT FALSE, + + primary key (sp_id, sector_number) +); + +ALTER TABLE sectors_meta ADD COLUMN target_unseal_state BOOLEAN; + +-- To unseal +-- 1. Target unseal state is true +-- 2. No unsealed sector entry in sector_location +-- 3. 
No unsealed sector entry in sectors_unseal_pipeline + +CREATE OR REPLACE FUNCTION update_sectors_unseal_pipeline_materialized( + target_sp_id BIGINT, + target_sector_num BIGINT +) RETURNS VOID AS $$ +DECLARE + should_be_added BOOLEAN; + should_not_be_removed BOOLEAN; +BEGIN + -- Check if the sector should be in the materialized table + SELECT EXISTS ( + SELECT 1 + FROM sectors_meta sm + WHERE sm.sp_id = target_sp_id + AND sm.sector_num = target_sector_num + AND sm.target_unseal_state = TRUE + AND sm.is_cc = FALSE + AND NOT EXISTS ( + SELECT 1 FROM sector_location sl + WHERE sl.miner_id = sm.sp_id + AND sl.sector_num = sm.sector_num + AND sl.sector_filetype = 1 -- 1 is unsealed + ) + AND NOT EXISTS ( + SELECT 1 FROM sectors_unseal_pipeline sup + WHERE sup.sp_id = sm.sp_id + AND sup.sector_number = sm.sector_num + ) + ) INTO should_be_added; + + -- If it should be in the materialized table + IF should_be_added THEN + -- Insert or update the row + INSERT INTO sectors_unseal_pipeline (sp_id, sector_number, reg_seal_proof) + SELECT sm.sp_id, sm.sector_num, sm.reg_seal_proof + FROM sectors_meta sm + WHERE sm.sp_id = target_sp_id AND sm.sector_num = target_sector_num + ON CONFLICT (sp_id, sector_number) DO UPDATE + SET reg_seal_proof = EXCLUDED.reg_seal_proof; + -- no else, the pipeline entries remove themselves after the unseal is done + END IF; + + -- Check if the sector should not be removed + SELECT EXISTS ( + SELECT 1 + FROM sectors_meta sm + WHERE sm.sp_id = target_sp_id + AND sm.sector_num = target_sector_num + AND sm.target_unseal_state = TRUE + ) INTO should_not_be_removed; + + -- If it should not be removed + IF should_not_be_removed THEN + -- Just in case make sure the sector is not scheduled for removal + DELETE FROM storage_removal_marks + WHERE sp_id = target_sp_id AND sector_num = target_sector_num AND sector_filetype = 1; -- 1 is unsealed + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION trig_sectors_meta_update_materialized() RETURNS TRIGGER AS $$ +BEGIN + IF TG_OP = 'INSERT' OR TG_OP = 'UPDATE' THEN + PERFORM update_sectors_unseal_pipeline_materialized(NEW.sp_id, NEW.sector_num); + ELSIF TG_OP = 'DELETE' THEN + PERFORM update_sectors_unseal_pipeline_materialized(OLD.sp_id, OLD.sector_num); + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trig_sectors_meta_update_materialized + AFTER INSERT OR UPDATE OR DELETE ON sectors_meta + FOR EACH ROW EXECUTE FUNCTION trig_sectors_meta_update_materialized(); + +-- not triggering on sector_location, storage can be detached occasionally and auto-scheduling 10000s of unseals is bad diff --git a/harmony/harmonydb/sql/20240904-scrub-unseal-check.sql b/harmony/harmonydb/sql/20240904-scrub-unseal-check.sql new file mode 100644 index 000000000..c6c8bc70a --- /dev/null +++ b/harmony/harmonydb/sql/20240904-scrub-unseal-check.sql @@ -0,0 +1,52 @@ +CREATE TABLE scrub_unseal_commd_check ( + check_id BIGSERIAL PRIMARY KEY, + + sp_id BIGINT NOT NULL, + sector_number BIGINT NOT NULL, + create_time TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT current_timestamp, + + task_id BIGINT, + + expected_unsealed_cid TEXT NOT NULL, + + -- results + ok BOOLEAN, + actual_unsealed_cid TEXT, + message TEXT, + + UNIQUE (sp_id, sector_number, create_time), + UNIQUE (task_id) +); + +-- Add EnableScrubUnsealed to seal and seal-gpu IF they are the defaults +UPDATE harmony_config +SET config = ' + [Subsystems] + EnableSealSDR = true + EnableSealSDRTrees = true + EnableSendPrecommitMsg = true + EnablePoRepProof = true + EnableSendCommitMsg = 
true + EnableMoveStorage = true + EnableScrubUnsealed = true + ' WHERE title = 'seal' AND config = ' + [Subsystems] + EnableSealSDR = true + EnableSealSDRTrees = true + EnableSendPrecommitMsg = true + EnablePoRepProof = true + EnableSendCommitMsg = true + EnableMoveStorage = true + '; + +UPDATE harmony_config +SET config = ' + [Subsystems] + EnableSealSDRTrees = true + EnableSendPrecommitMsg = true + EnableScrubUnsealed = true + ' WHERE title = 'seal-gpu' AND config = ' + [Subsystems] + EnableSealSDRTrees = true + EnableSendPrecommitMsg = true + '; \ No newline at end of file diff --git a/harmony/harmonytask/harmonytask.go b/harmony/harmonytask/harmonytask.go index 9fd3cfbb9..f812fe073 100644 --- a/harmony/harmonytask/harmonytask.go +++ b/harmony/harmonytask/harmonytask.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" ) // Consts (except for unit test) @@ -22,8 +23,9 @@ var FOLLOW_FREQUENCY = 1 * time.Minute // Check for work to follow this type TaskTypeDetails struct { // Max returns how many tasks this machine can run of this type. - // Zero (default) or less means unrestricted. - Max int + // Nil (default)/Zero or less means unrestricted. + // Counters can either be independent when created with Max, or shared between tasks with SharedMax.Make() + Max Limiter // Name is the task name to be added to the task list. Name string @@ -97,6 +99,23 @@ type TaskInterface interface { Adder(AddTaskFunc) } +type Limiter interface { + // Active returns the number of tasks of this type that are currently running + // in this limiter / limiter group. + Active() int + + // ActiveThis returns the number of tasks of this type that are currently running + // in this limiter (e.g. per-task-type count). + ActiveThis() int + + // AtMax returns whether this limiter permits more tasks to run. + AtMax() bool + + // Add increments / decrements the active task counters by delta. This call + // is atomic + Add(delta int) +} + // AddTaskFunc is responsible for adding a task's details "extra info" to the DB. // It should return true if the task should be added, false if it was already there. 
// This is typically accomplished with a "unique" index on your detals table that @@ -160,6 +179,10 @@ func New( TaskTypeDetails: c.TypeDetails(), TaskEngine: e, } + if h.Max == nil { + h.Max = taskhelp.Max(0) + } + if Registry[h.TaskTypeDetails.Name] == nil { return nil, fmt.Errorf("task %s not registered: var _ = harmonytask.Reg(t TaskInterface)", h.TaskTypeDetails.Name) } @@ -218,31 +241,31 @@ func (e *TaskEngine) GracefullyTerminate() { for { timeout := time.Millisecond for _, h := range e.handlers { - if h.TaskTypeDetails.Name == "WinPost" && h.Count.Load() > 0 { + if h.TaskTypeDetails.Name == "WinPost" && h.Max.Active() > 0 { timeout = time.Second log.Infof("node shutdown deferred for %f seconds", timeout.Seconds()) continue } - if h.TaskTypeDetails.Name == "WdPost" && h.Count.Load() > 0 { + if h.TaskTypeDetails.Name == "WdPost" && h.Max.Active() > 0 { timeout = time.Second * 3 log.Infof("node shutdown deferred for %f seconds due to running WdPost task", timeout.Seconds()) continue } - if h.TaskTypeDetails.Name == "WdPostSubmit" && h.Count.Load() > 0 { + if h.TaskTypeDetails.Name == "WdPostSubmit" && h.Max.Active() > 0 { timeout = time.Second log.Infof("node shutdown deferred for %f seconds due to running WdPostSubmit task", timeout.Seconds()) continue } - if h.TaskTypeDetails.Name == "WdPostRecover" && h.Count.Load() > 0 { + if h.TaskTypeDetails.Name == "WdPostRecover" && h.Max.Active() > 0 { timeout = time.Second log.Infof("node shutdown deferred for %f seconds due to running WdPostRecover task", timeout.Seconds()) continue } // Test tasks for itest - if h.TaskTypeDetails.Name == "ThingOne" && h.Count.Load() > 0 { + if h.TaskTypeDetails.Name == "ThingOne" && h.Max.Active() > 0 { timeout = time.Second log.Infof("node shutdown deferred for %f seconds due to running itest task", timeout.Seconds()) continue @@ -397,8 +420,8 @@ func (e *TaskEngine) pollerTryAllWork() bool { func (e *TaskEngine) ResourcesAvailable() resources.Resources { tmp := e.reg.Resources for _, t := range e.handlers { - ct := t.Count.Load() - tmp.Cpu -= int(ct) * t.Cost.Cpu + ct := t.Max.ActiveThis() + tmp.Cpu -= ct * t.Cost.Cpu tmp.Gpu -= float64(ct) * t.Cost.Gpu tmp.Ram -= uint64(ct) * t.Cost.Ram } diff --git a/harmony/harmonytask/task_type_handler.go b/harmony/harmonytask/task_type_handler.go index c9d619ba1..53c1534d1 100644 --- a/harmony/harmonytask/task_type_handler.go +++ b/harmony/harmonytask/task_type_handler.go @@ -6,7 +6,6 @@ import ( "fmt" "runtime" "strconv" - "sync/atomic" "time" logging "github.com/ipfs/go-log/v2" @@ -28,7 +27,6 @@ type taskTypeHandler struct { TaskInterface TaskTypeDetails TaskEngine *TaskEngine - Count atomic.Int32 } func (h *taskTypeHandler) AddTask(extra func(TaskID, *harmonydb.Tx) (bool, error)) { @@ -87,7 +85,7 @@ top: // 1. Can we do any more of this task type? // NOTE: 0 is the default value, so this way people don't need to worry about // this setting unless they want to limit the number of tasks of this type. 
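With the atomic Count field gone, concurrency limits live entirely in the Limiter. A standalone sketch of how the taskhelp.MaxCounter introduced below behaves, including the shared-counter pattern tasks.go uses to let SDR sealing and SDR unsealing draw from one budget (example values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/curio/harmony/taskhelp"
)

func main() {
	// One counter shared by two task types, mirroring how tasks.go passes the
	// same sdrMax to seal.NewSDRTask and unseal.NewTaskUnsealSDR.
	limit := taskhelp.Max(2)

	limit.Add(1)                               // first task starts
	limit.Add(1)                               // second task starts
	fmt.Println(limit.Active(), limit.AtMax()) // 2 true -> further tasks are refused

	limit.Add(-1)                              // one task completes
	fmt.Println(limit.Active(), limit.AtMax()) // 1 false

	// Zero (or leaving TaskTypeDetails.Max nil, which New() replaces with
	// taskhelp.Max(0)) means unrestricted.
	fmt.Println(taskhelp.Max(0).AtMax()) // false
}
```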
- if h.Max > 0 && int(h.Count.Load()) >= h.Max { + if h.Max.AtMax() { log.Debugw("did not accept task", "name", h.Name, "reason", "at max already") return false } @@ -174,10 +172,10 @@ canAcceptAgain: tag.Upsert(sourceTag, from), }, TaskMeasures.TasksStarted.M(1)) - h.Count.Add(1) + h.Max.Add(1) _ = stats.RecordWithTags(context.Background(), []tag.Mutator{ tag.Upsert(taskNameTag, h.Name), - }, TaskMeasures.ActiveTasks.M(int64(h.Count.Load()))) + }, TaskMeasures.ActiveTasks.M(int64(h.Max.ActiveThis()))) go func() { log.Infow("Beginning work on Task", "id", *tID, "from", from, "name", h.Name) @@ -202,7 +200,7 @@ canAcceptAgain: "while processing "+h.Name+" task "+strconv.Itoa(int(*tID))+": ", r, " Stack: ", string(stackSlice[:sz])) } - h.Count.Add(-1) + h.Max.Add(-1) releaseStorage() h.recordCompletion(*tID, sectorID, workStart, done, doErr) @@ -242,7 +240,7 @@ func (h *taskTypeHandler) recordCompletion(tID TaskID, sectorID *abi.SectorID, w _ = stats.RecordWithTags(context.Background(), []tag.Mutator{ tag.Upsert(taskNameTag, h.Name), - }, TaskMeasures.ActiveTasks.M(int64(h.Count.Load()))) + }, TaskMeasures.ActiveTasks.M(int64(h.Max.ActiveThis()))) duration := workEnd.Sub(workStart).Seconds() TaskMeasures.TaskDuration.Observe(duration) @@ -340,7 +338,7 @@ VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING id`, tID, h.Name, postedTime.U func (h *taskTypeHandler) AssertMachineHasCapacity() error { r := h.TaskEngine.ResourcesAvailable() - if h.Max > 0 && int(h.Count.Load()) >= h.Max { + if h.Max.AtMax() { return errors.New("Did not accept " + h.Name + " task: at max already") } diff --git a/harmony/taskhelp/max.go b/harmony/taskhelp/max.go new file mode 100644 index 000000000..05e89c404 --- /dev/null +++ b/harmony/taskhelp/max.go @@ -0,0 +1,42 @@ +package taskhelp + +import ( + "sync/atomic" +) + +type MaxCounter struct { + // maximum number of tasks of this type that can be run + N int + + // current number of tasks of this type that are running (shared) + current *atomic.Int32 + + // current number of tasks of this type that are running (per task) + currentThis *atomic.Int32 +} + +func (m *MaxCounter) AtMax() bool { + return m.Max() > 0 && m.Active() >= m.Max() +} + +func (m *MaxCounter) Max() int { + return m.N +} + +// note: cur can't be called on counters for which max is 0 +func (m *MaxCounter) Active() int { + return int(m.current.Load()) +} + +func (m *MaxCounter) ActiveThis() int { + return int(m.currentThis.Load()) +} + +func (m *MaxCounter) Add(n int) { + m.current.Add(int32(n)) + m.currentThis.Add(int32(n)) +} + +func Max(n int) *MaxCounter { + return &MaxCounter{N: n, current: new(atomic.Int32), currentThis: new(atomic.Int32)} +} diff --git a/lib/dealdata/dealdata.go b/lib/dealdata/dealdata.go index c3c658a0e..16495ca15 100644 --- a/lib/dealdata/dealdata.go +++ b/lib/dealdata/dealdata.go @@ -20,8 +20,8 @@ import ( "github.com/filecoin-project/curio/lib/filler" "github.com/filecoin-project/curio/lib/storiface" + "github.com/filecoin-project/lotus/chain/proofs" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) var log = logging.Logger("dealdata") @@ -73,6 +73,51 @@ func DealDataSnap(ctx context.Context, db *harmonydb.DB, sc *ffi.SealCalls, spId return getDealMetadata(ctx, db, sc, spt, pieces, false) } +func UnsealedCidFromPieces(ctx context.Context, db *harmonydb.DB, spId, sectorNumber int64) (cid.Cid, error) { + var sectorParams []struct { + RegSealProof int64 `db:"reg_seal_proof"` + } + err := 
db.Select(ctx, §orParams, ` + SELECT reg_seal_proof + FROM sectors_meta + WHERE sp_id = $1 AND sector_num = $2`, spId, sectorNumber) + if err != nil { + return cid.Undef, xerrors.Errorf("getting sector params: %w", err) + } + if len(sectorParams) != 1 { + return cid.Undef, xerrors.Errorf("expected 1 sector param, got %d", len(sectorParams)) + } + + var minDealMetadata []struct { + PieceIndex int64 `db:"piece_num"` + PieceCID string `db:"piece_cid"` + PieceSize int64 `db:"piece_size"` + } + err = db.Select(ctx, &minDealMetadata, ` + SELECT piece_num, piece_cid, piece_size + FROM sectors_meta_pieces + WHERE sp_id = $1 AND sector_num = $2 ORDER BY piece_num ASC`, spId, sectorNumber) + if err != nil { + return cid.Undef, xerrors.Errorf("getting pieces: %w", err) + } + + var dms []dealMetadata + for _, md := range minDealMetadata { + dms = append(dms, dealMetadata{ + PieceIndex: md.PieceIndex, + PieceCID: md.PieceCID, + PieceSize: md.PieceSize, + }) + } + + dd, err := getDealMetadata(ctx, db, nil, abi.RegisteredSealProof(sectorParams[0].RegSealProof), dms, true) + if err != nil { + return cid.Undef, xerrors.Errorf("getting deal metadata: %w", err) + } + + return dd.CommD, nil +} + func getDealMetadata(ctx context.Context, db *harmonydb.DB, sc *ffi.SealCalls, spt abi.RegisteredSealProof, pieces []dealMetadata, commDOnly bool) (*DealData, error) { ssize, err := spt.SectorSize() if err != nil { @@ -106,7 +151,7 @@ func getDealMetadata(ctx context.Context, db *harmonydb.DB, sc *ffi.SealCalls, s return nil, xerrors.Errorf("parsing piece cid: %w", err) } - pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize)) + pads, padLength := proofs.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize)) offset += padLength.Unpadded() for _, pad := range pads { diff --git a/lib/ffi/cunative/decode_sdr.go b/lib/ffi/cunative/decode_sdr.go new file mode 100644 index 000000000..20746645f --- /dev/null +++ b/lib/ffi/cunative/decode_sdr.go @@ -0,0 +1,230 @@ +package cunative + +/* +#cgo CFLAGS: -I${SRCDIR}/../../../extern/supra_seal/deps/blst/bindings +#cgo LDFLAGS: -L${SRCDIR}/../../../extern/supra_seal/deps/blst -lblst +#include +#include +#include "blst.h" + +// Decode function using blst_fr_sub +void curio_blst_decode(const uint8_t *replica, const uint8_t *key, uint8_t *out, size_t len) { + blst_fr value, k, result; + + for (size_t i = 0; i < len; i += 32) { + // Read 32 bytes (256 bits) from replica and key + blst_fr_from_uint64(&value, (const uint64_t*)(replica + i)); + blst_fr_from_uint64(&k, (const uint64_t*)(key + i)); + + // Perform the decoding operation using blst_fr_sub + blst_fr_sub(&result, &value, &k); + + // Write the result to the output + blst_uint64_from_fr((uint64_t*)(out + i), &result); + } +} +*/ +import "C" +import ( + "io" + "runtime" + "sync" + "unsafe" + + pool "github.com/libp2p/go-buffer-pool" +) + +/* + +Simple Sequential implementation for reference: + +func Decode(replica, key io.Reader, out io.Writer) error { + const bufSz = 1 << 20 + + var rbuf, kbuf [bufSz]byte + var obuf [bufSz]byte + + for { + // Read replica + rn, err := io.ReadFull(replica, rbuf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + return nil + } + return err + } + + // Read key + kn, err := io.ReadFull(key, kbuf[:rn]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if kn != rn { + return io.ErrUnexpectedEOF + } + + // Decode the chunk using blst_decode + C.curio_blst_decode( + 
(*C.uint8_t)(unsafe.Pointer(&rbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&kbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&obuf[0])), + C.size_t(rn), + ) + + // Write the chunk + _, err = out.Write(obuf[:rn]) + if err != nil { + return err + } + + if rn < len(rbuf) { + return nil + } + } +} + +*/ + +const ( + bufSz = 4 << 20 + nWorkers = 24 +) + +func Decode(replica, key io.Reader, out io.Writer) error { + workers := nWorkers + if runtime.NumCPU() < workers { + workers = runtime.NumCPU() + } + + var wg sync.WaitGroup + errChan := make(chan error, 1) + jobChan := make(chan job, workers) + resultChan := make(chan result, workers) + + // Start worker goroutines + for i := 0; i < workers; i++ { + wg.Add(1) + go worker(&wg, jobChan, resultChan) + } + + // Start a goroutine to close the job channel when all reading is done + go func() { + defer close(jobChan) + chunkID := int64(0) + for { + rbuf := pool.Get(bufSz) + kbuf := pool.Get(bufSz) + + // Read replica + rn, err := io.ReadFull(replica, rbuf) + if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + return + } + errChan <- err + return + } + + // Read key + kn, err := io.ReadFull(key, kbuf[:rn]) + if err != nil && err != io.ErrUnexpectedEOF { + errChan <- err + return + } + + if kn != rn { + errChan <- io.ErrUnexpectedEOF + return + } + + // worker will release rbuff and kbuf, so get len here + rblen := len(rbuf) + + jobChan <- job{rbuf[:rn], kbuf[:rn], rn, chunkID} + chunkID++ + + if rn < rblen { + return + } + } + }() + + // Start a goroutine to close the result channel when all jobs are done + go func() { + wg.Wait() + close(resultChan) + }() + + // Write results in order + var writeErr error + expectedChunkID := int64(0) + resultBuffer := make(map[int64]result) + + for r := range resultChan { + for { + if r.chunkID == expectedChunkID { + _, err := out.Write(r.data) + pool.Put(r.data) + if err != nil && writeErr == nil { + writeErr = err + } + expectedChunkID++ + + // Check if we have buffered results that can now be written + if nextResult, ok := resultBuffer[expectedChunkID]; ok { + r = nextResult + delete(resultBuffer, expectedChunkID) + continue + } + break + } else { + // Buffer this result for later + resultBuffer[r.chunkID] = r + break + } + } + } + + close(errChan) + + // Check for any errors + for err := range errChan { + if err != nil { + return err + } + } + + return writeErr +} + +type job struct { + rbuf []byte + kbuf []byte + size int + chunkID int64 +} + +type result struct { + data []byte + size int + chunkID int64 +} + +func worker(wg *sync.WaitGroup, jobs <-chan job, results chan<- result) { + defer wg.Done() + for j := range jobs { + obuf := pool.Get(j.size) + C.curio_blst_decode( + (*C.uint8_t)(unsafe.Pointer(&j.rbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&j.kbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&obuf[0])), + C.size_t(j.size), + ) + + pool.Put(j.rbuf) + pool.Put(j.kbuf) + + results <- result{obuf, j.size, j.chunkID} + } +} diff --git a/lib/ffi/cunative/decode_snap.go b/lib/ffi/cunative/decode_snap.go new file mode 100644 index 000000000..25b346c3d --- /dev/null +++ b/lib/ffi/cunative/decode_snap.go @@ -0,0 +1,524 @@ +package cunative + +/* +#cgo CFLAGS: -I${SRCDIR}/../../../extern/supra_seal/deps/blst/bindings +#cgo LDFLAGS: -L${SRCDIR}/../../../extern/supra_seal/deps/blst -lblst +#include +#include +#include "blst.h" + +void snap_decode_loop(const uint8_t *replica, const uint8_t *key, const uint8_t *rho_invs, uint8_t *out, size_t node_count, size_t node_size) { + blst_fr replica_fr, key_fr, rho_inv_fr, out_fr; + 
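+    // Each 32-byte node is decoded in the BLS12-381 scalar field as
+    //   data_i = (replica_i - key_i) * rho_i^-1
+    // where rho_invs is expected to hold one precomputed 32-byte rho^-1 per
+    // node, matching the Rhos table built on the Go side.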
+ for (size_t i = 0; i < node_count; i++) { + // Read replica data + blst_fr_from_uint64(&replica_fr, (const uint64_t*)(replica + i * node_size)); + + // Read key data + blst_fr_from_uint64(&key_fr, (const uint64_t*)(key + i * node_size)); + + // Read rho inverse + blst_fr_from_uint64(&rho_inv_fr, (const uint64_t*)(rho_invs + i * 32)); + + // Perform the decoding operation + blst_fr_sub(&out_fr, &replica_fr, &key_fr); + blst_fr_mul(&out_fr, &out_fr, &rho_inv_fr); + + // Write the result + blst_uint64_from_fr((uint64_t*)(out + i * node_size), &out_fr); + } +} +*/ +import "C" + +import ( + "encoding/hex" + "io" + "math/big" + "math/bits" + "runtime" + "sync" + "unsafe" + + "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" + "github.com/ipfs/go-cid" + pool "github.com/libp2p/go-buffer-pool" + "github.com/snadrus/must" + "github.com/triplewz/poseidon" + "golang.org/x/xerrors" + + commcid "github.com/filecoin-project/go-fil-commcid" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/proof" +) + +type B32le = [32]byte +type BytesLE = []byte + +func DecodeSnap(spt abi.RegisteredSealProof, commD, commK cid.Cid, key, replica io.Reader, out io.Writer) error { + ssize, err := spt.SectorSize() + if err != nil { + return xerrors.Errorf("failed to get sector size: %w", err) + } + + nodesCount := uint64(ssize / proof.NODE_SIZE) + + commDNew, err := commcid.CIDToDataCommitmentV1(commD) + if err != nil { + return xerrors.Errorf("failed to convert commD to CID: %w", err) + } + + commROld, err := commcid.CIDToReplicaCommitmentV1(commK) + if err != nil { + return xerrors.Errorf("failed to convert commK to replica commitment: %w", err) + } + + // Calculate phi + phi, err := Phi(commDNew, commROld) + if err != nil { + return xerrors.Errorf("failed to calculate phi: %w", err) + } + + // Precompute all rho^-1 values + h := hDefault(nodesCount) + rhoInvs, err := NewInv(phi, h, nodesCount) + if err != nil { + return xerrors.Errorf("failed to compute rho inverses: %w", err) + } + + // Convert rhoInvs to byte slice + rhoInvsBytes := make([]byte, nodesCount*32) + for i := uint64(0); i < nodesCount; i++ { + rhoInv := rhoInvs.Get(i) + copy(rhoInvsBytes[i*32:(i+1)*32], rhoInv[:]) + } + + workers := nWorkers + if runtime.NumCPU() < workers { + workers = runtime.NumCPU() + } + + var wg sync.WaitGroup + errChan := make(chan error, 1) + jobChan := make(chan jobSnap, workers) + resultChan := make(chan resultSnap, workers) + + // Start worker goroutines + for i := 0; i < workers; i++ { + wg.Add(1) + go workerSnap(&wg, jobChan, resultChan, rhoInvs) + } + + // Start a goroutine to close the job channel when all reading is done + go func() { + defer close(jobChan) + chunkID := int64(0) + for { + rbuf := pool.Get(bufSz) + kbuf := pool.Get(bufSz) + + // Read replica + rn, err := io.ReadFull(replica, rbuf) + if err != nil && err != io.ErrUnexpectedEOF { + if err == io.EOF { + return + } + errChan <- err + return + } + + // Read key + kn, err := io.ReadFull(key, kbuf[:rn]) + if err != nil && err != io.ErrUnexpectedEOF { + errChan <- err + return + } + + if kn != rn { + errChan <- io.ErrUnexpectedEOF + return + } + + // worker will release rbuf and kbuf, so get len here + rblen := len(rbuf) + + jobChan <- jobSnap{rbuf[:rn], kbuf[:rn], rn, chunkID} + chunkID++ + + if rn < rblen { + return + } + } + }() + + // Start a goroutine to close the result channel when all jobs are done + go func() { + wg.Wait() + close(resultChan) + }() + + // Write results in order + var writeErr error + 
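+	// Workers can finish chunks out of order; anything ahead of the next
+	// expected chunkID is parked in resultBuffer and flushed once the sequence
+	// catches up, so the unsealed output is written strictly in order.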
expectedChunkID := int64(0) + resultBuffer := make(map[int64]resultSnap) + + for r := range resultChan { + for { + if r.chunkID == expectedChunkID { + _, err := out.Write(r.data) + pool.Put(r.data) + if err != nil && writeErr == nil { + writeErr = err + } + expectedChunkID++ + + // Check if we have buffered results that can now be written + if nextResult, ok := resultBuffer[expectedChunkID]; ok { + r = nextResult + delete(resultBuffer, expectedChunkID) + continue + } + break + } else { + // Buffer this result for later + resultBuffer[r.chunkID] = r + break + } + } + } + + close(errChan) + + // Check for any errors + for err := range errChan { + if err != nil { + return err + } + } + + return writeErr +} + +type jobSnap struct { + rbuf []byte + kbuf []byte + size int + chunkID int64 +} + +type resultSnap struct { + data []byte + size int + chunkID int64 +} + +func workerSnap(wg *sync.WaitGroup, jobs <-chan jobSnap, results chan<- resultSnap, rhos *Rhos) { + defer wg.Done() + for j := range jobs { + obuf := pool.Get(j.size) + + // Calculate the starting node index for this chunk + startNode := uint64(j.chunkID) * uint64(bufSz) / proof.NODE_SIZE + nodeCount := uint64(j.size) / proof.NODE_SIZE + + // Convert rhoInvs to byte slice + rhoInvsBytes := pool.Get(int(nodeCount * 32)) + for i := uint64(0); i < nodeCount; i++ { + rhoInv := rhos.Get(startNode + i) + copy(rhoInvsBytes[i*32:(i+1)*32], rhoInv[:]) + } + + C.snap_decode_loop( + (*C.uint8_t)(unsafe.Pointer(&j.rbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&j.kbuf[0])), + (*C.uint8_t)(unsafe.Pointer(&rhoInvsBytes[0])), + (*C.uint8_t)(unsafe.Pointer(&obuf[0])), + C.size_t(nodeCount), + C.size_t(proof.NODE_SIZE), + ) + + pool.Put(j.rbuf) + pool.Put(j.kbuf) + + results <- resultSnap{obuf, j.size, j.chunkID} + } +} + +// Phi implements the phi function as described in the Rust code. +// It computes phi = H(comm_d_new, comm_r_old) using Poseidon hash with a custom domain separation tag. 
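+// Both inputs are the raw 32-byte commitments (as extracted from the CommD/CommR
+// CIDs) interpreted as little-endian field elements, and the result is returned
+// as little-endian bytes; rho(phi, high) consumes it to derive the per-range
+// decoding factors.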
+func Phi(commDNew, commROld BytesLE) (B32le, error) { + inputA := bigIntLE(commDNew) + inputB := bigIntLE(commROld) + input := []*big.Int{inputA, inputB} + + cons, err := poseidon.GenPoseidonConstants[*CustomDomainSepTagElement](3) + if err != nil { + return [32]byte{}, err + } + + // Compute the hash + h, err := poseidon.Hash(input, cons, poseidon.OptimizedStatic) + if err != nil { + return [32]byte{}, xerrors.Errorf("failed to compute Poseidon hash: %w", err) + } + + hElement := ffElementBytesLE(new(fr.Element).SetBigInt(h)) + + return hElement, nil +} + +func rho(phi B32le, high uint32) (*fr.Element, error) { + inputA := bigIntLE(phi[:]) + inputB := new(big.Int).SetUint64(uint64(high)) + input := []*big.Int{inputA, inputB} + + cons, err := poseidon.GenPoseidonConstants[*CustomDomainSepTagElement](3) + if err != nil { + return nil, err + } + + // Compute the hash + h, err := poseidon.Hash(input, cons, poseidon.OptimizedStatic) + if err != nil { + return nil, err + } + + return new(fr.Element).SetBigInt(h), nil +} + +// Rhos represents a collection of precomputed rho values +type Rhos struct { + rhos map[uint64]B32le + bitsShr uint64 +} + +// NewInv generates the inverted rhos for a certain number of nodes +func NewInv(phi [32]byte, h uint64, nodesCount uint64) (*Rhos, error) { + return NewInvRange(phi, h, nodesCount, 0, nodesCount) +} + +// NewInvRange generates the inverted rhos for a certain number of nodes and range +func NewInvRange(phi [32]byte, h uint64, nodesCount, offset, num uint64) (*Rhos, error) { + bitsShr := calcBitsShr(h, nodesCount) + highRange := calcHighRange(offset, num, bitsShr) + + rhos := make(map[uint64]B32le) + for high := highRange.Start; high <= highRange.End; high++ { + rhoVal, err := rho(phi, uint32(high)) + if err != nil { + return nil, err + } + + invRho := new(fr.Element).Inverse(rhoVal) // same as blst_fr_eucl_inverse?? + rhos[high] = ffElementBytesLE(invRho) + } + + return &Rhos{ + rhos: rhos, + bitsShr: bitsShr, + }, nil +} + +// Get retrieves the rho for a specific node offset +func (r *Rhos) Get(offset uint64) B32le { + high := offset >> r.bitsShr + return r.rhos[high] +} + +func calcBitsShr(h uint64, nodesCount uint64) uint64 { + nodeIndexBitLen := uint64(bits.TrailingZeros64(nodesCount)) + return nodeIndexBitLen - h +} + +type Range struct { + Start, End uint64 +} + +func calcHighRange(offset, num uint64, bitsShr uint64) Range { + firstHigh := offset >> bitsShr + lastHigh := (offset + num - 1) >> bitsShr + return Range{Start: firstHigh, End: lastHigh} +} + +// the `h` values allowed for the given sector-size. Each `h` value is a possible number +// of high bits taken from each challenge `c`. A single value of `h = hs[i]` is taken from `hs` +// for each proof; the circuit takes `h_select = 2^i` as a public input. +// +// Those values are hard-coded for the circuit and cannot be changed without another trusted +// setup. +// +// Returns the `h` for the given sector-size. The `h` value is the number of high bits taken from +// each challenge `c`. For production use, it was determined to use the value at index 3, which +// translates to a value of 10 for production sector sizes. 
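+// In this decoder h only controls how rho values are grouped: for a sector with
+// 2^n nodes, node i selects its rho by its top h bits, i.e. i >> (n - h)
+// (see calcBitsShr and Rhos.Get). Sectors of 32KiB or less (the small test
+// sizes) use h = 1.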
+func hDefault(nodesCount uint64) uint64 { + const nodes32KiB = 32 * 1024 / proof.NODE_SIZE + if nodesCount <= nodes32KiB { + return 1 + } + return 10 +} + +func ffElementBytesLE(z *fr.Element) (res B32le) { + fr.LittleEndian.PutElement(&res, *z) + return +} + +func bigIntLE(in BytesLE) *big.Int { + // copy to b + b := make([]byte, len(in)) + copy(b, in) + + // invert + for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { + b[i], b[j] = b[j], b[i] + } + + // SetBytes is BE, so we needed to invert + return new(big.Int).SetBytes(b) +} + +// CustomDomainSepTagElement is a custom element which overrides SetString used by the poseidon hash function to set +// the default hardcoded DST. We hijack the SetString function to set the DST to the hardcoded value needed for Snap. +type CustomDomainSepTagElement struct { + *fr.Element +} + +func (c *CustomDomainSepTagElement) SetUint64(u uint64) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.SetUint64(u) + return c +} + +func (c *CustomDomainSepTagElement) SetBigInt(b *big.Int) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.SetBigInt(b) + return c +} + +func (c *CustomDomainSepTagElement) SetBytes(bytes []byte) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.SetBytes(bytes) + return c +} + +func (c *CustomDomainSepTagElement) BigInt(b *big.Int) *big.Int { + if c.Element == nil { + c.Element = new(fr.Element) + } + + return c.Element.BigInt(b) +} + +func (c *CustomDomainSepTagElement) SetOne() *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.SetOne() + return c +} + +func (c *CustomDomainSepTagElement) SetZero() *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.SetZero() + return c +} + +func (c *CustomDomainSepTagElement) Inverse(e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Inverse(e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Set(e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Set(e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Square(e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Square(e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Mul(e2 *CustomDomainSepTagElement, e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Mul(e2.Element, e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Add(e2 *CustomDomainSepTagElement, e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Add(e2.Element, e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Sub(e2 *CustomDomainSepTagElement, e *CustomDomainSepTagElement) *CustomDomainSepTagElement { + if c.Element == nil { + c.Element = new(fr.Element) + } + + c.Element = c.Element.Sub(e2.Element, e.Element) + return c +} + +func (c *CustomDomainSepTagElement) Cmp(x *CustomDomainSepTagElement) int { + if c.Element == nil { + c.Element = new(fr.Element) + } + + return 
c.Element.Cmp(x.Element) +} + +func (c *CustomDomainSepTagElement) SetString(s string) (*CustomDomainSepTagElement, error) { + if s == "3" { + genRandomnessDST := "0000000000010000000000000000000000000000000000000000000000000000" + dstLE := must.One(hex.DecodeString(genRandomnessDST)) + inverted := make([]byte, len(dstLE)) + for i := 0; i < len(dstLE); i++ { + inverted[i] = dstLE[len(dstLE)-1-i] + } + + c.SetBytes(inverted) + return c, nil + } + + el, err := c.Element.SetString(s) + if err != nil { + return nil, err + } + + c.Element = el + return c, nil +} + +var _ poseidon.Element[*CustomDomainSepTagElement] = &CustomDomainSepTagElement{} diff --git a/lib/ffi/cunative/decode_snap_test.go b/lib/ffi/cunative/decode_snap_test.go new file mode 100644 index 000000000..5cbac4ae6 --- /dev/null +++ b/lib/ffi/cunative/decode_snap_test.go @@ -0,0 +1,182 @@ +package cunative + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/detailyang/go-fallocate" + "github.com/stretchr/testify/require" + + ffi "github.com/filecoin-project/filecoin-ffi" + commp2 "github.com/filecoin-project/go-commp-utils/v2" + "github.com/filecoin-project/go-commp-utils/zerocomm" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/proof" + + "github.com/filecoin-project/lotus/lib/nullreader" + "github.com/filecoin-project/lotus/storage/sealer/fr32" +) + +func TestSnapDecode(t *testing.T) { + t.Run("2K", testSnapDecode(abi.RegisteredSealProof_StackedDrg2KiBV1_1)) + t.Run("8M", testSnapDecode(abi.RegisteredSealProof_StackedDrg8MiBV1_1)) +} + +func testSnapDecode(spt abi.RegisteredSealProof) func(t *testing.T) { + return func(t *testing.T) { + td := t.TempDir() + cache := filepath.Join(td, "cache") + unseal := filepath.Join(td, "unsealed") + sealKey := filepath.Join(td, "sealed") + + require.NoError(t, os.MkdirAll(cache, 0755)) + + ssize, err := spt.SectorSize() + require.NoError(t, err) + + // write null "unsealed" + { + uf, err := os.Create(unseal) + require.NoError(t, err) + _, err = io.CopyN(uf, &nullreader.Reader{}, int64(ssize)) + require.NoError(t, err) + require.NoError(t, uf.Close()) + } + + { + // proofs are really dumb + f, err := os.Create(sealKey) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + } + + snum := abi.SectorNumber(123) + miner := abi.ActorID(545) + ticket := abi.SealRandomness{1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + pieces := []abi.PieceInfo{{ + Size: abi.PaddedPieceSize(ssize), + PieceCID: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()), + }} + + p1o, err := ffi.SealPreCommitPhase1(spt, cache, unseal, sealKey, snum, miner, ticket, pieces) + require.NoError(t, err) + commK, _, err := ffi.SealPreCommitPhase2(p1o, cache, sealKey) + require.NoError(t, err) + + // snap encode + update := filepath.Join(td, "update") + updateCache := filepath.Join(td, "update-cache") + + // data to encode + unsBuf := make([]byte, abi.PaddedPieceSize(ssize).Unpadded()) + _, _ = rand.Read(unsBuf) + + padded := make([]byte, abi.PaddedPieceSize(ssize)) + fr32.Pad(unsBuf, padded) + + // write to the update file as fr32 padded + { + f, err := os.Create(unseal) + require.NoError(t, err) + + _, err = io.Copy(f, bytes.NewReader(padded)) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + } + + unsealedCid, err := commp2.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1_1, 
bytes.NewReader(unsBuf), abi.PaddedPieceSize(ssize).Unpadded()) + require.NoError(t, err) + + pieces = []abi.PieceInfo{{ + Size: abi.PaddedPieceSize(ssize), + PieceCID: unsealedCid, + }} + + upt, err := spt.RegisteredUpdateProof() + require.NoError(t, err) + + { + require.NoError(t, os.MkdirAll(updateCache, 0755)) + f, err := os.Create(update) + require.NoError(t, err) + require.NoError(t, fallocate.Fallocate(f, 0, int64(ssize))) + err = f.Close() + require.NoError(t, err) + } + + _, commD, err := ffi.SectorUpdate.EncodeInto(upt, update, updateCache, sealKey, cache, unseal, pieces) + require.NoError(t, err) + + // snap decode + keyReader, err := os.Open(sealKey) + require.NoError(t, err) + updateReader, err := os.Open(update) + require.NoError(t, err) + + var out bytes.Buffer + err = DecodeSnap(spt, commD, commK, keyReader, updateReader, &out) + require.NoError(t, err) + + // extract with rust + decOut := filepath.Join(td, "goldenOut") + + f, err := os.Create(decOut) + require.NoError(t, err) + require.NoError(t, fallocate.Fallocate(f, 0, int64(ssize))) + require.NoError(t, f.Close()) + + decStart := time.Now() + err = ffi.SectorUpdate.DecodeFrom(upt, decOut, update, sealKey, cache, commD) + require.NoError(t, err) + + decDone := time.Now() + t.Logf("Decode time: %s", decDone.Sub(decStart)) + t.Logf("Decode throughput: %f MB/s", float64(ssize)/decDone.Sub(decStart).Seconds()/1024/1024) + + // read rust data + rustOut, err := os.Open(decOut) + require.NoError(t, err) + + var outRust bytes.Buffer + _, err = io.Copy(&outRust, rustOut) + require.NoError(t, err) + require.NoError(t, rustOut.Close()) + + // compare rust out with padded + require.Equal(t, outRust.Bytes(), padded) + + // check data + require.Equal(t, int(ssize), out.Len()) + + for i := 0; i < out.Len(); i += proof.NODE_SIZE { + require.Equal(t, padded[i:i+proof.NODE_SIZE], out.Bytes()[i:i+proof.NODE_SIZE]) + } + } +} + +func TestPhi(t *testing.T) { + d := "b0c133e15929f16f9491f9c82a128786d006a3b7286642cc78644c974f55c42f" + r := "58b65c3e1a1d52c078cb69b4ac995e515c54be9cbecd8ed28ee8009722d3c969" + + goodPhi := "436c953f3ae69bf47385748daf306871e6839a91d5229a55dda0e02653ce2f27" + + dBytes, err := hex.DecodeString(d) + require.NoError(t, err) + rBytes, err := hex.DecodeString(r) + require.NoError(t, err) + + phi, err := Phi(dBytes, rBytes) + require.NoError(t, err) + + require.Equal(t, goodPhi, hex.EncodeToString(phi[:])) +} diff --git a/lib/ffi/cunative/decode_test.go b/lib/ffi/cunative/decode_test.go new file mode 100644 index 000000000..afeee878f --- /dev/null +++ b/lib/ffi/cunative/decode_test.go @@ -0,0 +1,268 @@ +package cunative + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + UnsealedB64 = `uCZJ+CN+03dGb3H5I0IiufducP3GIlK7bz4RyZPOhQ3AzsnnL8C+pA09fQoH6UB8soyH3LJ3kn10 +2XGD05PfDiDhQ2NqIuHkkVyQe/TEpP4wJJWCM2TzWiDRwZipTEQZMVIJoXdbosLtW3itCf0h5X0W +5j/6w6bP5bS90MGlZwIpZ5polGh8dUHmpy3QSoXHwUQR1hca4Bvgh/7oQq2jD7xeF/ag22fLUbnI +EGBk92qHLM95dNhtjeHMrsfvVDgITMGzaZnZ/0VTg0G81zFZUNeAu88O6YTVQqvmjKlvDBlVXLIN +VaxXWccgfJ1fKl3rQpq3uRubhCkZMMuF7DU6EU9jDg4yAWC1eHjU9uEHmeX8fz4FZoN8dRzZKAxJ +iTISAZKCSE3poBO4HD10fW1lacZdxQnfJs9flKNsJxjQuydjzwEQ+5Z7MAqVQoNLDYQy0uyRja+0 +kHKnzE1XfVGVOcp8M4EfQbeMcrASYizLbhxkVlIGlgfYKn7N8O9uZ/Am+Oorz6tDDD+si/+9BHNI +NtnKyspoCPWBMmRMOioSUwEMWOIPeIVdGyNAFrtVx/fwAwlN5wszsJF0v4ZEB03pLLx0aEbK1Xjk +3MOFzNboBbDbOF/6tZ/LGfF7wJh80UAg9Tfb+j+pc+WyETC0E3c1zxu0uhgjqXLpZX4tdo8MpTxq 
+jZqCCLeT6yfwiBYGdAvgbv7qL8VbjqRaW5W4h1aSG2GeQQ3CT9TICuNcDFSAof815xWzI6zG4UJE +8w12D6gTxPg+6WXQaL8W9Ipkus6xoFvGfEi7KEBG1y6m2peY5RZ+piEx7W6qwgRnVB3BdTYwLFK3 +hDoZYUJehmf+GDzvMpI57Lvu7EVLdpThGxWguF1r6ptPd4e67XIuVU6iot01AXfdl2l2dM+zjajd +KkxUALftJXRhJDroqRw6/tfqBj2bExbPOF4VQ819qknGI1VEq1dgViCm8zbEo9xJBMTpDDm2SUbT +bD81gLnevOVDAzq5M0TxgkPzKEWGKpS4240zHfNNg10uQtSNC9mhGZNpBNQUNM7hVWC36KgwYDYe +qj80vOiagOVhzE/SQZfrZ2if4N3/9IN9AfIiXKDhW9MNBRGow6fpQnJsDf3sy/g23nGccXTSNTWa +TTaILlyYHBwf2W4pLGd/g6nMsZg2iB+Oz1cPgGvSYvsaCtUFaYkdGTUvYH4EtpI4KX3Lws5qpVPi ++eC6H1CI0enh7BdT8KCFNrGSM4rK/Lyn56i1GDnW8BM3Be1IBzJfN/00WEGi3wgn1811wDxvciqq ++lbjzefvc2uuKf9fICdx3b+A7bMRLB/Gdx/mpoKA8QUmSb8j+JKo9vu0x1uwhZIQ9NmcVjceHloT +/oBp1Xa2EeqjF21OLzHcG76RT0TaW+nyQlNUmWAEwq19qgM28eMikyi4lzGy1UaEf8K2IyIQevmq +QFePcRdv95f0VEuDrm1kBp90o0yu+c3MPWUOiELQcWpXbCCTO7PV1kGBUYDwpnJBFuFMXfVi0U7m +i/pmnrMHOy38IkginbslTZQWvjdPT3XeXQVaHTMa1WhIGMm0Vb/y5ltjhCxWE7mGePdT/ATZQk+a +GTi7A/Dy7XHoJ5adA87WdrxcIkg0xzu5HuM/JxVaCksFdiMFuOeRTeK3M3WwxcX3HdkXIU6HmRIy +eGGVe9Ajoha1iMqLI23BCU8LjzHTXM0rCBAy5onIhn3Ue68NC+kOE+OgTt2hDL42mgm+ygNusUpC +Bdxo6rCHKsC93xT6OYGB5OfYsRDjp1rqnfhhMk56C9E72/X+Vgp1RJ7PI4RC4UYMdUmTdViuOgnc ++MgfaWGUdQj3sz3H3i58W9C2i0XdurJyjm1p71M2YKR9Zzrg+L3gNaZF/HrkXdOkNQXXi/AT1iup +Gs9o5rb3S83MqVYAbn4FwvfP2jr1tFFDdS0nbDozlTZIwTwM4hK6Xu5kdchULz/Fs6+MHNVjw0zh +scba5vE00nx8z9PGPm6zLbmPow/WEwcxP1EzRY3MFM0JewMh5zKgvyqLcCzprKbTPfZq+j40oBd3 +uQ3Yg/soYqQtgFcU6NUdB25BaB3SR6R4g8aDOCI+BlGva2CDv7arApFqyh7/TbkORn+8iW6ndB5g +lh6yLHnLR6SSQIDuoqWItFeK3U/gkea5Ld0vy4JeIfc9qGIp8wGl5ItM1SRoEZFAvDNsltj0odmo +v7WaU/S+sSvCzATAptsl+48l9xU5KD3Wra/9E4+k3p884WqR0P61qalDLyCrlJBfp7obf7hml/PD +0pLxA/Tep/tyUPaYNbScidg4gkVLmMbkSmebBy8nUXA6tKoVqa71VPk7/L5JpC3ZmRMotgnYvBDp +EJFEb8eIHu94r2NXbxM/zpl6ElmnYQt8J8KbZu232hrXxIy9Ff86Ah9CXx4OX5+fCeT06h6sJRIg +mDZ4pnHz3yYAVFsPydGUY27y4PdgOTlbMp8hnchEFzydjZbOb0ATorgCKK7CH9Sx9aDbEdiBhUeT +0l9lDXpVMANS6rJakVSGT7WDmXhUbWrwaOmC94oixEk2C0mVfQ0MBhyZY8uN9L5xFC6/92HARzpX +Ww8RwQujXElTtjyiyT4zqAPOhO8lpCOpkxrihIJPzh6WlUqFsQ0+3mmAyErqG0zvtIsaFIZcwfUA +r7pV9tsjvV35LwOXXo4IROc/49EsuofW7atP6KW/1ACCiUSr5XCZ8Y0Hea0Q6tymK5U3tg0EEAID +AAAA` + + SealedB64 = `eCLQmsbCOE0l8dXnvcDXnapJx+UTMuACojnw0pGZL0IC1n3nK8oIQD79fjP7n8wtr1qQTWw4Ruc0 +FNATszFVNzwTXzRlGjw2aRzofPanVgV4lfDrSDm19uwy3y5Vb4Ab/u33KfxI3yz566sWQ3Q8lrlu +BBnK0pKrjiTqDVp/qDE9mh/olxh4J2HlxYt8jEguuf30YR98N3MtEuJOawSzNlpDLyRGyMJGupzy +Wr5BqrhE6FsA06gXAgF0UtI/nHs4JRGpSRHPxMSCeLtBzBpOuhJbwu0Y6ATgJA6vvD7blitroi/z +W0+lBMho+qa7frSHU1Q7EUvwAdQeGgxxUR2zQLs/BCi+n4b2d66kHT0dYkKHA3YQvdihJnDzKKPs +4qwcoSHhG44vnA04O7lm9POkjF/e6FLDArbtF+QUd9f9gzVI3skNH5bbZWgyzWCgfz+4xNHPCBEs +UDgs+Q+wUCXAVLGfZeF/sCz5uGwM+mGNeLeQx2so5izuHGeqzFAOSVJE4lh6CpL/dPrcUg7Xu4UE +knls4TBcACIDR0Cs0RIAWAI0WDKHpoUp6zCYHMIPA62BGsh5FwOk2HgUnUS6MEz6PiU8bxZSdrVK +/gHH/g44rbAJI/bDLO+09Xq4Ptd2gl8ntVGY9g9ek6/3UUd8hiiolpM27w8HmlGd/WrKKtP4EWze +66V5u9/U0FLy98AoVGpKhxd/Yib86s7YEe7DR+3cRkzWcPIo8rAR5YEfJ2tvOOxkcNY0kehb6oNj +1+KF8h8poN2jaXGx3q5OwYz0xak4I/HT5N4C1bxlepMj0E53bB9/PgypurTXI+ep4HzkP0IddZ3b +hEJ8NCX0EGuqQ3G7XYHMJ2kBOgSh1uD8+tzeTOa02FxjTQ10c3WWy4ZD2L5Z4oE2agiTe5j3qO6e +Pq6mAKcwUg18Uj3e2de/qdFgW2SYDt3mW2UTA46gGcJ2SHcCrbn7pCfkIF6L0A8aIRNDO2IT/xfd +1ArF9BKEI8mFvAiPW10F6JKHmO3QY24ZOUNlGd59ALaGiQ6k0vfLzb53G3dQ4f6YCRR6kMmZqDld +emXlH2QAFpRZKyCHXzht24ztc7b8ivKTScDK7efwNHQSPfRROXHfwP0EYkETRT5Sfr1AjFJ17br2 +rwFCNxq86iY2rTHOdKnsctBfenOdgIbtZpR1Z6ynvgb2PJ/Fx+IN/zteT3GAlD4EjPzr7QZvK5E2 +rbW8VffWnCnZUwV3RhTUQTGJlek0QINdH0MDe1Iabv4wXDfJdwcwIkafzMOkv2owcCK0Ejf8gpBS +uNrKS5DsZ4zBF1qplECI4DuPDw95iSpSnwY0cAexHZldSaCYTCn1MOozoU0KmxFFHbavci00W85Y 
+tUwaVrtj3nIv4drcghS2yYNoO9ZK6wHUPWwm+/tDd1+lwM3gBsvBg1UP/fn/vgjzdlndWoIKm5Wa +WOiMgzQKt2P475folJjAnaYJTBjpFOiaoSkmR5XTScjrLCIbAetro3QBTZwdLzy6siLfw6xNrztJ +5iqWn+L5HFm1UUIpo7yFgFk80/jnMnAL6m9j9qE+0xWJgFogO6dX0PpKgi4oNogijplymtXd2clc +/FToG4fwQ40bae9i0YYp9nbzSlIWQx1lFQMpUsB4IJhiwvNNMBDBrv9x8gCLhj90YZofRUqg6iux +aTQp69F1/w2lIawigIJ7He3amsAbRqcuxxPaCwtuvAhONK9v4FNsONtw71Z4H4ibqlT7urKdThPb +MnV/e1W05Ht5NPdH+XluVldGcxd0pRzR5LFFgqJPcK9gxjdZ9Hz8D6DDfqEEu8P5IkLoufLsNkhX +x7CLjqpASRHFLGuOyMO6H7V9kcv1AUSOpRNf84L/206VMRdTdR/cS1ePthw2mnvuy1UYz8xAh4XX +/k9WDNdqSWib5E6p20A1fR6wB/ACkumwwJSbC7TBRJGAO64WFevJsOwK9PESAWu5lrS69HhgYF18 +pX+QMeErhtNuS7f+BuPX3sa4lGVDH96gxopTYY1HQ3XO74aQLDSoU7ZU1KSxtz4I5w/eRVttiMhU +Bvf6HzpY81J09DgO2WHgRyoBjHshM23njVr+B1Bi8zUe2un4DNzqLha4bwtux4hNGcggE/DU+R8b +K+ErSsdRrzeUj5HbnaBKzAGtQqrufW7Ej3mMUzd4lgjpXDNFrlCSQY5/f1i59i0wb57LYmHYDZ+S +nwwyTz2pAHWxxyw4sdqvWqH4nAjVu13lGvy3iUKxiz9izk2FnVSaRZgTPEujCQeHTRlel5EQjQiS +d2otxs659WsWgqgDdZrEeLhM9yQ8tiERCXIfb/QujYsm6kfU+y1KkXHA+u/xqyX0ehh8EmA7bneu +U1xbjcmj9c2QF/Hv4jbiUFCIgLiwMDS7ZfoDFFVNKuUUiD477vH0g9V0TB6y9gtKuPsS/BvSjvpK +8JIIb3WpTcwssn92xkGIBCHVpakWq6QwkPgnSorVr1AMdm+FMdf5Pv+tLkvyA5qmDh4sq0T3hQFQ +zye+k8SgRvb6FGU22Tbjrao4ZwclGTArYWGL8uQE72rMvrluJRss6VIjwjTt9ofluy9dKFbyShjl +EnI7Jsb+a0U0S016BAQUq9GydXSiYZk4EBLvRFdj0KLcvbpiuxJEAgyLflJFUpdwgLC+LlOMWGtv +ldckCmNno7TrZWeKcG4qR2echMFZnWj1ugx0SP0lS0+/h6fj3bl8iXnjUQUFfuBeeKGEP0w=` + + KeyB64 = `wPuGoqJEZdXegWTumX615LLaVuhMD45HMvveCf7KqTRCB7T/+wlKmzDAASn0toux/M0IcbnAs2nA +Ol6Q3511KBwyG9H691pR179XAQLjsQZHcVtpFdXBm8xhHZarIjwCzZvuiITtPGoLkDNpOXcasTtY +HtnPDuzbqG8sPZjZQC8UM4V/A7D7sR//HV6sQcNm97jjiwdiV1dNiuNlKFcPJ57kFy6l7Fp7aOMp +Sl7dsk29u4yGXtCpdB+nowpQR0Mw2U/133f1xH4v9XmF9Oj0aTvaBh4K/38K4mLIL5VrihIWRn3l +BqNNqwBIfglcVFecELqDVy9VfaoF6kDrZOd4L2zc9RmMniZB/zXQJlsVyVyKgzcLV1UlsVMaAJej +WXoKoI9e00BG+/l/HnzydoY/I5mAI0nk2+aNg0CoT78tyA3lDsj9I/9fNV6dit1UcruF8uQ9e2F3 +v8WELMJY09MqG+ciMmBgb3VsRrz5lzXCCZsscRkiUCUW8ujc22Cf4WEd6m1OO+a7aLswxw4ZtxK8 +W6ChFmbz9yyBFNxfl+jtBAEoAFB3LgDMzw1YBge6O7WQFr8sMPdwKOef3b11Kf8QEmnHBtCHoDxm +IT5BMjhPpwAu6pbJdk/p24k8fj76sB4HwBm9+8+0H8pEQBfIcrFyx3eCNPfj8N6zl+yctEPsbC90 +Xgv3sihB5SoCb6oi4F5qGBmUMmGgXCp+tlgLwJZKK+s3L+VmotxI2p7CGhfvluwuicCBbTyVCEEf +5NQP43cV3ORkgAvhde83zQGQC9uGgpUNaJZHrHwfo2R99bbehggBmOp3zUUtYeJCjF8jygvtSEsk +AAhj0+KVigOsKjXMKu+SO60STb5VYEwb38c+lIhJ7sAT1oW5hQJodjihNeEj4QpZ0p4cB8lDG0bB +E2JSAPBCLJkaLgP2L7uFq/l1VCf9+sYXIwf+v8Aib3iwJCK+AWKbTgc+LSfHLDPQHE9ZLildtdEJ +aMuPdFmlZuNBuc7VJxkUZU+Ub6hKOdpgXbUx/OovfVhYRzoWxx4qtCsOF6M7rTC3s7PCpyBpSAM/ +0CWxY3tlla73XtC0HaGBcyROk9j8lW4WSM6nkUcP2aAEOOOpdcn1fYuYVEQmeUUboEukGt6it4Vc +Ysu5CL4jzgoX1MKkSEJt7yaTyNpm+GZflzxm50DVWwvbMsq/Xlnw5QYv7/J73qvLYn8gKzgEhj1U +s9QBNqdOyz/3Zu0jVnNOC4D2YV9qQ8a1N5pNYhlEfer5VkqAcNXQ6khqdIIC4GEJmVQ+UvqMEGao +vYPnfaj88yAT7lpJdBkXA3wOIltnXQuMJ+dNyYQwLJM3AOF0VJZMOu5+2fFZFX80KdwSHPYVPXRF +t8uwgEStzIiLyW2OU+PZrcXW65Fwjxjh+hjSYZs/tbEnFsqqFeee8CxXZchN6cFu95YmN2D6IJzv +F5H9ER2cv8sDmkxl5im4lQeYTImOIPJvbczv+IVLVfu9E6l1OTiWzDKA+xstiMl4nEGSZrfq3exi +WjAvAS/y4Su5LvoGBgFgM8UlFcGY4/osjGoJ2W4k/qxAaJFr5edk6Z7n/QHSIs+bFaIentAEl3rC +4hwtGJf9VRszQVnFzbhSf7qWKArie+Gr9h/pKqseFk1dTNBIeCgvYR26vovawHl8Q8EHJPwYURl/ +8dKTbwFSXffvmOGWXBW6E57PC49I6dkCvwOoJYGlNYt5uP9h1WpdJfjPoHnWEspkEEs98K4vnciY +LZkWkaQsuru7VOJNv/jscW9twQaR/cHmRrnjT1TVZN4k60FanXKHywH0Wh3C2XztrfhURJo+/D57 +zudrJUms0wjOeC3H6ZQ+xOTGBYYYR5EbF6b1Ay/Je6oXytxyfGH7FbFJuqFRPKhJllBBQ9wssVku +5IDtJSBz/ZrOOviobcIvuybgLLUN3ZdtS2d0n3mOr1o4enEKM9gPUv6lfim+0Sv04gQu2KP8nBCb +87i1Su/2s1bye+M3yHQksQ0p8VVtC9dvhzkgHAB7LqjEdINvRQEIlIvJY3jICpg0qRlzSxw56LDd 
+TOkinD4vka5GdOH58IvCQLy/I15P68huCpR6zy0k7eRubol1TSU/LIVNpexuec8+00hkiYEthQG7 +lMJ5HU6GZ5MBTxHt+vrBF6oiZVoO7IcKYpxciLQZdRGrtNAbu07tXAIzqjNR5ZzvsmpfzIjja8Xp +31aX+0jqTknv+id4Cv+JXxHTpfKbkyAPbUy6dbMMrZ8l7eLzzFXkm+7PDCv4dHYnpl5CGNmp9RTO +pNc7wtraTXCjMbJqP+Yn798Tdd/wHVssvgqEZ8UHPBvsNZ2+Un9UPHiE/jCoB/ga4QRUXFZjsWbF +QssWHgIb194XaI2YcyOjgrYNbl8Jzyg/PjhorWeVT8o9w7F92PK5gbYy7f+jl2yqrhceEf0laegq +WFyQyAO2baUsXiRn/W/zoLLixLG1cWvVXVkGrcGQmBRv6Ni2wZbmnEarBp0v5MX0GH1QmWx1ALq8 +/MdYhkpLFvOoKrLbR+JcXvW0zY7Qq8U6+HcI+1niKiGWs3DZpw0g5DaKXmhfAslyAwCeM5jvVuNl +WWwyPfSOV3l+vmN/KDnhAs7k8IR8vXWPfPcMwNQTAoRGKHDdCQUGJKIKtgdbNkuByySkGs0vl3Vu +5hzPE4dD5lbyNWTzEeAhA4Bcoe8s4+AezWAkYFdmdk49/mI4+Ejjl+vb2Ff0kwO4TAxNiT4=` +) + +func TestDecode(t *testing.T) { + unsealed, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(UnsealedB64, "\n", "")) + require.NoError(t, err) + sealed, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(SealedB64, "\n", "")) + require.NoError(t, err) + key, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(KeyB64, "\n", "")) + require.NoError(t, err) + + // unsealed has a trailer, so trim to 2k + unsealed = unsealed[:2<<10] + + require.Len(t, unsealed, 2<<10) + require.Len(t, sealed, 2<<10) + require.Len(t, key, 2<<10) + + // extend to 8M to test parallel decoding + repeats := (8 << 20) / len(unsealed) + unsealed = bytes.Repeat(unsealed, repeats) + sealed = bytes.Repeat(sealed, repeats) + key = bytes.Repeat(key, repeats) + + // Create readers for sealed and key data + sealedReader := bytes.NewReader(sealed) + keyReader := bytes.NewReader(key) + + // Create a buffer to store the decoded output + var decodedBuf bytes.Buffer + + // Call the Decode function + err = Decode(sealedReader, keyReader, &decodedBuf) + require.NoError(t, err) + + // Compare the decoded output with the expected unsealed data + decodedData, err := io.ReadAll(&decodedBuf) + require.NoError(t, err) + + // Debug: Print the first few bytes of each slice + t.Logf("First 32 bytes of unsealed: %s", hex.EncodeToString(unsealed[:32])) + t.Logf("First 32 bytes of sealed: %s", hex.EncodeToString(sealed[:32])) + t.Logf("First 32 bytes of key: %s", hex.EncodeToString(key[:32])) + t.Logf("First 32 bytes of decoded: %s", hex.EncodeToString(decodedData[:32])) + + // Find the first differing byte + for i := 0; i < len(unsealed) && i < len(decodedData); i++ { + if unsealed[i] != decodedData[i] { + t.Logf("First difference at index %d: expected %02x, got %02x", i, unsealed[i], decodedData[i]) + break + } + } + + // Compare the full slices + if !bytes.Equal(unsealed, decodedData) { + t.Errorf("Decoded data does not match expected unsealed data") + // Print more detailed diff information + for i := 0; i < len(unsealed) && i < len(decodedData); i += 32 { + end := i + 32 + if end > len(unsealed) { + end = len(unsealed) + } + t.Logf("unsealed[%d:%d]: %s", i, end, hex.EncodeToString(unsealed[i:end])) + t.Logf("decoded[%d:%d]: %s", i, end, hex.EncodeToString(decodedData[i:end])) + t.Logf("") + } + } else { + t.Logf("Decoded data matches expected unsealed data") + } + + require.Equal(t, unsealed, decodedData, "Decoded data does not match expected unsealed data") +} + +/* +2024-09-02: + +goos: linux +goarch: amd64 +pkg: github.com/filecoin-project/curio/lib/ffi/cunative +cpu: AMD Ryzen Threadripper PRO 7995WX 96-Cores +BenchmarkDecode4G +BenchmarkDecode4G-192 1 1201906605 ns/op 3573.46 MB/s + +goos: linux +goarch: amd64 +pkg: github.com/filecoin-project/curio/lib/ffi/cunative +cpu: AMD Ryzen 7 7840HS w/ Radeon 780M Graphics 
+BenchmarkDecode4G-16 1 23175794389 ns/op 185.32 MB/s + +goos: linux +goarch: amd64 +pkg: github.com/filecoin-project/curio/lib/ffi/cunative +cpu: Intel(R) Xeon(R) Silver 4310T CPU @ 2.30GHz +BenchmarkDecode4G-40 1 6854494251 ns/op 626.59 MB/s +*/ +func BenchmarkDecode4G(b *testing.B) { + // Size of the data to decode (4 GiB) + const dataSize = 4 << 30 + + // Decode the base64 strings for sealed and key data + sealed, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(SealedB64, "\n", "")) + if err != nil { + b.Fatal(err) + } + key, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(KeyB64, "\n", "")) + if err != nil { + b.Fatal(err) + } + + // Create 1 GiB buffers by repeating the sealed and key data + sealedRepeated := bytes.Repeat(sealed, dataSize/len(sealed)+1)[:dataSize] + keyRepeated := bytes.Repeat(key, dataSize/len(key)+1)[:dataSize] + + // Create readers for the data + sealedReader := bytes.NewReader(sealedRepeated) + keyReader := bytes.NewReader(keyRepeated) + + // Create a buffer for the output + var outputBuffer bytes.Buffer + + // Reset the timer before the benchmark loop + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // Reset the readers and output buffer for each iteration + _, _ = sealedReader.Seek(0, io.SeekStart) + _, _ = keyReader.Seek(0, io.SeekStart) + outputBuffer.Reset() + + // Call the Decode function + err := Decode(sealedReader, keyReader, &outputBuffer) + if err != nil { + b.Fatal(err) + } + + // Ensure the output size is correct + if outputBuffer.Len() != dataSize { + b.Fatalf("Expected output size %d, got %d", dataSize, outputBuffer.Len()) + } + } + + // Add custom metrics + b.SetBytes(dataSize) // This will report bytes/sec in the benchmark output +} diff --git a/lib/ffi/piece_funcs.go b/lib/ffi/piece_funcs.go index b7a582882..f09a3c26b 100644 --- a/lib/ffi/piece_funcs.go +++ b/lib/ffi/piece_funcs.go @@ -22,7 +22,7 @@ func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, defer done() dest := paths.Piece - tempDest := dest + ".tmp" + tempDest := dest + storiface.TempSuffix destFile, err := os.OpenFile(tempDest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { diff --git a/lib/ffi/scrub_funcs.go b/lib/ffi/scrub_funcs.go new file mode 100644 index 000000000..e18d92c60 --- /dev/null +++ b/lib/ffi/scrub_funcs.go @@ -0,0 +1,53 @@ +package ffi + +import ( + "bufio" + "context" + "io" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/lib/proof" + "github.com/filecoin-project/curio/lib/storiface" + + "github.com/filecoin-project/lotus/storage/sealer/fr32" +) + +func (sb *SealCalls) CheckUnsealedCID(ctx context.Context, s storiface.SectorRef) (cid.Cid, error) { + reader, err := sb.sectors.storage.ReaderSeq(ctx, s, storiface.FTUnsealed) + if err != nil { + return cid.Undef, xerrors.Errorf("getting unsealed sector reader: %w", err) + } + defer reader.Close() + + ssize, err := s.ProofType.SectorSize() + if err != nil { + return cid.Undef, xerrors.Errorf("getting sector size: %w", err) + } + + startTime := time.Now() + cc := new(proof.DataCidWriter) + + upReader, err := fr32.NewUnpadReader(bufio.NewReaderSize(io.LimitReader(reader, int64(ssize)), 1<<20), abi.PaddedPieceSize(ssize)) + if err != nil { + return cid.Undef, xerrors.Errorf("creating unpad reader") + } + + n, err := io.CopyBuffer(cc, upReader, make([]byte, abi.PaddedPieceSize(1<<20).Unpadded())) + if err != nil { + return cid.Undef, xerrors.Errorf("computing 
unsealed CID: %w", err) + } + + res, err := cc.Sum() + if err != nil { + return cid.Undef, xerrors.Errorf("computing unsealed CID: %w", err) + } + + log.Infow("computed unsealed CID", "cid", res, "size", n, "duration", time.Since(startTime), "MiB/s", float64(n)/time.Since(startTime).Seconds()/1024/1024) + + return res.PieceCID, nil +} diff --git a/lib/ffi/sdr_funcs.go b/lib/ffi/sdr_funcs.go index 78dbfd656..3a8330d13 100644 --- a/lib/ffi/sdr_funcs.go +++ b/lib/ffi/sdr_funcs.go @@ -143,43 +143,67 @@ func (l *storageProvider) AcquireSector(ctx context.Context, taskID *harmonytask }, nil } -func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error { - paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, storiface.FTCache, storiface.PathSealing) +func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, into storiface.SectorFileType, sector storiface.SectorRef, ticket abi.SealRandomness, commDcid cid.Cid) error { + paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, into, storiface.PathSealing) if err != nil { return xerrors.Errorf("acquiring sector paths: %w", err) } defer releaseSector() // prepare SDR params - commp, err := commcid.CIDToDataCommitmentV1(commKcid) + commd, err := commcid.CIDToDataCommitmentV1(commDcid) if err != nil { - return xerrors.Errorf("computing commK: %w", err) + return xerrors.Errorf("computing commD (%s): %w", commDcid, err) } - replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp) + replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commd) if err != nil { return xerrors.Errorf("computing replica id: %w", err) } + intoPath := storiface.PathByType(paths, into) + intoTemp := intoPath + storiface.TempSuffix + // make sure the cache dir is empty - if err := os.RemoveAll(paths.Cache); err != nil { - return xerrors.Errorf("removing cache dir: %w", err) + if err := os.RemoveAll(intoPath); err != nil { + return xerrors.Errorf("removing into: %w", err) + } + if err := os.RemoveAll(intoTemp); err != nil { + return xerrors.Errorf("removing intoTemp: %w", err) } - if err := os.MkdirAll(paths.Cache, 0755); err != nil { - return xerrors.Errorf("mkdir cache dir: %w", err) + if err := os.MkdirAll(intoTemp, 0755); err != nil { + return xerrors.Errorf("mkdir intoTemp dir: %w", err) } // generate new sector key err = ffi.GenerateSDR( sector.ProofType, - paths.Cache, + intoTemp, replicaID, ) if err != nil { - return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, paths.Unsealed, err) + return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, intoTemp, err) } - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil { + onlyLastLayer := into == storiface.FTKey + if onlyLastLayer { + // move the last layer to the final location + numLayers, err := proofpaths.SDRLayers(sector.ProofType) + if err != nil { + return xerrors.Errorf("getting number of layers: %w", err) + } + lastLayer := proofpaths.LayerFileName(numLayers) + + if err := os.Rename(filepath.Join(intoTemp, lastLayer), filepath.Join(intoPath)); err != nil { + return xerrors.Errorf("renaming last layer: %w", err) + } + } else { + if err := os.Rename(intoTemp, intoPath); err != nil { + return xerrors.Errorf("renaming into: %w", err) + } + } + + if err := sb.ensureOneCopy(ctx, 
sector.ID, pathIDs, into); err != nil { return xerrors.Errorf("ensure one copy: %w", err) } diff --git a/lib/ffi/unseal_funcs.go b/lib/ffi/unseal_funcs.go new file mode 100644 index 000000000..8879b0f6b --- /dev/null +++ b/lib/ffi/unseal_funcs.go @@ -0,0 +1,86 @@ +package ffi + +import ( + "context" + "io" + "os" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/lib/ffi/cunative" + "github.com/filecoin-project/curio/lib/storiface" +) + +func (sb *SealCalls) decodeCommon(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef, fileType storiface.SectorFileType, decodeFunc func(sealReader, keyReader io.Reader, outFile io.Writer) error) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, storiface.FTUnsealed, storiface.PathStorage) + if err != nil { + return xerrors.Errorf("acquiring sector paths: %w", err) + } + defer releaseSector() + + sealReader, err := sb.sectors.storage.ReaderSeq(ctx, sector, fileType) + if err != nil { + return xerrors.Errorf("getting sealed sector reader: %w", err) + } + + keyReader, err := sb.sectors.storage.ReaderSeq(ctx, sector, storiface.FTKey) + if err != nil { + return xerrors.Errorf("getting key reader: %w", err) + } + + tempDest := paths.Unsealed + storiface.TempSuffix + + outFile, err := os.Create(tempDest) + if err != nil { + return xerrors.Errorf("creating unsealed file: %w", err) + } + defer outFile.Close() + + start := time.Now() + + err = decodeFunc(sealReader, keyReader, outFile) + if err != nil { + return xerrors.Errorf("decoding sealed sector: %w", err) + } + + end := time.Now() + + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return xerrors.Errorf("getting sector size: %w", err) + } + + log.Infow("decoded sector", "sectorID", sector, "duration", end.Sub(start), "MiB/s", float64(ssize)/(1<<20)/end.Sub(start).Seconds()) + + if err := os.Rename(tempDest, paths.Unsealed); err != nil { + return xerrors.Errorf("renaming to unsealed file: %w", err) + } + + if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTUnsealed); err != nil { + return xerrors.Errorf("ensure one copy: %w", err) + } + + if err := sb.sectors.storage.Remove(ctx, sector.ID, storiface.FTKey, true, nil); err != nil { + return err + } + + return nil +} + +func (sb *SealCalls) DecodeSDR(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef) error { + return sb.decodeCommon(ctx, taskID, sector, storiface.FTSealed, func(sealReader, keyReader io.Reader, outFile io.Writer) error { + return cunative.Decode(sealReader, keyReader, outFile) + }) +} + +func (sb *SealCalls) DecodeSnap(ctx context.Context, taskID harmonytask.TaskID, commD, commK cid.Cid, sector storiface.SectorRef) error { + return sb.decodeCommon(ctx, taskID, sector, storiface.FTUpdate, func(sealReader, keyReader io.Reader, outFile io.Writer) error { + return cunative.DecodeSnap(sector.ProofType, commD, commK, keyReader, sealReader, outFile) + }) +} diff --git a/lib/paths/local.go b/lib/paths/local.go index 9510d7704..6b8944983 100644 --- a/lib/paths/local.go +++ b/lib/paths/local.go @@ -9,6 +9,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" "sync/atomic" "time" @@ -104,12 +105,15 @@ func (p *path) stat(ls LocalStorage, newReserve ...statExistingSectorForReservat used, err := ls.DiskUsage(sp) if err == os.ErrNotExist { - p, 
ferr := tempFetchDest(sp, false) - if ferr != nil { - return 0, ferr - } + used, err = ls.DiskUsage(sp + storiface.TempSuffix) + if err == os.ErrNotExist { + p, ferr := tempFetchDest(sp, false) + if ferr != nil { + return 0, ferr + } - used, err = ls.DiskUsage(p) + used, err = ls.DiskUsage(p) + } } if err != nil { // we don't care about 'not exist' errors, as storage can be @@ -213,6 +217,10 @@ func (p *path) sectorPath(sid abi.SectorID, fileType storiface.SectorFileType) s type URLs []string +func UrlsFromString(in string) URLs { + return strings.Split(in, URLSeparator) +} + func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) { l := &Local{ localStorage: newCachedLocalStorage(ls), diff --git a/lib/proof/datacid.go b/lib/proof/datacid.go new file mode 100644 index 000000000..299ca90b3 --- /dev/null +++ b/lib/proof/datacid.go @@ -0,0 +1,167 @@ +package proof + +import ( + "math/bits" + "runtime" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-commp-utils/nonffi" + "github.com/filecoin-project/go-commp-utils/zerocomm" + commcid "github.com/filecoin-project/go-fil-commcid" + commp "github.com/filecoin-project/go-fil-commp-hashhash" + "github.com/filecoin-project/go-state-types/abi" +) + +type DataCIDSize struct { + PayloadSize int64 + PieceSize abi.PaddedPieceSize + PieceCID cid.Cid +} + +const commPBufPad = abi.PaddedPieceSize(8 << 20) +const CommPBuf = abi.UnpaddedPieceSize(commPBufPad - (commPBufPad / 128)) // can't use .Unpadded() for const + +type ciderr struct { + c cid.Cid + err error +} + +/* +DataCidWriter is used as follows: + +cc := new(DataCidWriter) +_, err = io.Copy(cc, f) +dc, err := cc.Sum() + +This computes CommP / PieceCID from a stream, also returns piece and payload sizes. 
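+
+Internally the stream is consumed in CommPBuf-sized chunks (8MiB padded pieces);
+each full chunk is hashed to a piece commitment on its own goroutine, throttled
+to runtime.NumCPU workers, and Sum() pads the resulting leaves to a power of two
+with zero-piece commitments before combining them with nonffi.GenerateUnsealedCID.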
+*/ +type DataCidWriter struct { + len int64 + buf [CommPBuf]byte + leaves []chan ciderr + + tbufs [][CommPBuf]byte + throttle chan int +} + +func (w *DataCidWriter) Write(p []byte) (int, error) { + if w.throttle == nil { + w.throttle = make(chan int, runtime.NumCPU()) + for i := 0; i < cap(w.throttle); i++ { + w.throttle <- i + } + } + if w.tbufs == nil { + w.tbufs = make([][CommPBuf]byte, cap(w.throttle)) + } + + n := len(p) + for len(p) > 0 { + buffered := int(w.len % int64(len(w.buf))) + toBuffer := len(w.buf) - buffered + if toBuffer > len(p) { + toBuffer = len(p) + } + + copied := copy(w.buf[buffered:], p[:toBuffer]) + p = p[copied:] + w.len += int64(copied) + + if copied > 0 && w.len%int64(len(w.buf)) == 0 { + leaf := make(chan ciderr, 1) + bufIdx := <-w.throttle + copy(w.tbufs[bufIdx][:], w.buf[:]) + + go func() { + defer func() { + w.throttle <- bufIdx + }() + + cc := new(commp.Calc) + _, _ = cc.Write(w.tbufs[bufIdx][:]) + p, _, _ := cc.Digest() + l, _ := commcid.PieceCommitmentV1ToCID(p) + leaf <- ciderr{ + c: l, + err: nil, + } + }() + + w.leaves = append(w.leaves, leaf) + } + } + return n, nil +} + +func (w *DataCidWriter) Sum() (DataCIDSize, error) { + // process last non-zero leaf if exists + lastLen := w.len % int64(len(w.buf)) + rawLen := w.len + + leaves := make([]cid.Cid, len(w.leaves)) + for i, leaf := range w.leaves { + r := <-leaf + if r.err != nil { + return DataCIDSize{}, xerrors.Errorf("processing leaf %d: %w", i, r.err) + } + leaves[i] = r.c + } + + // process remaining bit of data + if lastLen != 0 { + if len(leaves) != 0 { + copy(w.buf[lastLen:], make([]byte, int(int64(CommPBuf)-lastLen))) + lastLen = int64(CommPBuf) + } + + cc := new(commp.Calc) + _, _ = cc.Write(w.buf[:lastLen]) + pb, pps, _ := cc.Digest() + p, _ := commcid.PieceCommitmentV1ToCID(pb) + + if abi.PaddedPieceSize(pps).Unpadded() < CommPBuf { // special case for pieces smaller than 16MiB + return DataCIDSize{ + PayloadSize: w.len, + PieceSize: abi.PaddedPieceSize(pps), + PieceCID: p, + }, nil + } + + leaves = append(leaves, p) + } + + // pad with zero pieces to power-of-two size + fillerLeaves := (1 << (bits.Len(uint(len(leaves) - 1)))) - len(leaves) + for i := 0; i < fillerLeaves; i++ { + leaves = append(leaves, zerocomm.ZeroPieceCommitment(CommPBuf)) + } + + if len(leaves) == 1 { + return DataCIDSize{ + PayloadSize: rawLen, + PieceSize: abi.PaddedPieceSize(len(leaves)) * commPBufPad, + PieceCID: leaves[0], + }, nil + } + + pieces := make([]abi.PieceInfo, len(leaves)) + for i, leaf := range leaves { + pieces[i] = abi.PieceInfo{ + Size: commPBufPad, + PieceCID: leaf, + } + } + + p, err := nonffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg64GiBV1, pieces) + if err != nil { + return DataCIDSize{}, xerrors.Errorf("generating unsealed CID: %w", err) + } + + return DataCIDSize{ + PayloadSize: rawLen, + PieceSize: abi.PaddedPieceSize(len(leaves)) * commPBufPad, + PieceCID: p, + }, nil +} diff --git a/lib/storiface/filetype.go b/lib/storiface/filetype.go index 56983a25d..07fab509d 100644 --- a/lib/storiface/filetype.go +++ b/lib/storiface/filetype.go @@ -43,6 +43,9 @@ const ( // Piece Park FTPiece + // Curio unseal + FTKey + FileTypes = iota ) @@ -54,7 +57,7 @@ const ( // - FTUpdate: represents snap sectors // - FTUpdateCache: represents snap cache sectors // - FTPiece: represents Piece Park sectors -var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache, FTPiece} +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache, FTPiece, 
FTKey} // FTNone represents a sector file type of none. This constant is used in the StorageLock method to specify that a sector should not have any file locked. // Example usage: @@ -86,6 +89,7 @@ var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads FTUpdateCache: FSOverheadDen*2 + 1, FTCache: 141, // 11 layers + D(2x ssize) + C + R' FTPiece: FSOverheadDen, + FTKey: FSOverheadDen*11 + 1, } // sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size @@ -103,12 +107,17 @@ var FsOverheadFinalized = map[SectorFileType]int{ FTUpdateCache: 1, FTCache: 1, FTPiece: FSOverheadDen, + FTKey: FSOverheadDen, } // SectorFileType represents the type of a sector file // TypeFromString converts a string to a SectorFileType type SectorFileType int +// TempSuffix is appended to file names when they are worked on before being atomically moved to their final location. +// Local Path GC should be aware of this suffix and have adequate cleanup logic. +const TempSuffix = ".tmp" + // TypeFromString converts a string representation of a SectorFileType to its corresponding value. // It returns the SectorFileType and nil error if the string matches one of the existing types. // If the string does not match any type, it returns 0 and an error. @@ -126,6 +135,8 @@ func TypeFromString(s string) (SectorFileType, error) { return FTUpdateCache, nil case "piece": return FTPiece, nil + case "key": + return FTKey, nil default: return 0, xerrors.Errorf("unknown sector file type '%s'", s) } @@ -146,6 +157,8 @@ func (t SectorFileType) String() string { return "update-cache" case FTPiece: return "piece" + case FTKey: + return "key" default: return fmt.Sprintf("", t, (t & ((1 << FileTypes) - 1)).Strings()) } @@ -334,6 +347,7 @@ type SectorPaths struct { Update string UpdateCache string Piece string + Key string } // HasAllSet checks if all paths of a SectorPaths struct are set for a given SectorFileType. @@ -425,6 +439,8 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { return sps.UpdateCache case FTPiece: return sps.Piece + case FTKey: + return sps.Key } panic("requested unknown path type") @@ -444,6 +460,8 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { sps.UpdateCache = p case FTPiece: sps.Piece = p + case FTKey: + sps.Key = p } } diff --git a/scripts/build-blst.sh b/scripts/build-blst.sh new file mode 100644 index 000000000..d887ff902 --- /dev/null +++ b/scripts/build-blst.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +if [ ! 
-d "extern/supra_seal/deps/blst" ]; then + git clone https://github.com/supranational/blst.git extern/supra_seal/deps/blst + (cd extern/supra_seal/deps/blst + ./build.sh -march=native) +fi diff --git a/tasks/gc/pipeline_meta_gc.go b/tasks/gc/pipeline_meta_gc.go index 2b0fecb9d..dfd9edfa9 100644 --- a/tasks/gc/pipeline_meta_gc.go +++ b/tasks/gc/pipeline_meta_gc.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" ) const SDRPipelineGCInterval = 19 * time.Minute @@ -30,6 +31,9 @@ func (s *PipelineGC) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done if err := s.cleanupUpgrade(); err != nil { return false, xerrors.Errorf("cleanupUpgrade: %w", err) } + if err := s.cleanupUnseal(); err != nil { + return false, xerrors.Errorf("cleanupUnseal: %w", err) + } return true, nil } @@ -41,7 +45,7 @@ func (s *PipelineGC) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Tas func (s *PipelineGC) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "PipelineGC", Cost: resources.Resources{ Cpu: 1, @@ -149,5 +153,24 @@ func (s *PipelineGC) cleanupUpgrade() error { return nil } +func (s *PipelineGC) cleanupUnseal() error { + // Remove sectors_unseal_pipeline entries where: + // after_unseal_sdr is true + // after_decode_sector is true + + ctx := context.Background() + + // Execute the query + _, err := s.db.Exec(ctx, `DELETE FROM sectors_unseal_pipeline + WHERE after_unseal_sdr = TRUE + AND after_decode_sector = TRUE; +`) + if err != nil { + return xerrors.Errorf("failed to clean up unseal entries: %w", err) + } + + return nil +} + var _ harmonytask.TaskInterface = &PipelineGC{} var _ = harmonytask.Reg(&PipelineGC{}) diff --git a/tasks/gc/storage_endpoint_gc.go b/tasks/gc/storage_endpoint_gc.go index f6bb00b1d..600631f7a 100644 --- a/tasks/gc/storage_endpoint_gc.go +++ b/tasks/gc/storage_endpoint_gc.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/storiface" @@ -270,7 +271,7 @@ func (s *StorageEndpointGC) CanAccept(ids []harmonytask.TaskID, engine *harmonyt func (s *StorageEndpointGC) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "StorageMetaGC", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/gc/storage_gc_mark.go b/tasks/gc/storage_gc_mark.go index 9a184bb84..bb910ebea 100644 --- a/tasks/gc/storage_gc_mark.go +++ b/tasks/gc/storage_gc_mark.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/curiochain" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/storiface" @@ -240,7 +241,45 @@ func (s *StorageGCMark) Do(taskID harmonytask.TaskID, stillOwned func() bool) (d return len(toRemove) > 0, nil }, harmonydb.OptionRetry()) if err != nil { - return false, xerrors.Errorf("BeginTransaction: %w", err) + return false, 
xerrors.Errorf("stage 1 mark transaction: %w", err) + } + + /* + STAGE 2: Mark unsealed sectors which we don't want for removal + */ + _, err = s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + /* + SELECT m.sector_num, m.sp_id, sl.storage_id FROM sectors_meta m + INNER JOIN sector_location sl ON m.sp_id = sl.miner_id AND m.sector_num = sl.sector_num + WHERE m.target_unseal_state = false AND sl.sector_filetype= 1 + */ + + var unsealedSectors []struct { + SpID int64 `db:"sp_id"` + SectorNum int64 `db:"sector_num"` + StorageID string `db:"storage_id"` + } + + err = tx.Select(&unsealedSectors, `SELECT m.sector_num, m.sp_id, sl.storage_id FROM sectors_meta m + INNER JOIN sector_location sl ON m.sp_id = sl.miner_id AND m.sector_num = sl.sector_num + LEFT JOIN sectors_unseal_pipeline sup ON m.sp_id = sup.sp_id AND m.sector_num = sup.sector_number + WHERE m.target_unseal_state = false AND sl.sector_filetype= 1 AND sup.sector_number IS NULL`) // FTUnsealed = 1 + if err != nil { + return false, xerrors.Errorf("select unsealed sectors: %w", err) + } + + for _, sector := range unsealedSectors { + _, err := tx.Exec(`INSERT INTO storage_removal_marks (sp_id, sector_num, sector_filetype, storage_id) + VALUES ($1, $2, 1, $3) ON CONFLICT DO NOTHING`, sector.SpID, sector.SectorNum, sector.StorageID) + if err != nil { + return false, xerrors.Errorf("insert storage_removal_marks: %w", err) + } + } + + return len(unsealedSectors) > 0, nil + }, harmonydb.OptionRetry()) + if err != nil { + return false, xerrors.Errorf("unseal stage transaction: %w", err) } return true, nil @@ -253,7 +292,7 @@ func (s *StorageGCMark) CanAccept(ids []harmonytask.TaskID, engine *harmonytask. func (s *StorageGCMark) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "StorageGCMark", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/gc/storage_gc_sweep.go b/tasks/gc/storage_gc_sweep.go index 1666fdf21..da9864370 100644 --- a/tasks/gc/storage_gc_sweep.go +++ b/tasks/gc/storage_gc_sweep.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/paths" storiface "github.com/filecoin-project/curio/lib/storiface" ) @@ -109,7 +110,7 @@ func (s *StorageGCSweep) CanAccept(ids []harmonytask.TaskID, engine *harmonytask func (s *StorageGCSweep) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "StorageGCSweep", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/message/sender.go b/tasks/message/sender.go index f75aaa6d3..7dbc97417 100644 --- a/tasks/message/sender.go +++ b/tasks/message/sender.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/promise" "github.com/filecoin-project/lotus/api" @@ -236,7 +237,7 @@ func (s *SendTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskE func (s *SendTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1024, + Max: taskhelp.Max(1024), Name: "SendMessage", Cost: resources.Resources{ Cpu: 0, diff --git 
a/tasks/metadata/task_sector_expirations.go b/tasks/metadata/task_sector_expirations.go index 9761b73fd..366e44090 100644 --- a/tasks/metadata/task_sector_expirations.go +++ b/tasks/metadata/task_sector_expirations.go @@ -15,6 +15,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/curiochain" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -182,7 +183,7 @@ func (s *SectorMetadata) CanAccept(ids []harmonytask.TaskID, engine *harmonytask func (s *SectorMetadata) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "SectorMetadata", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/piece/task_cleanup_piece.go b/tasks/piece/task_cleanup_piece.go index 205221ee8..4e112913b 100644 --- a/tasks/piece/task_cleanup_piece.go +++ b/tasks/piece/task_cleanup_piece.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/promise" "github.com/filecoin-project/curio/lib/storiface" @@ -116,7 +117,7 @@ func (c *CleanupPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonyta func (c *CleanupPieceTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: c.max, + Max: taskhelp.Max(c.max), Name: "DropPiece", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/piece/task_park_piece.go b/tasks/piece/task_park_piece.go index b4fa5ba6f..359f5313a 100644 --- a/tasks/piece/task_park_piece.go +++ b/tasks/piece/task_park_piece.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" @@ -193,7 +194,7 @@ func (p *ParkPieceTask) TypeDetails() harmonytask.TaskTypeDetails { const maxSizePiece = 64 << 30 return harmonytask.TaskTypeDetails{ - Max: p.max, + Max: taskhelp.Max(p.max), Name: "ParkPiece", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/scrub/task_scrub_commd.go b/tasks/scrub/task_scrub_commd.go new file mode 100644 index 000000000..04fc21391 --- /dev/null +++ b/tasks/scrub/task_scrub_commd.go @@ -0,0 +1,166 @@ +package scrub + +import ( + "context" + "math/rand/v2" + "runtime" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/curio/harmony/harmonydb" + "github.com/filecoin-project/curio/harmony/harmonytask" + "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/lib/ffi" + "github.com/filecoin-project/curio/lib/passcall" + "github.com/filecoin-project/curio/lib/storiface" +) + +const MinSchedInterval = 100 * time.Second + +type ScrubCommDTask struct { + db *harmonydb.DB + sc *ffi.SealCalls +} + +func NewCommDCheckTask(db *harmonydb.DB, sc *ffi.SealCalls) *ScrubCommDTask { + return &ScrubCommDTask{db: db, sc: sc} +} + +func (c 
*ScrubCommDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { + ctx := context.Background() + + var checkReq []struct { + CheckID int64 `db:"check_id"` + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + ExpectUnsealedCID string `db:"expected_unsealed_cid"` + RegSealProof int64 `db:"reg_seal_proof"` + } + + err = c.db.Select(ctx, &checkReq, ` + SELECT u.check_id, u.sp_id, u.sector_number, u.expected_unsealed_cid, sm.reg_seal_proof + FROM scrub_unseal_commd_check u + INNER JOIN sectors_meta sm ON sm.sp_id = u.sp_id AND sm.sector_num = u.sector_number + WHERE task_id = $1 + `, taskID) + if err != nil { + return false, xerrors.Errorf("fetching check request: %w", err) + } + if len(checkReq) == 0 { + return false, xerrors.Errorf("no check requests found") + } + + check := checkReq[0] + + s := storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(check.SpID), + Number: abi.SectorNumber(check.SectorNumber), + }, + ProofType: abi.RegisteredSealProof(check.RegSealProof), + } + + expectUnsealedCID, err := cid.Parse(check.ExpectUnsealedCID) + if err != nil { + return false, xerrors.Errorf("parsing expected unsealed CID: %w", err) + } + + actual, err := c.sc.CheckUnsealedCID(ctx, s) + if err != nil { + return false, xerrors.Errorf("checking unsealed CID: %w", err) + } + + storeResult := func(ok bool, actualCID *cid.Cid, message string) error { + _, err := c.db.Exec(ctx, ` + UPDATE scrub_unseal_commd_check + SET ok = $1, actual_unsealed_cid = $2, message = $3 + WHERE check_id = $4 + `, ok, actualCID, message, check.CheckID) + return err + } + + if actual != expectUnsealedCID { + err := storeResult(false, &actual, "unsealed CID mismatch") + if err != nil { + return false, xerrors.Errorf("storing result (mismatch %s != %s): %w", actual, expectUnsealedCID, err) + } + + return true, nil + } + + err = storeResult(true, &actual, "") + if err != nil { + return false, xerrors.Errorf("storing result (correct): %w", err) + } + + return true, nil +} + +func (c *ScrubCommDTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + id := ids[0] + return &id, nil +} + +func (c *ScrubCommDTask) TypeDetails() harmonytask.TaskTypeDetails { + return harmonytask.TaskTypeDetails{ + Name: "ScrubCommDCheck", + Cost: resources.Resources{ + Cpu: min(1, runtime.NumCPU()/4), + Ram: uint64(runtime.NumCPU())*(8<<20) + 128<<20, + }, + MaxFailures: 3, + IAmBored: passcall.Every(MinSchedInterval, func(taskFunc harmonytask.AddTaskFunc) error { + return c.schedule(context.Background(), taskFunc) + }), + } +} + +func (c *ScrubCommDTask) Adder(taskFunc harmonytask.AddTaskFunc) { +} + +func (c *ScrubCommDTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var checks []struct { + CheckID int64 `db:"check_id"` + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + } + + err := tx.Select(&checks, ` + SELECT check_id, sp_id, sector_number + FROM scrub_unseal_commd_check + WHERE task_id IS NULL LIMIT 20 + `) + if err != nil { + return false, xerrors.Errorf("getting tasks: %w", err) + } + + if len(checks) == 0 { + return false, nil + } + + // pick at random in case there are a bunch of schedules across the cluster + check := checks[rand.N(len(checks))] + + _, err = tx.Exec(` + UPDATE scrub_unseal_commd_check + SET task_id = $1 + WHERE check_id = $2 AND task_id IS NULL + `, id, check.CheckID) + if 
err != nil { + return false, xerrors.Errorf("updating task id: %w", err) + } + + return true, nil + }) + + return nil + +} + +var _ = harmonytask.Reg(&ScrubCommDTask{}) +var _ harmonytask.TaskInterface = &ScrubCommDTask{} diff --git a/tasks/seal/task_finalize.go b/tasks/seal/task_finalize.go index 15277c75a..13dff17a0 100644 --- a/tasks/seal/task_finalize.go +++ b/tasks/seal/task_finalize.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/slotmgr" storiface "github.com/filecoin-project/curio/lib/storiface" @@ -254,7 +255,7 @@ func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.T func (f *FinalizeTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: f.max, + Max: taskhelp.Max(f.max), Name: "Finalize", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/seal/task_movestorage.go b/tasks/seal/task_movestorage.go index c83e1d167..f980ec825 100644 --- a/tasks/seal/task_movestorage.go +++ b/tasks/seal/task_movestorage.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" storiface "github.com/filecoin-project/curio/lib/storiface" @@ -143,7 +144,7 @@ func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails { } return harmonytask.TaskTypeDetails{ - Max: m.max, + Max: taskhelp.Max(m.max), Name: "MoveStorage", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/seal/task_porep.go b/tasks/seal/task_porep.go index 0a2f352da..bbed5a277 100644 --- a/tasks/seal/task_porep.go +++ b/tasks/seal/task_porep.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/storiface" @@ -162,7 +163,7 @@ func (p *PoRepTask) TypeDetails() harmonytask.TaskTypeDetails { gpu = 0 } res := harmonytask.TaskTypeDetails{ - Max: p.max, + Max: taskhelp.Max(p.max), Name: "PoRep", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/seal/task_sdr.go b/tasks/seal/task_sdr.go index f990f1b8c..2e989821b 100644 --- a/tasks/seal/task_sdr.go +++ b/tasks/seal/task_sdr.go @@ -45,10 +45,11 @@ type SDRTask struct { sc *ffi2.SealCalls - max, min int + max harmonytask.Limiter + min int } -func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *ffi2.SealCalls, maxSDR, minSDR int) *SDRTask { +func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *ffi2.SealCalls, maxSDR harmonytask.Limiter, minSDR int) *SDRTask { return &SDRTask{ api: api, db: db, @@ -117,7 +118,7 @@ func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bo // Trees; After one retry, it should return the sector to the // SDR stage; max number of retries should be configurable - err = s.sc.GenerateSDR(ctx, taskID, sref, ticket, dealData.CommD) + err = s.sc.GenerateSDR(ctx, taskID, storiface.FTCache, sref, ticket, dealData.CommD) 
if err != nil { return false, xerrors.Errorf("generating sdr: %w", err) } @@ -181,7 +182,7 @@ func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails { res := harmonytask.TaskTypeDetails{ Max: s.max, Name: "SDR", - Cost: resources.Resources{ // todo offset for prefetch? + Cost: resources.Resources{ Cpu: 4, // todo multicore sdr Gpu: 0, Ram: (64 << 30) + (256 << 20), diff --git a/tasks/seal/task_submit_commit.go b/tasks/seal/task_submit_commit.go index e62bf4a7f..3aea6665b 100644 --- a/tasks/seal/task_submit_commit.go +++ b/tasks/seal/task_submit_commit.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/multictladdr" "github.com/filecoin-project/curio/tasks/message" @@ -420,7 +421,7 @@ func (s *SubmitCommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonyta func (s *SubmitCommitTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 128, + Max: taskhelp.Max(128), Name: "CommitSubmit", Cost: resources.Resources{ Cpu: 0, diff --git a/tasks/seal/task_submit_precommit.go b/tasks/seal/task_submit_precommit.go index fa502c1db..323db913f 100644 --- a/tasks/seal/task_submit_precommit.go +++ b/tasks/seal/task_submit_precommit.go @@ -18,6 +18,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/multictladdr" "github.com/filecoin-project/curio/tasks/message" @@ -332,7 +333,7 @@ func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmon func (s *SubmitPrecommitTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1024, + Max: taskhelp.Max(1024), Name: "PreCommitSubmit", Cost: resources.Resources{ Cpu: 0, diff --git a/tasks/seal/task_synth_proofs.go b/tasks/seal/task_synth_proofs.go index 70f2d3416..6f2c08222 100644 --- a/tasks/seal/task_synth_proofs.go +++ b/tasks/seal/task_synth_proofs.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/dealdata" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" @@ -155,7 +156,7 @@ func (s *SyntheticProofTask) TypeDetails() harmonytask.TaskTypeDetails { } res := harmonytask.TaskTypeDetails{ - Max: s.max, + Max: taskhelp.Max(s.max), Name: "SyntheticProofs", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/seal/task_treed.go b/tasks/seal/task_treed.go index 19dd598e2..115693947 100644 --- a/tasks/seal/task_treed.go +++ b/tasks/seal/task_treed.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" storiface "github.com/filecoin-project/curio/lib/storiface" @@ -40,7 +41,7 @@ func (t *TreeDTask) TypeDetails() harmonytask.TaskTypeDetails { } return 
harmonytask.TaskTypeDetails{ - Max: t.max, + Max: taskhelp.Max(t.max), Name: "TreeD", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/seal/task_treerc.go b/tasks/seal/task_treerc.go index 120441ef7..26a2f57ed 100644 --- a/tasks/seal/task_treerc.go +++ b/tasks/seal/task_treerc.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/dealdata" ffi2 "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/paths" @@ -171,7 +172,7 @@ func (t *TreeRCTask) TypeDetails() harmonytask.TaskTypeDetails { } return harmonytask.TaskTypeDetails{ - Max: t.max, + Max: taskhelp.Max(t.max), Name: "TreeRC", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/sealsupra/task_supraseal.go b/tasks/sealsupra/task_supraseal.go index 36dc235aa..8163bdbf4 100644 --- a/tasks/sealsupra/task_supraseal.go +++ b/tasks/sealsupra/task_supraseal.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/hugepageutil" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/paths" @@ -425,7 +426,7 @@ var ssizeToName = map[abi.SectorSize]string{ func (s *SupraSeal) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: s.pipelines, + Max: taskhelp.Max(s.pipelines), Name: fmt.Sprintf("Batch%d-%s", s.sectors, ssizeToName[must.One(s.spt.SectorSize())]), Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/snap/task_encode.go b/tasks/snap/task_encode.go index 42ab18e25..4bf62a97a 100644 --- a/tasks/snap/task_encode.go +++ b/tasks/snap/task_encode.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/dealdata" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" @@ -128,7 +129,7 @@ func (e *EncodeTask) TypeDetails() harmonytask.TaskTypeDetails { } return harmonytask.TaskTypeDetails{ - Max: e.max, + Max: taskhelp.Max(e.max), Name: "UpdateEncode", Cost: resources.Resources{ Cpu: 1, @@ -157,7 +158,7 @@ func (e *EncodeTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskF SectorNumber int64 `db:"sector_number"` } - err := e.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE data_assigned = true AND after_encode = FALSE AND task_id_encode IS NULL`) + err := tx.Select(&tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE data_assigned = true AND after_encode = FALSE AND task_id_encode IS NULL`) if err != nil { return false, xerrors.Errorf("getting tasks: %w", err) } diff --git a/tasks/snap/task_movestorage.go b/tasks/snap/task_movestorage.go index ddabc1cf0..aa26e2125 100644 --- a/tasks/snap/task_movestorage.go +++ b/tasks/snap/task_movestorage.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + 
"github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/paths" @@ -87,7 +88,7 @@ func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails { ssize = abi.SectorSize(2 << 20) } return harmonytask.TaskTypeDetails{ - Max: m.max, + Max: taskhelp.Max(m.max), Name: "UpdateStore", Cost: resources.Resources{ Cpu: 1, @@ -112,7 +113,7 @@ func (m *MoveStorageTask) schedule(ctx context.Context, taskFunc harmonytask.Add SectorNumber int64 `db:"sector_number"` } - err := m.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE after_encode = TRUE AND after_prove = TRUE AND after_move_storage = FALSE AND task_id_move_storage IS NULL`) + err := tx.Select(&tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE after_encode = TRUE AND after_prove = TRUE AND after_move_storage = FALSE AND task_id_move_storage IS NULL`) if err != nil { return false, xerrors.Errorf("getting tasks: %w", err) } diff --git a/tasks/snap/task_prove.go b/tasks/snap/task_prove.go index 90467ef13..0ab3af74d 100644 --- a/tasks/snap/task_prove.go +++ b/tasks/snap/task_prove.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffi" "github.com/filecoin-project/curio/lib/passcall" "github.com/filecoin-project/curio/lib/storiface" @@ -122,7 +123,7 @@ func (p *ProveTask) TypeDetails() harmonytask.TaskTypeDetails { gpu = 0 } return harmonytask.TaskTypeDetails{ - Max: p.max, + Max: taskhelp.Max(p.max), Name: "UpdateProve", Cost: resources.Resources{ Cpu: 1, @@ -147,7 +148,7 @@ func (p *ProveTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFu SectorNumber int64 `db:"sector_number"` } - err := p.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE after_encode = TRUE AND after_prove = FALSE AND task_id_prove IS NULL`) + err := tx.Select(&tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE after_encode = TRUE AND after_prove = FALSE AND task_id_prove IS NULL`) if err != nil { return false, xerrors.Errorf("getting tasks: %w", err) } diff --git a/tasks/snap/task_submit.go b/tasks/snap/task_submit.go index 744d7a1c7..463320d34 100644 --- a/tasks/snap/task_submit.go +++ b/tasks/snap/task_submit.go @@ -262,6 +262,10 @@ func (s *SubmitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done if err != nil { return false, xerrors.Errorf("parsing new sealed cid: %w", err) } + newUnsealedCID, err := cid.Parse(update.UpdateUnsealedCID) + if err != nil { + return false, xerrors.Errorf("parsing new unsealed cid: %w", err) + } // Prepare params params := miner.ProveReplicaUpdates3Params{ @@ -369,7 +373,7 @@ func (s *SubmitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done return false, xerrors.Errorf("inserting into message_waits: %w", err) } - if err := s.transferUpdatedSectorData(ctx, update.SpID, update.SectorNumber, newSealedCID, cid.Undef, mcid); err != nil { + if err := s.transferUpdatedSectorData(ctx, update.SpID, update.SectorNumber, newUnsealedCID, newSealedCID, mcid); err != nil { return false, xerrors.Errorf("updating sector meta: %w", err) } @@ -465,7 +469,7 @@ func (s *SubmitTask) schedule(ctx context.Context, taskFunc harmonytask.AddTaskF 
         SectorNumber int64 `db:"sector_number"`
     }
 
-    err := s.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE failed = FALSE
+    err := tx.Select(&tasks, `SELECT sp_id, sector_number FROM sectors_snap_pipeline WHERE failed = FALSE
         AND after_encode = TRUE
         AND after_prove = TRUE
         AND after_submit = FALSE
diff --git a/tasks/unseal/task_unseal_decode.go b/tasks/unseal/task_unseal_decode.go
new file mode 100644
index 000000000..b67f1ef4e
--- /dev/null
+++ b/tasks/unseal/task_unseal_decode.go
@@ -0,0 +1,254 @@
+package unseal
+
+import (
+    "context"
+    "math/rand/v2"
+    "time"
+
+    "github.com/ipfs/go-cid"
+    logging "github.com/ipfs/go-log/v2"
+    "golang.org/x/xerrors"
+
+    "github.com/filecoin-project/go-state-types/abi"
+
+    "github.com/filecoin-project/curio/harmony/harmonydb"
+    "github.com/filecoin-project/curio/harmony/harmonytask"
+    "github.com/filecoin-project/curio/harmony/resources"
+    "github.com/filecoin-project/curio/harmony/taskhelp"
+    "github.com/filecoin-project/curio/lib/dealdata"
+    "github.com/filecoin-project/curio/lib/ffi"
+    "github.com/filecoin-project/curio/lib/passcall"
+    "github.com/filecoin-project/curio/lib/paths"
+    "github.com/filecoin-project/curio/lib/storiface"
+)
+
+var log = logging.Logger("unseal")
+
+const MinSchedInterval = 120 * time.Second
+
+type TaskUnsealDecode struct {
+    max int
+
+    sc  *ffi.SealCalls
+    db  *harmonydb.DB
+    api UnsealSDRApi
+}
+
+func NewTaskUnsealDecode(sc *ffi.SealCalls, db *harmonydb.DB, max int, api UnsealSDRApi) *TaskUnsealDecode {
+    return &TaskUnsealDecode{
+        max: max,
+        sc:  sc,
+        db:  db,
+        api: api,
+    }
+}
+
+func (t *TaskUnsealDecode) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
+    ctx := context.Background()
+
+    var sectorParamsArr []struct {
+        SpID         int64 `db:"sp_id"`
+        SectorNumber int64 `db:"sector_number"`
+        RegSealProof int64 `db:"reg_seal_proof"`
+    }
+
+    err = t.db.Select(ctx, &sectorParamsArr, `
+        SELECT sp_id, sector_number, reg_seal_proof
+        FROM sectors_unseal_pipeline
+        WHERE task_id_decode_sector = $1`, taskID)
+    if err != nil {
+        return false, xerrors.Errorf("getting sector params: %w", err)
+    }
+
+    if len(sectorParamsArr) == 0 {
+        return false, xerrors.Errorf("no sector params")
+    }
+
+    sectorParams := sectorParamsArr[0]
+
+    var sectorMeta []struct {
+        TicketValue    []byte `db:"ticket_value"`
+        OrigSealedCID  string `db:"orig_sealed_cid"`
+        CurSealedCID   string `db:"cur_sealed_cid"`
+        CurUnsealedCID string `db:"cur_unsealed_cid"`
+    }
+    err = t.db.Select(ctx, &sectorMeta, `
+        SELECT ticket_value, orig_sealed_cid, cur_sealed_cid, cur_unsealed_cid
+        FROM sectors_meta
+        WHERE sp_id = $1 AND sector_num = $2`, sectorParams.SpID, sectorParams.SectorNumber)
+    if err != nil {
+        return false, xerrors.Errorf("getting sector meta: %w", err)
+    }
+
+    if len(sectorMeta) != 1 {
+        return false, xerrors.Errorf("expected 1 sector meta, got %d", len(sectorMeta))
+    }
+
+    smeta := sectorMeta[0]
+    commK, err := cid.Decode(smeta.OrigSealedCID)
+    if err != nil {
+        return false, xerrors.Errorf("decoding OrigSealedCID: %w", err)
+    }
+
+    var commD, commR cid.Cid
+    if smeta.CurSealedCID == "" || smeta.CurSealedCID == "b" {
+        // https://github.com/filecoin-project/curio/issues/191
+
+
+        // "unsealed" actually stores the sealed CID, "sealed" is empty
+        commR, err = cid.Decode(smeta.CurUnsealedCID)
+        if err != nil {
+            return false, xerrors.Errorf("decoding CurSealedCID: %w", err)
+        }
+
+        commD, err = dealdata.UnsealedCidFromPieces(ctx, t.db, sectorParams.SpID, sectorParams.SectorNumber)
+        if err != nil {
+
return false, xerrors.Errorf("getting deal data CID: %w", err) + } + + log.Warnw("workaround for issue #191", "task", taskID, "commD", commD, "commK", commK, "commR", commR) + + // + } else { + commD, err = cid.Decode(smeta.CurUnsealedCID) + if err != nil { + return false, xerrors.Errorf("decoding CurUnsealedCID (%s): %w", smeta.CurUnsealedCID, err) + } + commR, err = cid.Decode(smeta.CurSealedCID) + if err != nil { + return false, xerrors.Errorf("decoding CurSealedCID: %w", err) + } + } + + sref := storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(sectorParams.SpID), + Number: abi.SectorNumber(sectorParams.SectorNumber), + }, + ProofType: abi.RegisteredSealProof(sectorParams.RegSealProof), + } + + isSnap := commK != commR + log.Infow("unseal decode", "snap", isSnap, "task", taskID, "commK", commK, "commR", commR, "commD", commD) + if isSnap { + err := t.sc.DecodeSnap(ctx, taskID, commD, commK, sref) + if err != nil { + return false, xerrors.Errorf("DecodeSnap: %w", err) + } + } else { + err = t.sc.DecodeSDR(ctx, taskID, sref) + if err != nil { + return false, xerrors.Errorf("DecodeSDR: %w", err) + } + } + + // NOTE: Decode.. drops the sector key at the end + + _, err = t.db.Exec(ctx, `UPDATE sectors_unseal_pipeline SET after_decode_sector = TRUE, task_id_decode_sector = NULL WHERE task_id_decode_sector = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("updating task: %w", err) + } + + return true, nil +} + +func (t *TaskUnsealDecode) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + id := ids[0] + return &id, nil +} + +func (t *TaskUnsealDecode) TypeDetails() harmonytask.TaskTypeDetails { + ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size + if isDevnet { + ssize = abi.SectorSize(2 << 20) + } + + return harmonytask.TaskTypeDetails{ + Max: taskhelp.Max(t.max), + Name: "UnsealDecode", + Cost: resources.Resources{ + Cpu: 4, // todo multicore sdr + Gpu: 0, + Ram: 54 << 30, + Storage: t.sc.Storage(t.taskToSector, storiface.FTUnsealed, storiface.FTNone, ssize, storiface.PathStorage, paths.MinFreeStoragePercentage), + }, + MaxFailures: 2, + IAmBored: passcall.Every(MinSchedInterval, func(taskFunc harmonytask.AddTaskFunc) error { + return t.schedule(context.Background(), taskFunc) + }), + } +} + +func (t *TaskUnsealDecode) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + // schedule at most one decode when we're bored + + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var tasks []struct { + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + } + + err := t.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_unseal_pipeline WHERE after_unseal_sdr = TRUE AND after_decode_sector = FALSE AND task_id_decode_sector IS NULL`) + if err != nil { + return false, xerrors.Errorf("getting tasks: %w", err) + } + + if len(tasks) == 0 { + return false, nil + } + + // pick at random in case there are a bunch of schedules across the cluster + t := tasks[rand.N(len(tasks))] + + _, err = tx.Exec(`UPDATE sectors_unseal_pipeline SET task_id_decode_sector = $1 WHERE sp_id = $2 AND sector_number = $3`, id, t.SpID, t.SectorNumber) + if err != nil { + return false, xerrors.Errorf("updating task id: %w", err) + } + + return true, nil + }) + + return nil +} + +func (t *TaskUnsealDecode) Adder(taskFunc harmonytask.AddTaskFunc) { +} + +func (t *TaskUnsealDecode) GetSpid(db *harmonydb.DB, taskID int64) 
string {
+    sid, err := t.GetSectorID(db, taskID)
+    if err != nil {
+        log.Errorf("getting sector id: %s", err)
+        return ""
+    }
+    return sid.Miner.String()
+}
+
+func (t *TaskUnsealDecode) GetSectorID(db *harmonydb.DB, taskID int64) (*abi.SectorID, error) {
+    var spId, sectorNumber uint64
+    err := db.QueryRow(context.Background(), `SELECT sp_id,sector_number FROM sectors_unseal_pipeline WHERE task_id_decode_sector = $1`, taskID).Scan(&spId, &sectorNumber)
+    if err != nil {
+        return nil, err
+    }
+    return &abi.SectorID{
+        Miner:  abi.ActorID(spId),
+        Number: abi.SectorNumber(sectorNumber),
+    }, nil
+}
+
+func (t *TaskUnsealDecode) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) {
+    var refs []ffi.SectorRef
+
+    err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_unseal_pipeline WHERE task_id_decode_sector = $1`, id)
+    if err != nil {
+        return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err)
+    }
+
+    if len(refs) != 1 {
+        return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs))
+    }
+
+    return refs[0], nil
+}
+
+var _ = harmonytask.Reg(&TaskUnsealDecode{})
+var _ harmonytask.TaskInterface = &TaskUnsealDecode{}
diff --git a/tasks/unseal/task_unseal_sdr.go b/tasks/unseal/task_unseal_sdr.go
new file mode 100644
index 000000000..d8cf3880f
--- /dev/null
+++ b/tasks/unseal/task_unseal_sdr.go
@@ -0,0 +1,226 @@
+package unseal
+
+import (
+    "context"
+    "math/rand/v2"
+
+    "github.com/ipfs/go-cid"
+    "golang.org/x/xerrors"
+
+    "github.com/filecoin-project/go-address"
+    "github.com/filecoin-project/go-state-types/abi"
+
+    "github.com/filecoin-project/curio/harmony/harmonydb"
+    "github.com/filecoin-project/curio/harmony/harmonytask"
+    "github.com/filecoin-project/curio/harmony/resources"
+    "github.com/filecoin-project/curio/lib/ffi"
+    "github.com/filecoin-project/curio/lib/passcall"
+    "github.com/filecoin-project/curio/lib/paths"
+    "github.com/filecoin-project/curio/lib/storiface"
+
+    "github.com/filecoin-project/lotus/build"
+    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+    "github.com/filecoin-project/lotus/chain/types"
+)
+
+var isDevnet = build.BlockDelaySecs < 30
+
+type UnsealSDRApi interface {
+    StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
+}
+
+type TaskUnsealSdr struct {
+    max harmonytask.Limiter
+
+    sc  *ffi.SealCalls
+    db  *harmonydb.DB
+    api UnsealSDRApi
+}
+
+func NewTaskUnsealSDR(sc *ffi.SealCalls, db *harmonydb.DB, max harmonytask.Limiter, api UnsealSDRApi) *TaskUnsealSdr {
+    return &TaskUnsealSdr{
+        max: max,
+        sc:  sc,
+        db:  db,
+        api: api,
+    }
+}
+
+func (t *TaskUnsealSdr) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
+    ctx := context.Background()
+
+    var sectorParamsArr []struct {
+        SpID         int64 `db:"sp_id"`
+        SectorNumber int64 `db:"sector_number"`
+        RegSealProof int64 `db:"reg_seal_proof"`
+    }
+
+    err = t.db.Select(ctx, &sectorParamsArr, `
+        SELECT sp_id, sector_number, reg_seal_proof
+        FROM sectors_unseal_pipeline
+        WHERE task_id_unseal_sdr = $1`, taskID)
+    if err != nil {
+        return false, xerrors.Errorf("getting sector params: %w", err)
+    }
+
+    if len(sectorParamsArr) == 0 {
+        return false, xerrors.Errorf("no sector params")
+    }
+
+    sectorParams := sectorParamsArr[0]
+
+    var sectorMeta []struct {
+        TicketValue     []byte `db:"ticket_value"`
+        OrigUnsealedCID string `db:"orig_unsealed_cid"`
+    }
+    err = t.db.Select(ctx, &sectorMeta, `
+        SELECT ticket_value, orig_unsealed_cid
+        FROM
sectors_meta + WHERE sp_id = $1 AND sector_num = $2`, sectorParams.SpID, sectorParams.SectorNumber) + if err != nil { + return false, xerrors.Errorf("getting sector meta: %w", err) + } + + if len(sectorMeta) != 1 { + return false, xerrors.Errorf("expected 1 sector meta, got %d", len(sectorMeta)) + } + + // NOTE: Even for snap sectors for SDR we need the original unsealed CID + commD, err := cid.Decode(sectorMeta[0].OrigUnsealedCID) + if err != nil { + return false, xerrors.Errorf("decoding commd: %w", err) + } + + if len(sectorMeta[0].TicketValue) != abi.RandomnessLength { + return false, xerrors.Errorf("invalid ticket value length %d", len(sectorMeta[0].TicketValue)) + } + + sref := storiface.SectorRef{ + ID: abi.SectorID{ + Miner: abi.ActorID(sectorParams.SpID), + Number: abi.SectorNumber(sectorParams.SectorNumber), + }, + ProofType: abi.RegisteredSealProof(sectorParams.RegSealProof), + } + + log.Infow("unseal generate sdr key", "sector", sref.ID, "proof", sref.ProofType, "task", taskID, "ticket", sectorMeta[0].TicketValue, "commD", commD) + + if err := t.sc.GenerateSDR(ctx, taskID, storiface.FTKey, sref, sectorMeta[0].TicketValue, commD); err != nil { + return false, xerrors.Errorf("generate sdr: %w", err) + } + + // Mark the task as done + _, err = t.db.Exec(ctx, `UPDATE sectors_unseal_pipeline SET after_unseal_sdr = TRUE, task_id_unseal_sdr = NULL WHERE task_id_unseal_sdr = $1`, taskID) + if err != nil { + return false, xerrors.Errorf("updating task: %w", err) + } + + return true, nil +} + +func (t *TaskUnsealSdr) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { + id := ids[0] + return &id, nil +} + +func (t *TaskUnsealSdr) TypeDetails() harmonytask.TaskTypeDetails { + ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size + if isDevnet { + ssize = abi.SectorSize(2 << 20) + } + + res := harmonytask.TaskTypeDetails{ + Max: t.max, + Name: "SDRKeyRegen", + Cost: resources.Resources{ + Cpu: 4, // todo multicore sdr + Gpu: 0, + Ram: 54 << 30, + Storage: t.sc.Storage(t.taskToSector, storiface.FTKey, storiface.FTNone, ssize, storiface.PathSealing, paths.MinFreeStoragePercentage), + }, + MaxFailures: 2, + IAmBored: passcall.Every(MinSchedInterval, func(taskFunc harmonytask.AddTaskFunc) error { + return t.schedule(context.Background(), taskFunc) + }), + } + + if isDevnet { + res.Cost.Ram = 1 << 30 + } + + return res +} + +func (t *TaskUnsealSdr) schedule(ctx context.Context, taskFunc harmonytask.AddTaskFunc) error { + // schedule at most one unseal when we're bored + + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + var tasks []struct { + SpID int64 `db:"sp_id"` + SectorNumber int64 `db:"sector_number"` + } + + err := t.db.Select(ctx, &tasks, `SELECT sp_id, sector_number FROM sectors_unseal_pipeline WHERE after_unseal_sdr = FALSE AND task_id_unseal_sdr IS NULL`) + if err != nil { + return false, xerrors.Errorf("getting tasks: %w", err) + } + + if len(tasks) == 0 { + return false, nil + } + + // pick at random in case there are a bunch of schedules across the cluster + t := tasks[rand.N(len(tasks))] + + _, err = tx.Exec(`UPDATE sectors_unseal_pipeline SET task_id_unseal_sdr = $1 WHERE sp_id = $2 AND sector_number = $3`, id, t.SpID, t.SectorNumber) + if err != nil { + return false, xerrors.Errorf("updating task id: %w", err) + } + + return true, nil + }) + + return nil +} + +func (t *TaskUnsealSdr) Adder(taskFunc harmonytask.AddTaskFunc) { +} + +func (t 
*TaskUnsealSdr) GetSpid(db *harmonydb.DB, taskID int64) string {
+    sid, err := t.GetSectorID(db, taskID)
+    if err != nil {
+        log.Errorf("getting sector id: %s", err)
+        return ""
+    }
+    return sid.Miner.String()
+}
+
+func (t *TaskUnsealSdr) GetSectorID(db *harmonydb.DB, taskID int64) (*abi.SectorID, error) {
+    var spId, sectorNumber uint64
+    err := db.QueryRow(context.Background(), `SELECT sp_id,sector_number FROM sectors_unseal_pipeline WHERE task_id_unseal_sdr = $1`, taskID).Scan(&spId, &sectorNumber)
+    if err != nil {
+        return nil, err
+    }
+    return &abi.SectorID{
+        Miner:  abi.ActorID(spId),
+        Number: abi.SectorNumber(sectorNumber),
+    }, nil
+}
+
+func (t *TaskUnsealSdr) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) {
+    var refs []ffi.SectorRef
+
+    err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_unseal_pipeline WHERE task_id_unseal_sdr = $1`, id)
+    if err != nil {
+        return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err)
+    }
+
+    if len(refs) != 1 {
+        return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs))
+    }
+
+    return refs[0], nil
+}
+
+var _ = harmonytask.Reg(&TaskUnsealSdr{})
+var _ harmonytask.TaskInterface = (*TaskUnsealSdr)(nil)
diff --git a/tasks/window/compute_task.go b/tasks/window/compute_task.go
index 2d7cf246a..a53f58a1a 100644
--- a/tasks/window/compute_task.go
+++ b/tasks/window/compute_task.go
@@ -366,7 +366,7 @@ func (t *WdPostTask) TypeDetails() harmonytask.TaskTypeDetails {
     return harmonytask.TaskTypeDetails{
         Name: "WdPost",
-        Max: t.max,
+        Max: taskhelp.Max(t.max),
         MaxFailures: 5,
         Follows: nil,
         Cost: resources.Resources{
diff --git a/tasks/window/recover_task.go b/tasks/window/recover_task.go
index 722a08001..07997bf13 100644
--- a/tasks/window/recover_task.go
+++ b/tasks/window/recover_task.go
@@ -14,6 +14,7 @@ import (
     "github.com/filecoin-project/curio/harmony/harmonydb"
     "github.com/filecoin-project/curio/harmony/harmonytask"
     "github.com/filecoin-project/curio/harmony/resources"
+    "github.com/filecoin-project/curio/harmony/taskhelp"
     "github.com/filecoin-project/curio/lib/chainsched"
     "github.com/filecoin-project/curio/lib/multictladdr"
     "github.com/filecoin-project/curio/lib/promise"
@@ -217,7 +218,7 @@ func (w *WdPostRecoverDeclareTask) CanAccept(ids []harmonytask.TaskID, engine *h

 func (w *WdPostRecoverDeclareTask) TypeDetails() harmonytask.TaskTypeDetails {
     return harmonytask.TaskTypeDetails{
-        Max: 128,
+        Max: taskhelp.Max(128),
         Name: "WdPostRecover",
         Cost: resources.Resources{
             Cpu: 1,
diff --git a/tasks/window/submit_task.go b/tasks/window/submit_task.go
index 623f30572..b5ce172ff 100644
--- a/tasks/window/submit_task.go
+++ b/tasks/window/submit_task.go
@@ -16,6 +16,7 @@ import (
     "github.com/filecoin-project/curio/harmony/harmonydb"
     "github.com/filecoin-project/curio/harmony/harmonytask"
     "github.com/filecoin-project/curio/harmony/resources"
+    "github.com/filecoin-project/curio/harmony/taskhelp"
     "github.com/filecoin-project/curio/lib/chainsched"
     "github.com/filecoin-project/curio/lib/multictladdr"
     "github.com/filecoin-project/curio/lib/promise"
@@ -183,7 +184,7 @@ func (w *WdPostSubmitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonyta

 func (w *WdPostSubmitTask) TypeDetails() harmonytask.TaskTypeDetails {
     return harmonytask.TaskTypeDetails{
-        Max: 128,
+        Max: taskhelp.Max(128),
         Name: "WdPostSubmit",
         Cost: resources.Resources{
             Cpu: 0,
diff --git a/tasks/winning/inclusion_check_task.go b/tasks/winning/inclusion_check_task.go
index 94df4afb6..a36afafe2
100644 --- a/tasks/winning/inclusion_check_task.go +++ b/tasks/winning/inclusion_check_task.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/lotus/chain/types" ) @@ -88,7 +89,7 @@ func (i *InclusionCheckTask) CanAccept(ids []harmonytask.TaskID, engine *harmony func (i *InclusionCheckTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ - Max: 1, + Max: taskhelp.Max(1), Name: "WinInclCheck", Cost: resources.Resources{ Cpu: 1, diff --git a/tasks/winning/winning_task.go b/tasks/winning/winning_task.go index f1c350c30..182af4604 100644 --- a/tasks/winning/winning_task.go +++ b/tasks/winning/winning_task.go @@ -25,6 +25,7 @@ import ( "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" + "github.com/filecoin-project/curio/harmony/taskhelp" "github.com/filecoin-project/curio/lib/ffiselect" "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/promise" @@ -540,7 +541,7 @@ func (t *WinPostTask) TypeDetails() harmonytask.TaskTypeDetails { return harmonytask.TaskTypeDetails{ Name: "WinPost", - Max: t.max, + Max: taskhelp.Max(t.max), // We're not allowing retry to be conservative. Retry in winningPoSt done badly can lead to slashing, and // that is generally worse than not mining a block. In general the task code is heavily defensive, and diff --git a/web/api/webrpc/storage_stats.go b/web/api/webrpc/storage_stats.go index 18e20cd67..12f7aa194 100644 --- a/web/api/webrpc/storage_stats.go +++ b/web/api/webrpc/storage_stats.go @@ -2,8 +2,14 @@ package webrpc import ( "context" + "net/url" + "strings" "time" + "github.com/samber/lo" + "github.com/snadrus/must" + + "github.com/filecoin-project/curio/lib/paths" "github.com/filecoin-project/curio/lib/storiface" "github.com/filecoin-project/lotus/chain/types" @@ -72,19 +78,43 @@ type StorageGCMarks struct { Approved bool `db:"approved"` ApprovedAt *time.Time `db:"approved_at"` + CanSeal bool `db:"can_seal"` + CanStore bool `db:"can_store"` + + Urls string `db:"urls"` + // db ignored TypeName string `db:"-"` + PathType string `db:"-"` } func (a *WebRPC) StorageGCMarks(ctx context.Context) ([]StorageGCMarks, error) { var marks []StorageGCMarks - err := a.deps.DB.Select(ctx, &marks, `SELECT sp_id, sector_num, sector_filetype, storage_id, created_at, approved, approved_at FROM storage_removal_marks ORDER BY created_at DESC`) + err := a.deps.DB.Select(ctx, &marks, ` + SELECT m.sp_id, m.sector_num, m.sector_filetype, m.storage_id, m.created_at, m.approved, m.approved_at, sl.can_seal, sl.can_store, sl.urls + FROM storage_removal_marks m LEFT JOIN storage_path sl ON m.storage_id = sl.storage_id + ORDER BY created_at DESC`) if err != nil { return nil, err } for i, m := range marks { marks[i].TypeName = storiface.SectorFileType(m.FileType).String() + + var pathRole []string + if m.CanSeal { + pathRole = append(pathRole, "Scratch") + } + if m.CanStore { + pathRole = append(pathRole, "Store") + } + marks[i].PathType = strings.Join(pathRole, "/") + + us := paths.UrlsFromString(m.Urls) + us = lo.Map(us, func(u string, _ int) string { + return must.One(url.Parse(u)).Host + }) + marks[i].Urls = strings.Join(us, ", ") } return marks, nil diff --git a/web/static/gc/gc-marks.mjs 
b/web/static/gc/gc-marks.mjs index 98c87c20d..cef0d977d 100644 --- a/web/static/gc/gc-marks.mjs +++ b/web/static/gc/gc-marks.mjs @@ -32,6 +32,7 @@ class StorageGCStats extends LitElement { Address Sector Number Storage Path + Storage Type File Type Marked At Approved @@ -42,7 +43,15 @@ class StorageGCStats extends LitElement { f0${entry.Actor} ${entry.SectorNum} - ${entry.StorageID} + +
+ ${entry.StorageID} +
+
+ ${entry.Urls} +
+ + ${entry.PathType} ${entry.TypeName} ${entry.CreatedAt} diff --git a/web/static/gc/index.html b/web/static/gc/index.html index 1599813b0..a2d3516f8 100644 --- a/web/static/gc/index.html +++ b/web/static/gc/index.html @@ -10,6 +10,7 @@ +

Storage GC Info

@@ -34,6 +35,7 @@

Marked for GC

+
diff --git a/web/static/task/index.html b/web/static/task/index.html index 3b3be41aa..0536bbaea 100644 --- a/web/static/task/index.html +++ b/web/static/task/index.html @@ -10,6 +10,7 @@ +

Task Details

@@ -39,6 +40,7 @@

Recent Failures

+
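
Note on the recurring Max: taskhelp.Max(N) change: task types now express their concurrency cap as a limiter value (SDRTask.max and TaskUnsealSdr.max become harmonytask.Limiter) instead of a bare int. The real Limiter interface and taskhelp.Max live in harmony/harmonytask and harmony/taskhelp and are not shown in this diff; the sketch below only illustrates the general idea of wrapping an integer cap, and its method names (Instances, TryAdd, Release) are assumptions, not the actual API.

// Illustrative sketch only: hypothetical stand-ins for harmonytask.Limiter
// and taskhelp.Max. The real definitions are in the Curio harmony packages.
package taskhelpsketch

import "sync/atomic"

// Limiter is what a scheduler could consult before starting another
// instance of a task type. (Hypothetical interface shape.)
type Limiter interface {
	Instances() int // how many instances are currently running
	TryAdd() bool   // reserve a slot if the cap allows it
	Release()       // give a reserved slot back
}

// counting is a simple cap-based limiter, roughly what wrapping a plain
// integer (Max: taskhelp.Max(1)) could amount to.
type counting struct {
	cap    int32
	active int32
}

// Max wraps an integer cap into a Limiter, mirroring how the diff turns
// `Max: 1` into `Max: taskhelp.Max(1)`. A cap of 0 is treated as unlimited.
func Max(n int) Limiter { return &counting{cap: int32(n)} }

func (c *counting) Instances() int { return int(atomic.LoadInt32(&c.active)) }

func (c *counting) TryAdd() bool {
	for {
		cur := atomic.LoadInt32(&c.active)
		if c.cap > 0 && cur >= c.cap {
			return false // cap reached, scheduler should not start another
		}
		if atomic.CompareAndSwapInt32(&c.active, cur, cur+1) {
			return true
		}
	}
}

func (c *counting) Release() { atomic.AddInt32(&c.active, -1) }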
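
For orientation while reading the new unseal tasks and the PipelineGC change: they drive rows through a sectors_unseal_pipeline table whose migration is not part of this diff. The struct below only collects the columns the queries above rely on; it is inferred, not the authoritative schema, and the column types are guesses.

// Reading aid only: column set inferred from the queries in this diff
// (task_unseal_sdr.go, task_unseal_decode.go, pipeline_meta_gc.go).
package unsealsketch

// unsealPipelineRow mirrors how the new tasks move a sector through the table:
//   - SDRKeyRegen claims a row via task_id_unseal_sdr and sets after_unseal_sdr,
//   - UnsealDecode claims it via task_id_decode_sector and sets after_decode_sector,
//   - PipelineGC deletes the row once both after_* flags are TRUE.
type unsealPipelineRow struct {
	SpID         int64 `db:"sp_id"`
	SectorNumber int64 `db:"sector_number"`
	RegSealProof int64 `db:"reg_seal_proof"`

	TaskIDUnsealSDR *int64 `db:"task_id_unseal_sdr"` // NULL until claimed by SDRKeyRegen
	AfterUnsealSDR  bool   `db:"after_unseal_sdr"`

	TaskIDDecodeSector *int64 `db:"task_id_decode_sector"` // NULL until claimed by UnsealDecode
	AfterDecodeSector  bool   `db:"after_decode_sector"`
}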
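
The scrub, SDR-key-regen and decode tasks all schedule work the same way from IAmBored: select unclaimed rows, pick one at random so that several nodes polling the cluster spread out, then claim it by writing the task id inside the AddTask transaction. A shared helper along these lines would be one way to express that pattern; this is a hypothetical refactor sketch, not part of the change, though it only uses calls already exercised in the diff (tx.Select, tx.Exec, rand.N, harmonytask.AddTaskFunc).

// Hypothetical helper factoring out the "claim one unclaimed row at random"
// scheduling pattern shared by the new tasks in this diff.
package unsealsketch

import (
	"math/rand/v2"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/curio/harmony/harmonydb"
	"github.com/filecoin-project/curio/harmony/harmonytask"
)

type pipelineKey struct {
	SpID         int64 `db:"sp_id"`
	SectorNumber int64 `db:"sector_number"`
}

// claimOne runs selectSQL (must return sp_id and sector_number for unclaimed
// rows), picks one row at random and claims it with claimSQL
// (parameters: task id, sp_id, sector_number).
func claimOne(taskFunc harmonytask.AddTaskFunc, selectSQL, claimSQL string) {
	taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) {
		var rows []pipelineKey
		if err := tx.Select(&rows, selectSQL); err != nil {
			return false, xerrors.Errorf("getting tasks: %w", err)
		}
		if len(rows) == 0 {
			return false, nil // nothing to schedule, do not commit a task
		}

		// pick at random in case there are a bunch of schedules across the cluster
		row := rows[rand.N(len(rows))]

		if _, err := tx.Exec(claimSQL, id, row.SpID, row.SectorNumber); err != nil {
			return false, xerrors.Errorf("updating task id: %w", err)
		}
		return true, nil
	})
}

TaskUnsealSdr.schedule, for example, would then reduce to a single claimOne call with its existing SELECT (after_unseal_sdr = FALSE AND task_id_unseal_sdr IS NULL) and UPDATE (SET task_id_unseal_sdr = $1 WHERE sp_id = $2 AND sector_number = $3) statements.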