diff --git a/.gitignore b/.gitignore index 2dde72219..3763805be 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ tofu/main/*/*config .vscode # Others +.env scripts/utils/*.txt scripts/utils/export-metrics/metrics* scripts/utils/export-metrics/chunks_head diff --git a/Dockerfile b/Dockerfile index 583584629..169bab273 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG K6_VERSION="1.3.0" +ARG K6_VERSION="1.5.0" FROM golang:1.24-alpine3.22 AS builder # match whichever tagged version is used by the K6_VERSION docker image # see build layer at https://github.com/grafana/k6/blob/v${K6_VERSION}/Dockerfile diff --git a/Makefile b/Makefile index e7b21dac1..c2ca8dbbf 100644 --- a/Makefile +++ b/Makefile @@ -55,9 +55,9 @@ lint: internal/vendored/bin .PHONY: tofu-fmt-check tofu-fmt-check: internal/vendored/bin @for dir in $(TOFU_MAIN_DIRS); do \ - $(TOFU) -chdir=./tofu/main/$$dir fmt -check -recursive || exit 1; \ + $(TOFU) -chdir=./tofu/main/$$dir fmt -check -diff -recursive || exit 1; \ done - $(TOFU) -chdir=./tofu/modules fmt -check -recursive + $(TOFU) -chdir=./tofu/modules fmt -check -diff -recursive .PHONY: tofu-fmt tofu-fmt: internal/vendored/bin diff --git a/cmd/dartboard/main.go b/cmd/dartboard/main.go index 2d268958c..c8501c441 100644 --- a/cmd/dartboard/main.go +++ b/cmd/dartboard/main.go @@ -22,7 +22,7 @@ import ( "path/filepath" "github.com/rancher/dartboard/cmd/dartboard/subcommands" - "github.com/urfave/cli/v2" + cli "github.com/urfave/cli/v2" ) func main() { @@ -52,9 +52,22 @@ func main() { Action: subcommands.Deploy, Flags: []cli.Flag{ &cli.BoolFlag{ - Name: subcommands.ArgSkipApply, - Value: false, - Usage: "skip `tofu apply`, assume apply was already called", + Name: subcommands.ArgSkipApply, + Value: false, + Usage: "skip 'tofu apply', assume apply was already called", + DefaultText: "false", + }, + &cli.BoolFlag{ + Name: subcommands.ArgSkipCharts, + Value: false, + Usage: "skip 'helm install' for all charts, assume charts have already 
been installed for upstream and tester clusters", + DefaultText: "false", + }, + &cli.BoolFlag{ + Name: subcommands.ArgSkipRefresh, + Value: false, + Usage: "skip refresh phase for tofu resources, assume resources are refreshed and up-to-date", + DefaultText: "false", }, }, }, diff --git a/cmd/dartboard/subcommands/apply.go b/cmd/dartboard/subcommands/apply.go index d68847a3f..6c32e4aca 100644 --- a/cmd/dartboard/subcommands/apply.go +++ b/cmd/dartboard/subcommands/apply.go @@ -16,7 +16,7 @@ limitations under the License. package subcommands -import "github.com/urfave/cli/v2" +import cli "github.com/urfave/cli/v2" func Apply(cli *cli.Context) error { tf, _, err := prepare(cli) @@ -24,10 +24,13 @@ func Apply(cli *cli.Context) error { return err } - if err = tf.PrintVersion(cli.Context); err != nil { + if err = tf.PrintVersion(); err != nil { return err } - if err = tf.Apply(cli.Context); err != nil { + + skipRefresh := cli.Bool(ArgSkipRefresh) + + if err = tf.Apply(skipRefresh); err != nil { return err } diff --git a/cmd/dartboard/subcommands/deploy.go b/cmd/dartboard/subcommands/deploy.go index 89e1672b4..945d432b6 100644 --- a/cmd/dartboard/subcommands/deploy.go +++ b/cmd/dartboard/subcommands/deploy.go @@ -17,20 +17,28 @@ limitations under the License. 
package subcommands import ( - "crypto/tls" + "encoding/json" "fmt" "io" "log" - "net/http" "os" "path/filepath" + "regexp" + "sort" + "strconv" "strings" "github.com/rancher/dartboard/internal/dart" "github.com/rancher/dartboard/internal/helm" "github.com/rancher/dartboard/internal/kubectl" "github.com/rancher/dartboard/internal/tofu" - "github.com/urfave/cli/v2" + "github.com/rancher/shepherd/clients/rancher" + "github.com/rancher/shepherd/pkg/session" + cli "github.com/urfave/cli/v2" + + "github.com/sirupsen/logrus" + + "github.com/rancher/dartboard/internal/actions" ) type chart struct { @@ -46,76 +54,217 @@ func Deploy(cli *cli.Context) error { return err } - if !cli.Bool(ArgSkipApply) { - if err = tf.PrintVersion(cli.Context); err != nil { - return err - } - if err = tf.Apply(cli.Context); err != nil { - return err - } + if err = applyTofuChanges(cli, tf); err != nil { + return err } - clusters, err := tf.OutputClusters(cli.Context) + clusters, custom_clusters, err := tf.ParseOutputs() if err != nil { return err } // Helm charts tester := clusters["tester"] - - if err = chartInstall(tester.Kubeconfig, chart{"k6-files", "tester", "k6-files"}, nil); err != nil { - return err - } - if err = chartInstall(tester.Kubeconfig, chart{"mimir", "tester", "mimir"}, nil); err != nil { - return err - } - if err = chartInstall(tester.Kubeconfig, chart{"grafana-dashboards", "tester", "grafana-dashboards"}, nil); err != nil { - return err - } - if err = chartInstallGrafana(r, &tester); err != nil { - return err + if len(tester.Kubeconfig) > 0 && !cli.Bool(ArgSkipCharts) { + if err = installTesterCharts(tester, r); err != nil { + return err + } } upstream := clusters["upstream"] rancherVersion := r.ChartVariables.RancherVersion + rancherImageTag := "v" + rancherVersion if r.ChartVariables.RancherImageTagOverride != "" { rancherImageTag = r.ChartVariables.RancherImageTagOverride + image := "rancher/rancher" if r.ChartVariables.RancherImageOverride != "" { image = 
r.ChartVariables.RancherImageOverride } + err = importImageIntoK3d(tf, image+":"+rancherImageTag, upstream) if err != nil { return err } } - if err = chartInstallCertManager(r, &upstream); err != nil { + if !cli.Bool(ArgSkipCharts) { + if err = installUpstreamCharts(r, rancherImageTag, &upstream); err != nil { + return err + } + } + + // Setup rancher client + upstreamAdd, err := getAppAddressFor(upstream) + if err != nil { + return err + } + + rancherSession := session.NewSession() + rancherSession.CleanupEnabled = false + + log.Printf("Setting up Rancher Client's Config") + + rancherHost := strings.Split(upstreamAdd.Public.HTTPSURL, "://")[1] + rancherConfig := actions.NewRancherConfig(rancherHost, "", r.ChartVariables.AdminPassword, true) + + log.Printf("Setting up Rancher Client") + + rancherClient, err := actions.SetupRancherClient(&rancherConfig, r.ChartVariables.AdminPassword, rancherSession) + if err != nil { + return err + } + + if len(clusters) > 0 { + if err = importDownstreamClusters(r, clusters, rancherClient, &rancherConfig); err != nil { + return err + } + } + + logrus.Debugf("\nBEFORE CUSTOM CLUSTER LOGIC\n") + + if len(custom_clusters) > 0 { + logrus.Debugf("\nIN CUSTOM CLUSTER LOGIC\n") + + if err := actions.RegisterCustomClusters(r, custom_clusters, rancherClient, &rancherConfig); err != nil { + return err + } + } + + if len(r.ClusterTemplates) > 0 { + if err = setupHarvesterAndProvision(r, rancherClient); err != nil { + return err + } + + log.Printf("Provisioning Downstream Clusters") + + if err = actions.ProvisionDownstreamClusters(r, r.ClusterTemplates, rancherClient); err != nil { + return err + } + } + + return GetAccess(cli) +} + +// applyTofuChanges applies or outputs Terraform/Tofu changes +func applyTofuChanges(cli *cli.Context, tf *tofu.Tofu) error { + skipRefresh := cli.Bool(ArgSkipRefresh) + + if !cli.Bool(ArgSkipApply) { + if err := tf.PrintVersion(); err != nil { + return err + } + + return tf.Apply(skipRefresh) + } + + return 
tf.Output(nil, false) +} + +// installTesterCharts installs required charts on the tester cluster +func installTesterCharts(tester tofu.Cluster, r *dart.Dart) error { + if err := chartInstall(tester.Kubeconfig, chart{"k6-files", "tester", "k6-files"}, nil); err != nil { return err } - if err = chartInstallRancher(r, rancherImageTag, &upstream); err != nil { + + if err := chartInstall(tester.Kubeconfig, chart{"mimir", "tester", "mimir"}, nil); err != nil { return err } - if err = chartInstallRancherIngress(&upstream); err != nil { + + if err := chartInstall(tester.Kubeconfig, chart{"grafana-dashboards", "tester", "grafana-dashboards"}, nil); err != nil { return err } - if err = chartInstallCgroupsExporter(&upstream); err != nil { + + return chartInstallGrafana(r, &tester) +} + +// installUpstreamCharts installs Rancher and related charts on the upstream cluster +func installUpstreamCharts(r *dart.Dart, rancherImageTag string, upstream *tofu.Cluster) error { + if err := chartInstallCertManager(r, upstream); err != nil { return err } - // Wait for Rancher deployments to be complete, or subsequent steps may fail - if err = kubectl.WaitRancher(upstream.Kubeconfig); err != nil { + if err := chartInstallRancher(r, rancherImageTag, upstream); err != nil { return err } - if err = chartInstallRancherMonitoring(r, &upstream); err != nil { + + if err := chartInstallRancherIngress(upstream); err != nil { return err } - if err = importDownstreamClusters(r, rancherImageTag, tf, clusters); err != nil { + + if err := chartInstallCgroupsExporter(upstream); err != nil { return err } - return GetAccess(cli) + // Wait for Rancher deployments to be complete, or subsequent steps may fail + if err := kubectl.WaitRancher(upstream.Kubeconfig); err != nil { + return err + } + + return chartInstallRancherMonitoring(r, upstream) +} + +// importDownstreamClusters imports all downstream clusters into Rancher +func importDownstreamClusters(r *dart.Dart, clusters map[string]tofu.Cluster, 
rancherClient *rancher.Client, rancherConfig *rancher.Config) error { + downstreamClusters := []tofu.Cluster{} + + for k, v := range clusters { + if strings.HasPrefix(k, "downstream") { + v.Name = k + downstreamClusters = append(downstreamClusters, v) + } + } + + SortItemsNaturally(downstreamClusters, func(c tofu.Cluster) string { return c.Name }) + + jsonBytes, err := json.MarshalIndent(downstreamClusters, "", " ") + if err != nil { + return fmt.Errorf("error marshaling JSON: %w", err) + } + + fmt.Println("Import Clusters:\n", string(jsonBytes)) + + log.Printf("Importing Downstream Clusters") + + return actions.ImportDownstreamClusters(r, downstreamClusters, rancherClient, rancherConfig) +} + +// setupHarvesterAndProvision sets up Harvester client if needed +func setupHarvesterAndProvision(r *dart.Dart, rancherClient *rancher.Client) error { + if !strings.Contains(r.TofuMainDirectory, "harvester") { + return nil + } + + log.Printf("Parsing Harvester's Kubeconfig") + + var ( + kubeconfig *actions.Kubeconfig + err error + ) + + if len(r.TofuVariables["kubeconfig"].(string)) > 0 { + kubeconfig, err = actions.ParseKubeconfig(r.TofuVariables["kubeconfig"].(string)) + if err != nil { + return fmt.Errorf("error while parsing kubeconfig at %v: %w", r.TofuVariables["kubeconfig"].(string), err) + } + } + + log.Printf("Setting up Harvester Client's Config") + + harvesterHost := strings.Split(kubeconfig.Clusters[0].Cluster.Server, "://")[1] + harvesterConfig := actions.NewHarvesterConfig(harvesterHost, kubeconfig.Users[0].User.Token, "", true) + + log.Printf("Setting up Harvester Client") + + harvesterClient, err := actions.NewHarvesterImportClient(rancherClient, &harvesterConfig) + if err != nil { + return fmt.Errorf("error while setting up HarvesterImportClient with config %v: %w", harvesterConfig, err) + } + + log.Printf("Importing Harvester Cluster into Rancher for provisioning") + + return harvesterClient.ImportCluster() } func chartInstall(kubeConf string, chart chart, 
vals map[string]any, extraArgs ...string) error { @@ -135,6 +284,7 @@ func chartInstall(kubeConf string, chart chart, vals map[string]any, extraArgs . if err = helm.Install(kubeConf, path, name, namespace, vals, extraArgs...); err != nil { return fmt.Errorf("chart %s: %w", name, err) } + return nil } @@ -163,6 +313,7 @@ func chartInstallCertManager(r *dart.Dart, cluster *tofu.Cluster) error { namespace: "cert-manager", path: fmt.Sprintf("https://charts.jetstack.io/charts/cert-manager-v%s.tgz", r.ChartVariables.CertManagerVersion), } + return chartInstall(cluster.Kubeconfig, chartCertManager, map[string]any{"installCRDs": true}) } @@ -185,7 +336,6 @@ func chartInstallRancher(r *dart.Dart, rancherImageTag string, cluster *tofu.Clu if r.ChartVariables.ForcePrimeRegistry { rancherRepo = "https://charts.rancher.com/server-charts/prime/rancher-" } - } chartRancher := chart{ @@ -198,10 +348,12 @@ func chartInstallRancher(r *dart.Dart, rancherImageTag string, cluster *tofu.Clu if err != nil { return fmt.Errorf("chart %s: %w", chartRancher.name, err) } + rancherClusterName := clusterAdd.Public.Name rancherClusterURL := clusterAdd.Public.HTTPSURL var extraEnv []map[string]any + extraEnv = []map[string]any{ { "name": "CATTLE_SERVER_URL", @@ -221,6 +373,7 @@ func chartInstallRancher(r *dart.Dart, rancherImageTag string, cluster *tofu.Clu chartVals := getRancherValsJSON(r.ChartVariables.RancherImageOverride, rancherImageTag, r.ChartVariables.AdminPassword, rancherClusterName, extraEnv, r.ChartVariables.RancherReplicas) var extraArgs []string + if r.ChartVariables.RancherValues != "" { p, err := writeValuesFile(r.ChartVariables.RancherValues) if err != nil { @@ -245,9 +398,11 @@ func writeValuesFile(content string) (string, error) { if err != nil { return "", err } + if _, err := io.WriteString(p, content); err != nil { return "", err } + return p.Name(), nil } @@ -264,11 +419,13 @@ func chartInstallRancherIngress(cluster *tofu.Cluster) error { } var sans []string - if 
len(clusterAdd.Local.Name) > 0 { + if len(clusterAdd.Local.Name) > 0 && clusterAdd.Local.Name != clusterAdd.Public.Name { sans = append(sans, clusterAdd.Local.Name) } - if len(clusterAdd.Public.Name) > 0 { - sans = append(sans, clusterAdd.Public.Name) + + if len(sans) == 0 { + log.Printf("Skipping chart %q as no additional SANs are defined", chartRancherIngress.name) + return nil } chartVals := map[string]any{ @@ -281,7 +438,9 @@ func chartInstallRancherIngress(cluster *tofu.Cluster) error { func chartInstallRancherMonitoring(r *dart.Dart, cluster *tofu.Cluster) error { rancherMinorVersion := strings.Join(strings.Split(r.ChartVariables.RancherVersion, ".")[0:2], ".") + const chartPrefix = "https://github.com/rancher/charts/raw/release-v" + chartPath := fmt.Sprintf("%s%s", chartPrefix, rancherMinorVersion) if len(r.ChartVariables.RancherAppsRepoOverride) > 0 { @@ -305,6 +464,7 @@ func chartInstallRancherMonitoring(r *dart.Dart, cluster *tofu.Cluster) error { }, "systemDefaultRegistry": "", } + err := chartInstall(cluster.Kubeconfig, chartRancherMonitoringCRD, chartVals) if err != nil { return err @@ -321,6 +481,7 @@ func chartInstallRancherMonitoring(r *dart.Dart, cluster *tofu.Cluster) error { if err != nil { return fmt.Errorf("chart %s: %w", chartRancherMonitoring.name, err) } + mimirURL := clusterAdd.Public.HTTPURL + "/mimir/api/v1/push" chartVals = getRancherMonitoringValsJSON(cluster.ReserveNodeForMonitoring, mimirURL) @@ -333,12 +494,13 @@ func chartInstallCgroupsExporter(cluster *tofu.Cluster) error { } func getRancherMonitoringValsJSON(reserveNodeForMonitoring bool, mimirURL string) map[string]any { - nodeSelector := map[string]any{} tolerations := []any{} monitoringRestrictions := map[string]any{} + if reserveNodeForMonitoring { nodeSelector["monitoring"] = "true" + tolerations = append(tolerations, map[string]any{"key": "monitoring", "operator": "Exists", "effect": "NoSchedule"}) monitoringRestrictions["nodeSelector"] = nodeSelector 
monitoringRestrictions["tolerations"] = tolerations @@ -489,173 +651,38 @@ func getRancherValsJSON(rancherImageOverride, rancherImageTag, bootPwd, hostname return result } -func importDownstreamClusters(r *dart.Dart, rancherImageTag string, tf *tofu.Tofu, clusters map[string]tofu.Cluster) error { - - log.Print("Import downstream clusters") - - if err := importDownstreamClustersRancherSetup(r, clusters); err != nil { - return err - } - - buffer := 10 - clustersChan := make(chan string, buffer) - errorChan := make(chan error) - clustersCount := 0 - - for clusterName := range clusters { - if !strings.HasPrefix(clusterName, "downstream") { - continue - } - clustersCount++ - go importDownstreamClusterDo(r, rancherImageTag, tf, clusters, clusterName, clustersChan, errorChan) - } - - for { - if clustersCount == 0 { - return nil +// naturalCompare compares strings a and b in "natural" alphanumeric order +func naturalCompare(a, b string) bool { + tokenRegex := regexp.MustCompile(`\d+|\D+`) + // split into tokens of numbers + aTokens := tokenRegex.FindAllString(a, -1) + + bTokens := tokenRegex.FindAllString(b, -1) + for i := 0; i < len(aTokens) && i < len(bTokens); i++ { + aTok, bTok := aTokens[i], bTokens[i] + // If both tokens are numeric, compare as integers + if aNum, errA := strconv.Atoi(aTok); errA == nil { + if bNum, errB := strconv.Atoi(bTok); errB == nil { + if aNum != bNum { + return aNum < bNum + } + + continue // numbers are equal, move to next token + } } - select { - case err := <-errorChan: - return err - case completed := <-clustersChan: - log.Printf("Cluster %q imported successfully.\n", completed) - clustersCount-- + // Fallback to default lexicographic compare + if aTok != bTok { + return aTok < bTok } } + // If all shared tokens are equal, the shorter string is less + return len(aTokens) < len(bTokens) } -func importDownstreamClusterDo(r *dart.Dart, rancherImageTag string, tf *tofu.Tofu, clusters map[string]tofu.Cluster, clusterName string, ch chan<- 
string, errCh chan<- error) { - log.Print("Import cluster " + clusterName) - yamlFile, err := os.CreateTemp("", "scli-"+clusterName+"-*.yaml") - if err != nil { - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - defer os.Remove(yamlFile.Name()) - defer yamlFile.Close() - - clusterID, err := importClustersDownstreamGetYAML(clusters, clusterName, yamlFile) - if err != nil { - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - - downstream, ok := clusters[clusterName] - if !ok { - err := fmt.Errorf("error: cannot find access data for cluster %q", clusterName) - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - if r.ChartVariables.RancherImageTagOverride != "" { - err = importImageIntoK3d(tf, "rancher/rancher-agent:"+rancherImageTag, downstream) - if err != nil { - errCh <- fmt.Errorf("%s downstream k3d image import failed: %w", clusterName, err) - return - } - } - - if err := kubectl.Apply(downstream.Kubeconfig, yamlFile.Name()); err != nil { - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - - if err := kubectl.WaitForReadyCondition(clusters["upstream"].Kubeconfig, - "clusters.management.cattle.io", clusterID, "", "ready", 10); err != nil { - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - if err := kubectl.WaitForReadyCondition(clusters["upstream"].Kubeconfig, - "cluster.fleet.cattle.io", clusterName, "fleet-default", "ready", 10); err != nil { - errCh <- fmt.Errorf("%s import failed: %w", clusterName, err) - return - } - - err = kubectl.WaitForReadyCondition(downstream.Kubeconfig, "deployment", "rancher-webhook", "cattle-system", "available", 15) - if err != nil { - errCh <- fmt.Errorf("%s waiting for rancher-webhook failed: %w", clusterName, err) - return - } - if r.ChartVariables.DownstreamRancherMonitoring { - if err := chartInstallRancherMonitoring(r, &downstream); err != nil { - errCh <- fmt.Errorf("downstream monitoring 
installation on cluster %s failed: %w", clusterName, err) - return - } - } - ch <- clusterName -} - -func importDownstreamClustersRancherSetup(r *dart.Dart, clusters map[string]tofu.Cluster) error { - tester := clusters["tester"] - upstream := clusters["upstream"] - upstreamAdd, err := getAppAddressFor(upstream) - if err != nil { - return err - } - - downstreamClusters := []string{} - for clusterName := range clusters { - if strings.HasPrefix(clusterName, "downstream") { - downstreamClusters = append(downstreamClusters, clusterName) - } - } - importedClusterNames := strings.Join(downstreamClusters, ",") - - envVars := map[string]string{ - "BASE_URL": upstreamAdd.Public.HTTPSURL, - "BOOTSTRAP_PASSWORD": "admin", - "PASSWORD": r.ChartVariables.AdminPassword, - "IMPORTED_CLUSTER_NAMES": importedClusterNames, - } - - if err = kubectl.K6run(tester.Kubeconfig, "rancher/rancher_setup.js", envVars, nil, true, upstreamAdd.Local.HTTPSURL, false); err != nil { - return err - } - return nil -} - -func importClustersDownstreamGetYAML(clusters map[string]tofu.Cluster, name string, yamlFile *os.File) (clusterID string, err error) { - var status map[string]interface{} - - upstream := clusters["upstream"] - upstreamAdd, err := getAppAddressFor(upstream) - if err != nil { - return - } - - if status, err = kubectl.GetStatus(upstream.Kubeconfig, "clusters.provisioning.cattle.io", name, "fleet-default"); err != nil { - return - } - clusterID, ok := status["clusterName"].(string) - if !ok { - err = fmt.Errorf("error accessing fleet-default/%s clusters: no valid 'clusterName' in 'Status'", name) - return - } - - if status, err = kubectl.GetStatus(upstream.Kubeconfig, "clusterregistrationtokens.management.cattle.io", "default-token", clusterID); err != nil { - return - } - token, ok := status["token"].(string) - if !ok { - err = fmt.Errorf("error accessing %s/default-token clusterregistrationtokens: no valid 'token' in 'Status'", clusterID) - return - } - - url := 
fmt.Sprintf("%s/v3/import/%s_%s.yaml", upstreamAdd.Local.HTTPSURL, token, clusterID) - tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} - client := &http.Client{Transport: tr} - resp, err := client.Get(url) - if err != nil { - return - } - defer resp.Body.Close() - - _, err = io.Copy(yamlFile, resp.Body) - if err != nil { - return - } - if err = yamlFile.Sync(); err != nil { - return - } - - return +// SortItemsNaturally is a generic function that sorts a slice of a given type
+// by "Name" (any provided string) using natural order +func SortItemsNaturally[T any](items []T, getName func(T) string) { + sort.Slice(items, func(i, j int) bool { + return naturalCompare(getName(items[i]), getName(items[j])) + }) } diff --git a/cmd/dartboard/subcommands/destroy.go b/cmd/dartboard/subcommands/destroy.go index 4b87bb231..af890bd62 100644 --- a/cmd/dartboard/subcommands/destroy.go +++ b/cmd/dartboard/subcommands/destroy.go @@ -16,13 +16,25 @@ limitations under the License. 
package subcommands -import "github.com/urfave/cli/v2" +import ( + "fmt" + + "github.com/rancher/dartboard/internal/actions" + cli "github.com/urfave/cli/v2" +) func Destroy(cli *cli.Context) error { - tf, _, err := prepare(cli) + tf, r, err := prepare(cli) + if err != nil { + return err + } + + clusterStatePath := fmt.Sprintf("%s/%s", r.TofuWorkspaceStatePath, actions.ClustersStateFile) + // TODO: Implement a flag to -only- destroy ClusterStatus state + Clusters registered in Rancher + err = actions.DestroyClusterState(clusterStatePath) if err != nil { return err } - return tf.Destroy(cli.Context) + return tf.Destroy() } diff --git a/cmd/dartboard/subcommands/getaccess.go b/cmd/dartboard/subcommands/getaccess.go index 602b4b51a..c9f32710b 100644 --- a/cmd/dartboard/subcommands/getaccess.go +++ b/cmd/dartboard/subcommands/getaccess.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/rancher/dartboard/internal/tofu" - "github.com/urfave/cli/v2" + cli "github.com/urfave/cli/v2" ) func GetAccess(cli *cli.Context) error { @@ -30,7 +30,7 @@ func GetAccess(cli *cli.Context) error { return err } - clusters, err := tf.OutputClusters(cli.Context) + clusters, _, err := tf.ParseOutputs() if err != nil { return err } @@ -39,6 +39,7 @@ func GetAccess(cli *cli.Context) error { tester := clusters["tester"] downstreams := make(map[string]tofu.Cluster) + for k, v := range clusters { if strings.HasPrefix(k, "downstream") { downstreams[k] = v @@ -46,6 +47,7 @@ func GetAccess(cli *cli.Context) error { } upstreamAddresses, err := getAppAddressFor(upstream) + rancherURL := "" if err == nil { rancherURL = upstreamAddresses.Local.HTTPSURL @@ -57,9 +59,11 @@ func GetAccess(cli *cli.Context) error { fmt.Println() printAccessDetails(r, "UPSTREAM", upstream, rancherURL) + for name, downstream := range downstreams { printAccessDetails(r, strings.ToUpper(name), downstream, "") } + printAccessDetails(r, "TESTER", tester, "") return nil diff --git a/cmd/dartboard/subcommands/load.go 
b/cmd/dartboard/subcommands/load.go index 2edea9e49..d0cee5eeb 100644 --- a/cmd/dartboard/subcommands/load.go +++ b/cmd/dartboard/subcommands/load.go @@ -25,7 +25,7 @@ import ( "github.com/rancher/dartboard/internal/dart" "github.com/rancher/dartboard/internal/kubectl" "github.com/rancher/dartboard/internal/tofu" - "github.com/urfave/cli/v2" + cli "github.com/urfave/cli/v2" ) // TODO: Make this command idempotent. Get count (# of resources) matching some unique identifier. @@ -37,7 +37,7 @@ func Load(cli *cli.Context) error { return err } - clusters, err := tf.OutputClusters(cli.Context) + clusters, _, err := tf.ParseOutputs() if err != nil { return err } @@ -54,6 +54,7 @@ func Load(cli *cli.Context) error { if clusterName != "upstream" && !strings.HasPrefix(clusterName, "downstream") { continue } + if err := loadConfigMapAndSecrets(r, tester.Kubeconfig, clusterName, clusterData); err != nil { return err } @@ -67,6 +68,7 @@ func Load(cli *cli.Context) error { if err := loadProjects(r, tester.Kubeconfig, "upstream", clusters["upstream"]); err != nil { return err } + return nil } @@ -89,19 +91,23 @@ func loadConfigMapAndSecrets(r *dart.Dart, kubeconfig string, clusterName string } log.Printf("Load resources on cluster %q (#ConfigMaps: %s, #Secrets: %s)\n", clusterName, configMapCount, secretCount) + if err := kubectl.K6run(kubeconfig, "generic/create_k8s_resources.js", envVars, tags, true, clusterData.KubernetesAddresses.Tunnel, false); err != nil { return fmt.Errorf("failed loading ConfigMaps and Secrets on cluster %q: %w", clusterName, err) } + return nil } func loadRolesAndUsers(r *dart.Dart, kubeconfig string, clusterName string, clusterData tofu.Cluster) error { roleCount := strconv.Itoa(r.TestVariables.TestRoles) userCount := strconv.Itoa(r.TestVariables.TestUsers) + clusterAdd, err := getAppAddressFor(clusterData) if err != nil { return fmt.Errorf("failed loading Roles and Users on cluster %q: %w", clusterName, err) } + envVars := map[string]string{ 
"BASE_URL": clusterAdd.Public.HTTPSURL, "USERNAME": "admin", @@ -122,15 +128,18 @@ func loadRolesAndUsers(r *dart.Dart, kubeconfig string, clusterName string, clus if err := kubectl.K6run(kubeconfig, "generic/create_roles_users.js", envVars, tags, true, clusterAdd.Local.HTTPSURL, false); err != nil { return fmt.Errorf("failed loading Roles and Users on cluster %q: %w", clusterName, err) } + return nil } func loadProjects(r *dart.Dart, kubeconfig string, clusterName string, clusterData tofu.Cluster) error { projectCount := strconv.Itoa(r.TestVariables.TestProjects) + clusterAdd, err := getAppAddressFor(clusterData) if err != nil { return fmt.Errorf("failed loading Projects on cluster %q: %w", clusterName, err) } + envVars := map[string]string{ "BASE_URL": clusterAdd.Public.HTTPSURL, "USERNAME": "admin", @@ -148,5 +157,6 @@ func loadProjects(r *dart.Dart, kubeconfig string, clusterName string, clusterDa if err := kubectl.K6run(kubeconfig, "generic/create_projects.js", envVars, tags, true, clusterAdd.Local.HTTPSURL, false); err != nil { return fmt.Errorf("failed loading Projects on cluster %q: %w", clusterName, err) } + return nil } diff --git a/cmd/dartboard/subcommands/reapply.go b/cmd/dartboard/subcommands/reapply.go index 1748d7622..64f39c5ba 100644 --- a/cmd/dartboard/subcommands/reapply.go +++ b/cmd/dartboard/subcommands/reapply.go @@ -16,7 +16,7 @@ limitations under the License. package subcommands -import "github.com/urfave/cli/v2" +import cli "github.com/urfave/cli/v2" func Reapply(c *cli.Context) error { err := Destroy(c) diff --git a/cmd/dartboard/subcommands/redeploy.go b/cmd/dartboard/subcommands/redeploy.go index 70dda3c2e..7a09abf2c 100644 --- a/cmd/dartboard/subcommands/redeploy.go +++ b/cmd/dartboard/subcommands/redeploy.go @@ -16,7 +16,7 @@ limitations under the License. 
package subcommands -import "github.com/urfave/cli/v2" +import cli "github.com/urfave/cli/v2" func Redeploy(c *cli.Context) error { err := Destroy(c) diff --git a/cmd/dartboard/subcommands/utils.go b/cmd/dartboard/subcommands/utils.go index 65e5b11cf..776ad99cc 100644 --- a/cmd/dartboard/subcommands/utils.go +++ b/cmd/dartboard/subcommands/utils.go @@ -18,11 +18,12 @@ package subcommands import ( "fmt" + "path/filepath" "github.com/rancher/dartboard/internal/docker" "github.com/rancher/dartboard/internal/k3d" "github.com/rancher/dartboard/internal/vendored" - "github.com/urfave/cli/v2" + cli "github.com/urfave/cli/v2" "github.com/rancher/dartboard/internal/dart" "github.com/rancher/dartboard/internal/kubectl" @@ -30,8 +31,10 @@ import ( ) const ( - ArgDart = "dart" - ArgSkipApply = "skip-apply" + ArgDart = "dart" + ArgSkipApply = "skip-apply" + ArgSkipCharts = "skip-charts" + ArgSkipRefresh = "skip-refresh" ) type clusterAddress struct { @@ -48,10 +51,21 @@ type clusterAddresses struct { // prepare prepares tofu for execution and parses a dart file from the command line context func prepare(cli *cli.Context) (*tofu.Tofu, *dart.Dart, error) { dartPath := cli.String(ArgDart) + d, err := dart.Parse(dartPath) if err != nil { return nil, nil, err } + + tofuWorkspaceStatePath := fmt.Sprintf("%s/%s_config", d.TofuMainDirectory, d.TofuWorkspace) + + absPath, err := filepath.Abs(tofuWorkspaceStatePath) + if err != nil { + return nil, nil, err + } + + d.TofuWorkspaceStatePath = absPath + fmt.Printf("Using dart: %s\n", dartPath) fmt.Printf("OpenTofu main directory: %s\n", d.TofuMainDirectory) fmt.Printf("Using Tofu workspace: %s\n", d.TofuWorkspace) @@ -61,25 +75,30 @@ func prepare(cli *cli.Context) (*tofu.Tofu, *dart.Dart, error) { return nil, nil, err } - tf, err := tofu.New(cli.Context, d.TofuVariables, d.TofuMainDirectory, d.TofuWorkspace, d.TofuParallelism, true) + tf, err := tofu.New(d.TofuVariables, d.TofuMainDirectory, d.TofuWorkspace, d.TofuParallelism, true) if err 
!= nil { return nil, nil, err } + return tf, d, nil } // printAccessDetails prints to console addresses and kubeconfig file paths of a cluster for user convenience func printAccessDetails(r *dart.Dart, name string, cluster tofu.Cluster, rancherURL string) { fmt.Printf("*** %s CLUSTER\n", name) + if rancherURL != "" { fmt.Printf(" Rancher UI: %s (admin/%s)\n", rancherURL, r.ChartVariables.AdminPassword) } + fmt.Println(" Kubernetes API:") fmt.Printf("export KUBECONFIG=%q\n", cluster.Kubeconfig) fmt.Printf("kubectl config use-context %q\n", cluster.Context) + for node, command := range cluster.NodeAccessCommands { fmt.Printf(" Node %s: %q\n", node, command) } + fmt.Println() } @@ -101,6 +120,7 @@ func getAppAddressFor(cluster tofu.Cluster) (clusterAddresses, error) { localNetworkName = loadBalancerName } } + localNetworkHTTPPort := add.Tunnel.HTTPPort if localNetworkHTTPPort == 0 { localNetworkHTTPPort = add.Public.HTTPPort @@ -108,6 +128,7 @@ func getAppAddressFor(cluster tofu.Cluster) (clusterAddresses, error) { localNetworkHTTPPort = 80 } } + localNetworkHTTPSPort := add.Tunnel.HTTPSPort if localNetworkHTTPSPort == 0 { localNetworkHTTPSPort = add.Public.HTTPSPort @@ -125,6 +146,7 @@ func getAppAddressFor(cluster tofu.Cluster) (clusterAddresses, error) { clusterNetworkName = loadBalancerName } } + clusterNetworkHTTPPort := add.Public.HTTPPort if clusterNetworkHTTPPort == 0 { clusterNetworkHTTPPort = add.Private.HTTPPort @@ -132,6 +154,7 @@ func getAppAddressFor(cluster tofu.Cluster) (clusterAddresses, error) { clusterNetworkHTTPPort = 80 } } + clusterNetworkHTTPSPort := add.Public.HTTPSPort if clusterNetworkHTTPSPort == 0 { clusterNetworkHTTPSPort = add.Private.HTTPSPort @@ -167,5 +190,6 @@ func importImageIntoK3d(tf *tofu.Tofu, image string, cluster tofu.Cluster) error } } } + return nil } diff --git a/download-vendored-bin.sh b/download-vendored-bin.sh index 3b911c487..25f57059b 100755 --- a/download-vendored-bin.sh +++ b/download-vendored-bin.sh @@ -3,8 +3,8 @@ 
set -e OPENTOFU_VERSION=1.10.3 -KUBECTL_VERSION=1.33.4 -HELM_VERSION=3.18.5 +KUBECTL_VERSION=1.34.3 +HELM_VERSION=4.0.4 K3D_VERSION=5.7.4 GOOS=`go env GOOS` diff --git a/go.mod b/go.mod index 3dd9ab5fd..3d870ee33 100644 --- a/go.mod +++ b/go.mod @@ -1,23 +1,256 @@ module github.com/rancher/dartboard -go 1.24.0 +go 1.25.0 -toolchain go1.24.2 +replace ( + github.com/containerd/containerd => github.com/containerd/containerd v1.6.27 // for compatibilty with docker 20.10.x + github.com/crewjam/saml => github.com/rancher/saml v0.4.14-rancher3 + github.com/docker/distribution => github.com/docker/distribution v2.8.2+incompatible // rancher-machine requires a replace is set + github.com/docker/docker => github.com/docker/docker v20.10.27+incompatible // rancher-machine requires a replace is set + + github.com/openshift/api => github.com/openshift/api v0.0.0-20191219222812-2987a591a72c + github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20200521150516-05eb9880269c + github.com/rancher/rancher/pkg/apis => github.com/rancher/rancher/pkg/apis v0.0.0-20251216090958-18e340c45365 + github.com/rancher/rancher/pkg/client => github.com/rancher/rancher/pkg/client v0.0.0-20250212213103-5c3550f55322 + github.com/rancher/tests/actions => github.com/git-ival/rancher-tests/actions v0.0.0-20260124000123-5a84324608ac + + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 + go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.28.0 + go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.28.0 + go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v1.3.1 
+ + helm.sh/helm/v3 => github.com/rancher/helm/v3 v3.16.1-rancher1 + k8s.io/api => k8s.io/api v0.34.1 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.34.1 + k8s.io/apimachinery => k8s.io/apimachinery v0.34.1 + k8s.io/apiserver => k8s.io/apiserver v0.34.1 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.34.1 + k8s.io/client-go => k8s.io/client-go v0.34.1 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.34.1 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.34.1 + k8s.io/code-generator => k8s.io/code-generator v0.34.1 + k8s.io/component-base => k8s.io/component-base v0.34.1 + k8s.io/component-helpers => k8s.io/component-helpers v0.34.1 + k8s.io/controller-manager => k8s.io/controller-manager v0.34.1 + k8s.io/cri-api => k8s.io/cri-api v0.34.1 + k8s.io/cri-client => k8s.io/cri-client v0.34.1 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.34.1 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.34.1 + k8s.io/endpointslice => k8s.io/endpointslice v0.34.1 + k8s.io/kms => k8s.io/kms v0.34.1 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.34.1 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.34.1 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b + k8s.io/kube-proxy => k8s.io/kube-proxy v0.34.1 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.34.1 + k8s.io/kubectl => k8s.io/kubectl v0.34.1 + k8s.io/kubelet => k8s.io/kubelet v0.34.1 + k8s.io/kubernetes => k8s.io/kubernetes v1.32.2 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.34.1 + k8s.io/metrics => k8s.io/metrics v0.34.1 + k8s.io/mount-utils => k8s.io/mount-utils v0.34.1 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.34.1 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.34.1 + oras.land/oras-go => oras.land/oras-go v1.2.2 // for docker 20.10.x compatibility + sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.6 +) require ( - 
al.essio.dev/pkg/shellescape v1.5.0 - github.com/qase-tms/qase-go/pkg/qase-go v1.0.4 - github.com/qase-tms/qase-go/qase-api-client v1.2.0 - github.com/qase-tms/qase-go/qase-api-v2-client v1.1.3 + al.essio.dev/pkg/shellescape v1.6.0 + github.com/harvester/harvester v1.7.0 + github.com/matoous/go-nanoid/v2 v2.1.0 + github.com/minio/pkg v1.7.5 + github.com/qase-tms/qase-go/pkg/qase-go v1.0.7 + github.com/qase-tms/qase-go/qase-api-client v1.2.1 + github.com/qase-tms/qase-go/qase-api-v2-client v1.1.4 + github.com/rancher/rancher/pkg/apis v0.0.0 + github.com/rancher/shepherd v0.0.0-20260122211220-e4fc12acd2be + github.com/rancher/tests/actions v0.0.0-20260123231955-aec9bc41f9c9 + github.com/rancher/tests/interoperability v0.0.0-20260123231955-aec9bc41f9c9 github.com/sirupsen/logrus v1.9.3 - github.com/urfave/cli/v2 v2.27.1 + github.com/urfave/cli/v2 v2.27.6 + gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.34.3 + k8s.io/apimachinery v0.34.3 + k8s.io/client-go v12.0.0+incompatible + kubevirt.io/api v1.6.0 ) require ( - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + dario.cat/mergo v1.0.1 // indirect + emperror.dev/errors v0.8.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect + github.com/apparentlymart/go-cidr v1.1.0 // indirect + github.com/aws/aws-sdk-go v1.55.8 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect + github.com/cisco-open/operator-tools v0.37.0 // indirect + 
github.com/containerd/cgroups/v3 v3.0.2 // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/creasty/defaults v1.5.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v27.1.1+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/camelcase v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // 
indirect + github.com/harvester/harvester-network-controller v1.6.0-rc3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/iancoleman/orderedmap v0.3.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6 // indirect + github.com/k8snetworkplumbingwg/whereabouts v0.9.2 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/kube-logging/logging-operator v0.0.0-20250424202944-7e1f9aad6e21 // indirect + github.com/kube-logging/logging-operator/pkg/sdk v0.12.0 // indirect + github.com/kubereboot/kured v1.13.1 // indirect + github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/longhorn/longhorn-manager v1.8.1 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/mount v0.3.3 // indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + 
github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/runc v1.2.1 // indirect + github.com/openshift/custom-resource-status v1.1.2 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/sftp v1.13.5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/rancher/aks-operator v1.13.0-rc.4 // indirect + github.com/rancher/ali-operator v1.13.0-rc.2 // indirect + github.com/rancher/apiserver v0.8.0 // indirect + github.com/rancher/eks-operator v1.13.0-rc.4 // indirect + github.com/rancher/fleet/pkg/apis v0.15.0-alpha.4 // indirect + github.com/rancher/gke-operator v1.13.0-rc.3 // indirect + github.com/rancher/lasso v0.2.5 // indirect + github.com/rancher/norman v0.8.1 // indirect + github.com/rancher/rancher v0.0.0-20251223145833-24cecce3325e // indirect + github.com/rancher/rke v1.8.5 // indirect + github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20250930163923-f2c9e60b1078 // indirect + github.com/rancher/tfp-automation v0.0.0-20251219210947-f4a1a9882c29 // indirect + github.com/rancher/wrangler v1.1.2 // indirect + github.com/rancher/wrangler/v3 v3.3.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - golang.org/x/sys v0.34.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.8.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/stretchr/testify 
v1.11.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect + go.etcd.io/etcd/api/v3 v3.6.5 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.5 // indirect + go.etcd.io/etcd/client/v2 v2.305.21 // indirect + go.etcd.io/etcd/client/v3 v3.6.5 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.13.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/validator.v2 v2.0.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apiserver v0.34.1 // indirect + k8s.io/cli-runtime v0.34.1 // indirect + k8s.io/component-base v0.34.1 // indirect + k8s.io/component-helpers v0.34.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-aggregator v0.34.1 // indirect + k8s.io/kube-openapi v0.31.9 // indirect + k8s.io/kubectl v0.34.1 // indirect + k8s.io/kubernetes v1.34.2 // indirect + k8s.io/pod-security-admission v0.34.1 // indirect + k8s.io/utils 
v0.0.0-20251002143259-bc988d571ff4 // indirect + kubevirt.io/containerized-data-importer-api v1.62.0 // indirect + kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect + sigs.k8s.io/cli-utils v0.37.2 // indirect + sigs.k8s.io/cluster-api v1.10.6 // indirect + sigs.k8s.io/controller-runtime v0.22.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 7112cce0a..5bd452d4b 100644 --- a/go.sum +++ b/go.sum @@ -1,44 +1,931 @@ -al.essio.dev/pkg/shellescape v1.5.0 h1:7oTvSsQ5kg9WksA9O58y9wjYnY4jP0CL82/Q8WLUGKk= -al.essio.dev/pkg/shellescape v1.5.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +emperror.dev/errors v0.8.1 h1:UavXZ5cSX/4u9iyvH6aDcuGkVjeexUGJ7Ij7G4VfQT0= +emperror.dev/errors v0.8.1/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= 
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak= +github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= +github.com/apparentlymart/go-cidr v1.1.0/go.mod 
h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/cisco-open/operator-tools v0.37.0 h1:qAkAbWQA+aeWHZOqpWL8FuiZ42cWWUZ0OmWfr3TBeGw= +github.com/cisco-open/operator-tools v0.37.0/go.mod h1:SaMi2aMNILC5Wrqw9m92ptN5InMH2Zt3CSKkGlzyqfQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/containerd v1.6.27 h1:xGPieCivG5JfO6Sm4XYml/aruv0ru39gN4Wtl7tqeIA= +github.com/containerd/containerd v1.6.27/go.mod h1:uWjQMLorvbCqqDRTte+n8HnW82DIaT7mhvAiB1rOez4= +github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU= +github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creasty/defaults v1.5.2 h1:/VfB6uxpyp6h0fr7SPp7n8WJBoV8jfxQXPCnkVSjyls= +github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.27+incompatible h1:Id/ZooynV4ZlD6xX20RCd3SR0Ikn7r4QZDa2ECK2TgA= +github.com/docker/docker v20.10.27+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= 
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/git-ival/rancher-tests/actions v0.0.0-20260124000123-5a84324608ac h1:iDE89pJZIXg3ecqa9hNZv7p2qXuYcCE/fnr4uyjDo2Y= +github.com/git-ival/rancher-tests/actions v0.0.0-20260124000123-5a84324608ac/go.mod h1:B3PHdZzeP5zl722nCQ9dYZG2kbhw+g+qnsFqL0bOnzM= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod 
h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/harvester/harvester v1.7.0 h1:lkhMOXSFJnhWNn7AUtZoHXHs0D236LaghV/TD7zPs+M= +github.com/harvester/harvester v1.7.0/go.mod h1:FNTp5MvfucfLLTsoGIz3J9y5Yu9KDqVCMoi6Ua8h7hw= +github.com/harvester/harvester-network-controller v1.6.0-rc3 h1:lHpsxalwOboV5UaBL7T/XcV3Ib86VioyEju2J11/kUk= +github.com/harvester/harvester-network-controller v1.6.0-rc3/go.mod h1:j+tWQZTf5aIg+rcvv4atUIu7LB7rfKtFfOfIv1MxcIs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJn+Ichc= +github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6 h1:lhSaboKtal0XF2yqSw2BqNB1vUL4+a4BFe39I9G/yiM= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= +github.com/k8snetworkplumbingwg/whereabouts v0.9.2 h1:M/LhO2FwXN4C3WPkD0FoT6JACraSW4XrU7mxX9xZd8o= +github.com/k8snetworkplumbingwg/whereabouts v0.9.2/go.mod h1:Ot92NvSldSfWW4+VYvxtB9FduUUDF7RBiFw6zqad0Ss= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kube-logging/logging-operator v0.0.0-20250424202944-7e1f9aad6e21 h1:F1iUW743+2WuXzuyZCqfAHVbY4WOXWvJmAI+r+Xq6ps= +github.com/kube-logging/logging-operator v0.0.0-20250424202944-7e1f9aad6e21/go.mod h1:GyFf3HUstR74QouU+BkbY093JUheb1GlVRhkClHtdYU= +github.com/kube-logging/logging-operator/pkg/sdk v0.12.0 h1:4T8hmf8qoiUmde0FbFLzfizR3vc+y778F8m8iHCA3JQ= +github.com/kube-logging/logging-operator/pkg/sdk v0.12.0/go.mod h1:NAja47Hl9bIxrzzLE4kj2LZYXUKiQSdyVPrrbAeuqSE= +github.com/kubereboot/kured v1.13.1 h1:mEOtzWRaLNt4ekzHMsuTH1/+6iAuQ3qKg4c7FJc8AXE= +github.com/kubereboot/kured v1.13.1/go.mod h1:clIzvBID0k5PqWwm0K7NbovdRcaD45oEBMg9Qyah1uQ= +github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= +github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/liggitt/tabwriter 
v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/longhorn/longhorn-manager v1.8.1 h1:RWAfhAVxxLgQQR+C9VWLNf9oirdz14S69ZW7w/rfkTg= +github.com/longhorn/longhorn-manager v1.8.1/go.mod h1:qnTey0+/mV+oUXowygscE2dNkcnCtscjw7bVJG9F0SA= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE= +github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM= +github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 h1:YocNLcTBdEdvY3iDK6jfWXvEaM5OCKkjxPKoJRdB3Gg= +github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo= +github.com/minio/pkg v1.7.5 h1:UOUJjewE5zoaDPlCMJtNx/swc1jT1ZR+IajT7hrLd44= +github.com/minio/pkg v1.7.5/go.mod h1:mEfGMTm5Z0b5EGxKNuPwyb5A2d+CC/VlUyRj6RJtIwo= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 
h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod 
h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= +github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod 
h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= +github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod 
h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.1 h1:mQkmeFSUxqFaVmvIn1VQPeQIKpHFya5R07aJw0DKQa8= +github.com/opencontainers/runc v1.2.1/go.mod h1:/PXzF0h531HTMsYQnmxXkBD7YaGShm/2zcRB79dksUc= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/qase-tms/qase-go/pkg/qase-go v1.0.4 h1:I/Xj6nxHw7z9QHQeyTp4a9mdEENql+Ssblwemuy/Qq4= -github.com/qase-tms/qase-go/pkg/qase-go v1.0.4/go.mod h1:eZi3JSnhgm8vnlWzgi770ylRpb1GD/yaVuO3OJBVrx4= -github.com/qase-tms/qase-go/qase-api-client v1.2.0 h1:wAOA90XpkbvW3ewPU2jQK/n717HUw84uDQN+GlxhSZ0= 
-github.com/qase-tms/qase-go/qase-api-client v1.2.0/go.mod h1:Za2AZQxuqkyc09vqHSlnceLjc40zFAAXMwhPPAMptMo= -github.com/qase-tms/qase-go/qase-api-v2-client v1.1.3 h1:UK3O2HJUb9KydoYekkZFMM+pqmf3qEZHgohdgWWh5FU= -github.com/qase-tms/qase-go/qase-api-v2-client v1.1.3/go.mod h1:qyIUXyT9ein6Ii2+IUW3R0eXWAJzVj44II05RRMR+wg= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0 h1:Ee6zu4IR/WKYEcYHL4+gbC1A3GAzlHWxSjjMyRVBHYw= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0/go.mod h1:hY5yoQsoIalncoxYqXXCDL5y7f+GGYYlW9Bi2IdU5KY= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/qase-tms/qase-go/pkg/qase-go v1.0.7 h1:WBWnLTn6XSxD5JcPiIAw6nmcjLM7kS5EwgJuoPQ7qnk= +github.com/qase-tms/qase-go/pkg/qase-go v1.0.7/go.mod h1:I9AU24ez6V5DWMTrQQLuemW7FTK0Mrqlj98ewG6TINY= +github.com/qase-tms/qase-go/qase-api-client v1.2.1 h1:UHXJx8iwvsQKISOpkkMCvjpiEVQfnj6f+zPLthfLyLA= 
+github.com/qase-tms/qase-go/qase-api-client v1.2.1/go.mod h1:Za2AZQxuqkyc09vqHSlnceLjc40zFAAXMwhPPAMptMo= +github.com/qase-tms/qase-go/qase-api-v2-client v1.1.4 h1:Hrs9oGO/YaQGxea8GUnuXHNru0nsf8w9MKB6ruW8x00= +github.com/qase-tms/qase-go/qase-api-v2-client v1.1.4/go.mod h1:qyIUXyT9ein6Ii2+IUW3R0eXWAJzVj44II05RRMR+wg= +github.com/rancher/aks-operator v1.13.0-rc.4 h1:tc7p2gZmRg4c6VBwWTQJYwmh1hlN68kftjoBIdGCnqw= +github.com/rancher/aks-operator v1.13.0-rc.4/go.mod h1:1ZjZB6zGHK+NGchN9KLplq+xPxRRi+q6Uzet5bjFwxo= +github.com/rancher/ali-operator v1.13.0-rc.2 h1:a0biHGez+Np9XybJVh3yKN4RGPdaCzfM6D6cAXJac6o= +github.com/rancher/ali-operator v1.13.0-rc.2/go.mod h1:s5HznpxsN9LsgtX6u5UoW9dZNKnDLuXcwzQRAEoDcog= +github.com/rancher/apiserver v0.8.0 h1:yCXsCa67X/Y///NKJ/pq6pv6wmt3hq/OIzBaIna2grY= +github.com/rancher/apiserver v0.8.0/go.mod h1:Wb+Z8ktNyIuqt9hw30geFBQFJQucWTqgu6trxxMtcyM= +github.com/rancher/eks-operator v1.13.0-rc.4 h1:XowN8+m3QZTIBOBLzar4frtz0xtREb9kcX6KXhF4eas= +github.com/rancher/eks-operator v1.13.0-rc.4/go.mod h1:SbaKX2ttFWCxGOYkrKYeWH/6E4oToq2rRTcrMa2Mmdk= +github.com/rancher/fleet/pkg/apis v0.15.0-alpha.4 h1:l6pdMToVQSuhFaNmENuY1+v+5lltwHvw92zbt7iK6sU= +github.com/rancher/fleet/pkg/apis v0.15.0-alpha.4/go.mod h1:srlFTlA6425rCPRELTdtFcZM8wDNPaqW4O4aj6sArs4= +github.com/rancher/gke-operator v1.13.0-rc.3 h1:a6U+7+XIbJPH2CE7/vFUx6RpThNbFl7fqIqkEBb6zmA= +github.com/rancher/gke-operator v1.13.0-rc.3/go.mod h1:TroxpmqMh63Hf4H5bC+2GYcgOCQp9kIUDfyKdNAMo6Q= +github.com/rancher/lasso v0.2.5 h1:K++lWDDdfeN98Ixc1kCfUq0/q6tLjoHN++Np6QntXw0= +github.com/rancher/lasso v0.2.5/go.mod h1:71rWfv+KkdSmSxZ9Ly5QYhxAu0nEUcaq9N2ByjcHqAM= +github.com/rancher/norman v0.8.1 h1:114Rdt3xsWTUdqaxlIR2F6PJT0ls01vF0Rfglustgow= +github.com/rancher/norman v0.8.1/go.mod h1:vZ5qL+eKodJ7zOMQYdl6jwMrSFrqTKpA+KYSFEKew2M= +github.com/rancher/rancher v0.0.0-20251223145833-24cecce3325e h1:HExd4+6+bF4aYv0Wj/eORxc65Y4einqWaDqvJE+yjys= +github.com/rancher/rancher 
v0.0.0-20251223145833-24cecce3325e/go.mod h1:ORjiG9PXFw0JT3+CtC5Ih34joRgwCgedRhQQbos8Nag= +github.com/rancher/rancher/pkg/apis v0.0.0-20251216090958-18e340c45365 h1:Zt1buGZaSHLDM+jdIFV6/9xAKKRQRCaMupVowNBccvs= +github.com/rancher/rancher/pkg/apis v0.0.0-20251216090958-18e340c45365/go.mod h1:RER1KZXlywCmu3RSbyM6+4S3rbgnqzNrJuuX4l4wsz0= +github.com/rancher/rke v1.8.5 h1:dyEPIHRcatYaNPtTZhJH5Omu7BE6+UeJ8GNjw5X6hqY= +github.com/rancher/rke v1.8.5/go.mod h1:EaAkq796bgmmx/s15Xz0TvCkBOfepMOqO8tFockOmis= +github.com/rancher/shepherd v0.0.0-20260122211220-e4fc12acd2be h1:x7+nxYLCqygc5T/efzfdbXORuV89VBJvrK9Wbe2vNJE= +github.com/rancher/shepherd v0.0.0-20260122211220-e4fc12acd2be/go.mod h1:SJtW8Jqv0rphZzsGnvB965YdyR2FqFtB+TbbzVLt8F4= +github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20250930163923-f2c9e60b1078 h1:1MJSgYkgXhr/Zc5idJkKa10SiBQd0HVtbxVOBoghlzY= +github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20250930163923-f2c9e60b1078/go.mod h1:CV2Soy/Skw8/SA9dDJVgpeHxoEdtjYkNpNy6xvvC5kA= +github.com/rancher/tests/interoperability v0.0.0-20260123231955-aec9bc41f9c9 h1:97yTPrfwD1th16/fgTeM1ntGVgbVVgQa9zJCORYggJo= +github.com/rancher/tests/interoperability v0.0.0-20260123231955-aec9bc41f9c9/go.mod h1:Wh4P1n9hmbWd8Hpzw5M/A7X6a7V8PMoLxEpjxi/9uXU= +github.com/rancher/tfp-automation v0.0.0-20251219210947-f4a1a9882c29 h1:K7gKqQy8w4oPlm85x/t3WjMhHPi0nQTX49RMzJnkGK0= +github.com/rancher/tfp-automation v0.0.0-20251219210947-f4a1a9882c29/go.mod h1:RjYzdxC3g1MOBrAR4/F210qnW+oqrD0HWNwAR1XYnCM= +github.com/rancher/wrangler v1.1.2 h1:oXbXo9k7y/H4drUpb4RM1c++vT9O3rpoNEfyusGykiU= +github.com/rancher/wrangler v1.1.2/go.mod h1:2k9MyhlBdjcutcBGoOJSUAz0HgDAXnMjv81d3n/AaQc= +github.com/rancher/wrangler/v3 v3.3.1 h1:YFqRfhxjuLNudUrvWrn+64wUPZ8pnn2KWbTsha75JLg= +github.com/rancher/wrangler/v3 v3.3.1/go.mod h1:0D4kZDaOUkP5W2Zfww/75tQwF9w7kaZgzpZG+4XQDAI= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= +github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= -github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= 
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/etcd/api/v3 v3.6.5 h1:pMMc42276sgR1j1raO/Qv3QI9Af/AuyQUW6CBAWuntA= +go.etcd.io/etcd/api/v3 v3.6.5/go.mod h1:ob0/oWA/UQQlT1BmaEkWQzI0sJ1M0Et0mMpaABxguOQ= +go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= +go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= +go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA= +go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8= +go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U= +go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 
h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod 
h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod 
h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term 
v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.13.0 
h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools 
v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 
h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.34.1 
h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= +k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= +k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= +k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/code-generator v0.34.1/go.mod h1:DeWjekbDnJWRwpw3s0Jat87c+e0TgkxoR4ar608yqvg= +k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= +k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +k8s.io/component-helpers v0.34.1 h1:gWhH3CCdwAx5P3oJqZKb4Lg5FYZTWVbdWtOI8n9U4XY= +k8s.io/component-helpers v0.34.1/go.mod h1:4VgnUH7UA/shuBur+OWoQC0xfb69sy/93ss0ybZqm3c= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-aggregator v0.34.1 
h1:WNLV0dVNoFKmuyvdWLd92iDSyD/TSTjqwaPj0U9XAEU= +k8s.io/kube-aggregator v0.34.1/go.mod h1:RU8j+5ERfp0h+gIvWtxRPfsa5nK7rboDm8RST8BJfYQ= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= +k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A= +k8s.io/kubernetes v1.32.2 h1:mShetlA102UpjRVSGzB+5vjJwy8oPy8FMWrkTH5f37o= +k8s.io/kubernetes v1.32.2/go.mod h1:tiIKO63GcdPRBHW2WiUFm3C0eoLczl3f7qi56Dm1W8I= +k8s.io/pod-security-admission v0.34.1 h1:XsP5eh8qCj69hK0a5TBMU4Ed7Ckn8JEmmbk/iepj+XM= +k8s.io/pod-security-admission v0.34.1/go.mod h1:87yY36Gxc8Hjx24FxqAD5zMY4k0tP0u7Mu/XuwXEbmg= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.6.0 h1:ZO3Dh0b24PCdCe76uLD4cRusXKrcarOzt229UFly7PQ= +kubevirt.io/api v1.6.0/go.mod h1:p66fEy/g79x7VpgUwrkUgOoG2lYs5LQq37WM6JXMwj4= +kubevirt.io/containerized-data-importer-api v1.62.0 h1:gu8s/H1qJ40xpFH+B4Hr1X6W22IbzazOXHvRAUwrESw= +kubevirt.io/containerized-data-importer-api v1.62.0/go.mod h1:VGp35wxpLXU18b7cnEpmcThI3AjcZUSfg/Zfql44U4o= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= +sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg= +sigs.k8s.io/cli-utils v0.37.2/go.mod 
h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco= +sigs.k8s.io/cluster-api v1.10.6 h1:0bnLTpT47R8KIvGZ3tTGek0DwMIc8fZi6IxA3Mlqq4g= +sigs.k8s.io/cluster-api v1.10.6/go.mod h1:vymugs3Jm3gxHVMuVqdzgp6BVy/SEqQVyUg/UM7bnT4= +sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= +sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml 
v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/actions/batchrunner.go b/internal/actions/batchrunner.go new file mode 100644 index 000000000..46aee2207 --- /dev/null +++ b/internal/actions/batchrunner.go @@ -0,0 +1,198 @@ +package actions + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/rancher/dartboard/internal/dart" + "github.com/rancher/dartboard/internal/tofu" + "github.com/rancher/shepherd/clients/rancher" + shepherddefaults "github.com/rancher/shepherd/extensions/defaults" + "github.com/sirupsen/logrus" +) + +var maxWorkers = runtime.GOMAXPROCS(0) * 2 + +// Mutex to sync map[string]*ClusterStatus mutations and state file writes +var stateMutex sync.Mutex + +// stateUpdate is a simple "signaling" struct for the file writer goroutine to persist state +type stateUpdate struct { + Completed time.Time + Name string + Stage Stage +} + +type jobResult struct { + err error + skipped bool +} + +type JobDataTypes interface { + tofu.Cluster | dart.ClusterTemplate | tofu.CustomCluster +} + +// SequencedBatchRunner contains all the channels and WaitGroups needed +// for processing a batch of Clusters concurrently with sequenced state updates +type SequencedBatchRunner[J JobDataTypes] struct { + // Channel to sequence updates + seqCh chan struct{} + // Channel for all write requests + Updates chan stateUpdate + // Channel for batch jobs + Jobs chan J + // Channel for each individual job's results/error output + Results chan jobResult + + // WaitGroups for Job workers and the Updates channel which sequences writes to the ClustarStatus state file + wgWorkers sync.WaitGroup + wgWriter sync.WaitGroup +} + +// NewSequencedBatchRunner constructs a new runner for one batch +func NewSequencedBatchRunner[J JobDataTypes](batchSize int) *SequencedBatchRunner[J] { + br := &SequencedBatchRunner[J]{ + Updates: make(chan stateUpdate, batchSize*3), + seqCh: 
make(chan struct{}, 1),
+		Jobs:    make(chan J, batchSize),
+		Results: make(chan jobResult, batchSize),
+	}
+	// Seed the sequencer so the first consumer can proceed immediately.
+	br.seqCh <- struct{}{}
+
+	return br
+}
+
+// Run executes the batch: it starts the state-file writer goroutine, spawns the
+// worker pool, enqueues every job, then collects exactly one result per job.
+// It returns the first job error encountered (after shutting the runner down).
+// Between batches it sleeps only when fewer than half of the jobs were skipped,
+// since many skips mean little real work was done and no cool-down is needed.
+func (br *SequencedBatchRunner[J]) Run(batch []J,
+	statuses map[string]*ClusterStatus, statePath string, client *rancher.Client,
+	config *rancher.Config,
+) error {
+	// Start the single writer that serializes state-file updates.
+	br.wgWriter.Add(1)
+
+	go br.writer(statuses, statePath)
+
+	// Spawn workers
+	for range maxWorkers {
+		br.wgWorkers.Add(1)
+
+		go br.worker(statuses, client, config)
+	}
+
+	// Enqueue all jobs, then close the channel so workers exit once it drains.
+	for _, c := range batch {
+		br.Jobs <- c
+	}
+
+	close(br.Jobs)
+
+	// Collect one result per enqueued job, counting the skipped ones.
+	numSkipped := 0
+
+	for range batch {
+		res := <-br.Results
+		if res.err != nil {
+			// Clean up in case of error (Results is buffered to batchSize, so
+			// in-flight workers can never block on send while we shut down).
+			br.Wait()
+			return fmt.Errorf("error during batch run: %w", res.err)
+		}
+
+		if res.skipped {
+			numSkipped++
+		}
+	}
+
+	// Decide once, after all results are in, whether to cool down before the
+	// next batch. The original recomputed this flag on every iteration, but
+	// only the final value was ever read.
+	if numSkipped < len(batch)/2 {
+		// If fewer than half were skipped, sleep briefly
+		logrus.Infof("Batch done: %d/%d skipped; sleeping before next batch.", numSkipped, len(batch))
+		time.Sleep(shepherddefaults.TwoMinuteTimeout)
+	} else {
+		// Otherwise, go straight into the next batch; use the same logger as
+		// the sleep branch for consistent output (was fmt.Printf).
+		logrus.Infof("Batch done: %d/%d skipped; continuing without sleep.", numSkipped, len(batch))
+	}
+
+	// Clean up
+	br.Wait()
+
+	return nil
+}
+
+// Wait blocks until all workers exit, then shuts down the writer and closes
+// the result channel. Call exactly once per batch (channels are closed here).
+func (br *SequencedBatchRunner[J]) Wait() {
+	br.wgWorkers.Wait()
+	close(br.Updates)
+	br.wgWriter.Wait()
+	close(br.Results)
+}
+
+// writer serializes all state updates and persists immediately
+func (br *SequencedBatchRunner[J]) writer(statuses map[string]*ClusterStatus, statePath string) {
+	defer br.wgWriter.Done()
+
+	for u := range br.Updates {
+		stateMutex.Lock()
+		logrus.Debugf("\nIN WRITER\n")
+		
logrus.Debugf("\n%v\n", statuses) + cs := statuses[u.Name] + + cs.Stage = u.Stage + switch u.Stage { + case StageNew: + cs.New = true + case StageInfra: + cs.Infra = true + case StageCreated: + cs.Created = true + case StageImported: + cs.Imported = true + case StageProvisioned: + cs.Provisioned = true + case StageRegistered: + cs.Registered = true + } + + if err := SaveClusterState(statePath, statuses); err != nil { + logrus.Errorf("failed to save state for %s:%s: %v", u.Name, u.Stage, err) + } + + stateMutex.Unlock() + } +} + +// worker consumes Jobs, calls the proper handler based on the Job Type, signals Updates and Results +func (br *SequencedBatchRunner[J]) worker(statuses map[string]*ClusterStatus, + client *rancher.Client, config *rancher.Config, +) { + defer br.wgWorkers.Done() + + for job := range br.Jobs { + var ( + skipped bool + err error + ) + // Use type assertion to determine which function to call + + switch typedJob := any(job).(type) { + case tofu.Cluster: + skipped, err = importClusterWithRunner(br, typedJob, statuses, client, config) + case dart.ClusterTemplate: + skipped, err = provisionClusterWithRunner(br, typedJob, statuses, client) + case tofu.CustomCluster: + skipped, err = registerCustomClusterWithRunner(br, typedJob, statuses, client, config) + default: + err = fmt.Errorf("unsupported job type: %T", job) + } + + br.Results <- jobResult{skipped: skipped, err: err} + + if err != nil { + return + } + } +} diff --git a/internal/actions/clusters.go b/internal/actions/clusters.go new file mode 100644 index 000000000..dbc818ca3 --- /dev/null +++ b/internal/actions/clusters.go @@ -0,0 +1,531 @@ +package actions + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/rancher/dartboard/internal/dart" + "github.com/rancher/dartboard/internal/tofu" + apisV1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + "github.com/rancher/tests/actions/clusters" + rancherclusters "github.com/rancher/tests/actions/clusters" + 
"github.com/rancher/tests/actions/machinepools" + "github.com/rancher/tests/actions/registries" + "github.com/rancher/tests/actions/reports" + "github.com/sirupsen/logrus" + + "github.com/rancher/shepherd/clients/rancher" + mgmtv3 "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + v1 "github.com/rancher/shepherd/clients/rancher/v1" + shepherdclusters "github.com/rancher/shepherd/extensions/clusters" + "github.com/rancher/shepherd/extensions/defaults" + shepherddefaults "github.com/rancher/shepherd/extensions/defaults" + stevetypes "github.com/rancher/shepherd/extensions/defaults/stevetypes" + "github.com/rancher/shepherd/extensions/etcdsnapshot" + "github.com/rancher/shepherd/extensions/kubeconfig" + nodestat "github.com/rancher/shepherd/extensions/nodes" + "github.com/rancher/shepherd/extensions/tokenregistration" + "github.com/rancher/shepherd/extensions/workloads/pods" + shepherdnodes "github.com/rancher/shepherd/pkg/nodes" + "github.com/rancher/shepherd/pkg/wait" + + "github.com/rancher/tests/actions/psact" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" +) + +const ( + psactRancherPrivileged string = "rancher-privileged" +) + +// TODELETE: +// type CustomClusterTemplate struct { +// dart.ClusterTemplate +// Nodes []tofu.Node `yaml:"nodes"` +// } + +// ConvertConfigToClusterConfig converts the ClusterConfig from (user) input to a rancher/tests ClusterConfig +func ConvertConfigToClusterConfig(config *dart.ClusterConfig) *rancherclusters.ClusterConfig { + var newConfig rancherclusters.ClusterConfig + for i := range config.MachinePools { + newConfig.MachinePools[i].Pools = config.MachinePools[i].Pools + newConfig.MachinePools[i].MachinePoolConfig = machinepools.MachinePoolConfig{ + NodeRoles: machinepools.NodeRoles{ + ControlPlane: config.MachinePools[i].MachinePoolConfig.ControlPlane, + Etcd: 
config.MachinePools[i].MachinePoolConfig.Etcd, + Worker: config.MachinePools[i].MachinePoolConfig.Worker, + Quantity: config.MachinePools[i].MachinePoolConfig.Quantity, + }, + } + } + + newConfig.Providers = &[]string{config.Provider} + newConfig.PSACT = psactRancherPrivileged + + return &newConfig +} + +// GetK3SRKE2Cluster is a "helper" functions that takes a rancher client, and the rke2 cluster config as parameters. +// This function registers a delete cluster function with a wait.WatchWait to ensure the cluster is removed cleanly +func GetK3SRKE2Cluster(client *rancher.Client, config *rancher.Config, cluster *apisV1.Cluster) (*v1.SteveAPIObject, error) { + clusterObjs, err := client.Steve.SteveType(shepherdclusters.ProvisioningSteveResourceType).ListAll(nil) + if err != nil { + return nil, err + } + + for _, obj := range clusterObjs.Data { + if obj.Name == cluster.Name { + ctx := context.Background() + + err = kwait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 2*time.Minute, true, func(_ context.Context) (done bool, err error) { + client, err = client.ReLoginForConfig(config) + if err != nil { + return false, err + } + + _, err = client.Steve.SteveType(shepherdclusters.ProvisioningSteveResourceType).ByID(obj.ID) + if err != nil { + return false, nil + } + + return true, nil + }) + if err != nil { + return nil, err + } + + return &obj, nil + } + } + + return nil, fmt.Errorf("Could not find expected Cluster") +} + +// CreateK3SRKE2Cluster is a "helper" functions that takes a rancher client, and the rke2 cluster config as parameters. 
+// This function registers a delete cluster function with a wait.WatchWait to ensure the cluster is removed cleanly +func CreateK3SRKE2Cluster(client *rancher.Client, config *rancher.Config, cluster *apisV1.Cluster) (*v1.SteveAPIObject, error) { + clusterObj, err := client.Steve.SteveType(shepherdclusters.ProvisioningSteveResourceType).Create(cluster) + if err != nil { + return nil, err + } + + ctx := context.Background() + + err = kwait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 2*time.Minute, true, func(_ context.Context) (done bool, err error) { + client, err = client.ReLoginForConfig(config) + if err != nil { + return false, err + } + + _, err = client.Steve.SteveType(shepherdclusters.ProvisioningSteveResourceType).ByID(clusterObj.ID) + if err != nil { + return false, nil + } + + return true, nil + }) + if err != nil { + return nil, err + } + + client.Session.RegisterCleanupFunc(func() error { + adminClient, err := rancher.NewClient(client.RancherConfig.AdminToken, client.Session) + if err != nil { + return err + } + + provKubeClient, err := adminClient.GetKubeAPIProvisioningClient() + if err != nil { + return err + } + + watchInterface, err := provKubeClient.Clusters(clusterObj.ObjectMeta.Namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + clusterObj.ObjectMeta.Name, + TimeoutSeconds: &shepherddefaults.WatchTimeoutSeconds, + }) + if err != nil { + return err + } + + client, err = client.ReLogin() + if err != nil { + return err + } + + err = client.Steve.SteveType(shepherdclusters.ProvisioningSteveResourceType).Delete(clusterObj) + if err != nil { + return err + } + + return wait.WatchWait(watchInterface, func(event watch.Event) (ready bool, err error) { + cluster := event.Object.(*apisV1.Cluster) + if event.Type == watch.Error { + return false, fmt.Errorf("there was an error deleting cluster %s: %w", cluster.Name, err) + } else if event.Type == watch.Deleted { + return true, nil + } else if cluster == nil { + 
return true, nil + } + + return false, nil + }) + }) + + return clusterObj, nil +} + +// createRegistrationCommand is a helper for rke2/k3s custom clusters to create the registration command with advanced options configured per node +func createRegistrationCommand(command, publicIP, privateIP string, machinePool apisV1.RKEMachinePool) string { + if len(publicIP) > 0 { + command += fmt.Sprintf(" --address %s", publicIP) + } + + if len(privateIP) > 0 { + command += fmt.Sprintf(" --internal-address %s", privateIP) + } + + for labelKey, labelValue := range machinePool.Labels { + command += fmt.Sprintf(" --label %s=%s", labelKey, labelValue) + } + + for _, taint := range machinePool.Taints { + command += fmt.Sprintf(" --taints %s=%s:%s", taint.Key, taint.Value, taint.Effect) + } + + return command +} + +// RegisterCustomCluster registers a non-rke1 cluster using a 3rd party client for its nodes +func RegisterCustomCluster(client *rancher.Client, steveObject *v1.SteveAPIObject, cluster *apisV1.Cluster, nodes []tofu.Node) (*v1.SteveAPIObject, error) { + quantityPerPool := []int32{} + rolesPerPool := []string{} + + fmt.Println("Building role oommand") + + for _, pool := range cluster.Spec.RKEConfig.MachinePools { + var finalRoleCommand string + if pool.ControlPlaneRole { + finalRoleCommand += " --controlplane" + } + + if pool.EtcdRole { + finalRoleCommand += " --etcd" + } + + if pool.WorkerRole { + finalRoleCommand += " --worker" + } + + quantityPerPool = append(quantityPerPool, *pool.Quantity) + rolesPerPool = append(rolesPerPool, finalRoleCommand) + } + + customCluster, err := client.Steve.SteveType(etcdsnapshot.ProvisioningSteveResouceType).ByID(steveObject.ID) + if err != nil { + return nil, err + } + + clusterStatus := &apisV1.ClusterStatus{} + + err = v1.ConvertToK8sType(customCluster.Status, clusterStatus) + if err != nil { + return nil, err + } + + token, err := tokenregistration.GetRegistrationToken(client, clusterStatus.ClusterName) + if err != nil { + return 
nil, err + } + + kubeProvisioningClient, err := client.GetKubeAPIProvisioningClient() + if err != nil { + return nil, err + } + + result, err := kubeProvisioningClient.Clusters(cluster.Namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.Name, + TimeoutSeconds: &defaults.WatchTimeoutSeconds, + }) + if err != nil { + return nil, err + } + + checkFunc := shepherdclusters.IsProvisioningClusterReady + + var command string + + totalNodesObserved := 0 + + for poolIndex, poolRole := range rolesPerPool { + for nodeIndex := range int(quantityPerPool[poolIndex]) { + node := nodes[totalNodesObserved+nodeIndex] + + logrus.Infof("Execute Registration Command for node named %s", node.Name) + logrus.Infof("Linux pool detected, using bash...") + + command = fmt.Sprintf("%s %s", token.InsecureNodeCommand, poolRole) + command = createRegistrationCommand(command, node.PublicIP, node.PrivateIP, cluster.Spec.RKEConfig.MachinePools[poolIndex]) + logrus.Infof("Node command: %s", command) + + nodeSSHKey, err := tofu.ReadBytesFromPath(node.SSHKeyPath) + if err != nil { + return nil, fmt.Errorf("error getting node's SSH Key from %s: %w", node.SSHKeyPath, err) + } + + shepherdNode := shepherdnodes.Node{ + PublicIPAddress: node.PublicIP, + PrivateIPAddress: node.PrivateIP, + SSHUser: node.SSHUser, + SSHKey: nodeSSHKey, + } + + output, err := shepherdNode.ExecuteCommand(command) + if err != nil { + return nil, err + } + + logrus.Info(output) + } + + totalNodesObserved += int(quantityPerPool[poolIndex]) + } + + err = wait.WatchWait(result, checkFunc) + if err != nil { + return nil, err + } + + registeredCluster, err := client.Steve.SteveType(stevetypes.Provisioning).ByID(cluster.Namespace + "/" + cluster.Name) + + return registeredCluster, err +} + +// VerifyClusterCreated confirms that the cluster resource exists +func VerifyClusterCreated(client *rancher.Client, name, namespace string) (bool, error) { + obj, _, err := 
shepherdclusters.GetProvisioningClusterByName(client, name, namespace) + if err != nil { + return false, fmt.Errorf("API error verifying creation of %s: %w", name, err) + } + + return obj != nil, nil +} + +// VerifyClusterImported confirms that the cluster resource is in Ready state +func VerifyClusterImported(client *rancher.Client, name, namespace string) (bool, error) { + obj, _, err := shepherdclusters.GetProvisioningClusterByName(client, name, namespace) + if err != nil { + return false, fmt.Errorf("error getting Cluster by verifying import of %s: %w", name, err) + } + // In case the Cluster object was not successfully created in the first place + if obj == nil { + return false, nil + } + + return obj.Status.Ready, nil +} + +// setupClusterVerification prepares clients and watches for cluster verification +func setupClusterVerification(client *rancher.Client, config *rancher.Config, cluster *v1.SteveAPIObject) (*rancher.Client, error) { + client, err := client.ReLoginForConfig(config) + if err != nil { + return nil, err + } + + fmt.Printf("\nRELOGIN CLIENT: %v\n", client) + fmt.Printf("\nRANCHER CONFIG: %v\n", config) + fmt.Printf("\nCLUSTER OBJECT: %v\n", cluster) + + adminClient, err := rancher.NewClientForConfig(client.RancherConfig.AdminToken, config, client.Session) + if err != nil { + return nil, err + } + + kubeProvisioningClient, err := adminClient.GetKubeAPIProvisioningClient() + reports.TimeoutClusterReport(cluster, err) + + if err != nil { + return nil, err + } + + watchInterface, err := kubeProvisioningClient.Clusters(cluster.Namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + cluster.Name, + TimeoutSeconds: &defaults.WatchTimeoutSeconds, + }) + reports.TimeoutClusterReport(cluster, err) + + if err != nil { + return nil, err + } + + checkFunc := shepherdclusters.IsProvisioningClusterReady + err = wait.WatchWait(watchInterface, checkFunc) + reports.TimeoutClusterReport(cluster, err) + + if err != nil { + return 
nil, err
+	}
+
+	return adminClient, nil
+}
+
+// verifyClusterBasics checks service account tokens and machine readiness
+func verifyClusterBasics(client *rancher.Client, cluster *v1.SteveAPIObject) error {
+	clusterToken, err := clusters.CheckServiceAccountTokenSecret(client, cluster.Name)
+	reports.TimeoutClusterReport(cluster, err)
+
+	if err != nil {
+		return err
+	}
+
+	if !clusterToken {
+		return fmt.Errorf("serviceAccountTokenSecret does not exist in this cluster: %s", cluster.Name)
+	}
+
+	err = nodestat.AllMachineReady(client, cluster.ID, defaults.ThirtyMinuteTimeout)
+	reports.TimeoutClusterReport(cluster, err)
+
+	return err
+}
+
+// VerifyCluster validates that a non-rke1 cluster and its resources are in a good state, matching a given config.
+func VerifyCluster(client *rancher.Client, config *rancher.Config, cluster *v1.SteveAPIObject) error {
+	adminClient, err := setupClusterVerification(client, config, cluster)
+	if err != nil {
+		return err
+	}
+
+	if err := verifyClusterBasics(client, cluster); err != nil {
+		return err
+	}
+
+	status := &apisV1.ClusterStatus{}
+	err = v1.ConvertToK8sType(cluster.Status, status)
+	reports.TimeoutClusterReport(cluster, err)
+
+	if err != nil {
+		return err
+	}
+
+	clusterSpec := &apisV1.ClusterSpec{}
+	err = v1.ConvertToK8sType(cluster.Spec, clusterSpec)
+	reports.TimeoutClusterReport(cluster, err)
+
+	if err != nil {
+		return err
+	}
+
+	// A PSACT template is configured; verify it by deploying nginx. (The
+	// original also tested len(...) > 0, which is identical to != "" for a
+	// string.)
+	if clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName != "" {
+		err := psact.CreateNginxDeployment(client, status.ClusterName, clusterSpec.DefaultPodSecurityAdmissionConfigurationTemplateName)
+		reports.TimeoutClusterReport(cluster, err)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	if clusterSpec.RKEConfig.Registries != nil {
+		for registryName := range clusterSpec.RKEConfig.Registries.Configs {
+			havePrefix, err := registries.CheckAllClusterPodsForRegistryPrefix(client, status.ClusterName, 
registryName)
+			reports.TimeoutClusterReport(cluster, err)
+
+			// Check the API error before interpreting havePrefix: a false
+			// havePrefix alongside a non-nil err just means the check itself
+			// failed (the original tested havePrefix first).
+			if err != nil {
+				return err
+			}
+
+			if !havePrefix {
+				return fmt.Errorf("found cluster (%s) pods that do not have the expected registry prefix %s", status.ClusterName, registryName)
+			}
+		}
+	}
+
+	if clusterSpec.LocalClusterAuthEndpoint.Enabled {
+		mgmtClusterObject, err := adminClient.Management.Cluster.ByID(status.ClusterName)
+		reports.TimeoutClusterReport(cluster, err)
+
+		if err != nil {
+			return err
+		}
+
+		err = VerifyACE(adminClient, mgmtClusterObject)
+		if err != nil {
+			return err
+		}
+	}
+
+	podErrors := pods.StatusPods(client, status.ClusterName)
+	if len(podErrors) > 0 {
+		errorStrings := make([]string, len(podErrors))
+		for i, e := range podErrors {
+			errorStrings[i] = e.Error()
+		}
+
+		return fmt.Errorf("encountered pod errors: %s", strings.Join(errorStrings, ";"))
+	}
+
+	return nil
+}
+
+// VerifyACE checks the Authorized Cluster Endpoint by listing pods through the
+// cluster's own kubeconfig contexts.
+func VerifyACE(client *rancher.Client, cluster *mgmtv3.Cluster) error {
+	client, err := client.ReLogin()
+	if err != nil {
+		return err
+	}
+
+	kubeConfig, err := kubeconfig.GetKubeconfig(client, cluster.ID)
+	if err != nil {
+		return err
+	}
+
+	original, err := client.SwitchContext(cluster.Name, kubeConfig)
+	if err != nil {
+		return err
+	}
+
+	originalResp, err := original.Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace("").List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	for _, pod := range originalResp.Items {
+		// Newline added: names previously ran together on one line.
+		fmt.Printf("Pod %s\n", pod.GetName())
+	}
+
+	// each control plane has a context. 
For ACE, we should check these contexts
+	contexts, err := kubeconfig.GetContexts(kubeConfig)
+	if err != nil {
+		return err
+	}
+
+	var contextNames []string
+
+	// Loop variable renamed: the original used "context", shadowing the
+	// imported context package inside this loop body.
+	for ctxName := range contexts {
+		if strings.Contains(ctxName, "pool") {
+			contextNames = append(contextNames, ctxName)
+		}
+	}
+
+	for _, contextName := range contextNames {
+		dynamic, err := client.SwitchContext(contextName, kubeConfig)
+		if err != nil {
+			return err
+		}
+
+		resp, err := dynamic.Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace("").List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			return err
+		}
+
+		// Newlines added: output previously ran together on one line.
+		fmt.Printf("Switched Context to %v\n", contextName)
+
+		for _, pod := range resp.Items {
+			fmt.Printf("Pod %v\n", pod.GetName())
+		}
+	}
+
+	return nil
+}
diff --git a/internal/actions/handlestate.go b/internal/actions/handlestate.go
new file mode 100644
index 000000000..d0d60ca2e
--- /dev/null
+++ b/internal/actions/handlestate.go
@@ -0,0 +1,155 @@
+package actions
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/sirupsen/logrus"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// ClusterStatus holds the state of each cluster. 
+type ClusterStatus struct { + Name string `yaml:"name"` + New bool `yaml:"new"` + Infra bool `yaml:"infra"` + Created bool `yaml:"created"` + Imported bool `yaml:"imported"` + Provisioned bool `yaml:"provisioned"` + Registered bool `yaml:"registered"` + Stage Stage `yaml:"stage"` + // // Only one of the following should be included + // tofu.Cluster `yaml:"cluster,omitempty"` //For Imported Clusters + // dart.ClusterTemplate `yaml:"cluster_template,omitempty"` //For Provisioned Clusters +} + +const ClustersStateFile = "clusters_state.yaml" + +// Setup an "enum" for handling stateUpdate "Stage" logic +// See https://gobyexample.com/enums +type Stage int + +const ( + StageNew Stage = iota // Cluster is not yet created + StageInfra // Cluster infrastructure was created + StageCreated // Cluster was created + StageImported // Cluster has been imported + StageProvisioned // Cluster has been provisioned + StageRegistered // Cluster has been registered +) + +// Gives a human-readable name for the Stage. +func (s Stage) String() string { + switch s { + case StageNew: + return "New" + case StageInfra: + return "Infra" + case StageCreated: + return "Created" + case StageImported: + return "Imported" + case StageProvisioned: + return "Provisioned" + case StageRegistered: + return "Registered" + // Return the int representing the Stage, if no case handles it + default: + return fmt.Sprintf("Stage(%d)", s) + } +} + +// SaveClusterState persists the map[string]*ClusterStatus to a YAML file. +func SaveClusterState(filePath string, statuses map[string]*ClusterStatus) error { + data, err := yaml.Marshal(statuses) + if err != nil { + return fmt.Errorf("failed to marshal Cluster state: %w", err) + } + + if err := os.WriteFile(filePath, data, 0o644); err != nil { + return fmt.Errorf("failed to write Cluster state file: %w", err) + } + + return nil +} + +// LoadClusterState reads the YAML state file and unmarshals into map[string]*ClusterStatus. 
+// If the file does not exist, it returns an empty map[string]*ClusterStatus without error. +func LoadClusterState(filePath string) (map[string]*ClusterStatus, error) { + if _, err := os.Stat(filePath); os.IsNotExist(err) { + fmt.Printf("Did not find existing Cluster state file at %s.\n Creating new Cluster state file and returning new empty map[string]*ClusterStatus\n", filePath) + + if err := SaveClusterState(filePath, map[string]*ClusterStatus{}); err != nil { + return nil, fmt.Errorf("failed init Cluster state file: %w", err) + } + + return map[string]*ClusterStatus{}, nil + } else if err != nil { + return nil, fmt.Errorf("failed to os.Stat Cluster state file at %s: %w", filePath, err) + } + + data, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to os.ReadFile Cluster state file at %s: %w", filePath, err) + } + + var statuses map[string]*ClusterStatus + if err := yaml.Unmarshal(data, &statuses); err != nil { + return nil, fmt.Errorf("failed to unmarshal state: %w", err) + } + + return statuses, nil +} + +func DestroyClusterState(filePath string) error { + if _, err := os.Stat(filePath); os.IsNotExist(err) { + fmt.Printf("Did not find existing Cluster state file at %s.\n", filePath) + return nil + } else if err != nil { + return fmt.Errorf("failed to os.Stat Cluster state file at %s during destroy: %w", filePath, err) + } + + err := os.Remove(filePath) + if err != nil { + return fmt.Errorf("failed to os.Remove Cluster state file at %s: %w", filePath, err) + } + + return nil +} + +func FindClusterStatus(statuses map[string]*ClusterStatus, condition func(*ClusterStatus) bool) *ClusterStatus { + for key := range statuses { + if condition(statuses[key]) { + return statuses[key] + } + } + + return nil +} + +func FindClusterStatusByName(statuses map[string]*ClusterStatus, name string) *ClusterStatus { + return FindClusterStatus(statuses, func(cs *ClusterStatus) bool { + return cs.Name == name + }) +} + +func 
FindOrCreateStatusByName(statuses map[string]*ClusterStatus, name string) *ClusterStatus { + clusterStatus := FindClusterStatusByName(statuses, name) + if clusterStatus == nil { + fmt.Printf("Did not find existing ClusterStatus object for Cluster with name %s.\n", name) + + newClusterStatus := ClusterStatus{ + Name: name, + } + statuses[name] = &newClusterStatus + + logrus.Debugf("Created new ClusterStatus object. ClusterStatus.") + logrus.Debugf("\n%v\n", statuses) + + return statuses[name] + } + + fmt.Printf("Found ClusterStatus object named %s. ClusterStatus.", name) + + return clusterStatus +} diff --git a/internal/actions/harvester.go b/internal/actions/harvester.go new file mode 100644 index 000000000..a4d9af624 --- /dev/null +++ b/internal/actions/harvester.go @@ -0,0 +1,81 @@ +package actions + +import ( + "fmt" + + "github.com/rancher/shepherd/clients/harvester" + "github.com/rancher/shepherd/clients/rancher" + "github.com/sirupsen/logrus" + + "github.com/rancher/shepherd/pkg/session" + harvesteraction "github.com/rancher/tests/interoperability/harvester" +) + +type HarvesterImportClient struct { + client *rancher.Client + session *session.Session + harvesterClient *harvester.Client + clusterID string +} + +func NewHarvesterConfig(host, adminToken, adminPassword string, insecure bool) harvester.Config { + defaultBool := false + + return harvester.Config{ + Host: host, + AdminToken: adminToken, + AdminPassword: adminPassword, + Insecure: &insecure, + Cleanup: &defaultBool, + } +} + +// Function to import the Harvester client into the Rancher cluster +func NewHarvesterImportClient(rancherClient *rancher.Client, harvesterConfig *harvester.Config) (*HarvesterImportClient, error) { + h := HarvesterImportClient{ + client: rancherClient, + session: session.NewSession(), + } + + harvesterClient, err := harvester.NewClientForConfig(harvesterConfig.AdminToken, harvesterConfig, h.session) + if err != nil { + return nil, fmt.Errorf("error while setting up Harvester 
client: %v", err) + } + + h.harvesterClient = harvesterClient + + h.session.RegisterCleanupFunc(func() error { + return harvesteraction.ResetHarvesterRegistration(h.harvesterClient) + }) + + return &h, nil +} + +func (h *HarvesterImportClient) ImportCluster() error { + harvesterInRancherID, err := harvesteraction.RegisterHarvesterWithRancher(h.client, h.harvesterClient) + if err != nil { + return fmt.Errorf("error while registering Harvester cluster with Rancher: %v", err) + } + + logrus.Info(harvesterInRancherID) + + h.clusterID = harvesterInRancherID + + // cluster, err := h.client.Management.Cluster.ByID(harvesterInRancherID) + // if err != nil { + // return fmt.Errorf("error while getting Harvester's Rancher Cluster ID: %v", err) + // } + + // kubeConfig, err := h.client.Management.Cluster.ActionGenerateKubeconfig(cluster) + // if err != nil { + // return fmt.Errorf("error while generating Harvester's Rancher Cluster kubeconfig: %v", err) + // } + + // var harvesterCredentialConfig cloudcredentials.HarvesterCredentialConfig + + // harvesterCredentialConfig.ClusterID = harvesterInRancherID + // harvesterCredentialConfig.ClusterType = "imported" + // harvesterCredentialConfig.KubeconfigContent = kubeConfig.Config + + return nil +} diff --git a/internal/actions/kubeconfig.go b/internal/actions/kubeconfig.go new file mode 100644 index 000000000..67cc49799 --- /dev/null +++ b/internal/actions/kubeconfig.go @@ -0,0 +1,124 @@ +package actions + +import ( + "fmt" + + "github.com/rancher/dartboard/internal/tofu" + "github.com/rancher/shepherd/clients/rancher" + yaml "gopkg.in/yaml.v2" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +type Kubeconfig struct { + APIVersion string + Kind string + CurrentContext string + Clusters []Cluster + Users []User + Contexts []Context +} + +type Cluster struct { + Name string + Cluster ClusterInfo +} + +type ClusterInfo struct { + Server string + CertificateAuthorityData string +} + +type User struct { + Name 
string + User UserData +} + +type UserData struct { + Token string +} + +type Context struct { + Name string + Context ContextData +} + +type ContextData struct { + User string + Cluster string +} + +func ParseKubeconfig(kubeconfigPath string) (*Kubeconfig, error) { + kubeconfig := Kubeconfig{} + + kubeconfigBytes, err := GetKubeconfigBytes(kubeconfigPath) + if err != nil { + return nil, err + } + + err = yaml.Unmarshal(kubeconfigBytes, &kubeconfig) + if err != nil { + return nil, fmt.Errorf("error unmarshaling kubeconfig YAML for %s: %w", kubeconfigBytes, err) + } + + return &kubeconfig, nil +} + +func GetKubeconfigBytes(kubeconfigPath string) ([]byte, error) { + kubeconfigBytes, err := tofu.ReadBytesFromPath(kubeconfigPath) + if err != nil { + return nil, err + } + + return kubeconfigBytes, err +} + +// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes. +// For programmatic access, this is what you want 80% of the time +func GetRESTConfigFromBytes(kubeconfig []byte) (*rest.Config, error) { + restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig) + if err != nil { + return nil, fmt.Errorf("error while getting Rest Config from kubeconfig bytes: %w", err) + } + + return restConfig, nil +} + +func GetRESTConfigFromPath(kubeconfigPath string) (*rest.Config, error) { + clusterKubeconfig, err := GetKubeconfigBytes(kubeconfigPath) + if err != nil { + return nil, err + } + + restConfig, err := GetRESTConfigFromBytes(clusterKubeconfig) + if err != nil { + return nil, fmt.Errorf("error getting REST Config for Kubeconfig at %s:\n%w", kubeconfigPath, err) + } + + return restConfig, nil +} + +func GetRESTConfigForClusterID(rancherClient *rancher.Client, id string) (*rest.Config, error) { + cluster, err := rancherClient.Management.Cluster.ByID(id) + if err != nil { + return nil, fmt.Errorf("error while getting Cluster by ID %s: %v", id, err) + } + + output, err := 
rancherClient.Management.Cluster.ActionGenerateKubeconfig(cluster) + if err != nil { + return nil, fmt.Errorf("error while generating Kubeconfig for Cluster with ID %s: %v", id, err) + } + + configBytes := []byte(output.Config) + + restConfig, err := clientcmd.RESTConfigFromKubeConfig(configBytes) + if err != nil { + return nil, fmt.Errorf("error while getting Rest Config for Cluster with ID %s: %v", id, err) + } + + return restConfig, nil +} + +func GetLocalClusterRESTConfig(rancherClient *rancher.Client) (*rest.Config, error) { + return GetRESTConfigForClusterID(rancherClient, "local") +} diff --git a/internal/actions/pods.go b/internal/actions/pods.go new file mode 100644 index 000000000..32667ad92 --- /dev/null +++ b/internal/actions/pods.go @@ -0,0 +1,58 @@ +package actions + +import ( + "context" + "time" + + "github.com/rancher/shepherd/clients/rancher" + shepherdpods "github.com/rancher/shepherd/extensions/workloads/pods" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + PodResourceSteveType = "pod" +) + +// StatusPods is a helper function that uses the steve client to list pods on a namespace for a specific cluster +// and return the statuses in a list of strings +func StatusPodsWithTimeout(client *rancher.Client, clusterID string, timeout time.Duration) []error { + downstreamClient, err := client.Steve.ProxyDownstream(clusterID) + if err != nil { + return []error{err} + } + + var podErrors []error + + steveClient := downstreamClient.SteveType(PodResourceSteveType) + ctx := context.Background() + + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, true, func(_ context.Context) (done bool, err error) { + // emptying pod errors every time we poll so that we don't return stale errors + podErrors = []error{} + + pods, err := steveClient.List(nil) + if err != nil { + // not returning the error in this case, as it could cause a false positive if we start polling too early. 
+ return false, nil + } + + for _, pod := range pods.Data { + isReady, err := shepherdpods.IsPodReady(&pod) + if !isReady { + // not returning the error in this case, as it could cause a false positive if we start polling too early. + return false, nil + } + + if err != nil { + podErrors = append(podErrors, err) + } + } + + return true, nil + }) + if err != nil { + podErrors = append(podErrors, err) + } + + return podErrors +} diff --git a/internal/actions/providers.go b/internal/actions/providers.go new file mode 100644 index 000000000..6b399172c --- /dev/null +++ b/internal/actions/providers.go @@ -0,0 +1,56 @@ +package actions + +import ( + "fmt" + + "github.com/rancher/shepherd/extensions/cloudcredentials/aws" + "github.com/rancher/shepherd/extensions/cloudcredentials/azure" + "github.com/rancher/shepherd/extensions/cloudcredentials/harvester" + "github.com/rancher/tests/actions/machinepools" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/provisioninginput" +) + +// CreateProvider returns all machine and cloud credential +// configs in the form of a Provider struct. Accepts a +// string of the name of the provider. 
+func CreateProvider(name string) provisioning.Provider { + var provider provisioning.Provider + + switch name { + case provisioninginput.AWSProviderName.String(): + provider = provisioning.Provider{ + Name: provisioninginput.AWSProviderName, + MachineConfigPoolResourceSteveType: machinepools.AWSPoolType, + MachinePoolFunc: machinepools.NewAWSMachineConfig, + CloudCredFunc: aws.CreateAWSCloudCredentials, + GetMachineRolesFunc: machinepools.GetAWSMachineRoles, + } + + return provider + case provisioninginput.AzureProviderName.String(): + provider = provisioning.Provider{ + Name: provisioninginput.AzureProviderName, + MachineConfigPoolResourceSteveType: machinepools.AzurePoolType, + MachinePoolFunc: machinepools.NewAzureMachineConfig, + CloudCredFunc: azure.CreateAzureCloudCredentials, + GetMachineRolesFunc: machinepools.GetAzureMachineRoles, + } + + return provider + case provisioninginput.HarvesterProviderName.String(): + provider = provisioning.Provider{ + Name: provisioninginput.HarvesterProviderName, + MachineConfigPoolResourceSteveType: machinepools.HarvesterPoolType, + MachinePoolFunc: machinepools.NewHarvesterMachineConfig, + CloudCredFunc: harvester.CreateHarvesterCloudCredentials, + GetMachineRolesFunc: machinepools.GetHarvesterMachineRoles, + } + + return provider + } + + panic(fmt.Sprintf("Provider:%v not found", name)) + // Unreachable, but makes golangci-lint (govet) analyzer happy + // return provider +} diff --git a/internal/actions/rancher.go b/internal/actions/rancher.go new file mode 100644 index 000000000..5e4fec0e5 --- /dev/null +++ b/internal/actions/rancher.go @@ -0,0 +1,609 @@ +package actions + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/rancher/dartboard/internal/dart" + "github.com/rancher/dartboard/internal/tofu" + yaml "gopkg.in/yaml.v2" + + "github.com/rancher/shepherd/clients/rancher" + management "github.com/rancher/shepherd/clients/rancher/generated/management/v3" + v1 
"github.com/rancher/shepherd/clients/rancher/v1" + "github.com/rancher/shepherd/extensions/cloudcredentials" + shepherdclusters "github.com/rancher/shepherd/extensions/clusters" + shepherddefaults "github.com/rancher/shepherd/extensions/defaults" + shepherdtokens "github.com/rancher/shepherd/extensions/token" + "github.com/rancher/shepherd/pkg/session" + shepherdwait "github.com/rancher/shepherd/pkg/wait" + + "github.com/rancher/tests/actions/machinepools" + "github.com/rancher/tests/actions/pipeline" + "github.com/rancher/tests/actions/provisioning" + "github.com/rancher/tests/actions/reports" + + provv1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const fleetNamespace = "fleet-default" + +func NewRancherConfig(host, adminToken, adminPassword string, insecure bool) rancher.Config { + defaultBool := false + + return rancher.Config{ + Host: host, + AdminToken: adminToken, + AdminPassword: adminPassword, + Insecure: &insecure, + Cleanup: &defaultBool, + } +} + +func SetupRancherClient(rancherConfig *rancher.Config, bootstrapPassword string, session *session.Session) (*rancher.Client, error) { + adminUser := &management.User{ + Username: "admin", + Password: bootstrapPassword, + } + + fmt.Printf("Rancher Config:\nHost: %s\nAdminPassword: %s\nAdminToken: %s\nInsecure: %t\n", rancherConfig.Host, rancherConfig.AdminPassword, rancherConfig.AdminToken, *rancherConfig.Insecure) + + adminToken, err := shepherdtokens.GenerateUserToken(adminUser, rancherConfig.Host) + if err != nil { + return nil, fmt.Errorf("error while creating Admin Token with config %v:\n%v", &rancherConfig, err) + } + + rancherConfig.AdminToken = adminToken.Token + + client, err := rancher.NewClientForConfig(rancherConfig.AdminToken, rancherConfig, session) + if err != nil { + return nil, fmt.Errorf("error while setting up Rancher client with config %v:\n%v", rancherConfig, err) + } + + err = pipeline.PostRancherInstall(client, 
rancherConfig.AdminPassword) + if err != nil { + return nil, fmt.Errorf("error during post- rancher install: %v", err) + } + + client, err = rancher.NewClientForConfig(rancherConfig.AdminToken, rancherConfig, session) + if err != nil { + return nil, fmt.Errorf("error during post- rancher install on re-login: %v", err) + } + + return client, err +} + +func ProvisionDownstreamClusters(r *dart.Dart, templates []dart.ClusterTemplate, rancherClient *rancher.Client) error { + if r.ClusterBatchSize <= 0 { + panic("ClusterBatchSize must be > 0") + } + + for _, template := range r.ClusterTemplates { + err := ProvisionClustersInBatches(r, template, rancherClient) + if err != nil { + return err + } + } + + return nil +} + +// Provisions clusters in "batches" where r.ClusterBatchSize is the maximum # of clusters to provision before sleeping for a short period and continuing +// This will continue to provision clusters until template.ClusterCount # of Clusters have been provisioned +func ProvisionClustersInBatches(r *dart.Dart, template dart.ClusterTemplate, rancherClient *rancher.Client) error { + clusterStatePath := fmt.Sprintf("%s/%s", r.TofuWorkspaceStatePath, ClustersStateFile) + + statuses, err := LoadClusterState(clusterStatePath) + if err != nil { + return err + } + + batchNum := 0 + // Create batches of clusters from the template + for i := 0; i < template.ClusterCount; i += r.ClusterBatchSize { + // Create a batch of templates with unique names + batchTemplates := make([]dart.ClusterTemplate, 0, r.ClusterBatchSize) + j := min(i+r.ClusterBatchSize, template.ClusterCount) + + // Generate the name for each instance of the template and add the template instances to the batchTemplates slice + for k := i; k < j; k++ { + templateCopy := template + templateCopy.SetGeneratedName(fmt.Sprintf("%d-%d", batchNum, k-i)) + batchTemplates = append(batchTemplates, templateCopy) + } + + // Create and run a batch runner for this batch of templates + batchRunner := 
NewSequencedBatchRunner[dart.ClusterTemplate](len(batchTemplates)) + + err := batchRunner.Run(batchTemplates, statuses, clusterStatePath, rancherClient, nil) + if err != nil { + return err + } + + batchNum++ + } + + return nil +} + +func provisionClusterWithRunner[J JobDataTypes](br *SequencedBatchRunner[J], template dart.ClusterTemplate, + statuses map[string]*ClusterStatus, rancherClient *rancher.Client, +) (skipped bool, err error) { + clusterName := template.GeneratedName() + + stateMutex.Lock() + + cs := FindOrCreateStatusByName(statuses, clusterName) + // cs.ClusterTemplate = template + stateMutex.Unlock() + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageNew, Completed: time.Now()} + + br.seqCh <- struct{}{} + + if cs.Provisioned { + fmt.Printf("Cluster %s has already been provisioned, skipping...\n", cs.Name) + return true, nil + } + + fmt.Printf("Continuing with cluster provisioning...\n") + + // switch { + // case strings.Contains(template.DistroVersio"k3s"): + // template.DistroVersion = []string{template.DistroVersion} + // case strings.Contains(template.DistroVersio"rke2"): + // template.DistroVersion = []string{template.DistroVersion} + // default: + // return false, fmt.Errorf("error while parsing kubernetes version for version %v", template.DistroVersion) + // } + + nodeProvider := CreateProvider(template.ClusterConfig.Provider) + templateClusterConfig := ConvertConfigToClusterConfig(template.ClusterConfig) + + // Create the cluster + clusterObject, err := provisioning.CreateProvisioningCluster(rancherClient, nodeProvider, cloudcredentials.CloudCredential{}, templateClusterConfig, machinepools.MachineConfigs{}, nil) + reports.TimeoutClusterReport(clusterObject, err) + + if err != nil { + return false, fmt.Errorf("error while provisioning cluster with ClusterConfig %v:\n%v", templateClusterConfig, err) + } + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageCreated, Completed: time.Now()} + + br.seqCh 
<- struct{}{} + + fmt.Printf("Cluster named %s was created.\n", clusterName) + + // Wait for the cluster to be ready + fiveMinuteTimeout := int64(shepherddefaults.FiveMinuteTimeout) + listOpts := metav1.ListOptions{ + FieldSelector: "metadata.name=" + clusterObject.ID, + TimeoutSeconds: &fiveMinuteTimeout, + } + + watchInterface, err := rancherClient.GetManagementWatchInterface(management.ClusterType, listOpts) + if err != nil { + return false, fmt.Errorf("error while getting Management Watch Interface with Cluster %v and ListOptions %v:\n%v", clusterObject.ID, listOpts, err) + } + + checkFunc := shepherdclusters.IsProvisioningClusterReady + err = shepherdwait.WatchWait(watchInterface, checkFunc) + reports.TimeoutClusterReport(clusterObject, err) + + if err != nil { + return false, fmt.Errorf("error while waiting for Provisioned Cluster to be Ready %v:\n%v", clusterObject.ID, err) + } + + cs.Provisioned = true + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageProvisioned, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Cluster named %s was provisioned.\n", clusterName) + + return false, nil +} + +func ImportDownstreamClusters(r *dart.Dart, clusters []tofu.Cluster, rancherClient *rancher.Client, rancherConfig *rancher.Config) error { + if r.ClusterBatchSize <= 0 { + panic("ClusterBatchSize must be > 0") + } + + if len(clusters) == 0 { + fmt.Printf("No importable Clusters were provided.\n") + } + + err := ImportClustersInBatches(r, clusters, rancherClient, rancherConfig) + if err != nil { + return err + } + + return nil +} + +func ImportClustersInBatches(r *dart.Dart, clusters []tofu.Cluster, rancherClient *rancher.Client, rancherConfig *rancher.Config) error { + clusterStatePath := fmt.Sprintf("%s/%s", r.TofuWorkspaceStatePath, ClustersStateFile) + + statuses, err := LoadClusterState(clusterStatePath) + if err != nil { + return err + } + + // Enqueue clusters in batches and collect results + for i := 0; i < len(clusters); 
i += r.ClusterBatchSize { + j := min(i+r.ClusterBatchSize, len(clusters)) + batch := clusters[i:j] + + batchRunner := NewSequencedBatchRunner[tofu.Cluster](len(batch)) + + err := batchRunner.Run(batch, statuses, clusterStatePath, rancherClient, rancherConfig) + if err != nil { + return err + } + } + + return nil +} + +// createAndWaitForCluster creates a cluster and waits for it to be ready +func createAndWaitForCluster(rancherClient *rancher.Client, rancherConfig *rancher.Config, importCluster *provv1.Cluster) (*provv1.Cluster, error) { + if _, err := CreateK3SRKE2Cluster(rancherClient, rancherConfig, importCluster); err != nil { + return nil, fmt.Errorf("error while creating Steve Cluster with Name %s:\n%w", importCluster.Name, err) + } + + err := BackoffWait(30, func() (finished bool, err error) { + updatedCluster, _, err := shepherdclusters.GetProvisioningClusterByName(rancherClient, importCluster.Name, importCluster.Namespace) + if err != nil { + return false, fmt.Errorf("error while getting Cluster by Name %s in Namespace %s:\n%w", importCluster.Name, importCluster.Namespace, err) + } + + if updatedCluster.Status.ClusterName != "" { + return true, nil + } + + return false, nil + }) + if err != nil { + return nil, err + } + + updatedCluster, _, err := shepherdclusters.GetProvisioningClusterByName(rancherClient, importCluster.Name, importCluster.Namespace) + if err != nil { + return nil, fmt.Errorf("error while getting Cluster by Name %s in Namespace %s:\n%w", importCluster.Name, importCluster.Namespace, err) + } + + return updatedCluster, nil +} + +// performClusterImport imports an external cluster into Rancher +func performClusterImport(rancherClient *rancher.Client, cluster tofu.Cluster, importCluster *provv1.Cluster) (*provv1.Cluster, error) { + restConfig, err := GetRESTConfigFromPath(cluster.Kubeconfig) + if err != nil { + return nil, err + } + // Apply client-side rate limiting + restConfig.QPS = 50 + restConfig.Burst = 100 + + updatedCluster, _, err := 
shepherdclusters.GetProvisioningClusterByName(rancherClient, importCluster.Name, importCluster.Namespace) + if err != nil { + return nil, fmt.Errorf("error while getting Cluster by Name %s in Namespace %s:\n%w", importCluster.Name, importCluster.Namespace, err) + } + + fmt.Printf("Importing Cluster, ID:%s Name:%s\n", updatedCluster.Status.ClusterName, updatedCluster.Name) + + err = shepherdclusters.ImportCluster(rancherClient, updatedCluster, restConfig) + if err != nil { + return nil, fmt.Errorf("error while creating Job for importing Cluster %s:\n%w", updatedCluster.Name, err) + } + + err = BackoffWait(100, func() (finished bool, err error) { + updatedCluster, _, err = shepherdclusters.GetProvisioningClusterByName(rancherClient, importCluster.Name, importCluster.Namespace) + if err != nil { + return false, fmt.Errorf("error while getting Cluster by Name %s in Namespace %s:\n%w", importCluster.Name, importCluster.Namespace, err) + } + + return updatedCluster.Status.Ready, nil + }) + if err != nil { + return nil, err + } + + return updatedCluster, nil +} + +func importClusterWithRunner[J JobDataTypes](br *SequencedBatchRunner[J], cluster tofu.Cluster, + statuses map[string]*ClusterStatus, rancherClient *rancher.Client, rancherConfig *rancher.Config, +) (skipped bool, err error) { + stateMutex.Lock() + + cs := FindOrCreateStatusByName(statuses, cluster.Name) + + stateMutex.Unlock() + <-br.seqCh + + br.Updates <- stateUpdate{Name: cluster.Name, Stage: StageNew, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Found existing ClusterStatus object for Cluster with name %s.\n", cluster.Name) + + if cs.Imported { + fmt.Printf("Cluster %s has already been imported, skipping...\n", cs.Name) + return true, nil + } + + fmt.Printf("Continuing with cluster creation...\n") + + importCluster := provv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: fleetNamespace, + }, + } + if !cs.Created { + updatedCluster, err := 
createAndWaitForCluster(rancherClient, rancherConfig, &importCluster) + if err != nil { + return false, err + } + + _ = updatedCluster // used for side effects in createAndWaitForCluster + + // sequence the Created event + <-br.seqCh + + br.Updates <- stateUpdate{Name: cluster.Name, Stage: StageCreated, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Cluster named %s was created.\n", importCluster.Name) + } + + updatedCluster, err := performClusterImport(rancherClient, cluster, &importCluster) + if err != nil { + return false, err + } + + cs.Imported = true + + <-br.seqCh + + br.Updates <- stateUpdate{Name: cluster.Name, Stage: StageImported, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Cluster named %s was imported.\n", updatedCluster.Name) + + podErrors := StatusPodsWithTimeout(rancherClient, updatedCluster.Status.ClusterName, shepherddefaults.OneMinuteTimeout) + if len(podErrors) > 0 { + errorStrings := make([]string, len(podErrors)) + for i, e := range podErrors { + errorStrings[i] = e.Error() + } + + return false, fmt.Errorf("error while checking Status of Pods in Cluster %s:\n%s", updatedCluster.Status.ClusterName, strings.Join(errorStrings, "\n")) + } + + return false, nil +} + +func RegisterCustomClusters(r *dart.Dart, templates []tofu.CustomCluster, + rancherClient *rancher.Client, rancherConfig *rancher.Config, +) error { + if r.ClusterBatchSize <= 0 { + panic("ClusterBatchSize must be > 0") + } + + for _, template := range templates { + yamlData, err := yaml.Marshal(template) + if err != nil { + log.Fatalf("Error marshaling YAML: %v", err) + } + + fmt.Printf("\ntofu.CustomCluster:\n%s\n", string(yamlData)) + } + + for _, template := range templates { + err := RegisterCustomClustersInBatches(r, template, rancherClient, rancherConfig) + if err != nil { + return err + } + } + + return nil +} + +func RegisterCustomClustersInBatches(r *dart.Dart, template tofu.CustomCluster, rancherClient *rancher.Client, rancherConfig 
*rancher.Config) error { + clusterStatePath := fmt.Sprintf("%s/%s", r.TofuWorkspaceStatePath, ClustersStateFile) + + statuses, err := LoadClusterState(clusterStatePath) + if err != nil { + return err + } + + var custom_clusters []tofu.CustomCluster + // Build []tofu.CustomCluster, length = template.ClusterCount + startNodes := 0 + nodeBatchSize := len(template.Nodes) / template.ClusterCount + endNodes := nodeBatchSize + + for i := range template.ClusterCount { + customCluster := template + customCluster.Name = fmt.Sprintf("%s-%d", template.Name, i) + customCluster.Nodes = template.Nodes[startNodes:endNodes] + startNodes += nodeBatchSize + endNodes += nodeBatchSize + + custom_clusters = append(custom_clusters, customCluster) + } + + // endTemplate := min(r.ClusterBatchSize, len(custom_clusters)) + for startTemplate := 0; startTemplate < len(custom_clusters); startTemplate += r.ClusterBatchSize { + endTemplate := min(startTemplate+r.ClusterBatchSize, len(custom_clusters)) + batchTemplates := custom_clusters[startTemplate:endTemplate] + + batchRunner := NewSequencedBatchRunner[tofu.CustomCluster](len(batchTemplates)) + + err := batchRunner.Run(batchTemplates, statuses, clusterStatePath, rancherClient, rancherConfig) + if err != nil { + return err + } + // endTemplate += min(r.ClusterBatchSize, len(custom_clusters)-endTemplate) + } + + return nil +} + +// createMachinePools creates machine pools from template configuration +func createMachinePools(template tofu.CustomCluster) []provv1.RKEMachinePool { + var machinePools []provv1.RKEMachinePool + + for _, pool := range template.MachinePools { + newPool := provv1.RKEMachinePool{ + EtcdRole: pool.Etcd, + ControlPlaneRole: pool.ControlPlane, + WorkerRole: pool.Worker, + Quantity: &pool.Quantity, + } + machinePools = append(machinePools, newPool) + } + + return machinePools +} + +// createOrGetClusterObject creates a new cluster object or retrieves an existing one +func createOrGetClusterObject[J JobDataTypes](br 
*SequencedBatchRunner[J], rancherClient *rancher.Client, rancherConfig *rancher.Config, provCluster *provv1.Cluster, cs *ClusterStatus, clusterName string) (*v1.SteveAPIObject, error) { + var ( + clusterResp *v1.SteveAPIObject + err error + ) + + if !cs.Created { + fmt.Printf("Creating Cluster object for %s\n", cs.Name) + + clusterResp, err = CreateK3SRKE2Cluster(rancherClient, rancherConfig, provCluster) + if err != nil { + return nil, err + } + + _, err = GetK3SRKE2Cluster(rancherClient, rancherConfig, provCluster) + if err != nil { + return nil, err + } + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageCreated, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Cluster named %s was created.\n", provCluster.Name) + } else { + clusterResp, err = GetK3SRKE2Cluster(rancherClient, rancherConfig, provCluster) + if err != nil { + return nil, err + } + } + + return clusterResp, nil +} + +func registerCustomClusterWithRunner[J JobDataTypes](br *SequencedBatchRunner[J], + template tofu.CustomCluster, statuses map[string]*ClusterStatus, + rancherClient *rancher.Client, rancherConfig *rancher.Config, +) (skipped bool, err error) { + fmt.Printf("\nregisterCustomClusterWithRunner\n") + + clusterName := template.Name + + stateMutex.Lock() + + cs := FindOrCreateStatusByName(statuses, clusterName) + + stateMutex.Unlock() + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageNew, Completed: time.Now()} + + br.seqCh <- struct{}{} + + if cs.Registered { + fmt.Printf("Cluster %s has already been registered, skipping...\n", cs.Name) + return true, nil + } + + fmt.Printf("Continuing with cluster registration...\n") + + provCluster := &provv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: "provisioning.cattle.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: fleetNamespace, + }, + Spec: provv1.ClusterSpec{ + KubernetesVersion: template.DistroVersion, + 
DefaultPodSecurityAdmissionConfigurationTemplateName: psactRancherPrivileged, + RKEConfig: &provv1.RKEConfig{}, + }, + } + + clusterResp, err := createOrGetClusterObject(br, rancherClient, rancherConfig, provCluster, cs, clusterName) + if err != nil { + return false, err + } + + machinePools := createMachinePools(template) + provCluster.Spec.RKEConfig.MachinePools = machinePools + + var clusterObject *v1.SteveAPIObject + // Retry registration if SSH handshake fails (likely due to node not being ready or concurrency limits) + err = BackoffWait(20, func() (bool, error) { + var regErr error + + clusterObject, regErr = RegisterCustomCluster(rancherClient, clusterResp, provCluster, template.Nodes) + if regErr != nil { + if strings.Contains(regErr.Error(), "ssh: handshake failed") || strings.Contains(regErr.Error(), "ssh: unable to authenticate") { + fmt.Printf("SSH handshake failed for cluster %s, retrying... Error: %v\n", clusterName, regErr) + return false, nil + } + + return false, regErr + } + + return true, nil + }) + reports.TimeoutClusterReport(clusterObject, err) + + if err != nil { + return false, err + } + + err = VerifyCluster(rancherClient, rancherConfig, clusterObject) + if err != nil { + return false, err + } + + <-br.seqCh + + br.Updates <- stateUpdate{Name: clusterName, Stage: StageRegistered, Completed: time.Now()} + + br.seqCh <- struct{}{} + + fmt.Printf("Cluster named %s was registered.\n", clusterName) + + return false, nil +} diff --git a/internal/actions/utils.go b/internal/actions/utils.go new file mode 100644 index 000000000..516a0e4f5 --- /dev/null +++ b/internal/actions/utils.go @@ -0,0 +1,13 @@ +package actions + +// NewTrue returns a pointer to true +func NewTrue() *bool { + b := true + return &b +} + +// NewFalse returns a pointer to false +func NewFalse() *bool { + b := false + return &b +} diff --git a/internal/actions/wait.go b/internal/actions/wait.go new file mode 100644 index 000000000..857f69bf7 --- /dev/null +++ 
b/internal/actions/wait.go @@ -0,0 +1,17 @@ +package actions + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// BackoffWait is a shared helper for wait.ExponentialBackoff. +func BackoffWait(steps int, cond func() (bool, error)) error { + return wait.ExponentialBackoff(wait.Backoff{ + Duration: 1 * time.Second, + Factor: 1.1, + Jitter: 0.1, + Steps: steps, + }, cond) +} diff --git a/internal/dart/config.go b/internal/dart/config.go new file mode 100644 index 000000000..b7bac028b --- /dev/null +++ b/internal/dart/config.go @@ -0,0 +1,176 @@ +package dart + +import ( + "errors" + "fmt" + + "github.com/rancher/tests/actions/machinepools" + yaml "gopkg.in/yaml.v3" +) + +var ( + ErrNoConfig = errors.New("no NodeConfig set, must have 1") + ErrMultipleConfigs = errors.New("multiple NodeConfigs set, can only have 1") + ErrInvalidCPU = errors.New("cpu must be > 0") + ErrInvalidMemory = errors.New("memory must be > 0") + ErrInvalidString = errors.New("string must not be empty") +) + +const ( + HarvesterProvider = "harvester" + AWSProvider = "aws" + AzureProvider = "azure" + K3DProvider = "k3d" +) + +type AnyNodeConfig interface { + ProviderConfig + HarvesterNodeConfig | AWSNodeConfig | AzureNodeConfig | K3DNodeConfig +} + +type ProviderConfig interface { + // ProviderName returns the name of the provider + ProviderName() string + // Validate validates the config, returning an error if invalid + Validate() error +} + +type NodeConfig struct { + Harvester *HarvesterNodeConfig `json:"harvester,omitempty" yaml:"harvester,omitempty"` + AWS *AWSNodeConfig `json:"aws,omitempty" yaml:"aws,omitempty"` + Azure *AzureNodeConfig `json:"azure,omitempty" yaml:"azure,omitempty"` + K3DNodeConfig *K3DNodeConfig `json:"k3d_node_config,omitempty" yaml:"k3d_node_config,omitempty"` +} + +type ( + AWSNodeConfig struct{} + AzureNodeConfig struct{} + K3DNodeConfig struct{} +) + +type HarvesterNodeConfig struct { + Tags map[string]string `json:"tags" yaml:"tags"` + ImageName string 
`json:"image_name" yaml:"image_name"` + ImageNamespace string `json:"image_namespace" yaml:"image_namespace"` + Namespace string `json:"namespace" yaml:"namespace"` + Password string `json:"password" yaml:"password"` + Disks []HarvesterDisk `json:"disks" yaml:"disks"` + SSHSharedPublicKeys []SSHSharedPublicKey `json:"ssh_shared_public_keys" yaml:"ssh_shared_public_keys"` + CPU int `json:"cpu" yaml:"cpu"` + Memory int `json:"memory" yaml:"memory"` + EFI bool `json:"efi" yaml:"efi"` + SecureBoot bool `json:"secure_boot" yaml:"secure_boot"` +} + +type SSHSharedPublicKey struct { + Name string `json:"name" yaml:"name"` + Namespace string `json:"namespace" yaml:"namespace"` +} + +type HarvesterDisk struct { + Name string `json:"name" yaml:"name"` + Type string `json:"type" yaml:"type"` + Bus string `json:"bus" yaml:"bus"` + Size int `json:"size" yaml:"size"` +} + +type ClusterConfig struct { + Provider string `yaml:"provider"` + MachinePools []MachinePools `yaml:"machine_pools"` +} + +type MachinePools struct { + MachinePoolConfig MachinePoolConfig `yaml:"machine_pool_config,omitempty" default:"[]"` + machinepools.Pools +} + +type MachinePoolConfig struct { + NodeConfig NodeConfig `yaml:"node_config"` + Quantity int32 `json:"quantity" yaml:"quantity"` + ControlPlane bool `json:"controlplane,omitempty" yaml:"controlplane,omitempty"` + Etcd bool `json:"etcd,omitempty" yaml:"etcd,omitempty"` + Worker bool `json:"worker,omitempty" yaml:"worker,omitempty"` +} + +func (h HarvesterNodeConfig) ProviderName() string { return "harvester" } +func (h HarvesterNodeConfig) Validate() error { + // Currently these are the only *required* fields, by default + if h.CPU <= 0 { + return fmt.Errorf("error while validating harvester: %w", ErrInvalidCPU) + } + + if h.Memory <= 0 { + return fmt.Errorf("error while validating harvester: %w", ErrInvalidMemory) + } + + if h.Password == "" { + return fmt.Errorf("error while validating harvester: password %w", ErrInvalidString) + } + + return nil 
+} +func (h AWSNodeConfig) ProviderName() string { return "aws" } +func (h AWSNodeConfig) Validate() error { + panic("not yet implemented") +} +func (h AzureNodeConfig) ProviderName() string { return "azure" } +func (h AzureNodeConfig) Validate() error { + panic("not yet implemented") +} +func (h K3DNodeConfig) ProviderName() string { return "k3d" } +func (h K3DNodeConfig) Validate() error { + panic("not yet implemented") +} + +// ToMap converts a given parameter to a valid map +func ToMap(a any) (map[string]interface{}, error) { + bytes, err := yaml.Marshal(a) + if err != nil { + return nil, err + } + + var result map[string]interface{} + if err := yaml.Unmarshal(bytes, &result); err != nil { + return nil, err + } + + return result, nil +} + +// GetActiveConfig returns the single non‑nil ProviderConfig inside nc +// If exactly one is set, it returns that, otherwise an error. +func (nc *NodeConfig) GetActiveConfig() (ProviderConfig, error) { + var ( + found ProviderConfig + count int + ) + + if nc.Harvester != nil { + found = *nc.Harvester + count++ + } + + if nc.AWS != nil { + found = *nc.AWS + count++ + } + + if nc.Azure != nil { + found = *nc.Azure + count++ + } + + if nc.K3DNodeConfig != nil { + found = *nc.K3DNodeConfig + count++ + } + + switch count { + case 0: + return nil, fmt.Errorf("error: %w", ErrNoConfig) + case 1: + return found, found.Validate() + default: + return nil, fmt.Errorf("error: %w", ErrMultipleConfigs) + } +} diff --git a/internal/dart/recipe.go b/internal/dart/recipe.go index 5ff60eb06..7f8a895cc 100644 --- a/internal/dart/recipe.go +++ b/internal/dart/recipe.go @@ -8,35 +8,49 @@ import ( "strconv" "strings" - "gopkg.in/yaml.v3" + yaml "gopkg.in/yaml.v3" ) // Dart is a "recipe" that encodes all parameters for a test run type Dart struct { - TofuMainDirectory string `yaml:"tofu_main_directory"` - TofuWorkspace string `yaml:"tofu_workspace"` - TofuParallelism int `yaml:"tofu_parallelism"` - TofuVariables map[string]any 
`yaml:"tofu_variables"` - ChartVariables ChartVariables `yaml:"chart_variables"` - TestVariables TestVariables `yaml:"test_variables"` + TofuVariables map[string]any `yaml:"tofu_variables"` + TofuMainDirectory string `yaml:"tofu_main_directory"` + TofuWorkspace string `yaml:"tofu_workspace"` + TofuWorkspaceStatePath string `yaml:"-"` + ClusterTemplates []ClusterTemplate `yaml:"cluster_templates"` + ChartVariables ChartVariables `yaml:"chart_variables"` + TestVariables TestVariables `yaml:"test_variables"` + TofuParallelism int `yaml:"tofu_parallelism"` + ClusterBatchSize int `yaml:"cluster_batch_size"` +} + +type ClusterTemplate struct { + NodeConfig *NodeConfig `yaml:"node_config"` + ClusterConfig *ClusterConfig `yaml:"cluster_config"` + generatedName string + NamePrefix string `yaml:"name_prefix"` + DistroVersion string `yaml:"distro_version"` + NodesPerCluster int `yaml:"-"` + ClusterCount int `yaml:"cluster_count"` + IsCustomCluster bool `yaml:"is_custom_cluster"` } type ChartVariables struct { - RancherReplicas int `yaml:"rancher_replicas"` - DownstreamRancherMonitoring bool `yaml:"downstream_rancher_monitoring"` + RancherAppsRepoOverride string `yaml:"rancher_apps_repo_override"` + RancherMonitoringVersion string `yaml:"rancher_monitoring_version"` AdminPassword string `yaml:"admin_password"` UserPassword string `yaml:"user_password"` RancherVersion string `yaml:"rancher_version"` - ForcePrimeRegistry bool `yaml:"force_prime_registry"` - RancherAppsRepoOverride string `yaml:"rancher_apps_repo_override"` - RancherChartRepoOverride string `yaml:"rancher_chart_repo_override"` + RancherValues string `yaml:"rancher_values"` + TesterGrafanaVersion string `yaml:"tester_grafana_version"` RancherImageOverride string `yaml:"rancher_image_override"` - RancherImageTagOverride string `yaml:"rancher_image_tag_override"` - RancherMonitoringVersion string `yaml:"rancher_monitoring_version"` CertManagerVersion string `yaml:"cert_manager_version"` - TesterGrafanaVersion string 
`yaml:"tester_grafana_version"` - RancherValues string `yaml:"rancher_values"` + RancherImageTagOverride string `yaml:"rancher_image_tag_override"` + RancherChartRepoOverride string `yaml:"rancher_chart_repo_override"` ExtraEnvironmentVariables []map[string]any `yaml:"extra_environment_variables"` + RancherReplicas int `yaml:"rancher_replicas"` + DownstreamRancherMonitoring bool `yaml:"downstream_rancher_monitoring"` + ForcePrimeRegistry bool `yaml:"force_prime_registry"` } type TestVariables struct { @@ -47,25 +61,27 @@ type TestVariables struct { TestProjects int `yaml:"test_projects"` } -var defaultDart = Dart{ - TofuParallelism: 10, - TofuVariables: map[string]any{}, - ChartVariables: ChartVariables{ - RancherReplicas: 1, - DownstreamRancherMonitoring: false, - AdminPassword: "adminadminadmin", - RancherVersion: "2.9.1", - RancherMonitoringVersion: "104.1.0+up57.0.3", - CertManagerVersion: "1.8.0", - TesterGrafanaVersion: "6.56.5", - }, - TestVariables: TestVariables{ - TestConfigMaps: 2000, - TestSecrets: 2000, - TestRoles: 20, - TestUsers: 10, - TestProjects: 20, - }, +func defaultDart() Dart { + return Dart{ + TofuParallelism: 10, + TofuVariables: map[string]any{}, + ChartVariables: ChartVariables{ + RancherReplicas: 1, + DownstreamRancherMonitoring: false, + AdminPassword: "adminadminadmin", + RancherVersion: "2.9.1", + RancherMonitoringVersion: "104.1.0+up57.0.3", + CertManagerVersion: "1.8.0", + TesterGrafanaVersion: "6.56.5", + }, + TestVariables: TestVariables{ + TestConfigMaps: 2000, + TestSecrets: 2000, + TestRoles: 20, + TestUsers: 10, + TestProjects: 20, + }, + } } func Parse(path string) (*Dart, error) { @@ -73,15 +89,19 @@ func Parse(path string) (*Dart, error) { if err != nil { return nil, fmt.Errorf("failed to read dart file: %w", err) } - result := defaultDart + + result := defaultDart() + err = yaml.Unmarshal(bytes, &result) if err != nil { return nil, fmt.Errorf("failed to unmarshal dart file: %w", err) } + tofuVars, err := 
yaml.Marshal(result.TofuVariables) if err != nil { return nil, fmt.Errorf("failed to marshal recipe's tofu variables: %w", err) } + log.Printf("\nTofu variables: \n%v\n", string(tofuVars)) result.ChartVariables.RancherVersion = normalizeVersion(result.ChartVariables.RancherVersion) @@ -104,6 +124,49 @@ func needsPrime(version string) bool { major, _ := strconv.Atoi(versionSplits[0]) minor, _ := strconv.Atoi(versionSplits[1]) patch, _ := strconv.Atoi(versionSplits[2]) + return (major == 2 && minor == 7 && patch >= 11) || (major == 2 && minor == 8 && patch >= 6) } + +func UpdateDart(r *Dart, path string) error { + data, err := yaml.Marshal(r) + if err != nil { + return fmt.Errorf("failed to marshal Dart file: %w", err) + } + + if err := os.WriteFile(path, data, 0o644); err != nil { + return fmt.Errorf("failed to write Dart file: %w", err) + } + + return nil +} + +func (ct *ClusterTemplate) SetGeneratedName(suffix string) { + ct.generatedName = fmt.Sprintf("%s-%s", ct.NamePrefix, suffix) +} + +func (ct *ClusterTemplate) GeneratedName() string { + return ct.generatedName +} + +func (ct *ClusterTemplate) ProcessNodesPerCluster() int { + var sum int32 + + yamlData, err := yaml.Marshal(ct.ClusterConfig) + if err != nil { + log.Fatalf("Error marshaling YAML: %v", err) + } + + fmt.Printf("\nClusterTemplate.Config: %s\n", string(yamlData)) + + for _, pool := range ct.ClusterConfig.MachinePools { + fmt.Printf("\nFound pool with %d quantity\n", int(pool.MachinePoolConfig.Quantity)) + sum += pool.MachinePoolConfig.Quantity + } + + fmt.Printf("\nFound a total of %d nodes across all pools\n", int(sum)) + ct.NodesPerCluster = int(sum) + + return ct.NodesPerCluster +} diff --git a/internal/docker/docker.go b/internal/docker/docker.go index 666be4128..d2951805b 100644 --- a/internal/docker/docker.go +++ b/internal/docker/docker.go @@ -36,25 +36,35 @@ func Images(image string) ([]string, error) { log.Printf("Exec: docker %s\n", strings.Join(args, " ")) cmd := exec.Command("docker", 
args...) - var outStream strings.Builder - var errStream strings.Builder + + var ( + outStream strings.Builder + errStream strings.Builder + ) + cmd.Stdout = &outStream + cmd.Stderr = &errStream if err := cmd.Run(); err != nil { return nil, fmt.Errorf("%v", errStream.String()) } lines := strings.Split(strings.TrimSpace(outStream.String()), "\n") + var images []string + for _, line := range lines { if line != "" { var img Image + err := json.Unmarshal([]byte(line), &img) if err != nil { - return nil, fmt.Errorf("error unmarshalling JSON output from docker images: %w", err) + return nil, fmt.Errorf("error unmarshaling JSON output from docker images: %w", err) } + images = append(images, img.Repository+":"+img.Tag) } } + return images, nil } diff --git a/internal/harvester/LICENSE b/internal/harvester/LICENSE new file mode 100644 index 000000000..b09cd7856 --- /dev/null +++ b/internal/harvester/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/internal/harvester/README.md b/internal/harvester/README.md new file mode 100644 index 000000000..2df5ae1a7 --- /dev/null +++ b/internal/harvester/README.md @@ -0,0 +1 @@ +Code ported over from https://github.com/belgaied2/harvester-cli, heavily modified diff --git a/internal/harvester/common.go b/internal/harvester/common.go new file mode 100644 index 000000000..1c9ffdb34 --- /dev/null +++ b/internal/harvester/common.go @@ -0,0 +1,30 @@ +package harvester + +import "k8s.io/apimachinery/pkg/api/resource" + +// HandleCPUOverCommitment calculates the CPU Request based on the CPU Limit and the CPU Overcommitment setting. +func HandleCPUOverCommitment(overCommitSettingMap map[string]int, cpuNumber int64) resource.Quantity { + // cpuQuantity := resource.NewQuantity(cpuNumber, resource.DecimalSI) + cpuOvercommit := overCommitSettingMap["cpu"] + if cpuOvercommit <= 0 { + cpuOvercommit = 100 // default value + } + + cpuRequest := (1000 * cpuNumber) * 100 / int64(cpuOvercommit) + + return *resource.NewMilliQuantity(cpuRequest, resource.DecimalSI) +} + +// HandleMemoryOverCommitment calculates the memory Request based on the memory Limit and the memory Overcommitment setting. 
+func HandleMemoryOverCommitment(overCommitSettingMap map[string]int, memory string) resource.Quantity { + // cpuQuantity := resource.NewQuantity(cpuNumber, resource.DecimalSI) + memoryRequest := resource.MustParse(memory) + memoryValue := memoryRequest.Value() + + memOvercommit := overCommitSettingMap["memory"] + if memOvercommit <= 0 { + memOvercommit = 100 // default value + } + + return *resource.NewQuantity(memoryValue*100/int64(memOvercommit), resource.BinarySI) +} diff --git a/internal/harvester/harvester.go b/internal/harvester/harvester.go new file mode 100644 index 000000000..810cd56a0 --- /dev/null +++ b/internal/harvester/harvester.go @@ -0,0 +1,1014 @@ +package harvester + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1" + harvclient "github.com/harvester/harvester/pkg/generated/clientset/versioned" + randGen "github.com/matoous/go-nanoid/v2" + "github.com/minio/pkg/wildcard" + "github.com/rancher/dartboard/internal/actions" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + v1 "k8s.io/api/core/v1" + k8sresource "k8s.io/apimachinery/pkg/api/resource" + k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + VMv1 "kubevirt.io/api/core/v1" +) + +const ( + vmAnnotationPVC = "harvesterhci.io/volumeClaimTemplates" + vmAnnotationNetworkIps = "networks.harvesterhci.io/ips" + defaultDiskSize = "10Gi" + defaultMemSize = "1Gi" + defaultNbCPUCores = 1 + defaultNamespace = "default" + ubuntuDefaultImage = "https://cloud-images.ubuntu.com/minimal/daily/focal/current/focal-minimal-cloudimg-amd64.img" + defaultCloudInitUserData = "#cloud-config\npackages:\n - qemu-guest-agent\nruncmd:\n - [ systemctl, daemon-reload ]\n - [ systemctl, enable, qemu-guest-agent.service ]\n - [ systemctl, start, --no-block, qemu-guest-agent.service ]" + defaultCloudInitNetworkData = "version: 2\nrenderer: 
networkd\nethernets:\n enp1s0:\n dhcp4: true" + defaultCloudInitCmPrefix = "default-ubuntu-" + defaultOverCommitSettingName = "overcommit-config" + RemovedPVCsAnnotationKey = "harvesterhci.io/removedPersistentVolumeClaims" +) + +// VirtualMachineData type is a Data Structure that holds information to display for VM +type VirtualMachineData struct { + VirtualMachine VMv1.VirtualMachine + State string + Name string + Node string + Memory string + IPAddress string + CPU uint32 +} + +type VMTemplateInput struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` +} + +type VMNetworkInput struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + CloudInitTemplate string `json:"cloud_init_template,omitempty" yaml:"cloud_init_template,omitempty"` + CloudInitData []byte `json:"cloud_init_data,omitempty" yaml:"cloud_init_data,omitempty"` +} + +type VMUser struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + CloudInitTemplate string `json:"cloud_init_template,omitempty" yaml:"cloud_init_template,omitempty"` + CloudInitData []byte `json:"cloud_init_data,omitempty" yaml:"cloud_init_data,omitempty"` +} + +type VMImage struct { + ID string `json:"id,omitempty" yaml:"id,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` +} + +type VMSSHKey struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` +} + +type VMInput struct { + Image VMImage `json:"image,omitempty" yaml:"image,omitempty"` + Template VMTemplateInput `json:"template,omitempty" yaml:"template,omitempty"` + SSHKey VMSSHKey `json:"ssh_key,omitempty" yaml:"ssh_key,omitempty"` + Name 
string `json:"name,omitempty" yaml:"name,omitempty"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + Description string `json:"description,omitempty" yaml:"description,omitempty"` + DiskSize string `json:"disk_size,omitempty" yaml:"disk_size,omitempty"` + Network VMNetworkInput `json:"network,omitempty" yaml:"network,omitempty"` + User VMUser `json:"user,omitempty" yaml:"user,omitempty"` + Count int `json:"count,omitempty" yaml:"count,omitempty"` + CPUs int `json:"cpus,omitempty" yaml:"cpus,omitempty"` + Memory int `json:"memory,omitempty" yaml:"memory,omitempty"` +} + +var overCommitSettingMap map[string]int + +// GetHarvesterClient creates a Client for Harvester from Config input +func GetHarvesterClient(kubeconfigPath string) (*harvclient.Clientset, error) { + clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return &harvclient.Clientset{}, err + } + + return harvclient.NewForConfig(clientConfig) +} + +// GetKubeClient creates a Vanilla Kubernetes Client to query the Kubernetes-native API Objects +func GetKubeClient(kubeconfigPath string) (*kubeclient.Clientset, error) { + clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return &kubeclient.Clientset{}, err + } + + return kubeclient.NewForConfig(clientConfig) +} + +// ListVMs lists the VMs available in Harvester +func ListVMs(c *harvclient.Clientset, namespace string) ([]VMv1.VirtualMachine, []VirtualMachineData, error) { + vmList, err := c.KubevirtV1().VirtualMachines(namespace).List(context.TODO(), k8smetav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + vmiList, err := c.KubevirtV1().VirtualMachineInstances(namespace).List(context.TODO(), k8smetav1.ListOptions{}) + if err != nil { + return nil, nil, err + } + + vmiMap := map[string]VMv1.VirtualMachineInstance{} + for _, vmi := range vmiList.Items { + vmiMap[vmi.Name] = vmi + } + + var ( + allVMs []VMv1.VirtualMachine + allVMData 
[]VirtualMachineData + ) + + for _, vm := range vmList.Items { + state := string(vm.Status.PrintableStatus) + + var IP string + if len(vmiMap[vm.Name].Status.Interfaces) == 0 { + IP = "" + } else { + IP = vmiMap[vm.Name].Status.Interfaces[0].IP + } + + var memory string + + if vm.Spec.Template != nil && + vm.Spec.Template.Spec.Domain.Resources.Limits != nil && + vm.Spec.Template.Spec.Domain.Resources.Limits.Memory().CmpInt64(int64(0)) == 0 { + if vm.Spec.Template.Spec.Domain.Resources.Requests != nil { + memory = vm.Spec.Template.Spec.Domain.Resources.Requests.Memory().String() + } + } else if vm.Spec.Template != nil && vm.Spec.Template.Spec.Domain.Resources.Limits != nil { + memory = vm.Spec.Template.Spec.Domain.Resources.Limits.Memory().String() + } + + allVMs = append(allVMs, vm) + + vmData := VirtualMachineData{ + State: state, + VirtualMachine: vm, + Name: vm.Name, + Node: vmiMap[vm.Name].Status.NodeName, + CPU: vm.Spec.Template.Spec.Domain.CPU.Cores, + Memory: memory, + IPAddress: IP, + } + allVMData = append(allVMData, vmData) + } + + return allVMs, allVMData, nil +} + +// DeleteVM deletes VMs which name is given in argument +func DeleteVM(c *harvclient.Clientset, namespace, vmName string) error { + if strings.Contains(vmName, "*") || strings.Contains(vmName, "?") { + matchingVMs, err := BuildVMListMatchingWildcard(c, namespace, vmName) + if err != nil { + return err + } + + for _, vmExisting := range matchingVMs { + err := DeleteVMWithPVC(c, &vmExisting, namespace) + if err != nil { + return err + } + } + } else { + vm, err := c.KubevirtV1().VirtualMachines(namespace).Get(context.TODO(), vmName, k8smetav1.GetOptions{}) + if err != nil { + return fmt.Errorf("no VM with the provided name found") + } + + err = DeleteVMWithPVC(c, vm, namespace) + if err != nil { + return err + } + } + + return nil +} + +func DeleteVMWithPVC(c *harvclient.Clientset, vmExisting *VMv1.VirtualMachine, namespace string) error { + vmCopy := vmExisting.DeepCopy() + + var removedPVCs 
[]string + + if vmCopy.Spec.Template != nil { + for _, vol := range vmCopy.Spec.Template.Spec.Volumes { + if vol.PersistentVolumeClaim == nil { + continue + } + + removedPVCs = append(removedPVCs, vol.PersistentVolumeClaim.ClaimName) + } + } + + if vmCopy.Annotations == nil { + vmCopy.Annotations = make(map[string]string) + } + + vmCopy.Annotations[RemovedPVCsAnnotationKey] = strings.Join(removedPVCs, ",") + + _, err := c.KubevirtV1().VirtualMachines(namespace).Update(context.TODO(), vmCopy, k8smetav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("error during removal of PVCs in the VM reference, %w", err) + } + + err = c.KubevirtV1().VirtualMachines(namespace).Delete(context.TODO(), vmCopy.Name, k8smetav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("VM named %s could not be deleted successfully: %w", vmCopy.Name, err) + } else { + logrus.Infof("VM %s deleted successfully", vmCopy.Name) + } + + return nil +} + +// CreateVM implements the CLI *vm create* command, there are two options, either to create a VM from a Harvester VM template or from a VM image +func CreateVM(c *harvclient.Clientset, k *kubeclient.Clientset, vmInputs *VMInput) error { + if vmInputs.Template.Name != "" { + return createVMFromTemplate(c, k, vmInputs) + } else { + return createVMFromImage(c, k, nil, vmInputs) + } +} + +// createVMFromTemplate creates a VM from a VM template provided in the CLI command +func createVMFromTemplate(c *harvclient.Clientset, k *kubeclient.Clientset, vmInputs *VMInput) error { + var err error + // checking if template exists + templateContent, err := c.HarvesterhciV1beta1().VirtualMachineTemplates(vmInputs.Namespace).Get(context.TODO(), vmInputs.Template.Name, k8smetav1.GetOptions{}) + if err != nil { + return fmt.Errorf("template %s was not found on the Harvester Cluster in namespace %s", vmInputs.Template.Name, vmInputs.Namespace) + } + + // Picking the templateVersion + var templateVersion *v1beta1.VirtualMachineTemplateVersion + if 
vmInputs.Template.Version == "" { + templateVersion, err = c.HarvesterhciV1beta1().VirtualMachineTemplateVersions(vmInputs.Namespace).Get(context.TODO(), vmInputs.Template.Version, k8smetav1.GetOptions{}) + if err != nil { + return err + } + + logrus.Debugf("templateVersion found is :%s\n", templateContent.Spec.DefaultVersionID) + + templateVersion.ManagedFields = []k8smetav1.ManagedFieldsEntry{} + + marshalledTemplateVersion, err := json.Marshal(templateVersion) + if err != nil { + return err + } + + logrus.Debugf("template version: %s\n", string(marshalledTemplateVersion)) + } else { + versionNum, err := strconv.Atoi(vmInputs.Template.Version) + if err != nil { + return err + } + + templateVersion, err = fetchTemplateVersionFromInt(c, vmInputs.Namespace, vmInputs.Template.Name, versionNum) + if err != nil { + return err + } + } + + templateVersionAnnotation := templateVersion.Spec.VM.ObjectMeta.Annotations[vmAnnotationPVC] + logrus.Debugf("VM Annotation for PVC (should be JSON format): %s", templateVersionAnnotation) + + var pvcList []v1.PersistentVolumeClaim + + err = json.Unmarshal([]byte(templateVersionAnnotation), &pvcList) + if err != nil { + return err + } + + if len(pvcList) == 0 { + return fmt.Errorf("no PersistentVolumeClaims found in VM template PVC annotation") + } + + pvc := pvcList[0] + + vmImageIdWithNamespace, ok := pvc.ObjectMeta.Annotations["harvesterhci.io/imageId"] + if !ok || vmImageIdWithNamespace == "" { + return fmt.Errorf("missing imageId annotation on PVC %q", pvc.Name) + } + + imageIdParts := strings.Split(vmImageIdWithNamespace, "/") + if len(imageIdParts) < 2 { + return fmt.Errorf("invalid imageId annotation %q on PVC %q", vmImageIdWithNamespace, pvc.Name) + } + + vmInputs.Image.ID = imageIdParts[1] + if vmInputs.DiskSize == "" { + vmInputs.DiskSize = pvc.Spec.Resources.Requests.Storage().String() + } + + vmTemplate := templateVersion.Spec.VM.Spec.Template + + err = createVMFromImage(c, k, vmTemplate, vmInputs) + if err != nil { + 
return err + } + + return nil +} + +// fetchTemplateVersionFromInt gets the Template with the right version given the context (containing template name) and the version as an integer +func fetchTemplateVersionFromInt(c *harvclient.Clientset, namespace, templateName string, version int) (*v1beta1.VirtualMachineTemplateVersion, error) { + templateSelector := "template.harvesterhci.io/templateID=" + templateName + + allTemplateVersions, err := c.HarvesterhciV1beta1().VirtualMachineTemplateVersions(namespace).List(context.TODO(), k8smetav1.ListOptions{ + LabelSelector: templateSelector, + }) + if err != nil { + return nil, err + } + + for _, serverTemplateVersion := range allTemplateVersions.Items { + if version == serverTemplateVersion.Status.Version { + return &serverTemplateVersion, nil + } + } + + return nil, fmt.Errorf("no VM template named %s with version %d found", templateName, version) +} + +// setupVMImage retrieves or sets up the VM image and returns the image and storage class name +func setupVMImage(c *harvclient.Clientset, vmInputs *VMInput) (*v1beta1.VirtualMachineImage, string, error) { + var ( + vmImage *v1beta1.VirtualMachineImage + err error + ) + + if vmInputs.Image.ID != "" { + vmImage, err = c.HarvesterhciV1beta1().VirtualMachineImages(vmInputs.Image.Namespace).Get(context.TODO(), vmInputs.Image.Name, k8smetav1.GetOptions{}) + if err != nil { + return nil, "", err + } + + logrus.Debugf("Found Image with ID %s!", vmInputs.Image.ID) + } else { + vmImage, err = SetDefaultVMImage(c, vmInputs) + if err != nil { + return nil, "", err + } + } + + return vmImage, vmImage.Status.StorageClassName, nil +} + +// validateNetworkExists checks if the network exists in Harvester +func validateNetworkExists(c *harvclient.Clientset, namespace, name string) error { + _, err := c.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).Get(context.TODO(), name, k8smetav1.GetOptions{}) + if err != nil { + return fmt.Errorf("problem while verifying network existence; 
%w", err) + } + + return nil +} + +// initializeOverCommitSettings retrieves and initializes the overcommit settings +func initializeOverCommitSettings(c *harvclient.Clientset) error { + overCommitSetting, err := c.HarvesterhciV1beta1().Settings().Get(context.TODO(), defaultOverCommitSettingName, k8smetav1.GetOptions{}) + if err != nil { + return fmt.Errorf("encountered issue when querying Harvester for setting %s: %w", defaultOverCommitSettingName, err) + } + + err = json.Unmarshal([]byte(overCommitSetting.Default), &overCommitSettingMap) + if err != nil { + return fmt.Errorf("encountered issue when unmarshaling setting value %s: %w", defaultOverCommitSettingName, err) + } + + return nil +} + +// createVMFromImage creates a VM from a VM Image using the CLI command context to get information +func createVMFromImage(c *harvclient.Clientset, k *kubeclient.Clientset, vmTemplate *VMv1.VirtualMachineInstanceTemplateSpec, vmInputs *VMInput) error { + if vmInputs.Count == 0 { + return fmt.Errorf("VM count provided is 0, no VM will be created") + } + + vmImage, storageClassName, err := setupVMImage(c, vmInputs) + if err != nil { + return err + } + + _ = vmImage // Image is setup for vmInputs side effects + + vmNameBase := vmInputs.Name + vmLabels := map[string]string{ + "harvesterhci.io/creator": "harvester", + } + vmiLabels := vmLabels + + if err := validateNetworkExists(c, vmInputs.Network.Namespace, vmInputs.Network.Name); err != nil { + return err + } + + if err := initializeOverCommitSettings(c); err != nil { + return err + } + + for i := 1; i <= vmInputs.Count; i++ { + if err := createSingleVM(c, k, vmTemplate, vmInputs, vmNameBase, vmiLabels, vmLabels, storageClassName, i); err != nil { + return err + } + } + + return nil +} + +// prepareVMTemplate prepares or creates the VM template for a specific VM instance +func prepareVMTemplate(c *harvclient.Clientset, k *kubeclient.Clientset, vmTemplate *VMv1.VirtualMachineInstanceTemplateSpec, pvcName, vmNameBase string, 
vmiLabels map[string]string, vmInputs *VMInput) (*VMv1.VirtualMachineInstanceTemplateSpec, error) { + var err error + + if vmTemplate == nil { + return BuildVMTemplate(c, k, pvcName, vmiLabels, vmInputs) + } + + vmTemplate.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcName + + if vmTemplate.ObjectMeta.Labels == nil { + vmTemplate.ObjectMeta.Labels = make(map[string]string) + } + + vmTemplate.ObjectMeta.Labels["harvesterhci.io/vmNamePrefix"] = vmNameBase + vmTemplate.Spec.Affinity = &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + Weight: int32(1), + PodAffinityTerm: v1.PodAffinityTerm{ + TopologyKey: "kubernetes.io/hostname", + LabelSelector: &k8smetav1.LabelSelector{ + MatchLabels: map[string]string{ + "harvesterhci.io/vmNamePrefix": vmNameBase, + }, + }, + }, + }, + }, + }, + } + + err = enrichVMTemplate(c, k, vmTemplate, vmInputs) + if err != nil { + return nil, fmt.Errorf("unable to enrich VM template with values from flags: %w", err) + } + + return vmTemplate, nil +} + +// createSingleVM creates a single VM instance +func createSingleVM(c *harvclient.Clientset, k *kubeclient.Clientset, vmTemplate *VMv1.VirtualMachineInstanceTemplateSpec, vmInputs *VMInput, vmNameBase string, vmiLabels, vmLabels map[string]string, storageClassName string, i int) error { + vmName := vmNameBase + if vmInputs.Count > 1 { + vmName = vmNameBase + "-" + fmt.Sprint(i) + } + + vmiLabels["harvesterhci.io/vmName"] = vmName + vmiLabels["harvesterhci.io/vmNamePrefix"] = vmNameBase + + diskRandomID, err := randGen.New(8) + if err != nil { + return err + } + + pvcName := vmName + "-disk-0-" + diskRandomID + pvcAnnotation := "[{\"metadata\":{\"name\":\"" + pvcName + "\",\"annotations\":{\"harvesterhci.io/imageId\":\"" + vmInputs.Image.Namespace + "/" + vmInputs.Image.ID + "\"}},\"spec\":{\"accessModes\":[\"ReadWriteMany\"],\"resources\":{\"requests\":{\"storage\":\"" + vmInputs.DiskSize + 
"\"}},\"volumeMode\":\"Block\",\"storageClassName\":\"" + storageClassName + "\"}}]" + + vmTemplate, err = prepareVMTemplate(c, k, vmTemplate, pvcName, vmNameBase, vmiLabels, vmInputs) + if err != nil { + return err + } + + vm := &VMv1.VirtualMachine{ + ObjectMeta: k8smetav1.ObjectMeta{ + Name: vmName, + Namespace: vmInputs.Namespace, + Annotations: map[string]string{ + vmAnnotationPVC: pvcAnnotation, + vmAnnotationNetworkIps: "[]", + }, + Labels: vmLabels, + }, + Spec: VMv1.VirtualMachineSpec{ + Running: actions.NewTrue(), + Template: vmTemplate, + }, + } + + _, err = c.KubevirtV1().VirtualMachines(vmInputs.Namespace).Create(context.TODO(), vm, k8smetav1.CreateOptions{}) + + return err +} + +func enrichVMTemplate(c *harvclient.Clientset, k *kubeclient.Clientset, vmTemplate *VMv1.VirtualMachineInstanceTemplateSpec, vmInputs *VMInput) error { + if vmInputs.CPUs > 0 { + vmTemplate.Spec.Domain.CPU.Cores = uint32(vmInputs.CPUs) + cpuQuantity := k8sresource.NewQuantity(int64(vmInputs.CPUs), k8sresource.DecimalSI) + + vmTemplate.Spec.Domain.Resources.Limits["cpu"] = *cpuQuantity + if vmTemplate.Spec.Domain.Resources.Requests == nil { + vmTemplate.Spec.Domain.Resources.Requests = v1.ResourceList{} + } + + vmTemplate.Spec.Domain.Resources.Requests["cpu"] = HandleCPUOverCommitment(overCommitSettingMap, int64(vmInputs.CPUs)) + } + + if vmInputs.Memory > 0 { + vmMemory := strconv.Itoa(vmInputs.Memory) + + vmTemplate.Spec.Domain.Resources.Limits["memory"] = k8sresource.MustParse(vmMemory) + if vmTemplate.Spec.Domain.Resources.Requests == nil { + vmTemplate.Spec.Domain.Resources.Requests = v1.ResourceList{} + } + + vmTemplate.Spec.Domain.Resources.Requests["memory"] = HandleMemoryOverCommitment(overCommitSettingMap, vmMemory) + } + + networkNS := vmInputs.Network.Namespace + if networkNS == "" { + networkNS = vmInputs.Namespace + } + + dataMap := map[string]any{ + "network-name": vmInputs.Network.Name, + "network-namespace": networkNS, + "network-data-content": 
vmInputs.Network.CloudInitData, + "network-data-template": vmInputs.Network.CloudInitTemplate, + "user-name": vmInputs.User.Name, + "user-namespace": vmInputs.Namespace, + "user-data-content": vmInputs.User.CloudInitData, + "user-data-template": vmInputs.User.CloudInitTemplate, + } + + for _, userDataType := range []string{"network", "user"} { + for _, reference := range []string{"content", "template"} { + if dataMap[userDataType+"-data-"+reference] != nil { + for _, volume := range vmTemplate.Spec.Volumes { + if volume.Name == "cloudinitdisk" { + if userDataType == "network" { + networkData, err := getCloudInitData(k, dataMap, "network") + if err != nil { + return fmt.Errorf("error during the retrieval of the network cloud-init data: %s", err) + } + + volume.CloudInitNoCloud.NetworkData = networkData + } else { + userData, err := getCloudInitData(k, dataMap, "user") + if err != nil { + return fmt.Errorf("error during the retrieval of the user cloud-init data: %s", err) + } + + volume.CloudInitNoCloud.UserData = userData + } + } + } + } + } + } + + return nil +} + +// getCloudInitNetworkData gives the ConfigMap object with name indicated in the command line, +// and will create a new one called "ubuntu-std-network" if none is provided or no ConfigMap was found with the same name +func getCloudInitData(k *kubeclient.Clientset, dataMap map[string]any, scope string) (string, error) { + if scope != "user" && scope != "network" { + return "", fmt.Errorf("wrong value for scope parameter") + } + + flagName := scope + "-data" + + var cloudInitDataString string + + if dataMap[flagName+"-content"] == nil { + flagName = flagName + "-template" + + cmName := dataMap[scope+"-name"] + cmNS := dataMap[scope+"-namespace"] + + if cmName != nil && cmName != "" { + cmNameStr, ok := cmName.(string) + if !ok { + return "", fmt.Errorf("ConfigMap name is not a string") + } + + cmNSStr, ok := cmNS.(string) + if !ok { + return "", fmt.Errorf("ConfigMap namespace is not a string") + } + + 
ciData, err := k.CoreV1().ConfigMaps(cmNSStr).Get(context.TODO(), cmNameStr, k8smetav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("ConfigMap named %s was not found, please specify another ConfigMap or remove the %s input to use the default one for ubuntu", cmNameStr, scope+"-name") + } + + return ciData.Data["cloudInit"], nil + } + + if scope == "user" { + return defaultCloudInitUserData, nil + } else if scope == "network" { + return defaultCloudInitNetworkData, nil + } + } + + if dataMap[flagName+"-template"] != "" { + return "", fmt.Errorf("you can't specify both a ConfigMap reference and a file path for the cloud-init data") + } + + if dataMap[flagName+"-content"] == nil { + return "", fmt.Errorf("no cloud-init data was supplied") + } + + cloudInitDataBytes, ok := dataMap[flagName+"-content"].([]byte) + if !ok { + return "", fmt.Errorf("cloud-init data is not in expected format ([]byte)") + } + + if len(cloudInitDataBytes) == 0 { + return "", fmt.Errorf("cloud-init data is empty") + } + + cloudInitDataString = string(cloudInitDataBytes) + + return cloudInitDataString, nil +} + +// prepareCloudInitData retrieves and prepares cloud-init user and network data +func prepareCloudInitData(c *harvclient.Clientset, k *kubeclient.Clientset, dataMap map[string]any, vmInputs *VMInput) (userData, networkData string, sshKey *v1beta1.KeyPair, err error) { + cloudInitCustomUserData, err := getCloudInitData(k, dataMap, "user") + if err != nil { + return "", "", nil, fmt.Errorf("error during getting cloud init user data from Harvester: %w", err) + } + + if vmInputs.SSHKey.Name != "" { + sshKey, err = c.HarvesterhciV1beta1().KeyPairs(vmInputs.SSHKey.Namespace).Get(context.TODO(), vmInputs.SSHKey.Name, k8smetav1.GetOptions{}) + if err != nil { + return "", "", nil, fmt.Errorf("error during getting keypair from Harvester: %w", err) + } + + logrus.Debugf("SSH Key Name %s given does exist!", vmInputs.SSHKey.Name) + } else if !userDataContainsKey(cloudInitCustomUserData) 
{ + sshKey, err = SetDefaultSSHKey(c, vmInputs) + if err != nil { + return "", "", nil, fmt.Errorf("error during setting default SSH key: %w", err) + } + } + + cloudInitUserData, err := MergeOptionsInUserData(cloudInitCustomUserData, defaultCloudInitUserData, sshKey) + if err != nil { + return "", "", nil, fmt.Errorf("error during merging cloud init user data: %w", err) + } + + cloudInitNetworkData, err := getCloudInitData(k, dataMap, "network") + if err != nil { + return "", "", nil, fmt.Errorf("error during getting cloud-init for networking: %w", err) + } + + return cloudInitUserData, cloudInitNetworkData, sshKey, nil +} + +// buildVMDomainSpec creates the domain specification for a VM +func buildVMDomainSpec(vmInputs *VMInput) VMv1.DomainSpec { + return VMv1.DomainSpec{ + CPU: &VMv1.CPU{ + Cores: uint32(vmInputs.CPUs), + Sockets: 1, + Threads: 1, + }, + Devices: VMv1.Devices{ + Inputs: []VMv1.Input{ + { + Bus: "usb", + Type: "tablet", + Name: "tablet", + }, + }, + Interfaces: []VMv1.Interface{ + { + Name: "nic-1", + Model: "virtio", + InterfaceBindingMethod: VMv1.DefaultBridgeNetworkInterface().InterfaceBindingMethod, + }, + }, + Disks: []VMv1.Disk{ + { + Name: "disk-0", + DiskDevice: VMv1.DiskDevice{ + Disk: &VMv1.DiskTarget{ + Bus: "virtio", + }, + }, + }, + { + Name: "cloudinitdisk", + DiskDevice: VMv1.DiskDevice{ + Disk: &VMv1.DiskTarget{ + Bus: "virtio", + }, + }, + }, + }, + }, + Resources: VMv1.ResourceRequirements{ + Requests: v1.ResourceList{ + "memory": HandleMemoryOverCommitment(overCommitSettingMap, fmt.Sprintf("%dGi", vmInputs.Memory)), + "cpu": HandleCPUOverCommitment(overCommitSettingMap, int64(vmInputs.CPUs)), + }, + Limits: v1.ResourceList{ + "memory": k8sresource.MustParse(fmt.Sprintf("%dGi", vmInputs.Memory)), + "cpu": *k8sresource.NewQuantity(int64(vmInputs.CPUs), k8sresource.DecimalSI), + }, + }, + } +} + +// BuildVMTemplate creates a *VMv1.VirtualMachineInstanceTemplateSpec from the CLI Flags and some computed values +func BuildVMTemplate(c 
*harvclient.Clientset, k *kubeclient.Clientset, pvcName string, vmiLabels map[string]string, vmInputs *VMInput) (vmTemplate *VMv1.VirtualMachineInstanceTemplateSpec, err error) {
	// Default the network namespace to the VM namespace when unset.
	networkNS := vmInputs.Network.Namespace
	if networkNS == "" {
		networkNS = vmInputs.Namespace
	}

	// Flag values flattened into the generic lookup map consumed by
	// prepareCloudInitData/getCloudInitData.
	dataMap := map[string]any{
		"network-name":          vmInputs.Network.Name,
		"network-namespace":     networkNS,
		"network-data-content":  vmInputs.Network.CloudInitData,
		"network-data-template": vmInputs.Network.CloudInitTemplate,
		"user-name":             vmInputs.User.Name,
		"user-namespace":        vmInputs.Namespace,
		"user-data-content":     vmInputs.User.CloudInitData,
		"user-data-template":    vmInputs.User.CloudInitTemplate,
	}

	cloudInitUserData, cloudInitNetworkData, sshKey, err := prepareCloudInitData(c, k, dataMap, vmInputs)
	if err != nil {
		return nil, err
	}

	// Root disk backed by the generated PVC.
	rootVolume := VMv1.Volume{
		Name: "disk-0",
		VolumeSource: VMv1.VolumeSource{
			PersistentVolumeClaim: &VMv1.PersistentVolumeClaimVolumeSource{
				PersistentVolumeClaimVolumeSource: v1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvcName,
				},
			},
		},
	}

	// Cloud-init disk carrying the merged user and network data.
	cloudInitVolume := VMv1.Volume{
		Name: "cloudinitdisk",
		VolumeSource: VMv1.VolumeSource{
			CloudInitNoCloud: &VMv1.CloudInitNoCloudSource{
				UserData:    cloudInitUserData,
				NetworkData: cloudInitNetworkData,
			},
		},
	}

	defaultNetwork := VMv1.Network{
		Name: "nic-1",
		NetworkSource: VMv1.NetworkSource{
			Multus: &VMv1.MultusNetwork{
				NetworkName: vmInputs.Network.Name,
			},
		},
	}

	vmTemplate = &VMv1.VirtualMachineInstanceTemplateSpec{
		ObjectMeta: k8smetav1.ObjectMeta{
			Annotations: vmiAnnotations(pvcName, vmInputs.SSHKey.Name),
			Labels:      vmiLabels,
		},
		Spec: VMv1.VirtualMachineInstanceSpec{
			Hostname: vmInputs.Name,
			Networks: []VMv1.Network{defaultNetwork},
			Volumes:  []VMv1.Volume{rootVolume, cloudInitVolume},
			Domain:   buildVMDomainSpec(vmInputs),
			// Spread VMs sharing the same name prefix across nodes.
			Affinity: &v1.Affinity{
				PodAntiAffinity: &v1.PodAntiAffinity{
					PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
						{
							Weight: int32(1),
							PodAffinityTerm: v1.PodAffinityTerm{
								TopologyKey:
"kubernetes.io/hostname", + LabelSelector: &k8smetav1.LabelSelector{ + MatchLabels: map[string]string{ + "harvesterhci.io/vmNamePrefix": vmInputs.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + _ = sshKey // sshKey was used in prepareCloudInitData for side effects + + return vmTemplate, nil +} + +// vmiAnnotations generates a map of strings to be injected as annotations from a PVC name and an SSK Keyname +func vmiAnnotations(pvcName string, sshKeyName string) map[string]string { + return map[string]string{ + "harvesterhci.io/diskNames": "[\"" + pvcName + "\"]", + "harvesterhci.io/sshNames": "[\"" + sshKeyName + "\"]", + } +} + +// checks if the userData contains an ssh_authorized_keys entry +func userDataContainsKey(userData string) bool { + var userDataMap map[string]interface{} + + if err := yaml.Unmarshal([]byte(userData), &userDataMap); err != nil { + return false + } + + if _, ok := userDataMap["ssh_authorized_keys"]; ok { + return true + } + + return false +} + +// BuildVMListMatchingWildcard creates an array of VM objects which names match the given wildcard pattern +func BuildVMListMatchingWildcard(c *harvclient.Clientset, namespace, vmNameWildcard string) ([]VMv1.VirtualMachine, error) { + vms, err := c.KubevirtV1().VirtualMachines(namespace).List(context.TODO(), k8smetav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("No VMs found with name %s", vmNameWildcard) + } + + var matchingVMs []VMv1.VirtualMachine + + for _, vm := range vms.Items { + if wildcard.Match(vmNameWildcard, vm.Name) { + matchingVMs = append(matchingVMs, vm) + } + } + + logrus.Infof("number of matching VMs for pattern %s: %d", vmNameWildcard, len(matchingVMs)) + + return matchingVMs, nil +} + +// SetDefaultVMImage creates a default VM image based on Ubuntu if none has been provided at the command line. 
+func SetDefaultVMImage(c *harvclient.Clientset, vmInputs *VMInput) (result *v1beta1.VirtualMachineImage, err error) { + result = &v1beta1.VirtualMachineImage{} + + vmImages, err := c.HarvesterhciV1beta1().VirtualMachineImages(vmInputs.Image.Namespace).List(context.TODO(), k8smetav1.ListOptions{}) + if err != nil { + err = fmt.Errorf("error during setting default VM Image: %w", err) + return + } + + var vmImage *v1beta1.VirtualMachineImage + + if len(vmImages.Items) == 0 { + vmImage, err = CreateVMImage(c, vmInputs.Image.Namespace, "ubuntu-default-image", ubuntuDefaultImage) + if err != nil { + err = fmt.Errorf("impossible to create a default VM Image: %s", err) + return + } + } else { + vmImage = &vmImages.Items[0] + } + + imageID := vmImage.ObjectMeta.Name + vmInputs.Image.ID = imageID + imageName := vmImage.Spec.DisplayName + vmInputs.Image.Name = imageName + + if err != nil { + logrus.Warnf("error encountered during the storage of the imageID value: %s", imageID) + } + + result = vmImage + + return +} + +// CreateVMImage will create a VM Image on Harvester given an image name and an image URL +func CreateVMImage(c *harvclient.Clientset, namespace string, imageName string, url string) (*v1beta1.VirtualMachineImage, error) { + suffix, err := randGen.New(6) + if err != nil { + return nil, err + } + + vmImage, err := c.HarvesterhciV1beta1().VirtualMachineImages(namespace).Create( + context.TODO(), + &v1beta1.VirtualMachineImage{ + ObjectMeta: k8smetav1.ObjectMeta{ + Name: "ubuntu-default" + suffix, + }, + Spec: v1beta1.VirtualMachineImageSpec{ + DisplayName: imageName, + URL: url, + }, + }, + k8smetav1.CreateOptions{}) + if err != nil { + return &v1beta1.VirtualMachineImage{}, err + } + + return vmImage, nil +} + +// SetDefaultSSHKey assign a default SSH key to the VM if none was provided at the command line +func SetDefaultSSHKey(c *harvclient.Clientset, vmInputs *VMInput) (sshKey *v1beta1.KeyPair, err error) { + sshKey = &v1beta1.KeyPair{} + + sshKeys, err := 
c.HarvesterhciV1beta1().KeyPairs(vmInputs.Namespace).List(context.TODO(), k8smetav1.ListOptions{}) + if err != nil { + err = fmt.Errorf("error during listing KeyPairs: %s", err) + return + } + + if len(sshKeys.Items) == 0 { + err = fmt.Errorf("no ssh keys exists in harvester, please add a new ssh key") + return + } + + sshKey = &sshKeys.Items[0] + vmInputs.SSHKey.Name = sshKey.Name + vmInputs.SSHKey.Namespace = sshKey.Namespace + + return +} + +// MergeOptionsInUserData merges the default user data and the provided public key with the user data provided by the user +func MergeOptionsInUserData(userData string, defaultUserData string, sshKey *v1beta1.KeyPair) (string, error) { + var ( + err error + userDataMap map[string]interface{} + defaultUserDataMap map[string]interface{} + ) + + err = yaml.Unmarshal([]byte(userData), &userDataMap) + if err != nil { + return "", err + } + + err = yaml.Unmarshal([]byte(defaultUserData), &defaultUserDataMap) + if err != nil { + return "", err + } + + if userDataMap["ssh_authorized_keys"] != nil && sshKey != nil && sshKey.Spec.PublicKey != "" { + if sshKeyList, ok := userDataMap["ssh_authorized_keys"].([]interface{}); ok { + sshKeyList = append(sshKeyList, sshKey.Spec.PublicKey) + userDataMap["ssh_authorized_keys"] = sshKeyList + } + } + + if userDataMap["packages"] != nil { + if packagesList, ok := userDataMap["packages"].([]interface{}); ok { + if defaultPackages, ok := defaultUserDataMap["packages"].([]interface{}); ok { + packagesList = append(packagesList, defaultPackages...) + userDataMap["packages"] = packagesList + } + } + } else { + userDataMap["packages"] = defaultUserDataMap["packages"] + } + + if userDataMap["runcmd"] != nil { + if defaultRuncmd, ok := defaultUserDataMap["runcmd"].([]interface{}); ok { + if userRuncmd, ok := userDataMap["runcmd"].([]interface{}); ok { + defaultRuncmd = append(defaultRuncmd, userRuncmd...) 
+ userDataMap["runcmd"] = defaultRuncmd + } + } + } else { + userDataMap["runcmd"] = defaultUserDataMap["runcmd"] + } + + mergedUserData, err := yaml.Marshal(userDataMap) + if err != nil { + return "", err + } + + finalUserData := fmt.Sprintf("#cloud-config\n%s", string(mergedUserData)) + + return finalUserData, nil +} diff --git a/internal/helm/helm.go b/internal/helm/helm.go index e79f2f9c3..399261764 100644 --- a/internal/helm/helm.go +++ b/internal/helm/helm.go @@ -35,22 +35,30 @@ func Install(kubecfg, chartLocation, releaseName, namespace string, vals map[str chartLocation, "--create-namespace", } + if vals != nil { valueString := "" + for k, v := range vals { jsonVal, err := json.Marshal(v) if err != nil { return err } + valueString += k + "=" + string(jsonVal) + "," } + args = append(args, "--set-json="+valueString) } + args = append(args, extraArgs...) cmd := vendored.Command("helm", args...) + var errStream strings.Builder + cmd.Stdout = os.Stdout + cmd.Stderr = &errStream if err := cmd.Run(); err != nil { return fmt.Errorf("%v", errStream.String()) diff --git a/internal/k3d/k3d.go b/internal/k3d/k3d.go index 0f754cb02..bbd87d480 100644 --- a/internal/k3d/k3d.go +++ b/internal/k3d/k3d.go @@ -28,8 +28,11 @@ func ImageImport(k3dClusterName string, image string) error { args := []string{"image", "import", "--cluster", k3dClusterName, image} cmd := vendored.Command("k3d", args...) 
+ var errStream strings.Builder + cmd.Stdout = os.Stdout + cmd.Stderr = &errStream if err := cmd.Run(); err != nil { return fmt.Errorf("%v", errStream.String()) diff --git a/internal/kubectl/kubectl.go b/internal/kubectl/kubectl.go index 4fdb99249..3c638c231 100644 --- a/internal/kubectl/kubectl.go +++ b/internal/kubectl/kubectl.go @@ -75,10 +75,12 @@ var ( func collectFileEntries(root string, exts map[string]bool) ([]FileEntry, error) { // Get valid path to root and ensure it exists root = filepath.Clean(root) + info, err := os.Stat(root) if err != nil { return nil, fmt.Errorf("stat root %q: %w", root, err) } + if !info.IsDir() { return nil, fmt.Errorf("root %q is not a directory", root) } @@ -96,7 +98,9 @@ func collectFileEntries(root string, exts map[string]bool) ([]FileEntry, error) } visited := map[string]bool{} // tracks visited resolved real paths to avoid cycles + var out []FileEntry + stack := []stackEntry{{virtualRel: "", realPath: resolvedRoot}} for len(stack) > 0 { @@ -115,9 +119,11 @@ func collectFileEntries(root string, exts map[string]bool) ([]FileEntry, error) if rp, err := filepath.EvalSymlinks(absReal); err == nil { absRealResolved = rp } + if visited[absRealResolved] { continue } + visited[absRealResolved] = true entries, err := os.ReadDir(cur.realPath) @@ -139,11 +145,13 @@ func collectFileEntries(root string, exts map[string]bool) ([]FileEntry, error) log.Printf("warning: broken symlink or cannot resolve %q: %v", entryRealPath, err) continue } + stat, err := os.Stat(target) if err != nil { log.Printf("warning: cannot stat symlink target %q: %v", target, err) continue } + if stat.IsDir() { // push directory to stack to preserve virtualRel stack = append(stack, stackEntry{virtualRel: virtualRel, realPath: target}) @@ -204,6 +212,7 @@ func getCachedEntries(root string, exts map[string]bool) ([]FileEntry, error) { cacheOnce.Do(func() { cachedEntries, cacheErr = collectFileEntries(root, exts) }) + return cachedEntries, cacheErr } @@ -212,6 +221,7 @@ 
func Exec(kubepath string, output io.Writer, args ...string) error { cmd := vendored.Command("kubectl", fullArgs...) var errStream strings.Builder + cmd.Stderr = &errStream cmd.Stdin = os.Stdin @@ -222,6 +232,7 @@ func Exec(kubepath string, output io.Writer, args ...string) error { if err := cmd.Run(); err != nil { return fmt.Errorf("error while running kubectl with params %v: %v", fullArgs, errStream.String()) } + return nil } @@ -234,21 +245,26 @@ func WaitRancher(kubePath string) error { if err != nil { return err } + err = WaitForReadyCondition(kubePath, "deployment", "rancher-webhook", "cattle-system", "available", 3) if err != nil { return err } + err = WaitForReadyCondition(kubePath, "deployment", "fleet-controller", "cattle-fleet-system", "available", 5) + return err } func WaitForReadyCondition(kubePath, resource, name, namespace string, condition string, minutes int) error { var err error + args := []string{"wait", resource, name} if len(namespace) > 0 { args = append(args, "--namespace", namespace) } + args = append(args, "--for", fmt.Sprintf("condition=%s=true", condition), fmt.Sprintf("--timeout=%dm", minutes)) maxRetries := minutes * 30 @@ -271,13 +287,16 @@ func WaitForReadyCondition(kubePath, resource, name, namespace string, condition func GetRancherFQDNFromLoadBalancer(kubePath string) (string, error) { ingress := map[string]string{} + err := Get(kubePath, "services", "", "", ".items[0].status.loadBalancer.ingress[0]", &ingress) if err != nil { return "", err } + if ip, ok := ingress["ip"]; ok { return ip + ".sslip.io", nil } + if hostname, ok := ingress["hostname"]; ok { return hostname, nil } @@ -287,6 +306,7 @@ func GetRancherFQDNFromLoadBalancer(kubePath string) (string, error) { func Get(kubePath string, kind string, name string, namespace string, jsonpath string, out any) error { output := new(bytes.Buffer) + args := []string{ "get", kind, @@ -294,11 +314,13 @@ func Get(kubePath string, kind string, name string, namespace string, jsonpath s 
if name != "" { args = append(args, name) } + if namespace != "" { args = append(args, "--namespace", namespace) } else { args = append(args, "--all-namespaces") } + args = append(args, "-o", fmt.Sprintf("jsonpath={%s}", jsonpath)) if err := Exec(kubePath, output, args...); err != nil { @@ -314,6 +336,7 @@ func Get(kubePath string, kind string, name string, namespace string, jsonpath s func GetStatus(kubepath, kind, name, namespace string) (map[string]any, error) { out := map[string]any{} + err := Get(kubepath, kind, name, namespace, ".status", &out) if err != nil { return nil, err @@ -326,10 +349,12 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo // gather file entries root := "./charts/k6-files/test-files" exts := map[string]bool{".js": true, ".mjs": true, ".sh": true, ".env": true} + entries, err := getCachedEntries(root, exts) if err != nil { log.Fatal(err) } + relTestPath := testPath // get rel path to test file for _, e := range entries { @@ -341,12 +366,15 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo // print what we are about to do quotedArgs := []string{"run"} + for k, v := range envVars { if k == "BASE_URL" { v = localBaseURL } + quotedArgs = append(quotedArgs, "-e", shellescape.Quote(fmt.Sprintf("%s=%s", k, v))) } + quotedArgs = append(quotedArgs, shellescape.Quote(relTestPath)) log.Printf("Running equivalent of:\n./bin/k6 %s\n", strings.Join(quotedArgs, " ")) @@ -356,6 +384,7 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo if err != nil { return err } + err = Exec(kubeconfig, nil, "--namespace="+K6Namespace, "create", "secret", "generic", K6KubeSecretName, "--from-file=config="+path) if err != nil { @@ -367,13 +396,16 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo args := []string{"run"} // ensure we get the complete summary args = append(args, "--summary-mode=full") + for k, v := range envVars { // 
substitute kubeconfig file path with path to secret if k == "KUBECONFIG" { v = "/kube/config" } + args = append(args, "-e", fmt.Sprintf("%s=%s", k, v)) } + for k, v := range tags { args = append(args, "--tag", fmt.Sprintf("%s=%s", k, v)) } @@ -398,6 +430,7 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo "subPath": e.Key, }) } + if _, ok := envVars["KUBECONFIG"]; ok { volumes = append(volumes, map[string]any{"name": K6KubeSecretName, "secret": map[string]string{"secretName": "kube"}}) volumeMounts = append(volumeMounts, map[string]string{"mountPath": "/kube", "name": K6KubeSecretName}) @@ -426,6 +459,7 @@ func K6run(kubeconfig, testPath string, envVars, tags map[string]string, printLo "volumes": volumes, }, } + overrideJSON, err := json.Marshal(override) if err != nil { return err diff --git a/internal/qase/client.go b/internal/qase/client.go index bf6ef8c63..73b3f45f6 100644 --- a/internal/qase/client.go +++ b/internal/qase/client.go @@ -57,27 +57,35 @@ func NewCustomUnifiedClient(cfg *config.Config) (*CustomUnifiedClient, error) { func SetupQaseClient() *CustomUnifiedClient { token := os.Getenv(config.QaseTestOpsAPITokenEnvVar) projectCode := os.Getenv(config.QaseTestOpsProjectEnvVar) + if token == "" { logrus.Fatalf("%s environment variable not set", config.QaseTestOpsAPITokenEnvVar) } + if projectCode == "" { logrus.Fatalf("%s environment variable not set", config.QaseTestOpsProjectEnvVar) } - var err error - var cfg *config.Config + var ( + err error + cfg *config.Config + ) cfgBuilder := config.NewConfigBuilder().LoadFromEnvironment() + cfg, err = cfgBuilder.Build() if err != nil { logrus.Fatalf("Failed to build Qase config from environment variables: %v", err) } + if cfg.Mode == "" { cfg.Mode = config.MODE_TESTOPS } + if cfg.Fallback == "" { cfg.Fallback = config.MODE_REPORT } + if cfg.Debug { logrus.SetLevel(logrus.DebugLevel) } @@ -103,11 +111,14 @@ func (c *CustomUnifiedClient) CreateTestRun(ctx context.Context, 
testRunName, pr resp, res, err := c.V1Client.GetAPIClient().RunsAPI.CreateRun(authCtx, projectCode).RunCreate(*runCreate).Execute() logResponseBody(res, "CreateTestRun") + if err != nil { return 0, fmt.Errorf("failed to create test run: %w", err) } + runID := *resp.Result.Id c.Config.TestOps.Run.ID = &runID + return runID, nil } @@ -121,6 +132,7 @@ func (c *CustomUnifiedClient) GetTestRun(ctx context.Context, projectCode string resp, res, err := c.V1Client.GetAPIClient().RunsAPI.GetRun(authCtx, projectCode, int32(runID)).Execute() logResponseBody(res, "GetTestRun") + if err != nil { return nil, fmt.Errorf("failed to get test run: %w", err) } @@ -135,6 +147,7 @@ func (c *CustomUnifiedClient) GetTestCase(ctx context.Context, projectCode strin }) resp, res, err := c.V1Client.GetAPIClient().CasesAPI.GetCase(authCtx, projectCode, int32(caseID)).Execute() logResponseBody(res, "GetTestCase") + return resp.Result, err } @@ -151,10 +164,12 @@ func (c *CustomUnifiedClient) CompleteTestRun(ctx context.Context, projectCode s logrus.Debugf("Completing test run ID: %d", runID) _, res, err := c.V1Client.GetAPIClient().RunsAPI.CompleteRun(authCtx, c.Config.TestOps.Project, int32(runID)).Execute() logResponseBody(res, "CompleteTestRun") + if err != nil { return fmt.Errorf("failed to complete test run: %w", err) } } + return nil } @@ -167,13 +182,16 @@ func (c *CustomUnifiedClient) UploadAttachments(ctx context.Context, files []*os projectCode := c.Config.TestOps.Project var hashes []string + for _, file := range files { logrus.Debugf("Uploading attachment: %s", file.Name()) + hash, err := c.V1Client.UploadAttachment(ctx, projectCode, []*os.File{file}) if err != nil { logrus.Warnf("Failed to upload attachment %s: %v", file.Name(), err) continue } + if hash != "" { hashes = append(hashes, hash) } @@ -184,6 +202,7 @@ func (c *CustomUnifiedClient) UploadAttachments(ctx context.Context, files []*os } logrus.Infof("Successfully uploaded %d out of %d attachments.", len(hashes), 
len(files)) + return hashes, nil } @@ -197,6 +216,7 @@ func (c *CustomUnifiedClient) GetTestCaseByTitle(ctx context.Context, projectCod limit := int32(100) offset := int32(0) + var matchingCase *api_v1_client.TestCase for { @@ -206,6 +226,7 @@ func (c *CustomUnifiedClient) GetTestCaseByTitle(ctx context.Context, projectCod Offset(offset). Execute() logResponseBody(res, "GetTestCaseByTitle") + if err != nil { return nil, fmt.Errorf("failed to get test cases: %w", err) } @@ -239,19 +260,23 @@ func (c *CustomUnifiedClient) CreateTestResultV1(ctx context.Context, projectCod }) _, res, err := c.V1Client.GetAPIClient().ResultsAPI.CreateResult(authCtx, projectCode, int32(runID)).ResultCreate(result).Execute() logResponseBody(res, "CreateTestResultV1") + if err != nil || !strings.Contains(strings.ToLower(res.Status), "ok") { return fmt.Errorf("failed to create v1 test result or did not receive 'OK; response: %w", err) } + return nil } // CreateTestResultV2 creates a test result using the V2 API. 
func (c *CustomUnifiedClient) CreateTestResultV2(ctx context.Context, projectCode string, runID int64, result api_v2_client.ResultCreate) error { - res, err := c.V2Client.GetAPIClient().ResultsAPI.CreateResultV2(ctx, projectCode, runID).ResultCreate(result).Execute() + _, res, err := c.V2Client.GetAPIClient().ResultsAPI.CreateResultV2(ctx, projectCode, runID).ResultCreate(result).Execute() logResponseBody(res, "CreateTestResultV2") + if err != nil { return fmt.Errorf("failed to create v2 test result: %w", err) } + return nil } diff --git a/internal/tofu/format/format.go b/internal/tofu/format/format.go index 61fae6b92..4f4447ee4 100644 --- a/internal/tofu/format/format.go +++ b/internal/tofu/format/format.go @@ -65,6 +65,7 @@ func primitiveToHclString(value interface{}, isNested bool) (string, error) { if isNested { return fmt.Sprintf("\"%v\"", v), nil } + return fmt.Sprintf("%v", v), nil case bool: return strconv.FormatBool(v), nil @@ -79,6 +80,7 @@ func primitiveToHclString(value interface{}, isNested bool) (string, error) { vInt64 = int64(vInt32) } } + return fmt.Sprintf("%d", vInt64), nil case float32, float64: // explicitly convert to float64 if needed @@ -87,6 +89,7 @@ func primitiveToHclString(value interface{}, isNested bool) (string, error) { vFloat64 = float64(v.(float32)) return strconv.FormatFloat(vFloat64, 'f', -1, 32), nil } + return strconv.FormatFloat(vFloat64, 'f', -1, 64), nil default: return fmt.Sprintf("%v", v), fmt.Errorf("no defined case for type of value: %T", v) @@ -100,17 +103,29 @@ func primitiveToHclString(value interface{}, isNested bool) (string, error) { // good enough for the type of variables we deal with in Dartboard. 
func ConvertValueToHCL(value any, isNested bool) string { // We use type assertions to manually convert into []interface{} and map[string]interface{} if and when needed - var v string - var err error + var ( + v string + err error + ) + if slice, isSlice := value.([]any); isSlice { v = sliceToHclString(slice) } else if m, isMap := value.(map[string]any); isMap { v = mapToHclString(m) + } else if sliceMap, isSliceMap := value.([]map[string]any); isSliceMap { + anySlice := make([]any, len(sliceMap)) + for i, m := range sliceMap { + anySlice[i] = m + } + + v = sliceToHclString(anySlice) } else { v, err = primitiveToHclString(value, isNested) } + if err != nil { log.Panicf("%v", err) } + return v } diff --git a/internal/tofu/tofu.go b/internal/tofu/tofu.go index f7fe22c67..82c8b5311 100644 --- a/internal/tofu/tofu.go +++ b/internal/tofu/tofu.go @@ -18,67 +18,112 @@ package tofu import ( "bytes" - "context" "encoding/json" + "errors" "fmt" "io" "log" "os" + "os/user" "path/filepath" "strconv" "strings" + "github.com/sirupsen/logrus" + "github.com/rancher/dartboard/internal/tofu/format" "github.com/rancher/dartboard/internal/vendored" ) type ClusterAddress struct { - HTTPPort uint `json:"http_port"` - HTTPSPort uint `json:"https_port"` - Name string `json:"name"` + Name string `json:"name" yaml:"name"` + HTTPPort uint `json:"http_port" yaml:"http_port"` + HTTPSPort uint `json:"https_port" yaml:"https_port"` } type ClusterAppAddresses struct { - Private ClusterAddress `json:"private"` - Public ClusterAddress `json:"public"` - Tunnel ClusterAddress `json:"tunnel"` + Private ClusterAddress `json:"private" yaml:"private"` + Public ClusterAddress `json:"public" yaml:"public"` + Tunnel ClusterAddress `json:"tunnel" yaml:"tunnel"` } type Addresses struct { - Public string `json:"public"` - Private string `json:"private"` - Tunnel string `json:"tunnel"` + Public string `json:"public" yaml:"public"` + Private string `json:"private" yaml:"private"` + Tunnel string `json:"tunnel" 
yaml:"tunnel"` } type Cluster struct { - AppAddresses ClusterAppAddresses `json:"app_addresses"` - Name string `json:"name"` - Context string `json:"context"` - IngressClassName string `json:"ingress_class_name"` - Kubeconfig string `json:"kubeconfig"` - NodeAccessCommands map[string]string `json:"node_access_commands"` - KubernetesAddresses Addresses `json:"kubernetes_addresses"` - ReserveNodeForMonitoring bool `json:"reserve_node_for_monitoring"` + NodeAccessCommands map[string]string `json:"node_access_commands" yaml:"node_access_commands"` + KubernetesAddresses Addresses `json:"kubernetes_addresses" yaml:"kubernetes_addresses"` + Name string `json:"name" yaml:"name"` + Context string `json:"context" yaml:"context"` + IngressClassName string `json:"ingress_class_name" yaml:"ingress_class_name"` + Kubeconfig string `json:"kubeconfig" yaml:"kubeconfig"` + AppAddresses ClusterAppAddresses `json:"app_addresses" yaml:"app_addresses"` + ReserveNodeForMonitoring bool `json:"reserve_node_for_monitoring" yaml:"reserve_node_for_monitoring"` +} + +type CustomCluster struct { + Name string `json:"name,omitempty" yaml:"name"` + NamePrefix string `yaml:"name_prefix" json:"name_prefix,omitempty"` + DistroVersion string `yaml:"distro_version" json:"distro_version,omitempty"` + Nodes []Node `yaml:"nodes" json:"nodes,omitempty"` + MachinePools []MachinePoolConfig `yaml:"machine_pools" json:"machine_pools,omitempty"` + ClusterCount int `yaml:"cluster_count" json:"cluster_count,omitempty"` + ServerCount int `yaml:"server_count" json:"server_count,omitempty"` +} + +type MachinePools struct { + // machinepools.Pools + MachinePoolConfig MachinePoolConfig `yaml:"machine_pool_config,omitempty" default:"[]"` +} + +type MachinePoolConfig struct { + ControlPlane bool `json:",omitempty" yaml:"controlplane,omitempty"` + Etcd bool `json:"etcd,omitempty" yaml:"etcd,omitempty"` + Worker bool `json:"worker,omitempty" yaml:"worker,omitempty"` + Quantity int32 `json:"quantity,omitempty" 
yaml:"quantity,omitempty"` +} + +type Node struct { + Name string `json:"name" yaml:"name"` + PublicIP string `json:"public_ip,omitempty" yaml:"public_ip,omitempty"` + PublicHostName string `json:"public_name,omitempty" yaml:"public_name,omitempty"` + PrivateIP string `json:"private_ip,omitempty" yaml:"private_ip,omitempty"` + PrivateHostName string `json:"private_name,omitempty" yaml:"private_name,omitempty"` + SSHUser string `json:"ssh_user" yaml:"ssh_user"` + SSHKeyPath string `json:"ssh_key_path" yaml:"ssh_key_path"` } type Clusters struct { - Value map[string]Cluster `json:"value"` + Value map[string]Cluster `json:"value,omitempty" yaml:"value,omitempty"` +} + +type CustomClusters struct { + Value []CustomCluster `json:"value,omitempty" yaml:"value,omitempty"` +} + +type Nodes struct { + Value map[string]Node `json:"value,omitempty" yaml:"value,omitempty"` } type Output struct { - Clusters Clusters `json:"clusters"` + Clusters Clusters `json:"clusters,omitzero" yaml:"clusters,omitzero"` + CustomClusters CustomClusters `json:"custom_clusters,omitzero" yaml:"custom_clusters,omitzero"` } type Tofu struct { dir string workspace string + variables []string threads int verbose bool - variables []string } -func New(ctx context.Context, variableMap map[string]interface{}, dir string, ws string, parallelism int, verbose bool) (*Tofu, error) { +func New(variableMap map[string]interface{}, dir string, ws string, parallelism int, verbose bool) (*Tofu, error) { var variables []string + for k, v := range variableMap { variable := fmt.Sprintf("%s=%s", k, format.ConvertValueToHCL(v, false)) variables = append(variables, variable) @@ -96,6 +141,7 @@ func New(ctx context.Context, variableMap map[string]interface{}, dir string, ws for _, variable := range t.variables { args = append(args, "-var", variable) } + if err := t.exec(nil, args...); err != nil { return nil, err } @@ -109,6 +155,7 @@ func (t *Tofu) exec(output io.Writer, args ...string) error { cmd := 
vendored.Command("tofu", fullArgs...) var errStream strings.Builder + cmd.Stderr = &errStream cmd.Stdin = os.Stdin @@ -123,37 +170,41 @@ func (t *Tofu) exec(output io.Writer, args ...string) error { if err := cmd.Run(); err != nil { return fmt.Errorf("error while running tofu: %v", errStream.String()) } + return nil } -func (t *Tofu) handleWorkspace(ctx context.Context) error { +func (t *Tofu) handleWorkspace() error { if !(len(t.workspace) > 0) { t.workspace = "default" } - wsExists, err := t.workspaceExists(ctx) + wsExists, err := t.workspaceExists() if err != nil { return err } if wsExists { log.Printf("Found existing tofu workspace: %s", t.workspace) - return t.selectWorkspace(ctx) + return t.selectWorkspace() } log.Printf("Creating new tofu workspace: %s", t.workspace) - if err = t.newWorkspace(ctx); err != nil { + + if err = t.newWorkspace(); err != nil { return err } - return t.selectWorkspace(ctx) + return t.selectWorkspace() } -func (t *Tofu) workspaceExists(ctx context.Context) (bool, error) { +func (t *Tofu) workspaceExists() (bool, error) { args := []string{"workspace", "list"} - var out bytes.Buffer - var err error + var ( + out bytes.Buffer + err error + ) if err = t.exec(&out, args...); err != nil { return false, fmt.Errorf("failed to list workspaces: %v", err) @@ -164,28 +215,61 @@ func (t *Tofu) workspaceExists(ctx context.Context) (bool, error) { return wsExists, err } -func (t *Tofu) selectWorkspace(ctx context.Context) error { +func (t *Tofu) selectWorkspace() error { args := []string{"workspace", "select", t.workspace} return t.exec(nil, args...) } -func (t *Tofu) newWorkspace(ctx context.Context) error { +func (t *Tofu) newWorkspace() error { args := []string{"workspace", "new", t.workspace} return t.exec(nil, args...) 
} -func (t *Tofu) Apply(ctx context.Context) error { - t.handleWorkspace(ctx) +func (t *Tofu) Apply(skipRefresh bool) error { + err := t.handleWorkspace() + if err != nil { + return err + } args := t.commonArgs("apply") + if skipRefresh { + args = append(args, "-refresh=false") + } + return t.exec(nil, args...) } -func (t *Tofu) Destroy(ctx context.Context) error { - t.handleWorkspace(ctx) +func (t *Tofu) Output(out io.Writer, jsonFormat bool) error { + err := t.handleWorkspace() + if err != nil { + return err + } + + var args []string + if jsonFormat { + args = []string{"output", "-json"} + } else { + args = []string{"output"} + } + + writer := out + if out == nil { + logrus.Debugf("\nLogging to stdout since no io.Writer was provided\n") + + writer = os.Stdout + } + + return t.exec(writer, args...) +} + +func (t *Tofu) Destroy() error { + err := t.handleWorkspace() + if err != nil { + return err + } args := t.commonArgs("destroy") @@ -199,27 +283,31 @@ func (t *Tofu) commonArgs(command string) []string { for _, variable := range t.variables { args = append(args, "-var", variable) } + return args } -func (t *Tofu) OutputClusters(ctx context.Context) (map[string]Cluster, error) { - t.handleWorkspace(ctx) +func (t *Tofu) ParseOutputs() (map[string]Cluster, []CustomCluster, error) { + err := t.handleWorkspace() + if err != nil { + return nil, nil, err + } buffer := new(bytes.Buffer) - if err := t.exec(buffer, "output", "-json"); err != nil { - return nil, err + if err := t.Output(buffer, true); err != nil { + return nil, nil, err } output := &Output{} if err := json.Unmarshal(buffer.Bytes(), output); err != nil { - return nil, fmt.Errorf("error: tofu OutputClusters: %w", err) + return nil, nil, fmt.Errorf("error: tofu ParseOutputs: %w", err) } - return output.Clusters.Value, nil + return output.Clusters.Value, output.CustomClusters.Value, nil } // PrintVersion prints the Tofu version information -func (t *Tofu) PrintVersion(ctx context.Context) error { +func (t *Tofu) 
PrintVersion() error { return t.exec(log.Writer(), "version") } @@ -228,3 +316,48 @@ func (t *Tofu) IsK3d() bool { _, f := filepath.Split(t.dir) return f == "k3d" } + +// ReadBytesFromPath reads in the file from the given path, returns the file in []byte format +func ReadBytesFromPath(filePath string) ([]byte, error) { + var ( + fileBytes []byte + path string + ) + + if strings.Contains(filePath, "~") { + usr, err := user.Current() + if err != nil { + return nil, errors.New("error retrieving current user") + } + + path = strings.Replace(filePath, "~", usr.HomeDir, 1) + } else { + path = filePath + } + + if _, err := os.Stat(path); err == nil { + fileBytes, err = os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("error reading file at %s: %w", path, err) + } + } else { + return nil, fmt.Errorf("error could not find file at %s: %w", path, err) + } + + return fileBytes, nil +} + +// GetNodesByPrefix takes a flat map of nodes and returns a map +// from prefix → slice of Nodes whose key begins with that prefix. 
+func GetNodesByPrefix(all map[string]Node, prefix string) []Node { + grouped := []Node{} + + for key := range all { + if strings.HasPrefix(key, prefix) { + fmt.Printf("Appending node: %v", all[key]) + grouped = append(grouped, all[key]) + } + } + + return grouped +} diff --git a/internal/vendored/embed.go b/internal/vendored/embed.go index 98a035e41..2617c2725 100644 --- a/internal/vendored/embed.go +++ b/internal/vendored/embed.go @@ -22,8 +22,7 @@ var sourceFS embed.FS // ExtractBinaries extracts embedded binaries at runtime func ExtractBinaries() error { - - err := os.Mkdir(DestinationDir, 0755) + err := os.Mkdir(DestinationDir, 0o755) if err != nil && !os.IsExist(err) { return err } @@ -40,10 +39,12 @@ func ExtractBinaries() error { // skip existing destFile := filepath.Join(".bin", strings.TrimPrefix(path, SourceDir+"/")) + _, err = os.Stat(destFile) if err == nil { return nil } + if !os.IsNotExist(err) { return fmt.Errorf("failed to check if file %v exists: %v", destFile, err) } @@ -54,7 +55,7 @@ func ExtractBinaries() error { return fmt.Errorf("failed to read an embedded file %v: %v", path, err) } - err = os.WriteFile(destFile, content, 0755) + err = os.WriteFile(destFile, content, 0o755) if err != nil { return fmt.Errorf("failed to write %v: %v", destFile, err) } diff --git a/internal/vendored/run.go b/internal/vendored/run.go index fb45534eb..a67b831d2 100644 --- a/internal/vendored/run.go +++ b/internal/vendored/run.go @@ -18,6 +18,7 @@ func Command(name string, args ...string) *exec.Cmd { for i, arg := range args { quotedArgs[i] = shellescape.Quote(arg) } + log.Printf("Running command: \n%s %s\n", vendoredName, strings.Join(quotedArgs, " ")) return exec.Command(vendoredName, args...) 
diff --git a/qasereporter-k6/main.go b/qasereporter-k6/main.go index ccb904d67..650b18386 100644 --- a/qasereporter-k6/main.go +++ b/qasereporter-k6/main.go @@ -100,6 +100,7 @@ func main() { granularReporting := flag.Bool("granular", false, "Enable granular reporting of all Metric and Point lines from k6 JSON output.") // The -runID flag allows overriding the test case ID. runIDOverride := flag.String("runID", "", "Qase test run ID to report results against.") + flag.Parse() if runIDStr == "" && runName == "" { @@ -202,6 +203,7 @@ func reportMetrics(params map[string]string) { resultBody := v1.NewResultCreate(status) resultBody.SetCaseId(testCaseID) resultBody.SetComment(comment) + if len(params) > 0 { resultBody.SetParam(params) } @@ -222,6 +224,7 @@ func getAndValidateTestCaseParameters(testCaseParameters []v1.TestCaseParameter) } logrus.Infof("Test case has %d parameter(s), validating against environment variables...", len(testCaseParameters)) + parametersMap := make(map[string]string) for _, parameter := range testCaseParameters { @@ -247,6 +250,7 @@ func getAndValidateTestCaseParameters(testCaseParameters []v1.TestCaseParameter) parametersMap[parameterTitle] = parameterValue } } + return parametersMap } diff --git a/qasereporter-k6/summary.go b/qasereporter-k6/summary.go index 1d3c78dec..dc7fb07cc 100644 --- a/qasereporter-k6/summary.go +++ b/qasereporter-k6/summary.go @@ -37,7 +37,7 @@ type K6SummaryMetric struct { Thresholds map[string]K6SummaryThreshold `json:"thresholds,omitempty"` } -// K6SummaryThreshold represents a threshold with its pass/fail status. +// K6SummaryThreshold represents a threshold with its pass/fail status. 
type K6SummaryThreshold struct { OK bool `json:"ok"` } @@ -87,6 +87,7 @@ func reportSummary(params map[string]string) { if len(attachments) > 0 { var files []*os.File + for _, filePath := range attachments { file, err := os.Open(filePath) if err != nil { @@ -113,6 +114,7 @@ func reportSummary(params map[string]string) { resultBody.SetCaseId(testCaseID) resultBody.SetComment(comment) resultBody.SetAttachments(attachmentHashes) + if len(params) > 0 { resultBody.SetParam(params) } diff --git a/tofu/main/aws/main.tf b/tofu/main/aws/main.tf index 929d3803e..9f4469300 100644 --- a/tofu/main/aws/main.tf +++ b/tofu/main/aws/main.tf @@ -4,16 +4,17 @@ provider "aws" { } module "network" { - source = "../../modules/aws/network" - project_name = var.project_name - region = var.region - availability_zone = var.availability_zone - existing_vpc_name = var.existing_vpc_name - bastion_host_ami = length(var.bastion_host_ami) > 0 ? var.bastion_host_ami : null - ssh_bastion_user = var.ssh_bastion_user - ssh_public_key_path = var.ssh_public_key_path - ssh_private_key_path = var.ssh_private_key_path - ssh_prefix_list = var.ssh_prefix_list + source = "../../modules/aws/network" + project_name = var.project_name + region = var.region + availability_zone = var.availability_zone + existing_vpc_name = var.existing_vpc_name + bastion_host_ami = length(var.bastion_host_ami) > 0 ? 
var.bastion_host_ami : null + bastion_host_instance_type = var.bastion_host_instance_type + ssh_bastion_user = var.ssh_bastion_user + ssh_public_key_path = var.ssh_public_key_path + ssh_private_key_path = var.ssh_private_key_path + ssh_prefix_list = var.ssh_prefix_list } module "test_environment" { diff --git a/tofu/main/aws/outputs.tf b/tofu/main/aws/outputs.tf index 5f7b73161..416e929ed 100644 --- a/tofu/main/aws/outputs.tf +++ b/tofu/main/aws/outputs.tf @@ -1,3 +1,7 @@ output "clusters" { value = module.test_environment.clusters } + +output "custom_clusters" { + value = module.test_environment.custom_clusters +} diff --git a/tofu/main/aws/variables.tf b/tofu/main/aws/variables.tf index 9b2073de3..6ede9f563 100644 --- a/tofu/main/aws/variables.tf +++ b/tofu/main/aws/variables.tf @@ -104,3 +104,9 @@ variable "bastion_host_ami" { default = "ami-0e55a8b472a265e3f" // openSUSE-Leap-15-5-v20230608-hvm-ssd-arm64-a516e959-df54-4035-bb1a-63599b7a6df9 } + +variable "bastion_host_instance_type" { + description = "EC2 instance type" + type = string + default = "t4g.large" +} diff --git a/tofu/main/azure/terraform.tf b/tofu/main/azure/terraform.tf index 7efcbff84..5eee831b6 100644 --- a/tofu/main/azure/terraform.tf +++ b/tofu/main/azure/terraform.tf @@ -1,4 +1,5 @@ terraform { + required_version = ">=1.8.2" required_providers { azurerm = { source = "hashicorp/azurerm" diff --git a/tofu/main/harvester/outputs.tf b/tofu/main/harvester/outputs.tf index 5f7b73161..416e929ed 100644 --- a/tofu/main/harvester/outputs.tf +++ b/tofu/main/harvester/outputs.tf @@ -1,3 +1,7 @@ output "clusters" { value = module.test_environment.clusters } + +output "custom_clusters" { + value = module.test_environment.custom_clusters +} diff --git a/tofu/main/harvester/terraform.tf b/tofu/main/harvester/terraform.tf index 555660063..42c44dad5 100644 --- a/tofu/main/harvester/terraform.tf +++ b/tofu/main/harvester/terraform.tf @@ -1,4 +1,5 @@ terraform { + required_version = ">=1.8.1" required_providers 
{ harvester = { source = "harvester/harvester" diff --git a/tofu/main/harvester/variables.tf b/tofu/main/harvester/variables.tf index f3b8d59e2..3c719888c 100644 --- a/tofu/main/harvester/variables.tf +++ b/tofu/main/harvester/variables.tf @@ -121,7 +121,7 @@ variable "network" { vlan_id = 100 name = "vmnet-shared" namespace = "default" - interace_type = "bridge" + interface_type = "bridge" public = true wait_for_lease = true } diff --git a/tofu/main/k3d/terraform.tf b/tofu/main/k3d/terraform.tf index a2d5f9294..99ae38db4 100644 --- a/tofu/main/k3d/terraform.tf +++ b/tofu/main/k3d/terraform.tf @@ -1,4 +1,5 @@ terraform { + required_version = ">=1.8.1" required_providers { docker = { source = "kreuzwerker/docker" diff --git a/tofu/modules/generic/k3s/main.tf b/tofu/modules/generic/k3s/main.tf index 18df5dd72..2f6aa9eec 100644 --- a/tofu/modules/generic/k3s/main.tf +++ b/tofu/modules/generic/k3s/main.tf @@ -221,7 +221,7 @@ resource "local_file" "kubeconfig" { { cluster = { certificate-authority-data = base64encode(tls_self_signed_cert.server_ca_cert.cert_pem) - server = local.local_kubernetes_api_url + server = var.create_tunnels ? 
local.local_kubernetes_api_url : "https://${module.server_nodes[0].public_name}:6443" } name = var.name } diff --git a/tofu/modules/generic/node/outputs.tf b/tofu/modules/generic/node/outputs.tf index 237c9cfca..f0d77cd3f 100644 --- a/tofu/modules/generic/node/outputs.tf +++ b/tofu/modules/generic/node/outputs.tf @@ -1,25 +1,21 @@ -output "private_name" { - value = module.host.private_name +output "name" { + value = var.name } -output "private_ip" { - value = module.host.private_ip +output "public_ip" { + value = module.host.public_ip } output "public_name" { value = module.host.public_name } -output "public_ip" { - value = module.host.public_ip -} - -output "name" { - value = var.name +output "private_ip" { + value = module.host.private_ip } -output "ssh_script_filename" { - value = local_file.ssh_script.filename +output "private_name" { + value = module.host.private_name } output "ssh_user" { @@ -29,3 +25,7 @@ output "ssh_user" { output "ssh_key_path" { value = module.host.ssh_key_path } + +output "ssh_script_filename" { + value = local_file.ssh_script.filename +} diff --git a/tofu/modules/generic/rke2/install_rke2.sh b/tofu/modules/generic/rke2/install_rke2.sh index 8e76aadac..7b9d0dcbf 100644 --- a/tofu/modules/generic/rke2/install_rke2.sh +++ b/tofu/modules/generic/rke2/install_rke2.sh @@ -146,4 +146,3 @@ fi systemctl enable rke2-${type}.service systemctl restart rke2-${type}.service - diff --git a/tofu/modules/generic/rke2/main.tf b/tofu/modules/generic/rke2/main.tf index 007e31425..50edcab99 100644 --- a/tofu/modules/generic/rke2/main.tf +++ b/tofu/modules/generic/rke2/main.tf @@ -215,7 +215,7 @@ resource "local_file" "kubeconfig" { { cluster = { certificate-authority-data = base64encode(tls_self_signed_cert.server_ca_cert.cert_pem) - server = local.local_kubernetes_api_url + server = var.create_tunnels ? 
local.local_kubernetes_api_url : "https://${module.server_nodes[0].public_name}:6443" } name = var.name } diff --git a/tofu/modules/generic/test_environment/main.tf b/tofu/modules/generic/test_environment/main.tf index 77d2e69c0..da7c53b46 100644 --- a/tofu/modules/generic/test_environment/main.tf +++ b/tofu/modules/generic/test_environment/main.tf @@ -2,7 +2,18 @@ locals { downstream_clusters = flatten([ for i, template in var.downstream_cluster_templates : [ for j in range(template.cluster_count) : merge(template, { name = "downstream-${i}-${j}" }) - ] if template.cluster_count > 0]) + ] if template.cluster_count > 0 && !template.is_custom_cluster + ]) + custom_cluster_name_prefix = "downstream-custom" + nodes = flatten([ + for template_idx, template in var.downstream_cluster_templates : [ + for j in range(template.cluster_count * template.server_count) : merge(template, { + name = "${local.custom_cluster_name_prefix}-${template_idx}-${j}" + origin_index = template_idx + index = j + }) + ] if template.cluster_count > 0 && template.is_custom_cluster + ]) } module "upstream_postgres" { @@ -89,3 +100,16 @@ module "downstream_clusters" { network_config = var.network_config node_module_variables = local.downstream_clusters[count.index].node_module_variables } + +module "nodes" { + count = length(local.nodes) + source = "../node" + project_name = var.project_name + name = local.nodes[count.index].name + ssh_private_key_path = var.ssh_private_key_path + ssh_user = var.ssh_user + node_module = var.node_module + node_module_variables = local.nodes[count.index].node_module_variables + network_config = var.network_config + public = local.nodes[count.index].public_ip +} diff --git a/tofu/modules/generic/test_environment/outputs.tf b/tofu/modules/generic/test_environment/outputs.tf index a413b1d78..02462be0b 100644 --- a/tofu/modules/generic/test_environment/outputs.tf +++ b/tofu/modules/generic/test_environment/outputs.tf @@ -3,6 +3,25 @@ output "clusters" { "upstream" : 
module.upstream_cluster.config, "tester" : var.tester_cluster != null ? module.tester_cluster[0].config : null, }, - { for i, cluster in local.downstream_clusters : cluster.name => module.downstream_clusters[i].config } + { for i, cluster in local.downstream_clusters : cluster.name => module.downstream_clusters[i].config }, ) } + +output "custom_clusters" { + value = flatten([ + for template_idx, template in var.downstream_cluster_templates : [ + merge(template, { + nodes = [ + for i, node in local.nodes : merge(module.nodes[i], { + ssh_user = var.ssh_user + ssh_key_path = abspath(pathexpand(var.ssh_private_key_path)) + }) if node.origin_index == template_idx + ] + name = "${local.custom_cluster_name_prefix}-${template_idx}" + machine_pools = [ + for j, pool in template.machine_pools : pool.machine_pool_config + ] + }) + ] if template.cluster_count > 0 && template.is_custom_cluster + ]) +} diff --git a/tofu/modules/generic/test_environment/variables.tf b/tofu/modules/generic/test_environment/variables.tf index ec417f533..bcfae5334 100644 --- a/tofu/modules/generic/test_environment/variables.tf +++ b/tofu/modules/generic/test_environment/variables.tf @@ -55,15 +55,46 @@ variable "downstream_cluster_templates" { agent_count = number // Number of agent nodes in the downstream cluster distro_version = string // Version of the Kubernetes distro in the downstream cluster - public_ip = bool // Whether the downstream cluster should have a public IP assigned - reserve_node_for_monitoring = bool // Set a 'monitoring' label and taint on one node of the downstream cluster to reserve it for monitoring - enable_audit_log = bool // Enable audit log for the cluster + is_custom_cluster = optional(bool, false) // Whether the downstream cluster is a custom cluster (it should only have nodes created) + public_ip = optional(bool, false) // Whether the downstream cluster should have a public IP assigned + reserve_node_for_monitoring = optional(bool, false) // Set a 'monitoring' label 
and taint on one node of the downstream cluster to reserve it for monitoring + enable_audit_log = optional(bool, false) // Enable audit log for the cluster create_tunnels = optional(bool, false) // Whether ssh tunnels to the downstream cluster's first server node should be created. Default false max_pods = optional(number, 110) // Max pods per node node_cidr_mask_size = optional(number, 24) // Number of IP addresses for pods per node - node_module_variables = any // Node module-specific variables + machine_pools = optional(list(object({ + machine_pool_config = object({ + etcd = bool + controlplane = bool + worker = bool + quantity = number + node_module_variables = optional(any) + }) + }))) + node_module_variables = optional(any, {}) // Node module-specific variables })) + + validation { + condition = alltrue(flatten([for i, template in var.downstream_cluster_templates : template.is_custom_cluster ? length(template.machine_pools) > 0 : true])) + error_message = "Custom cluster templates must have at least one machine pool." + } + validation { + condition = alltrue(flatten([for i, template in var.downstream_cluster_templates : template.is_custom_cluster ? length(template.machine_pools) > 0 ? contains([1, 3, 5], sum([for j, pool in template.machine_pools : pool.machine_pool_config.etcd ? pool.machine_pool_config.quantity : 0])) : false : true])) + error_message = "The number of etcd nodes per Custom cluster template must be one of [1, 3, 5]." + } + validation { + condition = alltrue(flatten([for i, template in var.downstream_cluster_templates : template.is_custom_cluster ? length(template.machine_pools) > 0 ? sum([for j, pool in template.machine_pools : pool.machine_pool_config.controlplane ? pool.machine_pool_config.quantity : 0]) > 0 : false : true])) + error_message = "Custom cluster templates must have at least one controlplane node." 
+ } + validation { + condition = alltrue(flatten([for i, template in var.downstream_cluster_templates : template.is_custom_cluster ? length(template.machine_pools) > 0 ? sum([for j, pool in template.machine_pools : pool.machine_pool_config.worker ? pool.machine_pool_config.quantity : 0]) > 0 : false : true])) + error_message = "Custom cluster templates must have at least one worker node." + } + validation { + condition = alltrue(flatten([for i, template in var.downstream_cluster_templates : template.is_custom_cluster ? sum([for j, pool in template.machine_pools : pool.machine_pool_config.quantity]) == template.server_count : true])) + error_message = "Custom cluster templates must have enough nodes for the given machine pool configuration." + } validation { condition = alltrue([for i, template in var.downstream_cluster_templates : template.cluster_count > 0 ? template.server_count > 0 ? true : false : true]) error_message = "Must have at least one server per cluster template when cluster_count > 0." @@ -76,8 +107,7 @@ variable "downstream_cluster_templates" { # https://github.com/opentofu/opentofu/issues/1896#issuecomment-2275763570 -> # https://github.com/opentofu/opentofu/issues/2155 variable "downstream_cluster_distro_module" { - description = "Name of the module to use for the downstream clusters" - type = string + description = "Name of the module to use for downstream clusters. Default assumes imported cluster" default = "generic/k3s" } diff --git a/tofu/modules/harvester/node/data.tf b/tofu/modules/harvester/node/data.tf index 95cac1f56..fb10606b1 100644 --- a/tofu/modules/harvester/node/data.tf +++ b/tofu/modules/harvester/node/data.tf @@ -1,5 +1,5 @@ data "harvester_image" "this" { - count = var.node_module_variables.image_name != null && var.node_module_variables.image_namespace != null ? 1 : 0 + count = var.node_module_variables.image_name != null && local.image_namespace != null && var.node_module_variables.image_id == null ? 
1 : 0 display_name = var.node_module_variables.image_name namespace = local.image_namespace } diff --git a/tofu/modules/harvester/node/outputs.tf b/tofu/modules/harvester/node/outputs.tf index 2667fc3d2..45c03524b 100644 --- a/tofu/modules/harvester/node/outputs.tf +++ b/tofu/modules/harvester/node/outputs.tf @@ -14,10 +14,6 @@ output "private_ip" { value = var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address } -output "public_address" { - value = var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address -} - output "public_name" { value = "${var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address}.sslip.io" }