From 21b3ecb87c3d6646311ef93da857b0b7791d801c Mon Sep 17 00:00:00 2001
From: VestigeJ
Date: Tue, 30 Jul 2024 17:09:38 -0700
Subject: [PATCH 1/2] reverted missing declaration

---
 pkg/testcase/networkconnectivity.go | 48 +++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/pkg/testcase/networkconnectivity.go b/pkg/testcase/networkconnectivity.go
index 1824b655..0e0feb63 100644
--- a/pkg/testcase/networkconnectivity.go
+++ b/pkg/testcase/networkconnectivity.go
@@ -10,6 +10,7 @@ import (
 	"github.com/rancher/distros-test-framework/shared"
 
 	. "github.com/onsi/gomega"
+	// . "github.com/onsi/gomega/gstruct"
 )
 
 // TestInternodeConnectivityMixedOS validates communication between linux and windows nodes.
@@ -109,3 +110,50 @@ func testCrossNodeService(services, ports, expected []string) error {
 
 	return nil
 }
+
+func TestEndpointReadiness(cluster *factory.Cluster) {
+	//do more checks on the filesystem to ensure the certs are all created and in the correct location before this
+	commands := []string{
+		"sudo curl -sk http://127.0.0.1:10248/healthz",  //kubelet
+		"sudo curl -sk http://127.0.0.1:10249/healthz",  //kube-proxy
+		"sudo curl -sk https://127.0.0.1:10257/healthz", //kube-controller
+		"sudo curl -sk https://127.0.0.1:10258/healthz", //cloud-controller
+		"sudo curl -sk https://127.0.0.1:10259/healthz", //kube-scheduler
+		"sudo curl -sk " + fmt.Sprintf("--cert /var/lib/rancher/%s/server/tls/client-ca.crt", cluster.Config.Product) + fmt.Sprintf(" --key /var/lib/rancher/%s/server/tls/client-ca.key", cluster.Config.Product) + " https://127.0.0.1:6443/healthz",
+		// {Command: "sudo curl -sk http://127.0.0.1:10256/healthz"}, //SearchString: "lastUpdated" or "nodeEligible: true" //check with devs for this versus second kube-proxy port
+		// "sudo curl -sk " + fmt.Sprintf("--cert /var/lib/rancher/%s/server/tls/etcd/server-client.crt", cluster.Config.Product) + fmt.Sprintf(" --key /var/lib/rancher/%s/server/tls/etcd/server-client.key", cluster.Config.Product) + " https://127.0.0.1:2379/livez?verbose",
+	}
+	var err error
+	for _, serverIP := range cluster.ServerIPs {
+		for _, endpoint := range commands {
+			fmt.Printf("Running command %s against server %s", commands, serverIP)
+			err = assert.CheckComponentCmdNode(
+				endpoint,
+				serverIP,
+				"ok")
+		}
+	}
+	Expect(err).NotTo(HaveOccurred(), err)
+}
+
+func Testk8sAPIReady(cluster *factory.Cluster) {
+	for _, serverIP := range cluster.ServerIPs {
+		err := assert.CheckComponentCmdNode(
+			"kubectl get --raw='/readyz?verbose'",
+			serverIP,
+			"readyz check passed",
+		)
+		Expect(err).NotTo(HaveOccurred(), err)
+	}
+}
+
+func Testk8sAPILive(cluster *factory.Cluster) {
+	for _, serverIP := range cluster.ServerIPs {
+		err := assert.CheckComponentCmdNode(
+			"kubectl get --raw='/livez?verbose'",
+			serverIP,
+			"livez check passed",
+		)
+		Expect(err).NotTo(HaveOccurred(), err)
+	}
+}

From 7030decbc187acff28644d2f0a09e9b2cbc8f428 Mon Sep 17 00:00:00 2001
From: VestigeJ
Date: Tue, 18 Feb 2025 09:52:29 -0800
Subject: [PATCH 2/2] rebased on incoming changes again

---
 .gitignore                                    |   2 +
 Makefile                                      |   4 +
 docs/testing.md                               |  19 ++-
 .../conformance/conformance_suite_test.go     |  98 ++++++++++++++
 entrypoint/conformance/sonobuoy_test.go       |  44 ++++++
 pkg/testcase/cluster.go                       |  40 ++---
 pkg/testcase/conformance.go                   | 138 ++++++++++++++++++
 pkg/testcase/networkconnectivity.go           |  48 ------
 scripts/install_sonobuoy.sh                   |  72 +++++++++
 scripts/mixedos_sonobuoy.sh                   |  55 -------
 shared/cluster.go                             |  29 +++-
 11 files changed, 409 insertions(+), 140 deletions(-)
 create mode 100644 entrypoint/conformance/conformance_suite_test.go
 create mode 100644 entrypoint/conformance/sonobuoy_test.go
 create mode 100644 pkg/testcase/conformance.go
 create mode 100755 scripts/install_sonobuoy.sh
 delete mode 100755 scripts/mixedos_sonobuoy.sh

diff --git a/.gitignore b/.gitignore
index bcfb5448..116044cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,5 @@ workloads/*/dynamic-ingressroute.yaml
 /config/*.env.*
 /config/*.tfvars
 tmp/
+entrypoint/conformance/my-sonobuoy-plugins
+entrypoint/conformance/*.tar.gz
diff --git a/Makefile b/Makefile
index 2fd18273..33ee4ba5 100644
--- a/Makefile
+++ b/Makefile
@@ -71,6 +71,9 @@ test-upgrade-manual:
 test-upgrade-node-replacement:
 	@go test -timeout=120m -v -tags=upgradereplacement -count=1 ./entrypoint/upgradecluster/... -installVersionOrCommit ${INSTALL_VERSION_OR_COMMIT} -channel ${CHANNEL}
 
+test-run-sonobuoy:
+	@go test -timeout=170m -v -count=1 ./entrypoint/conformance/... $(if ${SONOBUOY_VERSION},-sonobuoyVersion ${SONOBUOY_VERSION}) --ginkgo.timeout=170m
+
 test-create-mixedos:
 	@go test -timeout=45m -v -count=1 ./entrypoint/mixedoscluster/... $(if ${SONOBUOY_VERSION},-sonobuoyVersion ${SONOBUOY_VERSION})
 
@@ -125,3 +128,4 @@ go-check:
 shell-check:
 	@shellcheck modules/airgap/setup/*.sh
 	@shellcheck modules/ipv6only/scripts/*.sh
+	@shellcheck scripts/*.sh
\ No newline at end of file
diff --git a/docs/testing.md b/docs/testing.md
index 944db23f..7f928f84 100644
--- a/docs/testing.md
+++ b/docs/testing.md
@@ -284,4 +284,21 @@ S3_FOLDER=snapshots
 ### Not supported/implemented currently for cluster restore:
 - Hardened Cluster Setup
 - ExternalDB Setup
-- Selinux Setup
\ No newline at end of file
+- Selinux Setup
+
+## Validating Conformance Tests with Sonobuoy
+  - Please note that sonobuoy has not been updated for a year, and its functionality is degrading with newer minor versions of k8s.
+  - Full conformance tests for patch validations should run against a cluster of at least 3 servers and 1 agent.
+  - You can use the Makefile target `make test-run-sonobuoy` to run the conformance tests.
+  - Alternatively, you can run `go test -timeout=140m -v -count=1 ./entrypoint/conformance/... --ginkgo.timeout=140m`; the ginkgo timeout must be extended in addition to the go test timeout.
+  - Required vars in the `*.tfvars` file for a minimum conformance configuration:
+```
+no_of_server_nodes = 1
+no_of_worker_nodes = 1
+```
+- sonobuoy's output is becoming unreliable for status checks; note the incorrect remaining count of 404 in the example below.
+  sono status
+  PLUGIN         STATUS     RESULT   COUNT   PROGRESS
+  e2e            complete   passed   1       Passed: 0, Failed: 0, Remaining:404
+  systemd-logs   complete   passed   2
+  Sonobuoy has completed. Use `sonobuoy retrieve` to get results
\ No newline at end of file
diff --git a/entrypoint/conformance/conformance_suite_test.go b/entrypoint/conformance/conformance_suite_test.go
new file mode 100644
index 00000000..fddd4f2f
--- /dev/null
+++ b/entrypoint/conformance/conformance_suite_test.go
@@ -0,0 +1,98 @@
+package sonobuoyconformance
+
+import (
+	"flag"
+	"os"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/rancher/distros-test-framework/config"
+	"github.com/rancher/distros-test-framework/pkg/customflag"
+	"github.com/rancher/distros-test-framework/pkg/qase"
+	"github.com/rancher/distros-test-framework/shared"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var (
+	qaseReport = os.Getenv("REPORT_TO_QASE")
+	kubeconfig string
+	cluster    *shared.Cluster
+	cfg        *config.Env
+	err        error
+)
+
+func TestMain(m *testing.M) {
+	flag.StringVar(&customflag.ServiceFlag.External.SonobuoyVersion, "sonobuoyVersion", "0.57.2", "Sonobuoy binary version")
+	flag.Var(&customflag.ServiceFlag.Destroy, "destroy", "Destroy cluster after test")
+	flag.Parse()
+
+	verifyClusterNodes()
+
+	cfg, err = config.AddEnv()
+	if err != nil {
+		shared.LogLevel("error", "error adding env vars: %w\n", err)
+		os.Exit(1)
+	}
+
+	kubeconfig = os.Getenv("KUBE_CONFIG")
+	if kubeconfig == "" {
+		// gets a cluster from terraform.
+		cluster = shared.ClusterConfig(cfg)
+	} else {
+		// gets a cluster from kubeconfig.
+		cluster = shared.KubeConfigCluster(kubeconfig)
+	}
+
+	os.Exit(m.Run())
+}
+
+func TestConformance(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Run Conformance Suite")
+}
+
+var _ = ReportAfterSuite("Conformance Suite", func(report Report) {
+
+	if strings.ToLower(qaseReport) == "true" {
+		qaseClient, err := qase.AddQase()
+		Expect(err).ToNot(HaveOccurred(), "error adding qase")
+
+		qaseClient.SpecReportTestResults(qaseClient.Ctx, &report, cfg.InstallVersion)
+	} else {
+		shared.LogLevel("info", "Qase reporting is not enabled")
+	}
+})
+
+var _ = AfterSuite(func() {
+	if customflag.ServiceFlag.Destroy {
+		status, err := shared.DestroyCluster(cfg)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(status).To(Equal("cluster destroyed"))
+	}
+})
+
+func verifyClusterNodes() {
+	// if re-running locally the env variables are not set after cleanup
+	shared.LogLevel("info", "verifying cluster configuration matches minimum requirements for conformance tests")
+	serverNum, err := strconv.Atoi(os.Getenv("no_of_server_nodes"))
+	if err != nil {
+		shared.LogLevel("error", "error converting no_of_server_nodes to int: %w", err)
+		os.Exit(1)
+	}
+
+	agentNum, err := strconv.Atoi(os.Getenv("no_of_agent_nodes"))
+	if err != nil {
+		shared.LogLevel("error", "error converting no_of_agent_nodes to int: %w", err)
+		os.Exit(1)
+	}
+
+	if serverNum < 1 || agentNum < 1 {
+		shared.LogLevel("error", "%s", "cluster must at least consist of 1 server and 1 agent")
+		os.Exit(1)
+	}
+
+}
diff --git a/entrypoint/conformance/sonobuoy_test.go b/entrypoint/conformance/sonobuoy_test.go
new file mode 100644
index 00000000..36ffe0a3
--- /dev/null
+++ b/entrypoint/conformance/sonobuoy_test.go
@@ -0,0 +1,44 @@
+package sonobuoyconformance
+
+import (
+	"fmt"
+
+	"github.com/rancher/distros-test-framework/pkg/assert"
+	"github.com/rancher/distros-test-framework/pkg/testcase"
+
+	. "github.com/onsi/ginkgo/v2"
+)
+
+var _ = Describe("Sonobuoy Conformance Tests...", func() {
+
+	It("Starts Up with no issues", func() {
+		testcase.TestBuildCluster(cluster)
+	})
+
+	It("Validates Node", func() {
+		testcase.TestNodeStatus(
+			cluster,
+			assert.NodeAssertReadyStatus(),
+			nil,
+		)
+	})
+
+	It("Validate Pods", func() {
+		testcase.TestPodStatus(
+			cluster,
+			assert.PodAssertRestart(),
+			assert.PodAssertReady())
+	})
+
+	It("Validates the releases conformance with upstream requirements", func() {
+		testcase.ConformanceTest("certified-conformance")
+	})
+})
+
+var _ = AfterEach(func() {
+	if CurrentSpecReport().Failed() {
+		fmt.Printf("\nFAILED! %s\n\n", CurrentSpecReport().FullText())
+	} else {
+		fmt.Printf("\nPASSED! %s\n\n", CurrentSpecReport().FullText())
+	}
+})
diff --git a/pkg/testcase/cluster.go b/pkg/testcase/cluster.go
index 06d58aeb..960a8ff3 100644
--- a/pkg/testcase/cluster.go
+++ b/pkg/testcase/cluster.go
@@ -4,11 +4,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/rancher/distros-test-framework/pkg/customflag"
 	"github.com/rancher/distros-test-framework/pkg/testcase/support"
 	"github.com/rancher/distros-test-framework/shared"
 
-	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
@@ -50,35 +48,17 @@ func TestBuildCluster(cluster *shared.Cluster) {
 	}
 }
 
-// TestSonobuoyMixedOS runs sonobuoy tests for mixed os cluster (linux + windows) node.
-func TestSonobuoyMixedOS(deleteWorkload bool) {
-	sonobuoyVersion := customflag.ServiceFlag.External.SonobuoyVersion
-	err := shared.SonobuoyMixedOS("install", sonobuoyVersion)
-	Expect(err).NotTo(HaveOccurred())
+func checkAndPrintAgentNodeIPs(agentNum int, agentIPs []string, isWindows bool) {
+	info := "Agent Node IPs:"
 
-	cmd := "sonobuoy run --kubeconfig=" + shared.KubeConfigFile +
-		" --plugin my-sonobuoy-plugins/mixed-workload-e2e/mixed-workload-e2e.yaml" +
-		" --aggregator-node-selector kubernetes.io/os:linux --wait"
-	res, err := shared.RunCommandHost(cmd)
-	Expect(err).NotTo(HaveOccurred(), "failed output: "+res)
+	if isWindows {
+		info = "Windows " + info
+	}
 
-	cmd = "sonobuoy retrieve --kubeconfig=" + shared.KubeConfigFile
-	testResultTar, err := shared.RunCommandHost(cmd)
-	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
-
-	cmd = "sonobuoy results " + testResultTar
-	res, err = shared.RunCommandHost(cmd)
-	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
-	Expect(res).Should(ContainSubstring("Plugin: mixed-workload-e2e\nStatus: passed\n"))
-
-	if deleteWorkload {
-		cmd = "sonobuoy delete --all --wait --kubeconfig=" + shared.KubeConfigFile
-		_, err = shared.RunCommandHost(cmd)
-		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
-		err = shared.SonobuoyMixedOS("delete", sonobuoyVersion)
-		if err != nil {
-			GinkgoT().Errorf("error: %v", err)
-			return
-		}
+	if agentNum > 0 {
+		Expect(agentIPs).ShouldNot(BeEmpty())
+		shared.LogLevel("info", info+" %v", agentIPs)
+	} else {
+		Expect(agentIPs).Should(BeEmpty())
 	}
 }
diff --git a/pkg/testcase/conformance.go b/pkg/testcase/conformance.go
new file mode 100644
index 00000000..51d89ab6
--- /dev/null
+++ b/pkg/testcase/conformance.go
@@ -0,0 +1,138 @@
+package testcase
+
+import (
+	"os"
+	"strings"
+
+	"github.com/rancher/distros-test-framework/shared"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+// TestSonobuoyMixedOS runs sonobuoy tests for mixed os cluster (linux + windows) node.
+func TestSonobuoyMixedOS(deleteWorkload bool) {
+	installConformanceBinary()
+
+	cmd := "sonobuoy run --kubeconfig=" + shared.KubeConfigFile +
+		" --plugin my-sonobuoy-plugins/mixed-workload-e2e/mixed-workload-e2e.yaml" +
+		" --aggregator-node-selector kubernetes.io/os:linux --wait"
+	res, err := shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed output: "+res)
+
+	cmd = "sonobuoy retrieve --kubeconfig=" + shared.KubeConfigFile
+	testResultTar, err := shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+
+	cmd = "sonobuoy results " + testResultTar
+	res, err = shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+	Expect(res).Should(ContainSubstring("Plugin: mixed-workload-e2e\nStatus: passed\n"))
+
+	if deleteWorkload {
+		cmd = "sonobuoy delete --all --wait --kubeconfig=" + shared.KubeConfigFile
+		_, err = shared.RunCommandHost(cmd)
+		Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+		err = shared.InstallSonobuoy("delete")
+		if err != nil {
+			GinkgoT().Errorf("error: %v", err)
+			return
+		}
+	}
+}
+
+func ConformanceTest(testName string) {
+	installConformanceBinary()
+	launchSonobuoyTests(testName)
+	checkStatus()
+	testResultTar := getResults()
+	shared.LogLevel("info", "%s", "testResultTar: "+testResultTar)
+	rerunFailedTests(testResultTar)
+	testResultTar = getResults()
+	shared.LogLevel("info", "%s", "testResultTar: "+testResultTar)
+	parseResults(testResultTar)
+	cleanupTests()
+}
+
+func installConformanceBinary() {
+	shared.LogLevel("info", "installing sonobuoy binary")
+	err := shared.InstallSonobuoy("install")
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func launchSonobuoyTests(testMode string) {
+	shared.LogLevel("info", "checking namespace existence")
+	cmds := "kubectl get namespace sonobuoy --kubeconfig=" + shared.KubeConfigFile
+	res, _ := shared.RunCommandHost(cmds)
+
+	if strings.Contains(res, "Active") {
+		shared.LogLevel("info", "%s", "sonobuoy namespace is active, waiting for it to complete")
+		return
+	}
+
+	if strings.Contains(res, "Error from server (NotFound): namespaces \"sonobuoy\" not found") {
+		cmd := "sonobuoy run --kubeconfig=" + shared.KubeConfigFile +
+			" --mode=" + testMode + " --kubernetes-version=" + shared.ExtractKubeImageVersion()
+		_, err := shared.RunCommandHost(cmd)
+		Expect(err).NotTo(HaveOccurred())
+	}
+}
+
+func checkStatus() {
+	shared.LogLevel("info", "checking status of running tests")
+	cmd := "sonobuoy status --kubeconfig=" + shared.KubeConfigFile
+	Eventually(func() string {
+		res, err := shared.RunCommandHost(cmd)
+		Expect(err).NotTo(HaveOccurred())
+		return res
+	}, "170m", "10m").Should(ContainSubstring("Sonobuoy has completed"), "timed out waiting for sonobuoy")
+}
+
+func getResults() string {
+	shared.LogLevel("info", "getting sonobuoy results")
+	cmd := "sonobuoy retrieve --kubeconfig=" + shared.KubeConfigFile
+	res, err := shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred())
+
+	return res
+}
+
+func rerunFailedTests(testResultTar string) {
+	ciliumExpectedFailures := `
+	[sig-network] Services should serve endpoints on same port and different protocols
+	Services should be able to switch session affinity for service with type clusterIP
+	Services should have session affinity work for service with type clusterIP`
+
+	if strings.Contains(os.Getenv("cni"), "cilium") {
+		shared.LogLevel("info", "Cilium has known issues with conformance tests, skipping re-run")
+		shared.LogLevel("info", "ciliumExpectedFailures: %s", ciliumExpectedFailures)
+
+		return
+	}
+
+	shared.LogLevel("info", "re-running tests that failed from previous run")
+
+	cmd := "sonobuoy run --rerun-failed=" + testResultTar + " --kubeconfig=" + shared.KubeConfigFile +
+		" --kubernetes-version=" + shared.ExtractKubeImageVersion()
+
+	res, err := shared.RunCommandHost(cmd)
+	Expect(err).To(HaveOccurred(), "failed cmd: "+cmd)
+	Expect(res).Should(ContainSubstring("no tests failed for plugin"))
+}
+
+func parseResults(testResultTar string) {
+	shared.LogLevel("info", "parsing sonobuoy results")
+	cmd := "sonobuoy results " + testResultTar
+	res, err := shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+	Expect(res).Should(ContainSubstring("Status: passed"))
+	shared.LogLevel("info", "%s", "sonobuoy results: "+res)
+}
+
+func cleanupTests() {
+	shared.LogLevel("info", "cleaning up cluster conformance tests and deleting sonobuoy namespace")
+	cmd := "sonobuoy delete --all --wait --kubeconfig=" + shared.KubeConfigFile
+	res, err := shared.RunCommandHost(cmd)
+	Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+	Expect(res).Should(ContainSubstring("deleted"))
+}
diff --git a/pkg/testcase/networkconnectivity.go b/pkg/testcase/networkconnectivity.go
index 0e0feb63..1824b655 100644
--- a/pkg/testcase/networkconnectivity.go
+++ b/pkg/testcase/networkconnectivity.go
@@ -10,7 +10,6 @@ import (
 	"github.com/rancher/distros-test-framework/shared"
 
 	. "github.com/onsi/gomega"
-	// . "github.com/onsi/gomega/gstruct"
 )
 
 // TestInternodeConnectivityMixedOS validates communication between linux and windows nodes.
@@ -110,50 +109,3 @@ func testCrossNodeService(services, ports, expected []string) error {
 
 	return nil
 }
-
-func TestEndpointReadiness(cluster *factory.Cluster) {
-	//do more checks on the filesystem to ensure the certs are all created and in the correct location before this
-	commands := []string{
-		"sudo curl -sk http://127.0.0.1:10248/healthz",  //kubelet
-		"sudo curl -sk http://127.0.0.1:10249/healthz",  //kube-proxy
-		"sudo curl -sk https://127.0.0.1:10257/healthz", //kube-controller
-		"sudo curl -sk https://127.0.0.1:10258/healthz", //cloud-controller
-		"sudo curl -sk https://127.0.0.1:10259/healthz", //kube-scheduler
-		"sudo curl -sk " + fmt.Sprintf("--cert /var/lib/rancher/%s/server/tls/client-ca.crt", cluster.Config.Product) + fmt.Sprintf(" --key /var/lib/rancher/%s/server/tls/client-ca.key", cluster.Config.Product) + " https://127.0.0.1:6443/healthz",
-		// {Command: "sudo curl -sk http://127.0.0.1:10256/healthz"}, //SearchString: "lastUpdated" or "nodeEligible: true" //check with devs for this versus second kube-proxy port
-		// "sudo curl -sk " + fmt.Sprintf("--cert /var/lib/rancher/%s/server/tls/etcd/server-client.crt", cluster.Config.Product) + fmt.Sprintf(" --key /var/lib/rancher/%s/server/tls/etcd/server-client.key", cluster.Config.Product) + " https://127.0.0.1:2379/livez?verbose",
-	}
-	var err error
-	for _, serverIP := range cluster.ServerIPs {
-		for _, endpoint := range commands {
-			fmt.Printf("Running command %s against server %s", commands, serverIP)
-			err = assert.CheckComponentCmdNode(
-				endpoint,
-				serverIP,
-				"ok")
-		}
-	}
-	Expect(err).NotTo(HaveOccurred(), err)
-}
-
-func Testk8sAPIReady(cluster *factory.Cluster) {
-	for _, serverIP := range cluster.ServerIPs {
-		err := assert.CheckComponentCmdNode(
-			"kubectl get --raw='/readyz?verbose'",
-			serverIP,
-			"readyz check passed",
-		)
-		Expect(err).NotTo(HaveOccurred(), err)
-	}
-}
-
-func Testk8sAPILive(cluster *factory.Cluster) {
-	for _, serverIP := range cluster.ServerIPs {
-		err := assert.CheckComponentCmdNode(
-			"kubectl get --raw='/livez?verbose'",
-			serverIP,
-			"livez check passed",
-		)
-		Expect(err).NotTo(HaveOccurred(), err)
-	}
-}
diff --git a/scripts/install_sonobuoy.sh b/scripts/install_sonobuoy.sh
new file mode 100755
index 00000000..b5e0a49d
--- /dev/null
+++ b/scripts/install_sonobuoy.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+# Usage: sh install_sonobuoy.sh          runs with default values
+# Usage: sh install_sonobuoy.sh install 0.56.17 arm64
+# Usage: sh install_sonobuoy.sh delete
+
+action=${1:-install}
+version=${2:-0.57.2}
+arch=${3:-amd64}
+mixed_plugins_url="git clone https://github.com/phillipsj/my-sonobuoy-plugins.git"
+sonobuoy_url="https://github.com/vmware-tanzu/sonobuoy/releases/download/v${version}/sonobuoy_${version}_linux_${arch}.tar.gz"
+max_retries=5
+retry_delay=11
+# adopt golang error handling in bash check variables are passed in appropriately - if not return appropriate error message
+
+download_retry(){
+  i=1
+
+  until $1 || [ $i -gt $max_retries ]; do
+    echo "Retry $i failed. Waiting $retry_delay seconds before retrying..."
+    sleep $retry_delay
+    ((i++))
+  done
+
+  if [ $i -gt $max_retries ]; then
+    echo "Download failed after $max_retries attempts."
+    exit 1
+  fi
+}
+
+installation(){
+  echo "Installing sonobuoy version ${version}"
+  if [ ! -d "my-sonobuoy-plugins" ];
+  then
+    echo "Cloning repo: https://github.com/phillipsj/my-sonobuoy-plugins.git"
+    download_retry "${mixed_plugins_url}"
+  fi
+  wait
+  echo "Downloading sonobuoy installer..."
+  if [[ $(command -v wget) ]]; then
+    download_retry "wget -q ${sonobuoy_url} -O sonobuoy.tar.gz"
+    wait
+  elif [[ $(command -v curl) ]]; then
+    download_retry "curl -s ${sonobuoy_url} --output sonobuoy.tar.gz"
+    wait
+  else
+    echo "Unable to use wget or curl to download sonobuoy installer, consider a networking error or an under configured OS if this error persists"
+  fi
+  wait
+  tar -xvf sonobuoy.tar.gz
+  wait
+  mv sonobuoy /usr/local/bin/sonobuoy
+  chmod +x /usr/local/bin/sonobuoy
+}
+
+deletion(){
+  #has_bin sonobuoy
+  echo "Deleting sonobuoy installer"
+  rm -rf my-sonobuoy-plugins
+  rm -rf sonobuoy_*
+  rm -rf /usr/local/bin/sonobuoy
+}
+
+if [ "$action" == "install" ];
+then
+  installation
+elif [ "$action" == "delete" ];
+then
+  deletion
+else
+  echo "Invalid argument, please pass required arg [install or delete]"
+fi
diff --git a/scripts/mixedos_sonobuoy.sh b/scripts/mixedos_sonobuoy.sh
deleted file mode 100755
index 9e9f8987..00000000
--- a/scripts/mixedos_sonobuoy.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-
-# Usage: sh mixedos_sonobouy.sh install 0.56.17 amd64
-# Usage: sh mixedos_sonobouy.sh delete
-
-action=$1
-version=$2
-arch=$3
-
-if [ -z "$version" ] || [[ -n "$version" && "$version" != *.* ]];
-then
-  version="0.56.17"
-fi
-
-if [ -z "$arch" ] || [[ "$arch" != "arm" ]];
-then
-  arch="amd64"
-fi
-
-installation(){
-  echo "Installing sonobuoy version ${version} for mixedos validation"
-  if [ ! -d "my-sonobuoy-plugins" ];
-  then
-    echo "Cloning repo: https://github.com/phillipsj/my-sonobuoy-plugins.git"
-    git clone https://github.com/phillipsj/my-sonobuoy-plugins.git
-  fi
-  wait
-  if [ ! -f "sonobuoy_${version}_linux_${arch}.tar.gz" ];
-  then
-    echo "Downloading sonobouy installer"
-    wget -q https://github.com/vmware-tanzu/sonobuoy/releases/download/v${version}/sonobuoy_${version}_linux_${arch}.tar.gz
-  fi
-  wait
-  tar -xvf sonobuoy_${version}_linux_${arch}.tar.gz
-  chmod +x sonobuoy && mv sonobuoy /usr/local/bin/sonobuoy
-}
-
-deletion(){
-  echo "Deleting sonobuoy installer"
-  rm -rf my-sonobuoy-plugins
-  rm -rf sonobuoy_*
-  rm -rf /usr/local/bin/sonobuoy
-}
-
-if [ "$action" == "install" ];
-then
-  installation
-elif [ "$action" == "delete" ];
-then
-  deletion
-else
-  echo "Invalid argument, please pass required arg [install or delete]"
-fi
-
-
diff --git a/shared/cluster.go b/shared/cluster.go
index 66f16227..c5461b05 100644
--- a/shared/cluster.go
+++ b/shared/cluster.go
@@ -57,7 +57,6 @@ func handleWorkload(action, resourceDir, workload string) error {
 
 func applyWorkload(workload, filename string) error {
 	LogLevel("info", "Applying %s", workload)
-
 	cmd := "kubectl apply -f " + filename + " --kubeconfig=" + KubeConfigFile
 	out, err := RunCommandHost(cmd)
 	fmt.Println(out)
@@ -65,7 +64,6 @@ func applyWorkload(workload, filename string) error {
 
 	if strings.Contains(out, "Invalid value") {
 		return fmt.Errorf("failed to apply workload %s: %s", workload, out)
 	}
-
 	return ReturnLogError("failed to run kubectl apply: %w", err)
 }
 
@@ -252,21 +250,21 @@ func FetchIngressIP(namespace string) (ingressIPs []string, err error) {
 	return ingressIPs, nil
 }
 
-// SonobuoyMixedOS Executes scripts/mixedos_sonobuoy.sh script.
+// InstallSonobuoy Executes scripts/install_sonobuoy.sh script.
 // action required install or cleanup sonobuoy plugin for mixed OS cluster.
 // version optional sonobouy version to be installed.
-func SonobuoyMixedOS(action, version string) error {
+func InstallSonobuoy(action string) error {
 	if action != "install" && action != "delete" {
 		return ReturnLogError("invalid action: %s. Must be 'install' or 'delete'", action)
 	}
 
-	scriptsDir := BasePath() + "/scripts/mixedos_sonobuoy.sh"
+	scriptsDir := BasePath() + "/scripts/install_sonobuoy.sh"
 	err := os.Chmod(scriptsDir, 0o755)
 	if err != nil {
 		return ReturnLogError("failed to change script permissions: %w", err)
 	}
 
-	cmd := exec.Command("/bin/sh", scriptsDir, action, version)
+	cmd := exec.Command("/bin/sh", scriptsDir, action)
 	output, err := cmd.CombinedOutput()
 	if err != nil {
 		return ReturnLogError("failed to execute %s action sonobuoy: %w\nOutput: %s", action, err, output)
@@ -699,3 +697,22 @@ func AddProductCfg() *config.Env {
 
 	return cfg
 }
+
+func ExtractKubeImageVersion() string {
+	_, serverVersion, err := Product()
+	if err != nil {
+		LogLevel("error", "error retrieving version of product: %s", err)
+		os.Exit(1)
+	}
+
+	version := strings.Split(serverVersion, "+")[0]
+	version = strings.TrimSpace(version)
+
+	if version == "" {
+		LogLevel("error", "failed to resolve a server version string from: %s", serverVersion)
+		os.Exit(1)
+	}
+	LogLevel("info", "serverVersionReturnValue : %s", version)
+
+	return version
+}