From 0c3c3a07cb82cc0859a23449f118a552df6be76b Mon Sep 17 00:00:00 2001 From: Fanny Jiang Date: Fri, 27 Dec 2024 14:21:40 -0500 Subject: [PATCH] add new kind provisioners (aws and local) and update generic k8s e2e tests --- .gitlab-ci.yml | 4 +- Makefile | 2 +- config/new-e2e/kustomization.yaml | 7 + test/e2e/Pulumi.yaml | 5 + test/e2e/common/common.go | 85 ++++ .../autodiscovery-annotation.yaml | 35 ++ .../datadog-agent-ccr-enabled.yaml | 19 + .../new_manifests/datadog-agent-logs.yaml | 20 + .../new_manifests/datadog-agent-minimum.yaml | 10 + test/e2e/provisioners/common.go | 126 ++++++ test/e2e/provisioners/kind.go | 416 ++++++++++++++++++ test/e2e/tests/k8sSuite/k8sSuite_test.go | 286 ++++++++++++ test/e2e/tests/k8sSuite/kind_aws_test.go | 40 ++ test/e2e/tests/k8sSuite/kind_local_test.go | 47 ++ test/e2e/tests/utils/utils.go | 95 ++++ 15 files changed, 1194 insertions(+), 3 deletions(-) create mode 100644 config/new-e2e/kustomization.yaml create mode 100644 test/e2e/Pulumi.yaml create mode 100644 test/e2e/common/common.go create mode 100644 test/e2e/manifests/new_manifests/autodiscovery-annotation.yaml create mode 100644 test/e2e/manifests/new_manifests/datadog-agent-ccr-enabled.yaml create mode 100644 test/e2e/manifests/new_manifests/datadog-agent-logs.yaml create mode 100644 test/e2e/manifests/new_manifests/datadog-agent-minimum.yaml create mode 100644 test/e2e/provisioners/common.go create mode 100644 test/e2e/provisioners/kind.go create mode 100644 test/e2e/tests/k8sSuite/k8sSuite_test.go create mode 100644 test/e2e/tests/k8sSuite/kind_aws_test.go create mode 100644 test/e2e/tests/k8sSuite/kind_local_test.go create mode 100644 test/e2e/tests/utils/utils.go diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4b4ade463..b8df3373e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: registry.ddbuild.io/images/mirror/golang:1.22.7 +image: registry.ddbuild.io/images/mirror/golang:1.23.0 variables: PROJECTNAME: "datadog-operator" 
PROJECTNAME_CHECK: "datadog-operator-check" @@ -13,7 +13,7 @@ variables: RH_PARTNER_REGISTRY_USER: "redhat-isv-containers+5e7c8ebc1c86a3163d1a69be-robot" RH_PARTNER_REGISTRY_KEY_SSM_KEY: redhat_registry_key RH_PARTNER_API_KEY_SSM_KEY: redhat_api_key - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 3c7d2dc2d3dd + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 36860d3c29b4 PUSH_IMAGES_TO_STAGING: description: "Set PUSH_IMAGE_TO_STAGING to 'true' if you want to push the operator to internal staging registry." diff --git a/Makefile b/Makefile index d36506393..a35b93b80 100644 --- a/Makefile +++ b/Makefile @@ -210,7 +210,7 @@ integration-tests-v2: $(ENVTEST) ## Run tests with reconciler V2 .PHONY: e2e-tests e2e-tests: manifests $(KUSTOMIZE) ## Run E2E tests and destroy environment stacks after tests complete. To run locally, complete pre-reqs (see docs/how-to-contribute.md) and prepend command with `aws-vault exec sso-agent-sandbox-account-admin --`. E.g. `aws-vault exec sso-agent-sandbox-account-admin -- make e2e-tests`. - KUBEBUILDER_ASSETS="$(ROOT)/bin/$(PLATFORM)/" go test -C test/e2e --tags=e2e github.com/DataDog/datadog-operator/e2e -v -timeout 1h -coverprofile cover_e2e.out + KUBEBUILDER_ASSETS="$(ROOT)/bin/$(PLATFORM)/" go test -C test/e2e github.com/DataDog/datadog-operator/test/e2e -run TestAWSKindSuite -count=1 -v -timeout 1h -coverprofile cover_e2e.out .PHONY: e2e-tests-keep-stacks e2e-tests-keep-stacks: manifests $(KUSTOMIZE) ## Run E2E tests and keep environment stacks running. To run locally, complete pre-reqs (see docs/how-to-contribute.md) and prepend command with `aws-vault exec sso-agent-sandbox-account-admin --`. E.g. `aws-vault exec sso-agent-sandbox-account-admin -- make e2e-tests-keep-stacks`. 
diff --git a/config/new-e2e/kustomization.yaml b/config/new-e2e/kustomization.yaml new file mode 100644 index 000000000..31fa79706 --- /dev/null +++ b/config/new-e2e/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namePrefix: datadog-operator-e2e- +namespace: e2e-operator +resources: +- ../crd +- ../rbac diff --git a/test/e2e/Pulumi.yaml b/test/e2e/Pulumi.yaml new file mode 100644 index 000000000..55c9b8118 --- /dev/null +++ b/test/e2e/Pulumi.yaml @@ -0,0 +1,5 @@ +name: e2elocal +runtime: go +description: Generic scenario (check scenario variable) +config: + pulumi:disable-default-providers: ["*"] \ No newline at end of file diff --git a/test/e2e/common/common.go b/test/e2e/common/common.go new file mode 100644 index 000000000..ca2e8da88 --- /dev/null +++ b/test/e2e/common/common.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package common + +import ( + "encoding/json" + "os" + "path/filepath" + "regexp" + "runtime" +) + +var ( + NamespaceName = "e2e-operator" + K8sVersion = GetEnv("K8S_VERSION", "1.26") + ImgPullPassword = GetEnv("IMAGE_PULL_PASSWORD", "") + OperatorImageName = GetEnv("IMG", "") + + DdaMinimalPath = filepath.Join(ManifestsPath, "datadog-agent-minimum.yaml") + ManifestsPath = filepath.Join(ProjectRootPath, "test/e2e/manifests/new_manifests") + + timeout int64 = 60 + ProjectRootPath = projectRoot() +) + +const ( + NodeAgentSelector = "agent.datadoghq.com/component=agent" + ClusterAgentSelector = "agent.datadoghq.com/component=cluster-agent" + ClusterCheckRunnerSelector = "agent.datadoghq.com/component=cluster-checks-runner" +) + +// GetAbsPath Return absolute path for given path +func GetAbsPath(path string) (string, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + _, err = os.Stat(absPath) + if err != nil { + return "", err + } + if os.IsNotExist(err) { + return "", err + } + + return absPath, nil +} + +func GetEnv(key, fallback string) string { + if value, ok := os.LookupEnv(key); ok { + return value + } + return fallback +} + +func ParseCollectorJson(collectorOutput string) map[string]interface{} { + var jsonString string + var jsonObject map[string]interface{} + + re := regexp.MustCompile(`(\{.*\})`) + match := re.FindStringSubmatch(collectorOutput) + if len(match) > 0 { + jsonString = match[0] + } else { + return map[string]interface{}{} + } + + // Parse collector JSON + err := json.Unmarshal([]byte(jsonString), &jsonObject) + if err != nil { + return map[string]interface{}{} + } + return jsonObject +} + +func projectRoot() string { + _, b, _, ok := runtime.Caller(0) + if ok { + return filepath.Join(filepath.Dir(b), "../../..") + } + return "" +} diff --git a/test/e2e/manifests/new_manifests/autodiscovery-annotation.yaml b/test/e2e/manifests/new_manifests/autodiscovery-annotation.yaml new file mode 100644 index 
000000000..54ad98f45 --- /dev/null +++ b/test/e2e/manifests/new_manifests/autodiscovery-annotation.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx + namespace: e2e-operator +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + e2e: autodiscovery + template: + metadata: + annotations: + ad.datadoghq.com/nginx.check_names: '["http_check"]' + ad.datadoghq.com/nginx.init_configs: '[{}]' + ad.datadoghq.com/nginx.instances: | + [ + { + "name": "http_custom_identifier", + "url": "http://www.google.com" + } + ] + ad.datadoghq.com/tolerate-unready: "true" + labels: + app: nginx + e2e: autodiscovery + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/test/e2e/manifests/new_manifests/datadog-agent-ccr-enabled.yaml b/test/e2e/manifests/new_manifests/datadog-agent-ccr-enabled.yaml new file mode 100644 index 000000000..eedf8b6fd --- /dev/null +++ b/test/e2e/manifests/new_manifests/datadog-agent-ccr-enabled.yaml @@ -0,0 +1,19 @@ +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog-ccr-enabled + namespace: e2e-operator + labels: + agent.datadoghq.com/e2e-test: datadog-agent-ccr-enabled +spec: + global: + kubelet: + tlsVerify: false + features: + clusterChecks: + enabled: true + useClusterChecksRunners: true + liveContainerCollection: + enabled: true + logCollection: + enabled: true diff --git a/test/e2e/manifests/new_manifests/datadog-agent-logs.yaml b/test/e2e/manifests/new_manifests/datadog-agent-logs.yaml new file mode 100644 index 000000000..2e8ffc771 --- /dev/null +++ b/test/e2e/manifests/new_manifests/datadog-agent-logs.yaml @@ -0,0 +1,20 @@ +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + name: datadog-agent-logs + namespace: e2e-operator + labels: + agent.datadoghq.com/e2e-test: datadog-agent-logs +spec: + global: + kubelet: + tlsVerify: false + features: + clusterChecks: + enabled: true + 
useClusterChecksRunners: false + logCollection: + enabled: true + containerCollectAll: true + liveContainerCollection: + enabled: true diff --git a/test/e2e/manifests/new_manifests/datadog-agent-minimum.yaml b/test/e2e/manifests/new_manifests/datadog-agent-minimum.yaml new file mode 100644 index 000000000..cb73cd537 --- /dev/null +++ b/test/e2e/manifests/new_manifests/datadog-agent-minimum.yaml @@ -0,0 +1,10 @@ +apiVersion: datadoghq.com/v2alpha1 +kind: DatadogAgent +metadata: + namespace: e2e-operator + labels: + agent.datadoghq.com/e2e-test: datadog-agent-minimum +spec: + global: + kubelet: + tlsVerify: false diff --git a/test/e2e/provisioners/common.go b/test/e2e/provisioners/common.go new file mode 100644 index 000000000..77728d02d --- /dev/null +++ b/test/e2e/provisioners/common.go @@ -0,0 +1,126 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package provisioners + +import ( + "fmt" + "github.com/DataDog/datadog-operator/pkg/plugin/common" + e2ecommon "github.com/DataDog/datadog-operator/test/e2e/common" + "os" + "path/filepath" + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/resid" + "sigs.k8s.io/yaml" +) + +var ( + NewMgrKustomizeDirPath = filepath.Join(e2ecommon.ProjectRootPath, "config", "new-e2e") +) + +const ( + DefaultMgrImageName = "gcr.io/datadoghq/operator" + DefaultMgrImgTag = "latest" + DefaultMgrFileName = "e2e-manager.yaml" + UserData = `#!/bin/bash +echo "User Data" +echo "Installing kubectl" +snap install kubectl --classic + +echo "Verifying kubectl" +kubectl version --client + +echo "Installing kubens" +curl -sLo kubens https://github.com/ahmetb/kubectx/releases/download/v0.9.5/kubens +chmod +x kubens +mv kubens /usr/local/bin/ + +echo ' + +alias k="kubectl" +alias kg="kubectl get" +alias kgp="kubectl get pod" +alias krm="kubectl delete" +alias krmp="kubectl delete pod" +alias kd="kubectl describe" +alias kdp="kubectl describe pod" +alias ke="kubectl edit" +alias kl="kubectl logs" +alias kx="kubectl exec" +' >> /home/ubuntu/.bashrc +` +) + +func loadKustomization(path string) (*types.Kustomization, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var kustomization types.Kustomization + if err := yaml.Unmarshal(data, &kustomization); err != nil { + return nil, err + } + + return &kustomization, nil +} + +func saveKustomization(path string, kustomization *types.Kustomization) error { + data, err := yaml.Marshal(kustomization) + if err != nil { + return err + } + + if err := os.WriteFile(path, data, 0644); err != nil { + return err + } + + return nil +} + +// UpdateKustomization updates kustomization.yaml file in given kustomize directory with extra resources and image name and tag if `IMG` environment variable is set.
+func UpdateKustomization(kustomizeDirPath string, kustomizeResourcePaths []string) error { + var imgName, imgTag string + + kustomizationFilePath := fmt.Sprintf("%s/kustomization.yaml", kustomizeDirPath) + k, err := loadKustomization(kustomizationFilePath) + if err != nil { + return err + } + + // Update resources with target e2e-manager resource yaml + if kustomizeResourcePaths != nil { + // We empty slice to avoid accumulating patches from previous tests + k.Patches = k.Patches[:0] + for _, res := range kustomizeResourcePaths { + k.Patches = append(k.Patches, types.Patch{ + Path: res, + Target: &types.Selector{ + ResId: resid.NewResIdKindOnly("Deployment", "manager"), + }, + }) + } + } + + // Update image + if os.Getenv("IMG") != "" { + imgName, imgTag = common.SplitImageString(os.Getenv("IMG")) + } else { + imgName = DefaultMgrImageName + imgTag = DefaultMgrImgTag + } + for i, img := range k.Images { + if img.Name == "controller" { + k.Images[i].NewName = imgName + k.Images[i].NewTag = imgTag + } + } + + if err := saveKustomization(kustomizationFilePath, k); err != nil { + return err + } + + return nil +} diff --git a/test/e2e/provisioners/kind.go b/test/e2e/provisioners/kind.go new file mode 100644 index 000000000..d404478e8 --- /dev/null +++ b/test/e2e/provisioners/kind.go @@ -0,0 +1,416 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package provisioners + +import ( + "fmt" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "os" + "strings" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/provisioners/aws/kubernetes" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" + "github.com/DataDog/datadog-operator/test/e2e/common" + "github.com/DataDog/test-infra-definitions/common/config" + "github.com/DataDog/test-infra-definitions/components/datadog/agent" + "github.com/DataDog/test-infra-definitions/components/datadog/agentwithoperatorparams" + fakeintakeComp "github.com/DataDog/test-infra-definitions/components/datadog/fakeintake" + "github.com/DataDog/test-infra-definitions/components/datadog/operator" + "github.com/DataDog/test-infra-definitions/components/datadog/operatorparams" + kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" + "github.com/DataDog/test-infra-definitions/resources/local" + "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" + corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1" + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/kustomize" + metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1" + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/yaml" + "github.com/pulumi/pulumi/sdk/v3/go/auto" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + "path/filepath" +) + +const ( + provisionerBaseID = "aws-kind" + defaultProvisionerName = "kind" +) + +// KubernetesProvisionerParams contains all the parameters needed to create a Kubernetes environment +type KubernetesProvisionerParams struct { + name string + testName string + operatorOptions []operatorparams.Option + ddaOptions 
[]agentwithoperatorparams.Option + k8sVersion string + kustomizeResources []string + + fakeintakeOptions []fakeintake.Option + extraConfigParams runner.ConfigMap + yamlWorkloads []YAMLWorkload + workloadAppFuncs []func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) + local bool +} + +func newKubernetesProvisionerParams() *KubernetesProvisionerParams { + return &KubernetesProvisionerParams{ + name: defaultProvisionerName, + testName: "", + ddaOptions: []agentwithoperatorparams.Option{}, + operatorOptions: []operatorparams.Option{}, + k8sVersion: common.K8sVersion, + kustomizeResources: nil, + fakeintakeOptions: []fakeintake.Option{}, + extraConfigParams: runner.ConfigMap{}, + yamlWorkloads: []YAMLWorkload{}, + workloadAppFuncs: []func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error){}, + local: false, + } +} + +// newAWSK8sProvisionerOpts Translates the generic KubernetesProvisionerParams into a list of awskubernetes.ProvisionerOption for the AWS Kind provisioner +func newAWSK8sProvisionerOpts(params *KubernetesProvisionerParams) []awskubernetes.ProvisionerOption { + provisionerName := provisionerBaseID + params.name + + extraConfig := params.extraConfigParams + extraConfig.Merge(runner.ConfigMap{ + "ddinfra:kubernetesVersion": auto.ConfigValue{Value: params.k8sVersion}, + "ddagent:imagePullRegistry": auto.ConfigValue{Value: "669783387624.dkr.ecr.us-east-1.amazonaws.com"}, + "ddagent:imagePullUsername": auto.ConfigValue{Value: "AWS"}, + "ddagent:imagePullPassword": auto.ConfigValue{Value: common.ImgPullPassword}, + }) + + newOpts := []awskubernetes.ProvisionerOption{ + awskubernetes.WithName(provisionerName), + awskubernetes.WithOperator(), + awskubernetes.WithOperatorDDAOptions(params.ddaOptions...), + awskubernetes.WithOperatorOptions(params.operatorOptions...), + awskubernetes.WithExtraConfigParams(extraConfig), + awskubernetes.WithWorkloadApp(KustomizeWorkloadAppFunc(params.testName, 
params.kustomizeResources)), + awskubernetes.WithFakeIntakeOptions(params.fakeintakeOptions...), + awskubernetes.WithEC2VMOptions([]ec2.VMOption{ec2.WithUserData(UserData)}...), + } + + for _, yamlWorkload := range params.yamlWorkloads { + newOpts = append(newOpts, awskubernetes.WithWorkloadApp(YAMLWorkloadAppFunc(yamlWorkload))) + } + + return newOpts +} + +// KubernetesProvisionerOption is a function that modifies the KubernetesProvisionerParams +type KubernetesProvisionerOption func(params *KubernetesProvisionerParams) error + +// WithName sets the name of the provisioner +func WithName(name string) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.name = name + return nil + } +} + +// WithTestName sets the name of the test +func WithTestName(name string) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.testName = name + return nil + } +} + +// WithK8sVersion sets the kubernetes version +func WithK8sVersion(k8sVersion string) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.k8sVersion = k8sVersion + return nil + } +} + +// WithOperatorOptions adds options to the DatadogAgent resource +func WithOperatorOptions(opts ...operatorparams.Option) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.operatorOptions = opts + return nil + } +} + +// WithoutOperator removes the Datadog Operator resource +func WithoutOperator() KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.operatorOptions = nil + return nil + } +} + +// WithDDAOptions adds options to the DatadogAgent resource +func WithDDAOptions(opts ...agentwithoperatorparams.Option) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.ddaOptions = opts + return nil + } +} + +// WithoutDDA removes the DatadogAgent resource +func 
WithoutDDA() KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.ddaOptions = nil + return nil + } +} + +// WithExtraConfigParams adds extra config parameters to the environment +func WithExtraConfigParams(configMap runner.ConfigMap) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.extraConfigParams = configMap + return nil + } +} + +// WithKustomizeResources adds extra kustomize resources +func WithKustomizeResources(k []string) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.kustomizeResources = k + return nil + } +} + +// WithoutFakeIntake removes the fake intake +func WithoutFakeIntake() KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.fakeintakeOptions = nil + return nil + } +} + +// WithLocal uses the localKindRunFunc to create a local kind environment +func WithLocal(local bool) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.local = local + return nil + } +} + +// YAMLWorkload defines the parameters for a Kubernetes resource's YAML file +type YAMLWorkload struct { + Name string + Path string +} + +// WithYAMLWorkload adds a workload app to the environment for given YAML file path +func WithYAMLWorkload(yamlWorkload YAMLWorkload) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.yamlWorkloads = append(params.yamlWorkloads, yamlWorkload) + return nil + } +} + +// WithWorkloadApp adds a workload app to the environment +func WithWorkloadApp(appFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error)) KubernetesProvisionerOption { + return func(params *KubernetesProvisionerParams) error { + params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc) + return nil + } +} + +// KubernetesProvisioner generic Kubernetes provisioner wrapper that 
creates a new provisioner +// Inspired by https://github.com/DataDog/datadog-agent/blob/main/test/new-e2e/pkg/environments/local/kubernetes/kind.go +func KubernetesProvisioner(opts ...KubernetesProvisionerOption) provisioners.TypedProvisioner[environments.Kubernetes] { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. + var awsK8sOpts []awskubernetes.ProvisionerOption + var provisioner provisioners.TypedProvisioner[environments.Kubernetes] + + params := newKubernetesProvisionerParams() + _ = optional.ApplyOptions(params, opts) + inCI := os.Getenv("GITLAB_CI") + + if !params.local || strings.ToLower(inCI) == "true" { + awsK8sOpts = newAWSK8sProvisionerOpts(params) + provisioner = awskubernetes.KindProvisioner(awsK8sOpts...) + return provisioner + } + + provisionerName := "local-" + params.name + + provisioner = provisioners.NewTypedPulumiProvisioner(provisionerName, func(ctx *pulumi.Context, env *environments.Kubernetes) error { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. 
+ params := newKubernetesProvisionerParams() + _ = optional.ApplyOptions(params, opts) + + return localKindRunFunc(ctx, env, params) + + }, params.extraConfigParams) + + return provisioner +} + +// localKindRunFunc is the Pulumi run function that runs the local Kind provisioner +func localKindRunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *KubernetesProvisionerParams) error { + localEnv, err := local.NewEnvironment(ctx) + if err != nil { + return err + } + + kindCluster, err := kubeComp.NewLocalKindCluster(&localEnv, localEnv.CommonNamer().ResourceName("local-kind"), params.k8sVersion) + if err != nil { + return err + } + + if err = kindCluster.Export(ctx, &env.KubernetesCluster.ClusterOutput); err != nil { + return err + } + + // Build Kubernetes provider + kindKubeProvider, err := kubernetes.NewProvider(ctx, localEnv.CommonNamer().ResourceName("k8s-provider"), &kubernetes.ProviderArgs{ + Kubeconfig: kindCluster.KubeConfig, + EnableServerSideApply: pulumi.BoolPtr(true), + }) + if err != nil { + return err + } + + if params.fakeintakeOptions != nil { + fakeintakeOpts := []fakeintake.Option{fakeintake.WithLoadBalancer()} + params.fakeintakeOptions = append(fakeintakeOpts, params.fakeintakeOptions...) 
+ + fakeIntake, err := fakeintakeComp.NewLocalDockerFakeintake(&localEnv, "fakeintake") + if err != nil { + return err + } + if err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput); err != nil { + return err + } + + if params.ddaOptions != nil { + params.ddaOptions = append(params.ddaOptions, agentwithoperatorparams.WithFakeIntake(fakeIntake)) + } + } else { + env.FakeIntake = nil + } + + ns, err := corev1.NewNamespace(ctx, localEnv.CommonNamer().ResourceName("k8s-namespace"), &corev1.NamespaceArgs{Metadata: &metav1.ObjectMetaArgs{ + Name: pulumi.String("e2e-operator"), + }}, pulumi.Provider(kindKubeProvider)) + + if err != nil { + return err + } + + // Install kustomizations + kustomizeAppFunc := KustomizeWorkloadAppFunc(params.testName, params.kustomizeResources) + + e2eKustomize, err := kustomizeAppFunc(&localEnv, kindKubeProvider) + if err != nil { + return err + } + + // Create Operator component + var operatorComp *operator.Operator + if params.operatorOptions != nil { + operatorOpts := []pulumi.ResourceOption{ + pulumi.DependsOn([]pulumi.Resource{e2eKustomize, ns}), + } + params.operatorOptions = append(params.operatorOptions, operatorparams.WithPulumiResourceOptions(operatorOpts...)) + + operatorComp, err = operator.NewOperator(&localEnv, localEnv.CommonNamer().ResourceName("operator"), kindKubeProvider, params.operatorOptions...) + if err != nil { + return err + } + } + + // Setup DDA options + if params.ddaOptions != nil && params.operatorOptions != nil { + ddaResourceOpts := []pulumi.ResourceOption{ + pulumi.DependsOn([]pulumi.Resource{e2eKustomize, operatorComp}), + } + params.ddaOptions = append( + params.ddaOptions, + agentwithoperatorparams.WithPulumiResourceOptions(ddaResourceOpts...)) + + ddaComp, err := agent.NewDDAWithOperator(&localEnv, params.name, kindKubeProvider, params.ddaOptions...) 
+ if err != nil { + return err + } + + if err = ddaComp.Export(ctx, &env.Agent.KubernetesAgentOutput); err != nil { + return err + } + } else { + env.Agent = nil + } + + for _, workload := range params.yamlWorkloads { + _, err = yaml.NewConfigFile(ctx, workload.Name, &yaml.ConfigFileArgs{ + File: workload.Path, + }, pulumi.Provider(kindKubeProvider)) + if err != nil { + return err + } + } + + for _, appFunc := range params.workloadAppFuncs { + _, err := appFunc(&localEnv, kindKubeProvider) + if err != nil { + return err + } + } + + return nil +} + +// KustomizeWorkloadAppFunc Installs the operator e2e kustomize directory and any extra kustomize resources +func KustomizeWorkloadAppFunc(name string, extraKustomizeResources []string) func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { + return func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { + k8sComponent := &kubeComp.Workload{} + if err := e.Ctx().RegisterComponentResource("dd:apps", fmt.Sprintf("kustomize-%s", name), k8sComponent, pulumi.DeleteBeforeReplace(true)); err != nil { + return nil, err + } + + // Install kustomizations + kustomizeDirPath, err := filepath.Abs(NewMgrKustomizeDirPath) + if err != nil { + return nil, err + } + + err = UpdateKustomization(kustomizeDirPath, extraKustomizeResources) + if err != nil { + return nil, err + } + kustomizeOpts := []pulumi.ResourceOption{ + pulumi.Provider(kubeProvider), + pulumi.Parent(k8sComponent), + } + + _, err = kustomize.NewDirectory(e.Ctx(), "e2e-manager", + kustomize.DirectoryArgs{ + Directory: pulumi.String(kustomizeDirPath), + }, kustomizeOpts...) 
+ if err != nil { + return nil, err + } + return k8sComponent, nil + } +} + +// YAMLWorkloadAppFunc Applies a Kubernetes resource yaml file +func YAMLWorkloadAppFunc(yamlWorkload YAMLWorkload) func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { + return func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) { + k8sComponent := &kubeComp.Workload{} + if err := e.Ctx().RegisterComponentResource("dd:apps", "k8s-apply", k8sComponent); err != nil { + return nil, err + } + _, err := yaml.NewConfigFile(e.Ctx(), yamlWorkload.Name, &yaml.ConfigFileArgs{ + File: yamlWorkload.Path, + }, pulumi.Provider(kubeProvider)) + if err != nil { + return nil, err + } + return k8sComponent, nil + } +} diff --git a/test/e2e/tests/k8sSuite/k8sSuite_test.go b/test/e2e/tests/k8sSuite/k8sSuite_test.go new file mode 100644 index 000000000..3d9d1fcb5 --- /dev/null +++ b/test/e2e/tests/k8sSuite/k8sSuite_test.go @@ -0,0 +1,286 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package k8ssuite + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" + "github.com/DataDog/datadog-agent/test/fakeintake/client" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + "github.com/DataDog/datadog-operator/test/e2e/common" + "github.com/DataDog/datadog-operator/test/e2e/provisioners" + "github.com/DataDog/datadog-operator/test/e2e/tests/utils" + + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentwithoperatorparams" + "github.com/DataDog/test-infra-definitions/components/datadog/operatorparams" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/stretchr/testify/assert" +) + +var ( + matchTags = []*regexp.Regexp{regexp.MustCompile("kube_container_name:.*")} + matchOpts = []client.MatchOpt[*aggregator.MetricSeries]{client.WithMatchingTags[*aggregator.MetricSeries](matchTags)} +) + +type k8sSuite struct { + e2e.BaseSuite[environments.Kubernetes] + local bool +} + +func (s *k8sSuite) TestGenericK8s() { + defaultOperatorOpts := []operatorparams.Option{ + operatorparams.WithNamespace(common.NamespaceName), + operatorparams.WithOperatorFullImagePath(common.OperatorImageName), + operatorparams.WithHelmValues("installCRDs: false"), + } + + defaultProvisionerOpts := []provisioners.KubernetesProvisionerOption{ + provisioners.WithK8sVersion(common.K8sVersion), + provisioners.WithOperatorOptions(defaultOperatorOpts...), + provisioners.WithLocal(s.local), + } + + defaultDDAOpts := []agentwithoperatorparams.Option{ + agentwithoperatorparams.WithNamespace(common.NamespaceName), + } + + s.T().Run("Verify Operator", func(t *testing.T) { + s.Assert().EventuallyWithT(func(c *assert.CollectT) { + utils.VerifyOperator(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client()) + }, 300*time.Second, 15*time.Second, "Could not validate operator pod in 
time") + }) + + s.T().Run("Minimal DDA config", func(t *testing.T) { + ddaConfigPath, err := common.GetAbsPath(common.DdaMinimalPath) + assert.NoError(s.T(), err) + + ddaOpts := []agentwithoperatorparams.Option{ + agentwithoperatorparams.WithDDAConfig(agentwithoperatorparams.DDAConfig{ + Name: "dda-minimum", + YamlFilePath: ddaConfigPath, + }), + } + ddaOpts = append(ddaOpts, defaultDDAOpts...) + + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator-minimal-dda"), + provisioners.WithK8sVersion(common.K8sVersion), + provisioners.WithOperatorOptions(defaultOperatorOpts...), + provisioners.WithDDAOptions(ddaOpts...), + provisioners.WithLocal(s.local), + } + + s.UpdateEnv(provisioners.KubernetesProvisioner(provisionerOptions...)) + + err = s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + s.Assert().NoError(err) + + s.Assert().EventuallyWithT(func(c *assert.CollectT) { + utils.VerifyAgentPods(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), common.NodeAgentSelector+",agent.datadoghq.com/name=dda-minimum") + utils.VerifyNumPodsForSelector(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), 1, common.ClusterAgentSelector+",agent.datadoghq.com/name=dda-minimum") + + agentPods, err := s.Env().KubernetesCluster.Client().CoreV1().Pods(common.NamespaceName).List(context.TODO(), metav1.ListOptions{LabelSelector: common.NodeAgentSelector + ",agent.datadoghq.com/name=dda-minimum", + FieldSelector: "status.phase=Running"}) + assert.NoError(s.T(), err) + + for _, pod := range agentPods.Items { + output, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec(common.NamespaceName, pod.Name, "agent", []string{"agent", "status", "collector", "-j"}) + assert.NoError(c, err) + utils.VerifyCheck(c, output, "kubelet") + } + + metricNames, err := s.Env().FakeIntake.Client().GetMetricNames() + s.Assert().NoError(err) + assert.Contains(c, metricNames, "kubernetes.cpu.usage.total") + 
+ metrics, err := s.Env().FakeIntake.Client().FilterMetrics("kubernetes.cpu.usage.total", matchOpts...) + s.Assert().NoError(err) + + for _, metric := range metrics { + for _, points := range metric.Points { + s.Assert().Greater(points.Value, float64(0)) + } + } + + clusterAgentPods, err := s.Env().KubernetesCluster.Client().CoreV1().Pods(common.NamespaceName).List(context.TODO(), metav1.ListOptions{LabelSelector: common.ClusterAgentSelector + ",agent.datadoghq.com/e2e-test=datadog-agent-minimum"}) + assert.NoError(s.T(), err) + + for _, pod := range clusterAgentPods.Items { + output, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec(common.NamespaceName, pod.Name, "agent", []string{"agent", "status", "collector", "-j"}) + assert.NoError(c, err) + utils.VerifyCheck(c, output, "kubernetes_state_core") + } + + s.verifyKSMCheck(c) + }, 10*time.Minute, 30*time.Second, "could not validate KSM (cluster check) metrics in time") + + }) + + s.T().Run("KSM check works (cluster check runner)", func(t *testing.T) { + ddaConfigPath, err := common.GetAbsPath(filepath.Join(common.ManifestsPath, "datadog-agent-ccr-enabled.yaml")) + assert.NoError(s.T(), err) + + ddaOpts := []agentwithoperatorparams.Option{ + agentwithoperatorparams.WithDDAConfig(agentwithoperatorparams.DDAConfig{ + Name: "dda-minimum", + YamlFilePath: ddaConfigPath, + }), + } + ddaOpts = append(ddaOpts, defaultDDAOpts...) 
+ + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator-ksm-ccr"), + provisioners.WithK8sVersion(common.K8sVersion), + provisioners.WithOperatorOptions(defaultOperatorOpts...), + provisioners.WithDDAOptions(ddaOpts...), + provisioners.WithLocal(s.local), + } + + s.UpdateEnv(provisioners.KubernetesProvisioner(provisionerOptions...)) + + err = s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + s.Assert().NoError(err) + + s.Assert().EventuallyWithTf(func(c *assert.CollectT) { + utils.VerifyAgentPods(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), "app.kubernetes.io/instance=datadog-ccr-enabled-agent") + + utils.VerifyNumPodsForSelector(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), 1, "app.kubernetes.io/instance=datadog-ccr-enabled-cluster-checks-runner") + + ccrPods, err := s.Env().KubernetesCluster.Client().CoreV1().Pods(common.NamespaceName).List(context.TODO(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/instance=datadog-ccr-enabled-cluster-checks-runner"}) + assert.NoError(s.T(), err) + + for _, ccr := range ccrPods.Items { + output, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec(common.NamespaceName, ccr.Name, "agent", []string{"agent", "status", "collector", "-j"}) + assert.NoError(c, err) + utils.VerifyCheck(c, output, "kubernetes_state_core") + } + + s.verifyKSMCheck(c) + }, 10*time.Minute, 15*time.Second, "could not validate kubernetes_state_core (cluster check on CCR) check in time") + }) + + s.T().Run("Autodiscovery works", func(t *testing.T) { + ddaConfigPath, err := common.GetAbsPath(common.DdaMinimalPath) + assert.NoError(s.T(), err) + + ddaOpts := []agentwithoperatorparams.Option{ + agentwithoperatorparams.WithDDAConfig(agentwithoperatorparams.DDAConfig{Name: "dda-autodiscovery", YamlFilePath: ddaConfigPath}), + } + ddaOpts = append(ddaOpts, defaultDDAOpts...) 
+ + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator-autodiscovery"), + provisioners.WithDDAOptions(ddaOpts...), + provisioners.WithYAMLWorkload(provisioners.YAMLWorkload{Name: "nginx", Path: strings.Join([]string{common.ManifestsPath, "autodiscovery-annotation.yaml"}, "/")}), + provisioners.WithLocal(s.local), + } + provisionerOptions = append(provisionerOptions, defaultProvisionerOpts...) + + // Add nginx with annotations + s.UpdateEnv(provisioners.KubernetesProvisioner(provisionerOptions...)) + + err = s.Env().FakeIntake.Client().FlushServerAndResetAggregators() + s.Assert().NoError(err) + + s.Assert().EventuallyWithTf(func(c *assert.CollectT) { + utils.VerifyNumPodsForSelector(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), 1, "app=nginx") + + utils.VerifyAgentPods(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), common.NodeAgentSelector+",agent.datadoghq.com/name=dda-autodiscovery") + + // check agent pods for http check + agentPods, err := s.Env().KubernetesCluster.Client().CoreV1().Pods(common.NamespaceName).List(context.TODO(), metav1.ListOptions{LabelSelector: common.NodeAgentSelector + ",agent.datadoghq.com/name=dda-autodiscovery", + FieldSelector: "status.phase=Running"}) + assert.NoError(c, err) + + for _, pod := range agentPods.Items { + output, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec(common.NamespaceName, pod.Name, "agent", []string{"agent", "status", "collector", "-j"}) + assert.NoError(c, err) + + utils.VerifyCheck(c, output, "http_check") + } + + s.verifyHTTPCheck(c) + }, 900*time.Second, 15*time.Second, "could not validate http_check in time") + }) + + s.T().Run("Logs collection works", func(t *testing.T) { + ddaConfigPath, err := common.GetAbsPath(filepath.Join(common.ManifestsPath, "datadog-agent-logs.yaml")) + assert.NoError(s.T(), err) + + ddaOpts := []agentwithoperatorparams.Option{ + 
agentwithoperatorparams.WithDDAConfig(agentwithoperatorparams.DDAConfig{ + Name: "datadog-agent-logs", + YamlFilePath: ddaConfigPath, + }), + } + ddaOpts = append(ddaOpts, defaultDDAOpts...) + + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator-logs-collection"), + provisioners.WithK8sVersion(common.K8sVersion), + provisioners.WithOperatorOptions(defaultOperatorOpts...), + provisioners.WithDDAOptions(ddaOpts...), + provisioners.WithLocal(s.local), + } + + s.UpdateEnv(provisioners.KubernetesProvisioner(provisionerOptions...)) + + // Verify logs collection on agent pod + s.Assert().EventuallyWithTf(func(c *assert.CollectT) { + utils.VerifyAgentPods(s.T(), c, common.NamespaceName, s.Env().KubernetesCluster.Client(), "app.kubernetes.io/instance=datadog-agent-logs-agent") + + agentPods, err := s.Env().KubernetesCluster.Client().CoreV1().Pods(common.NamespaceName).List(context.TODO(), metav1.ListOptions{LabelSelector: "app.kubernetes.io/instance=datadog-agent-logs-agent"}) + assert.NoError(c, err) + + for _, pod := range agentPods.Items { + output, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec(common.NamespaceName, pod.Name, "agent", []string{"agent", "status", "logs agent", "-j"}) + assert.NoError(c, err) + utils.VerifyAgentPodLogs(c, output) + } + + s.verifyAPILogs() + }, 300*time.Second, 15*time.Second, "could not validate logs collection in time") + }) +} + +func (s *k8sSuite) verifyAPILogs() { + logs, err := s.Env().FakeIntake.Client().FilterLogs("agent") + s.Assert().NoError(err) + s.Assert().NotEmptyf(logs, fmt.Sprintf("Expected fake intake-ingested logs to not be empty: %s", err)) +} + +func (s *k8sSuite) verifyKSMCheck(c *assert.CollectT) { + metricNames, err := s.Env().FakeIntake.Client().GetMetricNames() + assert.NoError(c, err) + assert.Contains(c, metricNames, "kubernetes_state.container.running") + + metrics, err :=
s.Env().FakeIntake.Client().FilterMetrics("kubernetes_state.container.running", matchOpts...) + assert.NoError(c, err) + assert.NotEmptyf(c, metrics, fmt.Sprintf("expected metric series to not be empty: %s", err)) +} + +func (s *k8sSuite) verifyHTTPCheck(c *assert.CollectT) { + metricNames, err := s.Env().FakeIntake.Client().GetMetricNames() + assert.NoError(c, err) + assert.Contains(c, metricNames, "network.http.can_connect") + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("network.http.can_connect") + assert.NoError(c, err) + assert.Greater(c, len(metrics), 0) + for _, metric := range metrics { + for _, points := range metric.Points { + assert.Greater(c, points.Value, float64(0)) + } + } +} diff --git a/test/e2e/tests/k8sSuite/kind_aws_test.go b/test/e2e/tests/k8sSuite/kind_aws_test.go new file mode 100644 index 000000000..75264adbe --- /dev/null +++ b/test/e2e/tests/k8sSuite/kind_aws_test.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package k8ssuite + +import ( + "fmt" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-operator/test/e2e/common" + "github.com/DataDog/datadog-operator/test/e2e/provisioners" + "github.com/DataDog/test-infra-definitions/components/datadog/operatorparams" + "testing" +) + +type awsKindSuite struct { + k8sSuite +} + +func TestAWSKindSuite(t *testing.T) { + operatorOptions := []operatorparams.Option{ + operatorparams.WithNamespace(common.NamespaceName), + operatorparams.WithOperatorFullImagePath(common.OperatorImageName), + operatorparams.WithHelmValues("installCRDs: false"), + } + + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator"), + provisioners.WithOperatorOptions(operatorOptions...), + provisioners.WithoutDDA(), + } + + e2eOpts := []e2e.SuiteOption{ + e2e.WithStackName(fmt.Sprintf("operator-awskind-%s", common.K8sVersion)), + e2e.WithProvisioner(provisioners.KubernetesProvisioner(provisionerOptions...)), + } + + e2e.Run(t, &awsKindSuite{}, e2eOpts...) +} diff --git a/test/e2e/tests/k8sSuite/kind_local_test.go b/test/e2e/tests/k8sSuite/kind_local_test.go new file mode 100644 index 000000000..a875677dd --- /dev/null +++ b/test/e2e/tests/k8sSuite/kind_local_test.go @@ -0,0 +1,47 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package k8ssuite + +import ( + "fmt" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-operator/test/e2e/common" + "github.com/DataDog/datadog-operator/test/e2e/provisioners" + "github.com/DataDog/test-infra-definitions/components/datadog/operatorparams" + "testing" +) + +type localKindSuite struct { + k8sSuite +} + +func (s *localKindSuite) SetupSuite() { + // use `s.local = true` to use the local kind provisioner + s.local = true + s.BaseSuite.SetupSuite() +} + +func TestLocalKindSuite(t *testing.T) { + operatorOptions := []operatorparams.Option{ + operatorparams.WithNamespace(common.NamespaceName), + operatorparams.WithOperatorFullImagePath(common.OperatorImageName), + operatorparams.WithHelmValues("installCRDs: false"), + } + + provisionerOptions := []provisioners.KubernetesProvisionerOption{ + provisioners.WithTestName("e2e-operator"), + provisioners.WithOperatorOptions(operatorOptions...), + provisioners.WithoutDDA(), + provisioners.WithLocal(true), + } + + e2eOpts := []e2e.SuiteOption{ + e2e.WithStackName(fmt.Sprintf("operator-localkind-%s", common.K8sVersion)), + e2e.WithProvisioner(provisioners.KubernetesProvisioner(provisionerOptions...)), + } + + e2e.Run(t, &localKindSuite{}, e2eOpts...) +} diff --git a/test/e2e/tests/utils/utils.go b/test/e2e/tests/utils/utils.go new file mode 100644 index 000000000..879f76ea6 --- /dev/null +++ b/test/e2e/tests/utils/utils.go @@ -0,0 +1,95 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
+ +package utils + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-operator/test/e2e/common" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeClient "k8s.io/client-go/kubernetes" + "strconv" + "strings" + "testing" +) + +func VerifyOperator(t *testing.T, c *assert.CollectT, namespace string, k8sClient kubeClient.Interface) { + VerifyNumPodsForSelector(t, c, namespace, k8sClient, 1, "app.kubernetes.io/name=datadog-operator") +} + +func VerifyNumPodsForSelector(t *testing.T, c *assert.CollectT, namespace string, k8sClient kubeClient.Interface, numPods int, selector string) { + t.Log("Waiting for number of pods created", "number", numPods, "selector", selector) + podsList, err := k8sClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector, + FieldSelector: "status.phase=Running", + }) + assert.NoError(c, err) + assert.NotNil(c, podsList) + assert.NotEmpty(c, podsList.Items) + assert.Equal(c, numPods, len(podsList.Items)) +} + +func VerifyAgentPods(t *testing.T, c *assert.CollectT, namespace string, k8sClient kubeClient.Interface, selector string) { + nodesList, err := k8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + assert.NoError(c, err) + assert.NotNil(c, nodesList) + assert.NotEmpty(c, nodesList.Items) + VerifyNumPodsForSelector(t, c, namespace, k8sClient, len(nodesList.Items), selector) +} + +func VerifyCheck(c *assert.CollectT, collectorOutput string, checkName string) { + var runningChecks map[string]interface{} + + checksJson := common.ParseCollectorJson(collectorOutput) + if checksJson != nil { + runningChecks = checksJson["runnerStats"].(map[string]interface{})["Checks"].(map[string]interface{}) + if check, found := runningChecks[checkName].(map[string]interface{}); found { + for _, instance := range check { + assert.EqualValues(c, checkName, instance.(map[string]interface{})["CheckName"].(string)) + + lastError, exists := 
instance.(map[string]interface{})["LastError"].(string) + assert.True(c, exists) + assert.Empty(c, lastError) + + totalErrors, exists := instance.(map[string]interface{})["TotalErrors"].(float64) + assert.True(c, exists) + assert.Zero(c, totalErrors) + + totalMetricSamples, exists := instance.(map[string]interface{})["TotalMetricSamples"].(float64) + assert.True(c, exists) + assert.Greater(c, totalMetricSamples, float64(0)) + } + } else { + assert.True(c, found, fmt.Sprintf("Check %s not found or not yet running.", checkName)) + } + } +} + +func VerifyAgentPodLogs(c *assert.CollectT, collectorOutput string) { + var agentLogs []interface{} + logsJson := common.ParseCollectorJson(collectorOutput) + + tailedIntegrations := 0 + if logsJson != nil { + agentLogs = logsJson["logsStats"].(map[string]interface{})["integrations"].([]interface{}) + for _, log := range agentLogs { + if integration, ok := log.(map[string]interface{})["sources"].([]interface{})[0].(map[string]interface{}); ok { + message, exists := integration["messages"].([]interface{})[0].(string) + if exists && len(message) > 0 { + num, _ := strconv.Atoi(string(message[0])) + if num > 0 && strings.Contains(message, "files tailed") { + tailedIntegrations++ + } + } + } else { + assert.True(c, ok, "Failed to get sources from logs. Possible causes: missing 'sources' field, empty array, or incorrect data format.") + } + } + } + totalIntegrations := len(agentLogs) + assert.True(c, tailedIntegrations >= totalIntegrations*80/100, "Expected at least 80%% of integrations to be tailed, got %d/%d", tailedIntegrations, totalIntegrations) +}