diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 28558e44d79..0c7ab15d0e9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -105,11 +105,13 @@ static/ @rohanmaharjan100 @wei-harness /src/components/Docs/data/serviceReliabilityManagementData.ts @sunilgupta-harness # Chaos Docs -/docs/chaos-engineering/ @neelanjan00 @SmritiSatya @shovanmaity @Jonsy13 @Saranya-jena @Adarshkumar14 @ispeakc0de @amityt @uditgaurav @S-ayanide @vanshBhatia-A4k9 @ksatchit @umamukkara +/docs/chaos-engineering/ @neelanjan00 @SmritiSatya @shovanmaity @Jonsy13 @Saranya-jena @Adarshkumar14 @ispeakc0de @amityt @uditgaurav @S-ayanide @vanshBhatia-A4k9 @ksatchit @umamukkara @SarthakJain26 /release-notes/chaos-engineering.md @neelanjan00 @SmritiSatya @Jonsy13 /src/components/Roadmap/data/ceData.ts @krishi0408 @neelanjan00 @SmritiSatya @vishal-av @SushrutHarness /src/components/Docs/data/chaosEngineeringData* @krishi0408 @neelanjan00 @SmritiSatya @vishal-av @SushrutHarness # Chaos FAQs +/docs/faqs/chaos-engineering-faqs @neelanjan00 @SmritiSatya @shovanmaity @Jonsy13 @Saranya-jena @Adarshkumar14 @ispeakc0de @amityt @uditgaurav @S-ayanide @vanshBhatia-A4k9 @ksatchit @umamukkara @SarthakJain26 +/docs/faqs/static @neelanjan00 @SmritiSatya @Jonsy13 @Adarshkumar14 @ksatchit @umamukkara /kb/chaos-engineering/chaos-engineering-faq @neelanjan00 @SmritiSatya @ksatchit @umamukkara # Platform Docs diff --git a/docs/chaos-engineering/concepts/explore-features.md b/docs/chaos-engineering/concepts/explore-features.md index 0e60b976e75..4b9909fa66a 100644 --- a/docs/chaos-engineering/concepts/explore-features.md +++ b/docs/chaos-engineering/concepts/explore-features.md @@ -47,7 +47,7 @@ Instead of creating a database with the services, you can automatically discover Below is the control flow to a discovered service. 
-![](../use-harness-ce/service-discovery/static/control-flow-1.png) +![control flow](../../platform/service-discovery/static/control-flow-1.png) For more information on how to create a discovery agent, go to [Service Discovery](/docs/chaos-engineering/use-harness-ce/service-discovery). diff --git a/docs/chaos-engineering/concepts/how-stuff-works/generic-pod-flow.md b/docs/chaos-engineering/concepts/how-stuff-works/generic-pod-flow.md new file mode 100644 index 00000000000..414b36370e2 --- /dev/null +++ b/docs/chaos-engineering/concepts/how-stuff-works/generic-pod-flow.md @@ -0,0 +1,47 @@ +--- +title: Generic Pod Fault Workflow +sidebar_label: Generic Pod Fault Workflow +sidebar_position: 20 +--- + +This topic describes the flow of control when you execute a generic Kubernetes pod experiment in Harness Chaos Engineering. + +The diagram below describes the flow of control for a generic Kubernetes pod experiment. + +![generic pod fault flow](../static/how-stuff-works/generic-pod-fault-flow.png) + +### Step 1: Design and Launch an Experiment + +You (the user) define the chaos experiment in the Chaos Control Plane. +This includes configuring the fault type, duration, target application, and other parameters. + +### Step 2. Chaos Agent Picks Experiment + +The Chaos Agent (or Subscriber) detects the new experiment and claims it. + +### Step 3. Apply Chaos Experiment + +The agent/subscriber applies the Custom Resource (CR) YAML, which includes: + - Security Context Constraints (permissions required for execution) + - Fault parameters (for example, pod delete, network latency) + - Application details (target app) + +### Step 4. Controllers Create Helper Pods + +- The controllers watch the CR and create Just-In-Time (transient) chaos helper pods (if required) on the same node as the target application container. +- For chaos faults such as pod-level CPU/memory stress, and network disruptions, helper pods are created. 
However, for faults that rely on kube-api operations (such as pod-delete and pod-autoscaler), helper pods are not created. + +### Step 5. Inject Fault into Application + +- The helper pod runs in the same namespace as the target application and executes the chaos process (for example, increases CPU usage). Here, the Security Context Constraints (like `RUNASANY`, `PRIVILEGED`, `NET_ADMIN`, `SYS_ADMIN`, `ALLOW_HOSTPATH_MOUNT`, and `HOST_PID`) are mapped with the chaos Service Account. + +### Step 6. Target Application Experiences Chaos Impact + +The target application container (inside the pod) is affected by the chaos fault. +The impact is contained within the target pod’s namespace, ensuring: + - Other pods on the node remain unaffected. + - Node-level services remain operational. + +In faults that don't use helper pods, chaos is usually reverted/removed automatically. + +In faults where helper pods are created, these pods are removed once the chaos duration is complete. By the end of this process, the subscriber sends back the results of the chaos experiment to the control plane and continues to poll for new experiment tasks. \ No newline at end of file diff --git a/docs/chaos-engineering/concepts/how-stuff-works/stress-fault-flow.md b/docs/chaos-engineering/concepts/how-stuff-works/stress-fault-flow.md new file mode 100644 index 00000000000..89b42aca093 --- /dev/null +++ b/docs/chaos-engineering/concepts/how-stuff-works/stress-fault-flow.md @@ -0,0 +1,53 @@ +--- +title: Stress/IO Fault Workflow +sidebar_label: Stress or IO Fault Workflow +sidebar_position: 30 +--- + +This topic describes the flow of control when you execute a stress or IO experiment in Harness Chaos Engineering. + +The diagram below describes the flow of control for a stress or IO experiment. + +![stress pod fault flow](../static/how-stuff-works/stress-fault-flow.png) + +### Step 1. Identifying the Target Application +The Target Application Pod runs an App Container. 
+The app container has specific namespaces for processes (pid_ns) and mounts (mnt_ns). +### Step 2. Initiating the Chaos Injection +A Chaos Helper Pod is deployed to facilitate the fault injection. +The helper pod needs to identify and access the app container. +### Step 3. Retrieving Container Details +The Chaos Helper Pod examines the Pod Spec to get the App Container ID corresponding to the target application. +It then inspects the Container Metadata. +### Step 4. Extracting Process Information +The helper pod derives the PID (Process ID) of the app container. +This PID is necessary for injecting the fault into the process namespace. +### Step 5. Executing the Chaos Process +The helper pod injects the chaos process into the App Container’s namespace. +This process runs for the duration of the chaos experiment. +### Step 6. Performing the Stress or I/O Fault +The type of fault determines how the experiment proceeds: +A. I/O Stress Chaos + +Obtain the cgroup of the target container. +Prepare stressor processes in pause mode. +Transfer the I/O stress process into the target container's cgroup. +Resume the stress process to consume disk resources. +B. I/O Latency Chaos + +Enter the target container’s PID namespace (pid_ns) and mount namespace (mnt_ns). +Use FUSE, ptrace, or file mounting techniques to simulate I/O slowdowns or disruptions. +### Step 7. Required Permissions & Configuration +The Chaos Helper Pod needs: +Root/Sudo access +Host path for socket mount +(For I/O Latency Chaos) Additional system capabilities: +hostipc=true +net_admin +sys_admin +### Step 8. Observing & Controlling the Chaos Duration +The fault injection runs for a defined period. +Once completed, the injected processes terminate. +### Step 9. Restoring Normal Operations +The experiment concludes. +Any temporary changes revert, and monitoring data is collected. 
\ No newline at end of file diff --git a/docs/chaos-engineering/concepts/static/how-stuff-works/generic-pod-fault-flow.png b/docs/chaos-engineering/concepts/static/how-stuff-works/generic-pod-fault-flow.png new file mode 100644 index 00000000000..ff0d77e5e08 Binary files /dev/null and b/docs/chaos-engineering/concepts/static/how-stuff-works/generic-pod-fault-flow.png differ diff --git a/docs/chaos-engineering/concepts/static/how-stuff-works/stress-fault-flow.png b/docs/chaos-engineering/concepts/static/how-stuff-works/stress-fault-flow.png new file mode 100644 index 00000000000..6918ede15a1 Binary files /dev/null and b/docs/chaos-engineering/concepts/static/how-stuff-works/stress-fault-flow.png differ diff --git a/docs/chaos-engineering/getting-started/saas/delegate-driven-agentless-chaos.md b/docs/chaos-engineering/getting-started/saas/delegate-driven-agentless-chaos.md new file mode 100644 index 00000000000..fa93f70d215 --- /dev/null +++ b/docs/chaos-engineering/getting-started/saas/delegate-driven-agentless-chaos.md @@ -0,0 +1,66 @@ +--- +title: Delegate-Driven Agentless Fault Injection +description: Use a Harness Delegate to execute an agentless model-based Kubernetes chaos fault. +sidebar_position: 10 +--- + +This topic describes how you can use a Harness Delegate along with an agentless chaos execution model to execute a chaos fault on Kubernetes. + +## Before you begin, review the following: + +* [Agentless Chaos Execution Model](/docs/chaos-engineering/concepts/how-stuff-works/agentless-chaos-working#agentless-chaos-execution-model) +* [Centralized Execution Plane](/docs/chaos-engineering/concepts/how-stuff-works/centralized-exec-plane) +* [Application Map](/docs/chaos-engineering/use-harness-ce/application-map#what-is-an-application-map) +* [Service Discovery](/docs/chaos-engineering/use-harness-ce/service-discovery) + +## Install Delegate in your Infrastructure + +### Step 1: Set up your target cluster + +a. 
In this step, set up your cluster (where Harness CE executes chaos experiments) with a dedicated namespace for Harness, and go to [Centralized Delegate](/docs/chaos-engineering/use-harness-ce/infrastructures/centralized-delegate) to perform the following steps. + - Create a dedicated Namespace. + - Create a service account in the dedicated namespace. + - Create K8s RBACs and Role binding for Chaos runner pods. + - Create a cluster role and cluster role binding for conducting service discovery and executing chaos experiments. + - Create a K8s connector with cluster URL and service account token as authentication method. + - Create an Infrastructure using the K8s connector. + + +### Step 2: Set Up a Discovery agent + +b. [Create a Discovery Agent](/docs/platform/service-discovery/customize-agent). + +### Step 3: Harness Discovers Services + +The discovery agent may take some time to discover the microservices in your Kubernetes cluster. + +If the microservices in your system are communicating with each other, the agent will discover the connectivity between these microservices. Otherwise, no connectivity is discovered. + +However, you can manually create network experiments and tag them to the relevant application map. + +c. Once the services are discovered, the output appears similar to this. + + ![services discovered](./static/delegate-drive-agentless/services-5.png) + +### Step 4: Create Application Map + +d. [Create an application map](/docs/chaos-engineering/use-harness-ce/application-map#create-an-application-map) from the discovery agent. + +### Step 5: Harness Auto-Creates Experiments + +e. Go to **Application Map** in **Project Settings**, and select the application map that you created earlier. It would look similar to the image below. + + ![output](./static/delegate-drive-agentless/app-map-6.png) + +f. In the above screen, click **Chaos Experiments** and choose one of **OnlyFew**, **Moderate** and **Maximum** to auto-create experiments. 
These options describe the number of chaos experiments that would be automatically created. + + ![options](./static/delegate-drive-agentless/options.png) + +g. Based on the option you select, Harness auto-creates the experiments for each discovered service that was selected in the application map. Refresh the page to see the auto-created experiments. + + ![auto-create](./static/delegate-drive-agentless/auto-create-7.png) + +## Troubleshooting + +- Go to [Troubleshooting Discovery Agent](/docs/chaos-engineering/troubleshooting/#discovery-agent) in case your discovery agent is unable to discover services. + diff --git a/docs/chaos-engineering/getting-started/saas/saas.md b/docs/chaos-engineering/getting-started/saas/saas.md index c9b3492f7d6..bd929e54f6a 100644 --- a/docs/chaos-engineering/getting-started/saas/saas.md +++ b/docs/chaos-engineering/getting-started/saas/saas.md @@ -22,7 +22,7 @@ In this tutorial, you will apply chaos on a sample boutique application on Kuber ![HCE Overview](./static/first-chaos/hce-overview.png) -### Step 2: Create an environment +## Step 2: Create an environment 2. A chaos experiment is executed in a chaos infrastructure that is associated with an **environment**. To create a new environment, navigate to the **Environments** page, and choose a **New Environment**. Specify the environment name, a description (optional), and tags (optional). Select the environment type, **Production** or **Non-Production**. Finally, select **Create** to add the new environment. @@ -32,7 +32,7 @@ In this tutorial, you will apply chaos on a sample boutique application on Kuber You can also select one of the environments from the list of environments if it is available instead of creating an environment. ::: -### Step 3: Create an infrastructure +## Step 3: Create an infrastructure 3. Once you have created an environment, you can add chaos infrastructure to it. Depending on your application, you can select **Kubernetes**, **Linux** or **Windows**. 
In this tutorial, you can select a Kubernetes infrastructure, which you will use to inject faults into Kubernetes resources. You can use an existing infrastructure or create a new one. In this tutorial, you can create a new infrastructure. For this, select **Enable chaos**. @@ -62,7 +62,7 @@ The **Cluster-wide access** installation mode allows you to target resources acr ![Infrastructure State](./static/first-chaos/infrastructure-state.png) -### Step 4: Create a demo application and observability infrastructure +## Step 4: Create a demo application and observability infrastructure Once you are all ready to target our Kubernetes resources, you can execute the simplest fault, [**Pod Delete**](/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-delete). The "pod delete" chaos fault deletes the pods of a deployment, StatefulSet, DaemonSet, etc, to validate the resiliency of a microservice application. @@ -143,7 +143,7 @@ Once you are all ready to target our Kubernetes resources, you can execute the s ![Grafana App Dashboard](./static/first-chaos/grafana-app-dashboard.png) -### Step 5: Construct a chaos experiment +## Step 5: Construct a chaos experiment Since the target application has been deployed, you can now create a chaos experiment. You will target the pods of the `carts` microservice with the **pod delete** fault. Currently, the cart page is healthy and accessible from the front end, as seen in the `/cart` route. @@ -208,7 +208,7 @@ Under probe details, you can see that the URL is `http://cartservice.hce.svc.clu ![Probes Config 6](./static/first-chaos/apply-changes.png) -### Step 6: Observing chaos execution +## Step 6: Observing chaos execution 24. To execute the chaos experiment, click **Save**, and then **Run**. 
@@ -257,7 +257,7 @@ Under probe details, you can see that the URL is `http://cartservice.hce.svc.clu ![Application Down Dashboard](./static/first-chaos/application-down-dashboard.png) -### Step 7: Evaluate the experiment run +## Step 7: Evaluate the experiment run 30. When the experiment execution concludes, you get a resilience score of 0 %. You will observe that the pod delete fault step failed. Before analyzing the experiment result, you can validate that the application is now again accessible, without any errors. You can validate this from the Grafana dashboard metrics that indicate the app returning to normal as the chaos duration is over. diff --git a/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/app-map-6.png b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/app-map-6.png new file mode 100644 index 00000000000..8cb2b16b98f Binary files /dev/null and b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/app-map-6.png differ diff --git a/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/auto-create-7.png b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/auto-create-7.png new file mode 100644 index 00000000000..51a4a1226cd Binary files /dev/null and b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/auto-create-7.png differ diff --git a/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/options.png b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/options.png new file mode 100644 index 00000000000..9ad5e5edb4a Binary files /dev/null and b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/options.png differ diff --git a/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/services-5.png b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/services-5.png new file mode 100644 
index 00000000000..5c0c8786206 Binary files /dev/null and b/docs/chaos-engineering/getting-started/saas/static/delegate-drive-agentless/services-5.png differ diff --git a/docs/chaos-engineering/security/security-templates/openshift-scc.md b/docs/chaos-engineering/security/security-templates/openshift-scc.md index 2992f261320..f89fe677868 100644 --- a/docs/chaos-engineering/security/security-templates/openshift-scc.md +++ b/docs/chaos-engineering/security/security-templates/openshift-scc.md @@ -15,7 +15,7 @@ Security context constraints allow administrators to control permissions for pod The default service account is used to run applications within a project. You can run other applications in the same project, but if you don't want to override the privileges used for all applications, create a new service account and grant special rights to the project where the application is run. :::tip -You can leverage all the [permissions mentioned](#run-service-account-as-a-cluster-admin) for fault execution as well as [service discovery](/docs/chaos-engineering/use-harness-ce/service-discovery/user-defined-service-account). The SCC described below is a superset, which means only some of them are required for service discovery. +You can leverage all the [permissions mentioned](#run-service-account-as-a-cluster-admin) for fault execution as well as [service discovery](/docs/platform/service-discovery/user-defined-service-account). The SCC described below is a superset, which means only some of them are required for service discovery. ::: ### Create a new service account diff --git a/docs/chaos-engineering/troubleshooting/known-issues.md b/docs/chaos-engineering/troubleshooting/known-issues.md index 363b8bef18d..dce27f2d331 100644 --- a/docs/chaos-engineering/troubleshooting/known-issues.md +++ b/docs/chaos-engineering/troubleshooting/known-issues.md @@ -12,6 +12,9 @@ This topic walks you through the known issues in Harness CE. 
The **Upgrade now** button appears even when the Kubernetes infrastructure is on the latest version due to the API not returning the correct update status. +![](./static/images/update.png) + + ## Copy to Clipboard Issue If you try to access the Harness Self-Managed Enterprise Edition (SMP) portal over an HTTP-based connection, the **Copy to clipboard** facility will not work. This facility works only when you access SMP over an HTTPS-based connection. diff --git a/docs/chaos-engineering/troubleshooting/static/images/update.png b/docs/chaos-engineering/troubleshooting/static/images/update.png new file mode 100644 index 00000000000..971a8e0b623 Binary files /dev/null and b/docs/chaos-engineering/troubleshooting/static/images/update.png differ diff --git a/docs/chaos-engineering/troubleshooting/troubleshooting.md b/docs/chaos-engineering/troubleshooting/troubleshooting.md index 9c53ee430c4..2e7731567d3 100644 --- a/docs/chaos-engineering/troubleshooting/troubleshooting.md +++ b/docs/chaos-engineering/troubleshooting/troubleshooting.md @@ -67,6 +67,33 @@ To fix this issue, perform the following steps: $ kubectl apply -f harness-chaos-enable.yml ``` +## Discovery Agent + +If the Discovery Agent is unable to discover services, + - Fetch the pods in the dedicated namespace in your target cluster. For example, if you have created a namespace `harness-chaos` in your target cluster, execute the following command to check the status of the pods. + + ``` + kubectl get pods -n harness-chaos + ``` + + - If you see a particular pod failing or in some erroneous state, get metadata of that particular pod. + + ``` + kubectl describe pod <pod-name> -n harness-chaos + ``` + + - View the logs of that particular pod. + + ``` + kubectl logs -f <pod-name> -n harness-chaos + ``` + +If the logs suggest that no resources were found in the dedicated namespace, check the logs of delegates installed. 
+ + ``` + kubectl get pods -n harness-delegate-ng + ``` + ## Probe related troubleshooting ### Environment variable and secret usage references in source mode of command probe diff --git a/docs/chaos-engineering/use-harness-ce/application-map.md b/docs/chaos-engineering/use-harness-ce/application-map.md index 62d48816484..93c9486128d 100644 --- a/docs/chaos-engineering/use-harness-ce/application-map.md +++ b/docs/chaos-engineering/use-harness-ce/application-map.md @@ -50,7 +50,7 @@ You can [create](#create-application-map), [edit](#edit-application-map), and [d ![](./static/app-maps/name-np-2.png) -5. Select one or more discovered services and select **Next**. +5. Select one or more discovered services on which you wish to inject chaos and select **Next**. ![](./static/app-maps/select-service-3.png) diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/_category_.json b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/_category_.json index 85134a9d077..61f5d734c8c 100644 --- a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/_category_.json +++ b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/_category_.json @@ -1,6 +1,6 @@ { "position": 50, - "label": "Governance in Execution", + "label": "ChaosGuard- Governance in Execution", "collapsed": "true", "customProps": { "description": "Set of policies, rules, and processes that manage and control how chaos experiments are created, executed, and monitored." 
diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md index 8e4d1c7d2ab..da3328d4464 100644 --- a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md +++ b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run.md @@ -16,19 +16,19 @@ This topic describes how you can configure **ChaosGuard** to enforce security po 1. In the **Chaos** module, select **ChaosGuard**, and select **Conditions**. The **Conditions** page lists existing conditions (if any). - ![navigate to chaos](./static/exec/navigate-1.png) + ![navigate to chaos](../static/chaosguard/navigate-1.png) 2. To create a condition, click **New condition**. - ![new-condition](./static/exec/new-condition.png) + ![new-condition](../static/chaosguard/new-condition.png) 3. Provide a name, a description (optional), and tags (optional). Specify the infrastructure. If you select either **Linux** or **Windows**, click **Save**. - ![infrastructure options](./static/exec/infra-options.png) + ![infrastructure options](../static/chaosguard/infra-options.png) - 3a. If you select **Kubernetes**, you can select one of **Harness Infrastructures** (or Harness Delegate) or **Dedicated Chaos Infrastructure**. Click **Save**. + 3a. If you select **Kubernetes**, you can select one of **Harness Infrastructures** (also known as Harness Delegate) or **Dedicated Chaos Infrastructure**. Click **Save**. - ![edit-condition](./static/exec/edit-condition.png) + ![edit-condition](../static/chaosguard/edit-condition.png) This gives you 3 ways to define a condition from the **Condition Editor**: - [YAML manifest](#define-constraints-using-yaml) @@ -39,33 +39,33 @@ This gives you 3 ways to define a condition from the **Condition Editor**: 1. Select the **YAML** tab. 
- ![select](./static/exec/select-1.png) + ![select](../static/chaosguard/select-1.png) -2. Specify the relevant values corresponding to the respective names. Click **Save**. +2. Specify the relevant values corresponding to the respective names. Click **Save**. In this example, the YAML configuration indicates that a condition is applied to a fault named **pod-delete** with the specified **label**, **namespace**, and **serviceAccount**. - ![yaml edit](./static/exec/yaml-edit.png) + ![yaml edit](../static/chaosguard/yaml-edit.png) ### Define constraints using the visual editor 1. To add conditions using a visual editor, navigate to the **VISUAL** tab of the condition you created earlier. - ![condition](./static/exec/condition-create.png) + ![condition](../static/chaosguard/condition-create.png) -2. Add the **WHAT** clause. It blocks a fault that is **EQUAL TO** (or matches) or **NOT EQUAL TO** (everything else apart from the given value) pod delete. You can add more than one **WHAT** clause. +2. Add the **WHAT** clause. It blocks a fault whose name is **EQUAL TO** (or matches) or **NOT EQUAL TO** (everything else apart from the given value) pod delete. You can add more than one **WHAT** clause. This clause also takes the experiment name as input. - ![what](./static/exec/condition-what.png) + ![what](../static/chaosguard/condition-what.png) 3. Add the **WHERE** clause. It blocks one or more infrastructure. Select more than one infrastructure by hovering over the field. - ![where](./static/exec/condition-where.png) + ![where](../static/chaosguard/condition-where.png) 4. Add the **WHICH** clause. It blocks the infrastructure that has specific entries for **APPLICATION MAP**, **SERVICES**, **NAMESPACE** (mandatory), **KIND** (mandatory), and **APP LABEL**. You can add more than one **WHICH** clause. - ![which](./static/exec/condition-which.png) + ![which](../static/chaosguard/condition-which.png) 5. Add the **USING** clause. It blocks specific service account. 
You can add more than one service account by clicking the field and adding service account name to it. Click **Save**. - ![using](./static/exec/condition-using.png) + ![using](../static/chaosguard/condition-using.png) :::tip - You can use both **'EQUAL'** and **'NOT EQUAL TO'** operators in the condition logic for WHAT, WHERE, WHICH and USING. @@ -76,52 +76,52 @@ This gives you 3 ways to define a condition from the **Condition Editor**: 1. Instead of selecting the required parameters, you can generate conditions with the help of Harness AIDA. AIDA assistant shows up when you are configuring a condition. You can choose one of the suggestions provided by Harness AIDA by clicking on it or writing something along the same lines as the suggestions. - ![aida suggestion](./static/exec/aida-sug-1.png) + ![aida suggestion](../static/chaosguard/aida-sug-1.png) 2. When you type a condition, you will see that AIDA generates a YAML corresponding to your condition. If the YAML generated meets the conditions, you can click **Apply YAML**. - ![aida generation](./static/exec/aida-gen-2.png) + ![aida generation](../static/chaosguard/aida-gen-2.png) 3. If the generated YAML does not meet your conditions, click **Try again**. In the snippet below, you will see that AIDA applies the YAML generated to the editor. - ![aida apply](./static/exec/aida-apply-3.png) + ![aida apply](../static/chaosguard/aida-apply-3.png) ### Save condition After you define the constraints of a condition either using [YAML](#define-constraints-using-yaml), [visual editor](#define-constraints-using-the-visual-editor), or [AIDA](#define-constraints-using-aida), select **Save**. - ![save constraints](./static/exec/save-constraint.png) + ![save constraints](../static/chaosguard/save-constraint.png) ## Configure a rule 1. Click **New rule**. - ![](./static/exec/new-rule.png) + ![](../static/chaosguard/new-rule.png) 2. 
Specify parameters such as name, description (optional), tags (optional), user group to apply the rule (you can apply the rule to multiple user groups), and time window to apply the rule. You can apply multiple time windows to apply the rule. Click **Next**. - ![](./static/exec/add-des-2.png) + ![](../static/chaosguard/add-des-2.png) 3. Select user groups. Click **Apply Selected**. - ![](./static/exec/usr-grp-3.png) + ![](../static/chaosguard/usr-grp-3.png) 4. Select a condition (or multiple conditions) that you wish to apply. Click **Done**. - ![](./static/exec/select-cnd-4.png) + ![](../static/chaosguard/select-cnd-4.png) :::info note * Below is a snap that shows a successful evaluation of all the rules in a chaos experiment. - ![](./static/exec/rule-evaluation-pass.png) + ![](../static/chaosguard/rule-evaluation-pass.png) * Below is a snap that shows a failed evaluation of some (or all) rules in a chaos experiment. - ![](./static/exec/rule-evaluation-fail.png) + ![](../static/chaosguard/rule-evaluation-fail.png) ::: ### Enable and disable rules * The image below shows the two different states of a rule (enable and disable). 
- ![chaosguard-rules](./static/exec/chaosguard-rules.png) \ No newline at end of file + ![chaosguard-rules](../static/chaosguard/chaosguard-rules.png) \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/governance-in-execution.md b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/governance-in-execution.md index 699ba9568e1..0516e2b9a01 100644 --- a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/governance-in-execution.md +++ b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/governance-in-execution.md @@ -28,8 +28,7 @@ The different levels of security policy enforcement include (but are not limited ## Flow of control The security evaluation step iterates over every active (or enabled) [rule](#rule) for every experiment run in the project. If the evaluation is successful, you can proceed with the experiment. Upon failure, you can't iterate further in the experiment. Below is a flowchart that summarizes the flow of control when you enable a ChaosGuard rule for a fault or set of faults. - ![flow-chart](./static/chaosguard/flow-chart-chaosguard.png) - + ![flow-chart](../static/chaosguard-concepts/flow-chart-chaosguard.png) ## Low-level security governance requirements The table below describes the requirements for advanced environments. @@ -85,10 +84,15 @@ A rule becomes active when all its conditions are met, controlling who can execu The example below describes the rule as **applicable on the cluster chaosday-k8s-cluster between [5 PM, Friday, Sept 15th] to [9 AM, Monday, Sept 18th] for the specific condition**. -![rules-chaosguard](./static/chaosguard/add-conditions.png) +![rules-chaosguard](../static/chaosguard-concepts/add-conditions.png) :::tip Creating the ChaosGuard rules is subject to Harness RBAC policies. By default, these rules are enabled only for the project admin. 
However, the admin can delegate this to trusted users (typically in multi- or secondary admin scenarios). -![chaosguard-access-control](./static/chaosguard/chaosguard-access-control.png) -::: \ No newline at end of file +![chaosguard-access-control](../static/chaosguard-concepts/chaosguard-access-control.png) +::: + +## Next Steps + +- [Configure a Condition](/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run#configure-a-condition) +- [Configure a Rule](/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/govern-run#configure-a-rule) \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/add-conditions.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/add-conditions.png deleted file mode 100644 index 02bdf64a413..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/add-conditions.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaos-studio-condition.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaos-studio-condition.png deleted file mode 100644 index 68f80640007..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaos-studio-condition.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaosguard-access-control.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaosguard-access-control.png deleted file mode 100644 index d6eaeed0182..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/chaosguard-access-control.png and /dev/null differ diff 
--git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-using.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-using.png deleted file mode 100644 index b9e62e8d383..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-using.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-what.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-what.png deleted file mode 100644 index f708ab62b40..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-what.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-where.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-where.png deleted file mode 100644 index 449350d4f69..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-where.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-which.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-which.png deleted file mode 100644 index d93aa7f41bd..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/condition-which.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/fine-grain-control.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/fine-grain-control.png 
deleted file mode 100644 index 6792da911e0..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/fine-grain-control.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/flow-chart-chaosguard.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/flow-chart-chaosguard.png deleted file mode 100644 index 05888131244..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/flow-chart-chaosguard.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/new-user-entry.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/new-user-entry.png deleted file mode 100644 index 6c7c70ab090..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/chaosguard/new-user-entry.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/add-des-2.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/add-des-2.png deleted file mode 100644 index aab932be638..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/add-des-2.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-apply-3.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-apply-3.png deleted file mode 100644 index 04c02164c9b..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-apply-3.png and /dev/null differ diff --git 
a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-gen-2.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-gen-2.png deleted file mode 100644 index bd3a9eb893e..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-gen-2.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-sug-1.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-sug-1.png deleted file mode 100644 index 2d7f7f165fc..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/aida-sug-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/chaosguard-rules.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/chaosguard-rules.png deleted file mode 100644 index 763641c1eed..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/chaosguard-rules.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-create.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-create.png deleted file mode 100644 index f9385f0e4a2..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-create.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-using.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-using.png deleted file mode 100644 index f3a6a3c6cfe..00000000000 Binary files 
a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-using.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-what.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-what.png deleted file mode 100644 index 0275e95f782..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-what.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-where.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-where.png deleted file mode 100644 index f2b7e96d691..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-where.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-which.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-which.png deleted file mode 100644 index 0f345dd6e19..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/condition-which.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/edit-condition.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/edit-condition.png deleted file mode 100644 index e4da1488167..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/edit-condition.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/navigate-1.png 
b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/navigate-1.png deleted file mode 100644 index 27659140ad8..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/navigate-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-condition.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-condition.png deleted file mode 100644 index 98281209444..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-condition.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-rule.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-rule.png deleted file mode 100644 index a089b2cd876..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/new-rule.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-fail.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-fail.png deleted file mode 100644 index a9c4dc2e0f7..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-fail.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-pass.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-pass.png deleted file mode 100644 index cca8011ac57..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/rule-evaluation-pass.png and /dev/null differ diff --git 
a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/save-constraint.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/save-constraint.png deleted file mode 100644 index 6e609710680..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/save-constraint.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-1.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-1.png deleted file mode 100644 index b3c98ee5fe4..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-cnd-4.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-cnd-4.png deleted file mode 100644 index e6f2ebf6275..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/select-cnd-4.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/usr-grp-3.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/usr-grp-3.png deleted file mode 100644 index 45c45900e3e..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/usr-grp-3.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/yaml-edit.png b/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/yaml-edit.png deleted file mode 100644 index 1aad050f768..00000000000 Binary files 
a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/yaml-edit.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-using.png b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-using.png index b9e62e8d383..f3a6a3c6cfe 100644 Binary files a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-using.png and b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-using.png differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-what.png b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-what.png index f708ab62b40..0275e95f782 100644 Binary files a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-what.png and b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-what.png differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-where.png b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-where.png index 449350d4f69..f2b7e96d691 100644 Binary files a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-where.png and b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-where.png differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-which.png b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-which.png index d93aa7f41bd..0f345dd6e19 100644 Binary files a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-which.png and b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/condition-which.png differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/edit-condition.png 
b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/edit-condition.png index 1afe307cee7..e4da1488167 100644 Binary files a/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/edit-condition.png and b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/edit-condition.png differ diff --git a/docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/infra-options.png b/docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/infra-options.png similarity index 100% rename from docs/chaos-engineering/use-harness-ce/governance/governance-in-execution/static/exec/infra-options.png rename to docs/chaos-engineering/use-harness-ce/governance/static/chaosguard/infra-options.png diff --git a/docs/chaos-engineering/use-harness-ce/image-registry.md b/docs/chaos-engineering/use-harness-ce/image-registry.md index fd77ef3e93d..b9a63f07079 100644 --- a/docs/chaos-engineering/use-harness-ce/image-registry.md +++ b/docs/chaos-engineering/use-harness-ce/image-registry.md @@ -42,6 +42,15 @@ This feature is behind the feature flag `CHAOS_IMAGEREGISTRY_DEV`. Contact [Harn +### Permissions Required + +Ensure you have at least **View** permissions to the project to execute chaos experiments. + +To create or view an image registry, ask your admin to grant you the **Create/Edit** permissions from account/project/organization settings. + + ![](./static/image-registry/chaos-engineering-img-registry-perms.png) + + ### Configure Image Registry from Account/Organization/Project/Infrastructure settings With appropriate permissions, you can configure image registry from the **account** or **organization** or **project** or **infrastructure** settings. 
diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/_category_.json b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/_category_.json new file mode 100644 index 00000000000..fd630677da5 --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 60, + "label": "Prometheus Probe", + "collapsible": "true", + "collapsed": "true", +} diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/configure-prom-probe.md b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/configure-prom-probe.md new file mode 100644 index 00000000000..040c331b13b --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/configure-prom-probe.md @@ -0,0 +1,89 @@ +--- +title: Configure Prometheus Probe +sidebar_position: 20 +description: Configure Prometheus probe +--- + +This topic describes the configuration and usage of Prometheus probe. + +## Before you begin, review the following: + +- [What is a Resilience Probe?](/docs/chaos-engineering/use-harness-ce/probes/#what-is-a-resilience-probe) +- [Prometheus Probe](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/) + +### Configuration + +1. Go to **Chaos Engineering** module and select **Resilience Probes**. Select **New Probe**. + + ![navigate to module](./static/navigate-1.png) + +2. Select infrastructure type as **Kubernetes** and chaos probe as **Prometheus**. + + ![prometheus probe](./static/select-prom.png) + +3. Provide the name, and click **Configure Details**. + + ![configure details](./static/name-1.png) + +4. Based on your application's requirements, provide values for the following parameters. + +- **Prometheus Endpoint**: It is the target HTTP/HTTPS endpoint that the probe will send requests to. + + ![](./static/details-2.png) + +5. **Authorization** section has the following fields: + + - **Type**: Type of HTTP request to be performed. Supports `GET` and `POST`. 
+ - **Credentials**: Authentication credentials (username and password) required to access the target URL/endpoint. This field is mutually exclusive with **Credentials file** field. + - **Credentials file**: Path of the file that contains authentication credentials to access the HTTP endpoint. This field is mutually exclusive with **Credentials** field. + + Go to [Authorization](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/#authentication) for more information. + + **TLS Config** has the following fields: + + - **CA file**: Path of the file to validate the custom certificates for TLS of the target URL. + - **Cert file**: Path of the file to the client certificate required for mTLS. + - **Key file**: Path of the file to the client key required for mTLS. + - **Insecure Skip Verify**: If enabled, the probe bypasses the SSL/TLS certificate verification, allowing requests to proceed even if the certificate is invalid or self-signed. + + Go to [TLS](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/#tls) for more information. + + ![](./static/auth-3.png) + ![](./static/auth-3-1.png) + + +6. Provide the **Prometheus Query** (**Query** or **Query Path** depending on your usage). + + - **Query**: The PromQL query used with the probe to fetch the desired Prometheus metrics. Ensure that the strings inside the query are enclosed within backslash ("/"). This field is mutually exclusive with **Query Path** field. + - **Query Path**: Path of the file where PromQL query is present. This field is mutually exclusive with **Query** field. + + Go to [Schema](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/#schema) for more information. + + ![](./static/query-4.png) + +7. Specify the data comparison fields, and click **Configure Properties**. + + The data returned using the PromQL **Query** or **Query Path** is compared to the following fields: + + - **Type**: Type of data compared with result of Prometheus query. Accepts only `float` data type. 
+ - **Comparison Criteria**: The criteria (`>=`, `<=`, `==`, `<`, `>`, `!=`, and so on) based on which the **value** and the result of Prometheus query are compared. + - **Value**: The value with which the result of Prometheus query is compared + + Go to [Comparator](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/#comparator) for more information. + + ![](./static/comparison-5.png) + + +8. Specify general probe properties such as timeout, interval, and so on. Click **Create Probe**. + +- **Timeout**: Time limit for the probe to execute the check and return the expected output. +- **Interval**: Duration for which the probe waits between subsequent attempts. +- **Attempt**: Number of times a check is executed upon failure in the previous attempts before declaring the probe status as `FAILED`. +- **Polling Interval**: Time interval for which `continuous` and `onchaos` probe modes should wait after each iteration. +- **Intitial Delay**: Duration to wait before the probe begins execution. +- **Verbosity**: Level of detail to include in the logs generated during the execution of the probe. Choose between `info` (essential logs, probe status are printed) and `debug` (in-depth logs, timestamps, and execution logs are printed) mode. +- **Stop on Failure (Optional)**: Enable it to continue or disable it to stop the experiment execution after the probe fails. Disabled by default. + +Go to [Run Properties](/docs/chaos-engineering/use-harness-ce/probes/prom-probe/#run-properties) for more information. 
+ + ![](./static/properties-6.png) diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe.md b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/prom-probe.md similarity index 88% rename from docs/chaos-engineering/use-harness-ce/probes/prom-probe.md rename to docs/chaos-engineering/use-harness-ce/probes/prom-probe/prom-probe.md index 22bf2a1b3e6..beaaa391dea 100644 --- a/docs/chaos-engineering/use-harness-ce/probes/prom-probe.md +++ b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/prom-probe.md @@ -9,41 +9,16 @@ redirect_from: - /docs/chaos-engineering/concepts/explore-concepts/resilience-probes/prom-probe --- -import CommonNote from './shared/common-note.md' +import CommonNote from '../shared/common-note.md' -The Prometheus probe allows users to run Prometheus queries and match the resulting output against specific conditions. The intent behind this probe is to allow users to define metrics-based SLOs in a declarative way and determine the experiment verdict based on their success. The probe runs the query on a Prometheus server defined by the endpoint and checks whether the output satisfies the specified criteria. The outcome of a PromQL query (that is provided) is used for probe validation. +Prometheus probe allows you to run Prometheus queries and match the resulting output against specific conditions. You can define metrics-based SLOs in a declarative way and determine the experiment verdict based on their success. The probe runs the query on a Prometheus server defined by the endpoint and checks whether the output satisfies the specified criteria. The outcome of a PromQL query (that is provided) is used for probe validation. :::info YAML only feature -In case of complex queries that span multiple lines, the `queryPath` attribute can be used to provide the link to a file consisting of the query. 
This file can be made available in the experiment pod via a ConfigMap resource, with the ConfigMap being passed in the [ChaosEngine](https://litmuschaos.github.io/litmus/experiments/concepts/chaos-resources/chaos-engine/contents/) or the [ChaosExperiment](https://litmuschaos.github.io/litmus/experiments/concepts/chaos-resources/chaos-experiment/contents/) CR. Also, `query` and `queryPath` attributes are mutually exclusive. Refer to the probe schema [here](https://docs.litmuschaos.io/docs/concepts/probes#promprobe). +In case of complex queries that span multiple lines, the `queryPath` attribute can be used to provide the link to a file consisting of the query. This file can be made available in the experiment pod via a ConfigMap resource, with the ConfigMap being passed in the [ChaosEngine](https://litmuschaos.github.io/litmus/experiments/concepts/chaos-resources/chaos-engine/contents/) or the [ChaosExperiment](https://litmuschaos.github.io/litmus/experiments/concepts/chaos-resources/chaos-experiment/contents/) CR. Refer to the probe schema [here](https://docs.litmuschaos.io/docs/concepts/probes#promprobe). ::: -## Probe definition - -You can define the probes at **.spec.experiments[].spec.probe** path inside the chaos engine. - -```yaml -kind: Workflow -apiVersion: argoproj.io/v1alpha1 -spec: - templates: - - inputs: - artifacts: - - raw: - data: | - apiVersion: litmuschaos.io/v1alpha1 - kind: ChaosEngine - spec: - experiments: - - spec: - probe: - #################################### - Probes are defined here - #################################### -``` - -:::tip -The Prometheus probe expects you to provide a PromQL query along with Prometheus service endpoints to check for specific criteria. -::: +### Input +Prometheus probe takes a PromQL query along with Prometheus service endpoints as input to check for specific criteria. 
## Schema @@ -64,20 +39,6 @@ Listed below is the probe schema for the Prometheus probe, with properties share N/A type: string The name holds the name of the probe. It can be set based on the use case - - type - Flag to hold the type of the probe - Mandatory - httpProbe, k8sProbe, cmdProbe, promProbe, and datadogProbe - The type supports five types of probes: httpProbe, k8sProbe, cmdProbe, promProbe, and datadogProbe. - - - mode - Flag to hold the mode of the probe - Mandatory - SOT, EOT, Edge, Continuous, OnChaos - The mode supports five modes of probes: SOT, EOT, Edge, Continuous, and OnChaos. Datadog probe supports EOT mode only. - endpoint Flag to hold the prometheus endpoints for the promProbe @@ -155,11 +116,11 @@ The `credentials` and `credentialsFile` are two options that can't be used simul Flag to hold the authentication type Optional string - The type encompasses the authentication method, which includes support for both basic and bearer authentication types + The type encompasses the authentication method, which includes support for both `basic` and `bearer` authentication types. credentials - Flag to hold the basic auth credentials in `base64` format or `bearer` token + Flag to hold the basic auth credentials in `base64` format or `bearer`. 
token Optional string The credentials consists of the basic authentication credentials, either as username:password encoded in `base64` format or as a `bearer` token, depending on the authentication type diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3-1.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3-1.png new file mode 100644 index 00000000000..3f8231752ae Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3-1.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3.png new file mode 100644 index 00000000000..e8d19b70286 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/auth-3.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/comparison-5.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/comparison-5.png new file mode 100644 index 00000000000..990f605c1cb Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/comparison-5.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/details-2.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/details-2.png new file mode 100644 index 00000000000..dda03639d06 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/details-2.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/name-1.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/name-1.png new file mode 100644 index 00000000000..6829784882e Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/name-1.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/navigate-1.png 
b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/navigate-1.png new file mode 100644 index 00000000000..d60edbf3a46 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/navigate-1.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/properties-6.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/properties-6.png new file mode 100644 index 00000000000..11338c5e069 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/properties-6.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/query-4.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/query-4.png new file mode 100644 index 00000000000..39fb2ec4eaf Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/query-4.png differ diff --git a/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/select-prom.png b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/select-prom.png new file mode 100644 index 00000000000..554003bea77 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/probes/prom-probe/static/select-prom.png differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery.md b/docs/chaos-engineering/use-harness-ce/service-discovery.md new file mode 100644 index 00000000000..d4dc7f2643c --- /dev/null +++ b/docs/chaos-engineering/use-harness-ce/service-discovery.md @@ -0,0 +1,38 @@ +--- +id: service-discovery +sidebar_position: 1 +title: Service Discovery +--- + +## Before you begin, review the following: + +- [Service Discovery](/docs/platform/service-discovery/) + +### How does Harness CE leverage discovered services? + +Harness CE uses the discovered services to identify the various available services in the chaos module, that is, the chaos targets in the Kubernetes cluster that you can deploy using Harness or other means. 
+ +As an Harness CE user, service discovery simplifies your decision-making around: + +- Which service to target to inject chaos? +- Which chaos faults to inject into a target service? +- What validations and health checks to perform while executing chaos faults? + +Consequently, you will be able to find the resilience of your service (with the help of resilience coverage reports, service-level resilience scores, and other such metrics). + +:::tip +You can leverage all the [permissions mentioned](/docs/chaos-engineering/security/security-templates/openshift-scc#run-service-account-as-a-cluster-admin) for fault execution as well as service discovery. +::: + +### Advantages + +- Reduces overhead of creating a database with services +- User-friendly +- Increased adoption of Harness CE + +When you are onboarding, one of the steps involves discovering services. Harness CE creates the discovery agent that automatically discovers services for your application. + +## Next Steps + +- [Customize Discovery Agent](/docs/platform/service-discovery/#customize-discovery-agent) +- [Delete Discovery Agent](/docs/platform/service-discovery/#delete-discovery-agent) diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/_category_.json b/docs/chaos-engineering/use-harness-ce/service-discovery/_category_.json deleted file mode 100644 index 614dd9a8e12..00000000000 --- a/docs/chaos-engineering/use-harness-ce/service-discovery/_category_.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "position": 80, - "label": "Service Discovery", - "collapsible": true, - "collapsed": true, - "customProps": { - "description": "Service Discovery" - } -} \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/service-discovery.md b/docs/chaos-engineering/use-harness-ce/service-discovery/service-discovery.md deleted file mode 100644 index 1d75c1e3f01..00000000000 --- a/docs/chaos-engineering/use-harness-ce/service-discovery/service-discovery.md +++ /dev/null 
@@ -1,93 +0,0 @@ ---- -id: service-discovery -sidebar_position: 1 -title: Service Discovery -redirect_from: -- /docs/chaos-engineering/features/service-discovery/intro-service-discovery -- /docs/category/service-discovery -- /docs/chaos-engineering/concepts/explore-concepts/service-discovery/ ---- - -This topic introduces you to **service discovery** for **Kubernetes infrastructure**, its significance, and its purpose. - -### What is a discovered service? -Discovered service is an entity on the Harness platform (also known as the control plane) that corresponds to a Kubernetes service on your (user) cluster. It includes details about the connections made from and to it. - -Below is the control flow to a discovered service. - - ![](./static/control-flow-1.png) - -### Why is a discovered service required? - -Large enterprises deal with hundreds of deployed services. Monitoring these services or incorporating chaos engineering on these services would require building a database of these services and the relationship between these services, which is time-consuming. Service discovery comes into the picture: Instead of creating a database with the services, **automatically discover services** and the relationship between them. - -### How does HCE discover services? - -HCE performs the following steps to discover services in your cluster: -1. Scan your Kubernetes cluster periodically (you can define the interval or ad-hoc). -2. Build a database of services that describes the relationship between the services. -3. Provide APIs to group the discovered services into a map, wherein the map represents a topological view of an application. - -### How does HCE leverage discovered services? - -HCE tests the resilience of the application, and HCE uses the discovered services to: - -- Identify the various available services in the chaos module, that is, the chaos targets in the Kubernetes cluster that you can deploy using Harness or other means. 
-- Record the resources backing the above services (logical resources such as deployment, pods, containers, processes, FQDNs, ports, physical resources such as nodes, storage, and so on). -- Highlight the given service's position and the lineage within the topology view. - -As an HCE user, service discovery simplifies your decision-making around: - -- Which service to target? -- Which chaos faults to inject into a target service? -- What validations and health checks to perform while executing chaos faults? - -Consequently, you will be able to find the resilience of your service (with the help of resilience coverage reports, service-level resilience scores, and other such metrics). - -:::tip -You can leverage all the [permissions mentioned](/docs/chaos-engineering/security/security-templates/openshift-scc#run-service-account-as-a-cluster-admin) for fault execution as well as service discovery. -::: - -### Advantages - -- Reduces overhead of creating a database with services -- User-friendly -- Increased adoption of HCE - -When you are onboarding, one of the steps involves discovering services. HCE creates the discovery agent that automatically discovers services for your application. - -If you want to customize the discovery agent, follow the steps below. - -### Customize discovery agent - -1. To customize the discovery agent, navigate to **Chaos**, select **Project Settings** and select **Discovery**. - - ![](./static/discovery.png) - -2. Click **New Discovery Agent**. - - ![](./static/select-agent.png) - -3. Select an **environment**, **infrastructure**, **Discovery Agent Name** and **namespace**. The next step (optional) requires information such as node selector, blacklisted namespaces, and period of detecting the services. Select **Create New Discovery Agent**. - - ![](./static/add-details-discovery.png) - -### Edit discovery agent - -1. To edit a discovery agent, navigate to the agent and click **Edit**. Make the necessary changes to the required fields. 
- - ![](./static/edit-dis-agent-1.png) - -2. Select **Update Discovery Agent**. - - ![](./static/edit-details-discovery.png) - -### Delete discovery agent - -1. To delete a discovery agent, navigate to the agent you want to delete and select **Delete**. - - ![](./static/delete-1.png) - -2. Select **Delete**. - - ![](./static/confirm-2.png) \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/add-details-discovery.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/add-details-discovery.png deleted file mode 100644 index 4455c5ab7df..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/add-details-discovery.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/confirm-2.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/confirm-2.png deleted file mode 100644 index cd8047ce182..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/confirm-2.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/control-flow-1.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/control-flow-1.png deleted file mode 100644 index ce4e72e43d9..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/control-flow-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/delete-1.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/delete-1.png deleted file mode 100644 index 637d87b8be7..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/delete-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-details-discovery.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-details-discovery.png deleted file mode 100644 index 
8b18f4bda89..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-details-discovery.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-dis-agent-1.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-dis-agent-1.png deleted file mode 100644 index 2bdfaa62893..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/edit-dis-agent-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-1.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-1.png deleted file mode 100644 index 9b7bd0afa65..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-1.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-2.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-2.png deleted file mode 100644 index 800cb9db5e5..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-2.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-4.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-4.png deleted file mode 100644 index 96edddd9f51..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-4.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-5.png b/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-5.png deleted file mode 100644 index 7b3f8a62771..00000000000 Binary files a/docs/chaos-engineering/use-harness-ce/service-discovery/static/proxy/sd-5.png and /dev/null differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/user-defined-service-account.md 
b/docs/chaos-engineering/use-harness-ce/service-discovery/user-defined-service-account.md deleted file mode 100644 index d2ed4b1aff6..00000000000 --- a/docs/chaos-engineering/use-harness-ce/service-discovery/user-defined-service-account.md +++ /dev/null @@ -1,479 +0,0 @@ ---- -title: Restrict Discovery to Specific Namespace(s) -sidebar_position: 3 -description: Restrict Discovery to Single and Multiple Namespaces. -redirect_from: - - /docs/chaos-engineering/concepts/explore-concepts/service-discovery/user-defined-service-account ---- - -This topic describes how you can use user-defined service accounts in different scopes to discover services. You can create the necessary roles in your cluster, and provide the service account name in the UI. - -### Cluster Scope -In this scope, the service account is created by default and the discovery runs in cluster scope by default. - -:::tip -This is the default mode of operation and if you want to create service account, refer to the YAML below and provide the service account name in the UI. -::: - -
- Cluster Scope - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cluster-discovery -``` - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: da-mgmt - namespace: hce-sa -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list -- apiGroups: - - "" - resources: - - pods/log - verbs: - - get - - list - - watch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update -``` - -The YAML below describes how the role `da-discovery` is created in cluster scope and how RoleBinding is used with the role `da-mgmt`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: da-discovery -rules: -- apiGroups: - - apps - resources: - - deployments - - replicasets - - daemonsets - - statefulsets - verbs: - - watch - - list - - get -- apiGroups: - - "" - resources: - - pods - - replicationcontrollers - - services - - statefulsets - - nodes - - namespaces - verbs: - - watch - - list - - get -- apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - watch - - list - - get -``` - -The YAML describes how RoleBinding is used with the role `da-mgmt`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-mgmt - namespace: hce-sa -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: da-mgmt -subjects: -- kind: ServiceAccount - name: cluster-discovery - namespace: hce-sa -``` - -The YAML describes how ClusterRoleBinding is used with the cluster role `da-discovery`. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: da-discovery -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: da-discovery -subjects: -- kind: ServiceAccount - name: cluster-discovery - namespace: hce-sa -``` - -:::tip -The `da-mgmt` role is common to all ways creating service account for discovered services because managing the discovery is required for all scopes. -::: - -
- -### Single Namespace Scope -When you want to discover resources from a particular namespace, you can create a service account with the role, `da-mgmt`. This role is bound to the service account. - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: namespace-discovery -``` - -The role `da-mgmt` is required during the process of service discovery to manage the discovery process. - -To manage the entire process of discovery, it is required to create pods that are transient. Hence, the role `da-mgmt` is created (that is common to all modes) that is described below. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: da-mgmt - namespace: hce-sa -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list -- apiGroups: - - "" - resources: - - pods/log - verbs: - - get - - list - - watch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update -``` - -To discover services and metadata associated with it, you need to create a role `da-discovery`. -The YAML below describes creating a role `da-discovery` in the namespace `hce-sa`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: da-discovery - namespace: hce-sa -rules: -- apiGroups: - - apps - resources: - - deployments - - replicasets - - daemonsets - - statefulsets - verbs: - - watch - - list - - get -- apiGroups: - - "" - resources: - - pods - - replicationcontrollers - - services - - statefulsets - verbs: - - watch - - list - - get -- apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - watch - - list - - get -``` - -The YAML below describes how the `da-mgmt` RoleBinding is applied to service account `namespace-discovery`. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-mgmt - namespace: hce-sa -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: da-mgmt -subjects: -- kind: ServiceAccount - name: namespace-discovery - namespace: hce-sa -``` - -The YAML below describes how the `da-discovery` role is bound to service account `namespace-discovery`. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-discovery - namespace: hce-sa -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: da-discovery -subjects: -- kind: ServiceAccount - name: namespace-discovery - namespace: hce-sa -``` - -### Multiple Namespaces - -You can add [multiple namespaces in the UI](#multiple-namespace-scope) by selecting the **Inclusion** option in the UI. To exclude certain namespaces, select **Exclusion** and specify namespaces to exclude from the service discovery process. - -The `da-mgmt` role remains constant, to help manage service discovery process. - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: multiple-namespace-discovery ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: da-mgmt - namespace: hce-sa -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list -- apiGroups: - - "" - resources: - - pods/log - verbs: - - get - - list - - watch -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - get - - list - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-mgmt - namespace: hce-sa -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: da-mgmt -subjects: -- kind: ServiceAccount - name: multiple-namespace-discovery - namespace: hce-sa -``` - -The YAML below describes how the cluster role `da-discovery` is configured to discover services and metadata associated with it. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: da-discovery -rules: -- apiGroups: - - apps - resources: - - deployments - - replicasets - - daemonsets - - statefulsets - verbs: - - watch - - list - - get -- apiGroups: - - "" - resources: - - pods - - replicationcontrollers - - services - - statefulsets - verbs: - - watch - - list - - get -- apiGroups: - - batch - resources: - - jobs - - cronjobs - verbs: - - watch - - list - - get -``` - -If you want to have multiple namespaces when discovering services, you can create RoleBindings to bind the cluster role with the specific namespace. -To enable discovery for two namespaces, say `hce` and `cert-manager`, you need two role bindings. -The YAML below describes how you can achieve this. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-discovery - namespace: hce -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: da-discovery -subjects: -- kind: ServiceAccount - name: multiple-namespace-discovery - namespace: hce-sa ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: da-discovery - namespace: cert-manager -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: da-discovery -subjects: -- kind: ServiceAccount - name: multiple-namespace-discovery - namespace: hce-sa -``` - -### Additional Permissions for Multiple Namespaces - -To discover traffic on multiple namespaces, additional permissions are necessary. -Without the additional permissions, connectivity can't be discovered in single and multiple namespaces. - -The YAML below describes how you can attach additional permissions with the existing service account. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: da-discovery-extra -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - watch - - list - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: da-discovery-extra -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: da-discovery-extra -subjects: -- kind: ServiceAccount - name: multiple-namespace-discovery - namespace: hce-sa -``` - -Once you create the necessary roles in your cluster, add the service account name in the UI. Follow the steps below. - -1. Go to **Chaos** module and select **Projects**. Select **Discovery** and click **New Discovery Agent**. - - ![](./static/proxy/sd-1.png) - -2. Provide the **Environment**, **Infrastructure**, **Discovery Agent Name**, and **Namespace**. - - ![](./static/proxy/sd-2.png) - -:::info note -In case of cluster scope, you can provide the service account name in the UI to discover services. -::: - -### Single Namespace Scope -To use single namespace, select **Inclusion** and provide the namespace. Disable the **Detect network trace connectivity**. - - ![](./static/proxy/sd-4.png) - -### Multiple Namespace Scope - -To use multiple namespaces, provide multiple namespaces and click **Create New Discovery Agent**. - - ![](./static/proxy/sd-5.png) - -:::tip -- If you are not using additional permissions, disable the **Detect network trace connectivity** (it is enabled by default that corresponds to single namespace scope). 
-::: \ No newline at end of file diff --git a/docs/chaos-engineering/use-harness-ce/static/image-registry/chaos-engineering-img-registry-perms.png b/docs/chaos-engineering/use-harness-ce/static/image-registry/chaos-engineering-img-registry-perms.png new file mode 100644 index 00000000000..e17fa5c5cb7 Binary files /dev/null and b/docs/chaos-engineering/use-harness-ce/static/image-registry/chaos-engineering-img-registry-perms.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/rbac-autostopping.md b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/rbac-autostopping.md new file mode 100644 index 00000000000..7b579fd1cb4 --- /dev/null +++ b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/rbac-autostopping.md @@ -0,0 +1,45 @@ +--- +title: Granular RBAC for AutoStopping +description: This document provides details for granular RBAC for autostopping. +sidebar_position: 50 +--- + +With RBAC, users can control view, create+edit, and delete access for AWS, Azure, and GCP autostopping resources based on cloud connectors (cloud accounts). Granular permission is an additional fine-grained permission level on top of global RBAC levels. Global RBAC currently provides the following roles: +- `CCM Viewer`: Grants permission to view CCM entities, including AutoStopping rules and load balancers as a whole. +- `CCM Admin`: Grants permission to create/edit/delete CCM entities, including all AutoStopping rules and load balancers. + +While global RBAC assigns broad roles such as Admin, Viewer, or Editor, granular permissions enable precise control over which resources and actions a user can access. This reduces security risks by granting only the necessary permissions. It also improves multi-team management by restricting access based on specific resource groups, such as connectors, without exposing unnecessary data.
+ +Connectors are shared resources in the Harness account, meaning multiple users can use them. Since they are shared, they appear in the Shared Resources section when creating a Resource Group (RG) in the ACL (Access Control List) module. Admins can create Resource Groups (RGs) for connectors by selecting the relevant connectors. + +Once an RG is created, it can be assigned to a user along with a role. This ensures that the user can only perform operations allowed by the role and only on the resources specified in the RG. + +Example: +If a user is assigned the `CCM Viewer` role on an RG called `rg_dev_connectors`, which includes a connector named `dev_connector` (linked to a DEV cloud account), then: + +- The user will have only viewer access to AutoStopping Rules (ASRs) and load balancers created using `dev_connector`. +- They will not be able to modify or manage other ASRs or connectors. + + +![](./static/granular-rbac-one.png) +![](./static/granular-rbac-four.png) + +To control which cloud accounts a user can perform the above actions on, you need to create a **Resource Group** under **Account Settings > Resource Groups** that defines the appropriate access. + +1. Under **"Shared Resources"**, select **"Connectors"**, then choose **"Specified"**. +2. Select all the **CCM AWS Account Connectors** for the cloud accounts you want to grant access to. +3. Create as many **Resource Groups** as needed, depending on the number of distinct access patterns required. + +![](./static/granular-rbac-two.png) + +Once you have a **role** and a **resource group**, you can assign access to a **user, group, or service account**. + +To do this, use your custom **"Autostopping" role** and select the **resource group** that defines the appropriate access for the user, group, or account. + +![](./static/granular-rbac-three.png) + + +To ensure users can properly view AutoStopping Rules, you must grant them **`Connector:View`** permission.
This allows them to load all necessary information related to AutoStopping Rules. + +![](./static/ui-error.png) +![](./static/ui-error-two.png) \ No newline at end of file diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-four.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-four.png new file mode 100644 index 00000000000..d95a0d341b6 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-four.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-one.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-one.png new file mode 100644 index 00000000000..7446cf01ff8 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-one.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-three.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-three.png new file mode 100644 index 00000000000..d68aa9b0cc7 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-three.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-two.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-two.png new file mode 100644 index 00000000000..77e1fb25523 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/granular-rbac-two.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error-two.png 
b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error-two.png new file mode 100644 index 00000000000..2395ca8bfd6 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error-two.png differ diff --git a/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error.png b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error.png new file mode 100644 index 00000000000..f93e4321dd1 Binary files /dev/null and b/docs/cloud-cost-management/4-use-ccm-cost-optimization/autostopping-guides/static/ui-error.png differ diff --git a/docs/cloud-development-environments/ides/intellij.md b/docs/cloud-development-environments/ides/intellij.md index 982902290eb..96697e64043 100644 --- a/docs/cloud-development-environments/ides/intellij.md +++ b/docs/cloud-development-environments/ides/intellij.md @@ -1,6 +1,6 @@ --- title: IntelliJ IDEA -sidebar_position: 3 +sidebar_position: 4 description: Connect to your Gitspaces within IntelliJ IDEA. sidebar_label: IntelliJ IDEA --- diff --git a/docs/cloud-development-environments/ides/jetbrains-gateway.md b/docs/cloud-development-environments/ides/jetbrains-gateway.md new file mode 100644 index 00000000000..309e09fe0ae --- /dev/null +++ b/docs/cloud-development-environments/ides/jetbrains-gateway.md @@ -0,0 +1,101 @@ +--- +title: JetBrains Gateway Plugin +sidebar_position: 3 +description: Connect and manage your Gitspaces with JetBrains IDEs using the JetBrains Gateway Plugin. +sidebar_label: JetBrains Gateway Plugin +--- + +Harness CDE supports seamless and efficient remote development in **JetBrains IDEs** using **JetBrains Gateway**. + +[**JetBrains Gateway**](https://www.jetbrains.com/remote-development/gateway/) is a lightweight desktop application that allows you to work remotely with **JetBrains IDEs** without downloading the full IDE. 
It connects to a remote server, fetches the necessary backend components, and opens your project in a **JetBrains client**. + +With the [**Harness Gitspaces Plugin**](https://plugins.jetbrains.com/plugin/26594-harness-gitspaces), you can seamlessly access and manage your **Gitspaces** created in JetBrains IDEs. This plugin ensures smooth navigation and efficient development within your IDE. + +The following JetBrains IDEs are supported for remote development: +- IntelliJ IDEA +- PyCharm +- PhpStorm +- GoLand +- CLion +- Rider +- RubyMine +- Webstorm + +![](./static/manage-plugin-2.png) + +## Pre-Requisites + +#### Install Harness Gitspaces JetBrains Plugin Package +Ensure that you have downloaded the latest version of the [**Harness Gitspaces JetBrains Gateway Plugin**](https://plugins.jetbrains.com/plugin/26594-harness-gitspaces) package. Follow [these steps](/docs/cloud-development-environments/ides/jetbrains-gateway#installing-the-plugin) to install and configure the plugin. + +#### Install JetBrains Gateway +Before proceeding, ensure that [JetBrains Gateway](https://www.jetbrains.com/remote-development/gateway/) is installed on your device. + +#### Recommended Gitspace Configuration +Refer to [this section](/docs/cloud-development-environments/ides/intellij#recommended-gitspace-configuration) to understand the **recommended Gitspace requirements** for optimal performance when connecting to your Gitspace in JetBrains IDEs. + +## Installing the Plugin + +Follow these steps to install the **Harness Gitspace Plugin**: + +1. Once you've installed JetBrains Gateway, click the **settings icon** in the bottom-left corner of the application. +Image + +2. Select **"Manage Providers."** +Image + +3. From the **Plugins Marketplace**, search for **Harness Gitspaces**. Click **Install**. +Image + +4. Once you've installed the plugin, click **"OK"** in the bottom-corner page. +Image + +5. That’s it! 
You will now see the plugin successfully installed in your **JetBrains Gateway connections.** +Image + + + +## Configuring the Plugin +Now that you've successfully installed the plugin, you can configure it in **JetBrains Gateway** using the following steps: + +1. Click on **"Harness Gitspaces"** from the sidebar connections. +2. You will be prompted to configure the app URL in JetBrains Gateway. Enter: **"https://app.harness.io"** and click **"Connect"** +Image + +3. You will be redirected to the **Harness platform** to sign in. Enter your credentials to log into your account. +Image + +4. That’s it! Once configured, you can view all your **Gitspaces** created in JetBrains IDEs directly within the **JetBrains Gateway** application. +![](./static/configure-plugin-3.png) + +## Managing Your Gitspaces + +You can access and manage your **Gitspaces** (only those created in JetBrains IDEs) directly within the **JetBrains Gateway** application. + +### Access Your Gitspaces + +You can connect to your **Gitspaces** directly from the **Gateway** application: + +1. **For an actively running Gitspace**, click on **"Connect."** This will connect you to your remote Gitspace within your selected IDE. +2. **For a stopped Gitspace**, clicking on **"Connect"** will redirect you to the **Harness Gitspaces UI**, where you can check its details. + +![](./static/connect-plugin.png) + +:::info +**Note:** While a Gitspace is transitioning between **started and stopped states**, its status in the application will be displayed as **"Busy."** This indicates that the Gitspace is undergoing the transition. +::: + +### Start Your Gitspaces + +You can start your **stopped Gitspaces** directly from **JetBrains Gateway**: +- Click the **Green Start** icon to start your Gitspace. To open your Gitspace in your preferred IDE, refer to the IDE-specific documentation. 
For example, here’s how you can connect to your [Gitspace in IntelliJ IDEA](/docs/cloud-development-environments/ides/intellij#open-the-gitspace-in-intellij). +![](./static/start-gitspace-plugin.png) +- This icon will be visible **only if your Gitspace is stopped**. + +### Stop Your Gitspaces + +You can stop your **active Gitspaces** directly from **JetBrains Gateway**: +- Click the **Red Stop** icon to stop it from running. +![](./static/stop-gitspace-plugin.png) +- This icon will be visible **only if your Gitspace is currently active**. + diff --git a/docs/cloud-development-environments/ides/static/configure-plugin-3.png b/docs/cloud-development-environments/ides/static/configure-plugin-3.png new file mode 100644 index 00000000000..0a13ffa9f63 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/configure-plugin-3.png differ diff --git a/docs/cloud-development-environments/ides/static/connect-plugin.png b/docs/cloud-development-environments/ides/static/connect-plugin.png new file mode 100644 index 00000000000..db1f44b491e Binary files /dev/null and b/docs/cloud-development-environments/ides/static/connect-plugin.png differ diff --git a/docs/cloud-development-environments/ides/static/install-plugin-1.png b/docs/cloud-development-environments/ides/static/install-plugin-1.png new file mode 100644 index 00000000000..b06bf51c860 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/install-plugin-1.png differ diff --git a/docs/cloud-development-environments/ides/static/install-plugin-2.png b/docs/cloud-development-environments/ides/static/install-plugin-2.png new file mode 100644 index 00000000000..19af10d21ae Binary files /dev/null and b/docs/cloud-development-environments/ides/static/install-plugin-2.png differ diff --git a/docs/cloud-development-environments/ides/static/install-plugin-3.png b/docs/cloud-development-environments/ides/static/install-plugin-3.png new file mode 100644 index 00000000000..9fc28e284a9 Binary 
files /dev/null and b/docs/cloud-development-environments/ides/static/install-plugin-3.png differ diff --git a/docs/cloud-development-environments/ides/static/install-plugin-4.png b/docs/cloud-development-environments/ides/static/install-plugin-4.png new file mode 100644 index 00000000000..c41a0d4c806 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/install-plugin-4.png differ diff --git a/docs/cloud-development-environments/ides/static/install-plugin-5.png b/docs/cloud-development-environments/ides/static/install-plugin-5.png new file mode 100644 index 00000000000..040f1ea5e02 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/install-plugin-5.png differ diff --git a/docs/cloud-development-environments/ides/static/manage-plugin-2.png b/docs/cloud-development-environments/ides/static/manage-plugin-2.png new file mode 100644 index 00000000000..57dd74ed971 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/manage-plugin-2.png differ diff --git a/docs/cloud-development-environments/ides/static/start-gitspace-plugin.png b/docs/cloud-development-environments/ides/static/start-gitspace-plugin.png new file mode 100644 index 00000000000..ec82bec4923 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/start-gitspace-plugin.png differ diff --git a/docs/cloud-development-environments/ides/static/stop-gitspace-plugin.png b/docs/cloud-development-environments/ides/static/stop-gitspace-plugin.png new file mode 100644 index 00000000000..3aad3fa8289 Binary files /dev/null and b/docs/cloud-development-environments/ides/static/stop-gitspace-plugin.png differ diff --git a/docs/continuous-delivery/cd-infrastructure/terragrunt-howtos.md b/docs/continuous-delivery/cd-infrastructure/terragrunt-howtos.md index 96853deb364..c23b06e12e0 100644 --- a/docs/continuous-delivery/cd-infrastructure/terragrunt-howtos.md +++ b/docs/continuous-delivery/cd-infrastructure/terragrunt-howtos.md @@ 
-88,6 +88,13 @@ terragrunt --version ### Supported Terragrunt and Terraform versions +In Harness, Terragrunt is fully supported up to version 0.66.9 + +Starting from 0.67.0, Terragrunt introduced a breaking change that may cause some issues because of new logging format. See [Terragrunt 0.67.0 breaking change](https://github.com/gruntwork-io/terragrunt/releases/tag/v0.67.0) + +If you are using Terragrunt 0.67.0 or higher, you need to enable the environment variable TERRAGRUNT_FORWARD_TF_STDOUT to maintain the previous logging behavior. +This ensures that Terragrunt preserves its standard logging format, preventing disruptions in Harness pipelines. + Terragrunt maintains a Terraform version compatibility table to help ensure that you have the correct versions of Terragrunt and Terraform running together. For the Terraform versions supported by Terragrunt, go to [Terraform Version Compatibility Table](https://terragrunt.gruntwork.io/docs/getting-started/supported-versions/). diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-lambda-deployments.md b/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-lambda-deployments.md index 9b14383d91f..888b5e08f0f 100644 --- a/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-lambda-deployments.md +++ b/docs/continuous-delivery/deploy-srv-diff-platforms/aws/aws-lambda-deployments.md @@ -21,7 +21,6 @@ This topic covers the basics of Harness' Lambda support and provides examples on - Currently, Lambda functions can be packaged as ZIP files in S3 Buckets or as containers in AWS ECR. - If Harness were to support another repository, like Nexus, when the container is fetched by the API, AWS spins up AWS resources (S3, ECR) anyways, and so Harness has limited support to S3 and ECR. - The containers must exist in ECR. Containers are not supported in other repositories. -- Currently, Lambda functions can't be deployed with an OIDC-enabled AWS Connector. 
## AWS IAM permissions @@ -147,6 +146,8 @@ AWS connectors are used in your Harness service for the artifact you select in * You can use the same connector or different connectors, but ensure that the credentials provided for the artifact connector are sufficient to fetch the ZIP or image and the credentials provided for the infrastructure definition connector are sufficient to deploy to Lambda. +OIDC connectors are also supported for Lambda deployments, and this functionality is available starting with **delegate version 851xx or later**. + ## Harness service configuration When you create a Harness service, select **AWS Lambda** to define a service that represents the AWS Lambda function you want to deploy. diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md b/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md index 5f392161a7e..3abb40c422f 100644 --- a/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md +++ b/docs/continuous-delivery/deploy-srv-diff-platforms/serverless/serverless-lambda-cd-quickstart.md @@ -76,7 +76,7 @@ Review [Harness Key Concepts](/docs/platform/get-started/key-concepts) to esta - View and copy the API Key and Secret to a temporary place. You'll need them when setting up the Harness AWS Connector later in this quickstart. - **Full Admin Access:** click on **Attach existing policies directly**. Search for and select **AdministratorAccess** then click **Next: Review**. Check to make sure everything looks good and click **Create user**. - **Limited Access:** click on **Create policy**. Select the **JSON** tab, and add the JSON using the following code from the [Serverless gist](https://gist.github.com/ServerlessBot/7618156b8671840a539f405dea2704c8) IAMCredentials.json: -- **OIDC-enabled AWS Connector**: Serverless functions can be deployed using an OIDC-enabled AWS Connector. 
+- **OIDC-enabled AWS Connector**: Serverless functions can be deployed using an OIDC-enabled AWS Connector, and this functionality is available starting with **delegate version 851xx or later**.
IAMCredentials.json @@ -207,12 +207,6 @@ The `s3:GetBucketLocation` action is required for a custom S3 bucket only. import IrsaPartial from '/docs/shared/aws-connector-auth-options.md'; -:::note - -Currently, Serverless functions can't be deployed with an OIDC-enabled AWS connector. - -::: - ## Serverless framework support diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/ssh-ng.md b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/ssh-ng.md index ccc359ede5f..cce2fff0cf4 100644 --- a/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/ssh-ng.md +++ b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/ssh-ng.md @@ -521,6 +521,124 @@ Command finished with status SUCCESS ``` Congratulations! You have now successfully created and completed the steps for running a pipeline by using Secure Shell. +## Selective Rerun and Skipping Hosts with Same Artifact + +You can now skip the **hosts where the last deployment was successful using the same artifact** for traditional deployments. These improvements ensure: +- **Efficient reruns**: Redeploy only on failed hosts instead of all hosts. +- **Expressions for failed hosts**: Retrieve failed hosts dynamically for debugging, fixing and rerunning on only failed hosts. + +To use this feature, navigate to the **Advanced** tab of the **CD stage**, enable the **Skip instances with the same artifact version already deployed** checkbox. + +You can enable this checkbox using the run-time by making this checkbox a **Runtime Input**. + +
+ +
+ +:::note +Currently, the Selective Failed Hosts Rerun feature is behind the feature flag `CDS_SKIP_INSTANCES_V2`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. + +**Change in Behavior with Feature Flag Activation** +Enabling the `CDS_SKIP_INSTANCES_V2` feature flag **enhances the skip instances feature** for improved reliability across deployment scenarios. The updated behavior includes: + +- **Org/Account-Level Service & Environment Handling**: Ensures consistent application of skip instance logic across different organizational scopes. +- **Partial Success Handling**: Tracks and skips only successfully deployed hosts, preventing unnecessary re-deployments. +::: + +**Success Criteria for Deployment on a Host** + +- **Successfully Deployed Criteria**: A host is considered successfully deployed only if **all command steps in an execution complete successfully**. +- **Deployed Criteria**: A host is considered deployed if **any command step execution occurs on the host**. + +**Key Features** + +**1. Selective Retry for Failed Hosts** +- Deployment retries now **target only failed hosts** instead of redeploying on all hosts when the **Skip instances with the same artifact version already deployed** checkbox is enabled. + +**2. Enhanced Skip Instances Feature** +- Deployment is **skipped on hosts** where the **last deployment was successful using the same artifact**. +- Each host’s deployment success is tracked **individually**, ensuring that only failed hosts are retried. +- **Infrastructure changes** (e.g., connector updates, credential changes) are considered when determining the last deployment on a host. + +**3. Improved Rollback Behavior** +- The **Skip Instances** feature now **tracks rollbacks per host**, ensuring that only the required hosts are updated. +- This guarantees that rollback logic correctly applies **only to affected hosts**, preventing unnecessary redeployments. + +**4. 
New Expressions Introduced** +These expressions provide better tracking of deployment and skipped instances: +- `<+stage.output.skippedHosts>`: Fetches hosts skipped during the current deployment via the Skip Instances feature. +- `<+stageFqn.deployedHosts.succeeded>`: Fetches hosts that successfully deployed in a stage. +- `<+stageFqn.deployedHosts.failed>`: Fetches hosts that failed deployment in a stage. + +:::note +Currently, the `<+stageFqn.deployedHosts.succeeded>` and `<+stageFqn.deployedHosts.failed>` expressions are **resolved only after stage completion**. + +- The **full stage FQN** (Fully Qualified Name) must be used, e.g., `<+pipeline.stages.ssh.deployedHosts.succeeded>`. +- These expressions will include only the hosts that meet the **Deployed** criteria. + +::: + +
+Example Workflow: Deployment with Partial Success + +This example demonstrates how the **Skip Instances** feature allows rerunning a pipeline without redeploying successfully deployed hosts. By enabling this feature, only failed hosts are re-run, optimizing deployment efficiency and reducing unnecessary re-deployments. + +**Step 1: Deploy on Two Hosts Using Artifact Version 1** +Deploy **artifact version 1** on **host1** and **host2** using a command step. + +**Outcome:** +- **host1** successfully deployed **version 1**. +- **Deployment on host2 failed**. + +--- + +**Step 2: Fix the Issue on Host2 and Rerun the Pipeline with Skip Instances Enabled** +After resolving the issue on **host2**, rerun the pipeline with the **Skip Instances** feature enabled. + +**Outcome:** +- **Deployment on host1 is skipped** since it was previously successful. +- **host2 successfully deploys version 1**. + +
+ +
+Use-Cases for Selective Rerun and Skipping Hosts + +The improved retry and rollback mechanisms ensure that only necessary actions are taken, avoiding unnecessary redeployments and rollbacks. Below are some key scenarios and how they are handled: + +1. **Pipeline Termination After Successful Deployment** +- If the pipeline terminates due to **expire/abort/failure** cases, but the host was successfully deployed via a command step before termination, the deployment on that host is still considered successful. +- This ensures that unexpected pipeline failures do not unnecessarily mark successful hosts as failed. + +2. **Parallel Deployments on the Same Hosts** +- When the same hosts are deployed in parallel using different stages, the stage with the most recent command step execution is considered the last deployment for the skip instances feature. + +3. **Executions Without Command Steps** +- If a pipeline execution does not contain command steps, it is ignored in tracking. +- Such deployments are not considered for the skip instances feature. + +4. **Partial Success Without Rollback** +- If a deployment succeeds on some hosts but fails on others, **only failed hosts are deployed on rerun**. +- Successfully deployed **hosts are skipped**. + +5. **Execution Failure Followed by a Partial Rollback** +- If a rollback is **partially successful**, only successfully rolled-back hosts are **marked as completed**. +- The system ensures these hosts are correctly updated for future deployments. + +6. **Handling Command Step Retries** +- If a command step **fails initially** but **succeeds after retry**, the host is **marked as successfully deployed**. +- Ensures hosts are not mistakenly retried in future deployments. + +7. **Command Steps within Step Groups** +- If a command step inside a step group fails but **succeeds on retry**, the host is considered **successfully deployed**. +- This prevents unnecessary redeployments on already successful hosts. + +8. 
**Pipeline Rollback Considerations** +- If a pipeline rollback is triggered, only hosts that were rolled back successfully are marked as completed. +- The system ensures these hosts are correctly updated for future deployments. + +
+ ## Permission to perform SSH Deployments in AWS We use the SSH Credentials to connect to hosts to perform deployment. diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/static/skip-instance.png b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/static/skip-instance.png new file mode 100644 index 00000000000..e360053e00a Binary files /dev/null and b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/static/skip-instance.png differ diff --git a/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/win-rm-tutorial.md b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/win-rm-tutorial.md index 1cb497981db..a848c1a655b 100644 --- a/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/win-rm-tutorial.md +++ b/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/win-rm-tutorial.md @@ -397,6 +397,14 @@ After selecting the Execution Strategy, we are now ready to run the pipeline. You have now successfully created and completed the steps for running a pipeline by using WinRM. +## Selective Rerun and Skipping Hosts with Same Artifact + +You can do a **selective rerun** for traditional deployments. These improvements ensure: +- **Efficient reruns**: Redeploy only on failed hosts instead of all hosts. +- **Expressions for failed hosts**: Retrieve failed hosts dynamically for debugging and retry logic. + +For more information, go to [Selective Rerun and Skipping Hosts with Same Artifact](/docs/continuous-delivery/deploy-srv-diff-platforms/traditional/ssh-ng/#selective-rerun-and-skipping-hosts-with-same-artifact). + ## Permission to perform WinRM Deployments in AWS We use the WinRM Credentials to connect to hosts to perform deployment. 
diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-acr.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-acr.md index c34e04ab194..a0f8a0ab30b 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-acr.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-acr.md @@ -202,3 +202,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-jfrog.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-jfrog.md index 9f34ba8dc16..9986e07a626 100644 --- 
a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-jfrog.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-jfrog.md @@ -258,3 +258,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-registry.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-registry.md index 57bafe7a523..c994f6b1608 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-registry.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-docker-registry.md @@ -218,3 +218,4 @@ Go to the [CI Knowledge 
Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ecr-step-settings.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ecr-step-settings.md index db9cf909cad..1f08112e99a 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ecr-step-settings.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ecr-step-settings.md @@ -320,3 +320,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries 
that have same prefix for registry URL?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gar.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gar.md index 83ff1fd78e8..56a67d94007 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gar.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gar.md @@ -206,3 +206,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on 
Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gcr.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gcr.md index b5c8f99ab3a..398c573f8d5 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gcr.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-gcr.md @@ -206,3 +206,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND 
execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ghcr.md b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ghcr.md index 98200fb198c..d1f4a694997 100644 --- a/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ghcr.md +++ b/docs/continuous-integration/use-ci/build-and-upload-artifacts/build-and-push/build-and-push-to-ghcr.md @@ -200,3 +200,4 @@ Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration- - [How do I fix this kaniko container runtime error: kaniko should only be run inside of a container?](/kb/continuous-integration/articles/kaniko_container_runtime_error) - [Can I push and pull from two different docker registries that have same prefix for registry URL ?](/kb/continuous-integration/continuous-integration-faqs/#can-i-push-and-pull-from-two-different-docker-registries-that-have-same-prefix-for-registry-url-) - [Why does the parallel execution of build and push steps fail when using Buildx on Kubernetes?](/kb/continuous-integration/continuous-integration-faqs#why-does-the-parallel-execution-of-build-and-push-steps-fail-when-using-buildx-on-kubernetes) +- [Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution?](/kb/continuous-integration/continuous-integration-faqs#why-do-build-and-push-steps-fail-with-error-while-loading-buildkit-image-exit-status-1-when-varlibdocker-is-included-in-shared-paths-during-dind-execution) \ No newline at end of file diff --git a/docs/continuous-integration/use-ci/set-up-build-infrastructure/harness-ci.md 
b/docs/continuous-integration/use-ci/set-up-build-infrastructure/harness-ci.md index 9f92bbc17ce..bcc5e6c5751 100644 --- a/docs/continuous-integration/use-ci/set-up-build-infrastructure/harness-ci.md +++ b/docs/continuous-integration/use-ci/set-up-build-infrastructure/harness-ci.md @@ -155,6 +155,17 @@ API key authentication is required. For more information about API keys, go to [ Harness images are available on Docker Hub, the [Harness project on GAR](http://us-docker.pkg.dev/gar-prod-setup/harness-public), and the [Harness ECR public gallery](https://gallery.ecr.aws/harness). In a continuation of this effort, and to improve stability when pulling Harness-required images, Harness deprecated the Harness-hosted `app.harness` Docker registry effective 15 February 2024. For more information, go to [Connect to the Harness container image registry](/docs/platform/connectors/artifact-repositories/connect-to-harness-container-image-registry-using-docker-connector.md#deprecation-notice-appharness-docker-registry). +## Windows Rootless +:::info +Currently, the feature to download rootless **lite-engine**, **ci-addon**, and **drone-git** images for Windows by default is behind the feature flag, `CI_ADDON_LE_WINDOWS_ROOTLESS`. [Contact Harness Support](https://support.harness.io/) to enable this feature. + +::: + +Customers who are trying to utilize Windows Images with a rootless operation can do so by downloading the appropriate images. 
The rootless Windows version is available as of the following version, or higher: +* `harness/ci-addon:rootless-1.16.71` +* `harness/ci-lite-engine:rootless-1.16.71` +* `harness/drone-git:1.6.7-rootless` + ## Troubleshoot Harness images Go to the [CI Knowledge Base](/kb/continuous-integration/continuous-integration-faqs) for questions and issues related to Harness-required images and pipeline initialization, such as: diff --git a/docs/database-devops/use-database-devops/rollback-for-database-schemas.md b/docs/database-devops/use-database-devops/rollback-for-database-schemas.md index 1ce05ce9ee8..57fc2101ad7 100644 --- a/docs/database-devops/use-database-devops/rollback-for-database-schemas.md +++ b/docs/database-devops/use-database-devops/rollback-for-database-schemas.md @@ -9,7 +9,7 @@ This topic describes how Harness Database DevOps implements automated rollback t ## What are tags? -A tag is a marker or label assigned to a specific point in a database's migration history. Harness recommends creating a change every time you deploy a changeset to a database so that you always have a rollback point for future changes. +A tag is a marker or label assigned to a specific point in a database's migration history. Harness recommends creating a tag every time you deploy a changeset to a database so that you always have a rollback point for future changes. ### Rollback A Database Schema @@ -33,6 +33,25 @@ Here is how you can rollback a database within Harness Database DevOps: You can refer to the Harness documentation detailing how to [Add a Liquibase command step](/docs/database-devops/use-database-devops/add-liquibase-command-step.md) ::: +## Rolling Back to a Previous Database State + +The **Apply Schema** step in our deployment pipeline applies database changeSets and provides an expression pointing to the tag marking the database state before deployment. 
+ +How It Works +- If a Liquibase tag exists on the last changeSet, it is captured and exposed in the rollback expression. +- If no tag exists, the Apply Schema step creates one before applying new changes. +- Use this exposed tag as expression to rollback to the previous state. + +Expression format: +1. If Apply Schema step run as part of different stage: `<+pipeline.stages.{stageIdentifier}.spec.execution.steps.{stepGroupIdentifier}.steps.{stepIdentifier}.output.preStartTag>` +2. If Apply Schema step run as part of same stage: `<+execution.steps.{stepGroupIdentifier}.steps.{stepIdentifier}.output.preStartTag>` + +Example: For the following pipeline configuration, the expressions would be +- `<+pipeline.stages.s2.spec.execution.steps.stepGroup1.steps.DBSchemaApply_1.output.preStartTag>` +- `<+execution.steps.stepGroup1.steps.DBSchemaApply_1.output.preStartTag>` + +![stage-configuration](./static/db-devops-stage-config.png) + ## Built in failure strategies including rollback When managing database schema changes, it’s crucial to have mechanisms in place to handle failures gracefully. Built-in failure strategies, including rollback, are designed to protect your application and data by providing automated responses when something goes wrong during a database update. diff --git a/docs/database-devops/use-database-devops/static/db-devops-stage-config.png b/docs/database-devops/use-database-devops/static/db-devops-stage-config.png new file mode 100644 index 00000000000..fdcafdd5e6c Binary files /dev/null and b/docs/database-devops/use-database-devops/static/db-devops-stage-config.png differ diff --git a/docs/faqs/chaos-engineering-faqs.md b/docs/faqs/chaos-engineering-faqs.md new file mode 100644 index 00000000000..061c1ae219b --- /dev/null +++ b/docs/faqs/chaos-engineering-faqs.md @@ -0,0 +1,373 @@ +--- +title: Chaos Engineering (CE) FAQs +description: This article addresses some frequently asked questions about Harness Chaos Engineering. 
+sidebar_position: 1 +redirect_from: + - /kb/chaos-engineering/chaos-engineering-faq + --- + + ## General + + For an overview of Harness support for platforms, methodologies, and related technologies, go to [Supported platforms and technologies](/docs/platform/platform-whats-supported). + + #### How do I get the Instance ID or license ID with Harness SMP? + + Log in using Admin privileges, and then navigate to **License** to find the **License ID**. + + #### Can I run both serial and parallel faults while using Linux infrastructure? + + Currently, parallel fault experiments aren't supported for Linux. + + #### How to add and manage a custom chaos hub? + + You can navigate to chaos hubs in the chaos module and select `+ New ChaosHub`, and fill in the details regarding your public or private hub. Ensure that you have chaos hub read or write permission enabled and a GitHub connector configured for the repository you are about to connect. + + #### How do I connect to a private chaos hub? + + To connect to a private chaos hub repository, connect to a Harness GitHub connector through a Harness Delegate, or GitHub directly by providing your GitHub SSH key or Personal Access Token (PAT). Once this is done, you can select the connector when adding a chaos hub. + + #### How are faults different from experiments? + + Faults refer to the failures that are injected into the target resource as part of an experiment, whereas a chaos experiment is a set of different faults coupled together to achieve a desired chaos impact. + + #### What are the possible reasons I can't see tunables in Tune Fault UI? + + Since the tuning of a chaos experiment is highly declarative, sometimes it may cause parsing issues. These are the possible reasons: + + - The step name of the fault and the template name might have been changed due to custom editing. + - The step name has been removed completely. + - The template definition has been erased. + + #### How are probes useful in an experiment? 
+
+A probe can help understand the underlying patterns and laws that govern the behavior of your systems, and you can use that understanding to predict or control their behavior. Probes can be used to test scenarios such as network partitioning, pod failures, and node failures. By adding additional checks, they can also be used to test the behavior of applications during such scenarios.
+
+#### How is resilience score affected if a few of my probes fail?
+
+The weighted average of the probe success percentage of each probe determines the value of the overall resilience score of the experiment. The value depends on the successful outcome of the probe criteria based on the type and mode selected. There are two possible values of probe success percentage for each of the probe criteria: either 0 (if the criteria assertion fails) or 100 (if the criteria assertion passes).
+
+```vim
+Total Resilience for one single experiment = (Weight Given to that experiment * Probe Success Percentage)
+```
+
+![Resilience Score](./static/chaos-engineering-faq-resilience-score.png)
+
+#### I have trouble creating an experiment YAML from scratch, can I generate one?
+
+Yes, you can generate a YAML file by choosing the normal flow of creating an experiment (blank canvas or through a template). In the YAML/Visual toggle, you can see a generated YAML based on the inputs provided by you. A generated YAML can also be downloaded after navigating to `Chaos Experiments` and clicking on `Download Experiments`.
+
+Additionally, you can leverage the Harness [Go SDK repository](https://github.com/harness/harness-go-sdk) and generate a template.
+
+#### My issue is not mentioned here, how can I report it?
+
+To report an issue which is not mentioned here, head over to **Help** in Harness SaaS and click **Submit a ticket** and provide your feedback.
+
+## Features and capabilities
+
+#### How do you inject chaos on managed cloud services?
+You can obtain the required permissions to inject chaos on the respective cloud services, like [AWS permissions](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/permissions), [Cloud Foundry](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/cloud-foundry/permissions), and [GCP](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/security-configurations/prepare-secret-for-gcp).
+
+#### Do you support chaos on [X] [AWS/GCP/Azure] service?
+Yes. For more information, go to [AWS chaos](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/), [GCP chaos](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/), and [Azure chaos](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/azure/).
+
+#### Can you simulate a Zone, Region Failover in [AWS/GCP/Azure]?
+Yes. For more information, go to [ALB zone down](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/alb-az-down), [CLB zone down](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/clb-az-down), and so on.
+
+#### Do you support staggered or staged increase of CPU/Memory resources within a Pod/Machine?
+Yes. For more information, go to [pod memory hog](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-memory-hog) and [pod CPU hog](/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/pod/pod-cpu-hog).
+
+#### Do you support load-based chaos/can you generate load during chaos execution?
+Yes. For more information, go to [locust loadgen](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/load/locust-loadgen).
+
+#### Do you support chaos on DataCenter infrastructure resources such as Switches, Loadbalancers, Hardware Encryption Devices, etc.?
+Yes, you can use [SSH chaos](/docs/chaos-engineering/use-harness-ce/chaos-faults/ssh/ssh-chaos) to inject chaos on switches, load balancers, and so on.
+
+#### Does the tool provide recommendations to fix weaknesses identified by Chaos Experimentation?
+No, HCE helps identify the failures in your application by injecting failures intentionally. This way, you can identify the failures and use other methods to address the issues identified using HCE.
+
+#### What kind of Reporting is provided by the tool?
+Once you execute your chaos experiments, you can download the reports that describe the experiment details such as runs, infrastructure, resilience score, and so on. For more information, go to [resilience probes](/docs/chaos-engineering/use-harness-ce/probes/), [alerts](/docs/chaos-engineering/use-harness-ce/experiments/alert-integration) and [chaos dashboard](/docs/chaos-engineering/use-harness-ce/dashboards/).
+
+
+#### Can I deploy chaos infrastructure on an Openshift Cluster?
+Yes, you can deploy chaos infrastructure on OpenShift clusters and run chaos experiments using the SCCs that we provide.
+
+#### Can we disrupt connection/network between any service in a cluster and an external service only & keep other connections intact without actually disrupting external service?
+Yes, you can set the `DESTINATION_IPS` or `DESTINATION_HOSTS` tunables in all network-level chaos experiments.
+For example, to execute the network loss fault,
+- Between the target app and cloud SQL, you can specify `DESTINATION_HOSTS` as **sqladmin.googleapis.com**.
+- Between the target app and storage/GCS, you can specify `DESTINATION_HOSTS` as **storage.googleapis.com**.
+- Between the target app and composer, you can specify `DESTINATION_HOSTS` as **composer.googleapis.com**.
+
+
+## Harness Delegate
+
+:::tip
+The FAQs below are based on HCE entities using [Harness Delegate](/docs/chaos-engineering/use-harness-ce/infrastructures/#what-is-ddcr).
+:::
+
+#### Do you support On-Premise Harness Control Plane?
+Yes, HCE supports the self-managed enterprise edition (SME, also known as self-managed platform or SMP). Depending on the version of HCE (SaaS or Self-Managed Platform), the control plane is hosted by Harness (for SaaS) or within your domain (for example, harness.your-domain.io). Go to [SMP](/docs/chaos-engineering/getting-started/smp/) for more information.
+
+#### Does chaos use the Harness Delegate or does it need a separate agent?
+HCE uses the Harness Delegate to execute chaos experiments, which you can leverage to:
+- Auto-create chaos experiments.
+- Improve execution speed of chaos experiments (up to 5 times).
+- Better control over chaos experiments by facilitating advanced tunables.
+- Gain insights into application-level resilience scores.
+
+Go to [Harness Delegate](/docs/chaos-engineering/use-harness-ce/infrastructures/#what-is-ddcr) for more information.
+
+#### What ports are necessary to be opened in the org's firewall rules to access the Harness Control Plane from the user environment?
+You can access the Harness control plane from the user environment with outbound connectivity over HTTPS using port 443.
+Go to [permissions and ports](https://developer.harness.io/docs/platform/references/permissions-and-ports-for-harness-connections) and [FAQs](https://developer.harness.io/kb/chaos-engineering/chaos-engineering-faq) for more details.
+
+#### What permissions are necessary to run the Chaos Agent on users' Kubernetes clusters / Linux / Windows Machines?
+
+Permissions required to execute chaos faults are different for different platforms.
Go to: +- [Kubernetes permissions](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/kubernetes/tkgi/Requirements); +- [AWS permissions](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/permissions); and +- [Linux permissions](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/linux/permissions), respectively. + +#### What user data is sent to and stored on the Harness platform, and for how long? + +HCE doesn't store any user data with respect to the chaos experiments. The details associated with the user's cluster such as the target application details (kind, labels and namespace) are limited to be used within the chaos experiment you selected/created/executed. + +#### Can the chaos pods be mapped to/assume a specific IAM role on the cloud account for fault injection? +Yes, with cloud-based faults, chaos pods can be mapped to specific IAM roles. You have to create the IAM role on the cloud account and map it to the appropriate policy. The user's credentials should be embedded in a K8s secret before executing the faults. +You can also create [a superset AWS policy](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/security-configurations/policy-for-all-aws-faults) to allow executing all fault types. +For more information, go to [AWS switch profile](https://developer.harness.io/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/security-configurations/aws-switch-profile) and [workload identity setup for GCP](/docs/chaos-engineering/use-harness-ce/chaos-faults/gcp/gcp-iam-integration). 
+
+## Image Registry
+
+If you lack the necessary permissions (**View** or **Create/Edit**) for the image registry in the project, account, or organization settings, you will encounter the following error:
+
+**The user is not authorized to perform view operation on the chaos image registry.**
+
+Ask your admin to grant you the necessary permissions to access the image registry.
+
+![](./static/chaos-engineering-img-registry-perms.png)
+
+To execute a chaos experiment, you must have **at least** **Project Viewer** permissions.
+
+## Deployment model
+
+#### Do you support execution of chaos across clusters from a single agent?
+Yes. With Harness Delegate, we support executing chaos across clusters from a single agent. You need to deploy your delegate in one cluster, and then you can create connectors by providing the master URL and access token of the other cluster and assign the same delegate to inject chaos.
+
+#### Do you have dedicated agents across target types (Linux, Windows, Kubernetes, Cloud)?
+Yes, HCE has dedicated agents for different target types such as Linux, Windows, Kubernetes, and Cloud platforms (AWS, Azure, GCP, Cloud Foundry).
+The agents are:
+- Deployed centrally on Kubernetes to inject faults on K8s microservices;
+- Native agents for VMware using system service inside target machine;
+- Remote chaos agents to execute targets outside clusters for cloud resources.
+
+## Operations
+
+#### What are the prerequisites to setup/onboard Harness Chaos Engineering?
+
+Go to [prerequisites](/docs/chaos-engineering/getting-started/saas/) to fulfill the requirements before onboarding. Once all the prerequisites are fulfilled, you can explore [sandbox](/docs/chaos-engineering/training/sandbox) or execute [your first chaos experiment](/docs/chaos-engineering/getting-started/saas).
+**OR** +If you want a head start to your journey with HCE, you can onboard HCE in two ways: +- [Automated onboarding](/docs/chaos-engineering/getting-started/onboarding/automated-onboarding); and +- [Guided onboarding](/docs/chaos-engineering/getting-started/onboarding/guided-onboarding). + +#### Can all the Chaos Operations be managed via APIs (agent, experiment life cycles etc.,) +Yes, all chaos operations can be managed using APIs. For more information, go to [HCE API documentation](https://apidocs.harness.io/chaos.html). + +#### Are there any tutorials to get started with Chaos? +Yes, you can start executing chaos engineering experiments in the following ways: +- [Run chaos experiments](/docs/chaos-engineering/getting-started/saas) +- [Run chaos experiments from blank canvas](/docs/chaos-engineering/getting-started/saas/chaos-experiment-from-blank-canvas) +- [Execute experiments using API](/docs/chaos-engineering/getting-started/saas/experiment-using-api) + +#### Do you provide a Sandbox environment for us to play with the tool? +Yes, you can execute experiments in a sandbox environment. Go to [sandbox environment](/docs/chaos-engineering/training/sandbox) to play around with HCE. + +#### Can I schedule the execution of a Chaos Experiment? +Yes, you can [schedule](/docs/chaos-engineering/use-harness-ce/experiments/create-experiments#run-or-schedule-the-experiment) chaos experiments. + +## Kubernetes experiment flow optimization + +:::tip +- With the release 1.38.0 of harness-chaos, the experiment execution flow for Kubernetes experiments has been optimized by eliminating the install step of experiment CRs (custom resource) and leveraging all the environment variables with the chaos engine. +- The `litmus-checker` and `chaos-k8s` that were responsible for installing the chaos experiment CR and performing cleanup steps respectively, have been removed. 
+- With this, the time taken to complete a chaos experiment and the manifest length has been reduced, thereby making it easy to maintain the manifest.
+:::
+
+#### Will the existing chaos experiments execute as usual without any changes?
+
+Yes, you can execute all the existing chaos experiments even if no changes are made to the manifest and even if chaos infrastructure is not upgraded.
+
+#### Will the existing Kubernetes chaos infrastructure (< 1.38.0) have to be mandatorily upgraded?
+
+No, the existing infrastructures will continue to function as usual, but HCE recommends you upgrade to version 1.38.0 or the latest version for optimized performance.
+
+#### Why can't I see older infrastructures (< 1.38.0) while constructing a new experiment?
+
+- Due to the recent optimization changes, HCE has removed the experiment CR and its installation from the experiment manifest. Now, all the environment variables, experiment image, imagePullPolicy, arguments, and commands will be passed directly into the chaos engine.
+- However, older infrastructures that use older components (operator, chaos-runner) rely on the experiment CR to execute experiments successfully. As a result, new experiments will not be able to run on the older infrastructures.
+
+#### Can a new Kubernetes experiment run on old Kubernetes infrastructure?
+
+- No, since new experiments have changes in the chaos engine, the old chaos runner can't read all the environment variables from the chaos engine.
+
+#### Can the old Kubernetes experiment run on new Kubernetes infrastructure?
+
+- Yes, the changes are backward-compatible and all the older components (`chaos-k8s`, `litmus-checker`) are still present (which will not be maintained henceforth). The image of these components will not go beyond version 1.37.0 since they will not be updated after this. In addition, `chaos-runner` and `chaos-operator` are designed to be backward-compatible.
+
+#### Why does the experiment pod take time to show up in the running status?
+- The initial execution by Argo may take some time since it needs to pull the images for the `go-runner` for the first time. Subsequent executions will not take as much time.
+
+#### Why are litmus-checker and chaos-k8s not displaying beyond 1.37.0?
+
+- If the experiment format is old, you may see `litmus-checker` and `chaos-k8s` images in the YAML. Since version 1.37.0 is the last supported version of these components, the `litmus-checker` and `chaos-k8s` are displayed with version 1.37.0. For the new experiment format, you will only see a `go-runner` image.
+
+#### Why can't I create a new experiment from the UI?
+- To create a new experiment, you need to have at least one infrastructure in version 1.38.x or higher. Hence, you can either [connect a new infrastructure](/docs/chaos-engineering/use-harness-ce/infrastructures/enable-disable) or [upgrade an existing one](/docs/chaos-engineering/use-harness-ce/infrastructures/upgrade-infra).
+
+#### Is there a way to upgrade the older experiment to the new format?
+- Yes, you can manually edit the experiment manifest or create a new experiment from the UI. Older experiments will continue to work because of backward compatibility.
+
+## Application Maps
+
+#### How do I manually associate experiments with an application map?
+To manually associate an experiment with an [application map](/docs/chaos-engineering/use-harness-ce/application-map), specify the tag `applicationmap=` while creating the experiment.
+
+## Security
+
+#### What are the identity providers supported by Harness Chaos for user authentication?
+
+The Harness platform is fully integrated with several public OAuth providers, with support for two-factor authentication and domain whitelisting.
+To learn more, go to [authentication overview](/docs/platform/authentication/authentication-overview).
+
+#### How does the chaos infrastructure connect to the Harness SaaS control plane? Which ports should be opened in the users' environments?
+
+The chaos infrastructure connects to the Harness control plane through outbound connectivity over HTTP(s) using port 443. To learn more, go to [chaos infrastructures](/docs/chaos-engineering/use-harness-ce/infrastructures/enable-disable).
+
+#### What are the permissions and privileges required to deploy and run the chaos infrastructure?
+
+The chaos infrastructure setup involves the creation of CRDs and RBAC resources. This setup typically needs cluster-admin intervention. To learn more, go to [Kubernetes roles for the chaos infrastructure](/docs/chaos-engineering/security/#kubernetes-roles-for-chaos-infrastructure).
+
+#### Can you run multiple cluster-scoped chaos infrastructures on the same cluster?
+
+It is recommended that you **don't** run multiple cluster-scoped chaos infrastructures on the same cluster since this would result in the chaos infrastructures overwriting each other's cluster-level resources.
+
+#### My chaos infrastructure is inactive. How do I execute my experiment?
+
+A chaos infrastructure could be inactive due to a variety of reasons. When you try to execute an experiment but the chaos infrastructure is inactive, you can switch over to a different infrastructure that is active (represented with a green circle at the right side of the infrastructure name) and execute your experiment or create a new chaos infrastructure (provided you have the necessary privileges to create one) and execute your experiment on it.
+
+#### How do I control user actions in a given environment in Harness Chaos?
+
+The scope of a user's access to chaos resources added to a given Harness account or project can be controlled by assigning them a predefined or custom role. To learn more, go to [chaos access control](/docs/chaos-engineering/use-harness-ce/governance/rbac#user-authorization-and-role-based-access-control).
+ +#### How do I control the security blast radius in terms of access to application microservices and infrastructure resources in a user environment? + +The chaos infrastructure can be installed in a cluster-wide scope (with the ability to discover and inject chaos on microservices across namespaces and infrastructure components such as nodes and volumes) as well as in a namespace-specific scope (where discovery and chaos injection are limited to resources within a specific namespace). + +In addition, users can provide a custom service account to carry out experiments, thereby limiting the fault types in the user environment. To learn more, go to [blast radius control using permissions](/docs/chaos-engineering/security/#blast-radius-control-using-permissions). + +#### How does Harness Chaos access cloud resources in the users' environment? + +Harness Chaos experiment pods consume Kubernetes secrets that contain access credentials, which are leveraged to make provider-specific API calls to the cloud platform to inject chaos. To learn more, go to [Secrets management](/docs/chaos-engineering/security/#secrets-management). + +#### Can cloud service accounts be used instead of user credentials to access cloud resources? + +When the chaos infrastructure is deployed on EKS clusters, the experiments can leverage the IAM service account (IRSA) instead of consuming secrets with user account access details. To learn more, go to [IAM integration for AWS authentication](/docs/chaos-engineering/use-harness-ce/chaos-faults/aws/security-configurations/aws-iam-integration). + +#### How does Harness Chaos access APM platforms to perform hypothesis validation? + +Harness Chaos experiments can consume K8s secrets containing authentication information for the desired APM and use it within the command-probe pods that leverage this information to make the right provider-specific API calls to retrieve metrics and other pertinent data. 
To learn more, go to [command probes](/docs/chaos-engineering/use-harness-ce/probes/command-probe). + +#### What are the details about the user and the user's environment accessed and stored by Harness? + +The following user information is stored in the Harness database and object store: + +- FQDNs or URLs or IPs of microservices in user clusters. +- Chaos experiment execution logs, with process information and results. + +The information is purged on a policy basis, with defaults set at "x" days. + +#### How can I track the actions of a user on the Harness platform? + +Harness provides an audit log to the account admin where user actions on the chaos resources are logged with timestamps. To learn more, go to [audit trail](/docs/platform/governance/audit-trail). + +#### Can Harness perform security chaos tests in the users' environments? + +Harness Chaos supports experiments that simulate DoS attacks on services. You can achieve this by simulating very high loads that render the system slow (if the correct rate limits are in place) or non-functional (if rate limiting is not implemented). To learn more, go to [generic locust fault](/docs/chaos-engineering/use-harness-ce/chaos-faults/load/locust-loadgen/). + +#### How can I avoid image override in the manifest? + +When you edit an experiment that has multiple repositories, a pop up asks if you want to override the images or not. Select **NO** to avoid overriding the image repository in the manifest. + +#### Where can I find my chaos experiment report? + +* To find details about your chaos experiment such as resilience score, total runs, infrastructure used, schedule details, probe details, run history, and so on, navigate to your experiment. Click **View report**. + +![report 3](./static/hce-report-3.png) + +* Below is a sample screen of probe details in the report. + +![report 4](./static/hce-report-4.png) + +* To view the details of a **specific chaos step**, click the chaos step. 
+ +![report 1](./static/hce-report-1.png) + +* Click **v** arrow and click **View probe details**. + +![report 2](./static/hce-report-2.png) + +:::tip +* You can download the chaos experiment report. +* The probe result summary can be seen in the experiment logs too. +::: + +#### How can I pass secrets in the source mode of command probe without specifying an image so as to execute the probe by passing username and password? + +* To pass secrets as environment variables in the source mode of the command probe, specify the environment variable or the environment variable file on the target VM and reference this file in the script where you are executing the probe. +* If you want to execute the probe on a server other than the target, disable the security context in the advanced fault settings. + +#### How is the chaos Agent authenticated by the Harness control plane? +A unique Id, named cluster ID and a dedicated key (named access-key) are generated when you install the chaos agent. These two identifiers are used to authenticate the Harness control plane. Every API request made to the control plane includes these identifiers for authentication. Go to [security](/docs/chaos-engineering/security/) and [FAQ](https://developer.harness.io/kb/chaos-engineering/chaos-engineering-faq) for more details. + +This is applicable on HCE entities that use a dedicated infrastructure rather than the Harness Delegate, because in the case of Harness Delegate, the Delegate itself is the agent. + +## Integration + +#### Can Harness Chaos Agents be installed via Helm Charts? +Yes, [chaos dedicated infrastructure](/docs/chaos-engineering/use-harness-ce/infrastructures/enable-disable#use-helm-template-to-install-chaos-infrastructure) as well as [Harness Delegate](https://www.harness.io/blog/delegate-installation-via-helm) can be installed using Helm charts. + +#### Can chaos experiments be triggered from [X] pipeline (Harness, Jenkins, Gitlab, Azure DevOps)? 
+Yes, HCE provides integration with many tools, such as [Gitlab pipelines](https://developer.harness.io/docs/chaos-engineering/integrations/experiment-as-gitlab-pipeline), [Jenkins pipelines](https://developer.harness.io/docs/chaos-engineering/integrations/experiment-as-jenkins-pipeline), with [Harness CD](https://developer.harness.io/docs/category/integrate-hce-with-harness-cd), [Harness Feature Flags](https://developer.harness.io/docs/chaos-engineering/integrations/chaos-ff), and [SRM](https://developer.harness.io/docs/chaos-engineering/integrations/use-chaos-with-srm). + +#### Does Harness Chaos provide Resilience Probes for [X] APM (Prometheus, Dynatrace, Datadog, NewRelic, Splunk)? +Yes, you can use resilience probes with [Prometheus](/docs/chaos-engineering/use-harness-ce/probes/prom-probe), [Dynatrace](/docs/chaos-engineering/use-harness-ce/probes/dynatrace-probe), [DataDog](/docs/chaos-engineering/use-harness-ce/probes/datadog-probe), and [NewRelic](/docs/chaos-engineering/use-harness-ce/probes/command-probe/cmd-probe-newrelic). + +## License + +#### How is licensing counted for services across different environments in Harness Chaos Engineering? + +Licensing is counted separately for each service in different environments. For example, if chaos experimentation is conducted on a Kubernetes service named “login-service” in both QA and Production environments within the same 30-day cycle, it will consume two chaos service licenses. + +#### Does using the same service in multiple environments increase license usage? + +Yes, each unique environment where a service undergoes chaos experimentation counts individually towards license utilization, allowing separate tracking for services across environments. + +#### How is the license utilization measured in Harness Chaos Engineering? + +License utilization is measured over a 30-day cycle. 
Each cycle allows license services to be rolled over to a different set of target services, enabling flexible use across various teams, applications, and environments. + +#### Can I change the target services for my license in Harness Chaos Engineering? + +Yes, at the end of each 30-day cycle, license services can be re-assigned to a different set of target services, thereby optimizing the resource utilization. + +## Use cases + +#### How can we leverage Harness Chaos to test Disaster Recovery (DR)? + +We can leverage Harness Chaos Engineering to test Disaster Recovery in the following ways: +- Implement node network loss: Specify `NODE_LABEL` tunable with the label of the target zone. +- Implement node drain: Specify `NODE_LABEL` tunable with the label of the target zone. +- Implement BYOC for Cloud SQL Instance failover. +- Implement BYOC for GCP Composer Database failover. \ No newline at end of file diff --git a/docs/faqs/static/chaos-engineering-faq-resilience-score.png b/docs/faqs/static/chaos-engineering-faq-resilience-score.png new file mode 100644 index 00000000000..cb753a0aa9b Binary files /dev/null and b/docs/faqs/static/chaos-engineering-faq-resilience-score.png differ diff --git a/docs/faqs/static/chaos-engineering-img-registry-perms.png b/docs/faqs/static/chaos-engineering-img-registry-perms.png new file mode 100644 index 00000000000..e17fa5c5cb7 Binary files /dev/null and b/docs/faqs/static/chaos-engineering-img-registry-perms.png differ diff --git a/docs/faqs/static/hce-report-1.png b/docs/faqs/static/hce-report-1.png new file mode 100644 index 00000000000..e4ea5c1f503 Binary files /dev/null and b/docs/faqs/static/hce-report-1.png differ diff --git a/docs/faqs/static/hce-report-2.png b/docs/faqs/static/hce-report-2.png new file mode 100644 index 00000000000..2a5bb0a9fcf Binary files /dev/null and b/docs/faqs/static/hce-report-2.png differ diff --git a/docs/faqs/static/hce-report-3.png b/docs/faqs/static/hce-report-3.png new file mode 100644 index 
00000000000..243de5ced35 Binary files /dev/null and b/docs/faqs/static/hce-report-3.png differ diff --git a/docs/faqs/static/hce-report-4.png b/docs/faqs/static/hce-report-4.png new file mode 100644 index 00000000000..5db76aa06da Binary files /dev/null and b/docs/faqs/static/hce-report-4.png differ diff --git a/docs/harness-cloud-operations/computing-uptime.md b/docs/harness-cloud-operations/computing-uptime.md index 90e574e0b5d..0187556edd2 100644 --- a/docs/harness-cloud-operations/computing-uptime.md +++ b/docs/harness-cloud-operations/computing-uptime.md @@ -234,20 +234,9 @@ All the Platform and Pipeline SLIs are applicable here. | **SLI** | **Threshold** | **Availability**| |-------------------------------------------|-----------------|-----------------------------------------| -| Login Failure (Legacy only)| Greater than 30 seconds for a consecutive duration of 5 minutes |Major Outage| -| Integrations list API Error Rate | failure rate (5XX) of the API in 5 minutes > 0.5 |Major Outage| -| Integrations list API Latency | Response time greater than 30 seconds | Degraded Performance| -| Ingestion Delay | Delay in receiving any events > 24 hours |Partial Outage| -| ETL / Aggregations Delay| Delay in receiving any events > 48 hours |Partial Outage| -| ETL / Aggregations Performance| Jobs stuck in scheduled state for more than 12 hours |Degraded Performance| -| ES Indexing Delay| Delay in receiving any events > 48 hours |Partial Outage| -| UI dashboard widget Load times| Greater than 3 mins for a consecutive duration of 10 mins for all customers |Degraded Performance| -| UI landing page/dashboard page not loading| For a consecutive duration of 5 mins |Major Outage| -| Trellis Events| Delay in processing events > 24 hours or monthly calculation not finished in first 7 days |Degraded Performance| -| DB Health | DB Load > 80% |Degraded Performance| -| ES Cluster health | ES cluster state RED / read-only mode |Partial Outage| -| Server API Error rate (5XX) | More than 
1% over 5 min rolling window | Major Outage| -| API Response Time | 95th percentile: > 15s over 5 min rolling window | Degraded performance| +| APIs Error rate | More than 5% over 5 min rolling window | Major outage | +| API Response Time | 95th percentile: > 2s over 5 min rolling window | Degraded performance| +| Ingestion & data processing delay | Delay of more than 72 hours for the latest data to appear on the dashboard.
This threshold excludes delays caused by pending customer actions. In the event of failures, processing historical data may take additional time depending on the volume of data that needs to be backfilled. | Degraded performance | diff --git a/docs/infra-as-code-management/pipelines/plugin-images.md b/docs/infra-as-code-management/pipelines/plugin-images.md index d1497ec2a9e..fc3570e7b3b 100644 --- a/docs/infra-as-code-management/pipelines/plugin-images.md +++ b/docs/infra-as-code-management/pipelines/plugin-images.md @@ -4,23 +4,15 @@ description: Create and use your own Terraform plugin images. sidebar_position: 30 --- - - import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -Harness provides the flexibility to use custom images in your IACM stage, which refers to individual Terraform plugin steps such as the `terraform init` step in an IaCM pipeline provision stage. This guide walks you through the process of creating your custom image and incorporating it into your Harness pipelines. +Harness provides the flexibility to use custom images in your IaCM stage, which applies to individual infrastructure provisioning steps, such as the `init` step in an OpenTofu or Terraform pipeline stage. This guide walks you through the process of creating a custom image and incorporating it into your Harness pipelines. +
- Network Connectivity Requirements + ## Network Connectivity Requirements When using OpenTofu with Harness IaC Management (IaCM), it’s important to ensure that your environment allows the necessary network access for OpenTofu to function properly. OpenTofu relies on external services to download binaries, modules, and providers. If your environment is restricted, such as in air-gapped setups or strict firewall configurations, you may need to whitelist specific domains or use alternative strategies like custom images as mentioned above. @@ -47,20 +39,15 @@ Make sure to configure your firewall to allow outbound access to the domains lis ## Create an image Harness allows you to create custom images based on a provided base image. This enables you to tailor the image to your specific needs and use it in your workflows. -:::warning Version lock-in -Once you create a custom image using our base image, it becomes **version-locked**. This means that if we release a new version of our base image, your custom image will not automatically update to the latest version. For instance, if you create an image today using our base image version "1.0.0" and we subsequently release version "1.1.0," your custom image will still be using version "1.0.0." - -If your version is out-of-date, while it is unlikely to cause a pipeline failure, your current version may lack some features, be open to security vulnerabilities or run into compatibility issues. In such cases, follow the **Mitigating versioning challenges** section below: -::: +:::warning Version Lock-In & Mitigating Challenges +Custom images created from our base image are **version-locked**, meaning they won't automatically update with new releases. While an outdated version might not cause pipeline failures, it could lack features, have security vulnerabilities, or face compatibility issues. 
-:::tip Mitigating versioning challenges -To address this versioning challenge and ensure that your custom image stays up-to-date with our latest improvements and features, you can implement specific steps within your CI/CD pipelines. These steps may include periodically checking for updates to our base image and rebuilding the custom image as necessary. It's crucial to proactively monitor our releases and sync the custom images to take full advantage of the latest enhancements. +To keep your custom image current with our latest improvements, periodically check for updates to our base image and rebuild your custom image as needed. Proactively monitor our releases to fully benefit from the latest enhancements. -**To help detect out-of-date versions, Harness log a warning if your image version is five versions behind the latest release.** +**Harness will log a warning if your image version is five versions behind the latest release, helping you detect out-of-date versions.** ::: ### Create a custom image - Create custom images with root-based and rootless custom containers for **Harness Cloud** and **Kubernetes** environments. The following examples demonstrate package installation via `microdnf` and direct binary installation for tools like `kubectl`. @@ -129,7 +116,7 @@ USER app && rm kustomize_v5.3.0_linux_amd64.tar.gz ```
- +--- ## Use your own image To use your custom image in a step, create a reference in the YAML configuration indicating that the step should use your image. @@ -146,9 +133,12 @@ To use your custom image in a step, create a reference in the YAML configuration connectorRef: privateConnector # (2) ``` :::note -In this example, the `image` attribute **(1)** in the YAML points to the plugin image in the Elastic Container Registry (ECR) where it is hosted. If your image is hosted in a private ECR, you'll need to create a connector for that ECR and define the `connectorRef` **(2)** for the connector. This ensures that Harness can access the image. At this stage, the "apply" step in your pipeline will use the "private_harness_terraform_plugin" and have access to `kubectl` and `kustomize` for its operations. +In this example, the `image` attribute **(1)** in the YAML points to the plugin image hosted in the Elastic Container Registry (ECR) to store your Docker images securely. + +If it's in a private ECR, create a connector and define the `connectorRef` **(2)** to allow Harness access and to ensure the `apply` step in your pipeline uses the 'private_harness_terraform_plugin' and has access to `kubectl` and `kustomize` for operations. ::: +--- ## IACM execution-config To use images from your repository in an IACM stage, you can use the `execution-config` API endpoints. @@ -241,4 +231,7 @@ Although some images mentioned here are also used by [CI](https://developer.harn 'https://app.harness.io/gateway/iacm-manager/execution-config/get-default-config?accountIdentifier=&infra=k8' ``` - \ No newline at end of file + + +## Conclusion +In conclusion, custom images provide a powerful way to optimize your IaCM pipelines. By staying proactive with updates and leveraging the flexibility of Harness, you can ensure robust, secure, and efficient infrastructure management. Ready to take the next step? Implement these strategies and watch your deployment processes transform! 
diff --git a/docs/infra-as-code-management/workspaces/workspace-expressions.md b/docs/infra-as-code-management/workspaces/workspace-expressions.md index 2535c468c7f..9b2e8adb806 100644 --- a/docs/infra-as-code-management/workspaces/workspace-expressions.md +++ b/docs/infra-as-code-management/workspaces/workspace-expressions.md @@ -1,33 +1,37 @@ --- -title: Workspace expressions -description: Learn how to use Harness workspace expressions. +title: Workspace Expressions +description: Discover how to utilize workspace expressions for dynamic CI/CD pipeline customization in Harness IaCM. sidebar_position: 50 --- import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -Workspace expressions in Harness Infrastructure as Code Management (IaCM) provide a powerful way to dynamically reference and utilize various workspace-level parameters within your pipelines. By leveraging JEXL (Java Expression Language) expressions, you can access key workspace attributes, allowing for greater flexibility and customization in your workflows. +Harness workspace expressions empower you to dynamically reference workspace-level parameters within your pipelines, enhancing flexibility and customization. By leveraging JEXL (Java Expression Language), you can seamlessly access key workspace attributes. -With workspace expressions, you can: +### Key Benefits +- **Dynamic Retrieval:** Access workspace-specific identifiers and paths effortlessly. +- **Environment Integration:** Utilize environment and secret variables within the workspace context. +- **Versioning and Provisioning:** Integrate versioning and provisioner details directly into your pipeline logic. +- **Cost Management:** Extract outputs and control cost estimation features. +- **Git Management:** Seamlessly manage Git-related parameters like repository name, branch, commit SHA, and tags. -- Dynamically retrieve and use workspace-specific identifiers and paths. 
-- Access environment variables and secret variables within the workspace context. -- Integrate versioning and provisioner details directly into your pipeline logic. -- Extract outputs and control cost estimation features. -- Seamlessly manage Git-related parameters such as repository name, branch, commit SHA, and tags. - -These expressions are particularly useful in scenarios where your pipeline needs to adapt to different environments or configurations based on the workspace in use. +These expressions are invaluable when adapting pipelines to different environments or configurations. :::info migrate expressions -Go to the **Migrate expressions** tab in the [how to use workspace expressions](https://developer.harness.io/docs/infra-as-code-management/workspaces/workspace-expressions#how-to-use-workspace-expressions) section to see how to update your old expressions to the most recent format. +Go to the **Migrate Expressions** tab in the [How to Use Workspace Expressions](/docs/infra-as-code-management/workspaces/workspace-expressions#how-to-use-workspace-expressions) section to update old expressions to the latest format. ::: -### Available workspace expressions +## Available Workspace Expressions +Supported workspace expressions are written in the syntax **`<+workspace.ATTRIBUTE>`**, where ATTRIBUTE can be a single or multi-level identifier, such as: +- **`<+workspace.identifier>`** +- **`<+workspace.type>`** (e.g., opentofu) +- **`<+workspace.envVars.SOME_ENV_VAR>`** -Here’s a table of the available workspace expressions in Harness IaCM: +
+See full list of supported workspace expressions -| **Paremeter** | **Expression** | **Description** | +| **Parameter** | **Expression** | **Description** | |-------------------------------|--------------------------------------------------|---------------------------------------------------------------------------------| | **Workspace ID** | `<+workspace.identifier>` | Retrieves the unique identifier for the workspace. | | **Folder Path** | `<+workspace.folderPath>` | Returns the root directory path associated with the workspace. | @@ -35,7 +39,7 @@ Here’s a table of the available workspace expressions in Harness IaCM: | **Provisioner Version** | `<+workspace.provisionerVersion>` | Fetches the version of the provisioner used in the workspace. | | **Connector Reference** | `<+workspace.connectorRef>` | Retrieves the connector reference associated with the workspace. | | **Environment Variable** | `<+workspace.envVars.SOME_ENV_VAR>` | Accesses a specific environment variable within the workspace. | -| **Terraform/Opentofu Variable** | `<+workspace.variables.OPEN_TOFU_VAR>` | Retrieves a variable specific to Terraform or Opentofu within the workspace. | +| **Terraform/Opentofu Variable** | `<+workspace.variables.OPEN_TOFU_VAR>` | Retrieves a variable specific to Terraform or OpenTofu within the workspace. | | **Secret Variable** | `<+workspace.envVars.SECRET_ENV_VAR>` | Accesses a secret variable defined in the workspace. | | **Output** | `<+workspace.outputs.OUTPUT_ID>` | Retrieves a specific output from the workspace. | | **Workspace Name** | `<+workspace.name>` | Returns the name of the workspace. | @@ -45,32 +49,22 @@ Here’s a table of the available workspace expressions in Harness IaCM: | **Git Branch** | `<+workspace.gitBranch>` | Returns the name of the Git branch currently in use. | | **Git SHA** | `<+workspace.gitSha>` | Retrieves the commit SHA from the Git repository. 
| | **Git Tag** | `<+workspace.gitTag>` | Returns the Git tag associated with the current commit. | +
-### How to use workspace expressions - -The following example highlights how you can apply workspace expressions to your pipelines run step and output the workspace identifier and secret variable value. +## How to Use Workspace Expressions +Here's an example of applying workspace expressions in your pipelines to output the workspace identifier and secret variable value. :::warning reserved keyword -Do not name your run step `workspace` as it is a reserved keyword. +Avoid naming your run step `workspace` as it is a reserved keyword. ::: - - + + - -If you are currently using and older expression format, update it to the new format accordingly. + + +If you're using an older expression format, update it to the new format as follows: For example, update secret variables from: @@ -81,23 +75,10 @@ For example, update secret variables from: // NEW <+workspace.envVars.SECRET_ENV_VAR> ``` +The following guide demonstrates updating your workspace's secret environment variable reference: -The following guide demonstrates how to update your workspace's secret environment variable reference: - - - -See the following reference of old and new expression formats: + +See the reference of old and new expression formats:
See old and new expressions @@ -126,8 +107,8 @@ See the following reference of old and new expression formats: | **Secret from Connector**| Before: `<+pipeline.stages.s1.spec.execution.steps.init.spec.secretVariablesFromConnector.PLUGIN_SECRET_KEY>` | | | After: `<+workspace.secretVariablesFromConnector.PLUGIN_SECRET_KEY>` |
+
---- -Workspace expressions in Harness IaCM allow for advanced customization and dynamic referencing within your CI/CD pipelines. By understanding and utilizing these expressions, you can enhance the efficiency and adaptability of your infrastructure management processes. +By understanding and utilizing workspace expressions in Harness IaCM, you can significantly enhance the efficiency and adaptability of your infrastructure management processes. \ No newline at end of file diff --git a/docs/internal-developer-portal/adoption/adoption-playbook.md b/docs/internal-developer-portal/adoption/adoption-playbook.md index 57ad814865b..d7c7a142cce 100644 --- a/docs/internal-developer-portal/adoption/adoption-playbook.md +++ b/docs/internal-developer-portal/adoption/adoption-playbook.md @@ -245,7 +245,7 @@ From this point onwards, onboard more teams, solve newer use-cases, onboard thei - Create an internal Slack/Teams channel called **#harness-idp-adoption**. Announce new features and ask users to share feedback (both good and bad). - No two portals please. Avoiding creating fragmentation for Developers when it comes to their IDP use-cases. -- Onboarding is not the same as Adoption. Onboarding refers to one or more Platform Engineers setting up the tool with Authentication, Authorization and other configuration. Adoption refers to active usage by Developers. Onboarding is a pre-requisite to Adoption. +- Onboarding is not the same as Adoption. Onboarding refers to one or more Platform Engineers setting up the tool with Authentication, Authorization and other configuration. Adoption refers to active usage by Developers. Onboarding is a prerequisite to Adoption. 
### Central vs Distributed Catalog definition YAML files diff --git a/docs/internal-developer-portal/api-refernces/public-api.md b/docs/internal-developer-portal/api-refernces/public-api.md index e0da524825d..3884f7c71c6 100644 --- a/docs/internal-developer-portal/api-refernces/public-api.md +++ b/docs/internal-developer-portal/api-refernces/public-api.md @@ -30,21 +30,20 @@ Register Software Component in Harness Catalog. #### URL ```bash -https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations +https://idp.harness.io//idp/api/catalog/locations ``` #### URL Parameters -`ACCOUNT_IDENTIFIER`: Your Harness account ID. +`ACCOUNT_ID`: Your Harness account ID. You can find your account ID in any Harness URL, for example: ```bash -https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview +https://app.harness.io/ng/account//idp/overview ``` #### Headers - `x-api-key`: Your Harness API token. -- `Harness-Account`: Your Harness account ID. #### Request Body @@ -58,9 +57,8 @@ https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview ### cURL Example ```cURL -curl --location 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl --location 'https://idp.harness.io//idp/api/catalog/locations' \ +--header 'x-api-key: ' \ --data-raw '{"type":"url","target":"https://github.com/harness-community/idp-samples/blob/main/catalog-info.yaml"}' ``` #### Response: @@ -80,28 +78,27 @@ Syncs the component with the latest version of `catalog-info.yaml` stored in git #### URL ```bash -https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/refresh +https://idp.harness.io//idp/api/catalog/refresh ``` #### URL Parameters -`ACCOUNT_IDENTIFIER`: Your Harness account ID. +`ACCOUNT_ID`: Your Harness account ID. 
You can find your account ID in any Harness URL, for example: ```bash -https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview +https://app.harness.io/ng/account//idp/overview ``` #### Headers - `x-api-key`: Your Harness API token. -- `Harness-Account`: Your Harness account ID. ### cURL Example ```cURL -curl 'https://idp.harness.io/{HARNESS_ACCOUNT_IDENTIFIER}/idp/api/catalog/refresh' \ +curl 'https://idp.harness.io//idp/api/catalog/refresh' \ -H 'Content-Type: application/json' \ - -H 'x-api-key: {HARNESS_X_API_KEY}' \ + -H 'x-api-key: ' \ --data-raw '{"entityRef":"{ENTITY_REF}"}' ``` @@ -135,24 +132,23 @@ Delete Software Component from Harness Catalog. #### URL ```bash -https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations/{LOCATION_ID} +https://idp.harness.io//idp/api/catalog/locations/ ``` #### URL Parameters -`ACCOUNT_IDENTIFIER`: Your Harness account ID. +`ACCOUNT_ID`: Your Harness account ID. You can find your account ID in any Harness URL, for example: ```bash -https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview +https://app.harness.io/ng/account//idp/overview ``` `LOCATION_ID`: To get the Location ID, use the cURL command given below to fetch all the Locations for the account and `grep` your component name and the ID mentioned there is the Location ID to be used. ```cURL -curl 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl 'https://idp.harness.io//idp/api/catalog/locations' \ +--header 'x-api-key: ' ``` The Response of the above cURL would be as shown below and the `id` mentioned is the **Location ID**, search for the component name in the response and pick the `id` @@ -173,14 +169,12 @@ The Response of the above cURL would be as shown below and the `id` mentioned is #### Headers - `x-api-key`: Your Harness API token. -- `Harness-Account`: Your Harness account ID. 
### cURL Example ```cURL -curl --location --request DELETE 'https://idp.harness.io/{ACCOUNT_ID}/idp/api/catalog/locations/{LOCATION_ID}' \ ---header 'x-api-key: ' \ ---header 'Harness-Account: {ACCOUNT_ID}' +curl --location --request DELETE 'https://idp.harness.io//idp/api/catalog/locations/' \ +--header 'x-api-key: ' ``` #### Response: The response will remove the software component along the with the locations from your IDP catalog as defined in the location provided. @@ -199,24 +193,23 @@ Deletes an entity by its `metadata.uid` field value. #### URL ```bash -https://idp.harness.io/ACCOUNT_ID/idp/api/catalog/entities/by-uid/{uid} +https://idp.harness.io//idp/api/catalog/entities/by-uid/ ``` #### URL Parameters -`ACCOUNT_IDENTIFIER`: Your Harness account ID. +`ACCOUNT_ID`: Your Harness account ID. You can find your account ID in any Harness URL, for example: ```bash -https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview +https://app.harness.io/ng/account//idp/overview ``` `udi`: To get the uid, use the cURL command given below to fetch all the Locations for the account and `grep` your component name and the `metadata.uid` mentioned there is the `uid` to be used. ```cURL -curl --location 'https://idp.harness.io/ACCOUNT_ID/idp/api/catalog/entities/by-query?filter=kind=location' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl --location 'https://idp.harness.io//idp/api/catalog/entities/by-query?filter=kind=location' \ +--header 'x-api-key: ' ``` The Response of the above cURL would be as shown below and the `metadata.uid` mentioned is the **uid**, search for the component name in the response and pick the `uid` @@ -244,14 +237,12 @@ The Response of the above cURL would be as shown below and the `metadata.uid` me #### Headers - `x-api-key`: Your Harness API token. -- `Harness-Account`: Your Harness account ID. 
### cURL Example ```cURL -curl --location --request DELETE 'https://idp.harness.io/ACCOUNT_ID/idp/api/catalog/entities/by-uid/{uid}' \ ---header 'x-api-key: ' \ ---header 'Harness-Account: {ACCOUNT_ID}' +curl --location --request DELETE 'https://idp.harness.io//idp/api/catalog/entities/by-uid/' \ +--header 'x-api-key: ' ``` #### Response: The response will remove the locations from your IDP catalog as defined in the `uid`. @@ -270,16 +261,16 @@ Retrieves catalog entities that match a specific filter from the Harness IDP. #### URL ```bash -https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/entities +https://idp.harness.io//idp/api/catalog/entities ``` #### URL Parameters -1. `ACCOUNT_IDENTIFIER`: Your Harness account ID. +1. `ACCOUNT_ID`: Your Harness account ID. You can find your account ID in any Harness URL, for example: ```bash -https://app.harness.io/ng/account/ACCOUNT_ID/idp/overview +https://app.harness.io/ng/account//idp/overview ``` 2. Lists entities. Supports the following **query parameters**, described in sections below: @@ -314,9 +305,8 @@ Example: - **Example: Filter `components` based on `system` name** ```cURL -curl --location 'https://idp.harness.io/ACCOUNT_ID/idp/api/catalog/entities?filter=kind=component,relations.partOf=system:default/SYSTEM_NAME' \ ---header 'x-api-key: X_API_KEY' \ ---header 'Harness-Account: ACCOUNT_ID' +curl --location 'https://idp.harness.io//idp/api/catalog/entities?filter=kind=component,relations.partOf=system:default/SYSTEM_NAME' \ +--header 'x-api-key: ' ``` In the above example since the `system` name is mentioned under `relations` in component's definition YAML, hence we have used the filter `relations.partOf=system:default/SYSTEM_NAME`. Here's the mention of `system` in component's YAML. @@ -347,15 +337,12 @@ relations: #### Headers - `x-api-key`: Your Harness API token. -- `Harness-Account`: Your Harness account ID. 
### cURL Example ```cURL -curl 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/entities?filter=kind=template' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' -``` +curl 'https://idp.harness.io//idp/api/catalog/entities?filter=kind=template' \ +--header 'x-api-key: ' +``` #### Response: diff --git a/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api-tutorial.md b/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api-tutorial.md index e2a6f1cd8e8..e3869bfed9e 100644 --- a/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api-tutorial.md +++ b/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api-tutorial.md @@ -29,9 +29,8 @@ In this tutorial we will be using the catalog metadata ingestion APIs to add add ```cURL curl --location 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity/' \ ---header 'Harness-Account: ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: X-API-KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "warehouse", "property": "metadata.codeCoverageScore", @@ -89,7 +88,6 @@ pipeline: # Define the API endpoint and headers url = 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' headers = { - 'Harness-Account': '<+account.identifier>', 'Content-Type': 'application/json', 'x-api-key': '<+pipeline.variables.apikey>' # Replace with your actual API key } diff --git a/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api.md b/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api.md index 1e5786c6a7b..bc324c103ac 100644 --- a/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api.md +++ b/docs/internal-developer-portal/catalog/catalog-ingestion/catalog-ingestion-api.md @@ -129,7 +129,6 @@ POST /catalog/custom-properties/entity ``` X-API-KEY: Harness API Key 
-Harness-Account: YOUR ACCOUNT ID Content-Type: application/json ``` @@ -146,9 +145,8 @@ You can find your account ID in any of your Harness URLs, for example: `https:// ```sh curl \ --location 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "boutique-service", "property": "metadata.codeCoverageScore", @@ -234,9 +232,8 @@ metadata: ```sh curl \ --location 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "boutique-service" "properties": [ @@ -345,9 +342,8 @@ metadata: ```sh curl --location 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "property": "metadata.releaseVersion", "entity_refs": [ @@ -576,9 +572,8 @@ metadata: ```sh curl --location 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "boutique-service", "property": "metadata.tags", @@ -787,9 +782,8 @@ POST https://app.harness.io/gateway/v1/catalog/custom-properties ```bash curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "boutique-service", "property": 
"metadata.teamLead" @@ -802,9 +796,8 @@ Note that this only works for the custom properties added using the Ingestion AP ```bash curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/custom-properties/entity' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "entity_ref": "boutique-service", "properties": ["metadata.teamLead", "metadata.teamOwner"] @@ -815,9 +808,8 @@ curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/cust ```bash curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/custom-properties/property' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "property": "metadata.releaseVersion", "entity_refs": ["idp-service", "order-service"] @@ -828,9 +820,8 @@ curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/cust ```bash curl --location --request DELETE 'https://app.harness.io/gateway/v1/catalog/custom-properties' \ ---header 'Harness-Account: ADD_YOUR_ACCOUNT_ID' \ --header 'Content-Type: application/json' \ ---header 'x-api-key: ADD_YOUR_API_KEY' \ +--header 'x-api-key: ' \ --data '{ "property": "metadata.teamLead", "filter": { diff --git a/docs/internal-developer-portal/catalog/register-software-component.md b/docs/internal-developer-portal/catalog/register-software-component.md index 9a5ea456400..5a630642df4 100644 --- a/docs/internal-developer-portal/catalog/register-software-component.md +++ b/docs/internal-developer-portal/catalog/register-software-component.md @@ -86,9 +86,8 @@ Follow these steps to register components using the Harness Catalog API endpoint - Follow the following cURL command with the request body to register your component. The body takes two input at present `type` and `target`. 
```cURL -curl --location 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl --location 'https://idp.harness.io//idp/api/catalog/locations' \ +--header 'x-api-key: ' \ --data-raw '{"type":"url","target":"https://github.com/harness-community/idp-samples/blob/main/catalog-info.yaml"}' ``` @@ -128,7 +127,7 @@ function usage { while getopts a:x:u:l:h flag do case "${flag}" in - a) ACCOUNT_IDENTIFIER=${OPTARG};; + a) ACCOUNT_ID=${OPTARG};; x) X_API_KEY=${OPTARG};; u) BEARER_AUTHORIZATION=${OPTARG};; l) CATALOG_LOCATIONS=${OPTARG};; @@ -143,16 +142,16 @@ CATALOG_LOCATION_REGISTER_DATA='{"type":"url","target":"CATALOG_LOCATION_TARGET" for LOCATION in ${CATALOG_LOCATIONS//,/ } do echo -e "\n--------" - echo "Registering $LOCATION catalog location in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Registering $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" POST_DATA=${CATALOG_LOCATION_REGISTER_DATA/CATALOG_LOCATION_TARGET/$LOCATION} - RESULT_HTTP_CODE=$(curl --write-out %{http_code} -s --output /dev/null -H "Content-Type: application/json" -H "Harness-Account: ${ACCOUNT_IDENTIFIER}" -H "x-api-key: ${X_API_KEY}" -H "Authorization: Bearer ${BEARER_AUTHORIZATION}" -X POST --data "${POST_DATA}" "https://idp.harness.io/${ACCOUNT_IDENTIFIER}/idp/api/catalog/locations") + RESULT_HTTP_CODE=$(curl --write-out %{http_code} -s --output /dev/null -H "Content-Type: application/json" -H "Harness-Account: ${ACCOUNT_ID}" -H "x-api-key: ${X_API_KEY}" -H "Authorization: Bearer ${BEARER_AUTHORIZATION}" -X POST --data "${POST_DATA}" "https://idp.harness.io/${ACCOUNT_ID}/idp/api/catalog/locations") if [[ "$RESULT_HTTP_CODE" -ne 201 ]] ; then - echo "Failed registering $LOCATION catalog location in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Failed registering $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" else - echo "Successfully registered $LOCATION catalog location 
in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Successfully registered $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" fi echo "--------" diff --git a/docs/internal-developer-portal/catalog/software-catalog.md b/docs/internal-developer-portal/catalog/software-catalog.md index c08dca9b5c6..2d6cf4c255d 100644 --- a/docs/internal-developer-portal/catalog/software-catalog.md +++ b/docs/internal-developer-portal/catalog/software-catalog.md @@ -53,9 +53,8 @@ You can access catalog APIs to retrieve software components from the catalog, re Start by generating a Harness API Key as described in [Manage API keys](/docs/platform/automation/api/add-and-manage-api-keys). Then, by using your Harness account ID and the API Key, you can make requests to the IDP catalog APIs as follows: ``` -curl --location 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl --location 'https://idp.harness.io//idp/api/catalog/locations' \ +--header 'x-api-key: ' ``` To learn more about the available catalog APIs supported by Backstage, go to [API](https://backstage.io/docs/features/software-catalog/software-catalog-api/). diff --git a/docs/internal-developer-portal/flows/dynamic-picker.md b/docs/internal-developer-portal/flows/dynamic-picker.md index dabcfd806d9..deebebc114d 100644 --- a/docs/internal-developer-portal/flows/dynamic-picker.md +++ b/docs/internal-developer-portal/flows/dynamic-picker.md @@ -91,13 +91,13 @@ Endpoints targeting the `harness.io` domain should **not** be configured behind Hit "Save Configuration" and now our backend proxy is ready to use! -You can verify this endpoint by making requests to the `proxy` endpoint at `https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/proxy/`. 
For example in order to test the GitHub example above, you can make a request to +You can verify this endpoint by making requests to the `proxy` endpoint at `https://idp.harness.io//idp/api/proxy/`. For example in order to test the GitHub example above, you can make a request to ``` -https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/proxy/github-api/user +https://idp.harness.io//idp/api/proxy/github-api/user ``` -Here `https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/proxy/github-api/` can be seen exactly as `https://api.github.com/`. So all the endpoint paths on the GitHub API can be used after the proxy endpoint URL. You can learn more about how to consume Harness IDP APIs on our [API Docs](/docs/internal-developer-portal/api-refernces/public-api). +Here `https://idp.harness.io//idp/api/proxy/github-api/` can be seen exactly as `https://api.github.com/`. So all the endpoint paths on the GitHub API can be used after the proxy endpoint URL. You can learn more about how to consume Harness IDP APIs on our [API Docs](/docs/internal-developer-portal/api-refernces/public-api). ### Step 2: Create the Dropdown Picker in Workflows Form diff --git a/docs/internal-developer-portal/flows/worflowyaml.md b/docs/internal-developer-portal/flows/worflowyaml.md index f98928fbcd6..f92cdae9ef2 100644 --- a/docs/internal-developer-portal/flows/worflowyaml.md +++ b/docs/internal-developer-portal/flows/worflowyaml.md @@ -268,7 +268,7 @@ Other secret managers are **not supported** for storing secrets with this featur #### Referencing the **`secret`** in the **`steps`** spec: ```YAML -apiKeySecret: ${{ secretId }} +apiKeySecret: "secretId" ``` Here, ```secretId``` refers to the identifier of the secret which stores the **Harness API Key**. You can retrieve this ``secretId`` from the **Harness Secret Manager**. 
@@ -291,7 +291,7 @@ steps: url: pipeline url inputset: username: ${{ parameters.username }} - apiKeySecret: ${{ secretId }} + apiKeySecret: "secretId" output: links: - title: Pipeline Details diff --git a/docs/internal-developer-portal/flows/workflows-tutorials/create-a-service.md b/docs/internal-developer-portal/flows/workflows-tutorials/create-a-service.md index 1c0d0cc44b2..c0e081b9d49 100644 --- a/docs/internal-developer-portal/flows/workflows-tutorials/create-a-service.md +++ b/docs/internal-developer-portal/flows/workflows-tutorials/create-a-service.md @@ -135,8 +135,7 @@ import TabItem from '@theme/TabItem'; # Add catalog-info.yaml location to catalog curl --location 'https://idp.harness.io/<+account.identifier>/idp/api/catalog/locations' \ - --header 'x-api-key: Harness PAT' \ - --header 'Harness-Account: <+account.identifier>' \ + --header 'x-api-key: ' \ --data-raw '{"type":"url","target":"https://github.com/<+pipeline.variables.github_org>/<+pipeline.variables.github_repo>/blob/main/catalog-info.yaml"}' ``` @@ -218,9 +217,8 @@ For eg: `<+pipeline.variables.project_name>` variable is pre-populated by `proje git push --set-upstream https://oauth2:<+pipeline.variables.gitlab_token>@gitlab.com/<+pipeline.variables.gitlab_org>/<+pipeline.variables.gitlab_repo>.git main # Add catalog-info.yaml location to catalog - curl --location 'https://idp.harness.io/ACCOUNT_ID/idp/api/catalog/locations' \ - --header 'x-api-key: Harness PAT' \ - --header 'Harness-Account: Account_ID' \ + curl --location 'https://idp.harness.io//idp/api/catalog/locations' \ + --header 'x-api-key: ' \ --data-raw '{"type":"url","target":"https://gitlab.com/<+pipeline.variables.gitlab_org>/<+pipeline.variables.gitlab_repo>/blob/main/catalog-info.yaml"}' ``` diff --git a/docs/internal-developer-portal/flows/workflows-tutorials/self-service-onboarding-pipeline-tutorial.md b/docs/internal-developer-portal/flows/workflows-tutorials/self-service-onboarding-pipeline-tutorial.md index 
56910b5c768..aa97cf006c8 100644 --- a/docs/internal-developer-portal/flows/workflows-tutorials/self-service-onboarding-pipeline-tutorial.md +++ b/docs/internal-developer-portal/flows/workflows-tutorials/self-service-onboarding-pipeline-tutorial.md @@ -36,9 +36,9 @@ You can also create a new project for the service onboarding pipelines. Eventual :::info -You need to have completed all the [pre-requisites](#pre-requisite) for the below given YAML to work properly +You need to have completed all the [prerequisites](#pre-requisite) for the below given YAML to work properly -Please update the `connectorRef: ` for all the steps it's used, also here we are assuming the git provider to be GitHub please update the `connectorType` for `CreateRepo`, `DirectPush` and `RegisterCatalog` step in case it's other than GitHub. Also under the slack notify step for `token` add the token identifier, you have created above as part of pre-requisites. +Please update the `connectorRef: ` for all the steps it's used, also here we are assuming the git provider to be GitHub please update the `connectorType` for `CreateRepo`, `DirectPush` and `RegisterCatalog` step in case it's other than GitHub. Also under the slack notify step for `token` add the token identifier, you have created above as part of prerequisites. ::: diff --git a/docs/internal-developer-portal/get-started/scorecard-quickstart.md b/docs/internal-developer-portal/get-started/scorecard-quickstart.md index e52d7348287..b6d7baa915f 100644 --- a/docs/internal-developer-portal/get-started/scorecard-quickstart.md +++ b/docs/internal-developer-portal/get-started/scorecard-quickstart.md @@ -27,7 +27,7 @@ Organizations can define custom criteria for readiness, such as ensuring the pre 1. 
Make sure you have the [IDP Admin](https://developer.harness.io/docs/internal-developer-portal/rbac/resources-roles#1-idp-admin) or [IDP Platform Engineer](https://developer.harness.io/docs/internal-developer-portal/rbac/resources-roles#2-idp-platform-engineer) role assigned to you with permissions to view, create and edit [scorecards](https://developer.harness.io/docs/internal-developer-portal/rbac/resources-roles#1-scorecards) -2. Register a software component following the steps mentioned [here](https://developer.harness.io/docs/internal-developer-portal/catalog/register-software-component), if you already have software components registered in your catalog you can move directly to creating a Scorecard. Here's an example of `catalog-info.yaml` we will be using for this tutorial. +2. Register a software component following the steps mentioned [here](https://developer.harness.io/docs/internal-developer-portal/catalog/register-software-component). If you already have software components registered in your catalog you can move directly to creating a Scorecard. Here's an example of `catalog-info.yaml` we will be using for this tutorial.
Example YAML diff --git a/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md b/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md index c7ed833e010..4b0f1b3895e 100644 --- a/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md +++ b/docs/internal-developer-portal/plugins/custom-plugins/add-a-custom-plugin.md @@ -221,8 +221,8 @@ endpoints: This configuration creates a proxy endpoint in IDP. - `target` defines the destination server where the request will be proxies. - `pathRewrite` is used to remove or replace parts of the path in the request URL. -When a request is made to `https://idp.harness.io/{ACCOUNT_ID}/idp/api/proxy/github-api/`, the following happens: -1. The target setting modifies the base URL, transforming `https://idp.harness.io/{ACCOUNT_ID}/idp/api/proxy/github-api/` into `https://api.github.com/api/proxy/github-api/`. +When a request is made to `https://idp.harness.io//idp/api/proxy/github-api/`, the following happens: +1. The target setting modifies the base URL, transforming `https://idp.harness.io//idp/api/proxy/github-api/` into `https://api.github.com/api/proxy/github-api/`. 2. `pathRewrite` replaces `/api/proxy/github-api/` with `/`, so the URL transforms from `https://api.github.com/api/proxy/github-api/` to `https://api.github.com/`. 11. Now in the above proxy authorization is added as a variable `SECRET_TOKEN` and the secret is stored in Harness Secret manager. 
diff --git a/docs/internal-developer-portal/tutorials/how-to-use-catalog-ingestion-api.md b/docs/internal-developer-portal/tutorials/how-to-use-catalog-ingestion-api.md index c303ce2a402..afc7bf5e5d8 100644 --- a/docs/internal-developer-portal/tutorials/how-to-use-catalog-ingestion-api.md +++ b/docs/internal-developer-portal/tutorials/how-to-use-catalog-ingestion-api.md @@ -124,7 +124,6 @@ We have used few pipeline variables in the body, which will be used to take inpu ```sh curl --location 'https://app.harness.io/gateway/v1/catalog/custom-properties' \ ---header 'Harness-Account: <+account.identifier>' \ --header 'Content-Type: application/json' \ --header 'x-api-key: <+secrets.getValue('account.TOKEN_ID')>' \ --data '{ @@ -284,7 +283,6 @@ In the above body the openTicket which got created in JIRA will be added, to kin 20. Under **Optional Configuration** add the **Assertion** as `<+httpResponseCode>==200`. 21. Under **Headers** add the following key value pairs: - - Harness-Account: `<+account.identifier>` - Content-Type: `application/json` - Accept: `*/*` - x-api-key: `<+secrets.getValue('account.TOKEN_ID')>`, add the token ID for your API key. Get your token ID from your Profile @@ -531,7 +529,6 @@ We have used few pipeline variables in the body, which will be used to take inpu ```sh curl --location 'https://app.harness.io/gateway/v1/catalog/custom-properties' \ ---header 'Harness-Account: <+account.identifier>' \ --header 'Content-Type: application/json' \ --header 'x-api-key: <+secrets.getValue('account.TOKEN_ID')>' \ --data '{ @@ -652,7 +649,6 @@ In the above body the openTicket which got created in JIRA will be added, to kin 20. Under **Optional Configuration** add the **Assertion** as `<+httpResponseCode>==200`. 21. Under **Headers** add the following key value pairs: - - Harness-Account: `<+account.identifier>` - Content-Type: `application/json` - Accept: `*/*` - x-api-key: `<+secrets.getValue('account.TOKEN_ID')>`, add the token ID for your API key. 
Get your token ID from your Profile diff --git a/docs/internal-developer-portal/tutorials/register-component-in-catalog.md b/docs/internal-developer-portal/tutorials/register-component-in-catalog.md index ce867100fe9..7e536b5f8a7 100644 --- a/docs/internal-developer-portal/tutorials/register-component-in-catalog.md +++ b/docs/internal-developer-portal/tutorials/register-component-in-catalog.md @@ -249,9 +249,8 @@ Follow these steps to register components using the Harness Catalog API endpoint - Follow the following cURL command with the request body to register your component. The body takes two input at present `type` and `target`. ```cURL -curl --location 'https://idp.harness.io/{ACCOUNT_IDENTIFIER}/idp/api/catalog/locations' \ ---header 'x-api-key: {X_API_KEY}' \ ---header 'Harness-Account: {ACCOUNT_IDENTIFIER}' +curl --location 'https://idp.harness.io//idp/api/catalog/locations' \ +--header 'x-api-key: ' \ --data-raw '{"type":"url","target":"https://github.com/harness-community/idp-samples/blob/main/catalog-info.yaml"}' ``` @@ -291,7 +290,7 @@ function usage { while getopts a:x:u:l:h flag do case "${flag}" in - a) ACCOUNT_IDENTIFIER=${OPTARG};; + a) ACCOUNT_ID=${OPTARG};; x) X_API_KEY=${OPTARG};; u) BEARER_AUTHORIZATION=${OPTARG};; l) CATALOG_LOCATIONS=${OPTARG};; @@ -306,16 +305,16 @@ CATALOG_LOCATION_REGISTER_DATA='{"type":"url","target":"CATALOG_LOCATION_TARGET" for LOCATION in ${CATALOG_LOCATIONS//,/ } do echo -e "\n--------" - echo "Registering $LOCATION catalog location in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Registering $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" POST_DATA=${CATALOG_LOCATION_REGISTER_DATA/CATALOG_LOCATION_TARGET/$LOCATION} - RESULT_HTTP_CODE=$(curl --write-out %{http_code} -s --output /dev/null -H "Content-Type: application/json" -H "Harness-Account: ${ACCOUNT_IDENTIFIER}" -H "x-api-key: ${X_API_KEY}" -H "Authorization: Bearer ${BEARER_AUTHORIZATION}" -X POST --data "${POST_DATA}" 
"https://idp.harness.io/${ACCOUNT_IDENTIFIER}/idp/api/catalog/locations") + RESULT_HTTP_CODE=$(curl --write-out %{http_code} -s --output /dev/null -H "Content-Type: application/json" -H "Harness-Account: $" -H "x-api-key: ${X_API_KEY}" -H "Authorization: Bearer ${BEARER_AUTHORIZATION}" -X POST --data "${POST_DATA}" "https://idp.harness.io/$/idp/api/catalog/locations") if [[ "$RESULT_HTTP_CODE" -ne 201 ]] ; then - echo "Failed registering $LOCATION catalog location in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Failed registering $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" else - echo "Successfully registerd $LOCATION catalog location in Harness IDP account $ACCOUNT_IDENTIFIER" + echo "Successfully registered $LOCATION catalog location in Harness IDP account $ACCOUNT_ID" fi echo "--------" diff --git a/docs/internal-developer-portal/tutorials/service-onboarding-with-idp-stage.md b/docs/internal-developer-portal/tutorials/service-onboarding-with-idp-stage.md index 922e8b20307..02d4821dae3 100644 --- a/docs/internal-developer-portal/tutorials/service-onboarding-with-idp-stage.md +++ b/docs/internal-developer-portal/tutorials/service-onboarding-with-idp-stage.md @@ -42,7 +42,7 @@ You can also create a new project for the service onboarding pipelines. Eventual You need to have completed all the steps under **[PreRequisites](#prerequisites)** for the below given YAML to work properly -Please update the `connectorRef: ` for all the steps it's used, also here we are assuming the git provider to be GitHub please update the `connectorType` for `CreateRepo`, `DirectPush` and `RegisterCatalog` step in case it's other than GitHub. Also under the slack notify step for `token` add the token identifier, you have created above as part of pre-requisites. 
+Please update the `connectorRef: ` for all the steps it's used, also here we are assuming the git provider to be GitHub please update the `connectorType` for `CreateRepo`, `DirectPush` and `RegisterCatalog` step in case it's other than GitHub. Also under the slack notify step for `token` add the token identifier, you have created above as part of prerequisites. ::: diff --git a/docs/platform/approvals/adding-harness-approval-stages.md b/docs/platform/approvals/adding-harness-approval-stages.md index 22bc055fa1e..7992b11a840 100644 --- a/docs/platform/approvals/adding-harness-approval-stages.md +++ b/docs/platform/approvals/adding-harness-approval-stages.md @@ -295,12 +295,6 @@ For example, in a subsequent step's **Conditional Execution** settings, you coul You can now specify allowed values for the **Manual Approval** step. -:::note - -Currently, allowed values for custom approval inputs is behind the feature flag `CDS_ENABLE_CONSTRAINTS_ON_APPROVAL_INPUTS`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. - -::: - Here's an example YAML file showcasing various configurations you can use for allowed values. #### YAML Example diff --git a/docs/platform/connectors/cloud-providers/ref-cloud-providers/aws-connector-settings-reference.md b/docs/platform/connectors/cloud-providers/ref-cloud-providers/aws-connector-settings-reference.md index 3ec9edcccf5..cce8faf2002 100644 --- a/docs/platform/connectors/cloud-providers/ref-cloud-providers/aws-connector-settings-reference.md +++ b/docs/platform/connectors/cloud-providers/ref-cloud-providers/aws-connector-settings-reference.md @@ -743,16 +743,7 @@ For more strategies, go to [Exponential Backoff And Jitter](https://aws.amazon.c Limitations - - - OIDC Deployment Support - - The following deployments are not supported using the OIDC connector: -
    -
  • AWS Serverless Lambda deployments
  • -
- - + OIDC Plugin-based Support @@ -789,18 +780,6 @@ For more strategies, go to [Exponential Backoff And Jitter](https://aws.amazon.c - - OIDC Manifest and Artifact Support - - The following artifact and manifest sources are not supported using the OIDC connector: -
    -
  • AMI Artifact
  • -
  • ECR Artifact
  • -
  • S3 Artifact
  • -
  • S3 Manifest
  • -
- - diff --git a/docs/platform/delegates/manage-delegates/select-delegates-with-selectors.md b/docs/platform/delegates/manage-delegates/select-delegates-with-selectors.md index d2f802b00a6..ac019bbeb9f 100644 --- a/docs/platform/delegates/manage-delegates/select-delegates-with-selectors.md +++ b/docs/platform/delegates/manage-delegates/select-delegates-with-selectors.md @@ -8,7 +8,7 @@ helpdocs_is_private: false helpdocs_is_published: true --- -Harness runs tasks by using Harness Delegate to connect your environment to resources. Harness selects the best delegate based on previous use or round-robin selection. For more information, go to [How Harness Manager picks delegates](/docs/platform/delegates/delegate-concepts/delegate-overview.md#how-harness-manager-picks-delegates). +Harness runs tasks by using Harness Delegate to connect your environment to resources. Harness selects the best delegate based on the current number of tasks getting executed on delegates, delegate executing the least number of tasks will be selected first. For more information, go to [How Harness Manager picks delegates](/docs/platform/delegates/delegate-concepts/delegate-overview.md#how-harness-manager-picks-delegates). In some cases, you might want Harness to select specific delegates. In these cases, you can use the **Delegate Selector** settings in pipelines, connectors, and so on, with corresponding delegate tags. diff --git a/docs/platform/git-experience/git-experience-overview.md b/docs/platform/git-experience/git-experience-overview.md index 2008043f693..434f22e0b85 100644 --- a/docs/platform/git-experience/git-experience-overview.md +++ b/docs/platform/git-experience/git-experience-overview.md @@ -51,7 +51,11 @@ You can save the following Harness resources in Git using Harness Git Experience * Services * Environment * Infrastructure Definitions -* Policies +* OPA Policies + +:::info note +Support of Git Experience for OPA policies is behind the FF `OPA_IMPORT_FROM_GIT`. 
Please contact [Harness Support](mailto:support@harness.io) to enable this feature. +::: :::info note diff --git a/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-pipeline.md b/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-pipeline.md index 9b7e13a7c83..61f9bbddce6 100644 --- a/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-pipeline.md +++ b/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-pipeline.md @@ -10,7 +10,7 @@ helpdocs_is_published: true You can enforce policies in two ways: -- **Account, Org, and** **Project-specific:** you can create the policy and apply it to all Pipelines in your Account, Org, and Project. The policy is evaluated on Pipeline-level events like On Run and On Save. Go to [Harness Governance Quickstart](/docs/platform/governance/policy-as-code/harness-governance-quickstart). +- **Account, Org, and** **Project-specific:** you can create the policy and apply it to all Pipelines in your Account, Org, and Project. The policy is evaluated on Pipeline-level events like On Run, On Save and On Step Start. Go to [Harness Governance Quickstart](/docs/platform/governance/policy-as-code/harness-governance-quickstart). - **Stage-specific:** you can add a Policy step, add a new/existing Policy Set to it, and then provide a JSON payload to evaluate. - The policy is evaluated whenever the Pipeline reaches the Policy step. - Policy evaluation can be performed on data generated when the Pipeline is running, such as resolved expressions. @@ -206,6 +206,127 @@ As you can see in the above **Policy** step, in **Payload**, we reference the ou Now when we run the Pipeline, the Policy Step will evaluate the JSON in Payload and see that it passes. +### Runtime Inputs Evaluation + +:::info note +Currently, this feature is behind the feature flag `PIPE_IS_PRE_STEP_OPA_POLICY_EVALUATION_ENABLED`. 
Contact [Harness Support](mailto:support@harness.io) to enable the feature. +::: + +Runtime inputs are evaluated when the step starts. Consider the following example: + +Policy:- + +``` +package shellscript + +is_docker_delegate_present { + some i + input.spec.delegateSelectors[i].delegateSelectors != "docker-delegate" +} + +deny[message] { + is_docker_delegate_present + message = "Usage of delegate other than 'docker-delegate' is not allowed." +} +``` +Pipeline YAML: + +```yaml +pipeline: + name: PipelineOPA + identifier: PipelineOPA + projectIdentifier: Krishika_test + orgIdentifier: default + stages: + - stage: + name: s1 + identifier: s1 + type: Custom + spec: + execution: + steps: + - step: + type: ShellScript + name: ShellScript_1 + identifier: ShellScript_1 + spec: + shell: Bash + delegateSelectors: + - <+pipeline.variables.Delegate_selector> + timeout: 10m + variables: + - name: Delegate_selector + type: String + required: true + value: <+input> +``` + +Let's suppose we give input value test-delegate, the evaluation will fail, and the JSON payload received will be: + +```json +{ + "identifier": "ShellScript_1", + "metadata": { + "action": "onstepstart", + "principalIdentifier": "", + "principalType": "", + "projectMetadata": { + "description": "", + "identifier": "Krishika_test", + "modules": [ + "CD", + "CI", + "CV", + "CF", + "CE", + "STO", + "CHAOS", + "SRM", + "IACM", + "CET", + "IDP", + "CODE", + "SSCA", + "CORE", + "PMS", + "TEMPLATESERVICE", + "SEI", + "HAR" + ], + "name": "Krishika_test", + "orgIdentifier": "default", + "tags": {} + }, + "timestamp": 1739868956, + "type": "pipeline" + }, + "name": "ShellScript_1", + "spec": { + "delegateSelectors": [ + { + "delegateSelectors": "test-delegate", + "origin": "step" + } + ], + "environmentVariables": {}, + "executionTarget": {}, + "outputVariables": {}, + "secretOutputVariables": [], + "shell": "Bash", + "source": { + "spec": { + "script": "sleep 10" + }, + "type": "Inline", + "uuid": 
"Qo-zXU4STBehlORMY3po_A" + } + }, + "timeout": "10m", + "type": "ShellScript", + "uuid": "bjlvBvxVS0qWkEO3vEMHzg" +} +``` + ### Policy Step Expressions You can use the following Harness expressions to output Policy Step status in a [Shell Script](/docs/continuous-delivery/x-platform-cd-features/cd-steps/utilities/shell-script-step) step: diff --git a/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-service-account.md b/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-service-account.md new file mode 100644 index 00000000000..ff683564888 --- /dev/null +++ b/docs/platform/governance/policy-as-code/add-a-governance-policy-step-to-a-service-account.md @@ -0,0 +1,115 @@ +--- +title: Use Harness Policy As Code for Service Account. +description: Describes steps to add policies to Service Account. +sidebar_position: 6 +helpdocs_topic_id: 4kuokatvyw +helpdocs_category_id: zoc8fpiifm +helpdocs_is_private: false +helpdocs_is_published: true +--- + +Harness provides governance using Open Policy Agent (OPA), Policy Management, and Rego policies. + +You can create a policy and apply it to all [Service Accounts](../../role-based-access-control/add-and-manage-service-account). The policy is evaluated during Service account-level events, such as 'On Save,' which happens when a Service account is created or updated. For more details, check out the [Harness Governance Quickstart](/docs/platform/governance/policy-as-code/harness-governance-quickstart). + +### Prerequisites + +* [Harness Governance Overview](/docs/platform/governance/policy-as-code/harness-governance-overview) +* [Harness Governance Quickstart](/docs/platform/governance/policy-as-code/harness-governance-quickstart) +* Policies use OPA authoring language Rego. For more information, go to [OPA Policy Authoring](https://academy.styra.com/courses/opa-rego). + +### Step 1: Add a Policy + +1. In Harness, go to **Account Settings** → **Policies** → **New Policy**. 
+ + ![opa-navigation](../../governance/policy-as-code/static/opa-navigation.gif) + +2. The **New Policy** settings will appear. + + Enter a **Name** for your policy → click **Apply**. + + ![opa-create-policy](../../governance/policy-as-code/static/opa-create-policy.png) + +3. Next, add your Rego policy. + + For example, add a policy to prevent users from creating a service account with names having "success" in it, like 'pipeline-success' or 'successfuldeploy' and so on. + + ``` + package opaexamplepolicy + + deny[msg] { + contains(input.serviceAccount.name, "success") + #input.serviceAccount.name == "word" + msg = sprintf("CONTAINS Service account name '%s' contains the forbidden word 'success'.", [input.serviceAccount.name]) + } + ``` + click **Save** + + ![opa-save-policy](../../governance/policy-as-code/static/opa-save-policy.png) + +### Step 2: Add the Policy to a Policy Set + +After creating your policy, add it to a Policy Set before applying it to your Service Account. + +1. In **Policies**, click **Policy Sets** . + + ![opa-select-policy-sets](../../governance/policy-as-code/static/opa-select-policy-sets.png) + +2. Click **New Policy Set**, Enter a **Name** and **Description** for the Policy Set. + + In **Entity type**, select **Service Account**. + + ![opa-service-setting](../../governance/policy-as-code/static/opa-name-description-entitiy-setup.png) + + In **On what event should the Policy Set be evaluated**, select **On save.** → **Continue**. + + :::note Important + + Existing service accounts are not automatically updated with policies. Policies are applied only when they are created or updated. + ::: + +### Step 3: Select Policy evaluation criteria + +1. Click **Add Policy** in **Policy evaluation criteria**. + + Select the policy from the list. + + ![select-opa-policy](../../governance/policy-as-code/static/opa-select-policy.png)  + + +2. Choose the severity and action for policy violations. 
+ + You can select one of the following + + * **Warn & continue** - You will receive a warning if the policy is not met when the Service Account is evaluated, but the Service Account will be saved and you may proceed. + * **Error and exit** - You'll get an error and be exited without saving the Service Account if the policy isn't met when the Service Account is examined. + + ![opa-apply-severity](../../governance/policy-as-code/static/opa-apply.png) + + Click **Apply**, and then click **Finish**. + +### Step 4: Policy Enforcement. + +1. Now, your Policy Set is automatically set to Enforced, to make it unenforced, toggle off the **Enforced** button. + + ![](../../governance/policy-as-code/static/opa-un-enforcement.gif) + +### Step 5: Apply a Policy to a Service Account + +- After creating your Policy Set and adding policies, apply it to a service account. + + You can add a [Service Account](/docs/platform/role-based-access-control/add-and-manage-service-account/#create-a-service-account) from any module in your Project in Project setup, or in your Organization, or Account Resources. + +1. "Go to **Account Settings** → **Access Control** → **Service Account** + + click **New Service Account**. + + Try using a name denied by your policy, e.g., **"successfuldeploy"**." + + Based on your selection in the Policy Evaluation criteria, you will either receive a warning or an error. + + ![](../../governance/policy-as-code/static/opa-denied.png) + +### Explore More on Harness Policy as code. 
+ +* [Harness Policy As Code Overview](/docs/feature-flags/troubleshoot-ff/harness-policy-engine) \ No newline at end of file diff --git a/docs/platform/governance/policy-as-code/harness-governance-overview.md b/docs/platform/governance/policy-as-code/harness-governance-overview.md index d933544e346..ab811aee5e2 100644 --- a/docs/platform/governance/policy-as-code/harness-governance-overview.md +++ b/docs/platform/governance/policy-as-code/harness-governance-overview.md @@ -162,12 +162,19 @@ Soon, policies can be applied to more entities, such as Connectors, Services, En ### Pipelines +:::info note +Currently, **On Step Start** pipeline event is behind the feature flag `PIPE_IS_PRE_STEP_OPA_POLICY_EVALUATION_ENABLED`. Contact [Harness Support](mailto:support@harness.io) to enable the feature. +::: + Policies are evaluated against Harness Pipelines. The input payload is an expanded version of the Pipeline YAML, including expanded references of stages/steps.  Policy Sets can be configured to be enforced automatically on these Pipeline events: * **On Save:** Policies are evaluated when the Pipeline is saved. * **On Run:** Policy sets are evaluated after the preflight checks. +* **On Step Start:** Policy sets are evaluated when the step execution starts. 
+ +![](./static/pipeline-events-opa.png) Severities: diff --git a/docs/platform/governance/policy-as-code/sample-policy-use-case.md b/docs/platform/governance/policy-as-code/sample-policy-use-case.md index b73c5aed1ba..4d23825a49e 100644 --- a/docs/platform/governance/policy-as-code/sample-policy-use-case.md +++ b/docs/platform/governance/policy-as-code/sample-policy-use-case.md @@ -24,6 +24,8 @@ This topic provides sample policies you can use in policy steps and on pipeline- - [Policy samples](#policy-samples) + - [Root policy samples](#root-policy-samples) + - [Evaluate secrets in pipeline and only allow secrets that are at the account level](#evaluate-secrets-in-pipeline-and-only-allow-secrets-that-are-at-the-account-level) - [Connector policy samples](#connector-policy-samples) - [Enforce authorization type while configuring a Kubernetes connector](#enforce-authorization-type-while-configuring-a-kubernetes-connector) - [Enforce access control for a specific connector at runtime while configuring the pipeline](#enforce-access-control-for-a-specific-connector-at-runtime-while-configuring-the-pipeline) @@ -64,6 +66,29 @@ This topic provides sample policies you can use in policy steps and on pipeline- ## Policy samples +### Root policy samples +* [Evaluate secrets in pipeline and only allow secrets that are at the account level](#evaluate-secrets-in-pipeline-and-only-allow-secrets-that-are-at-the-account-level) +#### Evaluate secrets in pipeline and only allow secrets that are at the account level +This rule is set in place to ensure the pipeline yaml does not include secrets at the project level, but will allow secrets at the account level. 
+ +```json +package policy + +# Rule to check if any value in the input contains the secret substring +has_secret_value { + walk(input, [_, value]) + is_string(value) + contains(value, "<+secrets.getValue") + not contains(value,"<+secrets.getValue(\"account.") +} + +# Main denial rule +deny[msg] { + has_secret_value + msg := "Found potentially sensitive value containing 'secret.getValues' in the input" +} +``` + ### Connector policy samples * [Enforce authorization type while configuring a Kubernetes connector](#enforce-authorization-type-while-configuring-a-kubernetes-connector) diff --git a/docs/platform/governance/policy-as-code/static/opa-apply.png b/docs/platform/governance/policy-as-code/static/opa-apply.png new file mode 100644 index 00000000000..0b521f46c7f Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-apply.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-create-policy.png b/docs/platform/governance/policy-as-code/static/opa-create-policy.png new file mode 100644 index 00000000000..40c48b0fd05 Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-create-policy.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-denied.png b/docs/platform/governance/policy-as-code/static/opa-denied.png new file mode 100644 index 00000000000..6028cf7d31f Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-denied.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-name-description-entitiy-setup.png b/docs/platform/governance/policy-as-code/static/opa-name-description-entitiy-setup.png new file mode 100644 index 00000000000..7cbc4e50492 Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-name-description-entitiy-setup.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-navigation.gif b/docs/platform/governance/policy-as-code/static/opa-navigation.gif new file mode 
100644 index 00000000000..f0d07da70e1 Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-navigation.gif differ diff --git a/docs/platform/governance/policy-as-code/static/opa-save-policy.png b/docs/platform/governance/policy-as-code/static/opa-save-policy.png new file mode 100644 index 00000000000..99aa528692a Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-save-policy.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-select-policy-sets.png b/docs/platform/governance/policy-as-code/static/opa-select-policy-sets.png new file mode 100644 index 00000000000..560d0f6332f Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-select-policy-sets.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-select-policy.png b/docs/platform/governance/policy-as-code/static/opa-select-policy.png new file mode 100644 index 00000000000..0fd23b0bf1d Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-select-policy.png differ diff --git a/docs/platform/governance/policy-as-code/static/opa-un-enforcement.gif b/docs/platform/governance/policy-as-code/static/opa-un-enforcement.gif new file mode 100644 index 00000000000..be9d08950f7 Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/opa-un-enforcement.gif differ diff --git a/docs/platform/governance/policy-as-code/static/pipeline-events-opa.png b/docs/platform/governance/policy-as-code/static/pipeline-events-opa.png new file mode 100644 index 00000000000..5156084f119 Binary files /dev/null and b/docs/platform/governance/policy-as-code/static/pipeline-events-opa.png differ diff --git a/docs/platform/service-discovery/customize-agent.md b/docs/platform/service-discovery/customize-agent.md new file mode 100644 index 00000000000..47af0ec0e92 --- /dev/null +++ b/docs/platform/service-discovery/customize-agent.md @@ -0,0 +1,65 @@ +--- +title: Customize Discovery 
Agent +sidebar_position: 3 +--- + +This topic describes how to create a discovery agent and configure it to discover services, edit it, and delete it. + +### Create Discovery Agent + +1. Navigate to **Project Settings** and select **Discovery**. + + ![](./static/customize-agent/discovery.png) + +2. Click **New Discovery Agent**. + + ![](./static/customize-agent/select-agent.png) + +3. Specify the following variables: + + - **Environment** (this is created earlier, and is the place where you install infrastructure) + - **Infrastructure** (this is created earlier, where your target cluster is housed). + - Specify **Discovery Agent Name**. + - For the **Namespace**, provide the dedicated namespace you created in your target cluster for Harness. This is the namespace where Harness runs chaos runners, discovers services and executes chaos experiments. + + ![create agent](./static/customize-agent/create-agent-1.png) + +4. Select one of the following based on your requirement. + + - **Namespace Selector**: Selects one or more namespaces based on [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). + - **Exclusion**: Select this option to prevent Harness from discovering certain namespaces. Provide the name of the namespaces to be excluded. + - **Inclusion**: Select this option to allow Harness to discover specific namespaces. Provide the name of the namespaces to be included. + + ![exclusion settings](./static/customize-agent/exclusion-2.png) + +5. If your cluster control plane node has a taint, **Add Node Selector** so that Harness launches its pod only on the worker nodes. + + ![node selector](./static/customize-agent/node-selector-3.png) + +6. Populate the **Use this Service Account** field with the Service Account name that you created in the dedicated namespace in your target cluster for Harness. + + ![service account](./static/customize-agent/service-acc-4.png) + +7. 
Click **Create New Discovery Agent** on the top right corner of the page. + + ![](./static/customize-agent/add-details-discovery.png) + +### Edit Discovery Agent + +1. To edit a discovery agent, navigate to the agent (that you created earlier) and click **Edit**. Make the necessary changes to the required fields. + + ![](./static/edit-dis-agent-1.png) + +2. Select **Update Discovery Agent**. + + ![](./static/edit-details-discovery.png) + +### Delete Discovery Agent + +1. To delete a discovery agent, navigate to the agent you want to delete and select **Delete**. + + ![](./static/delete-1.png) + +2. Select **Delete**. + + ![](./static/confirm-2.png) \ No newline at end of file diff --git a/docs/platform/service-discovery/service-discovery.md b/docs/platform/service-discovery/service-discovery.md index a07b7534f0f..4b88c556cc9 100644 --- a/docs/platform/service-discovery/service-discovery.md +++ b/docs/platform/service-discovery/service-discovery.md @@ -1,5 +1,4 @@ --- -id: service-discovery sidebar_position: 1 title: Service Discovery redirect_from: @@ -47,38 +46,6 @@ You can leverage all the [permissions mentioned](/docs/chaos-engineering/securit When you are onboarding, one of the steps involves discovering services. Harness creates the discovery agent that automatically discovers services for your application. -If you want to customize the discovery agent, follow the steps below. +## Next Steps -### Customize discovery agent - -1. To customize the discovery agent, navigate to **Project Settings** and select **Discovery**. - - ![](./static/discovery.png) - -2. Click **New Discovery Agent**. - - ![](./static/select-agent.png) - -3. Select an **environment**, **infrastructure**, **Discovery Agent Name** and **namespace**. The next step (optional) requires information such as node selector, blacklisted namespaces, and period of detecting the services. Select **Create New Discovery Agent**. - - ![](./static/add-details-discovery.png) - -### Edit discovery agent - -1. 
To edit a discovery agent, navigate to the agent and click **Edit**. Make the necessary changes to the required fields. - - ![](./static/edit-dis-agent-1.png) - -2. Select **Update Discovery Agent**. - - ![](./static/edit-details-discovery.png) - -### Delete discovery agent - -1. To delete a discovery agent, navigate to the agent you want to delete and select **Delete**. - - ![](./static/delete-1.png) - -2. Select **Delete**. - - ![](./static/confirm-2.png) \ No newline at end of file +- [Customize Discovery Agent](/docs/platform/service-discovery/customize-agent) diff --git a/docs/platform/service-discovery/static/add-details-discovery.png b/docs/platform/service-discovery/static/add-details-discovery.png deleted file mode 100644 index 4455c5ab7df..00000000000 Binary files a/docs/platform/service-discovery/static/add-details-discovery.png and /dev/null differ diff --git a/docs/platform/service-discovery/static/customize-agent/add-details-discovery.png b/docs/platform/service-discovery/static/customize-agent/add-details-discovery.png new file mode 100644 index 00000000000..64b371edc45 Binary files /dev/null and b/docs/platform/service-discovery/static/customize-agent/add-details-discovery.png differ diff --git a/docs/platform/service-discovery/static/customize-agent/create-agent-1.png b/docs/platform/service-discovery/static/customize-agent/create-agent-1.png new file mode 100644 index 00000000000..31a740c81d4 Binary files /dev/null and b/docs/platform/service-discovery/static/customize-agent/create-agent-1.png differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/discovery.png b/docs/platform/service-discovery/static/customize-agent/discovery.png similarity index 100% rename from docs/chaos-engineering/use-harness-ce/service-discovery/static/discovery.png rename to docs/platform/service-discovery/static/customize-agent/discovery.png diff --git a/docs/platform/service-discovery/static/customize-agent/exclusion-2.png 
b/docs/platform/service-discovery/static/customize-agent/exclusion-2.png new file mode 100644 index 00000000000..9da7cad4714 Binary files /dev/null and b/docs/platform/service-discovery/static/customize-agent/exclusion-2.png differ diff --git a/docs/platform/service-discovery/static/customize-agent/node-selector-3.png b/docs/platform/service-discovery/static/customize-agent/node-selector-3.png new file mode 100644 index 00000000000..ef33fc52bd3 Binary files /dev/null and b/docs/platform/service-discovery/static/customize-agent/node-selector-3.png differ diff --git a/docs/chaos-engineering/use-harness-ce/service-discovery/static/select-agent.png b/docs/platform/service-discovery/static/customize-agent/select-agent.png similarity index 100% rename from docs/chaos-engineering/use-harness-ce/service-discovery/static/select-agent.png rename to docs/platform/service-discovery/static/customize-agent/select-agent.png diff --git a/docs/platform/service-discovery/static/customize-agent/service-acc-4.png b/docs/platform/service-discovery/static/customize-agent/service-acc-4.png new file mode 100644 index 00000000000..5c78cc032a9 Binary files /dev/null and b/docs/platform/service-discovery/static/customize-agent/service-acc-4.png differ diff --git a/docs/platform/service-discovery/static/discovery.png b/docs/platform/service-discovery/static/discovery.png deleted file mode 100644 index 60f28f5b64c..00000000000 Binary files a/docs/platform/service-discovery/static/discovery.png and /dev/null differ diff --git a/docs/platform/service-discovery/static/select-agent.png b/docs/platform/service-discovery/static/select-agent.png deleted file mode 100644 index 63be9c0260b..00000000000 Binary files a/docs/platform/service-discovery/static/select-agent.png and /dev/null differ diff --git a/docs/platform/service-discovery/user-defined-service-account.md b/docs/platform/service-discovery/user-defined-service-account.md index bc8cb26ea9e..23361876f0b 100644 --- 
a/docs/platform/service-discovery/user-defined-service-account.md +++ b/docs/platform/service-discovery/user-defined-service-account.md @@ -1,6 +1,6 @@ --- title: Restrict Discovery to Specific Namespace(s) -sidebar_position: 3 +sidebar_position: 5 description: Restrict Discovery to Single and Multiple Namespaces. redirect_from: - /docs/chaos-engineering/concepts/explore-concepts/service-discovery/user-defined-service-account diff --git a/docs/security-testing-orchestration/sto-techref-category/snyk/snyk-scanner-reference.md b/docs/security-testing-orchestration/sto-techref-category/snyk/snyk-scanner-reference.md index cede77ec467..f8930422f46 100644 --- a/docs/security-testing-orchestration/sto-techref-category/snyk/snyk-scanner-reference.md +++ b/docs/security-testing-orchestration/sto-techref-category/snyk/snyk-scanner-reference.md @@ -216,36 +216,98 @@ import StoSettingSettings from '../shared/step_palette/all/_settings.md'; -### Show original issue severities overridden by Snyk security policies +## Show original issue severities overridden by Snyk security policies -You can configure a Snyk step to show the original score when a [Snyk Enterprise security policy](https://docs.snyk.io/enterprise-configuration/policies/security-policies) overrode the severity for an issue coming from the `snyk` CLI. You can see this information in **Issue Details**. +Harness originally will categorize a scan utilizing the CVSS scores that are provided, and using it to categorize vulnerabilities into **Critical, High, Medium, and Low** categories - +With the Snyk Step, or a [Custom Scan Step](https://developer.harness.io/docs/security-testing-orchestration/custom-scanning/custom-scan-reference/) utilizing a self installed Snyk runner, you can configure Harness to utilize the severity scores in a [Snyk Enterprise security policy](https://docs.snyk.io/enterprise-configuration/policies/security-policies), and provide an override severity for an issue. 
This can be adjusted from your Snyk results, or it can also come from the `snyk` CLI. You can see this information in **Issue Details**. - +![](../static/sto-7041-override-in-security-tests.png) -This feature is supported for `snyk container` and `snyk test` JSON output that properly reflects an override. - -To enable this behavior, add the setting `ingest_tool_severity` and set it to `true` in the Snyk ingestion step. With this setting enabled, the Snyk step processes the relevant data for issues with overridden severities. +Harness will also display the information that the severity was overridden, and the original score, so long as the report follows the Snyk required format - - +![](../static/sto-6927-override-popup-for-snyk.png) - +This feature is supported for `snyk container` and `snyk test` JSON output that properly reflects an override. - - +### Requirement + +- [STO Plugin version 1.56.x or higher](https://hub.docker.com/r/harness/sto-plugin/tags) + +### JSON Output Format +Please note that the override format must follow the Snyk documented process, such as [in the following Snyk example.](https://docs.snyk.io/supported-languages-package-managers-and-frameworks/c-c++/snyk-cli-for-open-source-c++-scans) + +The formatting should contain the following: +- A value, `originalSeverity`, needs to be defined, and should contain the original severity value +- The `severity` value can now be defined with the new severity value. 
Below is a portion of an adjusted vulnerability sample: + +``` +{ + "vulnerabilities": [ + { + "id": "SNYK-JS-POSTCSS-5926692", + "title": "Improper Input Validation", + "CVSSv3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:L/A:N", + "credit": [ + "Unknown" + ], + "semver": { + "vulnerable": [ + "<8.4.31" + ] + }, + "exploit": "Not Defined", + "fixedIn": [ + "8.4.31" + ], + "patches": [], + "insights": { + "triageAdvice": null + }, + "language": "js", + "severity": "critical", + "originalSeverity": "medium", + "cvssScore": 5.3, + "functions": [], + "malicious": false, + "isDisputed": false, + "moduleName": "postcss", + [...] + } + ] + } +``` + + +### Enable Severity Override +To enable this behavior, add the setting `ingest_tool_severity` and set it to `true` in the Snyk ingestion step or Custom Scan Step. With this setting enabled, the Snyk step processes the relevant data for issues with overridden severities. + + + + Add `ingest_tool_severity` to Snyk step keys + ![](../static/sto-7041-add-setting-in-visual-editor.png) + Add `ingest_tool_severity` to Custom Scan step keys + ![](./static/snyk-customstage-Ingesttoolseverity.png) + - ``` yaml - - step: - type: Snyk - spec: - settings: - ingest_tool_severity: "true" - ``` - - - + **Snyk Step Declaration** + ``` yaml + - step: + type: Snyk + spec: + settings: + ingest_tool_severity: "true" + ``` + **Custom Scan Step Declaration** + ``` yaml + - step: + type: Security + spec: + settings: + ingest_tool_severity: "true" + ``` + + ### Additional Configuration diff --git a/docs/security-testing-orchestration/sto-techref-category/snyk/static/snyk-customstage-Ingesttoolseverity.png b/docs/security-testing-orchestration/sto-techref-category/snyk/static/snyk-customstage-Ingesttoolseverity.png new file mode 100644 index 00000000000..8c7de8d6c5e Binary files /dev/null and b/docs/security-testing-orchestration/sto-techref-category/snyk/static/snyk-customstage-Ingesttoolseverity.png differ diff --git 
a/docs/software-engineering-insights/sei-administration/harness-sei-sli.md b/docs/software-engineering-insights/sei-administration/harness-sei-sli.md new file mode 100644 index 00000000000..a4440c313f4 --- /dev/null +++ b/docs/software-engineering-insights/sei-administration/harness-sei-sli.md @@ -0,0 +1,39 @@ +--- +title: Harness SEI SLIs +description: How we compute uptime for different Harness SEI Services +sidebar_label: Harness SEI SLIs +sidebar_position: 20 +--- + +This is a Harness operational reference guide for all the Service Level Indicators (SLIs) across the Harness Software Engineering Insights module. Our SLO gets calculated based on these user centric SLIs. + +## Weightage factor + +Harness operations apply a weighting factor to the SLIs post any incidents. + +- Major outage = 100% of the downtime hit +- Partial = 30% of the downtime hit +- Degraded performance = None. This is because our stance is that a degraded performance does impact the user experience but it’s not technically downtime. + +A production incident, commonly known as an "incident," is an unexpected event or problem that arises within our live production environments, resulting in either complete or partial service disruptions. In the case of a partial incident, it renders one or more functions of a module nonfunctional or inaccessible. All production incidents are posted in our status page (https://status.harness.io) and our users can subscribe to the feeds from this site to get notified. + +## SEI SLIs + +| **SLI** | **Threshold** | **Availability**| +|-------------------------------------------|-----------------|-----------------------------------------| +| APIs Error rate | More than 5% over 5 min rolling window | Major outage | +| API Response Time | 95th percentile: > 2s over 5 min rolling window | Degraded performance| +| Ingestion & data processing delay | Delay of more than 72 hours for the latest data to appear on the dashboard.
This threshold excludes delays caused by pending customer actions. In the event of failures, processing historical data may take additional time depending on the volume of data that needs to be backfilled. | Degraded performance | + +:::info Third-party integrations and ingestion impact + +Changes, upgrades, or issues in third-party integrations can sometimes lead to delays in data ingestion and processing. + +While these delays do not indicate an internal failure, they can affect the timeliness of data availability. To address this, our system will send in-product notifications to keep users informed, ensuring transparency and reducing any unnecessary concerns. + +::: + + + + + diff --git a/docs/software-engineering-insights/setup-sei/configure-integrations/sei-integrations-overview.md b/docs/software-engineering-insights/setup-sei/configure-integrations/sei-integrations-overview.md index 44de7421a2a..e3b686682d3 100644 --- a/docs/software-engineering-insights/setup-sei/configure-integrations/sei-integrations-overview.md +++ b/docs/software-engineering-insights/setup-sei/configure-integrations/sei-integrations-overview.md @@ -13,7 +13,7 @@ Interactions with third-party systems on Harness SEI are managed through the SEI You can use any application-specific integration supported by Harness SEI to integrate your tool with SEI. If you need to connect a CI/CD tool that currently lacks integration support from SEI, you have the option to create a custom CI/CD integration separately. :::info -After adding an integration, it may take up to 24 hours for data to fully sync to SEI. During this time, any widgets you set up in Insights may not display data until the sync is complete. +After adding an integration, it may take up to 24-72 hours for data to fully sync to SEI. During this time, any widgets you set up in dashboards may not display data until the sync is complete. 
::: ![](./static/integrations-overview.png) diff --git a/docs/software-supply-chain-assurance/artifact/verify-signed-artifacts.md b/docs/software-supply-chain-assurance/artifact/verify-signed-artifacts.md index c6627aafde3..0eaac9ef292 100644 --- a/docs/software-supply-chain-assurance/artifact/verify-signed-artifacts.md +++ b/docs/software-supply-chain-assurance/artifact/verify-signed-artifacts.md @@ -27,13 +27,7 @@ The artifact verification step ensures the authenticity of the signed artifact b The Artifact Verification step pulls the `.sig` file from the artifact registry and verifies it with the corresponding public key. In the artifact signing step, if you chosen not to push the `.sig` file to the registry, then for the artifact verification `.sig` file will instead be pulled from the Harness database . This process ensures that the artifact was signed by a trusted entity, thereby confirming its integrity and authenticity. -You can search for **Artifact Verification** and add it to either the **Build** or **Security** stage of a Harness pipeline - -:::note - -At present, Harness does not support artifact verification in the deployment stage, However this is part of our roadmap. - -::: +You can search for **Artifact Verification** and add it to either the **Build** , **Deploy** or **Security** stage of a Harness pipeline Follow the instructions below to configure the Artifact Verification step. 
diff --git a/docs/software-supply-chain-assurance/slsa/generate-slsa.md b/docs/software-supply-chain-assurance/slsa/generate-slsa.md index 4da0607af4a..480e1a84ea2 100644 --- a/docs/software-supply-chain-assurance/slsa/generate-slsa.md +++ b/docs/software-supply-chain-assurance/slsa/generate-slsa.md @@ -144,13 +144,16 @@ For verifying the SLSA attestation, please refer to [Verify SLSA](/docs/software ## SLSA Generation step configuration with Build and Push step When using the Harness CI **Build and Push** step for the image-building process, you can configure the **SLSA Generation** step to generate and attest to the Provenance. Follow the [SLSA Generation step configuration](#slsa-generation-step-configuration), for the **Artifact Digest** field, you can use [Harness Expressions](/docs/platform/variables-and-expressions/harness-variables/) to dynamically populate the digest of the image built during the **Build and Push** step. -For example, the expression could look like: -`<+pipeline.stages..spec.execution.steps..output.outputVariables.digest>` +For example, the expression looks like: + +`<+pipeline.stages..spec.execution.steps. +.artifact_.stepArtifacts. +publishedImageArtifacts[0].digest>` If you have already executed the **Build and Push** step, navigate to the execution details, open the **Output Variables** tab, and copy the expression for the digest from the **Input Name** column. 
- + For performing the attestation, refer to the section [Attest SLSA Provenance](#attest-slsa-provenance) diff --git a/docs/software-supply-chain-assurance/slsa/static/buildandpush.png b/docs/software-supply-chain-assurance/slsa/static/buildandpush.png new file mode 100644 index 00000000000..793b421ff37 Binary files /dev/null and b/docs/software-supply-chain-assurance/slsa/static/buildandpush.png differ diff --git a/kb/cloud-cost-management/cloud-cost-management-faqs.md b/kb/cloud-cost-management/cloud-cost-management-faqs.md index e20258d20b0..b4e69139018 100644 --- a/kb/cloud-cost-management/cloud-cost-management-faqs.md +++ b/kb/cloud-cost-management/cloud-cost-management-faqs.md @@ -161,7 +161,7 @@ Currently, it's not supported. However, you can leverage dashboard for the same. When we ingest cloud data, we make certain modifications to the tags/labels. However, with cluster data, we ingest the labels without any alterations. -More information can be found [here](../../docs/cloud-cost-management/use-ccm-cost-reporting/root-cost-analysis/analyze-cost-for-aws#analyze-aws-cost). +More information can be found [here](https://developer.harness.io/docs/cloud-cost-management/use-ccm-cost-reporting/root-cost-analysis/analyze-cost-for-aws/#analyze-aws-cost). ### Why does the dropdown in the GPU cost tracking perspective only show instance types that have already been used, and not all available GPU instance types across clouds? diff --git a/kb/continuous-integration/continuous-integration-faqs.md b/kb/continuous-integration/continuous-integration-faqs.md index 26bc366dcde..a3e0ab12e79 100644 --- a/kb/continuous-integration/continuous-integration-faqs.md +++ b/kb/continuous-integration/continuous-integration-faqs.md @@ -1627,6 +1627,8 @@ To avoid authentication issues, it's recommended to either use a PAT when config ### How can user access the secrets as files in a Docker build without writing them to layers? 
The **build and push** steps used to build Docker images have a context field. Users can use the context field in the build and push steps to mount the current directory at `/harness`. By copying your files to a specific directory and then mounting them, you can avoid writing secrets into the Docker image layers. +### Why do Build and Push steps fail with "Error while loading buildkit image: exit status 1" when /var/lib/docker is included in shared paths during DIND execution? +**Build and Push** steps fail with the error "Error while loading buildkit image: exit status 1" when `/var/lib/docker` is included in the shared paths during Docker-in-Docker (DIND) execution because DIND creates a Docker daemon using this path, and sharing it across steps causes conflicts when multiple build steps try to create and access their own Docker daemons. To resolve this, remove `/var/lib/docker` from the shared paths configuration, which prevents conflicts and allows **Build and Push** steps to execute successfully. ## Upload artifacts diff --git a/kb/feature-flags/harness-feature-flag-faqs.md b/kb/feature-flags/harness-feature-flag-faqs.md index 50aa888787f..5a5231da808 100644 --- a/kb/feature-flags/harness-feature-flag-faqs.md +++ b/kb/feature-flags/harness-feature-flag-faqs.md @@ -95,7 +95,7 @@ The SDK is initialized for a specific target, enabling personalized flag evaluat You can find a detailed overview of how Harness Feature Flags' SDKs behave after a flag change in the following table: [Communication loop between Harness and the SDKs](https://developer.harness.io/docs/feature-flags/use-ff/ff-sdks/sdk-overview/communication-sdks-harness-feature-flags#polling). -### CanI call initialize more than once to update attributes? +### Can I call initialize more than once to update attributes? Currently, FF doesn't have an option to update attributes without closing the SDK. You need to close and reinitialize the SDK to update attributes. 
diff --git a/kb/platform/articles/snow-connector-oidc-refresh-token.md b/kb/platform/articles/snow-connector-oidc-refresh-token.md new file mode 100644 index 00000000000..073cc47273e --- /dev/null +++ b/kb/platform/articles/snow-connector-oidc-refresh-token.md @@ -0,0 +1,169 @@ +--- +description: KB - Guide on creating a Harness ServiceNow connector using OIDC Refresh Token, with focus on ServiceNow configurations. +title: Harness ServiceNow connector using OIDC Refresh Token +--- + +# ServiceNow Connector - OIDC Refresh Token + +You can connect Harness to ServiceNow using a Harness ServiceNow connector. This connector enables you to approve and reject pipeline steps directly within Harness. + +## Supported Authentication Methods +Harness supports the following authentication methods: + +1. **Username and Password** +2. **ADFS Client Credentials with Certificate** +3. **OIDC Refresh Token** + +> **Note**: Harness recommends avoiding the "Username and Password" authentication method for production-grade setups. + +This document is intended to be used alongside the Harness DeveloperHub article on [Connecting to ServiceNow](https://developer.harness.io/docs/platform/connectors/ticketing-systems/connect-to-service-now/). + +## Document Scope +The scope of this document is to provide the necessary configuration steps within the ServiceNow instance to set up a Harness ServiceNow Connector. + +## ServiceNow Instance OAuth Configuration + +1. **Log in to your ServiceNow instance**. For important notes, refer to the [Harness DeveloperHub Important Notes](https://developer.harness.io/docs/platform/connectors/ticketing-systems/connect-to-service-now/#important-notes). + +2. In the left-hand **Application Navigator**, search for **"OAuth"** under **All** and go to **Application Registry**. + +![](../static/snow-application-registry.png) + +3. Click **New** and select **Create an OAuth API endpoint for external clients**. Assign a meaningful name and click **Submit**. 
+ > For more details about the other fields, refer to the [ServiceNow Connector Documentation](https://docs.servicenow.com/bundle/xanadu-platform-security/page/administer/security/task/t_CreateEndpointforExternalClients.html). + +![](../static/snow-application-registry-new.png) + +![](../static/snow-oauth-api.png) + +![](../static/snow-endpoint.png) + +4. Once configured, you’ll receive a **Client ID**, **Client Secret**, **Username**, and **Password**. With these credentials, you can generate the **Refresh Token** and **Access Token**. **Refresh Token** is needed for the ServiceNow (SNOW) Connector in Harness. + +![](../static/snow-endpoint-client-secret.png) + +### Generate the Refresh Token and Access Token + +Use the following `curl` command to obtain the tokens: + +```bash +curl --location --request POST 'https://.service-now.com/oauth_token.do' \ +--header 'Accept: application/json' \ +--header 'Content-Type: application/x-www-form-urlencoded' \ +--data-urlencode 'grant_type=password' \ +--data-urlencode 'client_id=b893bef11d034ea1a12d058a35ac98fa' \ +--data-urlencode 'client_secret=********' \ +--data-urlencode 'username=admin' \ +--data-urlencode 'password=********' +``` + +### OAuth Token Response Example + +After executing the token generation `curl` command, the response will look like this: + +```json +{ + "access_token": "KHFeW3Y-IabRcWGdSPKWop98o_EiFjgnvLtehQl1ULTfsqZ1xerrd-6GpUbf8GSJ0ss****o0StERjAdba3TbYrJaw", + "refresh_token": "lX18oGvfH5aph7YaiOBkKFvgQyCD5j7VFdh-kA4JpjPgk4MLcB0HdgN1-j-BoprYNuSw****LQEf0W5YIFxzQ", + "scope": "", + "token_type": "Bearer", + "expires_in": 1799 +} +``` + +## Creating the ServiceNow Connector in Harness + +Now that you have all the required credentials: +1. **Client ID** +2. **Client Secret** +3. **Refresh Token** +4. **Token URL** (formed based on the example in the Harness DeveloperHub article) + +You can proceed to create a **ServiceNow Connector** in Harness and test the connection. 
This step will require a Harness Delegate. + +## Testing the ServiceNow Connector with a Simple Pipeline + +To verify the connector configuration, create a simple pipeline in Harness. + +![](../static/snow-test-pipeline.png) + +Below is an example YAML file for testing the connector: + +```yaml +pipeline: + name: ServiceNow Connector Test + identifier: ServiceNow_Connector_Test + projectIdentifier: InfiniteBlue + orgIdentifier: default + tags: {} + stages: + - stage: + name: SNOW Test + identifier: SNOW_Test + description: "" + type: Approval + spec: + execution: + steps: + - step: + name: SNOW Create + identifier: SNOW_Create + type: ServiceNowCreate + timeout: 5m + spec: + connectorRef: SNOW_Connector + ticketType: incident + fields: + - name: description + value: Testing Harness ServiceNow Connector + - name: short_description + value: Testing Harness ServiceNow Connector + createType: Normal + - step: + name: SNOW Approval + identifier: SNOW_Approval + type: ServiceNowApproval + timeout: 1d + spec: + approvalCriteria: + type: KeyValues + spec: + matchAnyCondition: true + conditions: + - key: state + operator: equals + value: Resolved + rejectionCriteria: + type: KeyValues + spec: + matchAnyCondition: true + conditions: [] + connectorRef: SNOW_Connector + ticketNumber: <+pipeline.stages.SNOW_Test.spec.execution.steps.SNOW.ticket.ticketNumber> + retryInterval: 1m + ticketType: incident + - step: + name: SNOW Update + identifier: SNOW_Update + type: ServiceNowUpdate + timeout: 5m + spec: + useServiceNowTemplate: false + connectorRef: SNOW_Connector + ticketType: incident + ticketNumber: <+pipeline.stages.SNOW_Test.spec.execution.steps.SNOW.ticket.ticketNumber> + fields: + - name: state + value: "7" + tags: {} +``` +This example pipeline performs the following actions: + +1. **Creates a new ServiceNow ticket**: The pipeline creates an incident ticket in ServiceNow with specified fields, such as description and short description. + +2. 
**Approves the ticket**: An approval step waits for the ticket to reach the desired state, such as "Resolved". If the conditions are met, the approval is granted; otherwise, it can be set to reject. + +3. **Updates the ticket status**: Once approved, the pipeline updates the ticket status or any other specified fields to indicate completion or further action as needed. + +These steps confirm that the ServiceNow Connector is properly configured and functional within Harness. + diff --git a/kb/platform/static/snow-application-registry-new.png b/kb/platform/static/snow-application-registry-new.png new file mode 100644 index 00000000000..047493635eb Binary files /dev/null and b/kb/platform/static/snow-application-registry-new.png differ diff --git a/kb/platform/static/snow-application-registry.png b/kb/platform/static/snow-application-registry.png new file mode 100644 index 00000000000..35d8d2d1463 Binary files /dev/null and b/kb/platform/static/snow-application-registry.png differ diff --git a/kb/platform/static/snow-endpoint-client-secret.png b/kb/platform/static/snow-endpoint-client-secret.png new file mode 100644 index 00000000000..d6137bb5047 Binary files /dev/null and b/kb/platform/static/snow-endpoint-client-secret.png differ diff --git a/kb/platform/static/snow-endpoint.png b/kb/platform/static/snow-endpoint.png new file mode 100644 index 00000000000..d6f191d82cd Binary files /dev/null and b/kb/platform/static/snow-endpoint.png differ diff --git a/kb/platform/static/snow-oauth-api.png b/kb/platform/static/snow-oauth-api.png new file mode 100644 index 00000000000..774ca646849 Binary files /dev/null and b/kb/platform/static/snow-oauth-api.png differ diff --git a/kb/platform/static/snow-test-pipeline.png b/kb/platform/static/snow-test-pipeline.png new file mode 100644 index 00000000000..df0b0c85544 Binary files /dev/null and b/kb/platform/static/snow-test-pipeline.png differ diff --git a/kb/reference-architectures/ccm/best-practices/aws-connectors-and-roles.md 
b/kb/reference-architectures/ccm/best-practices/aws-connectors-and-roles.md index 235c98015e5..9a52cbb4ad0 100644 --- a/kb/reference-architectures/ccm/best-practices/aws-connectors-and-roles.md +++ b/kb/reference-architectures/ccm/best-practices/aws-connectors-and-roles.md @@ -9,7 +9,12 @@ The process below defines how to provision Harness connectors and AWS IAM roles ## Permissions -You will need access to provision IAM roles in AWS and create CCM connectors in Harness. +You will need access to provision IAM roles in AWS and create CCM connectors in Harness. When running the Terraform code, two variables need to be defined: + +`HARNESS_ACCOUNT_ID` = (Your Harness Account ID) + +`HARNESS_PLATFORM_API_KEY` = Created via a [service account](https://developer.harness.io/docs/platform/role-based-access-control/add-and-manage-service-account/) with connector and CCM admin permissions across all resources. + ## Setup Providers diff --git a/kb/reference-architectures/ccm/best-practices/azure-connectors-and-roles.md b/kb/reference-architectures/ccm/best-practices/azure-connectors-and-roles.md index 66ea9169a02..5ab8c220c74 100644 --- a/kb/reference-architectures/ccm/best-practices/azure-connectors-and-roles.md +++ b/kb/reference-architectures/ccm/best-practices/azure-connectors-and-roles.md @@ -9,7 +9,11 @@ The process below defines how to provision Harness connectors and Azure IAM role ## Permissions -You will need access to provision IAM roles in Azure and create CCM connectors in Harness. +You will need access to provision IAM roles in Azure and create CCM connectors in Harness. When running the Terraform code, two variables need to be defined: + +`HARNESS_ACCOUNT_ID` = (Your Harness Account ID) + +`HARNESS_PLATFORM_API_KEY` = Created via a [service account](https://developer.harness.io/docs/platform/role-based-access-control/add-and-manage-service-account/) with connector and CCM admin permissions across all resources. 
## Setup Providers diff --git a/kb/reference-architectures/ccm/best-practices/ccm-only-delegate.md b/kb/reference-architectures/ccm/best-practices/ccm-only-delegate.md index ca942c8738d..25fb1611747 100644 --- a/kb/reference-architectures/ccm/best-practices/ccm-only-delegate.md +++ b/kb/reference-architectures/ccm/best-practices/ccm-only-delegate.md @@ -1,9 +1,11 @@ --- -title: CCM Only Delegate -description: A Helm chart with parameters that ensure the Delegate is only used for CCM. +title: IaC for a CCM Only Delegate +description: IaC code samples for a Harness delegate that is tasked only for CCM related activities. --- -# Overview +# Provision a CCM Only Delegate via a Helm chart + +## Overview Because Delegates are a Harness platform offering, they can potentially be used for other modules other than CCM. In this example, we are setting specific parameters to ensure that the service account of the Delegate is read only and preventing the running of scripts: ``` @@ -11,7 +13,7 @@ Because Delegates are a Harness platform offering, they can potentially be used --set-json custom_envs='[{"name":"BLOCK_SHELL_TASK","value":"true"}]' ``` -# Resource Requirements +## Resource Requirements Gathering fine-grain metrics in the cluster is memory intensive. In an effort to ensure we don't run out of memory and terminate the pod, the following sizing guidelines are recommended: | # Nodes in the Cluster | CPU (Cores) | MEM (Mi) | @@ -58,3 +60,20 @@ helm upgrade -i helm-delegate --namespace harness-delegate-ng --create-namespace --set cpu=1 \ --set memory=3814 ``` + + +# Use Terraform to provision Delegate tokens + +## Overview +Managing delegate tokens at scale isn't ideal inside the Harness UI. You can instead manage these with Terraform using the [Harness provider](https://registry.terraform.io/providers/harness/harness/latest/docs/resources/platform_delegate_token). 
All CCM functionality is at the account level so ensure you only set the `account_id` parameter and do not set an organization or project. + +You can then leverage the token created to provision your delegate with Terraform: `harness_platform_delegatetoken.this.value`. You can use the [Helm provider](https://registry.terraform.io/providers/hashicorp/helm/latest/docs) to directly reference the delegate token within your [delegate deployment values](https://registry.terraform.io/modules/harness/kubernetes-delegate/harness/latest). + +## Terraform Example +``` +# Create delegate token for account level +resource "harness_platform_delegatetoken" "test" { + name = "test token" + account_id = "account_id" +} +``` \ No newline at end of file diff --git a/kb/reference-architectures/ccm/best-practices/gcp-connectors-and-roles.md b/kb/reference-architectures/ccm/best-practices/gcp-connectors-and-roles.md index 435b6884895..9e5d7252a18 100644 --- a/kb/reference-architectures/ccm/best-practices/gcp-connectors-and-roles.md +++ b/kb/reference-architectures/ccm/best-practices/gcp-connectors-and-roles.md @@ -9,7 +9,11 @@ The process below defines how to provision Harness connectors and GCP IAM roles ## Permissions -You will need access to provision IAM roles in GCP and create CCM connectors in Harness. +You will need access to provision IAM roles in GCP and create CCM connectors in Harness. When running the Terraform code, two variables need to be defined: + +`HARNESS_ACCOUNT_ID` = (Your Harness Account ID) + +`HARNESS_PLATFORM_API_KEY` = Created via a [service account](https://developer.harness.io/docs/platform/role-based-access-control/add-and-manage-service-account/) with connector and CCM admin permissions across all resources. 
## Setup Providers diff --git a/kb/reference-architectures/ccm/best-practices/k8s-and-ccm-k8s-connectors.md b/kb/reference-architectures/ccm/best-practices/k8s-and-ccm-k8s-connectors.md index 4b6e6face8c..a719042b374 100644 --- a/kb/reference-architectures/ccm/best-practices/k8s-and-ccm-k8s-connectors.md +++ b/kb/reference-architectures/ccm/best-practices/k8s-and-ccm-k8s-connectors.md @@ -9,7 +9,11 @@ The process below defines how to provision Harness connectors to get K8s costs i ## Permissions -You will need access to create CCM connectors in Harness. +You will need access to create CCM connectors in Harness. When running the Terraform code, two variables need to be defined: + +`HARNESS_ACCOUNT_ID` = (Your Harness Account ID) + +`HARNESS_PLATFORM_API_KEY` = Created via a [service account](https://developer.harness.io/docs/platform/role-based-access-control/add-and-manage-service-account/) with connector and CCM admin permissions across all resources. ## Setup Providers diff --git a/kb/reference-architectures/ccm/onboarding/autostopping/k8s.md b/kb/reference-architectures/ccm/onboarding/autostopping/k8s.md index bbb72873ce7..fe3d5efd9f8 100644 --- a/kb/reference-architectures/ccm/onboarding/autostopping/k8s.md +++ b/kb/reference-architectures/ccm/onboarding/autostopping/k8s.md @@ -212,3 +212,15 @@ If the pods do come up and are healthy but the page is not redirecting, or the l - Scale controller to one: `k -n harness-autostopping scale deploy/autostopping-controller --replicas=1` At this point the configmap should be regenerated, and you can try accessing your application again. + +## No snapshot found for Autostopping rule + +This could be because of any issues regarding the config map used for the Autostopping rule. Execute these steps to delete the existing config map and regenerate it. 
+ +``` +kubectl delete configmap harness-autostopping-config -n harness-autostopping + +kubectl rollout restart deployment autostopping-controller -n harness-autostopping + +kubectl rollout restart deployment autostopping-router -n harness-autostopping +``` diff --git a/kb/reference-architectures/ccm/onboarding/k8s.md b/kb/reference-architectures/ccm/onboarding/k8s.md index ac2274d1336..d767af783e4 100644 --- a/kb/reference-architectures/ccm/onboarding/k8s.md +++ b/kb/reference-architectures/ccm/onboarding/k8s.md @@ -45,7 +45,7 @@ custom_envs: value: "true" ``` -When deploying a delegate, it is recommended that you name the delegate either the same as the cluster name or something very similar that makes it obvious what cluster the delegate is deployed into. +When deploying a delegate, it is recommended that you name the delegate either the same as the cluster name or something very similar that makes it obvious what cluster the delegate is deployed into. An example of deploying a CCM Only delegate via a Helm chart is located [here](https://developer.harness.io/kb/reference-architectures/ccm/best-practices/ccm-only-delegate). ### Resource Constraints diff --git a/release-notes/continuous-delivery.md b/release-notes/continuous-delivery.md index 42dcf513c4b..6c8207ddd02 100644 --- a/release-notes/continuous-delivery.md +++ b/release-notes/continuous-delivery.md @@ -1,7 +1,7 @@ --- title: Continuous Delivery & GitOps release notes sidebar_label: Continuous Delivery & GitOps -date: 2025-02-17T10:00:00 +date: 2025-02-25T10:00:00 sidebar_position: 8 --- @@ -47,6 +47,18 @@ import Kustomizedep from '/release-notes/shared/kustomize-3-4-5-deprecation-noti ## February 2025 +### Version 1.78.7 + +#### Fixed Issues + +- Previously, the system fetched the service YAML from the Master branch during Helm Chart Deployment, preventing the selection of the Primary Manifest due to a branch mismatch. This issue is resolved. 
(**CDS-106242, ZD-77092**) +- Previously, users experienced an issue where selecting Org-level environments during pipeline runtime resulted in an access error despite having the correct permissions. This issue is resolved. (**CDS-106527, ZD-74454**) +- Previously, pipeline executions were not appearing in the Execution History tab after moving a pipeline to Git. The issue is resolved. +**Note**: When switching from Pipeline Studio to Execution History, the system automatically applies a branch filter based on the selected branch in Pipeline Studio. To view executions performed before moving the pipeline to Git, users need to remove the branch filter. (**PIPE-25154, ZD-78153**) +- Previously, the Approval Step input variables dialog box in the Harness UI displayed unnecessary line wrapping, causing UI distortion. The issue is resolved. (**CDS-106804, ZD-78238**) +- Previously, the Primary Manifest API did not support passing Git details for remote services. The issue is resolved by updating the API to support remote services. (**CDS-106625, ZD-77092**) +- Previously, in Pipeline Studio UI, selecting a Deploy stage followed by an Approval stage caused an unnecessary environment section to be added to the Approval stage YAML. This modification occurred even without making actual edits. The issue is resolved. (**PIPE-24398**) +- Previously, in Containerized Step Groups, adding a third parallel step resulted in an incorrect step palette being displayed, not respecting the containerized setting of the step group. The issue is resolved. 
(**PIPE-24399**) ### GitOps Version 1.25.3, GitOps Agent Version 0.86.2 diff --git a/release-notes/continuous-integration.md b/release-notes/continuous-integration.md index 7d80cbf9fa2..c05b4b55690 100644 --- a/release-notes/continuous-integration.md +++ b/release-notes/continuous-integration.md @@ -1,7 +1,7 @@ --- title: Continuous Integration release notes sidebar_label: Continuous Integration -date: 2025-02-14T10:00 +date: 2025-02-21T10:00 sidebar_position: 10 --- @@ -22,6 +22,26 @@ These release notes describe recent changes to Harness Continuous Integration. ## February 2025 +### Version 1.67 + + + +#### New features and enhancements + +- The Build Intelligence flag can now be toggled on and off based on an already resolved variable value, providing greater flexibility in pipeline configurations (CI-15706). +- The new input `PLUGIN_IGNORE_PATHS` (available in `plugins/kaniko:1.10.6`) allows users to specify multiple paths to ignore during the build. Each path is trimmed and appended as a separate `--ignore-path` flag in the Kaniko build process. (CI-16193) + +#### Fixed issues + +- Fixed an issue where enabling Build Intelligence caused Python shell executions to fail due to an extra newline being appended. The autoinjection script has been corrected for **run**, **runtest**, and **runtestv2** in Build Intelligence. 
(CI-15914, ZD-78087) + +#### Harness images updates + +| **Image** | **Change** | **Previous version** | **New Version** | +| ------------------------ | ----------------------------------------------- | -------------------- | --------------- | +| `plugins/buildx` | Configuration Updates | 1.1.25 | 1.1.26 | +| `plugins/buildx-ecr` | Configuration Updates | 1.2.9 | 1.2.10 | + ### Version 1.66 diff --git a/release-notes/delegate.md b/release-notes/delegate.md index 848d29f84b8..6bce783b695 100644 --- a/release-notes/delegate.md +++ b/release-notes/delegate.md @@ -87,6 +87,20 @@ For more information, go to [Delegate expiration support policy](/docs/platform/ ::: +## February 2025 + +### Version 24.10.84710 + +#### Hotfix + +- Resolved an issue where WinRM passwords containing special characters were not working with WinRM Kerberos authentication. [CDS-107127] + +### Version 25.02.85301 + +#### Fixed Issues + +- Added a new API to send Git details in requests, ensuring services are fetched from the correct branch. [CDS-106242] + ## January 2025 ### Version 24.10.84107 diff --git a/release-notes/platform.md b/release-notes/platform.md index 0bd4cb220a6..3178688bffb 100644 --- a/release-notes/platform.md +++ b/release-notes/platform.md @@ -79,6 +79,17 @@ The following deprecated API endpoints are longer supported: ## February 2025 +### Version 1.78.x +#### Fixed issues + +- Fixed an issue in **Notification Rules** where changing the **ResourceType** and saving it would not apply correctly. The **ResourceType** field is now disabled to prevent this. [PL-60466] + +#### New features and enhancements + +- Added **userGroup sync** support for **OIDC** and implemented **license enforcement** for OIDC. [PL-60492] + +- Introduced delegate version override support at different scopes (account, org, project), with and without tags. 
[PL-58099] + ### Version 1.77.x #### New features and enhancements diff --git a/release-notes/self-managed-enterprise-edition.md b/release-notes/self-managed-enterprise-edition.md index d451f0f182c..43ec40232a9 100644 --- a/release-notes/self-managed-enterprise-edition.md +++ b/release-notes/self-managed-enterprise-edition.md @@ -232,6 +232,77 @@ Harness Helm charts are now signed to ensure they are secure and trustworthy. Cl ::: +## Feb 24, 2025, Patch Version 0.25.4 + +This release includes the following Harness module and component versions. + +| **Name** | **Version** | +|---------------------------|------------------------------------------------------------------------------------------------| +| Helm Chart | [0.25.4](https://github.com/harness/helm-charts/releases/tag/harness-0.25.4) | +| Air Gap Bundle | [0.25.4](https://console.cloud.google.com/storage/browser/smp-airgap-bundles/harness-0.25.4) | +| NG Manager | 1.76.7 | +| CI Manager | 1.61.2 | +| Pipeline Service | 1.111.1 | +| Platform Service | 1.48.0 | +| Access Control Service | 1.70.0 | +| Delegate | 25.01.84800 | +| GitOps Service | 1.23.10 | +| Change Data Capture | 1.41.0 | +| STO Core | 1.123.1 | +| Test Intelligence Service | 1.42.1 | +| NG UI | 1.61.7 | +| LE NG | 1.5.6 | +| Looker | 1.7.6 | +| Log Service | 1.17.3 | +| Batch Processing | 1.38.4 | +| Gateway | 1.41.7 | + + +**Alternative air gap bundle download method** + +Some admins might not have Google account access to download air gap bundles. As an alternative, you can use `gsutil`. For `gsutil` installation instructions, go to [Install gsutil](https://cloud.google.com/storage/docs/gsutil_install) in the Google Cloud documentation. 
+ +``` +gsutil -m cp \ + "gs://smp-airgap-bundles/harness-0.25.4/ccm_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/cdng_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/ce_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/cet_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/ci_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/ff_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/platform_images.tgz" \ + "gs://smp-airgap-bundles/harness-0.25.4/sto_images.tgz" \ + . +``` + +### Fixed issues + +#### Cloud Cost Management + +- Improved Query Performance: We have increased the socket timeout for all ClickHouse queries to enhance stability and performance. [CCM-21324] + +- Optimized Data Partitioning: We have added partitioning on the startTime field in unifiedTable to improve query efficiency. [CCM-21374] + +- Expanded AWS & Unified View Metrics: The following cost measures are now available in AWS and Unified views within Cloud Cost Dashboards, enabling more granular cost analysis: +AWS: +Total Discount +Private Rate Discount +Bundled Discount +EDP Discount +Tax +Credit +Refund + +Unified: +AWS Total Discount +AWS Private Rate Discount +AWS Bundled Discount +AWS EDP Discount +AWS Tax +AWS Credit +AWS Refund +[CCM-21445] + ## Feb 17, 2025, Patch Version 0.25.3 This release includes the following Harness module and component versions. diff --git a/src/components/Docs/IncidentResponse.tsx b/src/components/Docs/IncidentResponse.tsx index 5d5420c12da..414ddb8ee5c 100755 --- a/src/components/Docs/IncidentResponse.tsx +++ b/src/components/Docs/IncidentResponse.tsx @@ -15,7 +15,7 @@ export default function IR() {
-

Incident Response (COMING SOON)

+

Incident Response

@@ -52,4 +52,4 @@ export default function IR() {
); -} \ No newline at end of file +} diff --git a/src/components/Roadmap/data/cdData.ts b/src/components/Roadmap/data/cdData.ts index 9bb7794e307..2cb17dadd23 100644 --- a/src/components/Roadmap/data/cdData.ts +++ b/src/components/Roadmap/data/cdData.ts @@ -336,31 +336,31 @@ export const CdData: Horizon = { tag: [{value: "Deployment"}], title: "Azure Functions", description: "Users can deploy Azure Functions.", + link:"https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/azure/azure-function-tutorial/" }, { tag: [{value: "Deployment"}], title: "Google Cloud Run Support", description: "Users can deploy to Google Cloud Run.", - }, - { - tag: [{value: "OPA"}], - title: "Service, Environment, Overrides w/ OPA", - description: "Users can configure Service, Environment, and Overrides with OPA policies.", + link:"https://developer.harness.io/docs/continuous-delivery/deploy-srv-diff-platforms/google-cloud-functions/google-cloud-run/" }, { tag: [{value: "Pipeline"}], title: "Flexible Templates Phase II", description: "Users can reference dynamically inserted stages/steps in pipeline templates", + link:"https://developer.harness.io/docs/platform/templates/inject-step-stage-templates/" }, { tag: [{value: "OPA"}], title: "Service, Environment, Overrides w/ OPA", - description: "Users can create and enforce OPA policies for CD entities such as Services, Environments, Overrides, and Infrastructure definitionsn", + description: "Users can create and enforce OPA policies for CD entities such as Services, Environments, Overrides, and Infrastructure definitions", + link:"https://developer.harness.io/docs/continuous-delivery/x-platform-cd-features/advanced/cd-governance/opa-policies-for-cd-entities/" }, { tag: [{value: "GitOps"}], title: "Improved Application Filtering", description: "Users can filter applications using live search functionality, and wildcard search is also supported for application labels. 
", + link:"https://developer.harness.io/docs/continuous-delivery/gitops/use-gitops/manage-gitops-applications/" } ] }