diff --git a/.github/workflows/pr-checker.yml b/.github/workflows/pr-checker.yml index f3bdb27e8..bae117dbe 100644 --- a/.github/workflows/pr-checker.yml +++ b/.github/workflows/pr-checker.yml @@ -56,7 +56,7 @@ jobs: format: 'table' severity: 'CRITICAL,HIGH' vuln-type: 'os,library' - skip-dirs: 'opt/telegraf' + skip-dirs: 'opt/telegraf,usr/sbin/telegraf' exit-code: '1' timeout: '5m0s' WINDOWS-build: diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml index d47a60ffe..1e9909ee8 100644 --- a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -40,5 +40,6 @@ package: # to be named differently. Defaults to Dockerfile. # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos - tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 565661d64..9977e7a1a 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -47,3 +47,4 @@ package: repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos tag: 'cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. + export_to_artifact_path: 'agentimage.tar.gz' # path for exported image and use this instead of fixed tag diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml index e0286fbd6..0dc0a47c5 100644 --- a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -5,7 +5,7 @@ environment: version: '2019' runtime: provider: 'appcontainer' - image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0' + image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:latest' source_mode: 'map' version: @@ -53,3 +53,4 @@ package: repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. 
+  export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag
diff --git a/.pipelines/pipeline.user.windows.yml b/.pipelines/pipeline.user.windows.yml
index 2b7a54ae9..e9d0105ab 100644
--- a/.pipelines/pipeline.user.windows.yml
+++ b/.pipelines/pipeline.user.windows.yml
@@ -5,7 +5,7 @@ environment:
     version: '2019'
   runtime:
     provider: 'appcontainer'
-    image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0'
+    image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:latest'
     source_mode: 'map'

 version:
@@ -53,3 +53,4 @@ package:
   repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos
   tag: 'win-cidev' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID.
   latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well.
+  export_to_artifact_path: 'agentimage.tar.zip' # path for exported image and use this instead of fixed tag
diff --git a/.pipelines/release-agent.sh b/.pipelines/release-agent.sh
new file mode 100644
index 000000000..b34dd9995
--- /dev/null
+++ b/.pipelines/release-agent.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Note - This script is used in the pipeline as an inline script
+
+# These are plain pipeline variables which can be modified by anyone in the team
+# AGENT_RELEASE=cidev
+# AGENT_IMAGE_TAG_SUFFIX=07222021
+
+#Name of the ACR for ciprod & cidev images
+ACR_NAME=containerinsightsprod.azurecr.io
+AGENT_IMAGE_FULL_PATH=${ACR_NAME}/public/azuremonitor/containerinsights/${AGENT_RELEASE}:${AGENT_RELEASE}${AGENT_IMAGE_TAG_SUFFIX}
+AGENT_IMAGE_TAR_FILE_NAME=agentimage.tar.gz
+
+if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then
+   echo "-e error value of AGENT_IMAGE_TAG_SUFFIX variable shouldn't be empty"
+   exit 1
+fi
+
+if [ -z $AGENT_RELEASE ]; then
+   echo "-e error AGENT_RELEASE shouldn't be empty"
+   exit 1
+fi
+
+echo "ACR NAME - ${ACR_NAME}"
+echo "AGENT RELEASE - ${AGENT_RELEASE}"
+echo "AGENT IMAGE TAG SUFFIX - ${AGENT_IMAGE_TAG_SUFFIX}"
+echo "AGENT IMAGE FULL PATH - ${AGENT_IMAGE_FULL_PATH}"
+echo "AGENT IMAGE TAR FILE PATH - ${AGENT_IMAGE_TAR_FILE_NAME}"
+
+echo "loading linuxagent image tarball"
+IMAGE_NAME=$(docker load -i ${AGENT_IMAGE_TAR_FILE_NAME})
+# check docker load's exit status before any other command overwrites $?
+if [ $? -ne 0 ]; then
+   echo "-e error on loading linux agent tarball from ${AGENT_IMAGE_TAR_FILE_NAME}"
+   echo "** Please check whether this was caused by a build error **"
+   exit 1
+else
+   echo "successfully loaded linux agent image tarball"
+   echo IMAGE_NAME: $IMAGE_NAME
+fi
+# IMAGE_ID=$(docker images $IMAGE_NAME | awk '{print $3 }' | tail -1)
+# echo "Image Id is : ${IMAGE_ID}"
+# docker load prints "Loaded image: <name>"; after stripping quotes and whitespace the prefix is "Loadedimage:"
+prefix="Loadedimage:"
+IMAGE_NAME=$(echo $IMAGE_NAME | tr -d '"' | tr -d "[:space:]")
+IMAGE_NAME=${IMAGE_NAME/#$prefix}
+echo "*** trimmed image name: ${IMAGE_NAME}"
+echo "tagging the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}"
+# docker tag $IMAGE_NAME ${AGENT_IMAGE_FULL_PATH}
+docker tag $IMAGE_NAME $AGENT_IMAGE_FULL_PATH
+
+if [ $? -ne 0 ]; then
+   echo "-e error tagging the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}"
+   exit 1
+else
+   echo "successfully tagged the image $IMAGE_NAME as ${AGENT_IMAGE_FULL_PATH}"
+fi
+
+# the pipeline identity is used to push the image to the ciprod ACR
+echo "logging in to acr: ${ACR_NAME}"
+az acr login --name ${ACR_NAME}
+if [ $? -ne 0 ]; then
+   echo "-e error login to acr failed: ${ACR_NAME}"
+   exit 1
+else
+   echo "successfully logged in to acr: ${ACR_NAME}"
+fi
+
+echo "pushing ${AGENT_IMAGE_FULL_PATH}"
+docker push ${AGENT_IMAGE_FULL_PATH}
+if [ $? -ne 0 ]; then
+   echo "-e error on pushing the image ${AGENT_IMAGE_FULL_PATH}"
+   exit 1
+else
+   echo "Successfully pushed the image ${AGENT_IMAGE_FULL_PATH}"
+fi
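For context, a minimal sketch of how this new script appears intended to be driven. The values are illustrative only, taken from the commented pipeline-variable examples at the top of the script, and it assumes the script is run standalone rather than inline:

```bash
#!/bin/bash
# Hypothetical invocation of release-agent.sh (values are examples, not a real release).
export AGENT_RELEASE=cidev              # plain pipeline variable, per the comments above
export AGENT_IMAGE_TAG_SUFFIX=07222021  # date-style suffix, per the comments above
# agentimage.tar.gz (the export_to_artifact_path artifact from the pipeline changes
# above) must be present in the working directory before the script runs.
bash .pipelines/release-agent.sh
```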
diff --git a/README.md b/README.md
index 73bf858cd..e3ceedc8e 100644
--- a/README.md
+++ b/README.md
@@ -326,7 +326,7 @@ For DEV and PROD branches, automatically deployed latest yaml with latest agent
 docker build -f ./core/Dockerfile -t /: .
 docker push /:
 ```
-3. update existing agentest image tag in e2e-tests.yaml with newly built image tag with MCR repo
+3. update the existing agentest image tag in e2e-tests.yaml & conformance.yaml with the newly built image tag from the MCR repo

 # Scenario Tests
 Clusters are used in release pipeline already has the yamls under test\scenario deployed. Make sure to validate these scenarios.
diff --git a/ReleaseNotes.md b/ReleaseNotes.md
index 423161236..dc42e7d51 100644
--- a/ReleaseNotes.md
+++ b/ReleaseNotes.md
@@ -11,6 +11,24 @@ additional questions or comments.

 Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates)

+### 08/05/2021 -
+##### Version microsoft/oms:ciprod08052021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021 (linux)
+##### Code change log
+- Linux Agent
+  - Fix for the CPU spike which occurs at around 6:30am UTC every day because of unattended package upgrades
+  - Update MDSD build with fixes for the following issues
+    - Nondeterministic core dump issue caused by non-200 status codes and runtime exception stack unwinding
+    - Reduce the verbosity of the error logs for OMS & ODS code paths
+    - Increase timeout for OMS Homing service API calls from 30s to 60s
+  - Fix for https://github.com/Azure/AKS/issues/2457
+  - In replicaset, tail the mdsd.err log file into agent telemetry
+
+
+### 07/13/2021 -
+##### Version microsoft/oms:win-ciprod06112021-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2 (windows)
+##### Code change log
+- Hotfix for the NODE_IP environment variable not being set in non-sidecar mode
+
 ### 07/02/2021 -
 ##### Version microsoft/oms:ciprod06112021-1 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021-1 (linux)
 ##### Version microsoft/oms:win-ciprod06112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021 (windows)
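A quick, hedged way to spot-check the releases listed above once replication to MCR completes (tags taken verbatim from the notes; this is only a pull test, not part of the documented process):

```bash
# Pull the 08/05/2021 Linux image and the 07/13/2021 Windows hotfix image by tag.
docker pull mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021
docker pull mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2
```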
diff --git a/ReleaseProcess.md b/ReleaseProcess.md
index 8ec91546c..7bd858561 100644
--- a/ReleaseProcess.md
+++ b/ReleaseProcess.md
@@ -13,14 +13,12 @@ Here are the high-level instructions to get the CIPROD`
` image for
 2. Make PR to ci_dev branch and once the PR approved, merge the changes to ci_dev
 3. Latest bits of ci_dev automatically deployed to CIDEV cluster in build subscription so just validated E2E to make sure everthing works
 4. If everything validated in DEV, make merge PR from ci_dev and ci_prod and merge once this reviewed by dev team
-6. Update following pipeline variables under ReleaseCandiate with version of chart and image tag
-   - CIHELMCHARTVERSION # For example, 2.7.4
-   - CIImageTagSuffix # ciprod08072020 or ciprod08072020-1 etc.
-7. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image to test and scale cluters, AKS, AKS-Engine
-   > Note: production image automatically pushed to CIPROD Public cloud ACR which will inturn replicated to Public cloud MCR.
+5. Once the PR to ci_prod is approved, go ahead and merge, and wait for the ci_prod build to complete successfully
+6. Once the merged PR build has completed successfully, update the value of the AGENT_IMAGE_TAG_SUFFIX pipeline variable by editing the Release [ci-prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=mine&definitionId=38)
+   > Note - the value format of the AGENT_IMAGE_TAG_SUFFIX pipeline variable should be in `
` for our releases
+7. Create a release by selecting the targeted build version of the _docker-provider_Official-ci_prod release
 8. Validate all the scenarios against clusters in build subscription and scale clusters
-
 # 2. Perf and scale testing
 Deploy latest omsagent yaml with release candidate agent image in to supported k8s versions and validate all the critical scenarios. In perticular, throughly validate the updates going as part of this release and also make sure no regressions. If this passes, deploy onto scale cluster and validate perf and scale aspects. Scale cluster in AME cloud and co-ordinate with agent team who has access to this cluster to deploy the release candiate onto this cluster.
@@ -39,48 +37,49 @@ Image automatically synched to MCR CN from Public cloud MCR.

 Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR https://github.com/Azure/aks-engine/pull/2318

-## Arc for Kubernetes
+## Arc for Kubernetes

-Ev2 pipeline used to deploy the chart of the Arc K8s Container Insights Extension as per Safe Deployment Process.
+Ev2 pipeline is used to deploy the chart of the Arc K8s Container Insights Extension as per the Safe Deployment Process.
 Here is the high level process
 ```
 1. Specify chart version of the release candidate and trigger [container-insights-arc-k8s-extension-ci_prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=all)
 2. Get the approval from one of team member for the release
-  3. Once the approved, release should be triggered automatically
+  3. Once approved, the release should be triggered automatically
 4. use `cimon-arck8s-eastus2euap` for validating latest release in canary region
 5. TBD - Notify vendor team for the validation on all Arc K8s supported platforms
 ```

 ## Microsoft Charts Repo release for On-prem K8s
+> Note: This chart repo is used by the ARO v4 onboarding script as well.

-Since HELM charts repo being deprecated, Microsoft charts repo being used for HELM chart release of on-prem K8s clusters.
-To make chart release PR, fork [Microsoft-charts-repo]([https://github.com/microsoft/charts/tree/gh-pages) and make the PR against `gh-pages` branch of the upstream repo.
+Since the HELM charts repo is being deprecated, the Microsoft charts repo is used for the HELM chart release for on-prem K8s clusters.
+To make a chart release PR, fork [Microsoft-charts-repo](https://github.com/microsoft/charts/tree/gh-pages) and make the PR against the `gh-pages` branch of the upstream repo.
 Refer PR - https://github.com/microsoft/charts/pull/23 for example. Once the PR merged, latest version of HELM chart should be available in couple of mins in https://microsoft.github.io/charts/repo and https://artifacthub.io/.

 Instructions to create PR
 ```
-# 1. create helm package for the release candidate
 git clone git@github.com:microsoft/Docker-Provider.git
 git checkout ci_prod
 cd ~/Docker-Provider/charts/azuremonitor-containers # this path based on where you have cloned the repo
- helm package .
+ helm package .
-# 2. clone your fork repo and checkout gh_pages branch # gh_pages branch used as release branch
- cd ~
+# 2. clone your fork repo and checkout gh_pages branch # gh_pages branch used as release branch
+ cd ~
 git clone
 cd ~/charts # assumed the root dir of the clone is charts
 git checkout gh_pages
-# 3. copy release candidate helm package
- cd ~/charts/repo/azuremonitor-containers
+# 3. copy release candidate helm package
+ cd ~/charts/repo/azuremonitor-containers
 # update chart version value with the version of chart being released
- cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz .
+ cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz .
 cd ~/charts/repo
- # update repo index file
+ # update repo index file
 helm repo index .
-
+
 # 4. Review the changes and make PR. Please note, you may need to revert unrelated changes automatically added by `helm repo index .` command
 ```
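After the chart PR merges, a minimal sanity check against the published repo might look like the following. This is a hedged sketch, not part of the documented steps; the repo URL comes from the instructions above, and the version shown by the search will simply be whatever was just indexed:

```bash
# Add/update the Microsoft charts repo and confirm the new chart version is indexed.
helm repo add microsoft https://microsoft.github.io/charts/repo
helm repo update
helm search repo microsoft/azuremonitor-containers
```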
diff --git a/build/linux/installer/conf/td-agent-bit-rs.conf b/build/linux/installer/conf/td-agent-bit-rs.conf
index 9613c270d..da3738da7 100644
--- a/build/linux/installer/conf/td-agent-bit-rs.conf
+++ b/build/linux/installer/conf/td-agent-bit-rs.conf
@@ -23,6 +23,19 @@
     Skip_Long_Lines On
     Ignore_Older 2m

+[INPUT]
+    Name tail
+    Tag oms.container.log.flbplugin.mdsd.*
+    Path /var/opt/microsoft/linuxmonagent/log/mdsd.err
+    Read_from_Head true
+    DB /var/opt/microsoft/docker-cimprov/state/mdsd-ai.db
+    DB.Sync Off
+    Parser docker
+    Mem_Buf_Limit 1m
+    Path_Key filepath
+    Skip_Long_Lines On
+    Ignore_Older 2m
+
 [INPUT]
     Name tcp
     Tag oms.container.perf.telegraf.*
diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf
index 0ca07f7e5..5de35d82c 100644
--- a/build/linux/installer/conf/telegraf-rs.conf
+++ b/build/linux/installer/conf/telegraf-rs.conf
@@ -124,26 +124,6 @@
    namedrop = ["agent_telemetry", "file"]
    #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"]

-[[outputs.application_insights]]
-   ## Instrumentation key of the Application Insights resource.
-   instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY"
-
-   ## Timeout for closing (default: 5s).
-   # timeout = "5s"
-
-   ## Enable additional diagnostic logging.
-   # enable_diagnostic_logging = false
-
-   ## Context Tag Sources add Application Insights context tags to a tag value.
-   ##
-   ## For list of allowed context tag keys see:
-   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
-   # [outputs.application_insights.context_tag_sources]
-   # "ai.cloud.role" = "kubernetes_container_name"
-   # "ai.cloud.roleInstance" = "kubernetes_pod_name"
-   namepass = ["agent_telemetry"]
-   #tagdrop = ["nodeName"]
-
 ###############################################################################
 #                            PROCESSOR PLUGINS                                #
 ###############################################################################
diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf
index 8b6e2ad4b..0e4824e70 100644
--- a/build/linux/installer/conf/telegraf.conf
+++ b/build/linux/installer/conf/telegraf.conf
@@ -158,26 +158,6 @@
    namepass = ["container.azm.ms/disk"]
    #fieldpass = ["used_percent"]

-[[outputs.application_insights]]
-   ## Instrumentation key of the Application Insights resource.
-   instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY"
-
-   ## Timeout for closing (default: 5s).
-   # timeout = "5s"
-
-   ## Enable additional diagnostic logging.
-   # enable_diagnostic_logging = false
-
-   ## Context Tag Sources add Application Insights context tags to a tag value.
-   ##
-   ## For list of allowed context tag keys see:
-   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
-   # [outputs.application_insights.context_tag_sources]
-   # "ai.cloud.role" = "kubernetes_container_name"
-   # "ai.cloud.roleInstance" = "kubernetes_pod_name"
-   namepass = ["agent_telemetry"]
-   #tagdrop = ["nodeName"]
-
 ###############################################################################
 #                            PROCESSOR PLUGINS                                #
 ###############################################################################
diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data
index bdacf647d..b71cafd49 100644
--- a/build/linux/installer/datafiles/base_container.data
+++ b/build/linux/installer/datafiles/base_container.data
@@ -150,6 +150,9 @@ MAINTAINER: 'Microsoft Corporation'
 /etc/fluent/plugin/omslog.rb; source/plugins/utils/omslog.rb; 644; root; root
 /etc/fluent/plugin/oms_common.rb; source/plugins/utils/oms_common.rb; 644; root; root

+/etc/fluent/plugin/extension.rb; source/plugins/utils/extension.rb; 644; root; root
+/etc/fluent/plugin/extension_utils.rb; source/plugins/utils/extension_utils.rb; 644; root; root
+
 /etc/fluent/kube.conf; build/linux/installer/conf/kube.conf; 644; root; root
 /etc/fluent/container.conf; build/linux/installer/conf/container.conf; 644; root; root
diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh
index 252f471e9..8ecb7fe44 100644
--- a/build/linux/installer/scripts/livenessprobe.sh
+++ b/build/linux/installer/scripts/livenessprobe.sh
@@ -11,13 +11,29 @@ fi

 #optionally test to exit non zero value if fluentd is not running
 #fluentd not used in sidecar container
-if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then
+if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then
   (ps -ef | grep "fluentd" | grep -v "grep")
   if [ $? -ne 0 ]
   then
    echo "fluentd is not running" > /dev/termination-log
    exit 1
   fi
+  # fluentd by default launches a supervisor process and a worker process,
+  # so add a liveness check for each to handle the scenario where either one dies
+  # supervisor process
+  (ps -ef | grep "fluentd" | grep "supervisor" | grep -v "grep")
+  if [ $? -ne 0 ]
+  then
+   echo "fluentd supervisor is not running" > /dev/termination-log
+   exit 1
+  fi
+  # worker process
+  (ps -ef | grep "fluentd" | grep -v "supervisor" | grep -v "grep" )
+  if [ $? -ne 0 ]
+  then
+   echo "fluentd worker is not running" > /dev/termination-log
+   exit 1
+  fi
 fi

 #test to exit non zero value if fluentbit is not running
diff --git a/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb b/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb
index e255c4a71..e525d8681 100644
--- a/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb
+++ b/build/windows/installer/scripts/rubyKeepCertificateAlive/in_heartbeat_request.rb
@@ -36,14 +36,18 @@ def start

   def enumerate
     begin
-      puts "Calling certificate renewal code..."
-      maintenance = OMS::OnboardingHelper.new(
-        ENV["WSID"],
-        ENV["DOMAIN"],
-        ENV["CI_AGENT_GUID"]
-      )
-      ret_code = maintenance.register_certs()
-      puts "Return code from register certs : #{ret_code}"
+      if !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true"
+        puts "skipping certificate renewal code since AAD MSI auth configured"
+      else
+        puts "Calling certificate renewal code..."
+ maintenance = OMS::OnboardingHelper.new( + ENV["WSID"], + ENV["DOMAIN"], + ENV["CI_AGENT_GUID"] + ) + ret_code = maintenance.register_certs() + puts "Return code from register certs : #{ret_code}" + end rescue => errorStr puts "in_heartbeat_request::enumerate:Failed in enumerate: #{errorStr}" # STDOUT telemetry should alredy be going to Traces in AI. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 1ae7bef61..07af7f4a7 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod06112021 +ARG IMAGE_TAG=ciprod08052021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi @@ -17,7 +17,7 @@ ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* -COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd $tmpdir/ +COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd logrotate.conf $tmpdir/ WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image diff --git a/kubernetes/linux/logrotate.conf b/kubernetes/linux/logrotate.conf new file mode 100644 index 000000000..921371fd0 --- /dev/null +++ b/kubernetes/linux/logrotate.conf @@ -0,0 +1,39 @@ +/var/opt/microsoft/linuxmonagent/log/mdsd.err { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.warn { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.info { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} + +/var/opt/microsoft/linuxmonagent/log/mdsd.qos { + copytruncate + rotate 7 + missingok + notifempty + delaycompress + compress + size 10M +} diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 1a7034d4d..4986e3113 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -12,7 +12,7 @@ waitforlisteneronTCPport() { echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" return -1 else - + if [[ $port =~ $numeric ]] && [[ $waittimesecs =~ $numeric ]]; then #local varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":25228$"') while true @@ -38,6 +38,51 @@ waitforlisteneronTCPport() { fi } +checkAgentOnboardingStatus() { + local sleepdurationsecs=1 + local totalsleptsecs=0 + local isaadmsiauthmode=$1 + local waittimesecs=$2 + local numeric='^[0-9]+$' + + if [ -z "$1" ] || [ -z "$2" ]; then + echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. 
Required arguments <#isaadmsiauthmode, #wait-time-in-seconds>" + return -1 + else + + if [[ $waittimesecs =~ $numeric ]]; then + successMessage="Onboarding success" + failureMessage="Failed to register certificate with OMS Homing service, giving up" + if [ "${isaadmsiauthmode}" == "true" ]; then + successMessage="Loaded data sources" + failureMessage="Failed to load data sources into config" + fi + while true + do + if [ $totalsleptsecs -gt $waittimesecs ]; then + echo "${FUNCNAME[0]} giving up checking agent onboarding status after $totalsleptsecs secs" + return 1 + fi + + if grep "$successMessage" "${MDSD_LOG}/mdsd.info"; then + echo "Onboarding success" + return 0 + elif grep "$failureMessage" "${MDSD_LOG}/mdsd.err"; then + echo "Onboarding Failure: Reason: Failed to onboard the agent" + echo "Onboarding Failure: Please verify log analytics workspace configuration such as existence of the workspace, workspace key and workspace enabled for public ingestion" + return 1 + fi + sleep $sleepdurationsecs + totalsleptsecs=$(($totalsleptsecs+1)) + done + else + echo "${FUNCNAME[0]} called with non-numeric arguments<$2>. Required arguments <#wait-time-in-seconds>" + return -1 + fi + fi +} + + #using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding mkdir -p /var/opt/microsoft/docker-cimprov/state @@ -57,7 +102,11 @@ else export customResourceId=$AKS_RESOURCE_ID echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc source ~/.bashrc - echo "customResourceId:$customResourceId" + echo "customResourceId:$customResourceId" + export customRegion=$AKS_REGION + echo "export customRegion=$AKS_REGION" >> ~/.bashrc + source ~/.bashrc + echo "customRegion:$customRegion" fi #set agent config schema version @@ -194,9 +243,15 @@ fi if [ -z $domain ]; then ClOUD_ENVIRONMENT="unknown" elif [ $domain == "opinsights.azure.com" ]; then - CLOUD_ENVIRONMENT="public" -else - CLOUD_ENVIRONMENT="national" + CLOUD_ENVIRONMENT="azurepubliccloud" +elif [ $domain == "opinsights.azure.cn" ]; then + CLOUD_ENVIRONMENT="azurechinacloud" +elif [ $domain == "opinsights.azure.us" ]; then + CLOUD_ENVIRONMENT="azureusgovernmentcloud" +elif [ $domain == "opinsights.azure.eaglex.ic.gov" ]; then + CLOUD_ENVIRONMENT="usnat" +elif [ $domain == "opinsights.azure.microsoft.scloud" ]; then + CLOUD_ENVIRONMENT="ussec" fi export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc @@ -233,9 +288,9 @@ if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSI fi -aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) -export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc +aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) +export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc source ~/.bashrc @@ -421,7 +476,7 @@ export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_error if [ "$CONTAINER_RUNTIME" != "docker" ]; then # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" - export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" + export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" fi echo "set caps for ruby process to read container env from proc" @@ -445,34 +500,56 @@ 
DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc -echo "*** activating oneagent in legacy auth mode ***" -CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" -#use the file path as its secure than env -CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" -cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc -done -source /etc/mdsd.d/envmdsd -echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" -export CIWORKSPACE_id=$CIWORKSPACE_id -echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc -export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile -echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc -export OMS_TLD=$domain -echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc -export MDSD_FLUENT_SOCKET_PORT="29230" -echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc -#skip imds lookup since not used in legacy auth path +#skip imds lookup since not used either legacy or aad msi auth path export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH="true" echo "export SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH=$SKIP_IMDS_LOOKUP_FOR_LEGACY_AUTH" >> ~/.bashrc - +# this used by mdsd to determine cloud specific LA endpoints +export OMS_TLD=$domain +echo "export OMS_TLD=$OMS_TLD" >> ~/.bashrc +cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc +done +source /etc/mdsd.d/envmdsd +MDSD_AAD_MSI_AUTH_ARGS="" +# check if its AAD Auth MSI mode via USING_AAD_MSI_AUTH +export AAD_MSI_AUTH_MODE=false +if [ "${USING_AAD_MSI_AUTH}" == "true" ]; then + echo "*** activating oneagent in aad auth msi mode ***" + # msi auth specific args + MDSD_AAD_MSI_AUTH_ARGS="-a -A" + export AAD_MSI_AUTH_MODE=true + echo "export AAD_MSI_AUTH_MODE=true" >> ~/.bashrc + # this used by mdsd to determine the cloud specific AMCS endpoints + export customEnvironment=$CLOUD_ENVIRONMENT + echo "export customEnvironment=$customEnvironment" >> ~/.bashrc + export MDSD_FLUENT_SOCKET_PORT="28230" + echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc + export ENABLE_MCS="true" + echo "export ENABLE_MCS=$ENABLE_MCS" >> ~/.bashrc + export MONITORING_USE_GENEVA_CONFIG_SERVICE="false" + echo "export MONITORING_USE_GENEVA_CONFIG_SERVICE=$MONITORING_USE_GENEVA_CONFIG_SERVICE" >> ~/.bashrc + export MDSD_USE_LOCAL_PERSISTENCY="false" + echo "export MDSD_USE_LOCAL_PERSISTENCY=$MDSD_USE_LOCAL_PERSISTENCY" >> ~/.bashrc +else + echo "*** activating oneagent in legacy auth mode ***" + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + #use the file path as its secure than env + CIWORKSPACE_keyFile="/etc/omsagent-secret/KEY" + echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" + export CIWORKSPACE_id=$CIWORKSPACE_id + echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc + export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile + echo "export CIWORKSPACE_keyFile=$CIWORKSPACE_keyFile" >> ~/.bashrc + export MDSD_FLUENT_SOCKET_PORT="29230" + echo "export MDSD_FLUENT_SOCKET_PORT=$MDSD_FLUENT_SOCKET_PORT" >> ~/.bashrc +fi source ~/.bashrc dpkg -l | grep mdsd | awk '{print $2 " " $3}' -if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in legacy auth mode in sidecar container..." 
+if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "starting mdsd with mdsd-port=26130, fluentport=26230 and influxport=26330 in sidecar container..." #use tenant name to avoid unix socket conflict and different ports for port conflict #roleprefix to use container specific mdsd socket export TENANT_NAME="${CONTAINER_TYPE}" @@ -482,23 +559,29 @@ if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then source ~/.bashrc mkdir /var/run/mdsd-${CONTAINER_TYPE} # add -T 0xFFFF for full traces - mdsd -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & -else - echo "starting mdsd in legacy auth mode in main container..." - # add -T 0xFFFF for full traces - mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -r ${MDSD_ROLE_PREFIX} -p 26130 -f 26230 -i 26330 -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & +else + echo "starting mdsd mode in main container..." + # add -T 0xFFFF for full traces + mdsd ${MDSD_AAD_MSI_AUTH_ARGS} -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & +fi + +# Set up a cron job for logrotation +if [ ! -f /etc/cron.d/ci-agent ]; then + echo "setting up cronjob for ci agent log rotation" + echo "*/5 * * * * root /usr/sbin/logrotate -s /var/lib/logrotate/ci-agent-status /etc/logrotate.d/ci-agent >/dev/null 2>&1" > /etc/cron.d/ci-agent fi -# no dependency on fluentd for prometheus side car container -if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then +# no dependency on fluentd for prometheus side car container +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -e "/etc/config/kube.conf" ]; then echo "*** starting fluentd v1 in daemonset" fluentd -c /etc/fluent/container.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & else echo "*** starting fluentd v1 in replicaset" fluentd -c /etc/fluent/kube.conf -o /var/opt/microsoft/docker-cimprov/log/fluentd.log --log-rotate-age 5 --log-rotate-size 20971520 & - fi -fi + fi +fi #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then @@ -634,8 +717,10 @@ service rsyslog stop echo "getting rsyslog status..." 
 service rsyslog status

+checkAgentOnboardingStatus $AAD_MSI_AUTH_MODE 30
+
 shutdown() {
-	pkill -f mdsd
+	pkill -f mdsd
 }

 trap "shutdown" SIGTERM
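To make the new onboarding check concrete, here is a hedged sketch of how `checkAgentOnboardingStatus` behaves for the two auth modes, mirroring the call above. The `MDSD_LOG` value is an assumption, inferred from the mdsd log paths used elsewhere in this diff:

```bash
# Assumed log directory; main.sh gets MDSD_LOG from its environment.
export MDSD_LOG=/var/opt/microsoft/linuxmonagent/log
# Legacy auth: polls up to 30s for "Onboarding success" in mdsd.info,
# or fails fast on the cert-registration error in mdsd.err.
checkAgentOnboardingStatus "false" 30
# AAD MSI auth: polls up to 30s for "Loaded data sources" instead.
checkAgentOnboardingStatus "true" 30
```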
diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh
index ad7cc2232..f73d01554 100644
--- a/kubernetes/linux/setup.sh
+++ b/kubernetes/linux/setup.sh
@@ -9,13 +9,16 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
 dpkg-reconfigure --frontend=noninteractive locales && \
 update-locale LANG=en_US.UTF-8

-#install oneagent - Official bits (05/17/2021)
-wget https://github.com/microsoft/Docker-Provider/releases/download/05172021-oneagent/azure-mdsd_1.10.1-build.master.213_x86_64.deb
+#install oneagent - Official bits (08/04/2021)
+wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.257_x86_64.deb

 /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb
 cp -f $TMPDIR/mdsd.xml /etc/mdsd.d
 cp -f $TMPDIR/envmdsd /etc/mdsd.d

+#logrotate conf for mdsd; can be extended for other log files as well
+cp -f $TMPDIR/logrotate.conf /etc/logrotate.d/ci-agent
+
 #download inotify tools for watching configmap changes
 sudo apt-get update
 sudo apt-get install inotify-tools -y
@@ -44,8 +47,8 @@ sudo apt-get update
 sudo apt-get install td-agent-bit=1.6.8 -y

 # install ruby2.6
-sudo apt-get install software-properties-common -y
-sudo apt-add-repository ppa:brightbox/ruby-ng -y
+sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6
+echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" | sudo tee -a /etc/apt/sources.list
 sudo apt-get update
 sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y
 # fluentd v1 gem
@@ -53,12 +56,20 @@ gem install fluentd -v "1.12.2" --no-document
 fluentd --setup ./fluent
 gem install gyoku iso8601 --no-doc

+# install kubeclient
+wget https://github.com/ganga1980/kubeclient/raw/master/kubeclient-4.9.2.gem
+sudo apt-get install libmagickwand-dev -y
+sudo gem install kubeclient-*.gem --no-document
+rm -rf $TMPDIR/kubeclient-*.gem

 rm -f $TMPDIR/docker-cimprov*.sh
 rm -f $TMPDIR/azure-mdsd*.deb
 rm -f $TMPDIR/mdsd.xml
 rm -f $TMPDIR/envmdsd

+# remove build dependencies
+sudo apt-get remove ruby2.6-dev gcc make -y
+
 # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files
 # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to.
 rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog
@@ -69,4 +80,4 @@ if [ -e "/var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock" ]; then
 #rename
 echo "Renaming unused gemfile.lock for http_parser 0.6.0"
 mv /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/Gemfile.lock /var/lib/gems/2.6.0/gems/http_parser.rb-0.6.0/renamed_Gemfile_lock.renamed
-fi
+fi
\ No newline at end of file
diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml
index 617c81f38..49d4586c1 100644
--- a/kubernetes/omsagent.yaml
+++ b/kubernetes/omsagent.yaml
@@ -362,13 +362,13 @@ spec:
         schema-versions: "v1"
     spec:
       serviceAccountName: omsagent
-      dnsConfig:
+      dnsConfig:
         options:
           - name: ndots
-            value: "3"
+            value: "3"
       containers:
         - name: omsagent
-          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021"
+          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021"
           imagePullPolicy: IfNotPresent
           resources:
             limits:
@@ -384,7 +384,7 @@ spec:
             - name: AKS_REGION
               value: "VALUE_AKS_RESOURCE_REGION_VALUE"
             # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests
-            - name: ISTEST
+            - name: ISTEST
               value: "true"
             #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters
             #- name: ACS_RESOURCE_NAME
@@ -446,7 +446,7 @@ spec:
             timeoutSeconds: 15
         #Only in sidecar scraping mode
         - name: omsagent-prometheus
-          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021"
+          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021"
           imagePullPolicy: IfNotPresent
           resources:
            limits:
@@ -589,7 +589,7 @@ spec:
       serviceAccountName: omsagent
       containers:
        - name: omsagent
-         image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021"
+         image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021"
         imagePullPolicy: IfNotPresent
         resources:
            limits:
@@ -604,8 +604,8 @@ spec:
          - name: AKS_REGION
            value: "VALUE_AKS_RESOURCE_REGION_VALUE"
          # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests
-         - name: ISTEST
-           value: "true"
+         - name: ISTEST
+           value: "true"
         # Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters
         #- name: ACS_RESOURCE_NAME
         #  value: "my_acs_cluster_name"
@@ -754,13 +754,13 @@ spec:
         schema-versions: "v1"
     spec:
       serviceAccountName: omsagent
-      dnsConfig:
+      dnsConfig:
         options:
           - name: ndots
-            value: "3"
+            value: "3"
       containers:
         - name: omsagent-win
-          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021"
+          image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2"
           imagePullPolicy: IfNotPresent
           resources:
             limits:
diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile
index 94be59644..0ba64cd75 100644
--- a/kubernetes/windows/Dockerfile
+++ b/kubernetes/windows/Dockerfile
@@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com
 LABEL vendor=Microsoft\ Corp \
     com.microsoft.product="Azure Monitor for containers"

-ARG IMAGE_TAG=win-ciprod06112021
+ARG IMAGE_TAG=win-ciprod06112021-2

 # Do not split this into multiple RUN!
# Docker creates a layer for every RUN-Statement diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index bc053b0d6..3cbc11e20 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -43,17 +43,49 @@ function Start-FileSystemWatcher { function Set-EnvironmentVariables { $domain = "opinsights.azure.com" - $cloud_environment = "public" + $mcs_endpoint = "monitor.azure.com" + $cloud_environment = "azurepubliccloud" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging $domain = Get-Content /etc/omsagent-secret/DOMAIN - $cloud_environment = "national" + if (![string]::IsNullOrEmpty($domain)) { + if ($domain -eq "opinsights.azure.com") { + $cloud_environment = "azurepubliccloud" + $mcs_endpoint = "monitor.azure.com" + } elseif ($domain -eq "opinsights.azure.cn") { + $cloud_environment = "azurechinacloud" + $mcs_endpoint = "monitor.azure.cn" + } elseif ($domain -eq "opinsights.azure.us") { + $cloud_environment = "azureusgovernmentcloud" + $mcs_endpoint = "monitor.azure.us" + } elseif ($domain -eq "opinsights.azure.eaglex.ic.gov") { + $cloud_environment = "usnat" + $mcs_endpoint = "monitor.azure.eaglex.ic.gov" + } elseif ($domain -eq "opinsights.azure.microsoft.scloud") { + $cloud_environment = "ussec" + $mcs_endpoint = "monitor.azure.microsoft.scloud" + } else { + Write-Host "Invalid or Unsupported domain name $($domain). EXITING....." + exit 1 + } + } else { + Write-Host "Domain name either null or empty. EXITING....." + exit 1 + } } + Write-Host "Log analytics domain: $($domain)" + Write-Host "MCS endpoint: $($mcs_endpoint)" + Write-Host "Cloud Environment: $($cloud_environment)" + # Set DOMAIN [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Process") [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Machine") + # Set MCS Endpoint + [System.Environment]::SetEnvironmentVariable("MCS_ENDPOINT", $mcs_endpoint, "Process") + [System.Environment]::SetEnvironmentVariable("MCS_ENDPOINT", $mcs_endpoint, "Machine") + # Set CLOUD_ENVIRONMENT [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Process") [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Machine") @@ -140,7 +172,7 @@ function Set-EnvironmentVariables { if ($aiKeyURl) { $aiKeyFetched = "" # retry up to 5 times - for( $i = 1; $i -le 4; $i++) { + for ( $i = 1; $i -le 4; $i++) { try { $response = Invoke-WebRequest -uri $aiKeyURl -UseBasicParsing -TimeoutSec 5 -ErrorAction:Stop @@ -158,7 +190,7 @@ function Set-EnvironmentVariables { Write-Host $_.Exception } } - + # Check if the fetched IKey was properly encoded. if not then turn off telemetry if ($aiKeyFetched -match '^[A-Za-z0-9=]+$') { Write-Host "Using cloud-specific instrumentation key" @@ -229,6 +261,39 @@ function Set-EnvironmentVariables { Write-Host "Failed to set environment variable HOSTNAME for target 'machine' since it is either null or empty" } + # check if its AAD Auth MSI mode via USING_AAD_MSI_AUTH environment variable + $isAADMSIAuth = [System.Environment]::GetEnvironmentVariable("USING_AAD_MSI_AUTH", "process") + if (![string]::IsNullOrEmpty($isAADMSIAuth)) { + [System.Environment]::SetEnvironmentVariable("AAD_MSI_AUTH_MODE", $isAADMSIAuth, "Process") + [System.Environment]::SetEnvironmentVariable("AAD_MSI_AUTH_MODE", $isAADMSIAuth, "Machine") + Write-Host "Successfully set environment variable AAD_MSI_AUTH_MODE - $($isAADMSIAuth) for target 'machine'..." 
+ } + + # check if use token proxy endpoint set via USE_IMDS_TOKEN_PROXY_END_POINT environment variable + $useIMDSTokenProxyEndpoint = [System.Environment]::GetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", "process") + if (![string]::IsNullOrEmpty($useIMDSTokenProxyEndpoint)) { + [System.Environment]::SetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", $useIMDSTokenProxyEndpoint, "Process") + [System.Environment]::SetEnvironmentVariable("USE_IMDS_TOKEN_PROXY_END_POINT", $useIMDSTokenProxyEndpoint, "Machine") + Write-Host "Successfully set environment variable USE_IMDS_TOKEN_PROXY_END_POINT - $($useIMDSTokenProxyEndpoint) for target 'machine'..." + } + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } + + $agentVersion = [System.Environment]::GetEnvironmentVariable("AGENT_VERSION", "process") + if (![string]::IsNullOrEmpty($agentVersion)) { + [System.Environment]::SetEnvironmentVariable("AGENT_VERSION", $agentVersion, "machine") + Write-Host "Successfully set environment variable AGENT_VERSION - $($agentVersion) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable AGENT_VERSION for target 'machine' since it is either null or empty" + } + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb .\setenv.ps1 @@ -364,13 +429,12 @@ function Start-Fluent-Telegraf { if (![string]::IsNullOrEmpty($containerRuntime) -and [string]$containerRuntime.StartsWith('docker') -eq $false) { # change parser from docker to cri if the container runtime is not docker Write-Host "changing parser from Docker to CRI since container runtime : $($containerRuntime) and which is non-docker" - (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf + (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf', 'fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf } # Start telegraf only in sidecar scraping mode $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') - if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') - { + if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') { Write-Host "Starting telegraf..." Start-Telegraf } @@ -410,7 +474,6 @@ function Start-Telegraf { else { Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" } - $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") if (![string]::IsNullOrEmpty($nodeIp)) { [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") @@ -432,14 +495,15 @@ function Start-Telegraf { sc.exe \\$serverName config telegraf start= delayed-auto Write-Host "Successfully set delayed start for telegraf" - } else { + } + else { Write-Host "Failed to get environment variable PODNAME to set delayed telegraf start" } } catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured in delayed telegraf start.. 
continuing without exiting" + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in delayed telegraf start.. continuing without exiting" } Write-Host "Running telegraf service in test mode" C:\opt\telegraf\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test @@ -448,8 +512,7 @@ function Start-Telegraf { # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup Get-Service telegraf | findstr Running - if ($? -eq $false) - { + if ($? -eq $false) { Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." Start-Sleep -s 30 C:\opt\telegraf\telegraf.exe --service start @@ -488,7 +551,7 @@ function Bootstrap-CACertificates { $certMountPath = "C:\ca" Get-ChildItem $certMountPath | Foreach-Object { - $absolutePath=$_.FullName + $absolutePath = $_.FullName Write-Host "cert path: $($absolutePath)" Import-Certificate -FilePath $absolutePath -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose } @@ -510,15 +573,19 @@ Start-FileSystemWatcher $aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") $requiresCertBootstrap = [System.Environment]::GetEnvironmentVariable("REQUIRES_CERT_BOOTSTRAP") if (![string]::IsNullOrEmpty($requiresCertBootstrap) -and ` - $requiresCertBootstrap.ToLower() -eq 'true' -and ` - ![string]::IsNullOrEmpty($aksResourceId) -and ` - $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -{ + $requiresCertBootstrap.ToLower() -eq 'true' -and ` + ![string]::IsNullOrEmpty($aksResourceId) -and ` + $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) { Bootstrap-CACertificates } -Generate-Certificates -Test-CertificatePath +$isAADMSIAuth = [System.Environment]::GetEnvironmentVariable("USING_AAD_MSI_AUTH") +if (![string]::IsNullOrEmpty($isAADMSIAuth) -and $isAADMSIAuth.ToLower() -eq 'true') { + Write-Host "skipping agent onboarding via cert since AAD MSI Auth configured" +} else { + Generate-Certificates + Test-CertificatePath +} Start-Fluent-Telegraf # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index 3bb56ac2a..7f1c9b54f 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -13,8 +13,8 @@ function Install-Go { exit } - $url = "https://dl.google.com/go/go1.14.1.windows-amd64.msi" - $output = Join-Path -Path $tempGo -ChildPath "go1.14.1.windows-amd64.msi" + $url = "https://dl.google.com/go/go1.15.14.windows-amd64.msi" + $output = Join-Path -Path $tempGo -ChildPath "go1.15.14.windows-amd64.msi" Write-Host("downloading go msi into directory path : " + $output + " ...") Invoke-WebRequest -Uri $url -OutFile $output -ErrorAction Stop Write-Host("downloading of go msi into directory path : " + $output + " completed") diff --git a/scripts/dcr-onboarding/ci-extension-dcr-streams.md b/scripts/dcr-onboarding/ci-extension-dcr-streams.md new file mode 100644 index 000000000..cbac41838 --- /dev/null +++ b/scripts/dcr-onboarding/ci-extension-dcr-streams.md @@ -0,0 +1,186 @@ +# 1 - ContainerLogV2 +> Note- Please note, this table uses NG schema +``` +stream-id: Microsoft-ContainerLogV2 +data-type: CONTAINERINSIGHTS_CONTAINERLOGV2 +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerLogV2 +alias-stream-id: Microsoft-ContainerLogV2 +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 2 - InsightsMetrics +``` +stream-id: Microsoft-InsightsMetrics +data-type: INSIGHTS_METRICS_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: InsightsMetrics +alias-stream-id: Microsoft-InsightsMetrics +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 3 - ContainerInventory + +``` +stream-id: Microsoft-ContainerInventory +data-type: CONTAINER_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerInventory +alias-stream-id: Microsoft-ContainerInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 4 - ContainerLog + +``` +stream-id: Microsoft-ContainerLog +data-type: CONTAINER_LOG_BLOB +intelligence-pack: Containers +solutions: ContainerInsights +platform: Any +la-table-name: ContainerLog +alias-stream-id: Microsoft-ContainerLog +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 5 - ContainerNodeInventory + +``` +stream-id: Microsoft-ContainerNodeInventory +data-type: CONTAINER_NODE_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: ContainerNodeInventory +alias-stream-id: Microsoft-ContainerNodeInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 6 - KubePodInventory +``` +stream-id: Microsoft-KubePodInventory +data-type: KUBE_POD_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubePodInventory +alias-stream-id: Microsoft-KubePodInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 7 - KubeNodeInventory +``` +stream-id: Microsoft-KubeNodeInventory +data-type: KUBE_NODE_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeNodeInventory +alias-stream-id: Microsoft-KubeNodeInventory 
+contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 8 - KubePVInventory +``` +stream-id: Microsoft-KubePVInventory +data-type: KUBE_PV_INVENTORY_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubePVInventory +alias-stream-id: Microsoft-KubePVInventory +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 9 - KubeEvents +``` +stream-id: Microsoft-KubeEvents +data-type: KUBE_EVENTS_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeEvents +alias-stream-id: Microsoft-KubeEvents +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 10 - KubeServices +``` +stream-id: Microsoft-KubeServices +data-type: KUBE_SERVICES_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeServices +alias-stream-id: Microsoft-KubeServices +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 11 - KubeMonAgentEvents +``` +stream-id: Microsoft-KubeMonAgentEvents +data-type: KUBE_MON_AGENT_EVENTS_BLOB +intelligence-pack: Containers +solutions: ContainerInsights +platform: Any +la-table-name: KubeMonAgentEvents +alias-stream-id: Microsoft-KubeMonAgentEvents +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 12 - KubeHealth +``` +stream-id: Microsoft-KubeHealth +data-type: KUBE_HEALTH_BLOB +intelligence-pack: ContainerInsights +solutions: ContainerInsights +platform: Any +la-table-name: KubeHealth +alias-stream-id: Microsoft-KubeHealth +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` + +# 13 - Perf +``` +> Note - This stream already exists +stream-id: Microsoft-Perf +data-type: LINUX_PERF_BLOB +intelligence-pack: LogManagement +solutions: ContainerInsights +platform: Any +la-table-name: LogManagement +alias-stream-id: Microsoft-Perf +contact-alias: OMScontainers@microsoft.com +stage: to review +tags: agent +``` diff --git a/scripts/dcr-onboarding/ci-extension-dcr.json b/scripts/dcr-onboarding/ci-extension-dcr.json new file mode 100644 index 000000000..f3fbec79b --- /dev/null +++ b/scripts/dcr-onboarding/ci-extension-dcr.json @@ -0,0 +1,59 @@ +{ + "location": "", + "properties": { + "dataSources": { + "extensions": [ + { + "name": "ContainerInsightsExtension", + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + + ], + "extensionName": "ContainerInsights" + } + ] + }, + "destinations": { + "logAnalytics": [ + { + "workspaceResourceId": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/", + "name": "ciworkspace" + } + ] + }, + "dataFlows": [ + { + "streams": [ + "Microsoft-Perf", + "Microsoft-ContainerInventory", + "Microsoft-ContainerLog", + "Microsoft-ContainerLogV2", + "Microsoft-ContainerNodeInventory", + "Microsoft-KubeEvents", + "Microsoft-KubeHealth", + "Microsoft-KubeMonAgentEvents", + "Microsoft-KubeNodeInventory", + "Microsoft-KubePodInventory", + "Microsoft-KubePVInventory", + "Microsoft-KubeServices", + "Microsoft-InsightsMetrics" + ], + "destinations": [ + "ciworkspace" + ] + } 
+ ] + } +} diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index 29b755331..40b0793bc 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -116,7 +116,7 @@ remove_monitoring_tags() if [ "$isUsingServicePrincipal" = true ] ; then echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" else echo "login to the azure interactively" az login --use-device-code diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 828d061ac..e79ef2138 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -62,11 +62,10 @@ $isArcK8sCluster = $false $isAksCluster = $false $isUsingServicePrincipal = $false -# released chart version in mcr -$mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.3" -$mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" -$helmLocalRepoName = "." +# microsoft helm chart repo +$microsoftHelmRepo="https://microsoft.github.io/charts/repo" +$microsoftHelmRepoName="microsoft" + $omsAgentDomainName="opinsights.azure.com" if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { @@ -547,16 +546,12 @@ Write-Host "Helm version" : $helmVersion Write-Host("Installing or upgrading if exists, Azure Monitor for containers HELM chart ...") try { - Write-Host("pull the chart from mcr.microsoft.com") - [System.Environment]::SetEnvironmentVariable("HELM_EXPERIMENTAL_OCI", 1, "Process") - - Write-Host("pull the chart from mcr.microsoft.com") - helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} - - Write-Host("export the chart from local cache to current directory") - helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + Write-Host("Add helm chart repo- ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}") + helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo} + Write-Host("Updating the helm chart repo- ${microsoftHelmRepoName} to get latest chart versions") + helm repo update ${microsoftHelmRepoName} - $helmChartRepoPath = "${helmLocalRepoName}" + "/" + "${helmChartName}" + $helmChartRepoPath = "${microsoftHelmRepoName}" + "/" + "${helmChartName}" Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index f27f944fd..5fc241517 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -43,11 +43,9 @@ defaultAzureCloud="AzureCloud" # default domain will be for public cloud omsAgentDomainName="opinsights.azure.com" -# released chart version in mcr -mcrChartVersion="2.8.3" -mcr="mcr.microsoft.com" -mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" -helmLocalRepoName="." 
+# microsoft helm chart repo
+microsoftHelmRepo="https://microsoft.github.io/charts/repo"
+microsoftHelmRepoName="microsoft"
helmChartName="azuremonitor-containers"

# default release name used during onboarding
@@ -435,9 +433,10 @@ create_default_log_analytics_workspace() {
  workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode
  isRGExists=$(az group exists -g $workspaceResourceGroup)
+  isRGExists=$(echo $isRGExists | tr -d '"\r\n')
  workspaceName="DefaultWorkspace-"$subscriptionId"-"$workspaceRegionCode

-  if $isRGExists; then
+  if [ "${isRGExists}" == "true" ]; then
    echo "using existing default resource group:"$workspaceResourceGroup
  else
    echo "creating resource group: $workspaceResourceGroup in region: $workspaceRegion"
@@ -455,7 +454,7 @@ create_default_log_analytics_workspace() {
  fi
  workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id -o json)
-  workspaceResourceId=$(echo $workspaceResourceId | tr -d '"')
+  workspaceResourceId=$(echo $workspaceResourceId | tr -d '"\r\n')
  echo "workspace resource Id: ${workspaceResourceId}"
}

@@ -495,10 +494,16 @@ install_helm_chart() {
    adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv)
    adminPassword=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminPassword' -o tsv)
    apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv)
+    # certain az cli versions add \r\n, so trim them
+    adminUserName=$(echo $adminUserName | tr -d '"\r\n')
+    adminPassword=$(echo $adminPassword | tr -d '"\r\n')
+    apiServer=$(echo $apiServer | tr -d '"\r\n')
    echo "login to the cluster via oc login"
    oc login $apiServer -u $adminUserName -p $adminPassword
-    echo "creating project azure-monitor-for-containers"
+    echo "creating project: azure-monitor-for-containers"
    oc new-project $openshiftProjectName
+    echo "switching to project: azure-monitor-for-containers"
+    oc project $openshiftProjectName
    echo "getting config-context of aro v4 cluster"
    kubeconfigContext=$(oc config current-context)
  fi
@@ -513,15 +518,7 @@ install_helm_chart() {
  clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv)
  echo "cluster region is : ${clusterRegion}"

-  echo "pull the chart version ${mcrChartVersion} from ${mcr}/${mcrChartRepoPath}"
-  export HELM_EXPERIMENTAL_OCI=1
-  helm chart pull $mcr/$mcrChartRepoPath:$mcrChartVersion
-
-  echo "export the chart from local cache to current directory"
-  helm chart export $mcr/$mcrChartRepoPath:$mcrChartVersion --destination .
-
-  helmChartRepoPath=$helmLocalRepoName/$helmChartName
-
+  helmChartRepoPath=$microsoftHelmRepoName/$helmChartName
  echo "helm chart repo path: ${helmChartRepoPath}"

  if [ ! -z "$proxyEndpoint" ]; then
-z "$proxyEndpoint" ]; then @@ -550,7 +547,7 @@ install_helm_chart() { login_to_azure() { if [ "$isUsingServicePrincipal" = true ]; then echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId" else echo "login to the azure interactively" az login --use-device-code @@ -581,6 +578,14 @@ enable_aks_monitoring_addon() { echo "status after enabling of aks monitoringa addon:$status" } +# add helm chart repo and update repo to get latest chart version +add_and_update_helm_chart_repo() { + echo "adding helm repo: ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}" + helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo} + echo "updating helm repo: ${microsoftHelmRepoName} to get local charts updated with latest ones" + helm repo update +} + # parse and validate args parse_args $@ @@ -644,6 +649,9 @@ else attach_monitoring_tags fi +# add helm repo & update to get the latest chart version +add_and_update_helm_chart_repo + # install helm chart install_helm_chart diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 5456a7072..edd48c938 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -19,14 +19,14 @@ set -e set -o pipefail -# released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.3" -mcr="mcr.microsoft.com" -mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" - +# microsoft helm chart repo +microsoftHelmRepo="https://microsoft.github.io/charts/repo" +microsoftHelmRepoName="microsoft" # default to public cloud since only supported cloud is azure public clod defaultAzureCloud="AzureCloud" -helmLocalRepoName="." 
helmChartName="azuremonitor-containers"

# default release name used during onboarding
@@ -38,6 +38,9 @@ arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters"
# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource
resourceProvider="Microsoft.Kubernetes/connectedClusters"

+# resource provider for azure redhat openshift v4 cluster
+aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters"
+
# Azure Arc enabled Kubernetes cluster resource
isArcK8sCluster=false

@@ -235,10 +238,14 @@ upgrade_helm_chart_release() {
    adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv)
    adminPassword=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminPassword' -o tsv)
    apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv)
+    # certain az cli versions add \r\n, so trim them
+    adminUserName=$(echo $adminUserName | tr -d '"\r\n')
+    adminPassword=$(echo $adminPassword | tr -d '"\r\n')
+    apiServer=$(echo $apiServer | tr -d '"\r\n')
    echo "login to the cluster via oc login"
    oc login $apiServer -u $adminUserName -p $adminPassword
-    echo "creating project azure-monitor-for-containers"
-    oc new-project $openshiftProjectName
+    echo "switching to project azure-monitor-for-containers"
+    oc project $openshiftProjectName
    echo "getting config-context of aro v4 cluster"
    kubeconfigContext=$(oc config current-context)
  fi
@@ -249,15 +256,7 @@ upgrade_helm_chart_release() {
    echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..."
  fi

-  export HELM_EXPERIMENTAL_OCI=1
-
-  echo "pull the chart from ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}"
-  helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}
-
-  echo "export the chart from local cache to current directory"
-  helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination .
-
-  helmChartRepoPath=$helmLocalRepoName/$helmChartName
+  helmChartRepoPath=$microsoftHelmRepoName/$helmChartName

-  echo "upgrading the release: $releaseName to chart version : ${mcrChartVersion}"
+  echo "upgrading the release: $releaseName to the latest chart version"
  helm get values $releaseName -o yaml | helm upgrade --install $releaseName $helmChartRepoPath -f -

@@ -267,7 +266,7 @@ upgrade_helm_chart_release() {
login_to_azure() {
  if [ "$isUsingServicePrincipal" = true ]; then
    echo "login to the azure using provided service principal creds"
-    az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId
+    az login --service-principal --username="$servicePrincipalClientId" --password="$servicePrincipalClientSecret" --tenant="$servicePrincipalTenantId"
  else
    echo "login to the azure interactively"
    az login --use-device-code
@@ -296,6 +295,14 @@ validate_and_configure_supported_cloud() {
  fi
}

+# add helm chart repo and update repo to get latest chart version
+add_and_update_helm_chart_repo() {
+  echo "adding helm repo: ${microsoftHelmRepoName} with repo path: ${microsoftHelmRepo}"
+  helm repo add ${microsoftHelmRepoName} ${microsoftHelmRepo}
+  echo "updating helm repo: ${microsoftHelmRepoName} to get the latest chart versions"
+  helm repo update
+}
+
# parse and validate args
parse_args $@

@@ -322,6 +329,9 @@ fi
# validate the cluster has monitoring tags
validate_monitoring_tags

+# add helm repo & update to get the latest chart version
+add_and_update_helm_chart_repo
+
# upgrade helm chart release
upgrade_helm_chart_release

diff --git a/source/plugins/go/src/extension/extension.go b/source/plugins/go/src/extension/extension.go
new file mode 100644
index 000000000..c68140ded
--- /dev/null
+++ b/source/plugins/go/src/extension/extension.go
@@ -0,0 +1,101 @@
+package extension
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+
+	uuid "github.com/google/uuid"
+	"github.com/ugorji/go/codec"
+)
+
+type Extension struct {
+	datatypeStreamIdMap map[string]string
+}
+
+var singleton *Extension
+var once sync.Once
+var extensionconfiglock sync.Mutex
+var logger *log.Logger
+var containerType string
+
+func GetInstance(flbLogger *log.Logger, flbContainerType string) *Extension {
+	once.Do(func() {
+		singleton = &Extension{make(map[string]string)}
+		flbLogger.Println("Extension Instance created")
+	})
+	logger = flbLogger
+	// assign to the package-level variable; a parameter named containerType
+	// would shadow it and make this assignment a no-op
+	containerType = flbContainerType
+	return singleton
+}
+
+func (e *Extension) GetOutputStreamId(datatype string) string {
+	extensionconfiglock.Lock()
+	defer extensionconfiglock.Unlock()
+	if len(e.datatypeStreamIdMap) > 0 && e.datatypeStreamIdMap[datatype] != "" {
+		message := fmt.Sprintf("OutputstreamId: %s for the datatype: %s", e.datatypeStreamIdMap[datatype], datatype)
+		logger.Printf(message)
+		return e.datatypeStreamIdMap[datatype]
+	}
+	var err error
+	e.datatypeStreamIdMap, err = getDataTypeToStreamIdMapping()
+	if err != nil {
+		message := fmt.Sprintf("Error getting datatype to streamid mapping: %s", err.Error())
+		logger.Printf(message)
+	}
+	return e.datatypeStreamIdMap[datatype]
+}
+
+func getDataTypeToStreamIdMapping() (map[string]string, error) {
+	logger.Printf("extensionconfig::getDataTypeToStreamIdMapping:: getting extension config from fluent socket - start")
+	guid := uuid.New()
+	datatypeOutputStreamMap := make(map[string]string)
+
+	taggedData := map[string]interface{}{"Request": "AgentTaggedData", "RequestId": guid.String(), "Tag": "ContainerInsights", "Version": "1"}
+	jsonBytes, err := json.Marshal(taggedData)
+	if err != nil {
+		return datatypeOutputStreamMap, err
+	}
+
+	var data []byte
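+	// Encode the JSON request as a single msgpack string; this is the payload
+	// that is written to (and read back from) the mdsd fluent socket below.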
+	enc := codec.NewEncoderBytes(&data, new(codec.MsgpackHandle))
+	if err := enc.Encode(string(jsonBytes)); err != nil {
+		return datatypeOutputStreamMap, err
+	}
+
+	fs := &FluentSocketWriter{}
+	fs.sockAddress = "/var/run/mdsd/default_fluent.socket"
+	if containerType != "" && strings.ToLower(containerType) == "prometheussidecar" {
+		fs.sockAddress = fmt.Sprintf("/var/run/mdsd-%s/default_fluent.socket", containerType)
+	}
+	logger.Printf("Info::mdsd::Making call to FluentSocket: %s to write and read the config data", fs.sockAddress)
+	responseBytes, err := fs.WriteAndRead(data)
+	defer fs.disConnect()
+	if err != nil {
+		return datatypeOutputStreamMap, err
+	}
+	response := string(responseBytes)
+
+	var responseObject AgentTaggedDataResponse
+	err = json.Unmarshal([]byte(response), &responseObject)
+	if err != nil {
+		logger.Printf("Error::mdsd::Failed to unmarshal config data. Error message: %s", err.Error())
+		return datatypeOutputStreamMap, err
+	}
+
+	var extensionData TaggedData
+	if err := json.Unmarshal([]byte(responseObject.TaggedData), &extensionData); err != nil {
+		logger.Printf("Error::mdsd::Failed to unmarshal tagged data. Error message: %s", err.Error())
+		return datatypeOutputStreamMap, err
+	}
+
+	extensionConfigs := extensionData.ExtensionConfigs
+	logger.Printf("Info::mdsd::build the datatype and streamid map -- start")
+	for _, extensionConfig := range extensionConfigs {
+		outputStreams := extensionConfig.OutputStreams
+		for dataType, outputStreamID := range outputStreams {
+			logger.Printf("Info::mdsd::datatype: %s, outputstreamId: %s", dataType, outputStreamID)
+			if streamID, ok := outputStreamID.(string); ok {
+				datatypeOutputStreamMap[dataType] = streamID
+			}
+		}
+	}
+	logger.Printf("Info::mdsd::build the datatype and streamid map -- end")
+
+	logger.Printf("extensionconfig::getDataTypeToStreamIdMapping:: getting extension config from fluent socket - end")
+
+	return datatypeOutputStreamMap, nil
+}
diff --git a/source/plugins/go/src/extension/interfaces.go b/source/plugins/go/src/extension/interfaces.go
new file mode 100644
index 000000000..c70ef17b8
--- /dev/null
+++ b/source/plugins/go/src/extension/interfaces.go
@@ -0,0 +1,34 @@
+package extension
+
+// AgentTaggedDataResponse struct for the response to an AgentTaggedData request
+type AgentTaggedDataResponse struct {
+	Request     string `json:"Request"`
+	RequestID   string `json:"RequestId"`
+	Version     string `json:"Version"`
+	Success     bool   `json:"Success"`
+	Description string `json:"Description"`
+	TaggedData  string `json:"TaggedData"`
+}
+
+// TaggedData structure for the response payload
+type TaggedData struct {
+	SchemaVersion           int                         `json:"schemaVersion"`
+	Version                 int                         `json:"version"`
+	ExtensionName           string                      `json:"extensionName"`
+	ExtensionConfigs        []ExtensionConfig           `json:"extensionConfigurations"`
+	OutputStreamDefinitions map[string]StreamDefinition `json:"outputStreamDefinitions"`
+}
+
+// StreamDefinition structure for named pipes
+type StreamDefinition struct {
+	NamedPipe string `json:"namedPipe"`
+}
+
+// ExtensionConfig structure for extension definition in DCR
+type ExtensionConfig struct {
+	ID                string                 `json:"id"`
+	OriginIds         []string               `json:"originIds"`
+	ExtensionSettings map[string]interface{} `json:"extensionSettings"`
+	InputStreams      map[string]interface{} `json:"inputStreams"`
+	OutputStreams     map[string]interface{} `json:"outputStreams"`
+}
diff --git a/source/plugins/go/src/extension/socket_writer.go b/source/plugins/go/src/extension/socket_writer.go
new file mode 100644
index 000000000..1b16b319c
--- /dev/null
+++ b/source/plugins/go/src/extension/socket_writer.go
@@ -0,0 +1,85 @@
+package extension
+
+import (
+	"net"
+)
+
+// MaxRetries for trying to write data to the socket
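+// Writes are retried in a tight loop; on a permanent (non-temporary) error the
+// socket is closed so that the next Write reconnects (see writeWithRetries).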
+const MaxRetries = 5
+
+// ReadBufferSize for reading data from sockets
+// Current CI extension config size is ~5KB; going with 20KB to handle any future scenarios
+const ReadBufferSize = 20480
+
+// FluentSocketWriter writes data to AMA's default fluent socket
+type FluentSocketWriter struct {
+	socket      net.Conn
+	sockAddress string
+}
+
+func (fs *FluentSocketWriter) connect() error {
+	c, err := net.Dial("unix", fs.sockAddress)
+	if err != nil {
+		return err
+	}
+	fs.socket = c
+	return nil
+}
+
+func (fs *FluentSocketWriter) disConnect() error {
+	if fs.socket != nil {
+		fs.socket.Close()
+		fs.socket = nil
+	}
+	return nil
+}
+
+func (fs *FluentSocketWriter) writeWithRetries(data []byte) (int, error) {
+	var (
+		err error
+		n   int
+	)
+	for i := 0; i < MaxRetries; i++ {
+		n, err = fs.socket.Write(data)
+		if err == nil {
+			return n, nil
+		}
+	}
+	if err, ok := err.(net.Error); !ok || !err.Temporary() {
+		// so that connect() is called next time if write fails
+		// this happens when mdsd is restarted
+		_ = fs.socket.Close() // no need to log the socket closing error
+		fs.socket = nil
+	}
+	return 0, err
+}
+
+func (fs *FluentSocketWriter) read() ([]byte, error) {
+	buf := make([]byte, ReadBufferSize)
+	n, err := fs.socket.Read(buf)
+	if err != nil {
+		return nil, err
+	}
+	return buf[:n], nil
+}
+
+func (fs *FluentSocketWriter) Write(payload []byte) (int, error) {
+	if fs.socket == nil {
+		// previous write failed with permanent error and socket was closed.
+		if err := fs.connect(); err != nil {
+			return 0, err
+		}
+	}
+
+	return fs.writeWithRetries(payload)
+}
+
+// WriteAndRead writes data to the socket and returns the response
+func (fs *FluentSocketWriter) WriteAndRead(payload []byte) ([]byte, error) {
+	_, err := fs.Write(payload)
+	if err != nil {
+		return nil, err
+	}
+	return fs.read()
+}
diff --git a/source/plugins/go/src/go.mod b/source/plugins/go/src/go.mod
index c3e6c2044..4ead145ac 100644
--- a/source/plugins/go/src/go.mod
+++ b/source/plugins/go/src/go.mod
@@ -3,33 +3,17 @@ module Docker-Provider/source/plugins/go/src
go 1.14

require (
-	code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5 // indirect
	github.com/Azure/azure-kusto-go v0.3.2
	github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
+	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+	github.com/dnaeon/go-vcr v1.2.0 // indirect
	github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7
-	github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect
-	github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e // indirect
-	github.com/golang/glog v0.0.0-20141105023935-44145f04b68c // indirect
-	github.com/google/btree v0.0.0-20160524151835-7d79101e329e // indirect
-	github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 // indirect
-	github.com/google/uuid v1.1.1
-	github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect
-	github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 // indirect
-	github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 // indirect
+	github.com/google/uuid v1.1.2
	github.com/microsoft/ApplicationInsights-Go v0.4.3
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da // indirect
-	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
-	github.com/philhofer/fwd v1.0.0 // indirect
-	github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
+	github.com/philhofer/fwd v1.1.1 // indirect
github.com/tinylib/msgp v1.1.2 - github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 // indirect - golang.org/x/net v0.0.0-20200421231249-e086a090c8fd // indirect - golang.org/x/time v0.0.0-20161028155119-f51c12702a4d // indirect - gopkg.in/inf.v0 v0.9.0 // indirect + github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 - k8s.io/api v0.0.0-20180628040859-072894a440bd // indirect - k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d - k8s.io/client-go v8.0.0+incompatible - golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f + k8s.io/apimachinery v0.21.0 + k8s.io/client-go v0.21.0 ) diff --git a/source/plugins/go/src/go.sum b/source/plugins/go/src/go.sum index 7e8b3d765..7f93bb260 100644 --- a/source/plugins/go/src/go.sum +++ b/source/plugins/go/src/go.sum @@ -1,10 +1,28 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5 h1:LTlZ2AD8IV/d1JRzB+HHfZfF1M+K8lyOlN28zDEpw7U= -code.cloudfoundry.org/clock v1.0.1-0.20200131002207-86534f4ca3a5/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -github.com/Azure/azure-kusto-go v0.1.3 h1:0u+YqfIvwj5PHd+moXwtlxVePt8xTLU1ixM8Q6PjJ3o= -github.com/Azure/azure-kusto-go v0.1.3/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= -github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86 
h1:vyhCediIKg1gZ9H/kMcutU8F8BFNhxLk76Gti8UAOzo= -github.com/Azure/azure-kusto-go v0.1.4-0.20200427191510-041d4ed55f86/go.mod h1:55hwXJ3PaahmWZFP7VC4+PlgsSUuetSA30rFtYFabfc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-kusto-go v0.3.2 h1:XpS9co6GvEDl2oICF9HsjEsQVwEpRK6wbNWb9Z+uqsY= github.com/Azure/azure-kusto-go v0.3.2/go.mod h1:wd50n4qlsSxh+G4f80t+Fnl2ShK9AcXD+lMOstiKuYo= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= @@ -16,18 +34,21 @@ github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFE github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= -github.com/Azure/go-autorest v1.1.1 h1:4G9tVCqooRY3vDTB2bA1Z01PlSALtnUbji0AfzthUSs= -github.com/Azure/go-autorest v14.1.1+incompatible h1:m2F62e1Zk5DV3HENGdH/wEuzvJZIynHG4fHF7oiQwgE= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.10.2 h1:NuSF3gXetiHyUbVdneJMEVyPUYAe5wh+aN08JYAf1tI= +github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= @@ -35,128 +56,471 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod 
h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= -github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7 h1:mck6KdLX2FTh2/ZD27dK69ehWDZR4hCk+nLf+HvAbDk= github.com/fluent/fluent-bit-go v0.0.0-20171103221316-c4a158a6e3a7/go.mod h1:JVF1Nl3QOPpKTR8xDjhkm0xINYUX0z4XdJvOpIUF+Eo= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e h1:ago6fNuQ6IhszPsXkeU7qRCyfsIX7L67WDybsAPkLl8= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e h1:JHB7F/4TJCrYBW8+GZO8VkWDj1jxcWuCl6uxKODiyi4= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y= 
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/microsoft/ApplicationInsights-Go v0.4.2 h1:LCv4NtCpXpsUF6ZUzZdpVG2x4RwebY7tiJUb25uYXiM= -github.com/microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:DupRHRNoeuH4j8Yv3nux9/IXo3HZ0kO5A1ykNK4vR2E= github.com/microsoft/ApplicationInsights-Go v0.4.3 h1:gBuy5rM3o6Zo69QTkq1Ens8wx6sVf+mpgMjjfayiRcw= github.com/microsoft/ApplicationInsights-Go v0.4.3/go.mod h1:ih0t3h84PdzV1qGeUs89o9wL8eCuwf24M7TZp/nyqXk= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da h1:ZQGIPjr1iTtUPXZFk8WShqb5G+Qg65VHFLtSvmHh+Mw= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag 
v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc h1:LUUe4cdABGrIJAhl1P1ZpWY76AwukVszFdwkVFVLwIk= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5 h1:JRe7Bc0YQq+x7Bm3p/LIBIb4aopsdr3H0KRKRI8g6oY= github.com/ugorji/go v1.1.2-0.20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b h1:/GxqO8gbyb+sNnviFY2IIMrtm8vGg6NEJDft68wJY/g= -golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879 h1:0rFa7EaCGdQPmZVbo9F7MNF65b8dyzS6EUnXjs9Cllk= -golang.org/x/net v0.0.0-20170809000501-1c05540f6879/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd h1:QPwSajcTUrFriMF1nJ3XzgoqakqQEsnZf9LdXdi2nkI= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sys v0.0.0-20171031081856-95c657629925 h1:nCH33NboKIsT4HoXBsXTWX8ul303HxWgkc5s2Ezwacg= -golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317 h1:WKW+OPdYPlvOTVGHuMfjnIC6yY2SI93yFB0pZ7giBmQ= -golang.org/x/text v0.0.0-20170810154203-b19bf474d317/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 h1:AFxeG48hTWHhDTQDk/m2gorfVHUEa9vo3tp3D7TzwjI= gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054 h1:ROF+R/wHHruzF40n5DfPv2jwm7rCJwvs8fz+RTZWjLE= -gopkg.in/yaml.v2 v2.0.0-20170721113624-670d4cfef054/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20180628040859-072894a440bd h1:HzgYeLDS1jLxw8DGr68KJh9cdQ5iZJizG0HZWstIhfQ= -k8s.io/api v0.0.0-20180628040859-072894a440bd/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d 
h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4= -k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4= -k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/source/plugins/go/src/ingestion_token_utils.go b/source/plugins/go/src/ingestion_token_utils.go new file mode 100644 index 000000000..c96685042 --- /dev/null +++ b/source/plugins/go/src/ingestion_token_utils.go @@ -0,0 +1,516 @@ +package main + +import ( + 
"encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "time" +) + +const IMDSTokenPathForWindows = "c:/etc/imds-access-token/token" // only used in windows +const AMCSAgentConfigAPIVersion = "2020-08-01-preview" +const AMCSIngestionTokenAPIVersion = "2020-04-01-preview" +const MaxRetries = 3 + +var IMDSToken string +var IMDSTokenExpiration int64 + +var ConfigurationId string +var ChannelId string + +var IngestionAuthToken string +var IngestionAuthTokenExpiration int64 + +type IMDSResponse struct { + AccessToken string `json:"access_token"` + ClientID string `json:"client_id"` + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + ExtExpiresIn string `json:"ext_expires_in"` + NotBefore string `json:"not_before"` + Resource string `json:"resource"` + TokenType string `json:"token_type"` +} + +type AgentConfiguration struct { + Configurations []struct { + Configurationid string `json:"configurationId"` + Etag string `json:"eTag"` + Op string `json:"op"` + Content struct { + Datasources []struct { + Configuration struct { + Extensionname string `json:"extensionName"` + } `json:"configuration"` + ID string `json:"id"` + Kind string `json:"kind"` + Streams []struct { + Stream string `json:"stream"` + Solution string `json:"solution"` + Extensionoutputstream string `json:"extensionOutputStream"` + } `json:"streams"` + Sendtochannels []string `json:"sendToChannels"` + } `json:"dataSources"` + Channels []struct { + Endpoint string `json:"endpoint"` + ID string `json:"id"` + Protocol string `json:"protocol"` + } `json:"channels"` + Extensionconfigurations struct { + Containerinsights []struct { + ID string `json:"id"` + Originids []string `json:"originIds"` + Outputstreams struct { + LinuxPerfBlob string `json:"LINUX_PERF_BLOB"` + ContainerInventoryBlob string `json:"CONTAINER_INVENTORY_BLOB"` + ContainerLogBlob string `json:"CONTAINER_LOG_BLOB"` + ContainerinsightsContainerlogv2 string `json:"CONTAINERINSIGHTS_CONTAINERLOGV2"` + ContainerNodeInventoryBlob string `json:"CONTAINER_NODE_INVENTORY_BLOB"` + KubeEventsBlob string `json:"KUBE_EVENTS_BLOB"` + KubeHealthBlob string `json:"KUBE_HEALTH_BLOB"` + KubeMonAgentEventsBlob string `json:"KUBE_MON_AGENT_EVENTS_BLOB"` + KubeNodeInventoryBlob string `json:"KUBE_NODE_INVENTORY_BLOB"` + KubePodInventoryBlob string `json:"KUBE_POD_INVENTORY_BLOB"` + KubePvInventoryBlob string `json:"KUBE_PV_INVENTORY_BLOB"` + KubeServicesBlob string `json:"KUBE_SERVICES_BLOB"` + InsightsMetricsBlob string `json:"INSIGHTS_METRICS_BLOB"` + } `json:"outputStreams"` + } `json:"ContainerInsights"` + } `json:"extensionConfigurations"` + } `json:"content"` + } `json:"configurations"` +} + +type IngestionTokenResponse struct { + Configurationid string `json:"configurationId"` + Ingestionauthtoken string `json:"ingestionAuthToken"` +} + +func getAccessTokenFromIMDS() (string, int64, error) { + Log("Info getAccessTokenFromIMDS: start") + useIMDSTokenProxyEndPoint := os.Getenv("USE_IMDS_TOKEN_PROXY_END_POINT") + imdsAccessToken := "" + var responseBytes []byte + var err error + + if (useIMDSTokenProxyEndPoint != "" && strings.Compare(strings.ToLower(useIMDSTokenProxyEndPoint), "true") == 0) { + Log("Info Reading IMDS Access Token from IMDS Token proxy endpoint") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + msi_endpoint_string := fmt.Sprintf("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://%s/", mcsEndpoint) + var msi_endpoint *url.URL + msi_endpoint, 
err := url.Parse(msi_endpoint_string) + if err != nil { + Log("getAccessTokenFromIMDS: Error creating IMDS endpoint URL: %s", err.Error()) + return imdsAccessToken, 0, err + } + req, err := http.NewRequest("GET", msi_endpoint.String(), nil) + if err != nil { + Log("getAccessTokenFromIMDS: Error creating HTTP request: %s", err.Error()) + return imdsAccessToken, 0, err + } + req.Header.Add("Metadata", "true") + + //IMDS endpoint is non-routable and requests don't go through the proxy, hence using a dedicated http client + httpClient := &http.Client{Timeout: 30 * time.Second} + + // Call managed services for Azure resources token endpoint + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + resp, err = httpClient.Do(req) + if err != nil { + message := fmt.Sprintf("getAccessTokenFromIMDS: Error calling token endpoint: %s, retryCount: %d", err.Error(), retryCount) + Log(message) + SendException(message) + continue + } + + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + + Log("getAccessTokenFromIMDS: IMDS Response Status: %d, retryCount: %d", resp.StatusCode, retryCount) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err == nil && after > 0 { + retryDelay = time.Duration(after) * time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getAccessTokenFromIMDS: IMDS Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + SendException(message) + return imdsAccessToken, 0, err + } + IsSuccess = true + break // call succeeded, don't retry any more + } + if !IsSuccess || resp == nil || resp.Body == nil { + Log("getAccessTokenFromIMDS: IMDS Request ran out of retries") + return imdsAccessToken, 0, err + } + + // Pull out response body + responseBytes, err = ioutil.ReadAll(resp.Body) + if err != nil { + Log("getAccessTokenFromIMDS: Error reading response body: %s", err.Error()) + return imdsAccessToken, 0, err + } + + } else { + Log("Info Reading IMDS Access Token from file : %s", IMDSTokenPathForWindows) + if _, err = os.Stat(IMDSTokenPathForWindows); os.IsNotExist(err) { + Log("getAccessTokenFromIMDS: IMDS token file doesn't exist: %s", err.Error()) + return imdsAccessToken, 0, err + } + //adding retries in case we read the token file while it was being written + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + responseBytes, err = ioutil.ReadFile(IMDSTokenPathForWindows) + if err != nil { + Log("getAccessTokenFromIMDS: Could not read IMDS token from file: %s, retryCount: %d", err.Error(), retryCount) + time.Sleep(time.Duration((retryCount + 1) * 100) * time.Millisecond) + continue + } + break + } + } + + if responseBytes == nil { + Log("getAccessTokenFromIMDS: Error responseBytes is nil") + return imdsAccessToken, 0, err + } + + // Unmarshal response body into struct + var imdsResponse IMDSResponse + err = json.Unmarshal(responseBytes, &imdsResponse) + if err != nil { + Log("getAccessTokenFromIMDS: Error unmarshalling the response: %s", err.Error()) + return imdsAccessToken, 0, 
err + } + imdsAccessToken = imdsResponse.AccessToken + + expiration, err := strconv.ParseInt(imdsResponse.ExpiresOn, 10, 64) + if err != nil { + Log("getAccessTokenFromIMDS: Error parsing ExpiresOn field from IMDS response: %s", err.Error()) + return imdsAccessToken, 0, err + } + Log("Info getAccessTokenFromIMDS: end") + return imdsAccessToken, expiration, nil +} + +func getAgentConfiguration(imdsAccessToken string) (configurationId string, channelId string, err error) { + Log("Info getAgentConfiguration: start") + configurationId = "" + channelId = "" + var amcs_endpoint *url.URL + osType := os.Getenv("OS_TYPE") + resourceId := os.Getenv("AKS_RESOURCE_ID") + resourceRegion := os.Getenv("AKS_REGION") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, osType, AMCSAgentConfigAPIVersion) + amcs_endpoint, err = url.Parse(amcs_endpoint_string) + if err != nil { + Log("getAgentConfiguration: Error creating AMCS endpoint URL: %s", err.Error()) + return configurationId, channelId, err + } + + var bearer = "Bearer " + imdsAccessToken + // Create a new request using http + req, err := http.NewRequest("GET", amcs_endpoint.String(), nil) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + Log(message) + return configurationId, channelId, err + } + req.Header.Set("Authorization", bearer) + + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + resp, err = HTTPClient.Do(req) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error calling AMCS endpoint: %s", err.Error()) + Log(message) + SendException(message) + continue + } + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + Log("getAgentConfiguration Response Status: %d", resp.StatusCode) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getAgentConfiguration: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err == nil && after > 0 { + retryDelay = time.Duration(after) * time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getAgentConfiguration: Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + SendException(message) + return configurationId, channelId, err + } + IsSuccess = true + break // call succeeded, don't retry any more + } + if !IsSuccess || resp == nil || resp.Body == nil { + message := fmt.Sprintf("getAgentConfiguration Request ran out of retries") + Log(message) + SendException(message) + return configurationId, channelId, err + } + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + Log("getAgentConfiguration: Error reading response body from AMCS API call: %s", err.Error()) + return configurationId, channelId, err + } + + // Unmarshal response body into struct + var agentConfiguration AgentConfiguration + err = json.Unmarshal(responseBytes, &agentConfiguration) + if err != nil { + message := fmt.Sprintf("getAgentConfiguration: Error unmarshalling the response: 
%s", err.Error()) + Log(message) + SendException(message) + return configurationId, channelId, err + } + + if len(agentConfiguration.Configurations) == 0 { + message := "getAgentConfiguration: Received empty agentConfiguration.Configurations array" + Log(message) + SendException(message) + return configurationId, channelId, err + } + + if len(agentConfiguration.Configurations[0].Content.Channels) == 0 { + message := "getAgentConfiguration: Received empty agentConfiguration.Configurations[0].Content.Channels" + Log(message) + SendException(message) + return configurationId, channelId, err + } + + configurationId = agentConfiguration.Configurations[0].Configurationid + channelId = agentConfiguration.Configurations[0].Content.Channels[0].ID + + Log("getAgentConfiguration: obtained configurationId: %s, channelId: %s", configurationId, channelId) + Log("Info getAgentConfiguration: end") + + return configurationId, channelId, nil +} + +func getIngestionAuthToken(imdsAccessToken string, configurationId string, channelId string) (ingestionAuthToken string, refreshInterval int64, err error) { + Log("Info getIngestionAuthToken: start") + ingestionAuthToken = "" + refreshInterval = 0 + var amcs_endpoint *url.URL + osType := os.Getenv("OS_TYPE") + resourceId := os.Getenv("AKS_RESOURCE_ID") + resourceRegion := os.Getenv("AKS_REGION") + mcsEndpoint := os.Getenv("MCS_ENDPOINT") + amcs_endpoint_string := fmt.Sprintf("https://%s.handler.control.%s%s/agentConfigurations/%s/channels/%s/issueIngestionToken?platform=%s&api-version=%s", resourceRegion, mcsEndpoint, resourceId, configurationId, channelId, osType, AMCSIngestionTokenAPIVersion) + amcs_endpoint, err = url.Parse(amcs_endpoint_string) + if err != nil { + Log("getIngestionAuthToken: Error creating AMCS endpoint URL: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + var bearer = "Bearer " + imdsAccessToken + // Create a new request using http + req, err := http.NewRequest("GET", amcs_endpoint.String(), nil) + if err != nil { + Log("getIngestionAuthToken: Error creating HTTP request for AMCS endpoint: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + // add authorization header to the req + req.Header.Add("Authorization", bearer) + + var resp *http.Response = nil + IsSuccess := false + for retryCount := 0; retryCount < MaxRetries; retryCount++ { + // Call managed services for Azure resources token endpoint + resp, err = HTTPClient.Do(req) + if err != nil { + message := fmt.Sprintf("getIngestionAuthToken: Error calling AMCS endpoint for ingestion auth token: %s", err.Error()) + Log(message) + SendException(message) + resp = nil + continue + } + + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + + Log("getIngestionAuthToken Response Status: %d", resp.StatusCode) + if IsRetriableError(resp.StatusCode) { + message := fmt.Sprintf("getIngestionAuthToken: Request failed with an error code: %d, retryCount: %d", resp.StatusCode, retryCount) + Log(message) + retryDelay := time.Duration((retryCount + 1) * 100) * time.Millisecond + if resp.StatusCode == 429 { + if resp != nil && resp.Header.Get("Retry-After") != "" { + after, err := strconv.ParseInt(resp.Header.Get("Retry-After"), 10, 64) + if err != nil && after > 0 { + retryDelay = time.Duration(after) * time.Second + } + } + } + time.Sleep(retryDelay) + continue + } else if resp.StatusCode != 200 { + message := fmt.Sprintf("getIngestionAuthToken: Request failed with nonretryable error code: %d, retryCount: %d", resp.StatusCode, retryCount) + 
Log(message) + SendException(message) + return ingestionAuthToken, refreshInterval, err + } + IsSuccess = true + break + } + + if !IsSuccess || resp == nil || resp.Body == nil { + message := "getIngestionAuthToken: ran out of retries calling AMCS for ingestion token" + Log(message) + SendException(message) + return ingestionAuthToken, refreshInterval, err + } + + // Pull out response body + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + Log("getIngestionAuthToken: Error reading response body from AMCS Ingestion API call : %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + // Unmarshal response body into struct + var ingestionTokenResponse IngestionTokenResponse + err = json.Unmarshal(responseBytes, &ingestionTokenResponse) + if err != nil { + Log("getIngestionAuthToken: Error unmarshalling the response: %s", err.Error()) + return ingestionAuthToken, refreshInterval, err + } + + ingestionAuthToken = ingestionTokenResponse.Ingestionauthtoken + + refreshInterval, err = getTokenRefreshIntervalFromAmcsResponse(resp.Header) + if err != nil { + Log("getIngestionAuthToken: Error: failed to parse max-age response header") + return ingestionAuthToken, refreshInterval, err + } + Log("getIngestionAuthToken: refresh interval %d seconds", refreshInterval) + + Log("Info getIngestionAuthToken: end") + return ingestionAuthToken, refreshInterval, nil +} + +var cacheControlHeaderRegex = regexp.MustCompile(`max-age=([0-9]+)`) + +func getTokenRefreshIntervalFromAmcsResponse(header http.Header) (refreshInterval int64, err error) { + cacheControlHeader, valueInMap := header["Cache-Control"] + if !valueInMap { + return 0, errors.New("getTokenRefreshIntervalFromAmcsResponse: Cache-Control not in passed header") + } + + for _, entry := range cacheControlHeader { + match := cacheControlHeaderRegex.FindStringSubmatch(entry) + if len(match) == 2 { + interval := 0 + interval, err = strconv.Atoi(match[1]) + if err != nil { + Log("getTokenRefreshIntervalFromAmcsResponse: error getting timeout from auth token. 
Header: " + strings.Join(cacheControlHeader, ",")) + return 0, err + } + refreshInterval = int64(interval) + return refreshInterval, nil + } + } + + return 0, errors.New("getTokenRefreshIntervalFromAmcsResponse: didn't find max-age in response header") +} + +func refreshIngestionAuthToken() { + for ; true; <-IngestionAuthTokenRefreshTicker.C { + if IMDSToken == "" || IMDSTokenExpiration <= (time.Now().Unix() + 60 * 60) { // token valid 24 hrs and refresh token 1 hr before expiry + imdsToken, imdsTokenExpiry, err := getAccessTokenFromIMDS() + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error on getAccessTokenFromIMDS %s \n", err.Error()) + Log(message) + SendException(message) + } else { + IMDSToken = imdsToken + IMDSTokenExpiration = imdsTokenExpiry + } + } + if IMDSToken == "" { + message := "refreshIngestionAuthToken: IMDSToken is empty" + Log(message) + SendException(message) + continue + } + var err error + // ignore agent configuration expiring, the configuration and channel IDs will never change (without creating an agent restart) + if ConfigurationId == "" || ChannelId == "" { + ConfigurationId, ChannelId, err = getAgentConfiguration(IMDSToken) + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error getAgentConfiguration %s \n", err.Error()) + Log(message) + SendException(message) + continue + } + } + if IMDSToken == "" || ConfigurationId == "" || ChannelId == "" { + message := "refreshIngestionAuthToken: IMDSToken or ConfigurationId or ChannelId empty" + Log(message) + SendException(message) + continue + } + ingestionAuthToken, refreshIntervalInSeconds, err := getIngestionAuthToken(IMDSToken, ConfigurationId, ChannelId) + if err != nil { + message := fmt.Sprintf("refreshIngestionAuthToken: Error getIngestionAuthToken %s \n", err.Error()) + Log(message) + SendException(message) + continue + } + IngestionAuthTokenUpdateMutex.Lock() + ODSIngestionAuthToken = ingestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if refreshIntervalInSeconds > 0 && refreshIntervalInSeconds != defaultIngestionAuthTokenRefreshIntervalSeconds { + //TODO - use Reset which is better when go version upgraded to 1.15 or up rather Stop() and NewTicker + //IngestionAuthTokenRefreshTicker.Reset(time.Second * time.Duration(refreshIntervalInSeconds)) + IngestionAuthTokenRefreshTicker.Stop() + IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(refreshIntervalInSeconds)) + } + } +} + +func IsRetriableError(httpStatusCode int) bool { + retryableStatusCodes := [5]int{408, 429, 502, 503, 504} + for _, code := range retryableStatusCodes { + if code == httpStatusCode { + return true + } + } + return false +} diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 217ba1efc..91a5b4b40 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -22,6 +22,7 @@ import ( "github.com/tinylib/msgp/msgp" lumberjack "gopkg.in/natefinch/lumberjack.v2" + "Docker-Provider/source/plugins/go/src/extension" "github.com/Azure/azure-kusto-go/kusto/ingest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -88,6 +89,7 @@ const IPName = "ContainerInsights" const defaultContainerInventoryRefreshInterval = 60 const kubeMonAgentConfigEventFlushInterval = 60 +const defaultIngestionAuthTokenRefreshIntervalSeconds = 3600 //Eventsource name in mdsd const MdsdContainerLogSourceName = "ContainerLogSource" @@ -106,6 +108,11 @@ const ContainerLogsV1Route = "v1" //container logs schema (v2=ContainerLogsV2 table in LA, anything else 
ContainerLogs table in LA. This is applicable only if Container logs route is NOT ADX) const ContainerLogV2SchemaVersion = "v2" +//env variable for AAD MSI Auth mode +const AADMSIAuthMode = "AAD_MSI_AUTH_MODE" + +// Tag prefix of mdsd output streamid for AMA in MSI auth mode +const MdsdOutputStreamIdTagPrefix = "dcr-" //env variable to container type const ContainerTypeEnv = "CONTAINER_TYPE" @@ -158,17 +165,19 @@ var ( // ADX tenantID AdxTenantID string //ADX client secret - AdxClientSecret string + AdxClientSecret string // container log or container log v2 tag name for oneagent route - MdsdContainerLogTagName string + MdsdContainerLogTagName string // kubemonagent events tag name for oneagent route MdsdKubeMonAgentEventsTagName string // InsightsMetrics tag name for oneagent route - MdsdInsightsMetricsTagName string + MdsdInsightsMetricsTagName string // flag to check if its Windows OS IsWindows bool - // container type - ContainerType string + // container type + ContainerType string + // flag to check whether LA AAD MSI Auth Enabled or not + IsAADMSIAuthMode bool ) var ( @@ -194,6 +203,10 @@ var ( EventHashUpdateMutex = &sync.Mutex{} // parent context used by ADX uploader ParentContext = context.Background() + // IngestionAuthTokenUpdateMutex read and write mutex access for ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex = &sync.Mutex{} + // ODSIngestionAuthToken for windows agent AAD MSI Auth + ODSIngestionAuthToken string ) var ( @@ -201,6 +214,8 @@ var ( ContainerImageNameRefreshTicker *time.Ticker // KubeMonAgentConfigEventsSendTicker to send config events every hour KubeMonAgentConfigEventsSendTicker *time.Ticker + // IngestionAuthTokenRefreshTicker to refresh ingestion token + IngestionAuthTokenRefreshTicker *time.Ticker ) var ( @@ -340,12 +355,12 @@ const ( ) // DataType to be used as enum per data type socket client creation -type DataType int +type DataType int const ( // DataType to be used as enum per data type socket client creation ContainerLogV2 DataType = iota - KubeMonAgentEvents - InsightsMetrics + KubeMonAgentEvents + InsightsMetrics ) func createLogger() *log.Logger { @@ -402,7 +417,9 @@ func updateContainerImageNameMaps() { listOptions := metav1.ListOptions{} listOptions.FieldSelector = fmt.Sprintf("spec.nodeName=%s", Computer) - pods, err := ClientSet.CoreV1().Pods("").List(listOptions) + + // Context was added as a parameter, but we want the same behavior as before: see https://pkg.go.dev/context#TODO + pods, err := ClientSet.CoreV1().Pods("").List(context.TODO(), listOptions) if err != nil { message := fmt.Sprintf("Error getting pods %s\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) @@ -593,7 +610,7 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { @@ -606,10 +623,10 @@ func flushKubeMonAgentEventRecords() { Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -632,23 +649,23 @@ func flushKubeMonAgentEventRecords() { Message: k, 
Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) - } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarhalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, - } - msgPackEntries = append(msgPackEntries, msgPackEntry) + } + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -681,62 +698,66 @@ func flushKubeMonAgentEventRecords() { Message: "No errors", Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) - if err != nil { + if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) - } else { - msgPackEntry := MsgPackEntry{ + } else { + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } } - if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdKubeMonAgentEventsTagName, MdsdOutputStreamIdTagPrefix) == false { + Log("Info::mdsd::obtaining output stream id for data type: %s", KubeMonAgentEventDataType) + MdsdKubeMonAgentEventsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(KubeMonAgentEventDataType) + } Log("Info::mdsd:: using mdsdsource name for KubeMonAgentEvents: %s", MdsdKubeMonAgentEventsTagName) - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) if MdsdKubeMonMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection for KubeMonAgentEvents does not exist. re-connecting ...") CreateMDSDClient(KubeMonAgentEvents, ContainerType) if MdsdKubeMonMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. 
Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - KubeMonEventsMDSDClientCreateErrors += 1 - } + KubeMonEventsMDSDClientCreateErrors += 1 + } } - if MdsdKubeMonMsgpUnixSocketClient != nil { + if MdsdKubeMonMsgpUnixSocketClient != nil { deadline := 10 * time.Second - MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdKubeMonMsgpUnixSocketClient.Write(msgpBytes) - elapsed = time.Since(start) + elapsed = time.Since(start) if er != nil { message := fmt.Sprintf("Error::mdsd::Failed to write to kubemonagent mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) Log(message) if MdsdKubeMonMsgpUnixSocketClient != nil { MdsdKubeMonMsgpUnixSocketClient.Close() MdsdKubeMonMsgpUnixSocketClient = nil - } + } SendException(message) } else { numRecords := len(msgPackEntries) Log("FlushKubeMonAgentEventRecords::Info::Successfully flushed %d records that was %d bytes in %s", numRecords, bts, elapsed) // Send telemetry to AppInsights resource SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions) - } + } } else { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") - } + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") + } } else if len(laKubeMonAgentEventsRecords) > 0 { //for windows, ODS direct kubeMonAgentEventEntry := KubeMonAgentEventBlob{ DataType: KubeMonAgentEventDataType, @@ -760,6 +781,16 @@ func flushKubeMonAgentEventRecords() { req.Header.Set("x-ms-AzureResourceId", ResourceID) } + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") + } + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -869,15 +900,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived %v metrics from %v timeseries", len(laMetrics), len(telegrafRecords)) Log(message) } - + if IsWindows == false { //for linux, mdsd route - var msgPackEntries []MsgPackEntry + var msgPackEntries []MsgPackEntry var i int start := time.Now() var elapsed time.Duration - for i = 0; i < len(laMetrics); i++ { - var interfaceMap map[string]interface{} + for i = 0; i < len(laMetrics); i++ { + var interfaceMap map[string]interface{} stringMap := make(map[string]string) jsonBytes, err := json.Marshal(*laMetrics[i]) if err != nil { @@ -886,31 +917,35 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int SendException(message) return output.FLB_OK } else { - if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { + if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) Log(message) SendException(message) return output.FLB_OK - } else { + } else { for key, value := range interfaceMap { strKey := fmt.Sprintf("%v", key) strValue := fmt.Sprintf("%v", value) stringMap[strKey] = strValue - } - msgPackEntry := MsgPackEntry{ + } + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) - } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } } } - if (len(msgPackEntries) > 0) { - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + if (len(msgPackEntries) > 0) { + if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { + Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") + MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) + } + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") CreateMDSDClient(InsightsMetrics, ContainerType) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() InsightsMetricsMDSDClientCreateErrors += 1 @@ -919,13 +954,14 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } deadline := 10 * time.Second - MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) elapsed = time.Since(start) if er != nil { Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... 
error : %s", len(msgPackEntries), elapsed, er.Error()) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) if MdsdInsightsMetricsMsgpUnixSocketClient != nil { MdsdInsightsMetricsMsgpUnixSocketClient.Close() MdsdInsightsMetricsMsgpUnixSocketClient = nil @@ -933,14 +969,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - InsightsMetricsMDSDClientCreateErrors += 1 + InsightsMetricsMDSDClientCreateErrors += 1 return output.FLB_RETRY } else { numTelegrafMetricsRecords := len(msgPackEntries) + UpdateNumTelegrafMetricsSentTelemetry(numTelegrafMetricsRecords, 0, 0) Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } } - + } else { // for windows, ODS direct var metrics []laTelegrafMetric @@ -979,6 +1016,18 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if ResourceCentric == true { req.Header.Set("x-ms-AzureResourceId", ResourceID) } + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + message := "Error::ODS Ingestion Auth Token is empty. Please check error log." + Log(message) + return output.FLB_RETRY + } + // add authorization header to the req + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } start := time.Now() resp, err := HTTPClient.Do(req) @@ -1183,7 +1232,17 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords := 0 if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { - //flush to mdsd + //flush to mdsd + if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdContainerLogTagName, MdsdOutputStreamIdTagPrefix) == false { + Log("Info::mdsd::obtaining output stream id") + if ContainerLogSchemaV2 == true { + MdsdContainerLogTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(ContainerLogV2DataType) + } else { + MdsdContainerLogTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(ContainerLogDataType) + } + Log("Info::mdsd:: using mdsdsource name: %s", MdsdContainerLogTagName) + } + fluentForward := MsgPackForward{ Tag: MdsdContainerLogTagName, Entries: msgPackEntries, @@ -1300,7 +1359,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { //ODS + } else if ((ContainerLogSchemaV2 == true && len(dataItemsLAv2) > 0) || len(dataItemsLAv1) > 0) { //ODS var logEntry interface{} recordType := "" loglinesCount := 0 @@ -1342,7 +1401,19 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if ResourceCentric == true { req.Header.Set("x-ms-AzureResourceId", ResourceID) } - + + if IsAADMSIAuthMode == true { + IngestionAuthTokenUpdateMutex.Lock() + ingestionAuthToken := ODSIngestionAuthToken + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") + return output.FLB_RETRY + } + // add authorization header to the req + req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -1351,7 +1422,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { Log(message) // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation //SendException(message) - + Log("Failed to flush %d records after %s", loglinesCount, elapsed) return output.FLB_RETRY @@ -1440,7 +1511,6 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str // InitializePlugin reads and populates plugin configuration func InitializePlugin(pluginConfPath string, agentVersion string) { - go func() { isTest := os.Getenv("ISTEST") if strings.Compare(strings.ToLower(strings.TrimSpace(isTest)), "true") == 0 { @@ -1480,10 +1550,10 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } ContainerType = os.Getenv(ContainerTypeEnv) - Log("Container Type %s", ContainerType) + Log("Container Type %s", ContainerType) osType := os.Getenv("OS_TYPE") - IsWindows = false + IsWindows = false // Linux if strings.Compare(strings.ToLower(osType), "windows") != 0 { Log("Reading configuration for Linux from %s", pluginConfPath) @@ -1502,7 +1572,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { SendException(message) time.Sleep(30 * time.Second) log.Fatalln(message) - } + } OMSEndpoint = "https://" + WorkspaceID + ".ods." + LogAnalyticsWorkspaceDomain + "/OperationalData.svc/PostJsonDataItems" // Populate Computer field containerHostName, err1 := ioutil.ReadFile(pluginConfig["container_host_file_path"]) @@ -1532,7 +1602,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } } else { // windows - IsWindows = true + IsWindows = true Computer = os.Getenv("HOSTNAME") WorkspaceID = os.Getenv("WSID") logAnalyticsDomain := os.Getenv("DOMAIN") @@ -1541,6 +1611,11 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } Log("OMSEndpoint %s", OMSEndpoint) + IsAADMSIAuthMode = false + if strings.Compare(strings.ToLower(os.Getenv(AADMSIAuthMode)), "true") == 0 { + IsAADMSIAuthMode = true + Log("AAD MSI Auth Mode Configured") + } ResourceID = os.Getenv(envAKSResourceID) if len(ResourceID) > 0 { @@ -1614,13 +1689,13 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log(message) } - PluginConfiguration = pluginConfig + PluginConfiguration = pluginConfig ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) - ContainerLogsRouteV2 = false - ContainerLogsRouteADX = false + ContainerLogsRouteV2 = false + ContainerLogsRouteADX = false if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set @@ -1653,14 +1728,14 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Routing container logs thru %s route...", ContainerLogsADXRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) } - } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route + } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route ContainerLogsRouteV2 = true //default is mdsd route - if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { + 
if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { ContainerLogsRouteV2 = false //fallback option when hiddensetting set } Log("Routing container logs thru %s route...", ContainerLogsRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... \n", ContainerLogsRoute) - } + } if ContainerLogsRouteV2 == true { CreateMDSDClient(ContainerLogV2, ContainerType) @@ -1673,7 +1748,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if IsWindows == false { // mdsd linux specific Log("Creating MDSD clients for KubeMonAgentEvents & InsightsMetrics") - CreateMDSDClient(KubeMonAgentEvents, ContainerType) + CreateMDSDClient(KubeMonAgentEvents, ContainerType) CreateMDSDClient(InsightsMetrics, ContainerType) } @@ -1712,5 +1787,11 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName - MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName -} \ No newline at end of file + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName + Log("ContainerLogsRouteADX: %v, IsWindows: %v, IsAADMSIAuthMode = %v \n", ContainerLogsRouteADX, IsWindows, IsAADMSIAuthMode) + if !ContainerLogsRouteADX && IsWindows && IsAADMSIAuthMode { + Log("defaultIngestionAuthTokenRefreshIntervalSeconds = %d \n", defaultIngestionAuthTokenRefreshIntervalSeconds) + IngestionAuthTokenRefreshTicker = time.NewTicker(time.Second * time.Duration(defaultIngestionAuthTokenRefreshIntervalSeconds)) + go refreshIngestionAuthToken() + } +} diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index debe003e4..31818dbb3 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -145,8 +145,8 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { + telemetryDimensions := make(map[string]string) if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { - telemetryDimensions := make(map[string]string) telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) @@ -168,7 +168,23 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) } else { - SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) + fbitFlushIntervalSecs := os.Getenv("FBIT_SERVICE_FLUSH_INTERVAL") + if fbitFlushIntervalSecs != "" { + telemetryDimensions["FbitServiceFlushIntervalSecs"] = fbitFlushIntervalSecs + } + fbitTailBufferChunkSizeMBs := os.Getenv("FBIT_TAIL_BUFFER_CHUNK_SIZE") + if fbitTailBufferChunkSizeMBs != "" { + telemetryDimensions["FbitBufferChunkSizeMBs"] = fbitTailBufferChunkSizeMBs + } + fbitTailBufferMaxSizeMBs := os.Getenv("FBIT_TAIL_BUFFER_MAX_SIZE") + if fbitTailBufferMaxSizeMBs != "" { + telemetryDimensions["FbitBufferMaxSizeMBs"] = fbitTailBufferMaxSizeMBs + } + fbitTailMemBufLimitMBs := os.Getenv("FBIT_TAIL_MEM_BUF_LIMIT") + if fbitTailMemBufLimitMBs != "" { + telemetryDimensions["FbitMemBufLimitSizeMBs"] = fbitTailMemBufLimitMBs + } + SendEvent(eventNameDaemonSetHeartbeat, telemetryDimensions) flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) TelemetryClient.Track(flushRateMetric) logRateMetric := 
appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 3fe5c6d0e..02d30607e 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -63,27 +63,32 @@ func ReadConfiguration(filename string) (map[string]string, error) { // CreateHTTPClient used to create the client for sending post requests to OMSEndpoint func CreateHTTPClient() { - certFilePath := PluginConfiguration["cert_file_path"] - keyFilePath := PluginConfiguration["key_file_path"] - if IsWindows == false { - certFilePath = fmt.Sprintf(certFilePath, WorkspaceID) - keyFilePath = fmt.Sprintf(keyFilePath, WorkspaceID) - } - cert, err := tls.LoadX509KeyPair(certFilePath, keyFilePath) - if err != nil { - message := fmt.Sprintf("Error when loading cert %s", err.Error()) - SendException(message) - time.Sleep(30 * time.Second) - Log(message) - log.Fatalf("Error when loading cert %s", err.Error()) - } + var transport *http.Transport + if IsAADMSIAuthMode { + transport = &http.Transport{} + } else { + certFilePath := PluginConfiguration["cert_file_path"] + keyFilePath := PluginConfiguration["key_file_path"] + if IsWindows == false { + certFilePath = fmt.Sprintf(certFilePath, WorkspaceID) + keyFilePath = fmt.Sprintf(keyFilePath, WorkspaceID) + } + cert, err := tls.LoadX509KeyPair(certFilePath, keyFilePath) + if err != nil { + message := fmt.Sprintf("Error when loading cert %s", err.Error()) + SendException(message) + time.Sleep(30 * time.Second) + Log(message) + log.Fatalf("Error when loading cert %s", err.Error()) + } - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } - tlsConfig.BuildNameToCertificate() - transport := &http.Transport{TLSClientConfig: tlsConfig} + tlsConfig.BuildNameToCertificate() + transport = &http.Transport{TLSClientConfig: tlsConfig} + } // set the proxy if the proxy configured if ProxyEndpoint != "" { proxyEndpointUrl, err := url.Parse(ProxyEndpoint) diff --git a/source/plugins/ruby/ApplicationInsightsUtility.rb b/source/plugins/ruby/ApplicationInsightsUtility.rb index 31f9503cd..eaa1d903d 100644 --- a/source/plugins/ruby/ApplicationInsightsUtility.rb +++ b/source/plugins/ruby/ApplicationInsightsUtility.rb @@ -21,6 +21,8 @@ class ApplicationInsightsUtility @@EnvApplicationInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" @@EnvControllerType = "CONTROLLER_TYPE" @@EnvContainerRuntime = "CONTAINER_RUNTIME" + @@EnvAADMSIAuthMode = "AAD_MSI_AUTH_MODE" + @@isWindows = false @@hostName = (OMS::Common.get_hostname) @@os_type = ENV["OS_TYPE"] @@ -82,7 +84,12 @@ def initializeUtility() isProxyConfigured = false $log.info("proxy is not configured") end - + aadAuthMSIMode = ENV[@@EnvAADMSIAuthMode] + if !aadAuthMSIMode.nil? && !aadAuthMSIMode.empty? && aadAuthMSIMode.downcase == "true".downcase + @@CustomProperties["aadAuthMSIMode"] = "true" + else + @@CustomProperties["aadAuthMSIMode"] = "false" + end #Check if telemetry is turned off telemetryOffSwitch = ENV["DISABLE_TELEMETRY"] if telemetryOffSwitch && !telemetryOffSwitch.nil? && !telemetryOffSwitch.empty? && telemetryOffSwitch.downcase == "true".downcase diff --git a/source/plugins/ruby/CustomMetricsUtils.rb b/source/plugins/ruby/CustomMetricsUtils.rb index 220313e6b..fd9290b78 100644 --- a/source/plugins/ruby/CustomMetricsUtils.rb +++ b/source/plugins/ruby/CustomMetricsUtils.rb @@ -13,8 +13,8 @@ def check_custom_metrics_availability if aks_region.to_s.empty? 
|| aks_resource_id.to_s.empty? return false # This will also take care of AKS-Engine Scenario. AKS_REGION/AKS_RESOURCE_ID is not set for AKS-Engine. Only ACS_RESOURCE_NAME is set end - - return aks_cloud_environment.to_s.downcase == 'public' + + return aks_cloud_environment.to_s.downcase == 'azurepubliccloud' end end end \ No newline at end of file diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 4b50e20d8..7c9ffb28a 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -455,19 +455,19 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollection = {} metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue - + metricProps["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json - metricItems.push(metricProps) + metricItems.push(metricProps) #No container level limit for the given metric, so default to node level limit else nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) metricValue = @@NodeMetrics[nodeMetricsHashKey] #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") - + metricProps = {} metricProps["Timestamp"] = metricTime metricProps["Host"] = nodeName @@ -480,10 +480,10 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricProps["json_Collections"] = [] - metricCollections = [] - metricCollections.push(metricCollection) + metricCollections = [] + metricCollections.push(metricCollection) metricProps["json_Collections"] = metricCollections.to_json - metricItems.push(metricProps) + metricItems.push(metricProps) end end end @@ -614,11 +614,11 @@ def parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metri metricCollection["CounterName"] = metricNametoReturn metricCollection["Value"] = metricValue metricCollections = [] - metricCollections.push(metricCollection) - + metricCollections.push(metricCollection) + metricItem["json_Collections"] = [] metricItem["json_Collections"] = metricCollections.to_json - + #push node level metrics to a inmem hash so that we can use it looking up at container level. 
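# Illustrative aside (not part of the patch): the node-level fallback in this hunk relies on
# @@NodeMetrics, an in-memory hash keyed as "<clusterId>/<nodeName>_<category>_<metricName>".
# A minimal sketch of the lookup, with hypothetical values:
#
#   node_metrics = {}  # stands in for @@NodeMetrics
#   node_metrics["cluster-a/node-1_allocatable_cpu"] = 2000000000 # nanocores
#   key = "cluster-a" + "/" + "node-1" + "_" + "allocatable" + "_" + "cpu"
#   metric_value = node_metrics[key] if node_metrics.has_key?(key)
#
# Container-level limits win when present; only missing limits fall back to the cached
# node allocatable value, as the comments below describe.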
#Currently if container level cpu & memory limits are not defined we default to node level limits @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue @@ -818,4 +818,4 @@ def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) return kubeServiceRecords end end -end +end \ No newline at end of file diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index c40d4c357..40fa80c14 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -106,5 +106,28 @@ class Constants #Pod Statuses POD_STATUS_TERMINATING = "Terminating" + # Data type ids + CONTAINER_INVENTORY_DATA_TYPE = "CONTAINER_INVENTORY_BLOB" + CONTAINER_NODE_INVENTORY_DATA_TYPE = "CONTAINER_NODE_INVENTORY_BLOB" + PERF_DATA_TYPE = "LINUX_PERF_BLOB" + INSIGHTS_METRICS_DATA_TYPE = "INSIGHTS_METRICS_BLOB" + KUBE_SERVICES_DATA_TYPE = "KUBE_SERVICES_BLOB" + KUBE_POD_INVENTORY_DATA_TYPE = "KUBE_POD_INVENTORY_BLOB" + KUBE_NODE_INVENTORY_DATA_TYPE = "KUBE_NODE_INVENTORY_BLOB" + KUBE_PV_INVENTORY_DATA_TYPE = "KUBE_PV_INVENTORY_BLOB" + KUBE_EVENTS_DATA_TYPE = "KUBE_EVENTS_BLOB" + KUBE_MON_AGENT_EVENTS_DATA_TYPE = "KUBE_MON_AGENT_EVENTS_BLOB" + KUBE_HEALTH_DATA_TYPE = "KUBE_HEALTH_BLOB" + CONTAINERLOGV2_DATA_TYPE = "CONTAINERINSIGHTS_CONTAINERLOGV2" + CONTAINERLOG_DATA_TYPE = "CONTAINER_LOG_BLOB" + + #ContainerInsights Extension (AMCS) + CI_EXTENSION_NAME = "ContainerInsights" + CI_EXTENSION_VERSION = "1" + #Current CI extension config size is ~5KB and going with 20KB to handle any future scenarios + CI_EXTENSION_CONFIG_MAX_BYTES = 20480 + ONEAGENT_FLUENT_SOCKET_NAME = "/var/run/mdsd/default_fluent.socket" + #Tag prefix for output stream + EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX = "dcr-" end diff --git a/source/plugins/ruby/filter_health_model_builder.rb b/source/plugins/ruby/filter_health_model_builder.rb index d491f17c2..9decda881 100644 --- a/source/plugins/ruby/filter_health_model_builder.rb +++ b/source/plugins/ruby/filter_health_model_builder.rb @@ -4,11 +4,12 @@ require 'fluent/plugin/filter' -module Fluent::Plugin +module Fluent::Plugin + require_relative 'extension_utils' require 'logger' require 'yajl/json_gem' Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file } - + class FilterHealthModelBuilder < Filter include HealthModel @@ -22,7 +23,7 @@ class FilterHealthModelBuilder < Filter attr_reader :buffer, :model_builder, :health_model_definition, :monitor_factory, :state_finalizers, :monitor_set, :model_builder, :hierarchy_builder, :resources, :kube_api_down_handler, :provider, :reducer, :state, :generator, :telemetry - + @@cluster_id = KubernetesApiClient.getClusterId @@token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @@ -56,7 +57,7 @@ def initialize deserialized_state_info = @cluster_health_state.get_state @state.initialize_state(deserialized_state_info) end - + rescue => e ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"}) end @@ -90,7 +91,14 @@ def filter_stream(tag, es) end begin new_es = Fluent::MultiEventStream.new - time = Time.now + time = Time.now + if ExtensionUtils.isAADMSIAuthMode() + $log.info("filter_health_model_builder::enumerate: AAD AUTH MSI MODE") + if @rewrite_tag.nil? 
|| !@rewrite_tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @rewrite_tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_HEALTH_DATA_TYPE) + end + $log.info("filter_health_model_builder::filter_stream: using tag -#{@rewrite_tag} @ #{Time.now.utc.iso8601}") + end if tag.start_with?("kubehealth.DaemonSet.Node") node_records = [] @@ -222,7 +230,7 @@ def filter_stream(tag, es) @log.info "after optimizing health signals all_monitors.size #{all_monitors.size}" - + # for each key in monitor.keys, # get the state from health_monitor_state # generate the record to send @@ -245,7 +253,7 @@ def filter_stream(tag, es) @cluster_new_state = new_state end end - end + end new_es.add(emit_time, record) } @@ -261,7 +269,7 @@ def filter_stream(tag, es) @telemetry.send # return an empty event stream, else the match will throw a NoMethodError return Fluent::MultiEventStream.new - elsif tag.start_with?(@rewrite_tag) + elsif tag.start_with?(@rewrite_tag) # this filter also acts as a pass through as we are rewriting the tag and emitting to the fluent stream es else @@ -273,6 +281,6 @@ def filter_stream(tag, es) @log.warn "Message: #{e.message} Backtrace: #{e.backtrace}" return nil end - end + end end end diff --git a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index b3f9bd08b..862e88e44 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -20,7 +20,8 @@ def initialize require_relative "CAdvisorMetricsAPIClient" require_relative "oms_common" require_relative "omslog" - require_relative "constants" + require_relative "constants" + require_relative "extension_utils" end config_param :run_interval, :time, :default => 60 @@ -61,13 +62,24 @@ def enumerate() batchTime = currentTime.utc.iso8601 @@istestvar = ENV["ISTEST"] begin - eventStream = Fluent::MultiEventStream.new + eventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: nil, metricTime: batchTime ) - metricData.each do |record| - eventStream.add(time, record) if record - end - + metricData.each do |record| + eventStream.add(time, record) if record + end + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_cadvisor_perf::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsmetricstag.nil? 
|| !@insightsmetricstag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsmetricstag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsmetricstag} @ #{Time.now.utc.iso8601}") + end router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream router.emit_stream(@containerhealthtag, eventStream) if eventStream @@ -136,6 +148,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # CAdvisor_Perf_Input end # module diff --git a/source/plugins/ruby/in_containerinventory.rb b/source/plugins/ruby/in_containerinventory.rb index eebf422d6..9fcb7ab90 100644 --- a/source/plugins/ruby/in_containerinventory.rb +++ b/source/plugins/ruby/in_containerinventory.rb @@ -7,17 +7,18 @@ module Fluent::Plugin class Container_Inventory_Input < Input Fluent::Plugin.register_input("containerinventory", self) - @@PluginName = "ContainerInventory" + @@PluginName = "ContainerInventory" def initialize super require "yajl/json_gem" - require "time" + require "time" require_relative "ContainerInventoryState" require_relative "ApplicationInsightsUtility" require_relative "omslog" require_relative "CAdvisorMetricsAPIClient" - require_relative "kubernetes_container_inventory" + require_relative "kubernetes_container_inventory" + require_relative "extension_utils" end config_param :run_interval, :time, :default => 60 @@ -47,21 +48,28 @@ def shutdown @thread.join super # This super must be at the end of shutdown method end - end - + end + def enumerate - currentTime = Time.now + currentTime = Time.now batchTime = currentTime.utc.iso8601 emitTime = Fluent::Engine.now containerInventory = Array.new eventStream = Fluent::MultiEventStream.new hostName = "" - $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") + $log.info("in_container_inventory::enumerate : Begin processing @ #{Time.now.utc.iso8601}") + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_container_inventory::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) + end + $log.info("in_container_inventory::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + end begin containerRuntimeEnv = ENV["CONTAINER_RUNTIME"] $log.info("in_container_inventory::enumerate : container runtime : #{containerRuntimeEnv}") clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] - $log.info("in_container_inventory::enumerate : using cadvisor apis") + $log.info("in_container_inventory::enumerate : using cadvisor apis") containerIds = Array.new response = CAdvisorMetricsAPIClient.getPodsFromCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? @@ -76,10 +84,10 @@ def enumerate end containerIds.push containerRecord["InstanceID"] containerInventory.push containerRecord - end + end end - end - end + end + end # Update the state for deleted containers deletedContainers = ContainerInventoryState.getDeletedContainers(containerIds) if !deletedContainers.nil? && !deletedContainers.empty? @@ -87,13 +95,13 @@ def enumerate container = ContainerInventoryState.readContainerState(deletedContainer) if !container.nil? 
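# Illustrative aside (not part of the patch): every input plugin in this change guards its
# emit tags the same way, as this plugin does above. In AAD MSI auth mode the static
# "oneagent.containerInsights.*" tag is swapped once for a Data Collection Rule stream id,
# which the extension returns with the "dcr-" prefix that
# Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX matches. Condensed form of the pattern:
#
#   if ExtensionUtils.isAADMSIAuthMode()
#     if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX)
#       @tag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE)
#     end
#   end
#
# The start_with? check keeps the socket lookup to a single call per process; once
# rewritten, the tag already carries the "dcr-" prefix and is left alone.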
container.each { |k, v| container[k] = v } - container["State"] = "Deleted" + container["State"] = "Deleted" KubernetesContainerInventory.deleteCGroupCacheEntryForDeletedContainer(container["InstanceID"]) containerInventory.push container end end - end - containerInventory.each do |record| + end + containerInventory.each do |record| eventStream.add(emitTime, record) if record end router.emit_stream(@tag, eventStream) if eventStream @@ -148,6 +156,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # Container_Inventory_Input end # module diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 6f65dab92..deeae6e14 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -3,7 +3,7 @@ require 'fluent/plugin/input' -module Fluent::Plugin +module Fluent::Plugin class Kube_Event_Input < Input Fluent::Plugin.register_input("kube_events", self) @@KubeEventsStateFile = "/var/opt/microsoft/docker-cimprov/state/KubeEventQueryState.yaml" @@ -18,6 +18,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -37,7 +38,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["EVENTS_CHUNK_SIZE"].nil? && !ENV["EVENTS_CHUNK_SIZE"].empty? && ENV["EVENTS_CHUNK_SIZE"].to_i > 0 @@ -84,8 +85,15 @@ def enumerate batchTime = currentTime.utc.iso8601 eventQueryState = getEventQueryState newEventQueryState = [] - @eventsCount = 0 - + @eventsCount = 0 + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_events::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_EVENTS_DATA_TYPE) + end + $log.info("in_kube_events::enumerate: using kubeevents tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_events::enumerate : Getting events from Kube API @ #{Time.now.utc.iso8601}") @@ -131,8 +139,8 @@ def enumerate end # end enumerate def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now @@istestvar = ENV["ISTEST"] begin eventStream = Fluent::MultiEventStream.new @@ -166,7 +174,7 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim record["Count"] = items["count"] record["Computer"] = nodeName record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterId"] = KubernetesApiClient.getClusterId eventStream.add(emitTime, record) if record @eventsCount += 1 end diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index ebfa903fd..bc62756a1 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -6,12 +6,12 @@ module Fluent::Plugin class Kube_nodeInventory_Input < Input Fluent::Plugin.register_input("kube_nodes", self) - + @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" @@osmConfigMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @@AzStackCloudFileName 
= "/etc/kubernetes/host/azurestackcloud.json" - + @@rsPromInterval = ENV["TELEMETRY_RS_PROM_INTERVAL"] @@rsPromFieldPassCount = ENV["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"] @@ -35,11 +35,12 @@ def initialize require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "oms_common" - require_relative "omslog" + require_relative "omslog" + require_relative "extension_utils" - @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" - @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" + @ContainerNodeInventoryTag = "oneagent.containerInsights.CONTAINER_NODE_INVENTORY_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" # refer tomlparser-agent-config for the defaults @@ -60,7 +61,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? && ENV["NODES_CHUNK_SIZE"].to_i > 0 @@ -109,8 +110,27 @@ def enumerate @nodesAPIE2ELatencyMs = 0 @nodeInventoryE2EProcessingLatencyMs = 0 - nodeInventoryStartTime = (Time.now.to_f * 1000).to_i - + nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_nodes::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + if @ContainerNodeInventoryTag.nil? || !@ContainerNodeInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @ContainerNodeInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_NODE_INVENTORY_DATA_TYPE) + end + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_NODE_INVENTORY_DATA_TYPE) + end + $log.info("in_kube_nodes::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using containernodeinventory tag -#{@ContainerNodeInventoryTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_nodes::enumerate: using kubenodeinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + end nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil @@ -161,19 +181,19 @@ def enumerate def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) begin - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now telemetrySent = false eventStream = Fluent::MultiEventStream.new containerNodeInventoryEventStream = Fluent::MultiEventStream.new insightsMetricsEventStream = Fluent::MultiEventStream.new - kubePerfEventStream = Fluent::MultiEventStream.new + kubePerfEventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] #get node inventory nodeInventory["items"].each do |item| # node inventory nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) - eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord + eventStream.add(emitTime, nodeInventoryRecord) if nodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@tag, eventStream) if eventStream @@ -186,7 +206,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) end # container node inventory - containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) + containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryRecord) if containerNodeInventoryRecord if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -235,7 +255,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) @NodeCache.mem.set_capacity(nodeMetricRecord["Host"], metricVal) end end - nodeMetricRecords.each do |metricRecord| + nodeMetricRecords.each do |metricRecord| kubePerfEventStream.add(emitTime, metricRecord) if metricRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -265,7 +285,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) end - nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| + nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE @@ -335,7 +355,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - eventStream = nil + eventStream = nil end if containerNodeInventoryEventStream.count > 0 $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") @@ -507,7 +527,7 @@ def getNodeTelemetryProps(item) $log.warn "in_kube_nodes::getContainerNodeIngetNodeTelemetryPropsventoryRecord:Failed: #{errorStr}" end return properties - end + end end # Kube_Node_Input class NodeStatsCache # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) @@ -578,5 +598,5 @@ def cpu() def mem() return @@memCache end - end + end end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 5598602cd..ff5912c7e 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -11,7 +11,7 @@ class Kube_PodInventory_Input < Input @@MDMKubePodInventoryTag = "mdm.kubepodinventory" @@hostName = (OMS::Common.get_hostname) - + def initialize super @@ -27,6 +27,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for updating defaults # this configurable via configmap @@ -39,12 +40,12 @@ def initialize @winContainerCount = 0 @controllerData = {} @podInventoryE2EProcessingLatencyMs = 0 - @podsAPIE2ELatencyMs = 0 - + @podsAPIE2ELatencyMs = 0 + @kubeperfTag = "oneagent.containerInsights.LINUX_PERF_BLOB" @kubeservicesTag = "oneagent.containerInsights.KUBE_SERVICES_BLOB" @containerInventoryTag = "oneagent.containerInsights.CONTAINER_INVENTORY_BLOB" - @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" + @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end config_param :run_interval, :time, :default => 60 @@ -55,7 +56,7 @@ def configure(conf) @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end - def start + def start if @run_interval super if !ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 @@ -107,7 +108,30 @@ def enumerate(podList = nil) batchTime = currentTime.utc.iso8601 serviceRecords = [] @podInventoryE2EProcessingLatencyMs = 0 - podInventoryStartTime = (Time.now.to_f * 1000).to_i + podInventoryStartTime = (Time.now.to_f * 1000).to_i + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_podinventory::enumerate: AAD AUTH MSI MODE") + if @kubeperfTag.nil? || !@kubeperfTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeperfTag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @kubeservicesTag.nil? || !@kubeservicesTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @kubeservicesTag = ExtensionUtils.getOutputStreamId(Constants::KUBE_SERVICES_DATA_TYPE) + end + if @containerInventoryTag.nil? || !@containerInventoryTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @containerInventoryTag = ExtensionUtils.getOutputStreamId(Constants::CONTAINER_INVENTORY_DATA_TYPE) + end + if @insightsMetricsTag.nil? || !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_POD_INVENTORY_DATA_TYPE) + end + $log.info("in_kube_podinventory::enumerate: using perf tag -#{@kubeperfTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubeservices tag -#{@kubeservicesTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using containerinventory tag -#{@containerInventoryTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + $log.info("in_kube_podinventory::enumerate: using kubepodinventory tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Get services first so that we dont need to make a call for very chunk $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") @@ -197,8 +221,8 @@ def enumerate(podList = nil) end def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now #batchTime = currentTime.utc.iso8601 eventStream = Fluent::MultiEventStream.new containerInventoryStream = Fluent::MultiEventStream.new @@ -214,8 +238,8 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) podInventoryRecords.each do |record| if !record.nil? - eventStream.add(emitTime, record) if record - @inventoryToMdmConvertor.process_pod_inventory_record(record) + eventStream.add(emitTime, record) if record + @inventoryToMdmConvertor.process_pod_inventory_record(record) end end # Setting this flag to true so that we can send ContainerInventory records for containers @@ -232,7 +256,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc # Send container inventory records for containers on windows nodes @winContainerCount += containerInventoryRecords.length containerInventoryRecords.each do |cirecord| - if !cirecord.nil? + if !cirecord.nil? 
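# Illustrative aside (not part of the patch): the emission below is batched so no single
# stream grows unbounded. Records accumulate in a MultiEventStream and are flushed whenever
# the count reaches @PODS_EMIT_STREAM_BATCH_SIZE, then the stream is recreated. A condensed
# sketch of that recurring pattern, where "records" stands in for the per-pod records built
# in this method:
#
#   eventStream = Fluent::MultiEventStream.new
#   records.each do |record|
#     eventStream.add(emitTime, record) if record
#     if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE
#       router.emit_stream(@tag, eventStream) if eventStream
#       eventStream = Fluent::MultiEventStream.new
#     end
#   end
#   router.emit_stream(@tag, eventStream) if eventStream.count > 0 # flush the remainder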
containerInventoryStream.add(emitTime, cirecord) if cirecord end end @@ -255,7 +279,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - containerMetricDataItems.each do |record| + containerMetricDataItems.each do |record| kubePerfEventStream.add(emitTime, record) if record end @@ -274,7 +298,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(emitTime, insightsMetricsRecord) if insightsMetricsRecord end @@ -341,7 +365,7 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if !kubeServiceRecord.nil? # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId - kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName + kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName kubeServicesEventStream.add(emitTime, kubeServiceRecord) if kubeServiceRecord if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") @@ -648,6 +672,6 @@ def getServiceNameFromLabels(namespace, labels, serviceRecords) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end return serviceName - end + end end # Kube_Pod_Input -end # module +end # module \ No newline at end of file diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 40eebac8a..fccfd459d 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -20,6 +20,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" # Response size is around 1500 bytes per PV @PV_CHUNK_SIZE = "5000" @@ -33,7 +34,7 @@ def configure(conf) super end - def start + def start if @run_interval super @finished = false @@ -61,7 +62,13 @@ def enumerate telemetryFlush = false @pvTypeToCountHash = {} currentTime = Time.now - batchTime = currentTime.utc.iso8601 + batchTime = currentTime.utc.iso8601 + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kube_pvinventory::enumerate: AAD AUTH MSI MODE") + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::KUBE_PV_INVENTORY_DATA_TYPE) + end + end continuationToken = nil $log.info("in_kube_pvinventory::enumerate : Getting PVs from Kube API @ #{Time.now.utc.iso8601}") @@ -93,7 +100,7 @@ def enumerate if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) telemetryFlush = true end - + # Flush AppInsights telemetry once all the processing is done if telemetryFlush == true telemetryProperties = {} @@ -110,8 +117,8 @@ def enumerate end # end enumerate def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) - currentTime = Time.now - emitTime = Fluent::Engine.now + currentTime = Time.now + emitTime = Fluent::Engine.now eventStream = Fluent::MultiEventStream.new @@istestvar = ENV["ISTEST"] begin @@ -152,8 +159,8 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) end records.each do |record| - if !record.nil? - eventStream.add(emitTime, record) + if !record.nil? + eventStream.add(emitTime, record) end end @@ -191,7 +198,7 @@ def getTypeInfo(item) begin if !item["spec"].nil? (Constants::PV_TYPES).each do |pvType| - + # PV is this type if !item["spec"][pvType].nil? @@ -252,6 +259,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end # Kube_PVInventory_Input end # module diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index 182c3ffc1..0b563a890 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -22,6 +22,7 @@ def initialize require_relative "omslog" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -44,7 +45,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["DEPLOYMENTS_CHUNK_SIZE"].nil? && !ENV["DEPLOYMENTS_CHUNK_SIZE"].empty? && ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i > 0 @@ -55,11 +56,11 @@ def start @DEPLOYMENTS_CHUNK_SIZE = 500 end $log.info("in_kubestate_deployments::start : DEPLOYMENTS_CHUNK_SIZE @ #{@DEPLOYMENTS_CHUNK_SIZE}") - + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + @thread = Thread.new(&method(:run_periodic)) end end @@ -81,8 +82,14 @@ def enumerate batchTime = currentTime.utc.iso8601 #set the running total for this batch to 0 - @deploymentsRunningTotal = 0 - + @deploymentsRunningTotal = 0 + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kubestate_deployments::enumerate: AAD AUTH MSI MODE") + if @tag.nil? 
|| !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") @@ -186,7 +193,7 @@ def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) end time = Fluent::Engine.now - metricItems.each do |insightsMetricsRecord| + metricItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end @@ -233,6 +240,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 8f60bfb72..178f7944f 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -18,7 +18,8 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "ApplicationInsightsUtility" - require_relative "constants" + require_relative "constants" + require_relative "extension_utils" # refer tomlparser-agent-config for defaults # this configurable via configmap @@ -41,7 +42,7 @@ def configure(conf) super end - def start + def start if @run_interval super if !ENV["HPA_CHUNK_SIZE"].nil? && !ENV["HPA_CHUNK_SIZE"].empty? && ENV["HPA_CHUNK_SIZE"].to_i > 0 @@ -78,7 +79,14 @@ def enumerate batchTime = currentTime.utc.iso8601 @hpaCount = 0 - + + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_kubestate_hpa::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_kubestate_hpa::enumerate: using tag -#{@tag} @ #{Time.now.utc.iso8601}") + end # Initializing continuation token to nil continuationToken = nil $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") @@ -186,7 +194,7 @@ def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) end time = Fluent::Engine.now - metricItems.each do |insightsMetricsRecord| + metricItems.each do |insightsMetricsRecord| insightsMetricsEventStream.add(time, insightsMetricsRecord) if insightsMetricsRecord end @@ -231,6 +239,6 @@ def run_periodic @mutex.lock end @mutex.unlock - end + end end end diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 9ab2474b1..dd462fdf2 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -20,6 +20,7 @@ def initialize require_relative "oms_common" require_relative "omslog" require_relative "constants" + require_relative "extension_utils" @insightsMetricsTag = "oneagent.containerInsights.INSIGHTS_METRICS_BLOB" end @@ -58,6 +59,17 @@ def enumerate() timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 @@istestvar = ENV["ISTEST"] + if ExtensionUtils.isAADMSIAuthMode() + $log.info("in_win_cadvisor_perf::enumerate: AAD AUTH MSI MODE") + if @tag.nil? || !@tag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @tag = ExtensionUtils.getOutputStreamId(Constants::PERF_DATA_TYPE) + end + if @insightsMetricsTag.nil? 
|| !@insightsMetricsTag.start_with?(Constants::EXTENSION_OUTPUT_STREAM_ID_TAG_PREFIX) + @insightsMetricsTag = ExtensionUtils.getOutputStreamId(Constants::INSIGHTS_METRICS_DATA_TYPE) + end + $log.info("in_win_cadvisor_perf::enumerate: using perf tag -#{@tag} @ #{Time.now.utc.iso8601}") + $log.info("in_win_cadvisor_perf::enumerate: using insightsmetrics tag -#{@insightsMetricsTag} @ #{Time.now.utc.iso8601}") + end #Resetting this cache so that it is populated with the current set of containers with every call CAdvisorMetricsAPIClient.resetWinContainerIdCache() diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 8e80fb753..82d6e07db 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -21,6 +21,9 @@ def initialize require_relative "proxy_utils" @@token_resource_url = "https://monitoring.azure.com/" + # AAD auth supported only in public cloud and handle other clouds when enabled + # this is the unified new token audience for LA AAD MSI auth & metrics + @@token_resource_audience = "https://monitor.azure.com/" @@grant_type = "client_credentials" @@azure_json_path = "/etc/kubernetes/host/azure.json" @@post_request_url_template = "https://%{aks_region}.monitoring.azure.com%{aks_resource_id}/metrics" @@ -28,6 +31,8 @@ def initialize # msiEndpoint is the well known endpoint for getting MSI authentications tokens @@msi_endpoint_template = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&client_id=%{user_assigned_client_id}&resource=%{resource}" + # IMDS msiEndpoint for AAD MSI Auth is the proxy endpoint which serves the MSI auth tokens with the resource claim + @@imds_msi_endpoint_template = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=%{resource}" @@user_assigned_client_id = ENV["USER_ASSIGNED_IDENTITY_CLIENT_ID"] @@plugin_name = "AKSCustomMetricsMDM" @@ -46,6 +51,7 @@ def initialize @last_telemetry_sent_time = nil # Setting useMsi to false by default @useMsi = false + @isAADMSIAuth = false @metrics_flushed_count = 0 @cluster_identity = nil @@ -124,7 +130,14 @@ def start @parsed_token_uri = URI.parse(aad_token_url) else @useMsi = true - msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + if !@@user_assigned_client_id.nil? && !@@user_assigned_client_id.empty? + msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + else + # in the AAD MSI auth case user_assigned_client_id will be empty + @log.info "using aad msi auth" + @isAADMSIAuth = true + msi_endpoint = @@imds_msi_endpoint_template % { resource: @@token_resource_audience } + end @parsed_token_uri = URI.parse(msi_endpoint) end @@ -148,8 +161,14 @@ def get_access_token @log.info "Refreshing access token for out_mdm plugin.."
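# Illustrative aside (not part of the patch): when @isAADMSIAuth is set, the endpoint parsed
# in start() is the local IMDS proxy. A minimal standalone sketch of the token call it
# resolves to (hypothetical code; the plugin's own request and parse logic follows below):
#
#   uri = URI.parse("http://169.254.169.254/metadata/identity/oauth2/token" \
#                   "?api-version=2018-02-01&resource=https://monitor.azure.com/")
#   request = Net::HTTP::Get.new(uri.request_uri)
#   request["Metadata"] = "true" # IMDS rejects requests without this header
#   response = Net::HTTP.start(uri.host, uri.port, :use_ssl => false) { |http| http.request(request) }
#   access_token = JSON.parse(response.body)["access_token"]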
if (!!@useMsi) - @log.info "Using msi to get the token to post MDM data" - ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", {}) + properties = {} + if (!!@isAADMSIAuth) + @log.info "Using aad msi auth to get the token to post MDM data" + properties["aadAuthMSIMode"] = "true" + else + @log.info "Using msi to get the token to post MDM data" + end + ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMToken-MSI", properties) @log.info "Opening TCP connection" http_access_token = Net::HTTP.start(@parsed_token_uri.host, @parsed_token_uri.port, :use_ssl => false) # http_access_token.use_ssl = false @@ -320,7 +339,7 @@ def send_to_mdm(post_body) ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMSendSuccessful", {}) @last_telemetry_sent_time = Time.now end - rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException + rescue Net::HTTPClientException => e # see https://docs.ruby-lang.org/en/2.6.0/NEWS.html about deprecating HTTPServerException and adding HTTPClientException if !response.nil? && !response.body.nil? #body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else diff --git a/source/plugins/utils/extension.rb b/source/plugins/utils/extension.rb new file mode 100644 index 000000000..78236fe15 --- /dev/null +++ b/source/plugins/utils/extension.rb @@ -0,0 +1,77 @@ +require "socket" +require "msgpack" +require "securerandom" +require "singleton" +require_relative "omslog" +require_relative "constants" +require_relative "ApplicationInsightsUtility" + + +class Extension + include Singleton + + def initialize + @cache = {} + @cache_lock = Mutex.new + $log.info("Extension::initialize complete") + end + + def get_output_stream_id(datatypeId) + @cache_lock.synchronize { + if @cache.has_key?(datatypeId) + return @cache[datatypeId] + else + @cache = get_config() + return @cache[datatypeId] + end + } + end + + private + def get_config() + extConfig = Hash.new + $log.info("Extension::get_config start ...") + begin + clientSocket = UNIXSocket.open(Constants::ONEAGENT_FLUENT_SOCKET_NAME) + requestId = SecureRandom.uuid.to_s + requestBodyJSON = { "Request" => "AgentTaggedData", "RequestId" => requestId, "Tag" => Constants::CI_EXTENSION_NAME, "Version" => Constants::CI_EXTENSION_VERSION }.to_json + $log.info("Extension::get_config::sending request with request body: #{requestBodyJSON}") + requestBodyMsgPack = requestBodyJSON.to_msgpack + clientSocket.write(requestBodyMsgPack) + clientSocket.flush + $log.info("reading the response from fluent socket: #{Constants::ONEAGENT_FLUENT_SOCKET_NAME}") + resp = clientSocket.recv(Constants::CI_EXTENSION_CONFIG_MAX_BYTES) + if !resp.nil? && !resp.empty? + $log.info("Extension::get_config::successfully read the extension config from fluentsocket and number of bytes read is #{resp.length}") + respJSON = JSON.parse(resp) + taggedData = respJSON["TaggedData"] + if !taggedData.nil? && !taggedData.empty? + taggedAgentData = JSON.parse(taggedData) + extensionConfigurations = taggedAgentData["extensionConfigurations"] + if !extensionConfigurations.nil? && !extensionConfigurations.empty? + extensionConfigurations.each do |extensionConfig| + outputStreams = extensionConfig["outputStreams"] + if !outputStreams.nil? && !outputStreams.empty? 
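# Illustrative aside (not part of the patch): the parsing above implies the fluent socket
# reply has roughly this shape (field values hypothetical):
#
#   {
#     "RequestId": "<uuid>",
#     "TaggedData": "{ \"extensionConfigurations\": [ {
#        \"outputStreams\": { \"CONTAINER_INVENTORY_BLOB\": \"dcr-xyz\" } } ] }"
#   }
#
# TaggedData arrives as a JSON string embedded in JSON, hence the second JSON.parse, and
# each outputStreams entry maps a data type id to its "dcr-" prefixed stream id.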
+ outputStreams.each do |datatypeId, streamId| + $log.info("Extension::get_config datatypeId:#{datatypeId}, streamId: #{streamId}") + extConfig[datatypeId] = streamId + end + else + $log.warn("Extension::get_config::received outputStreams is either nil or empty") + end + end + else + $log.warn("Extension::get_config::received extensionConfigurations from fluentsocket is either nil or empty") + end + end + end + rescue => errorStr + $log.warn("Extension::get_config failed: #{errorStr}") + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + ensure + clientSocket.close unless clientSocket.nil? + end + $log.info("Extension::get_config complete ...") + return extConfig + end +end diff --git a/source/plugins/utils/extension_utils.rb b/source/plugins/utils/extension_utils.rb new file mode 100644 index 000000000..5d439c6b2 --- /dev/null +++ b/source/plugins/utils/extension_utils.rb @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require_relative "extension" + +class ExtensionUtils + class << self + def getOutputStreamId(dataType) + outputStreamId = "" + begin + if !dataType.nil? && !dataType.empty? + outputStreamId = Extension.instance.get_output_stream_id(dataType) + $log.info("ExtensionUtils::getOutputStreamId: got streamid: #{outputStreamId} for datatype: #{dataType}") + else + $log.warn("ExtensionUtils::getOutputStreamId: dataType shouldnt be nil or empty") + end + rescue => errorStr + $log.warn("ExtensionUtils::getOutputStreamId: failed with an exception: #{errorStr}") + end + return outputStreamId + end + def isAADMSIAuthMode() + return !ENV["AAD_MSI_AUTH_MODE"].nil? && !ENV["AAD_MSI_AUTH_MODE"].empty? && ENV["AAD_MSI_AUTH_MODE"].downcase == "true" + end + end +end diff --git a/test/e2e/conformance.yaml b/test/e2e/conformance.yaml new file mode 100644 index 000000000..ff790e690 --- /dev/null +++ b/test/e2e/conformance.yaml @@ -0,0 +1,15 @@ +sonobuoy-config: + driver: Job + plugin-name: azure-arc-ci-conformance + result-format: junit +spec: + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest08142021 + imagePullPolicy: Always + name: plugin + resources: {} + volumes: + - name: results + emptyDir: {} + volumeMounts: + - mountPath: /tmp/results + name: results diff --git a/test/e2e/e2e-tests.yaml b/test/e2e/e2e-tests.yaml index 06dfa1fb0..25817be12 100644 --- a/test/e2e/e2e-tests.yaml +++ b/test/e2e/e2e-tests.yaml @@ -68,7 +68,7 @@ data: containers: [] restartPolicy: Never serviceAccountName: sonobuoy-serviceaccount - nodeSelector: + nodeSelector: kubernetes.io/os: linux tolerations: - effect: NoSchedule @@ -84,8 +84,11 @@ data: result-format: junit spec: env: + # this should be false if the test environment is non ARC K8s for example AKS + - name: IS_NON_ARC_K8S_TEST_ENVIRONMENT + value: "true" # Update values of CLIENT_ID, CLIENT_SECRET of the service principal which has permission to query LA ad Metrics API - # Update value of TENANT_ID corresponding your Azure Service principal + # Update value of TENANT_ID corresponding your Azure Service principal - name: CLIENT_ID value: "SP_CLIENT_ID_VALUE" - name: CLIENT_SECRET @@ -93,15 +96,15 @@ data: - name: TENANT_ID value: "SP_TENANT_ID_VALUE" - name: DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES - value: "10" + value: "10" - name: DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES - value: "10" + value: "10" - name: AGENT_POD_EXPECTED_RESTART_COUNT - value: "0" + value: "0" - name: AZURE_CLOUD - value: "AZURE_PUBLIC_CLOUD" - # image tag 
should be updated if new tests being added after this image - image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciagenttest02152021 + value: "AZURE_PUBLIC_CLOUD" + # image tag should be updated if new tests being added after this image + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciconftest08142021 imagePullPolicy: IfNotPresent name: plugin resources: {} @@ -144,7 +147,7 @@ spec: name: output-volume restartPolicy: Never serviceAccountName: sonobuoy-serviceaccount - nodeSelector: + nodeSelector: kubernetes.io/os: linux tolerations: - key: "kubernetes.io/e2e-evict-taint-key" diff --git a/test/e2e/src/common/constants.py b/test/e2e/src/common/constants.py index 770964cb5..392b10554 100644 --- a/test/e2e/src/common/constants.py +++ b/test/e2e/src/common/constants.py @@ -40,6 +40,8 @@ TIMEOUT = 300 +# WAIT TIME BEFORE READING THE AGENT LOGS +AGENT_WAIT_TIME_SECS = "180" # Azure Monitor for Container Extension related AGENT_RESOURCES_NAMESPACE = 'kube-system' AGENT_DEPLOYMENT_NAME = 'omsagent-rs' @@ -47,7 +49,9 @@ AGENT_WIN_DAEMONSET_NAME = 'omsagent-win' AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR = 'rsName=omsagent-rs' -AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'component=oms-agent' +AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'dsName=omsagent-ds' +AGENT_DAEMON_SET_PODS_LABEL_SELECTOR_NON_ARC = 'component=oms-agent' +AGENT_FLUENTD_LOG_PATH = '/var/opt/microsoft/docker-cimprov/log/fluentd.log' AGENT_OMSAGENT_LOG_PATH = '/var/opt/microsoft/omsagent/log/omsagent.log' AGENT_REPLICASET_WORKFLOWS = ["kubePodInventoryEmitStreamSuccess", "kubeNodeInventoryEmitStreamSuccess"] diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile index 9f85bdf4c..cd85aee40 100644 --- a/test/e2e/src/core/Dockerfile +++ b/test/e2e/src/core/Dockerfile @@ -1,11 +1,26 @@ FROM python:3.6 -RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure +RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \ && helm version +RUN apt-get update && apt-get -y upgrade && \ + apt-get -f -y install curl apt-transport-https lsb-release gnupg python3-pip python-pip && \ + curl -sL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.asc.gpg && \ + CLI_REPO=$(lsb_release -cs) && \ + echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${CLI_REPO} main" \ + > /etc/apt/sources.list.d/azure-cli.list && \ + apt-get update && \ + apt-get install -y azure-cli && \ + rm -rf /var/lib/apt/lists/* + +RUN python3 -m pip install junit_xml + +COPY --from=lachlanevenson/k8s-kubectl:v1.20.5 /usr/local/bin/kubectl /usr/local/bin/kubectl + COPY ./core/e2e_tests.sh / +COPY ./core/setup_failure_handler.py / COPY ./core/pytest.ini /e2etests/ COPY ./core/conftest.py /e2etests/ COPY ./core/helper.py /e2etests/ diff --git a/test/e2e/src/core/conftest.py b/test/e2e/src/core/conftest.py index e659d5189..02f644a18 100644 --- a/test/e2e/src/core/conftest.py +++ b/test/e2e/src/core/conftest.py @@ -22,42 +22,48 @@ def env_dict(): create_results_dir('/tmp/results') # Setting some environment variables - env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' + env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' env_dict['TEST_AGENT_LOG_FILE'] = '/tmp/results/containerinsights' env_dict['NUM_TESTS_COMPLETED'] = 0 - + 
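# Illustrative aside (not part of the patch): the settings collected below all follow one
# getenv-with-default idiom, e.g.
#
#   wait_secs = int(os.getenv('AGENT_WAIT_TIME_SECS')) if os.getenv('AGENT_WAIT_TIME_SECS') \
#       else int(constants.AGENT_WAIT_TIME_SECS)
#
# Wrapping the constants fallback in int() matters here because constants.py declares
# AGENT_WAIT_TIME_SECS as the string "180"; without it, the value's type would depend on
# whether the environment variable happens to be set.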
print("Starting setup...") append_result_output("Starting setup...\n", env_dict['SETUP_LOG_FILE']) - + # Collecting environment variables env_dict['TENANT_ID'] = os.getenv('TENANT_ID') env_dict['CLIENT_ID'] = os.getenv('CLIENT_ID') env_dict['CLIENT_SECRET'] = os.getenv('CLIENT_SECRET') - + env_dict['IS_NON_ARC_K8S_TEST_ENVIRONMENT'] = os.getenv('IS_NON_ARC_K8S_TEST_ENVIRONMENT') + # released agent for Arc K8s still uses omsagent and when we rollout the agent with mdsd + # this shouldnt set after agent rollout with mdsd + env_dict['USING_OMSAGENT_BASE_AGENT'] = os.getenv('USING_OMSAGENT_BASE_AGENT') + + waitTimeInterval = int(os.getenv('AGENT_WAIT_TIME_SECS')) if os.getenv('AGENT_WAIT_TIME_SECS') else constants.AGENT_WAIT_TIME_SECS + env_dict['AGENT_WAIT_TIME_SECS'] = waitTimeInterval # get default query time interval for log analytics queries queryTimeInterval = int(os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES # add minute suffix since this format required for LA queries env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] = str(queryTimeInterval) + "m" - + # get default query time interval for metrics queries env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] = int(os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES - - - # expected agent pod restart count + + + # expected agent pod restart count env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] = int(os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT')) if os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT') else constants.AGENT_POD_EXPECTED_RESTART_COUNT # default to azure public cloud if AZURE_CLOUD not specified env_dict['AZURE_ENDPOINTS'] = constants.AZURE_CLOUD_DICT.get(os.getenv('AZURE_CLOUD')) if os.getenv('AZURE_CLOUD') else constants.AZURE_PUBLIC_CLOUD_ENDPOINTS - + if not env_dict.get('TENANT_ID'): pytest.fail('ERROR: variable TENANT_ID is required.') - + if not env_dict.get('CLIENT_ID'): pytest.fail('ERROR: variable CLIENT_ID is required.') - + if not env_dict.get('CLIENT_SECRET'): pytest.fail('ERROR: variable CLIENT_SECRET is required.') - + print("Setup Complete.") append_result_output("Setup Complete.\n", env_dict['SETUP_LOG_FILE']) @@ -66,22 +72,22 @@ def env_dict(): else: with Path.open(my_file, "rb") as f: env_dict = pickle.load(f) - + yield env_dict - + my_file = Path("env.pkl") with FileLock(str(my_file) + ".lock"): with Path.open(my_file, "rb") as f: env_dict = pickle.load(f) env_dict['NUM_TESTS_COMPLETED'] = 1 + env_dict.get('NUM_TESTS_COMPLETED') - if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): + if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): # Checking if cleanup is required. 
if os.getenv('SKIP_CLEANUP'): return print('Starting cleanup...') append_result_output("Starting Cleanup...\n", env_dict['SETUP_LOG_FILE']) - + print("Cleanup Complete.") append_result_output("Cleanup Complete.\n", env_dict['SETUP_LOG_FILE']) return diff --git a/test/e2e/src/core/e2e_tests.sh b/test/e2e/src/core/e2e_tests.sh index 3bfafdce9..dd9d93073 100644 --- a/test/e2e/src/core/e2e_tests.sh +++ b/test/e2e/src/core/e2e_tests.sh @@ -1,7 +1,158 @@ -#!/bin/sh +#!/bin/bash +set -x results_dir="${RESULTS_DIR:-/tmp/results}" +waitForResourcesReady() { + ready=false + max_retries=60 + sleep_seconds=10 + NAMESPACE=$1 + RESOURCETYPE=$2 + RESOURCE=$3 + # if resource not specified, set to --all + if [ -z $RESOURCE ]; then + RESOURCE="--all" + fi + for i in $(seq 1 $max_retries) + do + if [[ ! $(kubectl wait --for=condition=Ready ${RESOURCETYPE} ${RESOURCE} --namespace ${NAMESPACE}) ]]; then + echo "waiting for the resource:${RESOURCE} of the type:${RESOURCETYPE} in namespace:${NAMESPACE} to be in ready state, iteration:${i}" + sleep ${sleep_seconds} + else + echo "resource:${RESOURCE} of the type:${RESOURCETYPE} in namespace:${NAMESPACE} in ready state" + ready=true + break + fi + done + + echo "waitForResourcesReady state: $ready" +} + + +waitForArcK8sClusterCreated() { + connectivityState=false + max_retries=60 + sleep_seconds=10 + for i in $(seq 1 $max_retries) + do + echo "iteration: ${i}, clustername: ${CLUSTER_NAME}, resourcegroup: ${RESOURCE_GROUP}" + clusterState=$(az connectedk8s show --name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --query connectivityStatus -o json) + clusterState=$(echo $clusterState | tr -d '"' | tr -d '"\r\n') + echo "cluster current state: ${clusterState}" + if [ ! -z "$clusterState" ]; then + if [[ ("${clusterState}" == "Connected") || ("${clusterState}" == "Connecting") ]]; then + connectivityState=true + break + fi + fi + sleep ${sleep_seconds} + done + echo "Arc K8s cluster connectivityState: $connectivityState" +} + +waitForCIExtensionInstalled() { + installedState=false + max_retries=60 + sleep_seconds=10 + for i in $(seq 1 $max_retries) + do + echo "iteration: ${i}, clustername: ${CLUSTER_NAME}, resourcegroup: ${RESOURCE_GROUP}" + installState=$(az k8s-extension show --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name azuremonitor-containers --query installState -o json) + installState=$(echo $installState | tr -d '"' | tr -d '"\r\n') + echo "extension install state: ${installState}" + if [ ! -z "$installState" ]; then + if [ "${installState}" == "Installed" ]; then + installedState=true + break + fi + fi + sleep ${sleep_seconds} + done + echo "container insights extension installedState: $installedState" +} + +validateCommonParameters() { + if [ -z $TENANT_ID ]; then + echo "ERROR: parameter TENANT_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + if [ -z $CLIENT_ID ]; then + echo "ERROR: parameter CLIENT_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $CLIENT_SECRET ]; then + echo "ERROR: parameter CLIENT_SECRET is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi +} + +validateArcConfTestParameters() { + if [ -z $SUBSCRIPTION_ID ]; then + echo "ERROR: parameter SUBSCRIPTION_ID is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $RESOURCE_GROUP ]; then + echo "ERROR: parameter RESOURCE_GROUP is required."
> ${results_dir}/error + python3 setup_failure_handler.py + fi + + if [ -z $CLUSTER_NAME ]; then + echo "ERROR: parameter CLUSTER_NAME is required." > ${results_dir}/error + python3 setup_failure_handler.py + fi +} + +addArcConnectedK8sExtension() { + echo "adding Arc K8s connectedk8s extension" + az extension add --name connectedk8s 2> ${results_dir}/error || python3 setup_failure_handler.py +} + +addArcK8sCLIExtension() { + echo "adding Arc K8s k8s-extension extension" + az extension add --name k8s-extension +} + +createArcCIExtension() { + echo "creating extension type: Microsoft.AzureMonitor.Containers" + basicparameters="--cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --extension-type Microsoft.AzureMonitor.Containers --scope cluster --name azuremonitor-containers" + if [ ! -z "$CI_ARC_RELEASE_TRAIN" ]; then + basicparameters="$basicparameters --release-train $CI_ARC_RELEASE_TRAIN" + fi + if [ ! -z "$CI_ARC_VERSION" ]; then + basicparameters="$basicparameters --version $CI_ARC_VERSION" + fi + + az k8s-extension create $basicparameters --configuration-settings omsagent.ISTEST=true +} + +showArcCIExtension() { + echo "arc ci extension status" + az k8s-extension show --cluster-name $CLUSTER_NAME --resource-group $RESOURCE_GROUP --cluster-type connectedClusters --name azuremonitor-containers +} + +deleteArcCIExtension() { + az k8s-extension delete --name azuremonitor-containers \ + --cluster-type connectedClusters \ + --cluster-name $CLUSTER_NAME \ + --resource-group $RESOURCE_GROUP --yes +} + +login_to_azure() { + # Login with service principal + echo "login to azure using the SP creds" + az login --service-principal \ + -u ${CLIENT_ID} \ + -p ${CLIENT_SECRET} \ + --tenant ${TENANT_ID} 2> ${results_dir}/error || python3 setup_failure_handler.py + + echo "setting subscription: ${SUBSCRIPTION_ID} as default subscription" + az account set -s $SUBSCRIPTION_ID +} + + # saveResults prepares the results for handoff to the Sonobuoy worker. # See: https://github.com/vmware-tanzu/sonobuoy/blob/master/docs/plugins.md saveResults() { @@ -17,6 +168,50 @@ saveResults() { # Ensure that we tell the Sonobuoy worker we are done regardless of results. trap saveResults EXIT +# validate common params +validateCommonParameters + +IS_ARC_K8S_ENV="true" +if [ -z $IS_NON_ARC_K8S_TEST_ENVIRONMENT ]; then + echo "arc k8s environment" +else + if [ "$IS_NON_ARC_K8S_TEST_ENVIRONMENT" = "true" ]; then + IS_ARC_K8S_ENV="false" + echo "non arc k8s environment" + fi +fi + +if [ "$IS_ARC_K8S_ENV" = "false" ]; then + echo "skipping installing of ARC K8s container insights extension since the test environment is non-arc K8s" +else + # validate params + validateArcConfTestParameters + + # login to azure + login_to_azure + + # add arc k8s connectedk8s extension + addArcConnectedK8sExtension + + # wait for arc k8s pods to be ready state + waitForResourcesReady azure-arc pods + + # wait for Arc K8s cluster to be created + waitForArcK8sClusterCreated + + # add CLI extension + addArcK8sCLIExtension + + # add ARC K8s container insights extension + createArcCIExtension + + # show the ci extension status + showArcCIExtension + + #wait for extension state to be installed + waitForCIExtensionInstalled +fi + # The variable 'TEST_LIST' should be provided if we want to run specific tests. If not provided, all tests are run NUM_PROCESS=$(pytest /e2etests/ --collect-only -k "$TEST_NAME_LIST" -m "$TEST_MARKER_LIST" | grep "