From babdd5ff504d333d047c1505ec1c4b367afd0e11 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 9 Dec 2024 14:58:05 -0800 Subject: [PATCH 01/31] add a test document for #32 --- .../testing/.null-ls_369210_fuzzyMatchTest.md | 58 ------------------- scenarios/testing/CodeBlocks.md | 35 +++++++++++ 2 files changed, 35 insertions(+), 58 deletions(-) delete mode 100644 scenarios/testing/.null-ls_369210_fuzzyMatchTest.md create mode 100644 scenarios/testing/CodeBlocks.md diff --git a/scenarios/testing/.null-ls_369210_fuzzyMatchTest.md b/scenarios/testing/.null-ls_369210_fuzzyMatchTest.md deleted file mode 100644 index fbae0352..00000000 --- a/scenarios/testing/.null-ls_369210_fuzzyMatchTest.md +++ /dev/null @@ -1,58 +0,0 @@ -# Testing multi Line code block - -```azurecli-interactive -echo "Hello World" -``` - -This is what the expected output should be - - - -```text -Hello world -``` - -# Testing multi Line code block - -```azurecli-interactive -echo "Hello \ -world" -``` - -# Output Should Fail - - - -```text -Hello world -``` - -# Code block - -```azurecli-interactive -echo "Hello \ -world" -``` - -# Output Should Pass - - - -```text -Hello world -``` - -# Code block - -```azurecli-interactive -echo "Hello \ -world" -``` - -# Bad similarity - should fail - - - -```text -Hello world -``` diff --git a/scenarios/testing/CodeBlocks.md b/scenarios/testing/CodeBlocks.md new file mode 100644 index 00000000..e229dc7e --- /dev/null +++ b/scenarios/testing/CodeBlocks.md @@ -0,0 +1,35 @@ +# Test Code Blocks + +This documetn should contain an near exhaustive set of code blocks and surronding content for test purposes. + +If this document passes `ie test` then we are good to go. + +## Simple Case + +The simple case is some descriptive text before the code block, followed by the code block and its results. + +```bash +echo "Hello, world!" +``` + + + +```text +Hello, world! 
+``` + +## Sandwhich Case + +The sandwich case is like the simple case above, but there is more text after the code block. Execution should be no different but the output should include the content from both before and after the code block. Currently `ie test` does not validate this, so we will need to run `ie interactive` and eyeball the results of this one. + +```bash +echo "Can I have a sandwich please." +``` + + + +```text +Can I have a sandwich please. +``` + +This is the content after the code block. As long as you can see this we are good to go. From 5363bab71b7fce85ee8b9a59c5dd3f55c7fdd53a Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 9 Dec 2024 17:23:39 -0800 Subject: [PATCH 02/31] If there are multiple paragraphs before a code block then add them all to the description. --- internal/parsers/markdown.go | 8 +++++++- scenarios/testing/CodeBlocks.md | 18 +++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/internal/parsers/markdown.go b/internal/parsers/markdown.go index ab964b11..3ffcbd40 100644 --- a/internal/parsers/markdown.go +++ b/internal/parsers/markdown.go @@ -99,6 +99,7 @@ func ExtractCodeBlocksFromAst( var lastExpectedSimilarityScore float64 var lastExpectedRegex *regexp.Regexp var lastNode ast.Node + var currentParagraphs string ast.Walk(node, func(node ast.Node, entering bool) (ast.WalkStatus, error) { if entering { @@ -108,6 +109,10 @@ func ExtractCodeBlocksFromAst( lastHeader = string(extractTextFromMarkdown(&n.BaseBlock, source)) lastNode = node case *ast.Paragraph: + if currentParagraphs != "" { + currentParagraphs += "\n\n" + } + currentParagraphs += string(extractTextFromMarkdown(&n.BaseBlock, source)) lastNode = node // Extract the code block if it matches the language. 
case *ast.HTMLBlock: @@ -151,7 +156,7 @@ func ExtractCodeBlocksFromAst( if lastNode != nil { switch n := lastNode.(type) { case *ast.Paragraph: - description = string(extractTextFromMarkdown(&n.BaseBlock, source)) + description = currentParagraphs default: logging.GlobalLogger.Warnf("The node before the codeblock `%s` is not a paragraph, it is a %s", content, n.Kind()) } @@ -159,6 +164,7 @@ func ExtractCodeBlocksFromAst( logging.GlobalLogger.Warnf("There are no markdown elements before the last codeblock `%s`", content) } + currentParagraphs = "" lastNode = node for _, desiredLanguage := range languagesToExtract { if language == desiredLanguage { diff --git a/scenarios/testing/CodeBlocks.md b/scenarios/testing/CodeBlocks.md index e229dc7e..2eb84cc2 100644 --- a/scenarios/testing/CodeBlocks.md +++ b/scenarios/testing/CodeBlocks.md @@ -1,6 +1,6 @@ # Test Code Blocks -This documetn should contain an near exhaustive set of code blocks and surronding content for test purposes. +This document should contain a near exhaustive set of code blocks and surronding content for test purposes. If this document passes `ie test` then we are good to go. @@ -18,6 +18,22 @@ echo "Hello, world!" Hello, world! ``` +## Multiparagraph intro + +It should be OK to have multiple paragraphs before the code block. + +This isn't currently tested by `ie test`, so we will need to run `ie interactive` and eyeball the results of this one. + +```bash +echo "Detailed descriptions are important." +``` + + + +```text +Detailed descriptions are important. +``` + ## Sandwhich Case The sandwich case is like the simple case above, but there is more text after the code block. Execution should be no different but the output should include the content from both before and after the code block. Currently `ie test` does not validate this, so we will need to run `ie interactive` and eyeball the results of this one. 
From f1026479ccda703f26d02d4244904bd65a854b77 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 13 Jan 2025 18:18:34 -0800 Subject: [PATCH 03/31] add a bunch of DDD docs and implement basic prerequisite support (closes #236) --- Makefile | 6 ++ README.md | 8 ++- cmd/ie/commands/execute.go | 2 +- docs/README.md | 7 +- docs/helloWorldDemo.md | 52 ++++++++++++++ docs/modesOfOperation.md | 40 +++++++++++ docs/prerequisiteExample.md | 17 +++++ docs/prerequisitesAndIncludes.md | 111 +++++++++++++++++++++++++++++ internal/engine/common/scenario.go | 29 ++++++++ internal/parsers/markdown.go | 34 +++++++++ scripts/test_ie.sh | 4 -- 11 files changed, 301 insertions(+), 9 deletions(-) create mode 100644 docs/helloWorldDemo.md create mode 100644 docs/modesOfOperation.md create mode 100644 docs/prerequisiteExample.md create mode 100644 docs/prerequisitesAndIncludes.md delete mode 100755 scripts/test_ie.sh diff --git a/Makefile b/Makefile index 93b54fe8..6a121dbf 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,12 @@ test-upstream-scenarios: ($(MAKE) test-scenario SCENARIO="$${dir}README.md" SUBCRIPTION="$(SUBSCRIPTION)" WORKING_DIRECTORY="$${dir}" ENVIRONMENT="$(ENVIRONMENT)") || exit $$?; \ done +test-docs: + @echo "Testing all documents in the docs folder" + for file in ./docs/*.md; do \ + ($(MAKE) test-scenario SCENARIO="$${file}") || exit $$?; \ + done + # ------------------------------- Run targets ---------------------------------- run-ie: build-ie diff --git a/README.md b/README.md index 6adebcbb..291adcf8 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,11 @@ command: ## Testing Innovation Engine -Innovation Engine is self-documenting, that is all our documentation is written to be executable. Since Innovation Engine can test the actual results of an execution against the intended reslts this means our documentation is also part of our test suite. In our `scripts` folder you will find a `test_ie.sh` script. 
Running this will run through all of our documentation in test mode. +Innovation Engine is self-documenting, that is all our documentation is written to be executable. Since Innovation Engine can test the results of an execution against the intended results this means our documentation is also part of our test suite. Testing against all our documentation is easy as: + +```bash +make test-docs +``` If you make any changes to the IE code (see Contributing below) we would encourage you to tun the full test suite before issuing a PR. @@ -179,7 +183,7 @@ features and cool ideas to yourself. Please issue pull requests against our [GitHub repo](https://github.com/Azure/innovationengine). Be sure to use our Git pre-commit script to test your contributions -before committing, simply run the following command: `python3 main.py test test` +before committing, simply run the following command: `make test-docs` This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement diff --git a/cmd/ie/commands/execute.go b/cmd/ie/commands/execute.go index 8f74ada4..4174a347 100644 --- a/cmd/ie/commands/execute.go +++ b/cmd/ie/commands/execute.go @@ -37,7 +37,7 @@ func init() { var executeCommand = &cobra.Command{ Use: "execute [markdown file]", Args: cobra.MinimumNArgs(1), - Short: "Execute the commands for an Azure deployment scenario.", + Short: "Execute the commands in an executable document.", Run: func(cmd *cobra.Command, args []string) { markdownFile := args[0] if markdownFile == "" { diff --git a/docs/README.md b/docs/README.md index 579d6e0a..c701d8f0 100644 --- a/docs/README.md +++ b/docs/README.md @@ -37,10 +37,13 @@ As an excercise, if you have checked out the Innovation Engine code you could ed # Next Steps (TBD) +1. [Modes of operation](modesOfOperation.md) +2. [Hello World Demo](helloWorldDemo.md) + + +That's great, but so what? Isn't it just outputing what is in the document illustrating the expected resutls? 
Well, no. The commands are actually run in a shell. For example, the date command will output the actual time at your location rather than what is written in the document. + +```bash +date +``` + + +```text +Wed Apr 20 15:35:31 PDT 2022 +``` + +You can run almost any shell command this way. + + + +# Next Steps + + 1. [Prerequisites](prerequesites.md) + + diff --git a/docs/modesOfOperation.md b/docs/modesOfOperation.md new file mode 100644 index 00000000..d6d1e463 --- /dev/null +++ b/docs/modesOfOperation.md @@ -0,0 +1,40 @@ +# Modes of Operation + +Innovation Engine provides a number of modes of operation. You can view a summary of these with `ie --help`, this document provides more detail about each mode: + + * `execute` - Execute the commands in an executable document without interaction - ideal for unattended execution. + * `interactive` - Execute a document in interactive mode - ideal for learning. + * `test` - Execute the commands in a document and test output against the expected. Abort if a test fails. + * `to-bash` - Convert the commands in a document into a bash script for standalone execution. + * `inspect` - Deprecated + +## Interactive Mode + +In Innovation Engine parses the document and presents it one chunk at a time. The the console displays the descriptive text along with the commands to be run and pauses for the user to indicate they are ready to progress. The user can look forward, or backward in the document and can execute the command being displayed (including any outstanding commands up until that point). + +This mode is ideal for learning or teaching scenarios as it presents full context and descriptive text. If, however, you would prefer to simply run the commands without interactions use the `execute` mode instead. + +## Execute Mode + +Execute mode allows for unnatended execution of the document. Unless the script in the document requires user interaction the user can simply leave the script to run in this mode. 
However, they are also not given the opportunity to review commands before they are executed. If manual review is important use the `interactive` mode instead. + +## Test Mode + +Test mode runs the commands and then verifies that the output is sufficiently similar to the expected results (recorded in the markdown file) to be considered correct. This mode is similar to `execute` mode but provides more useful output in the event of a test failure. + +## To-bash mode + +`to-bash` mode does not execute any of the commands, instead is outputs a bash script that can be run independently of Innovation Engine. Generally you will want to send the outputs of this command to a file, e.g. `ie to-bash coolmd > cool.sh`. + +## Inspect mode + +This mode is deprecated and should not be used. + +# Next Steps + + \ No newline at end of file diff --git a/docs/prerequisiteExample.md b/docs/prerequisiteExample.md new file mode 100644 index 00000000..9952ee16 --- /dev/null +++ b/docs/prerequisiteExample.md @@ -0,0 +1,17 @@ +# Prerequisite Example + +This document is a prerequisite example that is used by the [Prerequisites and Includes](prerequisitesAndIncludes.md) document. These two documents together describe and illustrate the use of Prerequisites in Innovation Engine. + +## Environment Variables + +Lets set an environment variable. This is a good use of pre-requisites because it allows document authors to use the same environment variables across multiple documents. This reduces the opportunity for errors and reduces the content that each author needs to create. Here we will create an 8 character hash that can be used in subsequent commands to ensure each run can create unique values for IDs. + +```bash +export UNIQUE_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) +``` + +Now we will echo this to the console. This will both serve to illustrate that this prerequisite has been executed but also allow the user to review the value. 
+ +```bash +echo "Unique hash: $UNIQUE_HASH" +``` \ No newline at end of file diff --git a/docs/prerequisitesAndIncludes.md b/docs/prerequisitesAndIncludes.md new file mode 100644 index 00000000..49c5a500 --- /dev/null +++ b/docs/prerequisitesAndIncludes.md @@ -0,0 +1,111 @@ +# Prerequisites and Includes + +It is often useful to break down a large document into component pieces. Long and complex documents can be off-putting, especially when the reader already has some of the base knowledge needed. There are two ways to achieve this, prerequisites and includes. The difference between them is how they are handled and where they appear in the document. This document describes both approaches. + +## Prerequisites + +Prerequisites are documents that should be executed before the current document proceeds. They are used to ensure, for example, that the environment is correctly setup. When running in interactive mode the user is given the opportunity to run the prerequsites interactively or non-interactively. This allows the user to skip details they already understand or to step through concepts that are new to them. + +This document defines a [prerequisite](prerequisiteExample.md). In fact, if you are running in Innovation Engine you will already have seen it execute. In the following sections we'll explore how that happened. We can validate it ran by ensuring that the environment variable set in that document has a vlue. + +```bash +if [ -z "$UNIQUE_HASH" ]; then + echo "UNIQUE_HASH has no value. It looks like prerequisites were not run correctly." +else + echo "Unique Hash is '$UNIQUE_HASH'" +fi +``` + + +```text +Unique Hash is 'abcd1234' +``` + +### Prerequisites Syntax + +The prerequisites section starts with a heading of `## prerequisites`. + +The body of this section will contain 0 or more links to a document that should be executed ahead of the current one. 
When viewed in a rendered form, such as a web page, the link allows the user to click through to view the document. When interpreted by Innovation Engine the document will be loaded and executed within the same context as the current document. + +### Automatically validating Pre-requisites + +Some pre-requisite steps can take a long time to execute. For this +reason it is possible to provide some validation checks to see if the +pre-requisite step has been completed. These are defined in a section +towards the end of the script, before the next steps section (if one +exists). The validation steps will be executed by SimDem *before* +running the pre-requisite steps, if the tests in that section pass +then there is no need to run the pre-requisites. + +It's easier to explain with an example. + +Imagine we have a prerequisite step that takes 5 seconds, we don't +want to wait 5 seconds only to find that we already completed that +pre-requisite (OK, we know 5 seconds is not long, but it's long enough +to serve for this demo). For this example we will merely sleep for 5 +seconds then touch a file. To validate this prequisite has been +satisfied we will test the modified date of the file, if it has been +modified in the last 5 minutes then the pre-requisite has been +satisfied. + +```bash +sleep 5 +echo $SIMDEM_TEMP_DIR +mkdir -p $SIMDEM_TEMP_DIR +touch $SIMDEM_TEMP_DIR/this_file_must_be_modfied_every_minute.txt +``` + +Now we have a set of commands that should be executed as part of this +pre-requisite. In order to use them we simply add a reference to this +file in the pre-requisites section of any other script. + +Any code in a section headed with '# Validation' will be used by +SimDem to test whether the pre-requisites have been satisfied. If +validation tests pass the pre-requisite step will be skipped over, +otherwise the other commands in the script will be executed. 
+ +### Validation + +In order to continue with our example we include some vlaidation steps +in this script. If you have not run through the commands above less +than one minute ago this validation stage will fail. If you are +working through this tutorial now you just executed the above +statements and so the tests here will pass, but if you include this +file as pre-requisite again it may well fail and thus automatically +execute this script. + +For this pre-requisite we need to ensure that the test.txt file has +been updated in the last 5 minutes. If not then we need to run the +commands in this document. If you are running through this document in +SimDem itself then it might be worth going back to the page that calls +this as a pre-requisite, as long as you do this in the next five +minutes you won't come back here. You can do this by selecting +"Understanding SimDem Syntax" in the next steps section. + +```bash +find $SIMDEM_TEMP_DIR -name "this_file_must_be_modfied_every_minute.txt" -newermt "1 minutes ago" +``` + +Results: + +``` +/home//.simdem/tmp/this_file_must_be_modfied_every_minute.txt +``` + +## Includes + +Includes can appear anywhere in the document and are useful for including content that is shared across multiple documents. When an executable document contains includes the content of the included file is treated as if it were a part of the original file. + +TODO: document the intended behavioud and implement it + +# Next Steps + + diff --git a/internal/engine/common/scenario.go b/internal/engine/common/scenario.go index f0552df2..c1f33f72 100644 --- a/internal/engine/common/scenario.go +++ b/internal/engine/common/scenario.go @@ -134,6 +134,35 @@ func CreateScenarioFromMarkdown( logging.GlobalLogger.WithField("CodeBlocks", codeBlocks). Debugf("Found %d code blocks", len(codeBlocks)) + // Extract the URLs of any prerequisite documents from the markdown file. 
+ prerequisiteUrls, err := parsers.ExtractPrerequisiteUrlsFromAst(markdown, source) + if err == nil && len(prerequisiteUrls) > 0 { + for _, url := range prerequisiteUrls { + logging.GlobalLogger.Infof("Prerequisite: %s", url) + if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") { + url = filepath.Join(filepath.Dir(path), url) + } + prerequisiteSource, err := resolveMarkdownSource(url) + if err != nil { + return nil, err + } + + prerequisiteMarkdown := parsers.ParseMarkdownIntoAst(prerequisiteSource) + prerequisiteProperties := parsers.ExtractYamlMetadataFromAst(prerequisiteMarkdown) + for key, value := range prerequisiteProperties { + properties[key] = value + } + + prerequisiteVariables := parsers.ExtractScenarioVariablesFromAst(prerequisiteMarkdown, prerequisiteSource) + for key, value := range prerequisiteVariables { + environmentVariables[key] = value + } + + prerequisiteCodeBlocks := parsers.ExtractCodeBlocksFromAst(prerequisiteMarkdown, prerequisiteSource, languagesToExecute) + codeBlocks = append(codeBlocks, prerequisiteCodeBlocks...) + } + } + varsToExport := lib.CopyMap(environmentVariableOverrides) for key, value := range environmentVariableOverrides { environmentVariables[key] = value diff --git a/internal/parsers/markdown.go b/internal/parsers/markdown.go index 3ffcbd40..a6a169ed 100644 --- a/internal/parsers/markdown.go +++ b/internal/parsers/markdown.go @@ -233,6 +233,40 @@ func ExtractScenarioVariablesFromAst(node ast.Node, source []byte) map[string]st return scenarioVariables } +// Extracts a list of markdown URLs that are contained within the section that has the title "Prerequisites". 
+func ExtractPrerequisiteUrlsFromAst(node ast.Node, source []byte) ([]string, error) { + var urls []string + var inPrerequisitesSection bool + + ast.Walk(node, func(node ast.Node, entering bool) (ast.WalkStatus, error) { + if entering { + switch n := node.(type) { + case *ast.Heading: + if n.Level == 2 { + headingText := string(extractTextFromMarkdown(&n.BaseBlock, source)) + if headingText == "Prerequisites" { + inPrerequisitesSection = true + } else { + inPrerequisitesSection = false + } + } + case *ast.Link: + if inPrerequisitesSection { + url := string(n.Destination) + urls = append(urls, url) + } + } + } + return ast.WalkContinue, nil + }) + + if len(urls) == 0 { + return nil, fmt.Errorf("no URLs found in the Prerequisites section") + } + + return urls, nil +} + // Converts a string of shell variable exports into a map of key/value pairs. // I.E. `export FOO=bar\nexport BAZ=qux` becomes `{"FOO": "bar", "BAZ": "qux"}` func convertScenarioVariablesToMap(variableBlock string) map[string]string { diff --git a/scripts/test_ie.sh b/scripts/test_ie.sh deleted file mode 100755 index 50d539b1..00000000 --- a/scripts/test_ie.sh +++ /dev/null @@ -1,4 +0,0 @@ -for file in docs/*; do - echo "=== Testing '$file' ===" - ie test "$file" -done \ No newline at end of file From 9ac0c90f9dedaad261df76a2c683cf4be4915f8b Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Fri, 17 Jan 2025 15:00:41 -0800 Subject: [PATCH 04/31] Use prerequissites in KubeRay example --- .../KubeRay/Prerequisite-AzureCLIAndSub.md | 40 +++++++ examples/KubeRay/Prerequisites-DraftForAKS.md | 20 ++++ examples/KubeRay/Prerequisites-Helm.md | 0 examples/KubeRay/Prerequisites-Terraform.md | 0 examples/KubeRay/deploy-kuberay.md | 108 +----------------- 5 files changed, 64 insertions(+), 104 deletions(-) create mode 100644 examples/KubeRay/Prerequisite-AzureCLIAndSub.md create mode 100644 examples/KubeRay/Prerequisites-DraftForAKS.md create mode 100644 examples/KubeRay/Prerequisites-Helm.md create mode 100644 
examples/KubeRay/Prerequisites-Terraform.md diff --git a/examples/KubeRay/Prerequisite-AzureCLIAndSub.md b/examples/KubeRay/Prerequisite-AzureCLIAndSub.md new file mode 100644 index 00000000..714a3fb2 --- /dev/null +++ b/examples/KubeRay/Prerequisite-AzureCLIAndSub.md @@ -0,0 +1,40 @@ +This document uses the Azure CLI connected to an active Azure Subscription. The following commands ensure that you have both an active subscription and a current version of the Azure CLI. + +### Azure CLI + +The Azure CLI is used to interact with Azure. + +```bash +if ! command -v az &> /dev/null +then + echo "Azure CLI could not be found, installing..." + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash +fi + +echo "Azure CLI is installed." +``` + + +```text +Azure CLI is installed. +``` +For more details on installing the CLI see [How to install the Azure CLI](/cli/azure/install-azure-cli). + + +### Azure Subscription + +You need to be logged in to an active Azure subscription is required. If you don't have an Azure subscription, you can [create a free account](https://azure.microsoft.com/free/). + +```bash +if ! az account show > /dev/null 2>&1; then + echo "Please login to Azure CLI using 'az login' before running this script." +else + export ACTIVE_SUBSCRIPTION_ID=$(az account show --query id -o tsv) + echo "Currently logged in to Azure CLI. Using subscription ID: $ACTIVE_SUBSCRIPTION_ID." +fi +``` + + +```text +Currently logged in to Azure CLI. 
Using subscription ID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx +``` \ No newline at end of file diff --git a/examples/KubeRay/Prerequisites-DraftForAKS.md b/examples/KubeRay/Prerequisites-DraftForAKS.md new file mode 100644 index 00000000..060dd3fd --- /dev/null +++ b/examples/KubeRay/Prerequisites-DraftForAKS.md @@ -0,0 +1,20 @@ +### Draft for Azure Kubernetes Service (AKS) + +[Draft](https://github.com/Azure/draft) is an open-source project that streamlines Kubernetes development by taking a non-containerized application and generating the Dockerfiles, Kubernetes manifests, Helm charts, Kustomize configurations, and other artifacts associated with a containerized application. + +```bash +if ! command -v draft &> /dev/null +then + echo "Draft could not be found, installing..." + curl -fsSL https://raw.githubusercontent.com/Azure/draft/main/scripts/install.sh | bash +fi + +echo "Draft is installed." +``` + + +```text +Draft is installed. +``` + +For more details on installing Draft see [Azure Kubernetes Service Preview extension](/azure/aks/draft#install-the-aks-preview-azure-cli-extension). \ No newline at end of file diff --git a/examples/KubeRay/Prerequisites-Helm.md b/examples/KubeRay/Prerequisites-Helm.md new file mode 100644 index 00000000..e69de29b diff --git a/examples/KubeRay/Prerequisites-Terraform.md b/examples/KubeRay/Prerequisites-Terraform.md new file mode 100644 index 00000000..e69de29b diff --git a/examples/KubeRay/deploy-kuberay.md b/examples/KubeRay/deploy-kuberay.md index eaec7d25..ae978f82 100644 --- a/examples/KubeRay/deploy-kuberay.md +++ b/examples/KubeRay/deploy-kuberay.md @@ -6,110 +6,10 @@ In this article, you configure and deploy a Ray cluster on Azure Kubernetes Serv ## Prerequisites * Review the [Ray cluster on AKS overview](./ray-overview.md) to understand the components and deployment process. - -### Azure CLI - -The Azure CLI is used to interact with Azure. - -```bash -if ! 
command -v az &> /dev/null -then - echo "Azure CLI could not be found, installing..." - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash -fi - -echo "Azure CLI is installed." -``` - - -```text -Azure CLI is installed. -``` -For more details on installing the CLI see [How to install the Azure CLI](/cli/azure/install-azure-cli). - - -### Azure Subscription - -You need to be logged in to an active Azure subscription is required. If you don't have an Azure subscription, you can create a free account [here](https://azure.microsoft.com/free/). - -```bash -if ! az account show > /dev/null 2>&1; then - echo "Please login to Azure CLI using 'az login' before running this script." -else - export ACTIVE_SUBSCRIPTION_ID=$(az account show --query id -o tsv) - echo "Currently logged in to Azure CLI. Using subscription ID: $ACTIVE_SUBSCRIPTION_ID." -fi -``` - - -```text -Currently logged in to Azure CLI. Using subscription ID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx -``` - -### Draft for Azure Kubernetes Service (AKS) - -TODO: Is Draft really needed - not sure it is since I ran some tests sucessfully without it. - -[Draft](https://github.com/Azure/draft) is an open-source project that streamlines Kubernetes development by taking a non-containerized application and generating the Dockerfiles, Kubernetes manifests, Helm charts, Kustomize configurations, and other artifacts associated with a containerized application. - -```bash -if ! command -v draft &> /dev/null -then - echo "Draft could not be found, installing..." - curl -fsSL https://raw.githubusercontent.com/Azure/draft/main/scripts/install.sh | bash -fi - -echo "Draft is installed." -``` - - -```text -Draft is installed. -``` - -For more details on installing Draft see [Azure Kubernetes Service Preview extension](/azure/aks/draft#install-the-aks-preview-azure-cli-extension). - -### Helm - -Helm is a package manager for Kubernetes. It is one ofthe best ways to find, share, and use software built for Kubernetes. 
- -```bash -if ! command -v helm &> /dev/null -then - echo "Helm could not be found, installing..." - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -fi - -echo "Helm is installed." -``` - - -```text -Helm is installed. -``` - -[Helm documentation](https://helm.sh/docs/intro/install/) provides more information on installing Helm. - -### Terraform - -[Terraform client tools](https://developer.hashicorp.com/terraform/install) or [OpenTofu](https://opentofu.org/) need to be installed. This article uses Terraform, but the modules used should be compatible with OpenTofu. - -```bash -if ! command -v terraform &> /dev/null -then - echo "Terraform could not be found, installing..." - curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - - sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" - sudo apt-get update && sudo apt-get install terraform -fi - -echo "Terraform is installed." -``` - - -```text -Terraform is installed. -``` +* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](Prerequisites-AzureCLIAndSub.md) +* Install [Draft for AKS](Prerequisites-DraftForAKS.md) - a tool to help containerize applications. TODO: Is Draft really needed - not sure it is since I ran some tests sucessfully without it. +* Install [Helm](Prerequisites-Helm.md) - package manager for Kubernetes. +* Install [Terraform](Prerequisites-Terraform.md) - Infrastructure as Code management tool ## Create an AKS cluster From f8969a14a8cba0d33eda23a00d7dc203bda3c53d Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Tue, 28 Jan 2025 12:04:01 -0800 Subject: [PATCH 05/31] Add instructions for installing authoring tools. Add docker file and instructions. a little refactoring of prereq examples. 
--- Dockerfile | 37 ++ README.md | 33 ++ docs/helloWorldDemo.md | 13 +- examples/AKS-Automatic/CreateLinuxCluster.md | 452 ++++++++++++++++++ .../Prerequisite-AzureCLIAndSub.md | 0 .../Prerequisites-DraftForAKS.md | 0 .../{KubeRay => Common}/Prerequisites-Helm.md | 0 .../Prerequisites-Terraform.md | 0 examples/KubeRay/deploy-kuberay.md | 4 +- .../Check-VM-SKU-Availability.md | 23 + 10 files changed, 559 insertions(+), 3 deletions(-) create mode 100644 Dockerfile create mode 100644 examples/AKS-Automatic/CreateLinuxCluster.md rename examples/{KubeRay => Common}/Prerequisite-AzureCLIAndSub.md (100%) rename examples/{KubeRay => Common}/Prerequisites-DraftForAKS.md (100%) rename examples/{KubeRay => Common}/Prerequisites-Helm.md (100%) rename examples/{KubeRay => Common}/Prerequisites-Terraform.md (100%) create mode 100644 examples/VM-SKU-Availability/Check-VM-SKU-Availability.md diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..83b00f77 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,37 @@ +# filepath: /home/rogardle/projects/InnovationEngine/Dockerfile +FROM golang:1.20-alpine + +RUN apk update && apk add --no-cache make git openssh python3 py3-pip bash +RUN apk add --no-cache --update \ + bash \ + cargo \ + git \ + gcc \ + libffi-dev \ + make \ + musl-dev \ + openssl-dev \ + python3 \ + py3-pip \ + python3-dev + +WORKDIR /InnovationEngine + +# Create a virtual environment and install the experimental Authoring Tools and az cli +RUN python3 -m venv /InnovationEngine/venv +RUN /InnovationEngine/venv/bin/pip install openai azure-identity requests pygithub +RUN /InnovationEngine/venv/bin/pip install azure-cli + +ENV VIRTUAL_ENV=/InnovationEngine/venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +RUN mkdir -p AuthoringTools +RUN wget -O AuthoringTools/ada.py https://raw.githubusercontent.com/naman-msft/exec/main/tools/ada.py +RUN chmod +x AuthoringTools/ada.py + +# Install the Innovation Engine +COPY . . 
+RUN make build-ie +ENV PATH="/InnovationEngine/bin:${PATH}" + +CMD ["sh", "-c", "ie execute docs/helloWorldDemo.md"] \ No newline at end of file diff --git a/README.md b/README.md index 291adcf8..2d4e801d 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,24 @@ command: ```bash ./bin/ie execute tutorial.md ``` +### Building a Container from Source + +```bash +docker build -t ie . +``` + +Once built you can run the container and connect to it. Innovation Engine will automatically run an introductory +document when you execute this command. + +```bash +docker run -it ie . +``` + +You can override the start command if you want to take control immediately with: + +```bash +docker run -it ie /bin/sh +``` ## Testing Innovation Engine @@ -176,6 +194,21 @@ jobs: python3 main.py test README.md ``` +# Authoring Documents + +Authoring documents for use in Innovation Engine is no different from writing high quality documentation for reading. However, it does force you to follow good practice and therefore can sometimes feel a little too involved. That is every edge case needs to be accounted for so that automated testing will reliably pass. We are therefore working on tools to help you in the authoring process. + +These tools are independent of Innovation Engine, however, if you build a container from source they will be included in that container. To use them you will need an Azure OpenAI key (you can use an OpenAI key if you prefer) - be sure to add them in the command below. + +```bash +docker run -it \ + -e AZURE_OPENAI_API_KEY=$AZURE_OPENAI_API_KEY \ + -e AZURE_OPENAI_ENDPOINT=$AZURE_OPENAI_ENDPOINT \ + ie /bin/sh -c "python AuthoringTools/ada.py" +``` + + + ## Contributing This is an open source project. 
Don't keep your code improvements, diff --git a/docs/helloWorldDemo.md b/docs/helloWorldDemo.md index 30fb6130..37c2a7d2 100644 --- a/docs/helloWorldDemo.md +++ b/docs/helloWorldDemo.md @@ -30,7 +30,18 @@ date Wed Apr 20 15:35:31 PDT 2022 ``` -You can run almost any shell command this way. +You can run almost any shell command this way. This means it is a good idea to use environment variables. Let's setup a couple. + +```bash +export APPLICATION_NAME="Innovation Engine" +export GREETING="Hello World" +``` + +Now we can use those to ensure our values are carried through to commands. + +```bash +echo "$GREETING, from $APPLICATION_NAME" +``` diff --git a/examples/AKS-Automatic/CreateLinuxCluster.md b/examples/AKS-Automatic/CreateLinuxCluster.md new file mode 100644 index 00000000..5512d3f2 --- /dev/null +++ b/examples/AKS-Automatic/CreateLinuxCluster.md @@ -0,0 +1,452 @@ +Note that this document is an Executable Docs version of https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-automatic-deploy?pivots=azure-cli taken as a snapshot on 1/17/25. For the latest version please visit the published document. + +--- +title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview)' +description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) Automatic (preview). +ms.topic: quickstart +ms.custom: build-2024, devx-track-azurecli, devx-track-bicep, ignite-2024 +ms.date: 05/21/2024 +author: sabbour +ms.author: asabbour +zone_pivot_groups: bicep-azure-cli-portal +--- + +# Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview) + +**Applies to:** :heavy_check_mark: AKS Automatic (preview) + +[Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] provides the easiest managed Kubernetes experience for developers, DevOps engineers, and platform engineers. 
Ideal for modern and AI applications, AKS Automatic automates AKS cluster setup and operations and embeds best practice configurations. Users of any skill level can benefit from the security, performance, and dependability of AKS Automatic for their applications. + +In this quickstart, you learn to: + +- Deploy an AKS Automatic cluster. +- Run a sample multi-container application with a group of microservices and web front ends simulating a retail scenario. + + +## Before you begin + +This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. + +[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] + +- This article requires version 2.57.0 or later of the Azure CLI. If you're using Azure Cloud Shell, the latest version is already installed there. +- This article requires the `aks-preview` Azure CLI extension version **9.0.0b4** or later. +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. +- Register the `AutomaticSKUPreview` feature in your Azure subscription. +- The identity creating the cluster should also have the [following permissions on the resource group][Azure-Policy-RBAC-permissions]: + - `Microsoft.Authorization/policyAssignments/write` + - `Microsoft.Authorization/policyAssignments/read` +- AKS Automatic clusters require deployment in Azure regions that support at least 3 [availability zones][availability-zones]. +:::zone target="docs" pivot="bicep" +- To deploy a Bicep file, you need to write access on the resources you create and access to all operations on the `Microsoft.Resources/deployments` resource type. 
For example, to create a virtual machine, you need `Microsoft.Compute/virtualMachines/write` and `Microsoft.Resources/deployments/*` permissions. For a list of roles and permissions, see [Azure built-in roles](/azure/role-based-access-control/built-in-roles). +:::zone-end + +> [!IMPORTANT] +> AKS Automatic tries to dynamically select a virtual machine SKU for the `system` node pool based on the capacity available in the subscription. Make sure your subscription has quota for 16 vCPUs of any of the following SKUs in the region you're deploying the cluster to: [Standard_D4pds_v5](/azure/virtual-machines/sizes/general-purpose/dpsv5-series), [Standard_D4lds_v5](/azure/virtual-machines/sizes/general-purpose/dldsv5-series), [Standard_D4ads_v5](/azure/virtual-machines/sizes/general-purpose/dadsv5-series), [Standard_D4ds_v5](/azure/virtual-machines/sizes/general-purpose/ddsv5-series), [Standard_D4d_v5](/azure/virtual-machines/sizes/general-purpose/ddv5-series), [Standard_D4d_v4](/azure/virtual-machines/sizes/general-purpose/ddv4-series), [Standard_DS3_v2](/azure/virtual-machines/sizes/general-purpose/dsv3-series), [Standard_DS12_v2](/azure/virtual-machines/sizes/memory-optimized/dv2-dsv2-series-memory). You can [view quotas for specific VM-families and submit quota increase requests](/azure/quotas/per-vm-quota-requests) through the Azure portal. + +### Install the aks-preview Azure CLI extension + +[!INCLUDE [preview features callout](~/reusable-content/ce-skilling/azure/includes/aks/includes/preview/preview-callout.md)] + +To install the aks-preview extension, run the following command: + +```azurecli +az extension add --name aks-preview +``` + +Run the following command to update to the latest version of the extension released: + +```azurecli +az extension update --name aks-preview +``` + +### Register the feature flags + +To use AKS Automatic in preview, register the following flag using the [az feature register][az-feature-register] command. 
+ +```azurecli-interactive +az feature register --namespace Microsoft.ContainerService --name AutomaticSKUPreview +``` + +Verify the registration status by using the [az feature show][az-feature-show] command. It takes a few minutes for the status to show *Registered*: + +```azurecli-interactive +az feature show --namespace Microsoft.ContainerService --name AutomaticSKUPreview +``` + +When the status reflects *Registered*, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: + +```azurecli-interactive +az provider register --namespace Microsoft.ContainerService +``` + +:::zone target="docs" pivot="azure-cli" + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. + +Create a resource group using the [az group create][az-group-create] command. + +```azurecli +az group create --name myResourceGroup --location eastus +``` + +The following sample output resembles successful creation of the resource group: + +```output +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null +} +``` + +## Create an AKS Automatic cluster + +To create an AKS Automatic cluster, use the [az aks create][az-aks-create] command. The following example creates a cluster named *myAKSAutomaticCluster* with Managed Prometheus and Container Insights integration enabled. + +```azurecli +az aks create \ + --resource-group myResourceGroup \ + --name myAKSAutomaticCluster \ + --sku automatic +``` + +After a few minutes, the command completes and returns JSON-formatted information about the cluster. 
+ +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using the Azure CLI, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. + +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` + +:::zone-end + +:::zone target="docs" pivot="azure-portal" + +## Create Automatic Kubernetes Cluster + +1. To create an AKS Automatic cluster, search for **Kubernetes Services**, and select **Automatic Kubernetes cluster** from the drop-down options. 
+ + :::image type="content" source="./media/quick-automatic-kubernetes-portal/browse-dropdown-options.png" alt-text="The screenshot of the entry point for creating an AKS Automatic cluster in the Azure portal."::: + +2. On the **Basics** tab, fill in all the mandatory fields required to get started: +Subscription, Resource Group, Cluster name, and Region + + :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-basics.png" alt-text="The screenshot of the Create - Basics Tab for an AKS Automatic cluster in the Azure portal."::: + + If the prerequisites aren't met and the subscription requires registration of the preview flags, there will be an error shown under the Subscription field: + + :::image type="content" source="./media/quick-automatic-kubernetes-portal/register.png" alt-text="The screenshot of the error shown when a subscription doesn't have preview flags registered while creating an AKS Automatic cluster in the Azure portal."::: + + +3. On the **Monitoring** tab, choose your monitoring configurations from Azure Monitor, Managed Prometheus, Managed Grafana, and/or configure alerts. Add tags (optional), and proceed to create the cluster. + + :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-monitoring.png" alt-text="The screenshot of the Monitoring Tab while creating an AKS Automatic cluster in the Azure portal."::: + +3. Get started with configuring your first application from GitHub and set up an automated deployment pipeline. + + :::image type="content" source="./media/quick-automatic-kubernetes-portal/automatic-overview.png" alt-text="The screenshot of the Get Started Tab on Overview Blade after creating an AKS Automatic cluster in the Azure portal."::: + + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. 
To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using the Azure portal, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. + +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` +:::zone-end + +:::zone target="docs" pivot="bicep" + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. 
+ +Create a resource group using the [az group create][az-group-create] command. + +```azurecli +az group create --name myResourceGroup --location eastus +``` + +The following sample output resembles successful creation of the resource group: + +```output +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null +} +``` + +## Review the Bicep file + +This Bicep file defines an AKS Automatic cluster. While in preview, you need to specify the *system nodepool* agent pool profile. + +```bicep +@description('The name of the managed cluster resource.') +param clusterName string = 'myAKSAutomaticCluster' + +@description('The location of the managed cluster resource.') +param location string = resourceGroup().location + +resource aks 'Microsoft.ContainerService/managedClusters@2024-03-02-preview' = { + name: clusterName + location: location + sku: { + name: 'Automatic' + } + properties: { + agentPoolProfiles: [ + { + name: 'systempool' + mode: 'System' + count: 3 + } + ] + } + identity: { + type: 'SystemAssigned' + } +} +``` + +For more information about the resource defined in the Bicep file, see the [**Microsoft.ContainerService/managedClusters**](/azure/templates/microsoft.containerservice/managedclusters?tabs=bicep&pivots=deployment-language-bicep) reference. + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. + + > [!IMPORTANT] + > The Bicep file sets the `clusterName` param to the string *myAKSAutomaticCluster*. If you want to use a different cluster name, make sure to update the string to your preferred cluster name before saving the file to your computer. + +1. Deploy the Bicep file using the Azure CLI. + + ```azurecli + az deployment group create --resource-group myResourceGroup --template-file main.bicep + ``` + + It takes a few minutes to create the AKS cluster. 
Wait for the cluster to be successfully deployed before you move on to the next step. + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using Bicep, you need to [assign one of the built-in roles][aks-entra-rbac-builtin-roles] such as `Azure Kubernetes Service RBAC Reader`, `Azure Kubernetes Service RBAC Writer`, `Azure Kubernetes Service RBAC Admin`, or `Azure Kubernetes Service RBAC Cluster Admin` to your users, scoped to the cluster or a specific namespace. Also make sure your users have the `Azure Kubernetes Service Cluster User` built-in role to be able to do run `az aks get-credentials`, and then get the kubeconfig of your AKS cluster using the `az aks get-credentials` command. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. 
+ +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` + +:::zone-end + + +## Deploy the application + +To deploy the application, you use a manifest file to create all the objects required to run the [AKS Store application](https://github.com/Azure-Samples/aks-store-demo). A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. The manifest includes the following Kubernetes deployments and services: + +:::image type="content" source="media/quick-kubernetes-deploy-portal/aks-store-architecture.png" alt-text="Screenshot of Azure Store sample architecture." lightbox="media/quick-kubernetes-deploy-portal/aks-store-architecture.png"::: + +- **Store front**: Web application for customers to view products and place orders. +- **Product service**: Shows product information. +- **Order service**: Places orders. +- **Rabbit MQ**: Message queue for an order queue. + +> [!NOTE] +> We don't recommend running stateful containers, such as Rabbit MQ, without persistent storage for production. These are used here for simplicity, but we recommend using managed services, such as Azure Cosmos DB or Azure Service Bus. + +1. Create a namespace `aks-store-demo` to deploy the Kubernetes resources into. + + ```bash + kubectl create ns aks-store-demo + ``` + +1. Deploy the application using the [kubectl apply][kubectl-apply] command into the `aks-store-demo` namespace. The YAML file defining the deployment is on [GitHub](https://github.com/Azure-Samples/aks-store-demo). 
+ + ```bash + kubectl apply -n aks-store-demo -f https://raw.githubusercontent.com/Azure-Samples/aks-store-demo/main/aks-store-ingress-quickstart.yaml + ``` + + The following sample output shows the deployments and services: + + ```output + statefulset.apps/rabbitmq created + configmap/rabbitmq-enabled-plugins created + service/rabbitmq created + deployment.apps/order-service created + service/order-service created + deployment.apps/product-service created + service/product-service created + deployment.apps/store-front created + service/store-front created + ingress/store-front created + ``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +1. Check the status of the deployed pods using the [kubectl get pods][kubectl-get] command. Make sure all pods are `Running` before proceeding. If this is the first workload you deploy, it may take a few minutes for [node auto provisioning][node-auto-provisioning] to create a node pool to run the pods. + + ```bash + kubectl get pods -n aks-store-demo + ``` + +1. Check for a public IP address for the store-front application. Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. + + ```bash + kubectl get ingress store-front -n aks-store-demo --watch + ``` + + The **ADDRESS** output for the `store-front` service initially shows empty: + + ```output + NAME CLASS HOSTS ADDRESS PORTS AGE + store-front webapprouting.kubernetes.azure.com * 80 12m + ``` + +1. Once the **ADDRESS** changes from blank to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. + + The following sample output shows a valid public IP address assigned to the service: + + ```output + NAME CLASS HOSTS ADDRESS PORTS AGE + store-front webapprouting.kubernetes.azure.com * 4.255.22.196 80 12m + ``` + +1. 
Open a web browser to the external IP address of your ingress to see the Azure Store app in action. + + :::image type="content" source="media/quick-kubernetes-deploy-cli/aks-store-application.png" alt-text="Screenshot of AKS Store sample application." lightbox="media/quick-kubernetes-deploy-cli/aks-store-application.png"::: + +## Delete the cluster + +If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. Run the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. + + ```azurecli + az group delete --name myResourceGroup --yes --no-wait + ``` + > [!NOTE] + > The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity, so you don't need to manually remove it. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster using [AKS Automatic][what-is-aks-automatic] and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. + +To learn more about AKS Automatic, continue to the introduction. 
+ +> [!div class="nextstepaction"] +> [Introduction to Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] + + + +[kubectl]: https://kubernetes.io/docs/reference/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[azure-resource-group]: /azure/azure-resource-manager/management/overview +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-group-create]: /cli/azure/group#az-group-create +[az-group-delete]: /cli/azure/group#az-group-delete +[node-auto-provisioning]: ../node-autoprovision.md +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[az-feature-register]: /cli/azure/feature#az_feature_register +[az-feature-show]: /cli/azure/feature#az_feature_show +[az-provider-register]: /cli/azure/provider#az_provider_register +[what-is-aks-automatic]: ../intro-aks-automatic.md +[Azure-Policy-RBAC-permissions]: /azure/governance/policy/overview#azure-rbac-permissions-in-azure-policy +[aks-entra-rbac]: /azure/aks/manage-azure-rbac +[aks-entra-rbac-builtin-roles]: /azure/aks/manage-azure-rbac#create-role-assignments-for-users-to-access-the-cluster +[availability-zones]: /azure/reliability/availability-zones-region-support + diff --git a/examples/KubeRay/Prerequisite-AzureCLIAndSub.md 
b/examples/Common/Prerequisite-AzureCLIAndSub.md similarity index 100% rename from examples/KubeRay/Prerequisite-AzureCLIAndSub.md rename to examples/Common/Prerequisite-AzureCLIAndSub.md diff --git a/examples/KubeRay/Prerequisites-DraftForAKS.md b/examples/Common/Prerequisites-DraftForAKS.md similarity index 100% rename from examples/KubeRay/Prerequisites-DraftForAKS.md rename to examples/Common/Prerequisites-DraftForAKS.md diff --git a/examples/KubeRay/Prerequisites-Helm.md b/examples/Common/Prerequisites-Helm.md similarity index 100% rename from examples/KubeRay/Prerequisites-Helm.md rename to examples/Common/Prerequisites-Helm.md diff --git a/examples/KubeRay/Prerequisites-Terraform.md b/examples/Common/Prerequisites-Terraform.md similarity index 100% rename from examples/KubeRay/Prerequisites-Terraform.md rename to examples/Common/Prerequisites-Terraform.md diff --git a/examples/KubeRay/deploy-kuberay.md b/examples/KubeRay/deploy-kuberay.md index ae978f82..1c6d6749 100644 --- a/examples/KubeRay/deploy-kuberay.md +++ b/examples/KubeRay/deploy-kuberay.md @@ -6,8 +6,8 @@ In this article, you configure and deploy a Ray cluster on Azure Kubernetes Serv ## Prerequisites * Review the [Ray cluster on AKS overview](./ray-overview.md) to understand the components and deployment process. -* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](Prerequisites-AzureCLIAndSub.md) -* Install [Draft for AKS](Prerequisites-DraftForAKS.md) - a tool to help containerize applications. TODO: Is Draft really needed - not sure it is since I ran some tests sucessfully without it. +* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisites-AzureCLIAndSub.md) +* Install [Draft for AKS](../Common/Prerequisites-DraftForAKS.md) - a tool to help containerize applications. TODO: Is Draft really needed - not sure it is since I ran some tests sucessfully without it. 
* Install [Helm](Prerequisites-Helm.md) - package manager for Kubernetes.
* Install [Terraform](Prerequisites-Terraform.md) - Infrastructure as Code management tool

diff --git a/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md b/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md
new file mode 100644
index 00000000..75b5e6f0
--- /dev/null
+++ b/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md
@@ -0,0 +1,23 @@
+Before starting to deploy a VM it is a good idea to check that availability exists in the region desired. This document explains how to do that.
+
+# Prerequisites
+
+* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisites-AzureCLIAndSub.md)
+
+# Configure the Environment
+
+We use environment variables to simplify commands, some of them will have been set in the above prerequisites, and echoed below for convenience. The remaining ones are set with defaults:
+
+```bash
+echo "ACTIVE_SUBSCRIPTION_ID=$ACTIVE_SUBSCRIPTION_ID"
+export AZURE_LOCATION=eastus
+export VM_SKU=Standard_D2_v2
+```
+
+# Check Availability
+
+We can use the az CLI to check availability of the desired SKU in the location selected with the currently active subscription as follows:
+
+```bash
+az vm list-skus --location $AZURE_LOCATION --subscription $ACTIVE_SUBSCRIPTION_ID --size $VM_SKU --output table
+```
From 0655d86e95b001bc12b5baf09f9c62a8280bf138 Mon Sep 17 00:00:00 2001
From: rgardler-msft
Date: Wed, 29 Jan 2025 10:39:41 -0800
Subject: [PATCH 06/31] A couple of minor adjustments/typos

---
 Dockerfile | 3 +--
 README.md  | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 83b00f77..535e7efe 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,6 @@
-# filepath: /home/rogardle/projects/InnovationEngine/Dockerfile
 FROM golang:1.20-alpine

-RUN apk update && apk add --no-cache make git openssh python3 py3-pip bash
+RUN apk update
RUN apk add --no-cache
--update \ bash \ cargo \ diff --git a/README.md b/README.md index 2d4e801d..93c69890 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ Once built you can run the container and connect to it. Innovation Engine will a document when you execute this command. ```bash -docker run -it ie . +docker run -it ie ``` You can override the start command if you want to take control immediately with: @@ -203,8 +203,8 @@ These tools are independent of Innovation Engine, however, if you build a contai ```bash docker run -it \ -e AZURE_OPENAI_API_KEY=$AZURE_OPENAI_API_KEY \ - -e AZURE_OPENAI_ENDPOINT=$AZURE_OPENAI_ENDPOINT \ - ie /bin/sh -c "python AuthoringTools/ada.py" + -e AZURE_OPENAI_ENDPOINT=$AZURE_OPENAI_ENDPOINT \ + ie /bin/sh -c "python AuthoringTools/ada.py" ``` From 30af7200c31cd39ddcf18d54ff51aa4193a39fb7 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 3 Feb 2025 17:47:17 -0800 Subject: [PATCH 07/31] more work in progress for testing prerequisites --- .gitignore | 2 +- README.md | 2 - docs/Authoring-With-Copilot.md | 34 + docs/prerequisiteExample.md | 3 +- .../AKS/getting-started-with-k8s-on-aks.md | 2184 +++++++++++++++++ .../Create-A-Service-Principle.md | 173 ++ .../Azure-CLI-Docs/Batch-Delete-Groups.md | 46 + .../Azure-CLI-Docs/Manager-Resource-Groups.md | 182 ++ .../Common/Prerequisite-AzureCLIAndSub.md | 30 + 9 files changed, 2652 insertions(+), 4 deletions(-) create mode 100644 docs/Authoring-With-Copilot.md create mode 100644 examples/AKS/getting-started-with-k8s-on-aks.md create mode 100644 examples/Authentication-And-Authorization/Create-A-Service-Principle.md create mode 100644 examples/Azure-CLI-Docs/Batch-Delete-Groups.md create mode 100644 examples/Azure-CLI-Docs/Manager-Resource-Groups.md diff --git a/.gitignore b/.gitignore index bb68ef68..3eed7663 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,4 @@ coverage.html coverage.out # Ignore git repos checkout out by examples -examples/*/.git +examples/awesome-aks diff --git a/README.md 
b/README.md
index 93c69890..48b35bde 100644
--- a/README.md
+++ b/README.md
@@ -207,8 +207,6 @@ docker run -it \
   ie /bin/sh -c "python AuthoringTools/ada.py"
 ```

-
-
 ## Contributing

 This is an open source project. Don't keep your code improvements,
diff --git a/docs/Authoring-With-Copilot.md b/docs/Authoring-With-Copilot.md
new file mode 100644
index 00000000..2fcd3896
--- /dev/null
+++ b/docs/Authoring-With-Copilot.md
@@ -0,0 +1,34 @@
+# Configure Copilot
+
+* CTRL+SHIFT+P
+* Type `settings.json`
+* Select `Preferences Open User Settings`
+
+Add the following to the `settings.json` file that opens.
+
+
+
+```json
+// A set of instructions that will be added to Copilot requests that generate code.
+// Instructions can come from:
+// - a file in the workspace: `{ "file": "fileName" }`
+// - text in natural language: `{ "text": "Use underscore for field names." }`
+//
+// Note: Keep your instructions short and precise. Poor instructions can degrade Copilot's quality and performance.
+"github.copilot.chat.codeGeneration.instructions": [
+    {
+        "text": "When writing markdown files each section should have an introductory paragraph, and optional code block and a summary paragraph."
+    },
+],
+```
+
+# Use Copilot
+
+
+
+* Create a new document
+* `CTRL+I`
+* Type "Outline an executable document which [Objective]"
+* Copilot will attempt to outline the document for you, providing heading titles and intro paragraphs
+* Review the document, if any section is missing or needs adjustment position the cursor at that point, hit `CTRL-I`, give the instruction
+* Work through the document creating the code blocks
\ No newline at end of file
diff --git a/docs/prerequisiteExample.md b/docs/prerequisiteExample.md
index 9952ee16..60154895 100644
--- a/docs/prerequisiteExample.md
+++ b/docs/prerequisiteExample.md
@@ -14,4 +14,5 @@ Now we will echo this to the console.
This will both serve to illustrate that th ```bash echo "Unique hash: $UNIQUE_HASH" -``` \ No newline at end of file +``` + diff --git a/examples/AKS/getting-started-with-k8s-on-aks.md b/examples/AKS/getting-started-with-k8s-on-aks.md new file mode 100644 index 00000000..b07398fc --- /dev/null +++ b/examples/AKS/getting-started-with-k8s-on-aks.md @@ -0,0 +1,2184 @@ + + +--- +# published: true # Optional. Set to true to publish the workshop (default: false) +# type: workshop # Required. +sidebar_position: 2 +title: Getting Started with Kubernetes on Azure Kubernetes Service (AKS) # Required. Full title of the workshop +# short_title: Getting Started with AKS # Optional. Short title displayed in the header +# description: This is a workshop for getting started AKS which was originally delivered at Microsoft Build 2023 Pre-day Workshop (PRE03) # Required. +# level: beginner # Required. Can be 'beginner', 'intermediate' or 'advanced' +authors: # Required. You can add as many authors as needed + - "Paul Yu" +contacts: # Required. Must match the number of authors + - "@pauldotyu" +# duration_minutes: 90 # Required. Estimated duration in minutes +# tags: kubernetes, azure, aks # Required. Tags for filtering and searching +# wt_id: WT.mc_id=containers-147656-pauyu +--- + +## Getting Started + +In this workshop, you will learn the basics of Kubernetes and how to package applications for delivery to Azure Kubernetes Service (AKS). The goal of this workshop is to cover as many application implementation details as possible to get you comfortable with hosting your apps on AKS. We will start with a simple application deployment and then progress to more complex scenarios by introducing integrations with other Azure services and open source tooling commonly used within cloud native apps. 
+ +## Objectives + +The objectives of this workshop are to: + +- Introduce you to the basics of Kubernetes and `kubectl` +- Deploy an application to Azure Kubernetes Service +- Securing application secrets using Azure Key Vault +- Persisting application data using Azure Disk Storage +- Exposing applications using the Istio Ingress Gateway +- Monitoring applications using Azure Monitor and the Prometheus/Grafana stack +- Scaling applications using KEDA + +## Prerequisites + +The following prerequisites are required to complete this workshop: + +- [Azure Subscription and Azure CLI](../Common/Prerequisite-AzureCLIAndSub.md) +- [Visual Studio Code](https://code.visualstudio.com/) +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) +- [Git](https://git-scm.com/) +- Bash shell (e.g. [Windows Terminal](https://www.microsoft.com/p/windows-terminal/9n0dx20hk701) with [WSL](https://docs.microsoft.com/windows/wsl/install-win10) or [Azure Cloud Shell](https://shell.azure.com)) + +## Setting up your environment + +To setup your own lab environment, you will need to run a Terraform script to provision the necessary resources in your Azure subscription. The steps below will walk you through the process. + +:::info[Important] + +Before you proceed, please ensure you have access to an Azure subscription with the ability to create resources and users in Azure Active Directory. If you do not have access to an Azure subscription, you can sign up for a [free account](https://azure.microsoft.com/free). + +::: + +1. Using a web browser, navigate to the [Azure Cloud Shell](https://shell.azure.com) +2. Ensure your Cloud Shell is set to Bash. If it is on PowerShell, click the drop down in the top left corner and select Bash. +3. Run the following commands to ensure you have all the necessary providers registered in your subscription. 
+ +```bash +providers=( + "Microsoft.Quota" + "Microsoft.Compute" + "Microsoft.ContainerRegistry" + "Microsoft.ContainerService" + "Microsoft.Network" + "Microsoft.ApiManagement" + "Microsoft.Monitor" + "Microsoft.AlertsManagement" + "Microsoft.Dashboard" + "Microsoft.App" +) + +for provider in "${providers[@]}"; do + provider_state=$(az provider show --namespace $provider --query "registrationState" -o tsv) + if [ "$provider_state" == "Registered" ]; then + echo "Provider $provider is already registered." + else + echo "Provider $provider is not registered. Registering now..." + az provider register --namespace $provider + fi +done +``` + +4. Run the following commands to ensure you have all the necessary features registered in your subscription. + + ```TODO:Removed bash marker to prevent execution as it casues errors + az feature register --namespace "Microsoft.ContainerService" --name "EnableWorkloadIdentityPreview" + az feature register --namespace "Microsoft.ContainerService" --name "AKS-GitOps" + az feature register --namespace "Microsoft.ContainerService" --name "AzureServiceMeshPreview" + az feature register --namespace "Microsoft.ContainerService" --name "AKS-KedaPreview" + az feature register --namespace "Microsoft.ContainerService" --name "AKS-PrometheusAddonPreview" + ``` + +5. This lab uses files made available in a GitHub repo. Clone or pull the repository: + +```bash +if [ -d "awesome-aks" ]; then + cd awesome-aks + git pull +else + git clone https://github.com/pauldotyu/awesome-aks + cd awesome-aks +fi +``` + +6. Using your terminal, open the repo and navigate to the **2023-05-23-msbuild-preday-aks-workshop** directory + +```bash +cd awesome-aks/2023-05-23-msbuild-preday-aks-workshop +``` + +7. Run the following command to create a **terraform.tfvars** file and populate it with the following content. 
+ +```bash +cd awesome-aks/2023-05-23-msbuild-preday-aks-workshop + +cat < terraform.tfvars +tenant_id = "$TENANT_ID" + +deployment_locations = [ + { + offset = 0 # adjust this to the number of deployments that have already been created in the previous set + count = 1 # adjust this to the number of deployments you want to create + location = "eastus" + vm_sku = "Standard_D4s_v4" + } +] +EOF +``` + +8. Run the `terraform init` command to get everything ready + +```bash +cd awesome-aks/2023-05-23-msbuild-preday-aks-workshop +terraform init +``` + +9. `terraform apply` will deploy the required resources (add `-auto-approve` to avoid the manual approval step, be cautious of using this). In 10-15 minutes, your lab environment should be ready to go. + +```bash +cd awesome-aks/2023-05-23-msbuild-preday-aks-workshop +terraform apply -auto-approve +``` + +If you now run the `terraform output` command, you should see your username, password, AKS cluster name and resource group name. + +```bash +cd awesome-aks/2023-05-23-msbuild-preday-aks-workshop +terraform output +``` + +--- + +### Kubernetes Fundamentals + +This section of the workshop will introduce you to the basics of Kubernetes. We'll be using [Azure Kubernetes Service (AKS)](https://azure.microsoft.com/products/kubernetes-service) to deploy and manage an [Azure Voting App](https://github.com/Azure-Samples/azure-voting-app-rust). + +#### Working with `kubectl` + +Kubernetes administrators will commonly interact with the Kubernetes API server using the [`kubectl` command line tool](https://kubernetes.io/docs/reference/kubectl/). As you progress through your cloud native journey, you will find that there are other tools available for deploying, managing, and monitoring Kubernetes clusters. However, basic knowledge of `kubectl` is essential. + +### Connecting to your AKS cluster + +An AKS cluster has been provisioned for you. Let's use the Azure CLI to download the credentials for the cluster. 
+ +Run the following command to set variables for your resource group and AKS cluster name. Don't forget to replace `` in the command below with the username you've been assigned. + +```bash +RG_NAME=rg-user +AKS_NAME=aks-user +``` + +Run the following command to download the credentials for your AKS cluster. + +```bash +az aks get-credentials --resource-group $RG_NAME --name $AKS_NAME +``` + +The command above will download the credentials for the cluster and store them in `~/.kube/config`. This file includes cluster certificate information and is used by `kubectl` to connect to the cluster. Since it does contain certificate information, it should be treated as a secret. + +##### `kubectl` basics + +To get some basic information about your cluster, run the following command: + +```bash +kubectl cluster-info +``` + +The `kubectl` tool allows to you to interact with a variety of Kubernetes clusters. + +You can see the list of clusters you have access to by running the following command: + +```bash +kubectl config get-contexts +``` + +If you have more than one context listed, you can switch between clusters by running the following command: + +```bash +kubectl config use-context +``` + +:::tip + +> Be sure to checkout the [`kubectl` Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) for a list of common commands and instructions on configuring your `kubectl` with and alias and enabling autocomplete. + +::: + +## Deploying your first app + +The `kubectl` tool allows you to interact with the Kubernetes API server imperatively or declaratively. When you use the imperative approach, you are telling Kubernetes what to do -- on the command line. When you use the declarative approach, you are telling Kubernetes what you want -- usually specified in a YAML formatted declaration file. + +Let's deploy our first app to Kubernetes using the imperative approach. 
+ +```bash +kubectl run nginx --image nginx +``` + +Here, we are telling Kubernetes to run a new Pod named `nginx` using the `nginx` image. + +A Pod is the smallest unit of deployment in Kubernetes. It is a group of one or more containers that share the same network and storage. In this case, we are running a single container using the `nginx` image. + +:::info + +> When you run multiple containers in a Pod, this is known as a [sidecar pattern](https://docs.microsoft.com/azure/architecture/patterns/sidecar). + +::: + +Let's see if our Pod is running. + +```bash +kubectl get pods +``` + +Click to expand output. You should see something like this: + +```text +NAME READY STATUS RESTARTS AGE +nginx 1/1 Running 0 7s +``` + +We can also get more information about our Pod by running the following command: + +```bash +kubectl describe pod nginx +``` + +This command will give us a lot of information about our Pod including the events that have occurred. + +To view container logs, run the following command: + +```bash +kubectl logs nginx +``` + +Now, let's take a look at how we can deploy our app using a declarative approach. + +Let's create a YAML manifest that describes our Pod. + +```bash +cat < nginx2.yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx2 +spec: + containers: + - name: nginx2 + image: nginx + resources: {} +EOF +``` + +[YAML](https://yaml.org/) is a human-readable data serialization language. It is commonly used for configuration files and in applications where data is being stored or transmitted. YAML is short for "YAML Ain't Markup Language". + +Next, let's deploy our Pod using the YAML manifest we just created. Don't worry if you don't understand the YAML file. We'll be covering that in more detail later. + +```bash +kubectl apply -f nginx2.yaml +``` + +Let's see if our Pod is running. 
+ +```bash +kubectl get pods +``` + +You should see something like this: + +```text +NAME READY STATUS RESTARTS AGE +nginx 1/1 Running 0 7m49s +nginx2 1/1 Running 0 3s +``` + +Here, we are telling Kubernetes that we want a Pod named `nginx2` using the `nginx` image. + +This is different from the imperative approach where we told Kubernetes to run a Pod named `nginx` using the `nginx` image. The declarative approach is preferred because it allows us to check our code into source control and track changes over time. + +The `kubectl apply` command is idempotent. This means that if you run the command multiple times, the result will be the same. If the resource already exists, it will be updated. If the resource does not exist, it will be created. + +:::info[Important] + +Before we move on, be sure to delete all pods so that we don't waste cluster resources. + +::: + +```bash +kubectl delete pods --all +``` + +--- + +### Deploying to AKS + +We'll be deploying the Azure Voting App to Azure Kubernetes Service (AKS). This is a simple web app that lets you vote for things and displays the vote totals. You may recognize this app from Microsoft Docs which allows you to vote for "Dogs" or "Cats". The example we'll be using is a slightly different in that it's been modified to allow you to vote for any two things you want based on the environment variables you set. + +The repo can be found here: [Azure-Samples/azure-voting-app-rust](https://github.com/Azure-Samples/azure-voting-app-rust). + +Also, you may have guessed by the repo name, this version of the app has been re-written in Rust 🦀 + +### Getting familiar with Azure Voting App + +This app uses PostgreSQL as the backend database. We'll be using Docker to package the app into a container image so that it can be deployed to AKS. 
+ +:::info + +If you have access to GitHub Codespaces, it is recommended that you open the repo in a Codespace and skip the next step of forking/cloning the repo and opening in a VS Code Dev Container + +::: + +If you are going to work from your local machine, start by forking, then cloning the repo to your local machine. When the repo has been cloned, navigate to the directory where you cloned the repo into and open VS Code. In VS Code, install the [Dev Container extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers), and click the "Reopen in Container" button. This will take a few minutes to complete. + +![Fork and clone repo](assets/clone-repo.png) + +Before we deploy the app to AKS, let's build and run it locally to make sure everything is working as expected. + +```bash +cargo run +``` + +This command will take a few minutes to complete the first time, but subsequent runs will be much faster. + +Once the app is running, you should be able to access it at http://localhost:8080. + +![Azure Voting App](assets/azure-voting-app.png) + +If you look at the **docker-compose.yml** file that is in the root of the repo, you'll see that the app is made up of two services: `app` and `db`. + +As the names suggest, the `app` service is the web front-end and the `db` service is the database. + +In the `app` service, you'll see that there are two environment variables defined: `FIRST_VALUE` and `SECOND_VALUE`. These are the options that will be displayed on the voting page. + +Before we move on, let's stop the app by pressing `Ctrl+C` in the terminal then run the following commands to re-authenticate to Azure from inside this Dev Container's terminal and connect to our AKS cluster. + +:::info[Important] + +> If you are in a new DevContainer or Codespace, run the following command to login to Azure. 
+ +::: + +```bash +az login +``` + +Run the following command to set variables for your resource group and AKS cluster name and don't forget to replace `` in the command below with the username you've been assigned. + +```bash +RG_NAME=rg-user +AKS_NAME=aks-user +``` + +Run the following command to download the credentials for your AKS cluster. + +```bash +az aks get-credentials --resource-group $RG_NAME --name $AKS_NAME +``` + +## Publishing the app to Azure Container Registry + +Before you can deploy our app to Kubernetes, you need to package the container image and push it to a container registry. You'll be using [Azure Container Registry (ACR)](https://azure.microsoft.com/products/container-registry) for this. + +There are a few different ways to push an image to ACR. We'll be using the `az acr build` command which will use [ACR Tasks](https://learn.microsoft.com/azure/container-registry/container-registry-tasks-overview?WT.mc_id=containers-105184-pauyu) to build the image and push it to ACR. + +Let's start by getting the name of your ACR instance. + +```bash +ACR_NAME=$(az resource list \ + --resource-group $RG_NAME \ + --resource-type Microsoft.ContainerRegistry/registries \ + --query "[0].name" \ + --output tsv) +``` + +
+ +> Make sure you are at the root of your repository then run the following command to build and push the image to ACR. + +
+ +```bash +az acr build \ + --registry $ACR_NAME \ + --image azure-voting-app:latest \ + --file Dockerfile . +``` + +
+ +> This command will take a few minutes to complete. Let's move on to the next step while it's running. + +
+ +## Generating YAML manifests + +Earlier, we learned that Kubernetes uses YAML manifests to describe the state of your cluster. + +In the previous section, we used `kubectl` to run a pod using both the imperative and declarative approaches. + +But, did you know that `kubectl` can also be used to generate YAML manifests for you? Let's take a look at how we can do that to generate a YAML file for our app. + +
+ +> Open a new terminal and make sure you are at the root of the repo then run the following command to set variables for your resource group and AKS cluster names. Be sure to replace `` with your assigned user number. + +
+ +```bash +RG_NAME=rg-user +AKS_NAME=aks-user +``` + +
+ +> Run the following command to get the name of your ACR. + +
+ +```bash +ACR_NAME=$(az resource list \ + --resource-group $RG_NAME \ + --resource-type Microsoft.ContainerRegistry/registries \ + --query "[0].name" \ + --output tsv) +``` + +
+ +> Run the following command to create a new directory and navigate into it. + +
+ +```bash +mkdir pre03 +cd pre03 +``` + +
+ +> Run the following command to generate a YAML manifest using `kubectl`. + +
+ +```bash +kubectl create deploy azure-voting-app \ + --image $ACR_NAME.azurecr.io/azure-voting-app:latest \ + --port=8080 \ + --dry-run=client \ + --output yaml > azure-voting-app-deployment.yaml +``` + +The `--dry-run=client` flag combined with the `--output yaml` flag tells `kubectl` to generate the YAML file but not actually run the command. + +This is useful because it allows us to see what the YAML file will look like before we actually run it. By redirecting the output to a file, we can save the YAML file to disk. + +If you open up the YAML file, you'll see that most of the details have been filled in for you 🥳 + +
+ +> Did you notice that we are creating a **Deployment** resource instead of a **Pod** resource? This is because we want to scale our app up and down. If we were to use a **Pod** resource, we can only run a single instance of our app. With a **Deployment** resource, we can run multiple instances of our app and Kubernetes will automatically restart them if they fail. + +
+ +## Configuring apps using environment variables + +The base YAML file that was generated for us is a good starting point, but we need to make a few changes to it before we can deploy it to AKS. The first thing we need to do is add the environment variables to configure the app. + +But wait, we don't know where exactly to put the environment variables in the YAML file. Never fear, `kubectl` is here! + +
+ +> Run the following `kubectl explain` command to get more information about Deployments. + +
+ +```bash +kubectl explain deploy.spec.template.spec.containers +``` + +Here, we are using `kubectl explain` to get information about the Deployment resource. We are then drilling down into the `spec.template.spec.containers` section to get information about the `containers` property. + +
+ +> You can traverse through all the Deployment properties in this way to get more information about them. Additionally, you can also use `kubectl explain` to get more information about other Kubernetes resources. +> +> To see a list of all resources that can be explained, run the following command: + +
+ +```bash +kubectl api-resources +``` + +
+ +> We can see that the `containers` object has a `env` property which is an array of environment variables. If we dig a little deeper, we can see how to define environment variables. + +
+ +```bash +kubectl explain deploy.spec.template.spec.containers.env +``` + +
+ +> Now that we know where to put the environment variables, let's add them to the YAML file. Open the `azure-voting-app-deployment.yaml` file, place your cursor after the `resources: {}` line, and add the following block of code. + +
+ +```yaml +env: + - name: FIRST_VALUE + value: "Dogs" + - name: SECOND_VALUE + value: "Cats" +``` + +
+ +> YAML is very sensitive to indentation. Make sure you indent the environment variables exactly as shown above. The resulting YAML file should look like this: + +
+ +
+Click to expand output + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: azure-voting-app + name: azure-voting-app +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-app + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-app + spec: + containers: + - image: .azurecr.io/azure-voting-app:latest + name: azure-voting-app + resources: {} + env: + - name: FIRST_VALUE + value: "Dogs" + - name: SECOND_VALUE + value: "Cats" + - name: DATABASE_SERVER + value: azure-voting-db + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: password +status: {} +``` + +
+ +## Securing credentials using "Secrets" + +We also need database credentials to be able to connect to the database. We could add them to the YAML file, but that would mean that they would be stored in plain text. This is not a good idea because anyone who has access to the YAML file would be able to see the credentials. Instead, we are going to use a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) to store the credentials in the cluster. + +
+ +> Run the following command to create a secret with two keys to store the database username and password. + +
+ +```bash +kubectl create secret generic azure-voting-db-secrets \ + --from-literal=username=postgres \ + --from-literal=password=mypassword +``` + +Now that we have created the secret, we need to tell Kubernetes to use it. We can do this by adding a few more environment variables to the `containers` object. + +But, instead of directly keying in the value as we did with "Dogs" and "Cats" above, we can use the `valueFrom` property to point to our Kubernetes secret. + +
+ +> In the `azure-voting-app-deployment.yaml` file, add the following YAML to the YAML file directly below the `SECOND_VALUE` environment variable. + +
+ +```yaml +- name: DATABASE_SERVER + value: azure-voting-db +- name: DATABASE_USER + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: username +- name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: password +``` + +Your `azure-voting-app-deployment.yaml` file should now look like this: + +
+Click to expand output + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: azure-voting-app + name: azure-voting-app +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-app + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-app + spec: + containers: + - image: .azurecr.io/azure-voting-app:latest + name: azure-voting-app + ports: + - containerPort: 8080 + resources: {} + env: + - name: FIRST_VALUE + value: "Dogs" + - name: SECOND_VALUE + value: "Cats" + - name: DATABASE_SERVER + value: "azure-voting-db" + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: password +status: {} +``` + +
+ +Let's move on to configure the PostgreSQL database deployment. The process of creating the YAML will be very similar to what we did for the Azure Voting App deployment. + +
+ +> Using `kubectl`, create a file called `azure-voting-db-deployment.yaml`. + +
+ +```bash +kubectl create deployment azure-voting-db \ + --image=postgres \ + --dry-run=client \ + -o yaml > azure-voting-db-deployment.yaml +``` + +
+ +> Open the `azure-voting-db-deployment.yaml` file and add the following YAML to it (just below the `resources` property). + +
+ +```yaml +env: + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: username + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: password +``` + +Your `azure-voting-db-deployment.yaml` file should now look like this: + +
+Click to expand output + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: azure-voting-db + name: azure-voting-db +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-db + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-db + spec: + containers: + - image: postgres + name: postgres + resources: {} + env: + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: username + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: azure-voting-db-secrets + key: password +status: {} +``` + +
+ +
+ +> Run the following command to create the PostgreSQL database deployment. + +
+ +```bash +kubectl apply -f azure-voting-db-deployment.yaml +``` + +## Exposing Deployments with Services + +The front end pod will need to be able to connect to the database pod. We could use the database pod's IP address to connect, but that will not be resilient since there is no guarantee the database pod will have the same IP address when it is recreated due to maintenance or failure. Remember, pods are ephemeral and are given random IP addresses as they are created. + +So we'll need to create a [Service](https://kubernetes.io/docs/concepts/services-networking/service/) for the database pod. Think of a service like an internal load balancer. This will give the front end app a single point of entry to connect to the database. + +We can use the same technique of creating a YAML manifest for the service using `kubectl`. + +`kubectl` allows you to imperatively create a service using the `kubectl expose` command. + +
+ +> Run the following command to create a service YAML manifest for the PostgreSQL database deployment. + +
+ +```bash +kubectl expose deployment azure-voting-db \ + --port=5432 \ + --target-port=5432 \ + --name=azure-voting-db \ + --dry-run=client \ + -o yaml > azure-voting-db-service.yaml +``` + +
+ +> Run the following command to apply the service YAML manifest for the PostgreSQL database deployment. + +
+ +```bash +kubectl apply -f azure-voting-db-service.yaml +``` + +
+ +> Before running the next step below, make sure your container image has completed building and pushing to ACR; otherwise, you will run into a "container image not found" error. + +
+ +
+ +> Run the following command to create a deployment for the Azure Voting App. + +
+ +```bash +kubectl apply -f azure-voting-app-deployment.yaml +``` + +
+ +> Run the following command to create a service YAML manifest for the Azure Voting App deployment. + +
+ +```bash +kubectl expose deployment azure-voting-app \ + --port=8080 \ + --target-port=8080 \ + --name=azure-voting-app \ + --dry-run=client \ + -o yaml > azure-voting-app-service.yaml +``` + +
+ +> Run the following command to apply the service YAML manifest for the Azure Voting App deployment. + +
+ +```bash +kubectl apply -f azure-voting-app-service.yaml +``` + +Now that we have deployed the Azure Voting App and the PostgreSQL database, we can check to see if they are running. + +
+ +> Run the following command to get a list of deployments, pods, and services. + +
+ +```bash +kubectl get deployments,pods,services +``` + +
+Click to expand output + +```text +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/azure-voting-app 1/1 1 1 27m +deployment.apps/azure-voting-db 1/1 1 1 27m + +NAME READY STATUS RESTARTS AGE +pod/azure-voting-app-6bc9446ddb-xvdgc 1/1 Running 0 10m +pod/azure-voting-db-5666f7fc58-nph78 1/1 Running 0 27m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/azure-voting-app ClusterIP 10.0.185.0 8080/TCP 22s +service/azure-voting-db ClusterIP 10.0.13.23 5432/TCP 3m +service/kubernetes ClusterIP 10.0.0.1 443/TCP 171m +``` + +
+ +The application and services are now running, but we can't access it yet. If you noticed, there is no way to access the application from outside the cluster. We can temporarily connect to the service by using the `kubectl port-forward` command for now. + +
+ +> Run the following command to expose the application. + +
+ +```bash +kubectl port-forward service/azure-voting-app 8080:8080 +``` + +
+ +> Kubernetes will now forward all traffic from port 8080 on your local machine to port 8080 on the `azure-voting-app` service. + +
+ +Now that we have exposed the application, we can access it from our local machine. Open a browser and navigate to [http://localhost:8080](http://localhost:8080). + +You should see the Azure Voting App. + +![Azure Voting App on AKS](assets/azure-voting-app-on-aks.png) + +
+ +> Press `Ctrl+C` to stop the port forwarding. We'll expose the application in a more permanent way later. + +
+ +--- + +## Dealing with secrets + +The dirty secret about Kubernetes secrets is that they are not really secrets. They are just base64 encoded strings. Anyone with access to the cluster can decode them and see the actual value. This is not a problem if you are limiting access to your cluster. + +But remember, anyone with access to your cluster can see your secrets! + +
+ +> Run the following command to get the `password` secret we saved in the cluster. + +
+ +```bash +kubectl get secrets azure-voting-db-secrets -o jsonpath='{.data.password}' +``` + +We can decode the output by using the `base64 --decode` command. + +
+ +> Run the following command to decode the `password` secret. + +
+ +```bash +kubectl get secrets azure-voting-db-secrets -o jsonpath='{.data.password}' | base64 --decode +``` + +There it is.. the secret is out. Anyone with access to the cluster can see the secret 😨 + +
+ +> Run the following command to delete the `azure-voting-db-secrets` secret. We'll create a new one next. + +
+ +```bash +kubectl delete secret azure-voting-db-secrets +``` + +### Securely storing secrets + +There are a few ways to store secrets in a more secure manner. One way is to use [Azure Key Vault](https://azure.microsoft.com/services/key-vault/). + +Your lab environment already has an Azure Key Vault created for you. + +
+ +> Run the following command to get the name of your Azure Key Vault. + +
+ +```bash +AKV_NAME=$(az resource list \ + --resource-group $RG_NAME \ + --resource-type Microsoft.KeyVault/vaults \ + --query "[0].name" -o tsv) +``` + +With the name of your Azure Key Vault, you can now store your secrets in the Azure Key Vault. + +
+ +> Run the following command to add the database username as a secret in the Azure Key Vault. + +
+ +```bash +az keyvault secret set \ + --vault-name $AKV_NAME \ + --name database-user \ + --value postgres +``` + +
+ +> Run the following command to add the database password as a secret in the Azure Key Vault. + +
+ +```bash +az keyvault secret set \ + --vault-name $AKV_NAME \ + --name database-password \ + --value postgres +``` + +### Using the Azure Key Vault secrets in Kubernetes + +You AKS cluster has also been provisioned with the [Secret Store CSI driver](https://secrets-store-csi-driver.sigs.k8s.io/) addon. This allows you to mount secrets from the Azure Key Vault as [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) in your pods. + +
+ +> To verify that the Secret Store CSI driver addon is installed in your cluster, run the following command: + +
+ +```bash +kubectl get pods \ + --namespace kube-system \ + --selector 'app in (secrets-store-csi-driver, secrets-store-provider-azure)' +``` + +
+Click to expand output + +You should see something like this: + +```text +NAME READY STATUS RESTARTS AGE +aks-secrets-store-csi-driver-dnxf5 3/3 Running 0 3m35s +aks-secrets-store-csi-driver-nf5h8 3/3 Running 0 3m35s +aks-secrets-store-csi-driver-v4bql 3/3 Running 0 3m35s +aks-secrets-store-provider-azure-82nps 1/1 Running 0 3m35s +aks-secrets-store-provider-azure-s6lbd 1/1 Running 0 3m35s +aks-secrets-store-provider-azure-tcc7f 1/1 Running 0 3m35s +``` + +
+ +### Creating a ServiceAccount + +In order to use the Secret Store CSI driver, we need to create a SecretProviderClass. This is a Kubernetes object that tells the Secret Store CSI driver which secrets to mount and where to mount them. + +The authentication to the Azure Key Vault will be implemented using [workload identity](https://learn.microsoft.com/azure/aks/csi-secrets-store-identity-access#access-with-an-azure-ad-workload-identity-preview?WT.mc_id=containers-105184-pauyu). This will allow the pod to use an Azure user-assigned managed identity to authenticate to the Azure Key Vault. + +To do this, we need to create a [ServiceAccount](https://kubernetes.io/docs/concepts/security/service-accounts/), link it to the Azure managed identity, and attach it to the pod. + +
+
+> Run the following command to get the client ID for the user-assigned managed identity.
+
+
+ +```bash +USER_ASSIGNED_CLIENT_ID=$(az identity show \ + --resource-group $RG_NAME \ + --name $AKS_NAME-identity \ + --query clientId -o tsv) +``` + +Next, we need to create a ServiceAccount and annotate it with the Azure managed identity client ID. + +
+ +> Set some variables we will use to create our ServiceAccount manifest. We need the namespace name that your app is deployed into and a service account name. We'll use the default namespace and `azure-voting-app-serviceaccount` for the service account name. + +
+ +```bash +SERVICE_ACCOUNT_NAMESPACE=default +SERVICE_ACCOUNT_NAME=azure-voting-app-serviceaccount +``` + +
+ +> Now run the following command to create the ServiceAccount manifest using the values we've set above. + +
+
+```bash
+cat <<EOF > azure-voting-app-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  annotations:
+    azure.workload.identity/client-id: ${USER_ASSIGNED_CLIENT_ID}
+  labels:
+    azure.workload.identity/use: "true"
+  name: ${SERVICE_ACCOUNT_NAME}
+  namespace: ${SERVICE_ACCOUNT_NAMESPACE}
+EOF
+```
+
+
+ +> Run the following command to apply the ServiceAccount manifest. + +
+ +```bash +kubectl apply -f azure-voting-app-serviceaccount.yaml +``` + +### Creating a SecretProviderClass + +Now, we need to create a SecretProviderClass which will tell the Secret Store CSI driver which secrets to mount and where to retrieve them from. Here we need the `clientID` of the Azure managed identity, the name of the Azure Key Vault, and the tenant ID of the Azure Key Vault. + +We should have the `clientID` and the Azure Key Vault name from steps above. To get the tenant ID, we can use the `az identity show` command again. + +
+
+> Run the following command to retrieve the `tenantId`.
+
+
+ +```bash +TENANT_ID=$(az identity show \ + --resource-group $RG_NAME \ + --name $AKS_NAME-identity \ + --query tenantId -o tsv) +``` + +
+
+> Run the following command to create a YAML manifest for the SecretProviderClass.
+
+
+
+```bash
+cat <<EOF > azure-voting-app-secretproviderclass.yaml
+apiVersion: secrets-store.csi.x-k8s.io/v1
+kind: SecretProviderClass
+metadata:
+  name: azure-keyvault-secrets # This needs to be unique per namespace
+spec:
+  provider: azure
+  parameters:
+    usePodIdentity: "false"
+    useVMManagedIdentity: "false"
+    clientID: "${USER_ASSIGNED_CLIENT_ID}" # Setting this to use workload identity
+    keyvaultName: ${AKV_NAME} # Set to the name of your key vault
+    objects: |
+      array:
+        - |
+          objectName: database-user # The name of the secret in the key vault
+          objectType: secret # The type of the secret in the key vault
+        - |
+          objectName: database-password
+          objectType: secret
+    tenantId: "${TENANT_ID}" # The tenant ID of the key vault
+EOF
+```
+
+
+ +> The `objects` property in the manifest is an array of objects that tells the Secret Store CSI driver which secrets to pull out of the Azure Key Vault. +> +> The `clientID` property tells the Secret Store CSI driver which managed identity to use to authenticate to the Azure Key Vault. + +
+ +
+ +> Run the following command to apply the SecretProviderClass manifest. + +
+ +```bash +kubectl apply -f azure-voting-app-secretproviderclass.yaml +``` + +### Updating the database deployment + +Finally, we need to update our database and app deployments to use the ServiceAccount and mount the secrets into each pod as files. + +Our application code is written to read secrets from files, so all we need to do is make these files available to the pod on a path that the application is expecting. + +In this case, secret files are expected to be in the `/mnt/secrets-store` directory. + +
+ +> Open the `azure-voting-db-deployment.yaml` file and replace your entire `env:` block with this. + +
+ +```yaml +env: + - name: POSTGRES_USER_FILE + value: "/mnt/secrets-store/database-user" + - name: POSTGRES_PASSWORD_FILE + value: "/mnt/secrets-store/database-password" +``` + +
+ +> Directly underneath the `env:` block add this to mount the secrets into the container. + +
+ +```yaml +volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true +``` + +
+ +> Next add a new line after the `volumeMounts:` block and add the code below to enable the pod to use the ServiceAccount and add a volume to mount the secrets into. Make sure both `serviceAccountName:` and `volumes:` are indented to the same level as `containers:`. + +
+ +```yaml +serviceAccountName: azure-voting-app-serviceaccount +volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets +``` + +Here, we are telling Kubernetes to use the `azure-voting-app-serviceaccount` ServiceAccount and attach a volume to the pod using the `azure-keyvault-secrets` SecretProviderClass. + +The `azure-keyvault-secrets` SecretProviderClass will tell the Secret Store CSI driver to mount the secrets from the Azure Key Vault into the pod. + +Your final `azure-voting-db-deployment.yaml` file should look like this. + +
+Click to expand code + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: azure-voting-db + name: azure-voting-db +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-db + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-db + spec: + containers: + - image: postgres + name: postgres + resources: {} + env: + - name: POSTGRES_USER_FILE + value: "/mnt/secrets-store/database-user" + - name: POSTGRES_PASSWORD_FILE + value: "/mnt/secrets-store/database-password" + volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true + serviceAccountName: azure-voting-app-serviceaccount + volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets +status: {} +``` + +
+ +### Updating the app deployment + +Let's do the same for the app deployment. + +
+ +> Open the `azure-voting-app-deployment.yaml` file and do the same thing for the app deployment. Replace your entire `env:` block with this. + +
+ +```yaml +env: + - name: FIRST_VALUE + value: "Dogs" + - name: SECOND_VALUE + value: "Cats" + - name: DATABASE_SERVER + value: "azure-voting-db" +``` + +
+ +> Add this to mount the secrets into the container. + +
+ +```yaml +volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true +``` + +
+ +> Finally add this to enable the pod to use the service account and add a volume to mount the secrets into. + +
+ +```yaml +serviceAccountName: azure-voting-app-serviceaccount +volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets +``` + +
+Click to expand code + +The updated YAML to look like the following: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + app: azure-voting-app + name: azure-voting-app +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-app + strategy: {} + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-app + spec: + containers: + - image: .azurecr.io/azure-voting-app:latest + name: azure-voting-app + ports: + - containerPort: 8080 + resources: {} + env: + - name: FIRST_VALUE + value: "Dogs" + - name: SECOND_VALUE + value: "Cats" + - name: DATABASE_SERVER + value: "azure-voting-db" + volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true + serviceAccountName: azure-voting-app-serviceaccount + volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets +status: {} +``` + +
+ +### Deploying the updated YAML files + +
+ +> Deploy the updated YAML files to your cluster. + +
+ +```bash +kubectl apply -f azure-voting-db-deployment.yaml +kubectl apply -f azure-voting-app-deployment.yaml +``` + +
+ +> Check the status of the Deployments, Pods, and Services. You should see the following: + +
+ +```bash +kubectl get deployments,pods,services +``` + +
+Click to expand output + +```text +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/azure-voting-app 1/1 1 1 23m +deployment.apps/azure-voting-db 1/1 1 1 23m + +NAME READY STATUS RESTARTS AGE +pod/azure-voting-app-756dc858f8-b4rkx 2/2 Running 0 22m +pod/azure-voting-db-59f4d48797-djt4z 2/2 Running 0 23m + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/azure-voting-app ClusterIP 10.0.110.128 8080/TCP 10h +service/azure-voting-db ClusterIP 10.0.58.27 5432/TCP 10h +service/kubernetes ClusterIP 10.0.0.1 443/TCP 12h +``` + +
+ +
+ +> Now that secrets are pulled from Azure KeyVault using the Secrets Store CSI driver and mounted directly into the pods, the secrets end up as files in the pod which no one else can read. The base container image that we used to host the app does not have shell access; therefore, no one can interactively log into the container and read these secret files 😎🔒 + +
+ +
+ +> Run the following command to enable port forwarding to the app service again to see if the app is working. + +
+ +```bash +kubectl port-forward service/azure-voting-app 8080:8080 +``` + +Open a browser and navigate to http://localhost:8080. You should see the voting app is working again. + +After testing stop the app by pressing `Ctrl+C` in the terminal. + +--- + +## Persisting data + +Databases need to store data, but pods are ephemeral. If the database pod is deleted or restarted, the data will be lost. + +To illustrate the problem, let's see what happens when we restart the database and app pods. + +
+ +> Run the following command to delete the database and app pods. Kubernetes will automatically restart them. + +
+ +```bash +kubectl delete pod --all +``` + +Wait for the pods to restart and then run the `kubectl port-forward` command again, and refresh the browser. You should see that the votes have been reset to 0 😭 + +### Creating a PVC for PGDATA + +When a PostgreSQL container is created, its data (`PGDATA`) points to a local directory (e.g., `/var/lib/postgresql/data`). When the pod crashes or restarts, the container starts with a clean slate and the data is gone. + +This can be solved by leveraging persistent storage; more specifically, by taking advantage of the [Azure CSI drivers and storage classes](https://learn.microsoft.com/azure/aks/csi-storage-drivers?WT.mc_id=containers-105184-pauyu) that have been pre-deployed into your AKS cluster. + +
+ +> Run the following command to see the storage classes that are available in your cluster. + +
+ +```bash +kubectl get storageclasses +``` + +
+Click to expand output + +```text +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +azurefile file.csi.azure.com Delete Immediate true 5h22m +azurefile-csi file.csi.azure.com Delete Immediate true 5h22m +azurefile-csi-premium file.csi.azure.com Delete Immediate true 5h22m +azurefile-premium file.csi.azure.com Delete Immediate true 5h22m +default (default) disk.csi.azure.com Delete WaitForFirstConsumer true 5h22m +managed disk.csi.azure.com Delete WaitForFirstConsumer true 5h22m +managed-csi disk.csi.azure.com Delete WaitForFirstConsumer true 5h22m +managed-csi-premium disk.csi.azure.com Delete WaitForFirstConsumer true 5h22m +managed-premium disk.csi.azure.com Delete WaitForFirstConsumer true 5h22m +``` + +
+ +Typically for persistent storage, you would create a [Persistent Volume (PV)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and [Persistent Volume Claim (PVC)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) to request storage. However, the Azure CSI drivers allow you to create a PVC and have the storage classes create the PV for you using Azure Storage. + +We'll create a PVC using the `managed-csi` storage class. This will create a managed disk in Azure. + +
+ +> Create a new `azure-voting-app-pvc.yaml` manifest. + +
+
+```bash
+cat <<EOF > azure-voting-app-pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-azuredisk
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  storageClassName: managed-csi
+EOF
+```
+
+
+ +> Apply the manifest to create the PVC. + +
+ +```bash +kubectl apply -f azure-voting-app-pvc.yaml +``` + +### Updating the database manifest to be a StatefulSet and use the PVC + +With the PVC created, we can now update the `azure-voting-db-deployment.yaml` manifest to use it. + +
+ +> Now that we are using a PVC, we should update our database manifest to use a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) instead of a Deployment. This will ensure that the PVC is not deleted when the pod is deleted. + +
+ +
+ +> Open the `azure-voting-db-deployment.yaml` manifest and change `kind: Deployment` to `kind: StatefulSet`. +> +> Also, since we are using a StatefulSet, we need to remove the ` strategy: {}` section from the manifest. +> +> Next, add an additional volume to the pod. You should already have a `volumes` section in the YAML, add the following YAML to the end of the `volumes` section. + +
+ +```yaml +- name: azure-voting-db-data + persistentVolumeClaim: + claimName: pvc-azuredisk +``` + +
+ +> Also in the `azure-voting-db-deployment.yaml` manifest, add a volume mount to the container definition. You should already have a `volumeMounts` section in the YAML. Add the following YAML to the end of the `volumeMounts` section. + +```yaml +- name: azure-voting-db-data + mountPath: "/var/lib/postgresql/data" + subPath: "data" +``` + +
+ +
+ +> The `subPath` property allows us to mount a subdirectory of the volume into the container. + +
+ +Your `azure-voting-db-deployment.yaml` file should now look like this: + +
+Click to expand code + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: null + labels: + app: azure-voting-db + name: azure-voting-db +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-db + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-db + spec: + containers: + - image: postgres + name: postgres + resources: {} + env: + - name: POSTGRES_USER_FILE + value: "/mnt/secrets-store/database-user" + - name: POSTGRES_PASSWORD_FILE + value: "/mnt/secrets-store/database-password" + volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true + - name: azure-voting-db-data + mountPath: "/var/lib/postgresql/data" + subPath: "data" + serviceAccountName: azure-voting-app-serviceaccount + volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets + - name: azure-voting-db-data + persistentVolumeClaim: + claimName: pvc-azuredisk + +status: {} +``` + +
+ +
+ +> Run the following command to delete the original db deployment and deploy a new statefulset. + +
+ +```bash +kubectl delete deploy azure-voting-db +``` + +
+ +> Now run the following command to apply the updated manifest. + +
+ +```bash +kubectl apply -f azure-voting-db-deployment.yaml +``` + +
+ +> Run the following command to see the status of the PVC. + +
+ +```bash +kubectl get pvc +``` + +
+Click to expand output + +You should see the following output with a `STATUS` of `Bound`. This means the PVC has been successfully created and is ready to be used by the pod. + +```bash +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +pvc-azuredisk Bound pvc-e25b6853-21c9-493c-8d19-f4bae2e29be8 10Gi RWO managed-csi 2m9s +``` + +
+ +
+ +> Run the following command to check the status of the database pod. + +
+ +```bash +kubectl get pod -l app=azure-voting-db +``` + +
+ +> When the database pod is running, use the `kubectl port-forward` command to access the app again. +> +> Refresh the browser, add some votes, then delete the pods as we did at the beginning of this section. + +
+
+When you refresh the browser, you should see that the vote data has persisted even though the pods were deleted 😎
+
+---
+
+## Sharing your app
+
+Up until now, we've been accessing our app using port forwarding. This is great for testing, but not very useful if you want users to use your app.
+
+To expose your app to users, we can leverage the newly announced [Istio service mesh add-on for AKS](https://learn.microsoft.com/azure/aks/istio-deploy-addon?WT.mc_id=containers-105184-pauyu). Istio is a service mesh that provides a lot of useful features, including [security, observability, traffic management, and more](https://istio.io/latest/docs/concepts/). We won't be using all the features of Istio. We will, however, leverage the [Ingress Gateway](https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/) to expose our app outside of the cluster.
+
+### Setting up Istio
+
+The Istio add-on has already been installed in your AKS cluster with an external ingress gateway enabled.
+
+
+ +> If you run the following command, you should see the Ingress Gateway service has been provisioned using a Load Balancer and it has an external IP address. + +
+ +```bash +kubectl get service -n aks-istio-ingress +``` + +
+Click to expand output + +Note the `NAME` and `EXTERNAL-IP` of the service. The `NAME` will be used when we create our Istio resources to expose our app and the `EXTERNAL-IP` address will be used to access our app. + +```text +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +aks-istio-ingressgateway-external LoadBalancer 10.0.182.197 20.252.61.166 15021:30831/TCP,80:30738/TCP,443:31546/TCP 40m +``` + +
+ +Istio works by injecting a sidecar container into each pod. This sidecar container is responsible for handling all the traffic to and from containers in the pod. This sidecar can be manually injected in your deployments or you can tell Istio to automatically inject Istio sidecars. + +
+ +> Label the `default` namespace so that Istio will automatically inject the sidecar into our pods. + +
+ +```bash +kubectl label namespace default istio.io/rev=asm-1-17 +``` + +Our deployments do not have a sidecar container. Let's redeploy our manifests to trigger Istio to inject sidecar containers into our pods. + +
+ +> Run the following commands to delete the app. + +
+ +```bash +kubectl delete -f azure-voting-db-deployment.yaml +kubectl delete -f azure-voting-app-deployment.yaml +``` + +
+ +> Now we can run the following commands to re-deploy the app. + +
+ +```bash +kubectl apply -f azure-voting-db-deployment.yaml +kubectl apply -f azure-voting-app-deployment.yaml +``` +
+ +> With our namespace labeled, Istio will automatically inject the sidecar container into our pods. + +
+ +
+ +> Run the following command to see the status of the pods. You should now see each pod is running two containers, the app container and the Istio sidecar container. + +
+ +```bash +kubectl get pods +``` + +
+Click to expand output + +```text +NAME READY STATUS RESTARTS AGE +azure-voting-app-777cbb5494-8tnc7 2/2 Running 0 44s +azure-voting-db-0 2/2 Running 0 46s +azure-voting-db-765c8d56c4-snq96 1/1 Running 0 4m10s +``` + +
+ +### Exposing the app using the Istio Ingress Gateway + +Now that we have Istio installed and our app is running with the Istio sidecar, we can expose our app to the world using the Ingress Gateway. To do this, we need to implement two custom resources that got installed in the AKS cluster when Istio was installed. We will use the [Gateway](https://istio.io/latest/docs/reference/config/networking/gateway/) and [Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/) resources to route traffic to our app. + +
+ +> Run the following commands to create the Istio Ingress Gateway resource. + +
+
+```bash
+cat <<EOF > azure-voting-app-servicemesh.yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+  name: azure-voting-app-gateway
+spec:
+  selector:
+    istio: aks-istio-ingressgateway-external
+  servers:
+    - port:
+        number: 80
+        name: http
+        protocol: HTTP
+      hosts:
+        - "*"
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  name: azure-voting-app-virtualservice
+spec:
+  hosts:
+    - "*"
+  gateways:
+    - azure-voting-app-gateway
+  http:
+    - route:
+        - destination:
+            host: azure-voting-app
+            port:
+              number: 8080
+EOF
+```
+
+Here, we are creating a `Gateway` resource that will route traffic to our app using the `aks-istio-ingressgateway-external` service. This service was automatically created for you when the Istio Ingress Gateway was deployed. The gateway will listen on port 80 and route traffic to any host.
+
+The manifest above also creates a `VirtualService` resource that will route traffic to our backend `Service` resource via the Gateway.
+
+
+ +> Run the following command to apply the Istio Ingress Gateway resource. + +
+ +```bash +kubectl apply -f azure-voting-app-servicemesh.yaml +``` + +
+ +> Run the following command to get the IP address of the Istio Ingress Gateway. + +
+ +```bash +INGRESS_IP=$(kubectl get svc -n aks-istio-ingress aks-istio-ingressgateway-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +``` + +
+ +> You can run the following command to get the URL of your app. + +
+ +```bash +echo "http://$INGRESS_IP/" +``` + +Our app is now accessible to the world! 🌐 + +--- + +## Observing your app + +Now that the app is fully deployed, we need a way to observe what's happening within the cluster. In AKS, we can use [Azure Container Insights](https://learn.microsoft.com/azure/azure-monitor/containers/container-insights-overview?WT.mc_id=containers-105184-pauyu) to get insights into our cluster. Additionally, we can leverage [Azure Monitor managed service for Prometheus](https://learn.microsoft.com/azure/azure-monitor/essentials/prometheus-metrics-overview?WT.mc_id=containers-105184-pauyu) and [Azure Managed Grafana](https://learn.microsoft.com/azure/managed-grafana/overview?WT.mc_id=containers-105184-pauyu) to get insights into our cluster using tooling that is very popular in the Cloud Native ecosystem. + +We'll explore both options. + +### Azure Container Insights + +Your AKS cluster has been provisioned with Azure Container Insights enabled. This means that you can view metrics and logs for your cluster and the applications running in it. + +
+ +> Open the Azure portal and navigate to your AKS cluster. Click on the **Insights** tab and explore the metrics and logs available. + +
+ +![Azure Container Insights](assets/container-insights.png) + +As you click through the different metrics and logs, you'll notice that you can view metrics and logs for the cluster as a whole, as well as for individual pods. This is very useful for troubleshooting issues. + +### Prometheus and Grafana + +Azure Container Insights provides a lot of useful information, but it doesn't provide everything. For example, it doesn't provide information about the traffic flowing through Istio sidecar proxies. To get this information, we'll need to use Prometheus and Grafana. In the Insights tab, you'll notice that there is a link to enable Prometheus. Let's enable it. + +
+ +> In the Azure portal, navigate to your AKS cluster. Click on the **Insights** tab and click the **Enable** button next to **Prometheus**. Check the checkboxes next to **Enable Prometheus metrics** and **Enable Grafana** and click **Configure**. + +
+ +![Monitor settings](assets/cluster-monitor-settings.png) + +![Prometheus and Grafana](assets/enable-prometheus-grafana.png) + +It will take a few minutes for your cluster to be onboarded. Once it's onboarded, you'll see a link to Grafana. + +
+ +> Click on the **View Grafana** button then click the **Browse dashboards** link to open Grafana. + +
+
+![Grafana](assets/open-grafana.png)
+
+The Azure Managed Grafana instance is pre-configured with Azure Managed Prometheus as a data source and also includes some dashboards. Let's take a look at some of the Kubernetes dashboards and import the Istio workload dashboard.
+
+
+ +> In Grafana, click on the Dashboards button, then click Browse. In the list of dashboards, click on the Managed Prometheus folder to expand a list of dashboards. Click on the **Kubernetes / Compute Resources / Cluster** dashboard to open it. + +
+ +![Browse Kubernetes dashboards](assets/browse-dashboards.png) + +![Kubernetes dashboard](assets/kubernetes-dashboard.png) + +You can also browse other dashboards that are available in the [Grafana marketplace](https://grafana.com/grafana/dashboards/). + +
+ +> Here is a list of all the Grafana dashboards that have been published by the Azure Monitor team: https://grafana.com/orgs/azuremonitorteam/dashboards + +
+
+These should be enough to get you started. Feel free to explore the other dashboards and create your own.
+
+---
+
+## Scaling your app
+
+As your app becomes more popular, you'll need to scale it to handle the increased load. In AKS, you can scale your app by increasing the number of replicas in your deployment. The Kubernetes Horizontal Pod Autoscaler (HPA) will automatically scale your app based on CPU and/or memory utilization. But not all workloads rely on these metrics for scaling. If, say, you need to scale your workload based on the number of items in a queue, HPA will not be sufficient.
+
+This is where we take a different approach and deploy KEDA to scale our app. [KEDA is a Kubernetes-based Event Driven Autoscaler](https://keda.sh/). It allows you to scale your app on basically any metric. If there is a metric that KEDA can access, it can scale based on it. Under the covers, KEDA looks at the metrics and your scaling rules and eventually creates an HPA to do the actual scaling.
+
+The AKS add-on for KEDA has already been installed in your cluster.
+
+### Setting request and limits
+
+When scaling on a performance metric, we need to let Kubernetes know how much compute and memory resources to allocate for each pod. We do this by setting the `requests` and `limits` in our deployment. The `requests` are the minimum amount of resources that Kubernetes will allocate for each pod. The `limits` are the maximum amount of resources that Kubernetes will allocate for each pod. Kubernetes will use these values to determine how many pods to run based on the amount of resources available on the nodes in the cluster.
+
+
+ +> Open the `azure-voting-app-deployment.yaml` file, find the empty `resources: {}` block and replace it with the following. + +
+ +```yaml +resources: + requests: + cpu: 4m + memory: 55Mi + limits: + cpu: 6m + memory: 75Mi +``` + +
+ +> Setting resource requests and limits is a best practice and should be done for all your deployments. + +
+ +Your `azure-voting-app-deployment.yaml` file should now look like this: + +
+Click to expand code + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + creationTimestamp: null + labels: + app: azure-voting-db + name: azure-voting-db +spec: + replicas: 1 + selector: + matchLabels: + app: azure-voting-db + template: + metadata: + creationTimestamp: null + labels: + app: azure-voting-db + spec: + containers: + - image: postgres + name: postgres + resources: + requests: + cpu: 4m + memory: 55Mi + limits: + cpu: 6m + memory: 75Mi + env: + - name: POSTGRES_USER_FILE + value: "/mnt/secrets-store/database-user" + - name: POSTGRES_PASSWORD_FILE + value: "/mnt/secrets-store/database-password" + volumeMounts: + - name: azure-voting-db-secrets + mountPath: "/mnt/secrets-store" + readOnly: true + - name: azure-voting-db-data + mountPath: "/var/lib/postgresql/data" + subPath: "data" + serviceAccountName: azure-voting-app-serviceaccount + volumes: + - name: azure-voting-db-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: azure-keyvault-secrets + - name: azure-voting-db-data + persistentVolumeClaim: + claimName: pvc-azuredisk + +status: {} +``` + +
+ +
+ +> Run the following command to deploy the updated manifest. + +
+ +```bash +kubectl apply -f azure-voting-app-deployment.yaml +``` + +### Scaling with KEDA based on CPU utilization + +
+ +> Create a new `azure-voting-app-scaledobject.yaml` manifest for KEDA. Here we will scale the application up when the CPU utilization is greater than 50%. + +
+
+```bash
+cat <<EOF > azure-voting-app-scaledobject.yaml
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: azure-voting-app-scaledobject
+spec:
+  scaleTargetRef:
+    name: azure-voting-app
+  triggers:
+    - type: cpu
+      metricType: Utilization
+      metadata:
+        value: "50"
+EOF
+```
+
+
+
+> The default values for minimum and maximum replica counts weren't included in our manifest above, but they will default to 0 and 100 respectively. In some cases, the minimum defaults to 1, so consult the documentation for the specific scaler you are using.
+
+
+ +
+ +> Apply the manifest to create the ScaledObject. + +
+ +```bash +kubectl apply -f azure-voting-app-scaledobject.yaml +``` + +
+ +> Run the following command to ensure the ScaledObject was created. + +
+ +```bash +kubectl get scaledobject +``` + +
+Sample output + +Wait until the `READY` column shows `True` + +```text +NAME SCALETARGETKIND SCALETARGETNAME MIN MAX TRIGGERS AUTHENTICATION READY ACTIVE FALLBACK AGE +azure-voting-app-scaledobject apps/v1.Deployment azure-voting-app cpu True True Unknown 16s +``` + +
+ +### Load testing your app + +Now that our app is enabled for autoscaling, let's generate some load on our app and watch KEDA scale our app. + +We'll use the [Azure Load Testing](https://learn.microsoft.com/azure/load-testing/overview-what-is-azure-load-testing?WT.mc_id=containers-105184-pauyu) service to generate load on our app and watch KEDA scale our app. + +
+ +> In the Azure Portal, navigate to your shared resource group and click on your Azure Load Testing resource. +> +> Click the **Quick test** button to create a new test. In the **Quick test** blade, enter your ingress IP as the URL. +> +> Set the number of virtual users to **250**, test duration to **240** seconds, and the ramp up time of **60**. +> +> Click the **Run test** button to start the test. + +
+ +
+ +> If you are familiar with creating JMeter tests, you can also create a JMeter test file and upload it to Azure Load Testing. + +
+ +![Azure Load Testing](assets/load-test-setup.png) + +
+ +> As the test is running, run the following command to watch the deployment scale. + +
+ +```bash +kubectl get deployment azure-voting-app -w +``` + +
+ +> In a different terminal tab, you can also run the following command to watch the Horizontal Pod Autoscaler reporting metrics as well. + +
+ +```bash +kubectl get hpa -w +``` + +After a few minutes, you should start to see the number of replicas increase as the load test runs. + +In addition to viewing your application metrics from the Azure Load Testing service, you can also view detailed metrics from your managed Grafana instance and/or Container Insights from the Azure Portal, so be sure to check that out as well. + +--- + +## Summary + +Congratulations, on making it this far 🎉 + +Hope you had a lot of fun but unfortunately, all good things must come to an end 🥲 + +Before you go, let's do a quick recap. + +In this workshop, you learned how to: + +- Deploy an application to Kubernetes using imperative and declarative methods +- Leverage the power of `kubectl` to generate YAML manifests for our app and deployed them to our AKS cluster +- Add an additional layer of security for our application secrets by storing them in Azure Key Vault and using the Secrets Store CSI driver to mount them into our pods +- Effectively run stateful workloads on AKS using StorageClasses to create persistent storage for the database +- Expose the frontend application to the internet using Istio's Ingress gateway +- Scale the app using KEDA and load test the application to ensure it scales as expected + +To learn more about Kubernetes, check out the [Kubernetes Learning Path](https://azure.microsoft.com/resources/kubernetes-learning-path/) and be sure to check out the [AKS docs](https://docs.microsoft.com/azure/aks/). 
For additional workshop content, be sure to check out https://azure-samples.github.io/aks-labs/catalog/ and https://aka.ms/oss-labs/ + +If you have any questions or feedback, please let me know in the comments below or reach out to me on Twitter [@pauldotyu](https://twitter.com/pauldotyu) or LinkedIn [/in/yupaul](https://www.linkedin.com/in/yupaul/) + diff --git a/examples/Authentication-And-Authorization/Create-A-Service-Principle.md b/examples/Authentication-And-Authorization/Create-A-Service-Principle.md new file mode 100644 index 00000000..56f0059c --- /dev/null +++ b/examples/Authentication-And-Authorization/Create-A-Service-Principle.md @@ -0,0 +1,173 @@ + + +# Use an Azure service principal with certificate-based authentication + +When creating a service principal, you choose the type of sign-in authentication it uses. There are two types of authentication available for Azure service principals: **password-based authentication** and **certificate-based authentication**. + +We recommend using certificate-based authentication due to the security restrictions of password-based authentication. Certificate-based authentication enables you to adopt a phishing resistant authentication by using [conditional access policies](/azure/active-directory/conditional-access/overview), which better protects Azure resources. To learn more about why certificate-based authentication is more secure, see [Microsoft Entra certificate-based authentication](/azure/active-directory/authentication/concept-certificate-based-authentication). + +This step in the tutorial explains how to use a service principal certificate to access an Azure resource. + +## Environment Setup + +It is good practice to use Environment Variables to make scripts both flexible and less error prone. Here are the variables used by this document. The values set here are good defaults, but you can chenge them as needed. Their precise purpose is shown in relevant sections below. 
+ +```bash +export RESOURCE_GROUP="myResourceGroup" +export SERVICE_PRINCIPLE_NAME="myServicePrincipalName" +export SP_ROLES="roleName" +export SP_SCOPES="/subscriptions/mySubscriptionID/resourceGroups/$RESOURCE_GROUP" +``` + +## Create a service principal containing a new certificate + +To create a _self-signed_ certificate for authentication, use the `--create-cert` parameter: + +```bash +az ad sp create-for-rbac --name $SERVICE_PRINCIPLE_NAME \ + --role $SP_ROLES \ + --scopes $SP_SCOPES \ + --create-cert +``` + +Console output: + +```output +{ + "appId": "myServicePrincipalID", + "displayName": "myServicePrincipalName", + "fileWithCertAndPrivateKey": "certFilePath\certFileName.pem", + "password": null, + "tenant": "myOrganizationTenantID" +} +``` + +Unless you store the certificate in Key Vault, the output includes the `fileWithCertAndPrivateKey` key. This key's value tells you where the generated certificate is stored. Copy the certificate to a secure location. The certificate contains the private key and the public certificate that can be used in `az login`. If you lose access to a certificate's private key, [reset the service principal credentials](./azure-cli-sp-tutorial-7.md). + +The contents of a PEM file can be viewed with a text editor. Here's a PEM file example: + +``` +-----BEGIN PRIVATE KEY----- +MIIEvQ... +-----END PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICoT... +-----END CERTIFICATE----- +``` + +## Create a service principal using an existing certificate + +Create a service principal with an existing certificate by using the `--cert` parameter. Any tool that uses this service principal must have access to the certificate's private key. Certificates should be in an ASCII format such as PEM, CER, or DER. Pass the certificate as a string, or use the `@path` format to load the certificate from a file. When uploading a certificate, only the public certificate is needed. For optimal security, do not include the private key. 
The `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` lines are optional. + +```azurecli-interactive +# create a service principal with the certificate as a string +az ad sp create-for-rbac --name myServicePrincipalName \ + --role roleName \ + --scopes /subscriptions/mySubscriptionID/resourceGroups/myResourceGroupName \ + --cert "MIICoT..." + +# or provide -----BEGIN CERTIFICATE----- and -----END CERTIFICATE----- lines +az ad sp create-for-rbac --name myServicePrincipalName \ + --role roleName \ + --scopes /subscriptions/mySubscriptionID/resourceGroups/myResourceGroupName \ + --cert "-----BEGIN CERTIFICATE----- +MIICoT... +-----END CERTIFICATE-----" +``` + +```azurecli-interactive +# create a service principal with the certificate file location +az ad sp create-for-rbac --name myServicePrincipalName \ + --role roleName \ + --scopes /subscriptions/mySubscriptionID/resourceGroups/myResourceGroupName \ + --cert @/path/to/cert.pem +``` + +Here's a PEM file example for uploading: + +``` +-----BEGIN CERTIFICATE----- +MIICoT... +-----END CERTIFICATE----- +``` + +## Work with Azure Key Vault + +The `--keyvault` parameter can be added to create or retrieve certificates in Azure Key Vault. When you use the `--keyvault` parameter, the `--cert` parameter is also required. In this example, the `--cert` value is the name of the certificate. 
+ +```azurecli-interactive +# Create a service principal storing the certificate in Azure Key Vault +az ad sp create-for-rbac --name myServicePrincipalName \ + --role roleName \ + --scopes /subscriptions/mySubscriptionID/resourceGroups/myResourceGroupName \ + --create-cert \ + --cert myCertificateName \ + --keyvault myVaultName +``` + +```azurecli-interactive +# Create a service principal using an existing certificate in Azure Key Vault +az ad sp create-for-rbac --name myServicePrincipalName \ + --role roleName \ + --scopes /subscriptions/mySubscriptionID/resourceGroups/myResourceGroupName \ + --cert myCertificateName \ + --keyvault myVaultName +``` + +### Retrieve a certificate from Azure Key Vault + +For a certificate stored in Azure Key Vault, retrieve the certificate with its private key with [az keyvault secret show](/cli/azure/keyvault/secret#az-keyvault-secret-show) and convert it to a PEM file. In Azure Key Vault, the name of the certificate's secret is the same as the certificate name. + +```azurecli-interactive +az keyvault secret download --file /path/to/cert.pfx \ + --vault-name VaultName \ + --name CertName \ + --encoding base64 +openssl pkcs12 -in cert.pfx -passin pass: -passout pass: -out cert.pem -nodes +``` + +## Convert an existing PKCS12 file + +If you already have a PKCS#12 file, you can convert it to PEM format using OpenSSL. If you have a password, change the `passin` argument. + +```console +openssl pkcs12 -in fileName.p12 -clcerts -nodes -out fileName.pem -passin pass: -passout pass: +``` + +## Append a certificate to a service principal + +Use the `--append` parameter in [az ad sp credential reset](/cli/azure/ad/sp/credential#az-ad-sp-credential-reset()) to append a certificate to an existing service principal. +By default, this command clears all passwords and keys so use carefully. 
+ +```azurecli-interactive +az ad sp credential reset --id myServicePrincipalID \ + --append \ + --cert @/path/to/cert.pem +``` + +Console output: + +```output +Certificate expires yyyy-mm-dd hh:mm:ss+00:00. Adjusting key credential end date to match. +The output includes credentials that you must protect. Be sure that you do not include these credentials in your code or check the credentials into your source control. For more information, see https://aka.ms/azadsp-cli +{ + "appId": "myServicePrincipalID", + "password": null, + "tenant": "myOrganizationTenantID" +} +``` + +## Sign in with a service principal using a certificate + +To sign in with a certificate, the certificate must be available locally as a PEM or DER file in ASCII format. PKCS#12 files (.p12/.pfx) don't work. When you use a PEM file, the **PRIVATE KEY** and **CERTIFICATE** must be appended together within the file. You don't need to prefix the path with an `@` like you do with other az commands. + +```azurecli-interactive +az login --service-principal --username APP_ID --certificate /path/to/cert.pem --tenant TENANT_ID +``` + +## Next Steps + +Now that you've learned how to work with service principals using a certificate, proceed to the next step to learn how to retrieve an existing service principal. + +> [!div class="nextstepaction"] +> [Get an existing service principal](./azure-cli-sp-tutorial-4.md) \ No newline at end of file diff --git a/examples/Azure-CLI-Docs/Batch-Delete-Groups.md b/examples/Azure-CLI-Docs/Batch-Delete-Groups.md new file mode 100644 index 00000000..4eaa81f5 --- /dev/null +++ b/examples/Azure-CLI-Docs/Batch-Delete-Groups.md @@ -0,0 +1,46 @@ + + +# Batch Delete Resource Groups + +When working with new infrastructure configurations it is common to have a number of unused resource groups left behind. If these have resources in them then you will be spending money needlessly. It can therefore be useful to automates the deletion of these groups. 
+
+## Environment Setup
+
+It is good practice to use a common prefix for resource groups within a particular work unit. This allows us to query the list of resources on Azure. So let's create a variable for that prefix.
+
+```bash
+export COMMON_PREFIX="Tutorial_Content"
+```
+
+## Create some dummy Resource Groups
+
+In order to demonstrate this method we need to create some dummy resource groups. We can use the following code block. This script will create three resource groups using the `COMMON_PREFIX` for the name.
+
+```bash
+for i in 1 2 3; do
+  az group create --name "${COMMON_PREFIX}_RG_$i" --location "eastus"
+done
+```
+
+## Getting the list of Groups to Delete
+
+We can now query the resource groups in the subscription, filter on our prefix and store the result in an environment variable, these are the candidates for deletion:
+
+```bash
+export RG_TO_DELETE=$(az group list --query "[?starts_with(name, '$COMMON_PREFIX')].name" -o tsv | tr '\n' ',' | sed 's/,$//')
+echo $RG_TO_DELETE
+```
+
+## Deleting the Resource Groups
+
+Now that we have identified the resource groups to delete, we can proceed with the deletion process. The following script will iterate over each resource group name stored in the `RG_TO_DELETE` variable and delete them one by one.
+
+```bash
+IFS=',' read -ra RG_ARRAY <<< "$RG_TO_DELETE"
+for rg in "${RG_ARRAY[@]}"; do
+  az group delete --name $rg --yes --no-wait
+  echo "$rg is being deleted"
+done
+```
+
+This script uses the `--yes` flag to confirm the deletion without prompting and the `--no-wait` flag to return immediately without waiting for the operation to complete. This allows the script to proceed with deleting the next resource group without delay.
\ No newline at end of file diff --git a/examples/Azure-CLI-Docs/Manager-Resource-Groups.md b/examples/Azure-CLI-Docs/Manager-Resource-Groups.md new file mode 100644 index 00000000..ed73e171 --- /dev/null +++ b/examples/Azure-CLI-Docs/Manager-Resource-Groups.md @@ -0,0 +1,182 @@ + + +--- +title: Manage resource groups - Azure CLI +description: Use Azure CLI to manage your resource groups through Azure Resource Manager. Shows how to create, list, and delete resource groups. +author: mumian +ms.topic: conceptual +ms.date: 09/26/2024 +ms.custom: devx-track-azurecli, devx-track-arm-template +--- + +# Manage Azure Resource Groups by using Azure CLI + +Learn how to use Azure CLI with [Azure Resource Manager](overview.md) to manage your Azure resource groups. For managing Azure resources, see [Manage Azure resources by using Azure CLI](manage-resources-cli.md). + + +## Background + +* Azure CLI. For more information, see [How to install the Azure CLI](/cli/azure/install-azure-cli). + +* After installing, sign in for the first time. For more information, see [How to sign in to the Azure CLI](/cli/azure/get-started-with-azure-cli#how-to-sign-into-the-azure-cli). + +## What is a resource group + +A resource group is a container that holds related resources for an Azure solution. The resource group can include all the resources for the solution, or only those resources that you want to manage as a group. You decide how you want to add resources to resource groups based on what makes the most sense for your organization. Generally, add resources that share the same lifecycle to the same resource group so you can easily deploy, update, and delete them as a group. + +The resource group stores metadata about the resources. When you specify a location for the resource group, you're specifying where that metadata is stored. For compliance reasons, you may need to ensure that your data is stored in a particular region. 
+
+## Environment Setup
+
+It is useful to use environment variables when scripting. This simplifies the code and reduces errors. The variables used in this document, along with example values, are created and described below. The use of each is shown in the content in the following sections.
+
+We create a short string to use when a parameter needs to be globally unique. This doesn't guarantee uniqueness, but it is pretty close. It is also useful to have a common prefix for resources that are created as a part of this tutorial, so we'll use a variable for that too.
+
+```bash
+export HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8)
+export COMMON_PREFIX="tutorialrg"
+```
+
+Resources in Azure are grouped in units called Resource Groups. Resources are also located in a specific geographic region. The resource group name is unique to your subscription and allows easy reference to a specific set of resources. The region is one of Microsoft's region identifiers.
+
+```bash
+export RG_NAME="${COMMON_PREFIX}_RG_${HASH}"
+export LOCATION="eastus"
+```
+
+When working with resource groups it is useful to lock them to prevent accidental changes. To do this we define Lock Groups and thus need a name.
+
+```bash
+export LOCK_GROUP_NAME="${COMMON_PREFIX}_LockGroup"
+```
+
+The next set of variables are not directly related to resource groups. They are used in this document to demonstrate the deployment of a resource into a resource group. We will create a storage account and thus need a UID (using the HASH from above, but first converting to lower case as storage names cannot have upper case characters), a name and a SKU identifier indicating the type of storage account to create.
+
+```bash
+export STORAGE_UID=$(echo $HASH | tr '[:upper:]' '[:lower:]')
+export STORAGE_NAME="${COMMON_PREFIX}$STORAGE_UID"
+export STORAGE_SKU="Standard_LRS"
+```
+
+## Create resource groups
+
+To create a resource group, use [az group create](/cli/azure/group#az-group-create).
+
+```azurecli-interactive
+az group create --name $RG_NAME --location $LOCATION
+```
+
+## List resource groups
+
+To list the resource groups in your subscription, use [az group list](/cli/azure/group#az-group-list).
+
+```azurecli-interactive
+az group list
+```
+
+To get one resource group, use [az group show](/cli/azure/group#az-group-show).
+
+```azurecli-interactive
+az group show --name $RG_NAME
+```
+
+For more information about how Azure Resource Manager orders the deletion of resources, see [Azure Resource Manager resource group deletion](delete-resource-group.md).
+
+## Deploy resources
+
+You can deploy Azure resources by using Azure CLI, or by deploying an Azure Resource Manager (ARM) template or Bicep file.
+
+### Deploy resources by using storage operations
+
+The following example creates a storage account. The name you provide for the storage account must be unique across Azure.
+
+```azurecli-interactive
+az storage account create --resource-group $RG_NAME --name $STORAGE_NAME --location $LOCATION --sku $STORAGE_SKU --kind StorageV2
+```
+
+### Deploy resources by using an ARM template or Bicep file
+
+To deploy an ARM template or Bicep file, use [az deployment group create](/cli/azure/deployment/group#az-deployment-group-create).
+ +The following example shows a Bicep file named `storage.bicep` that we will deploy: + +```bash +cat < storage.bicep +@minLength(3) +@maxLength(11) +param storageUID string + +var uniqueStorageName = '\${storageUID}\${uniqueString(resourceGroup().id)}' + +resource uniqueStorage 'Microsoft.Storage/storageAccounts@2022-09-01' = { + name: uniqueStorageName + location: 'eastus' + sku: { + name: 'Standard_LRS' + } + kind: 'StorageV2' + properties: { + supportsHttpsTrafficOnly: true + } +} +EOF +``` + +The command to deploy this using Bicep is: + +```azurecli-interactive +az deployment group create --resource-group $RG_NAME --template-file storage.bicep --parameters storageUID=$STORAGE_UID +``` + +For more information about deploying an ARM template, see [Deploy resources with Resource Manager templates and Azure CLI](../templates/deploy-cli.md). + +For more information about deploying a Bicep file, see [Deploy resources with Bicep and Azure CLI](../bicep/deploy-cli.md). + +## Lock resource groups + +Locking prevents other users in your organization from accidentally deleting or modifying critical resources. + +To prevent a resource group and its resources from being deleted, use [az lock create](/cli/azure/lock#az-lock-create). + +```azurecli-interactive +az lock create --name $LOCK_GROUP_NAME --lock-type CanNotDelete --resource-group $RG_NAME +``` + +To get the locks for a resource group, use [az lock list](/cli/azure/lock#az-lock-list). + +```azurecli-interactive +az lock list --resource-group $RG_NAME +``` + +To delete a lock, use [az lock delete](/cli/azure/lock#az-lock-delete). + +```azurecli-interactive +az lock delete --name $LOCK_GROUP_NAME --resource-group $RG_NAME +``` + +For more information, see [Lock resources with Azure Resource Manager](lock-resources.md). + +## Tag resource groups + +You can apply tags to resource groups and resources to logically organize your assets. 
For information, see [Using tags to organize your Azure resources](tag-resources-cli.md). + +## Export resource groups to templates + +To assist with creating ARM templates, you can export a template from existing resources. For more information, see [Use Azure CLI to export a template](../templates/export-template-cli.md). + +## Manage access to resource groups + +To manage access to a resource group, use [Azure role-based access control (Azure RBAC)](../../role-based-access-control/overview.md). For more information, see [Add or remove Azure role assignments using Azure CLI](../../role-based-access-control/role-assignments-cli.md). + +## Delete resource groups + +To delete a resource group, use [az group delete](/cli/azure/group#az-group-delete). + +```azurecli-interactive +az group delete --name $RG_NAME --yes --no-wait +``` + +## Next steps + +* To learn Azure Resource Manager, see [Azure Resource Manager overview](overview.md). +* To learn the Resource Manager template syntax, see [Understand the structure and syntax of Azure Resource Manager templates](../templates/syntax.md). \ No newline at end of file diff --git a/examples/Common/Prerequisite-AzureCLIAndSub.md b/examples/Common/Prerequisite-AzureCLIAndSub.md index 714a3fb2..6a5872d6 100644 --- a/examples/Common/Prerequisite-AzureCLIAndSub.md +++ b/examples/Common/Prerequisite-AzureCLIAndSub.md @@ -25,6 +25,7 @@ For more details on installing the CLI see [How to install the Azure CLI](/cli/a You need to be logged in to an active Azure subscription is required. If you don't have an Azure subscription, you can [create a free account](https://azure.microsoft.com/free/). + ```bash if ! az account show > /dev/null 2>&1; then echo "Please login to Azure CLI using 'az login' before running this script." @@ -37,4 +38,33 @@ fi ```text Currently logged in to Azure CLI. Using subscription ID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx +``` + +Once logged in we need to ensure that there is an active refresh token. 
+ +```bash +if ! az account get-access-token > /dev/null 2>&1; then + echo "Azure CLI session has expired. Please login with 'az login --use-device-code' and try again." +else + echo "Azure CLI session is active." +fi +``` + + +```text +Azure CLI session is active. +``` + +### Azure Tenant ID + +Retrieve the tenant ID associated with the active Azure subscription and store it in an environment variable called `TENANT_ID`. + +```bash +export TENANT_ID=$(az account show --query tenantId -o tsv) +echo "Tenant ID: $TENANT_ID" +``` + + +```text +Tenant ID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx ``` \ No newline at end of file From ccb73f7d9e02d64f75649305989697e29a9a0599 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 3 Feb 2025 18:00:10 -0800 Subject: [PATCH 08/31] Terraform is a prerequisite --- examples/AKS/getting-started-with-k8s-on-aks.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/AKS/getting-started-with-k8s-on-aks.md b/examples/AKS/getting-started-with-k8s-on-aks.md index b07398fc..807aab02 100644 --- a/examples/AKS/getting-started-with-k8s-on-aks.md +++ b/examples/AKS/getting-started-with-k8s-on-aks.md @@ -39,6 +39,7 @@ The objectives of this workshop are to: The following prerequisites are required to complete this workshop: - [Azure Subscription and Azure CLI](../Common/Prerequisite-AzureCLIAndSub.md) +- [Terraform](../Common/Prerequisites-Terraform.md) - [Visual Studio Code](https://code.visualstudio.com/) - [Docker Desktop](https://www.docker.com/products/docker-desktop/) - [Git](https://git-scm.com/) From 4d7315363500e0bbc7f480b5b945148de5f5a20d Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 3 Feb 2025 18:19:55 -0800 Subject: [PATCH 09/31] Add prereq for installing Terraform - WIP still needs to check if already installed --- examples/Common/Prerequisites-Terraform.md | 44 ++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/examples/Common/Prerequisites-Terraform.md 
b/examples/Common/Prerequisites-Terraform.md index e69de29b..2d4c58ed 100644 --- a/examples/Common/Prerequisites-Terraform.md +++ b/examples/Common/Prerequisites-Terraform.md @@ -0,0 +1,44 @@ +# Prerequisites: Installing Terraform on Linux + +This document provides instructions to download and install Terraform on a Linux system using a bash script. + +## Install Terraform + +Define the version of Terraform to install + +```bash +export TERRAFORM_VERSION="1.10.5" +``` + +Download Terraform + +```bash +curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip +``` + +Unzip the downloaded file + +```bash +unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip +``` + +Move the Terraform binary to /usr/local/bin + +```bash +mkdir -p ~/bin +mv terraform ~/bin/ +``` + +Verify the installation + +```bash +terraform -v +``` + +Cleanup + +```bash +rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip +rm LICENSE +``` + From 59535cfd0473921b4585d01f375a28a979e024e8 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Tue, 4 Feb 2025 14:18:43 -0800 Subject: [PATCH 10/31] Need to execute prereqs first. not last. 
--- internal/parsers/markdown.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/parsers/markdown.go b/internal/parsers/markdown.go index a6a169ed..8593ecca 100644 --- a/internal/parsers/markdown.go +++ b/internal/parsers/markdown.go @@ -253,7 +253,9 @@ func ExtractPrerequisiteUrlsFromAst(node ast.Node, source []byte) ([]string, err case *ast.Link: if inPrerequisitesSection { url := string(n.Destination) - urls = append(urls, url) + if strings.HasSuffix(url, ".md") { + urls = append(urls, url) + } } } } @@ -262,6 +264,8 @@ func ExtractPrerequisiteUrlsFromAst(node ast.Node, source []byte) ([]string, err if len(urls) == 0 { return nil, fmt.Errorf("no URLs found in the Prerequisites section") + } else { + logging.GlobalLogger.Debugf("Found %d URLs in the Prerequisites section", len(urls)) } return urls, nil From 4f93959597db25668b7e224750de06d8d5fb87f8 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Tue, 4 Feb 2025 17:47:19 -0800 Subject: [PATCH 11/31] Enable code blocks before pre-requisites. This allos us to set environment variables before executing the code block --- internal/engine/common/scenario.go | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/internal/engine/common/scenario.go b/internal/engine/common/scenario.go index c1f33f72..4a963dd8 100644 --- a/internal/engine/common/scenario.go +++ b/internal/engine/common/scenario.go @@ -121,7 +121,7 @@ func CreateScenarioFromMarkdown( } } - // Convert the markdonw into an AST and extract the scenario variables. + // Convert the markdown into an AST and extract the scenario variables. markdown := parsers.ParseMarkdownIntoAst(source) properties := parsers.ExtractYamlMetadataFromAst(markdown) scenarioVariables := parsers.ExtractScenarioVariablesFromAst(markdown, source) @@ -134,11 +134,12 @@ func CreateScenarioFromMarkdown( logging.GlobalLogger.WithField("CodeBlocks", codeBlocks). 
Debugf("Found %d code blocks", len(codeBlocks)) - // Extract the URLs of any prerequisite documents from the markdown file. + // Extract the URLs of any prerequisite documents linked from the markdown file. + // TODO: This is a bit of a hack. Should be refactored to remove duplication. Use recursion. prerequisiteUrls, err := parsers.ExtractPrerequisiteUrlsFromAst(markdown, source) if err == nil && len(prerequisiteUrls) > 0 { for _, url := range prerequisiteUrls { - logging.GlobalLogger.Infof("Prerequisite: %s", url) + logging.GlobalLogger.Infof("Executing prerequisite: %s", url) if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") { url = filepath.Join(filepath.Dir(path), url) } @@ -159,8 +160,28 @@ func CreateScenarioFromMarkdown( } prerequisiteCodeBlocks := parsers.ExtractCodeBlocksFromAst(prerequisiteMarkdown, prerequisiteSource, languagesToExecute) - codeBlocks = append(codeBlocks, prerequisiteCodeBlocks...) + + // Split existing codeBlocks into before and after prerequisites + var beforePrerequisites, afterPrerequisites []parsers.CodeBlock + prerequisitesFound := false + + for _, block := range codeBlocks { + if block.Header == "Prerequisites" { + prerequisitesFound = true + } + if prerequisitesFound { + afterPrerequisites = append(afterPrerequisites, block) + } else { + beforePrerequisites = append(beforePrerequisites, block) + } + } + + // recombine all codeblocks in the correct order of execution + codeBlocks = append(beforePrerequisites, prerequisiteCodeBlocks...) + codeBlocks = append(codeBlocks, afterPrerequisites...) } + } else { + logging.GlobalLogger.Warn(err) } varsToExport := lib.CopyMap(environmentVariableOverrides) From 886ecc0242d59216e31c50f84d35891e82d45b57 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 15:17:13 -0800 Subject: [PATCH 12/31] Standardize on naming. Improve terraform installation. 
--- .../Common/Prerequisite-AzureCLIAndSub.md | 2 +- examples/Common/Prerequisites-Terraform.md | 49 ++++++++----------- examples/KubeRay/deploy-kuberay.md | 2 +- 3 files changed, 23 insertions(+), 30 deletions(-) diff --git a/examples/Common/Prerequisite-AzureCLIAndSub.md b/examples/Common/Prerequisite-AzureCLIAndSub.md index 6a5872d6..149daa6f 100644 --- a/examples/Common/Prerequisite-AzureCLIAndSub.md +++ b/examples/Common/Prerequisite-AzureCLIAndSub.md @@ -64,7 +64,7 @@ export TENANT_ID=$(az account show --query tenantId -o tsv) echo "Tenant ID: $TENANT_ID" ``` - + ```text Tenant ID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx ``` \ No newline at end of file diff --git a/examples/Common/Prerequisites-Terraform.md b/examples/Common/Prerequisites-Terraform.md index 2d4c58ed..c9cd5246 100644 --- a/examples/Common/Prerequisites-Terraform.md +++ b/examples/Common/Prerequisites-Terraform.md @@ -10,35 +10,28 @@ Define the version of Terraform to install export TERRAFORM_VERSION="1.10.5" ``` -Download Terraform +Download, install and configure Terraform ```bash -curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip -``` - -Unzip the downloaded file - -```bash -unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -``` - -Move the Terraform binary to /usr/local/bin - -```bash -mkdir -p ~/bin -mv terraform ~/bin/ -``` - -Verify the installation - -```bash -terraform -v -``` - -Cleanup - -```bash -rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip -rm LICENSE +if ! 
command -v terraform &> /dev/null +then + curl -O https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip + + mkdir -p ~/bin + unzip -j terraform_${TERRAFORM_VERSION}_linux_amd64.zip terraform -d ~/bin + + if [[ ":$PATH:" != *":$HOME/bin:"* ]]; then + export PATH="$HOME/bin:$PATH" + echo 'export PATH="$HOME/bin:$PATH"' >> ~/.bashrc + fi + + terraform -v + + rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip + + echo "Terraform has been installed" +else + echo "Terraform is already installed" +fi ``` diff --git a/examples/KubeRay/deploy-kuberay.md b/examples/KubeRay/deploy-kuberay.md index 1c6d6749..9e191c54 100644 --- a/examples/KubeRay/deploy-kuberay.md +++ b/examples/KubeRay/deploy-kuberay.md @@ -6,7 +6,7 @@ In this article, you configure and deploy a Ray cluster on Azure Kubernetes Serv ## Prerequisites * Review the [Ray cluster on AKS overview](./ray-overview.md) to understand the components and deployment process. -* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisites-AzureCLIAndSub.md) +* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisite-AzureCLIAndSub.md) * Install [Draft for AKS](../Common/Prerequisites-DraftForAKS.md) - a tool to help containerize applications. TODO: Is Draft really needed - not sure it is since I ran some tests sucessfully without it. * Install [Helm](Prerequisites-Helm.md) - package manager for Kubernetes. 
* Install [Terraform](Prerequisites-Terraform.md) - Infrastructure as Code management tool From 7d2f63de6b64239d2c971d69b0728b0452bf1d44 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 17:01:26 -0800 Subject: [PATCH 13/31] A few notes on using copilot to author Exec Docs --- docs/Authoring-With-Copilot.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/Authoring-With-Copilot.md b/docs/Authoring-With-Copilot.md index 2fcd3896..9ad5db56 100644 --- a/docs/Authoring-With-Copilot.md +++ b/docs/Authoring-With-Copilot.md @@ -24,11 +24,23 @@ Add the following to the `settings.json` file that opens. # Use Copilot - +Using Copilot to author docs is easy in Visual Studio Code. + +## Initial Authoring * Create a new document * `CTRL+I` -* Type "Outline an executable document which [Objective]" -* Copilot will attempt to outline the document for your, providing heading titles and intro paragraphs +* Type "Create an executable document which [Objective]" +* Copilot will attempt to create the document for your, providing heading titles and intro paragraphs * Review the document, if any section is missing or needs adjustment position the cursor at that point, hit `CTRL-I`, give the instruction -* Work through the document creating the code blocks \ No newline at end of file +* Work through the document creating the code blocks + +## Testing + +Once you have the document in good shape and you feel it will work you can test it with Innovation Engine. 
+ +* Hit CTRL-SHIFT-` to open a WSL terminal (Innovation Engine does not work in PowerShell) +* Type `ie test filename.md` +* The document will be executed in test mode, any failure will be reported in the terminal +* If you want Copilot assistance with errors, position the cursor in the code block where the error occurred and paste the error message +* Repeat until no errors occur \ No newline at end of file From e9efd540d8ee73713e0f6ca2201e347944b82b2c Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 17:01:53 -0800 Subject: [PATCH 14/31] A readme to describe the purpose of the examples folder --- examples/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 examples/README.md diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..edfd8240 --- /dev/null +++ b/examples/README.md @@ -0,0 +1 @@ +This folder contains incomplete examples of documents. The content here is not intended for publication but rather serve as examples of how to achieve common goals in an Executable Document. \ No newline at end of file From 299b679e22869d8a2c13f18d093099c7be93acef Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 17:02:59 -0800 Subject: [PATCH 15/31] Helper script to delete all resource groups matching a search automatically --- examples/Common/Delete-All-ResourceGroups.md | 39 ++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 examples/Common/Delete-All-ResourceGroups.md diff --git a/examples/Common/Delete-All-ResourceGroups.md b/examples/Common/Delete-All-ResourceGroups.md new file mode 100644 index 00000000..b5b3ecba --- /dev/null +++ b/examples/Common/Delete-All-ResourceGroups.md @@ -0,0 +1,39 @@ +# Delete All Resource Groups Matching a Regular Expression + +This executable document will delete all Azure resource groups that match a regular expression provided in an environment variable. 
+ +## Prerequisites + +- [Azure CLI installed and Logged in](Prerequisite-AzureCLIAndSub.md) + +## Setup the Environment + +Set the environment variable `RESOURCE_GROUP_REGEX` with the desired regular expression. The example here will delete all resource groups with a name that ends with `DELETE_ME` + +```bash +export RESOURCE_GROUP_REGEX=".*DELETE_ME$" +``` + +## Get All Resource Groups + +Get a list of resource groups in the current subscription. + +```bash +export resource_groups=$(az group list --query "[].name" -o tsv) +echo "Resource groups to be deleted: ${resource_groups}" +``` + +## Delete selected Resource Groups + +Loop through all resource groups, deleting those that match the regular expression. + +```bash +for rg in $resource_groups; do + if [[ $rg =~ $RESOURCE_GROUP_REGEX ]]; then + echo "Deleting resource group: $rg" + az group delete --name $rg --yes --no-wait + fi +done +``` + +This script will delete all resource groups that match the regular expression provided in the `RESOURCE_GROUP_REGEX` environment variable. \ No newline at end of file From cea604a464f21e6f5ac0d3ae4370d31cfb1fb067 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 17:03:28 -0800 Subject: [PATCH 16/31] Example of using Exec Docs to validate configuration before deployment --- examples/Common/Prerequisite-Validation.md | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 examples/Common/Prerequisite-Validation.md diff --git a/examples/Common/Prerequisite-Validation.md b/examples/Common/Prerequisite-Validation.md new file mode 100644 index 00000000..be56dade --- /dev/null +++ b/examples/Common/Prerequisite-Validation.md @@ -0,0 +1,55 @@ +# VM SKU Availability + +This document provides a script to test if the selected VM SKU is available in the selected region. + +It is expected that the environment has variables that define the desired VM SKU and region. 
+
+```bash
+echo "Region selected is '$LOCATION'"
+echo "VM SKU requested is '$VM_SKU'"
+```
+
+
+```text
+Region selected is 'LOCATION'
+VM SKU requested is 'VM_SKU'
+```
+
+# Check SKU Availability
+
+```bash
+az vm list-sizes --location $LOCATION --query "[?name=='$VM_SKU']" --output table
+if [ $? -ne 0 ]; then
+    echo "The requested VM SKU is not available in the selected region."
+else
+    echo "The requested VM SKU is available in the selected region."
+fi
+```
+
+
+```text
+The requested VM SKU is available in the selected region.
+```
+
+## Check VM SKU Availability
+
+While it is a time consuming process, testing in advance if a VM SKU is available in the selected region can reduce frustration later.
+
+```bash
+REASON_CODE=$(az vm list-skus --location $LOCATION --query "[?name=='$VM_SKU'].restrictions[].reasonCode" --output tsv)
+
+if [ -z "$REASON_CODE" ]; then
+    echo "VM SKU ('$VM_SKU') is available."
+else
+    echo "VM SKU ('$VM_SKU') is not available: $REASON_CODE"
+fi
+```
+
+If the VM SKU is available this code block will output:
+
+
+```text
+VM SKU ('VM_SKU') is available
+```
+
+With a little imagination it would be possible to create a validation script that automatically selects an alternative SKU or region if the requested SKU was not available. For example `az vm list-skus --location eastus --query "[?capabilities[?name=='vCPUs' && to_number(value)>=2]]" --output table` will return all the SKUs that have 2 vCPUs.
\ No newline at end of file From 46aa4de3d48a640c513d8cca91b94fb1e06311ed Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Wed, 5 Feb 2025 17:03:53 -0800 Subject: [PATCH 17/31] Example of using validation --- .../Check-VM-SKU-Availability.md | 23 ------------------- examples/VM/Check-VM-SKU-Availability.md | 18 +++++++++++++++ 2 files changed, 18 insertions(+), 23 deletions(-) delete mode 100644 examples/VM-SKU-Availability/Check-VM-SKU-Availability.md create mode 100644 examples/VM/Check-VM-SKU-Availability.md diff --git a/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md b/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md deleted file mode 100644 index 75b5e6f0..00000000 --- a/examples/VM-SKU-Availability/Check-VM-SKU-Availability.md +++ /dev/null @@ -1,23 +0,0 @@ -Before starting to deploy a VM it is a good idea to check that availability exists in the region desired. This document explains how to do that. - -# Prerequisites - -* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisites-AzureCLIAndSub.md) - -# Configure the Environment - -We use enviroment variables to simplify commands, some of them will have been set in the above prerequisites, and echoed below for convenience. 
The remaining ones are set with defaults:
-
-```bash
-echo "ACTIVE_SUBSCRIPTION_ID=$ACTIVE_SUBSCRIPTION_ID"
-export AZURE_LOCATION=eastus
-export VM_SKU=Standard_D2_v2
-```
-
-# Check Availability
-
-We can use the az CLI to check availability of the desired SKU in the location selected with the currently active subscription as follows:
-
-```bash
-az vm list-skus --location $AZURE_LOCATION --subscription $ACTIVE_SUBSCRIPTION_ID --size $VM_SKU --output table
-```
diff --git a/examples/VM/Check-VM-SKU-Availability.md b/examples/VM/Check-VM-SKU-Availability.md
new file mode 100644
index 00000000..4e41d26e
--- /dev/null
+++ b/examples/VM/Check-VM-SKU-Availability.md
@@ -0,0 +1,18 @@
+Before starting to deploy a VM it is a good idea to check that availability exists in the region desired. This document explains how to do that.
+
+## Configure the Environment
+
+We use environment variables to simplify commands; some of them will have been set in the above prerequisites, and echoed below for convenience. The remaining ones are set with defaults:
+
+```bash
+export LOCATION=eastus
+VM_SKU=Standard_D2als_v6
+# export VM_SKU=Standard_L8s # this is an invalid VM_SKU for most users deliberately selected to create a failure in validation
+```
+
+## Prerequisites
+
+The VM SKU chosen in the previous section is one that is not usually available to customers. This is deliberate so that we can demonstrate using prerequisites to validate the options chosen.
+ +* Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../Common/Prerequisite-AzureCLIAndSub.md) +* Have a [valid configuration](../Common/Prerequisite-Validation.md) in the selected region for the requested VM SKU size From 06cd8387607cae7ff3cba83d73c5ab716f225976 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Tue, 18 Feb 2025 15:01:18 -0800 Subject: [PATCH 18/31] More robust testing of prereqs --- docs/Common/prerequisiteExample.md | 18 ++++++++++++++++++ docs/prerequisitesAndIncludes.md | 22 +++++++++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) create mode 100644 docs/Common/prerequisiteExample.md diff --git a/docs/Common/prerequisiteExample.md b/docs/Common/prerequisiteExample.md new file mode 100644 index 00000000..60154895 --- /dev/null +++ b/docs/Common/prerequisiteExample.md @@ -0,0 +1,18 @@ +# Prerequisite Example + +This document is a prerequisite example that is used by the [Prerequisites and Includes](prerequisitesAndIncludes.md) document. These two documents together describe and illustrate the use of Prerequisites in Innovation Engine. + +## Environment Variables + +Lets set an environment variable. This is a good use of pre-requisites because it allows document authors to use the same environment variables across multiple documents. This reduces the opportunity for errors and reduces the content that each author needs to create. Here we will create an 8 character hash that can be used in subsequent commands to ensure each run can create unique values for IDs. + +```bash +export UNIQUE_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) +``` + +Now we will echo this to the console. This will both serve to illustrate that this prerequisite has been executed but also allow the user to review the value. 
+ +```bash +echo "Unique hash: $UNIQUE_HASH" +``` + diff --git a/docs/prerequisitesAndIncludes.md b/docs/prerequisitesAndIncludes.md index 49c5a500..ceb7b826 100644 --- a/docs/prerequisitesAndIncludes.md +++ b/docs/prerequisitesAndIncludes.md @@ -6,19 +6,30 @@ It is often useful to break down a large document into component pieces. Long an Prerequisites are documents that should be executed before the current document proceeds. They are used to ensure, for example, that the environment is correctly setup. When running in interactive mode the user is given the opportunity to run the prerequsites interactively or non-interactively. This allows the user to skip details they already understand or to step through concepts that are new to them. -This document defines a [prerequisite](prerequisiteExample.md). In fact, if you are running in Innovation Engine you will already have seen it execute. In the following sections we'll explore how that happened. We can validate it ran by ensuring that the environment variable set in that document has a vlue. +```bash +if [ -z "$UNIQUE_HASH" ]; then + echo "Unique hash has no value yet." +else + echo "It looks like your environment already has a value for Unique Hash is '$UNIQUE_HASH'. This needs to be unset in order to test prerequisites correctly. Clearing the existing value." + export UNIQUE_HASH="" +fi +``` + +This document defines a [prerequisite](Common/prerequisiteExample.md). In fact, if you are running in Innovation Engine you will already have seen it execute. In the following sections we'll explore how that happened. We can validate it ran by ensuring that the environment variable set in that document has a vlue. + +### Check Prerequisites Ran ```bash if [ -z "$UNIQUE_HASH" ]; then - echo "UNIQUE_HASH has no value. It looks like prerequisites were not run correctly." + echo "Prerequisites didn't run since UNIQUE_HASH has no value." 
else - echo "Unique Hash is '$UNIQUE_HASH'" + echo "Prerequisites ran, Unique Hash is '$UNIQUE_HASH'." fi ``` ```text -Unique Hash is 'abcd1234' +Prerequisites ran, Unique Hash is 'abcd1234' ``` ### Prerequisites Syntax @@ -27,6 +38,8 @@ The prerequisites section starts with a heading of `## prerequisites`. The body of this section will contain 0 or more links to a document that should be executed ahead of the current one. When viewed in a rendered form, such as a web page, the link allows the user to click through to view the document. When interpreted by Innovation Engine the document will be loaded and executed within the same context as the current document. + +```text +Value set in Environment Variables From Prerequisite. +``` + +## Naming Conventions + +In general each Enviroment Variable declared in a primary Executable Document (not a prerequisite document) should us a consistent prefix. This meakes it possible to print (to the console) all variables used by that document. This can be useful in faciliating further work with the resources created. For example, here are three variables that both use the `EV_` prefix. + +```bash +export EV_VAR_ONE=1 +export EV_VAR_TWO=2 +export EV_VAR_THREE=3 +``` + +Now we can dump all the values set in this document with the following code: + +```bash +printenv | grep '^EV_' +``` \ No newline at end of file From 47e61be79199e23659294e47ced75e0727aa7a7d Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 20 Feb 2025 09:36:01 -0800 Subject: [PATCH 20/31] Document use of ini files for environment variables --- docs/environmentVariables.ini | 2 ++ docs/environmentVariables.md | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 docs/environmentVariables.ini diff --git a/docs/environmentVariables.ini b/docs/environmentVariables.ini new file mode 100644 index 00000000..7f75fae8 --- /dev/null +++ b/docs/environmentVariables.ini @@ -0,0 +1,2 @@ +EV_FROM_INI="Value set in the .ini file." 
+PATH="This value will be ignored as PATH is already set in the environment."
\ No newline at end of file
diff --git a/docs/environmentVariables.md b/docs/environmentVariables.md
index ee0cb4d2..c5537716 100644
--- a/docs/environmentVariables.md
+++ b/docs/environmentVariables.md
@@ -35,4 +35,29 @@ Now we can dump all the values set in this document with the following code:
 
 ```bash
 printenv | grep '^EV_'
+```
+
+## Initialization Files
+
+It is possible to pass in values for these variables using a `.ini` file. This should have the same name as the executable doc. For example, this document is called `environmentVariables.md` and has an associated `environmentVariables.ini` file which defines the `EV_FROM_INI` variable:
+
+```bash
+export EV_FROM_INI="Value set in the .ini file."
+echo $EV_FROM_INI
+```
+
+
+```text
+Value set in the .ini file.
+```
+
+However, if the environment variable is already set in the environment then the `.ini` values will not override that setting. For example, `PATH` is normally set on any machine. The `.ini` file for this contains a definition for `PATH` but it will not be used as `PATH` is already set in the environment. That is, your existing environment settings take precedence over the `.ini` file.
+
+```bash
+echo $PATH
+```
+
+
+```text
+/home/username/.local/bin...
+```
\ No newline at end of file
From 3186516ff0c88f732c78d452c088b2f741035f82 Mon Sep 17 00:00:00 2001
From: rgardler-msft
Date: Thu, 20 Feb 2025 16:52:59 -0800
Subject: [PATCH 21/31] Add debug launch configurations

---
 .vscode/launch.json | 49 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 .vscode/launch.json

diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 00000000..7164fa96
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,49 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Debug IE", + "type": "go", + "request": "launch", + "mode": "debug", + "program": "${workspaceRoot}/cmd/ie/ie.go", + "cwd": "${workspaceRoot}", + + // Test scenarios + //"args": ["test", "${workspaceRoot}/docs/helloWorldDemo.md"], + //"args": ["test", "${workspaceRoot}/docs/prerequisitesAndIncludes.md"], + "args": ["test", "${workspaceRoot}/docs/environmentVariables.md"], + //"args": ["test", "${workspaceRoot}/docs/prerequisitesAndIncludes.md"], + "showLog": true, + "console": "integratedTerminal" + }, + + { + "name": "Debug EG", + "type": "go", + "request": "launch", + "mode": "debug", + "program": "${workspaceRoot}/cmd/ie/ie.go", + "cwd": "${workspaceRoot}/examples/eg", + "args": ["test", "${workspaceRoot}/examples/eg/README.md"], + "showLog": true, + "console": "integratedTerminal" + }, + + { + "name": "Debug WIP Document", + "type": "go", + "request": "launch", + "mode": "debug", + "program": "${workspaceRoot}/cmd/ie/ie.go", + "cwd": "${workspaceRoot}", + "args": ["test", "${workspaceRoot}/examples/AZD/aks-store-demo/aks-store-demo.md"], + //"args": ["test", "${workspaceRoot}/examples/AKS-Automatic/quick-kubernetes-automatic-deploy.md"], + "showLog": true, + "console": "integratedTerminal" + }, + ] +} \ No newline at end of file From 666affada3dff721a80f05995d2e9935be108204 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 20 Feb 2025 16:54:12 -0800 Subject: [PATCH 22/31] Some fixes for prereqs in test --- docs/prerequisiteExample.md | 18 - examples/AKS-Automatic/CreateLinuxCluster.md | 452 ------------------ examples/Common/Prerequisite-AzureCLI-AKS.md | 8 + .../Common/Prerequisite-AzureCLIAndSub.md | 6 +- .../Prerequisite-aks-preview-cli-extension.md | 22 + internal/engine/common/scenario.go | 9 +- 6 files changed, 36 insertions(+), 479 deletions(-) delete mode 100644 docs/prerequisiteExample.md delete mode 100644 
examples/AKS-Automatic/CreateLinuxCluster.md create mode 100644 examples/Common/Prerequisite-AzureCLI-AKS.md create mode 100644 examples/Common/Prerequisite-aks-preview-cli-extension.md diff --git a/docs/prerequisiteExample.md b/docs/prerequisiteExample.md deleted file mode 100644 index 60154895..00000000 --- a/docs/prerequisiteExample.md +++ /dev/null @@ -1,18 +0,0 @@ -# Prerequisite Example - -This document is a prerequisite example that is used by the [Prerequisites and Includes](prerequisitesAndIncludes.md) document. These two documents together describe and illustrate the use of Prerequisites in Innovation Engine. - -## Environment Variables - -Lets set an environment variable. This is a good use of pre-requisites because it allows document authors to use the same environment variables across multiple documents. This reduces the opportunity for errors and reduces the content that each author needs to create. Here we will create an 8 character hash that can be used in subsequent commands to ensure each run can create unique values for IDs. - -```bash -export UNIQUE_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) -``` - -Now we will echo this to the console. This will both serve to illustrate that this prerequisite has been executed but also allow the user to review the value. - -```bash -echo "Unique hash: $UNIQUE_HASH" -``` - diff --git a/examples/AKS-Automatic/CreateLinuxCluster.md b/examples/AKS-Automatic/CreateLinuxCluster.md deleted file mode 100644 index 5512d3f2..00000000 --- a/examples/AKS-Automatic/CreateLinuxCluster.md +++ /dev/null @@ -1,452 +0,0 @@ -Note that this document is an Executable Docs version of https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-automatic-deploy?pivots=azure-cli taken as a snapshot on 1/17/25. For the latest version please visit the published document. 
- ---- -title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview)' -description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) Automatic (preview). -ms.topic: quickstart -ms.custom: build-2024, devx-track-azurecli, devx-track-bicep, ignite-2024 -ms.date: 05/21/2024 -author: sabbour -ms.author: asabbour -zone_pivot_groups: bicep-azure-cli-portal ---- - -# Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview) - -**Applies to:** :heavy_check_mark: AKS Automatic (preview) - -[Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] provides the easiest managed Kubernetes experience for developers, DevOps engineers, and platform engineers. Ideal for modern and AI applications, AKS Automatic automates AKS cluster setup and operations and embeds best practice configurations. Users of any skill level can benefit from the security, performance, and dependability of AKS Automatic for their applications. - -In this quickstart, you learn to: - -- Deploy an AKS Automatic cluster. -- Run a sample multi-container application with a group of microservices and web front ends simulating a retail scenario. - - -## Before you begin - -This quickstart assumes a basic understanding of Kubernetes concepts. For more information, see [Kubernetes core concepts for Azure Kubernetes Service (AKS)][kubernetes-concepts]. - -[!INCLUDE [azure-cli-prepare-your-environment-no-header.md](~/reusable-content/azure-cli/azure-cli-prepare-your-environment-no-header.md)] - -- This article requires version 2.57.0 or later of the Azure CLI. If you're using Azure Cloud Shell, the latest version is already installed there. -- This article requires the `aks-preview` Azure CLI extension version **9.0.0b4** or later. 
-- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. -- Register the `AutomaticSKUPreview` feature in your Azure subscription. -- The identity creating the cluster should also have the [following permissions on the resource group][Azure-Policy-RBAC-permissions]: - - `Microsoft.Authorization/policyAssignments/write` - - `Microsoft.Authorization/policyAssignments/read` -- AKS Automatic clusters require deployment in Azure regions that support at least 3 [availability zones][availability-zones]. -:::zone target="docs" pivot="bicep" -- To deploy a Bicep file, you need to write access on the resources you create and access to all operations on the `Microsoft.Resources/deployments` resource type. For example, to create a virtual machine, you need `Microsoft.Compute/virtualMachines/write` and `Microsoft.Resources/deployments/*` permissions. For a list of roles and permissions, see [Azure built-in roles](/azure/role-based-access-control/built-in-roles). -:::zone-end - -> [!IMPORTANT] -> AKS Automatic tries to dynamically select a virtual machine SKU for the `system` node pool based on the capacity available in the subscription. 
Make sure your subscription has quota for 16 vCPUs of any of the following SKUs in the region you're deploying the cluster to: [Standard_D4pds_v5](/azure/virtual-machines/sizes/general-purpose/dpsv5-series), [Standard_D4lds_v5](/azure/virtual-machines/sizes/general-purpose/dldsv5-series), [Standard_D4ads_v5](/azure/virtual-machines/sizes/general-purpose/dadsv5-series), [Standard_D4ds_v5](/azure/virtual-machines/sizes/general-purpose/ddsv5-series), [Standard_D4d_v5](/azure/virtual-machines/sizes/general-purpose/ddv5-series), [Standard_D4d_v4](/azure/virtual-machines/sizes/general-purpose/ddv4-series), [Standard_DS3_v2](/azure/virtual-machines/sizes/general-purpose/dsv3-series), [Standard_DS12_v2](/azure/virtual-machines/sizes/memory-optimized/dv2-dsv2-series-memory). You can [view quotas for specific VM-families and submit quota increase requests](/azure/quotas/per-vm-quota-requests) through the Azure portal. - -### Install the aks-preview Azure CLI extension - -[!INCLUDE [preview features callout](~/reusable-content/ce-skilling/azure/includes/aks/includes/preview/preview-callout.md)] - -To install the aks-preview extension, run the following command: - -```azurecli -az extension add --name aks-preview -``` - -Run the following command to update to the latest version of the extension released: - -```azurecli -az extension update --name aks-preview -``` - -### Register the feature flags - -To use AKS Automatic in preview, register the following flag using the [az feature register][az-feature-register] command. - -```azurecli-interactive -az feature register --namespace Microsoft.ContainerService --name AutomaticSKUPreview -``` - -Verify the registration status by using the [az feature show][az-feature-show] command. 
It takes a few minutes for the status to show *Registered*: - -```azurecli-interactive -az feature show --namespace Microsoft.ContainerService --name AutomaticSKUPreview -``` - -When the status reflects *Registered*, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: - -```azurecli-interactive -az provider register --namespace Microsoft.ContainerService -``` - -:::zone target="docs" pivot="azure-cli" - -## Create a resource group - -An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. - -The following example creates a resource group named *myResourceGroup* in the *eastus* location. - -Create a resource group using the [az group create][az-group-create] command. - -```azurecli -az group create --name myResourceGroup --location eastus -``` - -The following sample output resembles successful creation of the resource group: - -```output -{ - "id": "/subscriptions//resourceGroups/myResourceGroup", - "location": "eastus", - "managedBy": null, - "name": "myResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null -} -``` - -## Create an AKS Automatic cluster - -To create an AKS Automatic cluster, use the [az aks create][az-aks-create] command. The following example creates a cluster named *myAKSAutomaticCluster* with Managed Prometheus and Container Insights integration enabled. - -```azurecli -az aks create \ - --resource-group myResourceGroup \ - --name myAKSAutomaticCluster \ - --sku automatic -``` - -After a few minutes, the command completes and returns JSON-formatted information about the cluster. - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. 
AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using the Azure CLI, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. - -Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - -```azurecli -az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster -``` - -Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - -```bash -kubectl get nodes -``` - -The following sample output will show how you're asked to log in. - -```output -To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. -``` - -After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. - -```output -NAME STATUS ROLES AGE VERSION -aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 -``` - -:::zone-end - -:::zone target="docs" pivot="azure-portal" - -## Create Automatic Kubernetes Cluster - -1. To create an AKS Automatic cluster, search for **Kubernetes Services**, and select **Automatic Kubernetes cluster** from the drop-down options. - - :::image type="content" source="./media/quick-automatic-kubernetes-portal/browse-dropdown-options.png" alt-text="The screenshot of the entry point for creating an AKS Automatic cluster in the Azure portal."::: - -2. 
On the **Basics** tab, fill in all the mandatory fields required to get started: -Subscription, Resource Group, Cluster name, and Region - - :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-basics.png" alt-text="The screenshot of the Create - Basics Tab for an AKS Automatic cluster in the Azure portal."::: - - If the prerequisites aren't met and the subscription requires registration of the preview flags, there will be an error shown under the Subscription field: - - :::image type="content" source="./media/quick-automatic-kubernetes-portal/register.png" alt-text="The screenshot of the error shown when a subscription doesn't have preview flags registered while creating an AKS Automatic cluster in the Azure portal."::: - - -3. On the **Monitoring** tab, choose your monitoring configurations from Azure Monitor, Managed Prometheus, Managed Grafana, and/or configure alerts. Add tags (optional), and proceed to create the cluster. - - :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-monitoring.png" alt-text="The screenshot of the Monitoring Tab while creating an AKS Automatic cluster in the Azure portal."::: - -3. Get started with configuring your first application from GitHub and set up an automated deployment pipeline. - - :::image type="content" source="./media/quick-automatic-kubernetes-portal/automatic-overview.png" alt-text="The screenshot of the Get Started Tab on Overview Blade after creating an AKS Automatic cluster in the Azure portal."::: - - -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. 
When you create a cluster using the Azure portal, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. - -Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - -```azurecli -az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster -``` - -Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - -```bash -kubectl get nodes -``` - -The following sample output will show how you're asked to log in. - -```output -To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. -``` - -After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. - -```output -NAME STATUS ROLES AGE VERSION -aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 -``` -:::zone-end - -:::zone target="docs" pivot="bicep" - -## Create a resource group - -An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. - -The following example creates a resource group named *myResourceGroup* in the *eastus* location. - -Create a resource group using the [az group create][az-group-create] command. 
- -```azurecli -az group create --name myResourceGroup --location eastus -``` - -The following sample output resembles successful creation of the resource group: - -```output -{ - "id": "/subscriptions//resourceGroups/myResourceGroup", - "location": "eastus", - "managedBy": null, - "name": "myResourceGroup", - "properties": { - "provisioningState": "Succeeded" - }, - "tags": null -} -``` - -## Review the Bicep file - -This Bicep file defines an AKS Automatic cluster. While in preview, you need to specify the *system nodepool* agent pool profile. - -```bicep -@description('The name of the managed cluster resource.') -param clusterName string = 'myAKSAutomaticCluster' - -@description('The location of the managed cluster resource.') -param location string = resourceGroup().location - -resource aks 'Microsoft.ContainerService/managedClusters@2024-03-02-preview' = { - name: clusterName - location: location - sku: { - name: 'Automatic' - } - properties: { - agentPoolProfiles: [ - { - name: 'systempool' - mode: 'System' - count: 3 - } - ] - } - identity: { - type: 'SystemAssigned' - } -} -``` - -For more information about the resource defined in the Bicep file, see the [**Microsoft.ContainerService/managedClusters**](/azure/templates/microsoft.containerservice/managedclusters?tabs=bicep&pivots=deployment-language-bicep) reference. - -## Deploy the Bicep file - -1. Save the Bicep file as **main.bicep** to your local computer. - - > [!IMPORTANT] - > The Bicep file sets the `clusterName` param to the string *myAKSAutomaticCluster*. If you want to use a different cluster name, make sure to update the string to your preferred cluster name before saving the file to your computer. - -1. Deploy the Bicep file using the Azure CLI. - - ```azurecli - az deployment group create --resource-group myResourceGroup --template-file main.bicep - ``` - - It takes a few minutes to create the AKS cluster. Wait for the cluster to be successfully deployed before you move on to the next step. 
- -## Connect to the cluster - -To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using Bicep, you need to [assign one of the built-in roles][aks-entra-rbac-builtin-roles] such as `Azure Kubernetes Service RBAC Reader`, `Azure Kubernetes Service RBAC Writer`, `Azure Kubernetes Service RBAC Admin`, or `Azure Kubernetes Service RBAC Cluster Admin` to your users, scoped to the cluster or a specific namespace. Also make sure your users have the `Azure Kubernetes Service Cluster User` built-in role to be able to do run `az aks get-credentials`, and then get the kubeconfig of your AKS cluster using the `az aks get-credentials` command. - -Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. - -```azurecli -az aks get-credentials --resource-group myResourceGroup --name -``` - -Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. - -```bash -kubectl get nodes -``` - -The following sample output will show how you're asked to log in. - -```output -To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. -``` - -After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. 
- -```output -NAME STATUS ROLES AGE VERSION -aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 -aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 -``` - -:::zone-end - - -## Deploy the application - -To deploy the application, you use a manifest file to create all the objects required to run the [AKS Store application](https://github.com/Azure-Samples/aks-store-demo). A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. The manifest includes the following Kubernetes deployments and services: - -:::image type="content" source="media/quick-kubernetes-deploy-portal/aks-store-architecture.png" alt-text="Screenshot of Azure Store sample architecture." lightbox="media/quick-kubernetes-deploy-portal/aks-store-architecture.png"::: - -- **Store front**: Web application for customers to view products and place orders. -- **Product service**: Shows product information. -- **Order service**: Places orders. -- **Rabbit MQ**: Message queue for an order queue. - -> [!NOTE] -> We don't recommend running stateful containers, such as Rabbit MQ, without persistent storage for production. These are used here for simplicity, but we recommend using managed services, such as Azure Cosmos DB or Azure Service Bus. - -1. Create a namespace `aks-store-demo` to deploy the Kubernetes resources into. - - ```bash - kubectl create ns aks-store-demo - ``` - -1. Deploy the application using the [kubectl apply][kubectl-apply] command into the `aks-store-demo` namespace. The YAML file defining the deployment is on [GitHub](https://github.com/Azure-Samples/aks-store-demo). 
- - ```bash - kubectl apply -n aks-store-demo -f https://raw.githubusercontent.com/Azure-Samples/aks-store-demo/main/aks-store-ingress-quickstart.yaml - ``` - - The following sample output shows the deployments and services: - - ```output - statefulset.apps/rabbitmq created - configmap/rabbitmq-enabled-plugins created - service/rabbitmq created - deployment.apps/order-service created - service/order-service created - deployment.apps/product-service created - service/product-service created - deployment.apps/store-front created - service/store-front created - ingress/store-front created - ``` - -## Test the application - -When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. - -1. Check the status of the deployed pods using the [kubectl get pods][kubectl-get] command. Make sure all pods are `Running` before proceeding. If this is the first workload you deploy, it may take a few minutes for [node auto provisioning][node-auto-provisioning] to create a node pool to run the pods. - - ```bash - kubectl get pods -n aks-store-demo - ``` - -1. Check for a public IP address for the store-front application. Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. - - ```bash - kubectl get ingress store-front -n aks-store-demo --watch - ``` - - The **ADDRESS** output for the `store-front` service initially shows empty: - - ```output - NAME CLASS HOSTS ADDRESS PORTS AGE - store-front webapprouting.kubernetes.azure.com * 80 12m - ``` - -1. Once the **ADDRESS** changes from blank to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. - - The following sample output shows a valid public IP address assigned to the service: - - ```output - NAME CLASS HOSTS ADDRESS PORTS AGE - store-front webapprouting.kubernetes.azure.com * 4.255.22.196 80 12m - ``` - -1. 
Open a web browser to the external IP address of your ingress to see the Azure Store app in action. - - :::image type="content" source="media/quick-kubernetes-deploy-cli/aks-store-application.png" alt-text="Screenshot of AKS Store sample application." lightbox="media/quick-kubernetes-deploy-cli/aks-store-application.png"::: - -## Delete the cluster - -If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. Run the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. - - ```azurecli - az group delete --name myResourceGroup --yes --no-wait - ``` - > [!NOTE] - > The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity, so you don't need to manually remove it. - -## Next steps - -In this quickstart, you deployed a Kubernetes cluster using [AKS Automatic][what-is-aks-automatic] and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. - -To learn more about AKS Automatic, continue to the introduction. 
- -> [!div class="nextstepaction"] -> [Introduction to Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] - - - -[kubectl]: https://kubernetes.io/docs/reference/kubectl/ -[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply -[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get - - -[kubernetes-concepts]: ../concepts-clusters-workloads.md -[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md -[azure-resource-group]: /azure/azure-resource-manager/management/overview -[az-aks-create]: /cli/azure/aks#az-aks-create -[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials -[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli -[az-group-create]: /cli/azure/group#az-group-create -[az-group-delete]: /cli/azure/group#az-group-delete -[node-auto-provisioning]: ../node-autoprovision.md -[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests -[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json -[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json -[az-feature-register]: /cli/azure/feature#az_feature_register -[az-feature-show]: /cli/azure/feature#az_feature_show -[az-provider-register]: /cli/azure/provider#az_provider_register -[what-is-aks-automatic]: ../intro-aks-automatic.md -[Azure-Policy-RBAC-permissions]: /azure/governance/policy/overview#azure-rbac-permissions-in-azure-policy -[aks-entra-rbac]: /azure/aks/manage-azure-rbac -[aks-entra-rbac-builtin-roles]: /azure/aks/manage-azure-rbac#create-role-assignments-for-users-to-access-the-cluster -[availability-zones]: /azure/reliability/availability-zones-region-support - diff --git a/examples/Common/Prerequisite-AzureCLI-AKS.md 
b/examples/Common/Prerequisite-AzureCLI-AKS.md new file mode 100644 index 00000000..e42d52b8 --- /dev/null +++ b/examples/Common/Prerequisite-AzureCLI-AKS.md @@ -0,0 +1,8 @@ +### Check kubelogin and install if not exists + +```bash +if ! command -v kubelogin &> /dev/null; then + echo "kubelogin could not be found. Installing kubelogin..." + az aks install-cli +fi +``` diff --git a/examples/Common/Prerequisite-AzureCLIAndSub.md b/examples/Common/Prerequisite-AzureCLIAndSub.md index 149daa6f..26aafb2f 100644 --- a/examples/Common/Prerequisite-AzureCLIAndSub.md +++ b/examples/Common/Prerequisite-AzureCLIAndSub.md @@ -1,4 +1,4 @@ -This document uses the Azure CLI connected to an active Azure Subscription. The following commands ensure that you have both an active subscription and a current version of the Azure CLI. +This document uses the Azure CLI connected to an active Azure Subscription. The following commands ensure that you have both an active subscription and a current version of the Azure CLI. Assuming you are logged in and have executed these commands the environment variable `ACTIVE_SUBSCRIPTION_ID` will contain the currently active subscription ID. ### Azure CLI @@ -9,9 +9,9 @@ if ! command -v az &> /dev/null then echo "Azure CLI could not be found, installing..." curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash +else + echo "Azure CLI is installed." fi - -echo "Azure CLI is installed." 
``` diff --git a/examples/Common/Prerequisite-aks-preview-cli-extension.md b/examples/Common/Prerequisite-aks-preview-cli-extension.md new file mode 100644 index 00000000..00549358 --- /dev/null +++ b/examples/Common/Prerequisite-aks-preview-cli-extension.md @@ -0,0 +1,22 @@ +### Install or Update AKS Preview CLI Extension + +[!INCLUDE [preview features callout](~/reusable-content/ce-skilling/azure/includes/aks/includes/preview/preview-callout.md)] + +To install the aks-preview extension, run the following command: + +```bash +AKS_PREVIWS_VERSION=${az extension list --query "[?name=='aks-preview'].version" --output tsv} + +if [ -z "$AKS_PREVIWS_VERSION" ]; then + az extension add --name aks-preview +else + az extension update --name aks-preview +fi + +echo "Latest AKS Preview CLI extension installed." +``` + + +```text +Latest AKS Preview CLI extension installed.yo +``` diff --git a/internal/engine/common/scenario.go b/internal/engine/common/scenario.go index 4a963dd8..1ae2e6cf 100644 --- a/internal/engine/common/scenario.go +++ b/internal/engine/common/scenario.go @@ -163,16 +163,13 @@ func CreateScenarioFromMarkdown( // Split existing codeBlocks into before and after prerequisites var beforePrerequisites, afterPrerequisites []parsers.CodeBlock - prerequisitesFound := false + // TODO: Need to use prerequisite header on prereq code blocks for _, block := range codeBlocks { if block.Header == "Prerequisites" { - prerequisitesFound = true - } - if prerequisitesFound { - afterPrerequisites = append(afterPrerequisites, block) - } else { beforePrerequisites = append(beforePrerequisites, block) + } else { + afterPrerequisites = append(afterPrerequisites, block) } } From 0d25de02613bb91b8f540afd1d232faba208facf Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 20 Feb 2025 17:27:09 -0800 Subject: [PATCH 23/31] Still a WIP, but good progress so far --- .../AKS/getting-started-with-k8s-on-aks.md | 50 ++++++++++++++++--- 1 file changed, 42 insertions(+), 8 deletions(-) 
diff --git a/examples/AKS/getting-started-with-k8s-on-aks.md b/examples/AKS/getting-started-with-k8s-on-aks.md index 807aab02..a777edc2 100644 --- a/examples/AKS/getting-started-with-k8s-on-aks.md +++ b/examples/AKS/getting-started-with-k8s-on-aks.md @@ -76,17 +76,25 @@ providers=( for provider in "${providers[@]}"; do provider_state=$(az provider show --namespace $provider --query "registrationState" -o tsv) if [ "$provider_state" == "Registered" ]; then - echo "Provider $provider is already registered." + echo "Provider '$provider' is registered." else - echo "Provider $provider is not registered. Registering now..." + echo "Provider '$provider' is not registered. Registering now..." az provider register --namespace $provider + echo "Provider '$provider' is registered." fi done ``` - + +The output of this command should indicate that all features are registered multiple lines in the following format. + + +```text +Provider 'Microsoft.Dashboard' is registered. +``` + 4. Run the following commands to ensure you have all the necessary features registered in your subscription. - ```TODO:Removed bash marker to prevent execution as it casues errors + ```bash az feature register --namespace "Microsoft.ContainerService" --name "EnableWorkloadIdentityPreview" az feature register --namespace "Microsoft.ContainerService" --name "AKS-GitOps" az feature register --namespace "Microsoft.ContainerService" --name "AzureServiceMeshPreview" @@ -94,6 +102,20 @@ done az feature register --namespace "Microsoft.ContainerService" --name "AKS-PrometheusAddonPreview" ``` +The output from these commands should indicate that all features have been registered, one such conversation is shown below. 
+ + +```text +{ + "id": "/subscriptions/f5b6dc3c-c79b-44ac-9850-cedcc65f0192/providers/Microsoft.Features/providers/Microsoft.ContainerService/features/AKS-KedaPreview", + "name": "Microsoft.ContainerService/AKS-KedaPreview", + "properties": { + "state": "Registered" + }, + "type": "Microsoft.Features/providers/features" +} +``` + 5. This lab uses files made available in a GitHub repo. Clone or pull the repository: ```bash @@ -168,9 +190,11 @@ An AKS cluster has been provisioned for you. Let's use the Azure CLI to download Run the following command to set variables for your resource group and AKS cluster name. Don't forget to replace `` in the command below with the username you've been assigned. + +TODO: The original content here was in the form `rg-user`. Need to resolve this in Exec Docs, for now going with a constant ```bash -RG_NAME=rg-user -AKS_NAME=aks-user +export RG_NAME=rg-user1 +export AKS_NAME=aks-user1 ``` Run the following command to download the credentials for your AKS cluster. @@ -197,7 +221,7 @@ You can see the list of clusters you have access to by running the following com kubectl config get-contexts ``` -If you have more than one context listed, you can switch between clusters by running the following command: +If you have more than one context listed, you can switch between clusters by running the `use-context` command: ```bash kubectl config use-context @@ -288,7 +312,7 @@ Let's see if our Pod is running. kubectl get pods ``` -You should see something like this: +You should see something like this, though the status may be different: ```text NAME READY STATUS RESTARTS AGE @@ -302,6 +326,16 @@ This is different from the imperative approach where we told Kubernetes to run a The `kubectl apply` command is idempotent. This means that if you run the command multiple times, the result will be the same. If the resource already exists, it will be updated. If the resource does not exist, it will be created. 
+When scripting we will want to wait here until the status becomes "Running", this can be done easily: + +```bash +while [[ $(kubectl get pods nginx2 -o jsonpath='{.status.phase}') != "Running" ]]; do + echo "Waiting for nginx2 pod to be in Running state..." + sleep 5 +done +echo "Nginx2 pod is now in Running state." +``` + :::info[Important] Before we move on, be sure to delete all pods so that we don't waste cluster resources. From 5311c96d43df0e29c4aabbd38f5348f048937dee Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 20 Feb 2025 17:27:45 -0800 Subject: [PATCH 24/31] WIP: need to have CLI and be logged in --- .../quick-kubernetes-automatic-deploy.md | 439 ++++++++++++++++++ 1 file changed, 439 insertions(+) create mode 100644 examples/AKS-Automatic/quick-kubernetes-automatic-deploy.md diff --git a/examples/AKS-Automatic/quick-kubernetes-automatic-deploy.md b/examples/AKS-Automatic/quick-kubernetes-automatic-deploy.md new file mode 100644 index 00000000..4a05940f --- /dev/null +++ b/examples/AKS-Automatic/quick-kubernetes-automatic-deploy.md @@ -0,0 +1,439 @@ +Note that this document is an Executable Docs version of https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-automatic-deploy?pivots=azure-cli taken as a snapshot on 1/17/25. For the latest version please visit the published document. + +--- +title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview)' +description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) Automatic (preview). 
+ms.topic: quickstart +ms.custom: build-2024, devx-track-azurecli, devx-track-bicep, ignite-2024 +ms.date: 05/21/2024 +author: sabbour +ms.author: asabbour +zone_pivot_groups: bicep-azure-cli-portal +--- + +# Quickstart: Deploy an Azure Kubernetes Service (AKS) Automatic cluster (preview) + +**Applies to:** :heavy_check_mark: AKS Automatic (preview) + +[Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] provides the easiest managed Kubernetes experience for developers, DevOps engineers, and platform engineers. Ideal for modern and AI applications, AKS Automatic automates AKS cluster setup and operations and embeds best practice configurations. Users of any skill level can benefit from the security, performance, and dependability of AKS Automatic for their applications. + +In this quickstart, you learn to: + +- Deploy an AKS Automatic cluster. +- Run a sample multi-container application with a group of microservices and web front ends simulating a retail scenario. + +## Prerequisites + + + + +- An up-to-date install of [AZ CLI](../Common/Prerequisite-AzureCLIAndSub.md) logged in to an active subscription. +- [Install the aks-preview Azure CLI extension](../Common/Prerequisite-aks-preview-cli-extension.md) version **9.0.0b4** or later. +- If you have multiple Azure subscriptions, select the appropriate subscription ID in which the resources should be billed using the [az account set](/cli/azure/account#az-account-set) command. +- Register the `AutomaticSKUPreview` feature in your Azure subscription. +- The identity creating the cluster should also have the [following permissions on the resource group][Azure-Policy-RBAC-permissions]: + - `Microsoft.Authorization/policyAssignments/write` + - `Microsoft.Authorization/policyAssignments/read` +- AKS Automatic clusters require deployment in Azure regions that support at least 3 [availability zones][availability-zones]. 
+:::zone target="docs" pivot="bicep"
+- To deploy a Bicep file, you need write access on the resources you create and access to all operations on the `Microsoft.Resources/deployments` resource type. For example, to create a virtual machine, you need `Microsoft.Compute/virtualMachines/write` and `Microsoft.Resources/deployments/*` permissions. For a list of roles and permissions, see [Azure built-in roles](/azure/role-based-access-control/built-in-roles).
+:::zone-end
+
+> [!IMPORTANT]
+> AKS Automatic tries to dynamically select a virtual machine SKU for the `system` node pool based on the capacity available in the subscription. Make sure your subscription has quota for 16 vCPUs of any of the following SKUs in the region you're deploying the cluster to: [Standard_D4pds_v5](/azure/virtual-machines/sizes/general-purpose/dpsv5-series), [Standard_D4lds_v5](/azure/virtual-machines/sizes/general-purpose/dldsv5-series), [Standard_D4ads_v5](/azure/virtual-machines/sizes/general-purpose/dadsv5-series), [Standard_D4ds_v5](/azure/virtual-machines/sizes/general-purpose/ddsv5-series), [Standard_D4d_v5](/azure/virtual-machines/sizes/general-purpose/ddv5-series), [Standard_D4d_v4](/azure/virtual-machines/sizes/general-purpose/ddv4-series), [Standard_DS3_v2](/azure/virtual-machines/sizes/general-purpose/dsv3-series), [Standard_DS12_v2](/azure/virtual-machines/sizes/memory-optimized/dv2-dsv2-series-memory). You can [view quotas for specific VM-families and submit quota increase requests](/azure/quotas/per-vm-quota-requests) through the Azure portal.
+
+
+## Register the feature flags
+
+To use AKS Automatic in preview, register the following flag using the [az feature register][az-feature-register] command.
+
+```azurecli-interactive
+az feature register --namespace Microsoft.ContainerService --name AutomaticSKUPreview
+```
+
+Verify the registration status by using the [az feature show][az-feature-show] command. 
It takes a few minutes for the status to show *Registered*: + +```azurecli-interactive +az feature show --namespace Microsoft.ContainerService --name AutomaticSKUPreview +``` + +When the status reflects *Registered*, refresh the registration of the *Microsoft.ContainerService* resource provider by using the [az provider register][az-provider-register] command: + +```azurecli-interactive +az provider register --namespace Microsoft.ContainerService +``` + +:::zone target="docs" pivot="azure-cli" + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. + +Create a resource group using the [az group create][az-group-create] command. + +```azurecli +az group create --name myResourceGroup --location eastus +``` + +The following sample output resembles successful creation of the resource group: + +```output +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null +} +``` + +## Create an AKS Automatic cluster + +To create an AKS Automatic cluster, use the [az aks create][az-aks-create] command. The following example creates a cluster named *myAKSAutomaticCluster* with Managed Prometheus and Container Insights integration enabled. + +```azurecli +az aks create \ + --resource-group myResourceGroup \ + --name myAKSAutomaticCluster \ + --sku automatic +``` + +After a few minutes, the command completes and returns JSON-formatted information about the cluster. + +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. 
AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using the Azure CLI, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. + +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` + +:::zone-end + +:::zone target="docs" pivot="azure-portal" + +## Create Automatic Kubernetes Cluster + +1. To create an AKS Automatic cluster, search for **Kubernetes Services**, and select **Automatic Kubernetes cluster** from the drop-down options. + + :::image type="content" source="./media/quick-automatic-kubernetes-portal/browse-dropdown-options.png" alt-text="The screenshot of the entry point for creating an AKS Automatic cluster in the Azure portal."::: + +2. 
On the **Basics** tab, fill in all the mandatory fields required to get started:
+Subscription, Resource Group, Cluster name, and Region
+
+    :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-basics.png" alt-text="The screenshot of the Create - Basics Tab for an AKS Automatic cluster in the Azure portal.":::
+
+    If the prerequisites aren't met and the subscription requires registration of the preview flags, there will be an error shown under the Subscription field:
+
+    :::image type="content" source="./media/quick-automatic-kubernetes-portal/register.png" alt-text="The screenshot of the error shown when a subscription doesn't have preview flags registered while creating an AKS Automatic cluster in the Azure portal.":::
+
+
+3. On the **Monitoring** tab, choose your monitoring configurations from Azure Monitor, Managed Prometheus, Managed Grafana, and/or configure alerts. Add tags (optional), and proceed to create the cluster.
+
+    :::image type="content" source="./media/quick-automatic-kubernetes-portal/create-monitoring.png" alt-text="The screenshot of the Monitoring Tab while creating an AKS Automatic cluster in the Azure portal.":::
+
+4. Get started with configuring your first application from GitHub and set up an automated deployment pipeline.
+
+    :::image type="content" source="./media/quick-automatic-kubernetes-portal/automatic-overview.png" alt-text="The screenshot of the Get Started Tab on Overview Blade after creating an AKS Automatic cluster in the Azure portal.":::
+
+
+## Connect to the cluster
+
+To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. 
When you create a cluster using the Azure portal, your user is [assigned built-in roles][aks-entra-rbac-builtin-roles] for `Azure Kubernetes Service RBAC Cluster Admin`. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name myAKSAutomaticCluster +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. + +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` +:::zone-end + +:::zone target="docs" pivot="bicep" + +## Create a resource group + +An [Azure resource group][azure-resource-group] is a logical group in which Azure resources are deployed and managed. When you create a resource group, you're prompted to specify a location. This location is the storage location of your resource group metadata and where your resources run in Azure if you don't specify another region during resource creation. + +The following example creates a resource group named *myResourceGroup* in the *eastus* location. + +Create a resource group using the [az group create][az-group-create] command. 
+ +```azurecli +az group create --name myResourceGroup --location eastus +``` + +The following sample output resembles successful creation of the resource group: + +```output +{ + "id": "/subscriptions//resourceGroups/myResourceGroup", + "location": "eastus", + "managedBy": null, + "name": "myResourceGroup", + "properties": { + "provisioningState": "Succeeded" + }, + "tags": null +} +``` + +## Review the Bicep file + +This Bicep file defines an AKS Automatic cluster. While in preview, you need to specify the *system nodepool* agent pool profile. + +```bicep +@description('The name of the managed cluster resource.') +param clusterName string = 'myAKSAutomaticCluster' + +@description('The location of the managed cluster resource.') +param location string = resourceGroup().location + +resource aks 'Microsoft.ContainerService/managedClusters@2024-03-02-preview' = { + name: clusterName + location: location + sku: { + name: 'Automatic' + } + properties: { + agentPoolProfiles: [ + { + name: 'systempool' + mode: 'System' + count: 3 + } + ] + } + identity: { + type: 'SystemAssigned' + } +} +``` + +For more information about the resource defined in the Bicep file, see the [**Microsoft.ContainerService/managedClusters**](/azure/templates/microsoft.containerservice/managedclusters?tabs=bicep&pivots=deployment-language-bicep) reference. + +## Deploy the Bicep file + +1. Save the Bicep file as **main.bicep** to your local computer. + + > [!IMPORTANT] + > The Bicep file sets the `clusterName` param to the string *myAKSAutomaticCluster*. If you want to use a different cluster name, make sure to update the string to your preferred cluster name before saving the file to your computer. + +1. Deploy the Bicep file using the Azure CLI. + + ```azurecli + az deployment group create --resource-group myResourceGroup --template-file main.bicep + ``` + + It takes a few minutes to create the AKS cluster. Wait for the cluster to be successfully deployed before you move on to the next step. 
+ +## Connect to the cluster + +To manage a Kubernetes cluster, use the Kubernetes command-line client, [kubectl][kubectl]. `kubectl` is already installed if you use Azure Cloud Shell. To install `kubectl` locally, run the [az aks install-cli][az-aks-install-cli] command. AKS Automatic clusters are configured with [Microsoft Entra ID for Kubernetes role-based access control (RBAC)][aks-entra-rbac]. When you create a cluster using Bicep, you need to [assign one of the built-in roles][aks-entra-rbac-builtin-roles] such as `Azure Kubernetes Service RBAC Reader`, `Azure Kubernetes Service RBAC Writer`, `Azure Kubernetes Service RBAC Admin`, or `Azure Kubernetes Service RBAC Cluster Admin` to your users, scoped to the cluster or a specific namespace. Also make sure your users have the `Azure Kubernetes Service Cluster User` built-in role to be able to do run `az aks get-credentials`, and then get the kubeconfig of your AKS cluster using the `az aks get-credentials` command. + +Configure `kubectl` to connect to your Kubernetes cluster using the [az aks get-credentials][az-aks-get-credentials] command. This command downloads credentials and configures the Kubernetes CLI to use them. + +```azurecli +az aks get-credentials --resource-group myResourceGroup --name +``` + +Verify the connection to your cluster using the [kubectl get][kubectl-get] command. This command returns a list of the cluster nodes. + +```bash +kubectl get nodes +``` + +The following sample output will show how you're asked to log in. + +```output +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code AAAAAAAAA to authenticate. +``` + +After you log in, the following sample output shows the managed system node pools. Make sure the node status is *Ready*. 
+ +```output +NAME STATUS ROLES AGE VERSION +aks-nodepool1-13213685-vmss000000 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000001 Ready agent 2m26s v1.28.5 +aks-nodepool1-13213685-vmss000002 Ready agent 2m26s v1.28.5 +``` + +:::zone-end + + +## Deploy the application + +To deploy the application, you use a manifest file to create all the objects required to run the [AKS Store application](https://github.com/Azure-Samples/aks-store-demo). A [Kubernetes manifest file][kubernetes-deployment] defines a cluster's desired state, such as which container images to run. The manifest includes the following Kubernetes deployments and services: + +:::image type="content" source="media/quick-kubernetes-deploy-portal/aks-store-architecture.png" alt-text="Screenshot of Azure Store sample architecture." lightbox="media/quick-kubernetes-deploy-portal/aks-store-architecture.png"::: + +- **Store front**: Web application for customers to view products and place orders. +- **Product service**: Shows product information. +- **Order service**: Places orders. +- **Rabbit MQ**: Message queue for an order queue. + +> [!NOTE] +> We don't recommend running stateful containers, such as Rabbit MQ, without persistent storage for production. These are used here for simplicity, but we recommend using managed services, such as Azure Cosmos DB or Azure Service Bus. + +1. Create a namespace `aks-store-demo` to deploy the Kubernetes resources into. + + ```bash + kubectl create ns aks-store-demo + ``` + +1. Deploy the application using the [kubectl apply][kubectl-apply] command into the `aks-store-demo` namespace. The YAML file defining the deployment is on [GitHub](https://github.com/Azure-Samples/aks-store-demo). 
+ + ```bash + kubectl apply -n aks-store-demo -f https://raw.githubusercontent.com/Azure-Samples/aks-store-demo/main/aks-store-ingress-quickstart.yaml + ``` + + The following sample output shows the deployments and services: + + ```output + statefulset.apps/rabbitmq created + configmap/rabbitmq-enabled-plugins created + service/rabbitmq created + deployment.apps/order-service created + service/order-service created + deployment.apps/product-service created + service/product-service created + deployment.apps/store-front created + service/store-front created + ingress/store-front created + ``` + +## Test the application + +When the application runs, a Kubernetes service exposes the application front end to the internet. This process can take a few minutes to complete. + +1. Check the status of the deployed pods using the [kubectl get pods][kubectl-get] command. Make sure all pods are `Running` before proceeding. If this is the first workload you deploy, it may take a few minutes for [node auto provisioning][node-auto-provisioning] to create a node pool to run the pods. + + ```bash + kubectl get pods -n aks-store-demo + ``` + +1. Check for a public IP address for the store-front application. Monitor progress using the [kubectl get service][kubectl-get] command with the `--watch` argument. + + ```bash + kubectl get ingress store-front -n aks-store-demo --watch + ``` + + The **ADDRESS** output for the `store-front` service initially shows empty: + + ```output + NAME CLASS HOSTS ADDRESS PORTS AGE + store-front webapprouting.kubernetes.azure.com * 80 12m + ``` + +1. Once the **ADDRESS** changes from blank to an actual public IP address, use `CTRL-C` to stop the `kubectl` watch process. + + The following sample output shows a valid public IP address assigned to the service: + + ```output + NAME CLASS HOSTS ADDRESS PORTS AGE + store-front webapprouting.kubernetes.azure.com * 4.255.22.196 80 12m + ``` + +1. 
Open a web browser to the external IP address of your ingress to see the Azure Store app in action. + + :::image type="content" source="media/quick-kubernetes-deploy-cli/aks-store-application.png" alt-text="Screenshot of AKS Store sample application." lightbox="media/quick-kubernetes-deploy-cli/aks-store-application.png"::: + +## Delete the cluster + +If you don't plan on going through the [AKS tutorial][aks-tutorial], clean up unnecessary resources to avoid Azure charges. Run the [az group delete][az-group-delete] command to remove the resource group, container service, and all related resources. + + ```azurecli + az group delete --name myResourceGroup --yes --no-wait + ``` + > [!NOTE] + > The AKS cluster was created with a system-assigned managed identity, which is the default identity option used in this quickstart. The platform manages this identity, so you don't need to manually remove it. + +## Next steps + +In this quickstart, you deployed a Kubernetes cluster using [AKS Automatic][what-is-aks-automatic] and then deployed a simple multi-container application to it. This sample application is for demo purposes only and doesn't represent all the best practices for Kubernetes applications. For guidance on creating full solutions with AKS for production, see [AKS solution guidance][aks-solution-guidance]. + +To learn more about AKS Automatic, continue to the introduction. 
+ +> [!div class="nextstepaction"] +> [Introduction to Azure Kubernetes Service (AKS) Automatic (preview)][what-is-aks-automatic] + + + +[kubectl]: https://kubernetes.io/docs/reference/kubectl/ +[kubectl-apply]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#apply +[kubectl-get]: https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#get + + +[kubernetes-concepts]: ../concepts-clusters-workloads.md +[aks-tutorial]: ../tutorial-kubernetes-prepare-app.md +[azure-resource-group]: /azure/azure-resource-manager/management/overview +[az-aks-create]: /cli/azure/aks#az-aks-create +[az-aks-get-credentials]: /cli/azure/aks#az-aks-get-credentials +[az-aks-install-cli]: /cli/azure/aks#az-aks-install-cli +[az-group-create]: /cli/azure/group#az-group-create +[az-group-delete]: /cli/azure/group#az-group-delete +[node-auto-provisioning]: ../node-autoprovision.md +[kubernetes-deployment]: ../concepts-clusters-workloads.md#deployments-and-yaml-manifests +[aks-solution-guidance]: /azure/architecture/reference-architectures/containers/aks-start-here?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[baseline-reference-architecture]: /azure/architecture/reference-architectures/containers/aks/baseline-aks?toc=/azure/aks/toc.json&bc=/azure/aks/breadcrumb/toc.json +[az-feature-register]: /cli/azure/feature#az_feature_register +[az-feature-show]: /cli/azure/feature#az_feature_show +[az-provider-register]: /cli/azure/provider#az_provider_register +[what-is-aks-automatic]: ../intro-aks-automatic.md +[Azure-Policy-RBAC-permissions]: /azure/governance/policy/overview#azure-rbac-permissions-in-azure-policy +[aks-entra-rbac]: /azure/aks/manage-azure-rbac +[aks-entra-rbac-builtin-roles]: /azure/aks/manage-azure-rbac#create-role-assignments-for-users-to-access-the-cluster +[availability-zones]: /azure/reliability/availability-zones-region-support + From 13bc661585a13e8fafda99b716b8de748eb7f92f Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: 
Thu, 20 Feb 2025 17:28:23 -0800 Subject: [PATCH 25/31] WIP - got distracted so need to come back to this later --- examples/AZD/aks-store-demo/aks-store-demo.md | 189 ++++++++++++++++++ .../AZD/aks-store-demo/custom-values.yaml | 31 +++ 2 files changed, 220 insertions(+) create mode 100644 examples/AZD/aks-store-demo/aks-store-demo.md create mode 100644 examples/AZD/aks-store-demo/custom-values.yaml diff --git a/examples/AZD/aks-store-demo/aks-store-demo.md b/examples/AZD/aks-store-demo/aks-store-demo.md new file mode 100644 index 00000000..b1ca8be5 --- /dev/null +++ b/examples/AZD/aks-store-demo/aks-store-demo.md @@ -0,0 +1,189 @@ +# AKS Store Demo + +TODO: Move intro content from source document + +## Setup The Environment + +This document, and those listed in the prerequisites section use environment variables to make reuse easier. Defaults are provided in most documents, but for consistency and clarity we will define the ones we really care about here: + +```bash +export RANDOM_ID="$(openssl rand -hex 3)" +export MY_RESOURCE_GROUP_NAME="aks-store-demo-ResourceGroup-$RANDOM_ID" +export REGION="westus2" +export MY_AKS_CLUSTER_NAME="aks-store-demo-$RANDOM_ID" +export MY_DNS_LABEL="aks-store-dns-label-$RANDOM_ID" +``` + +## Prerequisites + + * Have an [active Azure Subscription (free subscriptions available) and an install of Azure CLI](../../Common/Prerequisite-AzureCLIAndSub.md) + * Ensure the [`az aks`](../../Common/Prerequisite-AzureCLI-ls AKS.md) commands are installed + * Install [Helm](../../Common/Prerequisites-Helm.md) - package manager for Kubernetes. 
+ * Install [Terraform](../../Common/Prerequisites-Terraform.md) - Infrastructure as Code management tool + * Have an existing [AKS Cluster](https://raw.githubusercontent.com/MicrosoftDocs/azure-aks-docs/refs/heads/main/articles/aks/learn/quick-kubernetes-deploy-cli.md) + +## Create the custom-values.yaml file + +```bash +cat << EOF > custom-values.yaml +namespace: ${AZURE_AKS_NAMESPACE} +EOF +``` + +## Add Azure Managed Identity and set to use AzureAD auth + +```bash +if [ -n "${AZURE_IDENTITY_CLIENT_ID}" ] && [ -n "${AZURE_IDENTITY_NAME}" ]; then + cat << EOF >> custom-values.yaml +useAzureAd: true +managedIdentityName: ${AZURE_IDENTITY_NAME} +managedIdentityClientId: ${AZURE_IDENTITY_CLIENT_ID} +EOF +fi +``` + +## Add base images + +```bash +cat << EOF >> custom-values.yaml +namespace: ${AZURE_AKS_NAMESPACE} +productService: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/product-service +storeAdmin: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/store-admin +storeFront: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/store-front +virtualCustomer: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/virtual-customer +virtualWorker: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/virtual-worker +EOF +``` + +## Add ai-service if Azure OpenAI endpoint is provided + +```bash +if [ -n "${AZURE_OPENAI_ENDPOINT}" ]; then + cat << EOF >> custom-values.yaml +aiService: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/ai-service + create: true + modelDeploymentName: ${AZURE_OPENAI_MODEL_NAME} + openAiEndpoint: ${AZURE_OPENAI_ENDPOINT} + useAzureOpenAi: true +EOF + + # If Azure identity does not exists, use the Azure OpenAI API key + if [ -z "${AZURE_IDENTITY_CLIENT_ID}" ] && [ -z "${AZURE_IDENTITY_NAME}" ]; then + cat << EOF >> custom-values.yaml + openAiKey: $(az keyvault secret show --name ${AZURE_OPENAI_KEY} --vault-name ${AZURE_KEY_VAULT_NAME} --query value -o tsv) +EOF + fi + + # If DALL-E model 
endpoint and name exists + if [ -n "${AZURE_OPENAI_DALL_E_ENDPOINT}" ] && [ -n "${AZURE_OPENAI_DALL_E_MODEL_NAME}" ]; then + cat << EOF >> custom-values.yaml + openAiDalleEndpoint: ${AZURE_OPENAI_DALL_E_ENDPOINT} + openAiDalleModelName: ${AZURE_OPENAI_DALL_E_MODEL_NAME} +EOF + fi +fi +``` + +## Add order-service +```bash +cat << EOF >> custom-values.yaml +orderService: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/order-service +EOF +``` + +## Add Azure Service Bus to order-service if provided +```bash + if [ -n "${AZURE_SERVICE_BUS_HOST}" ]; then + cat << EOF >> custom-values.yaml + queueHost: ${AZURE_SERVICE_BUS_HOST} +EOF + + # If Azure identity does not exists, use the Azure Service Bus credentials + if [ -z "${AZURE_IDENTITY_CLIENT_ID}" ] && [ -z "${AZURE_IDENTITY_NAME}" ]; then + cat << EOF >> custom-values.yaml + queuePort: "5671" + queueTransport: "tls" + queueUsername: ${AZURE_SERVICE_BUS_SENDER_NAME} + queuePassword: $(az keyvault secret show --name ${AZURE_SERVICE_BUS_SENDER_KEY} --vault-name ${AZURE_KEY_VAULT_NAME} --query value -o tsv) +EOF + fi +fi +``` + +## Add makeline-service + +```bash +cat << EOF >> custom-values.yaml +makelineService: + image: + repository: ${AZURE_REGISTRY_URI}/aks-store-demo/makeline-service +EOF +``` + +# Add Azure Service Bus to makeline-service if provided +```bash +if [ -n "${AZURE_SERVICE_BUS_URI}" ]; then + # If Azure identity exists just set the Azure Service Bus Hostname + if [ -n "${AZURE_IDENTITY_CLIENT_ID}" ] && [ -n "${AZURE_IDENTITY_NAME}" ]; then + cat << EOF >> custom-values.yaml + orderQueueHost: ${AZURE_SERVICE_BUS_HOST} +EOF + else + cat << EOF >> custom-values.yaml + orderQueueUri: ${AZURE_SERVICE_BUS_URI} + orderQueueUsername: ${AZURE_SERVICE_BUS_LISTENER_NAME} + orderQueuePassword: $(az keyvault secret show --name ${AZURE_SERVICE_BUS_LISTENER_KEY} --vault-name ${AZURE_KEY_VAULT_NAME} --query value -o tsv) +EOF + fi +fi +``` + +## Add Azure Cosmos DB to makeline-service if provided +```bash 
+if [ -n "${AZURE_COSMOS_DATABASE_URI}" ]; then + cat << EOF >> custom-values.yaml + orderDBApi: ${AZURE_DATABASE_API} + orderDBUri: ${AZURE_COSMOS_DATABASE_URI} +EOF + # If Azure identity does not exists, use the Azure Cosmos DB credentials + if [ -z "${AZURE_IDENTITY_CLIENT_ID}" ] && [ -z "${AZURE_IDENTITY_NAME}" ]; then + cat << EOF >> custom-values.yaml + orderDBUsername: ${AZURE_COSMOS_DATABASE_NAME} + orderDBPassword: $(az keyvault secret show --name ${AZURE_COSMOS_DATABASE_KEY} --vault-name ${AZURE_KEY_VAULT_NAME} --query value -o tsv) +EOF + fi +fi +``` + +## Do not deploy RabbitMQ when using Azure Service Bus + +```bash +if [ -n "${AZURE_SERVICE_BUS_HOST}" ]; then + cat << EOF >> custom-values.yaml +useRabbitMQ: false +EOF +fi +``` + +## Do not deploy MongoDB when using Azure Cosmos DB +```bash +if [ -n "${AZURE_COSMOS_DATABASE_URI}" ]; then + cat << EOF >> custom-values.yaml +useMongoDB: false +EOF +fi +``` \ No newline at end of file diff --git a/examples/AZD/aks-store-demo/custom-values.yaml b/examples/AZD/aks-store-demo/custom-values.yaml new file mode 100644 index 00000000..61ce15f1 --- /dev/null +++ b/examples/AZD/aks-store-demo/custom-values.yaml @@ -0,0 +1,31 @@ +namespace: +namespace: +productService: + image: + repository: /aks-store-demo/product-service +storeAdmin: + image: + repository: /aks-store-demo/store-admin +storeFront: + image: + repository: /aks-store-demo/store-front +virtualCustomer: + image: + repository: /aks-store-demo/virtual-customer +virtualWorker: + image: + repository: /aks-store-demo/virtual-worker +aiService: + image: + repository: /aks-store-demo/ai-service + create: true + modelDeploymentName: + openAiEndpoint: https://ada-naman.openai.azure.com/ + useAzureOpenAi: true + openAiKey: +orderService: + image: + repository: /aks-store-demo/order-service +makelineService: + image: + repository: /aks-store-demo/makeline-service From 55c297c70bcf3c48cd18af5d286c43da040d94b7 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 
20 Feb 2025 17:29:31 -0800 Subject: [PATCH 26/31] WIP: Open AI works in theory, now to integrate with the CLI --- examples/eg/Makefile | 36 ++ examples/eg/README.md | 153 ++++++++ examples/eg/cmd/ask.go | 152 ++++++++ examples/eg/cmd/root.go | 30 ++ examples/eg/go.mod | 25 ++ examples/eg/go.sum | 780 +++++++++++++++++++++++++++++++++++++ examples/eg/main.go | 11 + examples/eg/pkg/cli/cli.go | 20 + 8 files changed, 1207 insertions(+) create mode 100644 examples/eg/Makefile create mode 100644 examples/eg/README.md create mode 100644 examples/eg/cmd/ask.go create mode 100644 examples/eg/cmd/root.go create mode 100644 examples/eg/go.mod create mode 100644 examples/eg/go.sum create mode 100644 examples/eg/main.go create mode 100644 examples/eg/pkg/cli/cli.go diff --git a/examples/eg/Makefile b/examples/eg/Makefile new file mode 100644 index 00000000..347c812c --- /dev/null +++ b/examples/eg/Makefile @@ -0,0 +1,36 @@ +.PHONY: build-eg run-eg clean + +BINARY_DIR := bin +EG_BINARY := $(BINARY_DIR)/eg + +# -------------------------- Native build targets ------------------------------ + +RELEASE_BUILD := false +LATEST_TAG := $(shell git describe --tags --abbrev=0) +LATEST_COMMIT := $(shell git rev-parse --short HEAD) +BUILD_DATE := $(shell date -u '+%Y-%m-%dT%H:%M:%SZ') +MODULE_ROOT := $(shell go list -m) +build-eg: + @echo "Building EG CLI..." +ifeq ($(RELEASE_BUILD), true) + @CGO_ENABLED=0 go build -ldflags "-X $(MODULE_ROOT)/cmd/eg/commands.VERSION=$(LATEST_TAG) -X $(MODULE_ROOT)/cmd/eg/commands.COMMIT=$(LATEST_COMMIT) -X $(MODULE_ROOT)/cmd/eg/commands.DATE=$(BUILD_DATE)" -o "$(EG_BINARY)" main.go +else + @CGO_ENABLED=0 go build -ldflags "-X $(MODULE_ROOT)/cmd/eg/commands.VERSION=dev -X $(MODULE_ROOT)/cmd/eg/commands.COMMIT=$(LATEST_COMMIT) -X $(MODULE_ROOT)/cmd/eg/commands.DATE=$(BUILD_DATE)" -o "$(EG_BINARY)" main.go +endif + +# ------------------------------ Install targets ------------------------------- + +install-eg: + @echo "Installing the EG CLI..." 
+ @CGO_ENABLED=0 go install -ldflags "-X $(MODULE_ROOT)/cmd/eg/commands.VERSION=dev -X $(MODULE_ROOT)/cmd/eg/commands.COMMIT=$(LATEST_COMMIT) -X $(MODULE_ROOT)/cmd/eg/commands.DATE=$(BUILD_DATE)" main.go + +# ------------------------------- Run targets ---------------------------------- + +run-eg: build-eg + @echo "Running the EG CLI" + @"$(EG_BINARY)" + +clean: + @echo "Cleaning up" + @rm -rf "$(EG_DIR)" + diff --git a/examples/eg/README.md b/examples/eg/README.md new file mode 100644 index 00000000..4b15cb24 --- /dev/null +++ b/examples/eg/README.md @@ -0,0 +1,153 @@ +# EG + +EG (meaning "for example") is a command line tool that assists in finding, customizing and executing Executable Docs. It uses Innovation Engine (`IE`) to execute the docs and Copilot to discover and customize documents. + +## Setup + +You will need an active Azure OpenAI deployment to use this tool locally. To create one follow the steps below. + +## Prerequisites + +The following prerequisites are required to complete this workshop: + +- [Azure Subscription and Azure CLI](../Common/Prerequisite-AzureCLIAndSub.md) + +### Environment Varaibles + +In order to minimize the chance of errors and to facilitate reuse we will use Environment Variables for values we will use repeatedly in this document. For easy discovery we will use the prefix `EG_` on each variable name. The first time we encounter one of these variables in this document we will explain its purpose and a default value will be provided. + +The first variable we need is `EG_HASH` this is a random string of 8 characters that will be used to create unique values when required. + +```bash +export EG_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) +``` + +### Create an Azure resource group + +To create an Azure OpenAI resource, you need an Azure resource group. This will collect all of the Azure resources we need for this application to run. 
To create the Resource Group we will need a name and location: + +```bash +export EG_RESOURCE_GROUP=EG_dev_${EG_HASH} +export EG_LOCATION=eastus2 +``` + +To create the resource group us the `az group create` command: + +```bash +az group create \ +--name $EG_RESOURCE_GROUP \ +--location $EG_LOCATION +``` + +## Create an Azure OpenAI Resource + +We need an Azure OpenAI Resource, this will be configured with the following values: + +```bash +export EG_RESOURCE_KIND=AIServices +export EG_OPENAI_RESOURCE_NAME=EG_dev_${EG_HASH} +export EG_OPENAI_RESOURCE_SKU=S0 +``` + +Now you can use the [az cognitiveservices account create](/cli/azure/cognitiveservices/account?view=azure-cli-latest&preserve-view=true#az-cognitiveservices-account-create) command to create an Azure OpenAI resource in the resource group. + +```bash +az cognitiveservices account create \ +--name $EG_OPENAI_RESOURCE_NAME \ +--resource-group $EG_RESOURCE_GROUP \ +--location $EG_LOCATION \ +--kind $EG_RESOURCE_KIND \ +--sku $EG_OPENAI_RESOURCE_SKU +``` + +### Get the API key and endpoint URL + +We will need the endpoint URL and API key in environment variables in order to communicate with the resource. These environment variables will not use the `EG_` prefix because this is used later to output the values of the variables and these should be conisdered secure information. Leaving the `EG_` off prevents them from being output but this script. + +```bash +export OPENAI_API_ENDPOINT=$(az cognitiveservices account show \ +--name $EG_OPENAI_RESOURCE_NAME \ +--resource-group $EG_RESOURCE_GROUP \ +| jq -r .properties.endpoint) + +export OPENAI_API_KEY=$(az cognitiveservices account keys list \ +--name $EG_OPENAI_RESOURCE_NAME \ +--resource-group $EG_RESOURCE_GROUP \ +| jq -r .key1) +``` + +### Deploy a model + +Now we can deploy a model into the Open AI resource. 
This requires a couple more variables to be defined: + +```bash +export EG_DEPLOYMENT_NAME=EG_model_${EG_HASH} +export EG_MODEL=gpt-4o +export EG_MODEL_VERSION=2024-11-20 +export EG_MODEL_FORMAT=OpenAI +export EG_SKU=GlobalStandard +export EG_SKU_CAPACITY=8 +``` + +These settings will typicaly work, but be warned find the right combination can be quite a chore. + +```bash +az cognitiveservices account deployment create \ +--name $EG_OPENAI_RESOURCE_NAME \ +--resource-group $EG_RESOURCE_GROUP \ +--deployment-name $EG_DEPLOYMENT_NAME \ +--model-name $EG_MODEL \ +--model-version $EG_MODEL_VERSION \ +--model-format $EG_MODEL_FORMAT \ +--sku $EG_SKU \ +--capacity $EG_SKU_CAPACITY +``` + +Note that it can take a few minutes for your model to become available for use. + +### Review Environment Variables + +We now have an Azure OpenAI resource setup with a model deployed to it. Now would therefore be a good time to ensure that we have all the variables in one place for reference: + +```bash +printenv | grep '^EG_' +``` + +### Install the EG CLI application from source + +To install the CLI application from source you need to build the project from within the project root: + +```bash +make +``` + +The EG command requires that the `OPENAI_API_KEY` and `OPENAI_ENDPOINT` are set. These can be retrieved using the comamnds above and thus, if you have been following along will already be set in your environment. + +You can run the CLI application from source using: + +```bash +./bin/eg --help +``` + + +```text +EG is a Copilot for Executable Documentation. + +EG (meaning "for example") is a command line tool that assists in finding, customizing and executing Executable Docs.\n +Eg uses Copilot to interact with existing documentation in order to create custom executable docs.\n +It then uses Innovation Engine (IE) to execute these docs. + +[rest of help text] +``` + +## Usage + + + +## Contributing + +Contributions are welcome! 
Please open an issue or submit a pull request for any improvements or bug fixes. + +## License + +This project is licensed under the MIT License. See the LICENSE file for details. \ No newline at end of file diff --git a/examples/eg/cmd/ask.go b/examples/eg/cmd/ask.go new file mode 100644 index 00000000..4f194820 --- /dev/null +++ b/examples/eg/cmd/ask.go @@ -0,0 +1,152 @@ +package cmd + +import ( + "context" + "fmt" + "log" + "os" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/spf13/cobra" +) + +var promptCmd = &cobra.Command{ + Use: "ask [prompt]", + Short: "Ask a question, such as 'How do I deploy and AKS cluster with an API gateway?'", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + prompt := args[0] + + fmt.Printf("You asked: %s\n\n", prompt) + fmt.Print("Here are some suggested documents:\n\n") + + suggestedDocuments := getSuggestedDocuments(prompt) + for i, doc := range suggestedDocuments { + fmt.Printf("\t%d. 
%s\n", i+1, doc) + } + fmt.Println() + + requestAction(suggestedDocuments) + }, +} + +func getSuggestedDocuments(prompt string) []string { + results := []string{} + + azureOpenAIKey := os.Getenv("OPENAI_API_KEY") + if azureOpenAIKey == "" { + fmt.Fprintf(os.Stderr, "OPENAI_API_KEY environment variable not set\n") + return results + } + azureOpenAIEndpoint := os.Getenv("OPENAI_ENDPOINT") + if azureOpenAIEndpoint == "" { + fmt.Fprintf(os.Stderr, "OPENAI_ENDPOINT environment variable not set\n") + return results + } + modelDeploymentID := os.Getenv("OPENAI_MODEL_DEPLOYMENT_NAME") + if modelDeploymentID == "" { + fmt.Fprintf(os.Stderr, "OPENAI_MODEL_DEPLOYMENT_NAME environment variable not set\n") + return results + } + + maxTokens := int32(400) + keyCredential := azcore.NewKeyCredential(azureOpenAIKey) + client, err := azopenai.NewClientWithKeyCredential(azureOpenAIEndpoint, keyCredential, nil) + + if err != nil { + log.Printf("ERROR creating OpenAI Client: %s", err) + return results + } + + // This is a conversation in progress. + messages := []azopenai.ChatRequestMessageClassification{ + &azopenai.ChatRequestSystemMessage{Content: azopenai.NewChatRequestSystemMessageContent("You are a helpful assistant.")}, + + // The user asks a question + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("Does Azure OpenAI support customer managed keys?")}, + + // The reply would come back from the model. You'd add it to the conversation so we can maintain context. + &azopenai.ChatRequestAssistantMessage{Content: azopenai.NewChatRequestAssistantMessageContent("Yes, customer managed keys are supported by Azure OpenAI")}, + + // The user answers the question based on the latest reply. 
+ &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What other Azure Services support customer managed keys?")}, + + // from here you'd keep iterating, sending responses back from ChatGPT + } + + gotReply := false + + resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ + // This is a conversation in progress. + // NOTE: all messages count against token usage for this API. + Messages: messages, + DeploymentName: &modelDeploymentID, + MaxTokens: &maxTokens, + }, nil) + + if err != nil { + // TODO: Update the following line with your application specific error handling logic + log.Printf("ERROR: %s", err) + return results + } + + for _, choice := range resp.Choices { + gotReply = true + + if choice.ContentFilterResults != nil { + fmt.Fprintf(os.Stderr, "Content filter results\n") + + if choice.ContentFilterResults.Error != nil { + fmt.Fprintf(os.Stderr, " Error:%v\n", choice.ContentFilterResults.Error) + } + + fmt.Fprintf(os.Stderr, " Hate: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Hate.Severity, *choice.ContentFilterResults.Hate.Filtered) + fmt.Fprintf(os.Stderr, " SelfHarm: sev: %v, filtered: %v\n", *choice.ContentFilterResults.SelfHarm.Severity, *choice.ContentFilterResults.SelfHarm.Filtered) + fmt.Fprintf(os.Stderr, " Sexual: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Sexual.Severity, *choice.ContentFilterResults.Sexual.Filtered) + fmt.Fprintf(os.Stderr, " Violence: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Violence.Severity, *choice.ContentFilterResults.Violence.Filtered) + } + + if choice.Message != nil && choice.Message.Content != nil { + fmt.Fprintf(os.Stderr, "Content[%d]: %s\n", *choice.Index, *choice.Message.Content) + } + + if choice.FinishReason != nil { + // this choice's conversation is complete. 
+ fmt.Fprintf(os.Stderr, "Finish reason[%d]: %s\n", *choice.Index, *choice.FinishReason) + } + } + + if gotReply { + fmt.Fprintf(os.Stderr, "Received chat completions reply\n") + } + + return results +} + +func requestAction(suggestedDocuments []string) { + fmt.Println("Enter the ID number to view more information or enter `Q[uit]` to exit.") + + var input string + for { + fmt.Print("Your choice: ") + fmt.Scanln(&input) + if input == "Q" || input == "Quit" || input == "q" || input == "quit" { + fmt.Println("Exiting...") + return + } + + choice, err := strconv.Atoi(input) + if err != nil || choice < 1 || choice > len(suggestedDocuments) { + fmt.Println("Invalid choice. Please try again.") + continue + } + + fmt.Printf("Details about %s...\n", suggestedDocuments[choice-1]) + } +} + +func init() { + rootCmd.AddCommand(promptCmd) +} diff --git a/examples/eg/cmd/root.go b/examples/eg/cmd/root.go new file mode 100644 index 00000000..016a197b --- /dev/null +++ b/examples/eg/cmd/root.go @@ -0,0 +1,30 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "eg", + Short: "A CLI application to discover and work with Executable Docs using existing documentation as examples.", + Long: `EG (meaning "for example") is a command line tool that assists in finding, customizing and executing Executable Docs.\n +Eg uses Copilot to interact with existing documentation in order to create custom executable docs.\n +It then uses Innovation Engine (IE) to execute these docs.`, + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { + // Define any flags and configuration settings here +} diff --git a/examples/eg/go.mod b/examples/eg/go.mod new file mode 100644 index 00000000..72b8902a --- /dev/null +++ b/examples/eg/go.mod @@ -0,0 +1,25 @@ +module eg + +go 1.21 + +toolchain go1.22.2 + +require 
github.com/spf13/cobra v1.3.0 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/tidwall/gjson v1.14.4 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/text v0.21.0 // indirect +) + +require ( + github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.7.2 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/openai/openai-go v0.1.0-alpha.59 + github.com/spf13/pflag v1.0.5 // indirect +) diff --git a/examples/eg/go.sum b/examples/eg/go.sum new file mode 100644 index 00000000..6b2b8e8f --- /dev/null +++ b/examples/eg/go.sum @@ -0,0 +1,780 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod 
h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.7.2 h1:+hDUZnYHHoXu05iXiJcL53MZW7raZZejB8ZtzVW7yyc= +github.com/Azure/azure-sdk-for-go/sdk/ai/azopenai v0.7.2/go.mod h1:49PyorVrwk6G+e8Vghvn7EkAS6wSPdXEu5a8iW2/vC8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod 
h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fsnotify/fsnotify 
v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod 
h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= 
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod 
h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/openai/openai-go v0.1.0-alpha.59 h1:T3IYwKSCezfIlL9Oi+CGvU03fq0RoH33775S78Ti48Y= +github.com/openai/openai-go v0.1.0-alpha.59/go.mod h1:3SdE6BffOX9HPEQv8IL/fi3LYZ5TUpRYaqGQZbyk11A= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common 
v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag 
v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= 
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod 
h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc 
v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/examples/eg/main.go b/examples/eg/main.go new file mode 100644 index 00000000..f0e425df --- /dev/null +++ b/examples/eg/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "eg/cmd" + "fmt" +) + +func main() { + fmt.Print("EG is a Copilot for Executable Documentation.\n\n") + cmd.Execute() +} diff --git a/examples/eg/pkg/cli/cli.go b/examples/eg/pkg/cli/cli.go new file mode 100644 index 00000000..590d6ee8 --- /dev/null +++ b/examples/eg/pkg/cli/cli.go @@ -0,0 +1,20 @@ +package cli + +import ( + "fmt" + "os" +) + +func ExecuteCommand(command string) { + switch command { + case "hello": + HelloCommand() + default: + fmt.Println("Unknown command:", command) + os.Exit(1) + } +} + +func HelloCommand() { + fmt.Println("Hello, World!") +} \ No newline at end of file From ce72bf71c281317446e8fe1181ae6fcf525a53a2 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Fri, 21 Feb 2025 16:36:50 -0800 Subject: [PATCH 27/31] We are now getting an OpenAI response to our enquiry, though it's not a real enquiry yet. 
--- .gitignore | 3 +++ .vscode/launch.json | 19 +++++++++++++++++-- awesome-aks | 1 + examples/eg/README.md | 32 ++++++++++++++++++++++++++++---- examples/eg/cmd/ask.go | 31 ++++++++++++------------------- 5 files changed, 61 insertions(+), 25 deletions(-) create mode 160000 awesome-aks diff --git a/.gitignore b/.gitignore index 3eed7663..4a05f950 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ coverage.out # Ignore git repos checkout out by examples examples/awesome-aks + +# ignore initialization files +.openai diff --git a/.vscode/launch.json b/.vscode/launch.json index 7164fa96..a940aacf 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,6 +4,8 @@ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 "version": "0.2.0", "configurations": [ + + { "name": "Debug IE", "type": "go", @@ -22,13 +24,26 @@ }, { - "name": "Debug EG", + "name": "Debug EG Ask command", + "type": "go", + "request": "launch", + "mode": "debug", + "program": "${workspaceRoot}/examples/eg/main.go", + "cwd": "${workspaceRoot}/examples/eg", + "args": ["ask", "Say 'hello' in a random language."], + "showLog": true, + "console": "integratedTerminal", + "envFile": "${workspaceRoot}/examples/eg/.openai" + }, + + { + "name": "Debug EG Exec Doc", "type": "go", "request": "launch", "mode": "debug", "program": "${workspaceRoot}/cmd/ie/ie.go", "cwd": "${workspaceRoot}/examples/eg", - "args": ["test", "${workspaceRoot}/examples/eg/README.md"], + "args": ["execute", "${workspaceRoot}/examples/eg/README.md"], "showLog": true, "console": "integratedTerminal" }, diff --git a/awesome-aks b/awesome-aks new file mode 160000 index 00000000..7059c652 --- /dev/null +++ b/awesome-aks @@ -0,0 +1 @@ +Subproject commit 7059c652e797c91d79f7e23ebc4cdd4ee83d7f8e diff --git a/examples/eg/README.md b/examples/eg/README.md index 4b15cb24..ba1fc341 100644 --- a/examples/eg/README.md +++ b/examples/eg/README.md @@ -16,10 +16,12 @@ The following prerequisites are required to 
complete this workshop: In order to minimize the chance of errors and to facilitate reuse we will use Environment Variables for values we will use repeatedly in this document. For easy discovery we will use the prefix `EG_` on each variable name. The first time we encounter one of these variables in this document we will explain its purpose and a default value will be provided. -The first variable we need is `EG_HASH` this is a random string of 8 characters that will be used to create unique values when required. +The first variable we need is `EG_HASH` this is a random string of 8 characters that will be used to create unique values when required. However, if the environment already has a value for this we want to reuse the existing value so that we can reuse existing infrastructure.ll ```bash -export EG_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) +if [ -z "$EG_HASH" ]; then + export EG_HASH=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 8) +fi ``` ### Create an Azure resource group @@ -121,9 +123,21 @@ To install the CLI application from source you need to build the project from wi make ``` -The EG command requires that the `OPENAI_API_KEY` and `OPENAI_ENDPOINT` are set. These can be retrieved using the comamnds above and thus, if you have been following along will already be set in your environment. +The EG command requires that the `OPENAI_API_KEY`, `OPENAI_ENDPOINT` and `OPENAI_MODEL_DEEG_DEPLOYMENT_NAMEPLOYMENT_NAME` are set. These can be retrieved using the comamnds above and thus, if you have been following along will already be set in your environment. For convenience though we will save them in a local file for reference. Note that if you destroy the resources created by the above commands these values will need to be changed. 
-You can run the CLI application from source using: +```bash +echo "OPENAI_API_KEY=$OPENAI_API_KEY" > .openai +echo "OPENAI_ENDPOINT=$OPENAI_API_ENDPOINT" >> .openai +echo "EG_DEPLOYMENT_NAME=$EG_DEPLOYMENT_NAME" >> .openai +``` + +You can now setup future environments to use this deployment by running the following command: + +```bash +source .openai +``` + +To run the CLI application use: ```bash ./bin/eg --help @@ -142,7 +156,17 @@ It then uses Innovation Engine (IE) to execute these docs. ## Usage +The most common commands is `ask`. This command will pass a prompt tot he OpenAI instance we just created and output the results. For example: +```bash +./bin/eg ask "Say Hello in a random language." +``` + + + +```bash +end here so as not to delete resource +``` ## Contributing diff --git a/examples/eg/cmd/ask.go b/examples/eg/cmd/ask.go index 4f194820..bec85250 100644 --- a/examples/eg/cmd/ask.go +++ b/examples/eg/cmd/ask.go @@ -20,9 +20,15 @@ var promptCmd = &cobra.Command{ prompt := args[0] fmt.Printf("You asked: %s\n\n", prompt) - fmt.Print("Here are some suggested documents:\n\n") suggestedDocuments := getSuggestedDocuments(prompt) + + if len(suggestedDocuments) == 0 { + fmt.Println("No suggested documents found.") + return + } + + fmt.Print("Here are some suggested documents:\n\n") for i, doc := range suggestedDocuments { fmt.Printf("\t%d. 
%s\n", i+1, doc) } @@ -45,9 +51,9 @@ func getSuggestedDocuments(prompt string) []string { fmt.Fprintf(os.Stderr, "OPENAI_ENDPOINT environment variable not set\n") return results } - modelDeploymentID := os.Getenv("OPENAI_MODEL_DEPLOYMENT_NAME") + modelDeploymentID := os.Getenv("EG_DEPLOYMENT_NAME") if modelDeploymentID == "" { - fmt.Fprintf(os.Stderr, "OPENAI_MODEL_DEPLOYMENT_NAME environment variable not set\n") + fmt.Fprintf(os.Stderr, "EG_DEPLOYMENT_NAME environment variable not set\n") return results } @@ -65,13 +71,13 @@ func getSuggestedDocuments(prompt string) []string { &azopenai.ChatRequestSystemMessage{Content: azopenai.NewChatRequestSystemMessageContent("You are a helpful assistant.")}, // The user asks a question - &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("Does Azure OpenAI support customer managed keys?")}, + &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent(prompt)}, // The reply would come back from the model. You'd add it to the conversation so we can maintain context. - &azopenai.ChatRequestAssistantMessage{Content: azopenai.NewChatRequestAssistantMessageContent("Yes, customer managed keys are supported by Azure OpenAI")}, + //&azopenai.ChatRequestAssistantMessage{Content: azopenai.NewChatRequestAssistantMessageContent("Yes, customer managed keys are supported by Azure OpenAI")}, // The user answers the question based on the latest reply. 
- &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What other Azure Services support customer managed keys?")}, + //&azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent("What other Azure Services support customer managed keys?")}, // from here you'd keep iterating, sending responses back from ChatGPT } @@ -95,19 +101,6 @@ func getSuggestedDocuments(prompt string) []string { for _, choice := range resp.Choices { gotReply = true - if choice.ContentFilterResults != nil { - fmt.Fprintf(os.Stderr, "Content filter results\n") - - if choice.ContentFilterResults.Error != nil { - fmt.Fprintf(os.Stderr, " Error:%v\n", choice.ContentFilterResults.Error) - } - - fmt.Fprintf(os.Stderr, " Hate: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Hate.Severity, *choice.ContentFilterResults.Hate.Filtered) - fmt.Fprintf(os.Stderr, " SelfHarm: sev: %v, filtered: %v\n", *choice.ContentFilterResults.SelfHarm.Severity, *choice.ContentFilterResults.SelfHarm.Filtered) - fmt.Fprintf(os.Stderr, " Sexual: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Sexual.Severity, *choice.ContentFilterResults.Sexual.Filtered) - fmt.Fprintf(os.Stderr, " Violence: sev: %v, filtered: %v\n", *choice.ContentFilterResults.Violence.Severity, *choice.ContentFilterResults.Violence.Filtered) - } - if choice.Message != nil && choice.Message.Content != nil { fmt.Fprintf(os.Stderr, "Content[%d]: %s\n", *choice.Index, *choice.Message.Content) } From 59a8a4e07899bee5dac83dbd1be7deab0b38fe46 Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Fri, 21 Feb 2025 17:11:51 -0800 Subject: [PATCH 28/31] Add Naman's system prompt and make the debug config a real scenario. 
--- .vscode/launch.json | 2 +- examples/eg/cmd/ask.go | 14 ++- examples/eg/systemPrompt.txt | 237 +++++++++++++++++++++++++++++++++++ 3 files changed, 247 insertions(+), 6 deletions(-) create mode 100644 examples/eg/systemPrompt.txt diff --git a/.vscode/launch.json b/.vscode/launch.json index a940aacf..88644556 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -30,7 +30,7 @@ "mode": "debug", "program": "${workspaceRoot}/examples/eg/main.go", "cwd": "${workspaceRoot}/examples/eg", - "args": ["ask", "Say 'hello' in a random language."], + "args": ["ask", "Deploy a basic AKS cluster for testing purposes. This should focus on low cost as it is not intended for production."], "showLog": true, "console": "integratedTerminal", "envFile": "${workspaceRoot}/examples/eg/.openai" diff --git a/examples/eg/cmd/ask.go b/examples/eg/cmd/ask.go index bec85250..b680acbb 100644 --- a/examples/eg/cmd/ask.go +++ b/examples/eg/cmd/ask.go @@ -57,7 +57,7 @@ func getSuggestedDocuments(prompt string) []string { return results } - maxTokens := int32(400) + maxTokens := int32(10000) keyCredential := azcore.NewKeyCredential(azureOpenAIKey) client, err := azopenai.NewClientWithKeyCredential(azureOpenAIEndpoint, keyCredential, nil) @@ -66,9 +66,14 @@ func getSuggestedDocuments(prompt string) []string { return results } - // This is a conversation in progress. 
+ content, err := os.ReadFile("systemPrompt.txt") + if err != nil { + log.Printf("ERROR reading file: %s", err) + return results + } + messages := []azopenai.ChatRequestMessageClassification{ - &azopenai.ChatRequestSystemMessage{Content: azopenai.NewChatRequestSystemMessageContent("You are a helpful assistant.")}, + &azopenai.ChatRequestSystemMessage{Content: azopenai.NewChatRequestSystemMessageContent(string(content))}, // The user asks a question &azopenai.ChatRequestUserMessage{Content: azopenai.NewChatRequestUserMessageContent(prompt)}, @@ -85,8 +90,6 @@ func getSuggestedDocuments(prompt string) []string { gotReply := false resp, err := client.GetChatCompletions(context.TODO(), azopenai.ChatCompletionsOptions{ - // This is a conversation in progress. - // NOTE: all messages count against token usage for this API. Messages: messages, DeploymentName: &modelDeploymentID, MaxTokens: &maxTokens, @@ -98,6 +101,7 @@ func getSuggestedDocuments(prompt string) []string { return results } + // Parse the response for _, choice := range resp.Choices { gotReply = true diff --git a/examples/eg/systemPrompt.txt b/examples/eg/systemPrompt.txt new file mode 100644 index 00000000..65cd2d33 --- /dev/null +++ b/examples/eg/systemPrompt.txt @@ -0,0 +1,237 @@ +Exec Docs is a vehicle that transforms standard markdown into interactive, executable learning content, +allowing code commands within the document to be run step-by-step or “one-click”. This is powered by the +Innovation Engine, an open-source CLI tool that powers the execution and testing of these markdown scripts +and can integrate with automated CI/CD pipelines. + +You are an Exec Doc writing expert. + +You will either write a new exec doc from scratch if no doc is attached or update an existing one if it is attached. + +You must adhere to the following rules while presenting your output: + +### Prerequisites + +Check if all prerequisites below are met before writing the Exec Doc. 
+***If any of the below prerequisites are not met, then either add them to the Exec Doc in progress or find another valid doc that can fulfill them. +Do not move to the next step until then*** + +1. Ensure your Exec Doc is a markdown file. + + >**Note:** If you are converting an existing Azure Doc to an Exec Doc, you can either find it in your fork or copy the raw markdown content of the Azure Doc into a new markdown file in your local repo (this can be found by clicking "Raw" in the GitHub view of the Azure Doc). + +2. Ensure your Exec Doc is written with the LF line break type. + + **Example:** + + ![LF VSCode](https://github.com/MicrosoftDocs/executable-docs/assets/146123940/3501cd38-2aa9-4e98-a782-c44ae278fc21) + + >**Note:** The button will appear according to the IDE you are using. For the VS Code IDE, you can check this by clicking on the LF/CLRF button at the bottom right corner of the screen. + +3. Ensure all files that your Exec Doc references live under the same parent folder as your Exec Doc + + **Example:** + + If your Exec Doc ***my-exec-doc.md*** references a script file ***my-script.yaml*** within, the script file should be in the same folder as the Exec Doc. + + ```bash + ├── master-folder + │ └── parent-folder + │ ├── my-exec-doc.md + │ └── my-script.yaml + ``` + +4. Code blocks are used to provide examples, commands, or other code snippets in Exec Docs. They are distinguished by a triple backtick (```) at the start and end of the block. + + Ensure that the Exec Doc contains at least 1 code block and every input code block's type in the Exec Doc is taken from this list: + + - bash + - azurecli + - azure-cli-interactive + - azurecli-interactive + + **Example:** + + ```bash + az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION + ``` + + >**Note:** This rule does not apply to output code blocks, which are used to display the results of commands, scripts, or other operations. 
These blocks help in illustrating what the expected output should look like. They include, but are not limited to, the following types: _output, json, yaml, console, text, and log._ + + >**Note:** While Innovation Engine can _parse_ a code block of any type, given its current features, it can only _execute_ code blocks of the types above. So, it is important to ensure that the code blocks in your Exec Doc are of the types above. + +5. Headings are used to organize content in a document. The number of hashes indicates the level of the heading. For example, a single hash (#) denotes an h1 heading, two hashes (##) denote an h2 heading, and so on. Innovation Engine uses headings to structure the content of an Exec Doc and to provide a clear outline of the document's contents. + + Ensure there is at least one h1 heading in the Exec Doc, denoted by a single hash (#) at the start of the line. + + **Example:** + + ```markdown + # Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI + ``` + +### Writing Requirements + +6. Ensure that the Exec Doc does not include any commands or descriptions related to logging into Azure (e.g., `az login`) or setting the subscription ID. The user is expected to have already logged in to Azure and set their subscription beforehand. Do not include these commands or any descriptions about them in the Exec Doc. + +7. Ensure that the Exec Doc does not require any user interaction during its execution. The document should not include any commands or scripts that prompt the user for input or expect interaction with the terminal. All inputs must be predefined and handled automatically within the script. + +7. Appropriately add metadata at the start of the Exec Doc. Here are some mandatory fields: + + - title = the title of the Exec Doc + - description = the description of the Exec Doc + - ms.topic = what kind of a doc it is e.g. article, blog, etc. 
+ - ms.date = the date the Exec Doc was last updated by author + - author = author's GitHub username + - ms.author = author's username (e.g. Microsoft Alias) + - **ms.custom = comma-separated list of tags to identify the Exec Doc (innovation-engine is the one tag that is mandatory in this list)** + + **Example:** + + ```yaml + --- + title: 'Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using Azure CLI' + description: Learn how to quickly deploy a Kubernetes cluster and deploy an application in Azure Kubernetes Service (AKS) using Azure CLI. + ms.topic: quickstart + ms.date: 11/11/2021 + author: namanparikh + ms.author: namanaprikh + ms.custom: devx-track-azurecli, mode-api, innovation-engine, linux-related-content + --- + ``` + +7. Ensure the environment variable names are not placeholders i.e. <> but have a certain generic, useful name. For the location/region parameter, default to "WestUS2" or "centralindia". Additionally, appropriately add descriptions below every section explaining what is happening in that section in crisp but necessary detail so that the user can learn as they go. + +8. Don't start and end your answer with ``` backticks!!! Don't add backticks to the metadata at the top!!!. + +8. Ensure that any info, literally any info whether it is a comment, tag, description, etc., which is not within a code block remains unchanged. Preserve ALL details of the doc. + +8. Environment variables are dynamic values that store configuration settings, system paths, and other information that can be accessed throughout a doc. By using environment variables, you can separate configuration details from the code, making it easier to manage and deploy applications in an environment like Exec Docs. + + Declare environment variables _as they are being used_ in the Exec Doc using the export command. This is a best practice to ensure that the variables are accessible throughout the doc. 
+ + ### Example Exec Doc 1 - Environment variables declared at the _top_ of an Exec Doc, not declared as used + + **Environment Variables Section** + + We are at the start of the Exec Doc and are declaring environment variables that will be used throughout the doc. + + ```bash + export REGION="eastus" + ``` + + **Test Section** + + We are now in the middle of the Exec Doc and we will create a resource group. + + ```bash + az group create --name "MyResourceGroup" --location $REGION + ``` + + ### Example Exec Doc 2 - Environment Variables declared as used** + + **Test Section** + + We are in the middle of the Exec Doc and we will create a resource group. + + ```bash + export REGION="eastus" + export MY_RESOURCE_GROUP_NAME="MyResourceGroup" + az group create --name $MY_RESOURCE_GROUP_NAME --location $REGION + ``` + + >**Note:** If you are converting an existing Azure Doc to an Exec Doc and the Azure Doc does not environment variables at all, it is an Exec Doc writing best practice to add them. Additionally, if the Azure Doc has environment variables but they are not declared as they are being used, it is recommended to update them to follow this best practice. + + >**Note:** Don't have any spaces around the equal sign when declaring environment variables. + +9. A major component of Exec Docs is automated infrastructure deployment on the cloud. While testing the doc, if you do not update relevant environment variable names, the doc will fail when run/executed more than once as the resource group or other resources will already exist from the previous runs. + + Add a random suffix at the end of _relevant_ environment variable(s). The example below shows how this would work when you are creating a resource group. 
+ + **Example:** + + ```bash + export RANDOM_SUFFIX=$(openssl rand -hex 3) + export REGION="eastus" + az group create --name "MyResourceGroup$RANDOM_SUFFIX" --location $REGION + ``` + + >**Note:** Add a random suffix to relevant variables that are likely to be unique for each deployment, such as resource group names, VM names, and other resources that need to be uniquely identifiable. However, do not add a random suffix to variables that are constant or environment-specific, such as region, username, or configuration settings that do not change between deployments. + + >**Note:** You can generate your own random suffix or use the one provided in the example above. The `openssl rand -hex 3` command generates a random 3-character hexadecimal string. This string is then appended to the resource group name to ensure that the resource group name is unique for each deployment. + +10. In Exec Docs, result blocks are distinguished by a custom expected_similarity comment tag followed by a code block. These result blocks indicate to Innovation Engine what the minimum degree of similarity should be between the actual and the expected output of a code block (one which returns something in the terminal that is relevant to benchmark against). Learn More: [Result Blocks](https://github.com/Azure/InnovationEngine/blob/main/README.md#result-blocks). + + Add result block(s) below code block(s) that you would want Innovation Engine to verify i.e. code block(s) which produce an output in the terminal that is relevant to benchmark against. Follow these steps when adding a result block below a code block for the first time: + + - Check if the code block does not already have a result block below it. If it does, ensure the result block is formatted correctly, as shown in the example below, and move to the next code block. + - [Open Azure Cloudshell](https://ms.portal.azure.com/#cloudshell/) + - **[Optional]**: Set your active subscription to the one you are using to test Exec Docs. 
Ideally, this sub should have permissions to run commands in your tested Exec Docs. Run the following command: + + ```bash + az account set --subscription "" + ``` + - Run the command in the code block in cloudshell. If it returns an output that you would want Innovation Engine to verify, copy the output from the terminal and paste it in a new code block below the original code block. The way a result code block should be formatted has been shown below, in this case for the command [az group create --name "MyResourceGroup123" --location eastus](http://_vscodecontentref_/1). + + **Example:** + ```markdown + Results: + + + + ```JSON + {{ + "id": "/subscriptions/abcabc-defdef-ghighi-jkljkl/resourceGroups/MyResourceGroup123", + "location": "eastus", + "managedBy": null, + "name": "MyResourceGroup123", + "properties": {{ + "provisioningState": "Succeeded" + }}, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" + }} + ``` + ``` + - If you run into an error while executing a code block or the code block is running in an infinite loop, update the Exec Doc based on the error stack trace, restart/clear Cloudshell, and rerun the command block(s) from the start until you reach that command block. This is done to override any potential issues that may have occurred during the initial run. More guidance is given in the [FAQ section](#frequently-asked-questions-faqs) below. + + >**Note:** The expected similarity value is a percentage of similarity between 0 and 1 which specifies how closely the true output needs to match the template output given in the results block - 0 being no similarity, 1 being an exact match. If you are uncertain about the value, it is recommended to set the expected similarity to 0.3 i.e. 30% expected similarity to account for small variations. Once you have run the command multiple times and are confident that the output is consistent, you can adjust the expected similarity value accordingly. 
+ + >**Note:** If you are executing a command in Cloudshell which references a yaml/json file, you would need to create the yaml/json file in Cloudshell and then run the command. This is because Cloudshell does not support the execution of commands that reference local files. You can add the file via the cat command or by creating the file in the Cloudshell editor. + + >**Note:** Result blocks are not required but recommended for commands that return some output in the terminal. They help Innovation Engine verify the output of a command and act as checkpoints to ensure that the doc is moving in the right direction. + +11. Redacting PII from the output helps protect sensitive information from being inadvertently shared or exposed. This is crucial for maintaining privacy, complying with data protection regulations, and furthering the company's security posture. + + Ensure result block(s) have all the PII (Personally Identifiable Information) stricken out from them and replaced with x’s. + + **Example:** + + ```markdown + Results: + + + + ```JSON + {{ + "id": "/subscriptions/xxxxx-xxxxx-xxxxx-xxxxx/resourceGroups/MyResourceGroupxxx", + "location": "eastus", + "managedBy": null, + "name": "MyResourceGroupxxx", + "properties": {{ + "provisioningState": "Succeeded" + }}, + "tags": null, + "type": "Microsoft.Resources/resourceGroups" + }} + ``` + ``` + + >**Note:** The number of x's used to redact PII need not be the same as the number of characters in the original PII. Furthermore, it is recommended not to redact the key names in the output, only the values containing the PII (which are usually strings). + + >**Note:** Here are some examples of PII in result blocks: Unique identifiers for resources, Email Addresses, Phone Numbers, IP Addresses, Credit Card Numbers, Social Security Numbers (SSNs), Usernames, Resource Names, Subscription IDs, Resource Group Names, Tenant IDs, Service Principal Names, Client IDs, Secrets and Keys. + +12. 
If you are converting an existing Azure Doc to an Exec Doc and if the existing doc contains a "Delete Resources" (or equivalent section) comprising resource/other deletion command(s), remove the code blocks in that section or remove that section entirely + + >**Note:** We remove commands from this section ***only*** in Exec Docs. This is because Innovation Engine executes all relevant command(s) that it encounters, including deleting the resources. That would be counterproductive to automated deployment of cloud infrastructure + +## WRITE AND ONLY GIVE THE EXEC DOC USING THE ABOVE RULES FOR THE FOLLOWING WORKLOAD: \ No newline at end of file From b70de0a417531937a04ab849e1a59ec7db19679d Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Fri, 21 Feb 2025 20:08:56 -0800 Subject: [PATCH 29/31] Rename the ask command as write --- .vscode/launch.json | 2 +- examples/eg/cmd/{ask.go => write.go} | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) rename examples/eg/cmd/{ask.go => write.go} (92%) diff --git a/.vscode/launch.json b/.vscode/launch.json index 88644556..f0b34112 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -30,7 +30,7 @@ "mode": "debug", "program": "${workspaceRoot}/examples/eg/main.go", "cwd": "${workspaceRoot}/examples/eg", - "args": ["ask", "Deploy a basic AKS cluster for testing purposes. This should focus on low cost as it is not intended for production."], + "args": ["write", "Deploy a basic AKS cluster for testing purposes. 
This should focus on low cost as it is not intended for production."], "showLog": true, "console": "integratedTerminal", "envFile": "${workspaceRoot}/examples/eg/.openai" diff --git a/examples/eg/cmd/ask.go b/examples/eg/cmd/write.go similarity index 92% rename from examples/eg/cmd/ask.go rename to examples/eg/cmd/write.go index b680acbb..d4b788b7 100644 --- a/examples/eg/cmd/ask.go +++ b/examples/eg/cmd/write.go @@ -13,13 +13,13 @@ import ( ) var promptCmd = &cobra.Command{ - Use: "ask [prompt]", - Short: "Ask a question, such as 'How do I deploy and AKS cluster with an API gateway?'", + Use: "write [prompt]", + Short: "Write a new Exec Doc in response to a prompt, such as 'Deploy an AKS cluster to host an API gateway?'", Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { prompt := args[0] - fmt.Printf("You asked: %s\n\n", prompt) + fmt.Printf("Prompt: %s\n\n", prompt) suggestedDocuments := getSuggestedDocuments(prompt) From 8105a035294e00ba54623f69936a460537f339fb Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Mon, 10 Mar 2025 14:12:29 -0700 Subject: [PATCH 30/31] Update Check-VM-SKU-Availability.md Need to export Env Vars --- examples/VM/Check-VM-SKU-Availability.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/VM/Check-VM-SKU-Availability.md b/examples/VM/Check-VM-SKU-Availability.md index 4e41d26e..b6802fe3 100644 --- a/examples/VM/Check-VM-SKU-Availability.md +++ b/examples/VM/Check-VM-SKU-Availability.md @@ -6,7 +6,7 @@ We use enviroment variables to simplify commands, some of them will have been se ```bash export LOCATION=eastus -VM_SKU=Standard_D2als_v6 +export VM_SKU=Standard_D2als_v6 # export VM_SKU=Standard_L8s # this is an invalid VM_SKU for most users deliberately selected to create a failure in validation ``` From 2fbbd1470e17a6f5a9328d9ce0fd63793c2976eb Mon Sep 17 00:00:00 2001 From: rgardler-msft Date: Thu, 20 Mar 2025 10:49:04 -0700 Subject: [PATCH 31/31] A simpler debug command for testing. 
--- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index f0b34112..abbefc24 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -30,7 +30,7 @@ "mode": "debug", "program": "${workspaceRoot}/examples/eg/main.go", "cwd": "${workspaceRoot}/examples/eg", - "args": ["write", "Deploy a basic AKS cluster for testing purposes. This should focus on low cost as it is not intended for production."], + "args": ["write", "Add some documents to an Azure OpenAI model using RAG."], "showLog": true, "console": "integratedTerminal", "envFile": "${workspaceRoot}/examples/eg/.openai"