From ecb8c51b1a33db5b1a1f4802e781f205b79785ec Mon Sep 17 00:00:00 2001
From: Laura Jordana
Date: Fri, 26 Jul 2024 00:44:15 -0700
Subject: [PATCH 01/15] version GPT-in-a-Box as 1.0 (#64)

* update k8s release link to v0.2.2

* HF token no longer needed in run.sh

* add version to solution name
---
 docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md | 2 +-
 docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md | 2 +-
 docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md | 2 +-
 docs/gpt-in-a-box/overview.md | 4 ++--
 docs/gpt-in-a-box/support.md | 6 +++---
 docs/gpt-in-a-box/vm/v0.2/getting_started.md | 2 +-
 docs/gpt-in-a-box/vm/v0.3/getting_started.md | 2 +-
 docs/gpt-in-a-box/vm/v0.3/validated_models.md | 2 +-
 mkdocs.yml | 2 +-
 9 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md b/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md
index e26dfd54..3dcbb811 100644
--- a/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md
+++ b/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md
@@ -1,5 +1,5 @@
 # Getting Started
-This is a guide on getting started with GPT-in-a-Box deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).
+This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).

 ## Setup

diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md b/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md
index 7cb0b9d5..e3c52af2 100644
--- a/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md
+++ b/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md
@@ -1,5 +1,5 @@
 # Getting Started
-This is a guide on getting started with GPT-in-a-Box deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).
+This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).

 ## Setup

diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md b/docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md
index 3ed7c8b6..0e31b95a 100644
--- a/docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md
+++ b/docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md
@@ -1,6 +1,6 @@
 # Validated Models for Kubernetes Version

-GPT-in-a-Box has been validated on a curated set of HuggingFace models Information pertaining to these models is stored in the ```llm/model_config.json``` file.
+GPT-in-a-Box 1.0 has been validated on a curated set of HuggingFace models. Information pertaining to these models is stored in the ```llm/model_config.json``` file.

 The Validated Models are :

diff --git a/docs/gpt-in-a-box/overview.md b/docs/gpt-in-a-box/overview.md
index f9bd01f5..8f3d6c2b 100644
--- a/docs/gpt-in-a-box/overview.md
+++ b/docs/gpt-in-a-box/overview.md
@@ -1,6 +1,6 @@
-# Nutanix GPT-in-a-Box Documentation
+# Nutanix GPT-in-a-Box 1.0 Documentation

-Welcome to the official home dedicated to documenting how to run Nutanix GPT-in-a-Box. Nutanix GPT-in-a-Box is a new turnkey solution that includes everything needed to build AI-ready infrastructure. Here, you'll find information and code to run Nutanix GPT-in-a-Box on Virtual Machines or Kubernetes Clusters.
+Welcome to the official home dedicated to documenting how to run Nutanix GPT-in-a-Box 1.0. Nutanix GPT-in-a-Box 1.0 is a turnkey solution that includes everything needed to build AI-ready infrastructure. Here, you'll find information and code to run Nutanix GPT-in-a-Box 1.0 on Virtual Machines or Kubernetes Clusters.

 This new solution includes:

diff --git a/docs/gpt-in-a-box/support.md b/docs/gpt-in-a-box/support.md
index f2f75c64..7ac49a3c 100644
--- a/docs/gpt-in-a-box/support.md
+++ b/docs/gpt-in-a-box/support.md
@@ -1,14 +1,14 @@
-# Nutanix GPT-in-a-Box Support
+# Nutanix GPT-in-a-Box 1.0 Support

 Nutanix maintains public GitHub repositories for GPT in a box. Support is handled directly via the repository. Issues and enhancement requests can be submitted in the Issues tab of the relevant repository. Search for and review existing open issues before submitting a new issue. To report a new issue navigate to the GitHub repository:

 [GitHub - nutanix/nai-llm ](https://github.com/nutanix/nai-llm)

-This is the official repository for the virtual machine version of Nutanix GPT-in-a-Box.
+This is the official repository for the virtual machine version of Nutanix GPT-in-a-Box 1.0.

 [GitHub - nutanix/nai-llm-k8s](https://github.com/nutanix/nai-llm-k8s)

-This is the official repository for the Kubernetes version of Nutanix GPT-in-a-Box.
+This is the official repository for the Kubernetes version of Nutanix GPT-in-a-Box 1.0.

 The support procedure is documented in [KB 16159](https://portal.nutanix.com/page/documents/kbs/details?targetId=kA0VO0000000dJ70AI).

diff --git a/docs/gpt-in-a-box/vm/v0.2/getting_started.md b/docs/gpt-in-a-box/vm/v0.2/getting_started.md
index 8c3dad24..d5ac8d3e 100644
--- a/docs/gpt-in-a-box/vm/v0.2/getting_started.md
+++ b/docs/gpt-in-a-box/vm/v0.2/getting_started.md
@@ -1,5 +1,5 @@
 # Getting Started
-This is a guide on getting started with GPT-in-a-Box deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm).
+This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm).

 Tested Specifications:

diff --git a/docs/gpt-in-a-box/vm/v0.3/getting_started.md b/docs/gpt-in-a-box/vm/v0.3/getting_started.md
index c868c75d..2603c5fe 100644
--- a/docs/gpt-in-a-box/vm/v0.3/getting_started.md
+++ b/docs/gpt-in-a-box/vm/v0.3/getting_started.md
@@ -1,5 +1,5 @@
 # Getting Started
-This is a guide on getting started with GPT-in-a-Box deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm).
+This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm).

 Tested Specifications:

diff --git a/docs/gpt-in-a-box/vm/v0.3/validated_models.md b/docs/gpt-in-a-box/vm/v0.3/validated_models.md
index f92cd1dc..0f4aebd0 100644
--- a/docs/gpt-in-a-box/vm/v0.3/validated_models.md
+++ b/docs/gpt-in-a-box/vm/v0.3/validated_models.md
@@ -1,6 +1,6 @@
 # Validated Models for Virtual Machine Version

-GPT-in-a-Box has been validated on a curated set of HuggingFace models. Information pertaining to these models is stored in the ```llm/model_config.json``` file.
+GPT-in-a-Box 1.0 has been validated on a curated set of HuggingFace models. Information pertaining to these models is stored in the ```llm/model_config.json``` file.

 The Validated Models are :

diff --git a/mkdocs.yml b/mkdocs.yml
index d0aa99a2..dd0265cb 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -141,7 +141,7 @@ nav:
           - "Manual": "anthos/install/manual/index.md"
       - "Amazon EKS Anywhere":
           - "Install": "eksa/install/index.md"
-      - "GPT-in-a-Box":
+      - "GPT-in-a-Box 1.0":
           - "Overview": "gpt-in-a-box/overview.md"
          - "Deploy on Virtual Machine":
              - "v0.3":

From 8ac4de1998c05f85295007727f1accb558f1077d Mon Sep 17 00:00:00 2001
From: Christophe Jauffret
Date: Tue, 30 Jul 2024 19:35:44 +0200
Subject: [PATCH 02/15] update validation and add CAPX 1.4 (#65)

---
 docs/capx/latest | 2 +-
 docs/capx/v1.4.x/addons/install_csi_driver.md | 215 ++++++++++
 docs/capx/v1.4.x/credential_management.md | 93 ++++
 docs/capx/v1.4.x/experimental/autoscaler.md | 129 ++++++
 .../capx/v1.4.x/experimental/capx_multi_pe.md | 30 ++
 docs/capx/v1.4.x/experimental/oidc.md | 31 ++
 docs/capx/v1.4.x/experimental/proxy.md | 62 +++
 .../v1.4.x/experimental/registry_mirror.md | 96 +++++
 docs/capx/v1.4.x/experimental/vpc.md | 40 ++
 docs/capx/v1.4.x/getting_started.md | 159 +++++++
 docs/capx/v1.4.x/pc_certificates.md | 149 +++++++
 docs/capx/v1.4.x/port_requirements.md | 19 +
 .../tasks/capx_v14x_upgrade_procedure.md | 83 ++++
 .../tasks/modify_machine_configuration.md | 11 +
 docs/capx/v1.4.x/troubleshooting.md | 13 +
 docs/capx/v1.4.x/types/nutanix_cluster.md | 64 +++
 .../v1.4.x/types/nutanix_machine_template.md | 84 ++++
 docs/capx/v1.4.x/user_requirements.md | 36 ++
 docs/capx/v1.4.x/validated_integrations.md | 62 +++
 mkdocs.yml | 403 +++++++++--------
 20 files changed, 1590 insertions(+), 191 deletions(-)
 create mode 100644 docs/capx/v1.4.x/addons/install_csi_driver.md
 create mode 100644 docs/capx/v1.4.x/credential_management.md
 create mode 100644 docs/capx/v1.4.x/experimental/autoscaler.md
 create mode 100644 docs/capx/v1.4.x/experimental/capx_multi_pe.md
 create mode 100644 docs/capx/v1.4.x/experimental/oidc.md
 create mode 100644 docs/capx/v1.4.x/experimental/proxy.md
 create mode 100644 docs/capx/v1.4.x/experimental/registry_mirror.md
 create mode 100644 docs/capx/v1.4.x/experimental/vpc.md
 create mode 100644 docs/capx/v1.4.x/getting_started.md
 create mode 100644 docs/capx/v1.4.x/pc_certificates.md
 create mode 100644 docs/capx/v1.4.x/port_requirements.md
 create mode 100644 docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md
 create mode 100644 docs/capx/v1.4.x/tasks/modify_machine_configuration.md
 create mode 100644 docs/capx/v1.4.x/troubleshooting.md
 create mode 100644 docs/capx/v1.4.x/types/nutanix_cluster.md
 create mode 100644 docs/capx/v1.4.x/types/nutanix_machine_template.md
 create mode 100644 docs/capx/v1.4.x/user_requirements.md
 create mode 100644 docs/capx/v1.4.x/validated_integrations.md

diff --git a/docs/capx/latest b/docs/capx/latest
index 9ac194b0..25f6cdf3 120000
--- a/docs/capx/latest
+++ b/docs/capx/latest
@@ -1 +1 @@
-v1.3.x
\ No newline at end of file
+v1.4.x
\ No newline at end of file

diff --git a/docs/capx/v1.4.x/addons/install_csi_driver.md b/docs/capx/v1.4.x/addons/install_csi_driver.md
new file mode 100644
index 00000000..afb4bdc8
--- /dev/null
+++ b/docs/capx/v1.4.x/addons/install_csi_driver.md
@@ -0,0 +1,215 @@
# Nutanix CSI Driver installation with CAPX

The Nutanix CSI driver is fully supported on CAPI/CAPX deployed clusters where all the nodes meet the [Nutanix CSI driver prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).

There are three methods to install the Nutanix CSI driver on a CAPI/CAPX cluster:

- Helm
- ClusterResourceSet
- CAPX Flavor

For more information, check the next sections.

## CAPI Workload cluster prerequisites for the Nutanix CSI Driver

Kubernetes workers need the following prerequisites to use the Nutanix CSI Drivers:

- iSCSI initiator package (for Volumes based block storage)
- NFS client package (for Files based storage)

These packages may already be present in the image you use with your infrastructure provider, or you can rely on your bootstrap provider to install them. More info is available in the [Prerequisites docs](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-plugin-prerequisites-r.html){target=_blank}.

The package names and installation method will also vary depending on the operating system you plan to use.

In the example below, the `kubeadm` bootstrap provider is used to deploy these packages on top of an Ubuntu 20.04 image. The `kubeadm` bootstrap provider allows defining `preKubeadmCommands` that will be launched before Kubernetes cluster creation. These `preKubeadmCommands` can be defined both in `KubeadmControlPlane` for master nodes and in `KubeadmConfigTemplate` for worker nodes.

For an Ubuntu 20.04 image, both `KubeadmControlPlane` and `KubeadmConfigTemplate` must be modified as shown below:

```yaml
spec:
  template:
    spec:
      # .......
      preKubeadmCommands:
      - echo "before kubeadm call" > /var/log/prekubeadm.log
      - apt update
      - apt install -y nfs-common open-iscsi
      - systemctl enable --now iscsid
```

## Install the Nutanix CSI Driver with Helm

A recent [Helm](https://helm.sh){target=_blank} version is needed (tested with Helm v3.10.1).

The example below must be applied on a ready workload cluster. The workload cluster's kubeconfig can be retrieved and used to connect with the following commands:

```shell
clusterctl get kubeconfig $CLUSTER_NAME -n $CLUSTER_NAMESPACE > $CLUSTER_NAME-KUBECONFIG
export KUBECONFIG=$(pwd)/$CLUSTER_NAME-KUBECONFIG
```

Once connected to the cluster, follow the [CSI documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-driver-install-t.html){target=_blank}.

First, install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot){target=_blank} chart followed by the [nutanix-csi-storage](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage){target=_blank} chart.

See an example below:

```shell
# Add the official Nutanix Helm repo and get the latest update
helm repo add nutanix https://nutanix.github.io/helm/
helm repo update

# Install the nutanix-csi-snapshot chart
helm install nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system --create-namespace

# Install the nutanix-csi-storage chart
helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --set createSecret=false
```

!!! warning
    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
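
Once both charts are installed, a quick sanity check is to confirm that the driver pods come up before moving on; a minimal sketch, assuming the `ntnx-system` namespace used above:

```shell
# The controller pods and the per-node plugin pods should reach the Running state
kubectl -n ntnx-system get pods
```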

## Install the Nutanix CSI Driver with `ClusterResourceSet`

The `ClusterResourceSet` feature was introduced to automatically apply a set of resources (such as CNI/CSI) defined by administrators to matching created/existing workload clusters.

### Enabling the `ClusterResourceSet` feature

At the time of writing, `ClusterResourceSet` is an experimental feature that must be enabled during the initialization of a management cluster with the `EXP_CLUSTER_RESOURCE_SET` feature gate.

To do this, add `EXP_CLUSTER_RESOURCE_SET: "true"` in the `clusterctl` configuration file or just `export EXP_CLUSTER_RESOURCE_SET=true` before initializing the management cluster with `clusterctl init`.

If the management cluster is already initialized, the `ClusterResourceSet` feature can be enabled by changing the configuration of the `capi-controller-manager` deployment in the `capi-system` namespace.

```shell
kubectl edit deployment -n capi-system capi-controller-manager
```

Locate the section below:

```yaml
    - args:
        - --leader-elect
        - --metrics-bind-addr=localhost:8080
        - --feature-gates=MachinePool=false,ClusterResourceSet=false,ClusterTopology=false
```

Then replace `ClusterResourceSet=false` with `ClusterResourceSet=true`.

!!! note
    Editing the `deployment` resource will cause Kubernetes to automatically start new versions of the containers with the feature enabled.

### Prepare the Nutanix CSI `ClusterResourceSet`

#### Create the `ConfigMap` for the CSI Plugin

First, create a `ConfigMap` that contains a YAML manifest with all resources to install the Nutanix CSI driver.

Since the Nutanix CSI Driver is provided as a Helm chart, use `helm` to extract it before creating the `ConfigMap`. See an example below:

```shell
helm repo add nutanix https://nutanix.github.io/helm/
helm repo update

kubectl create ns ntnx-system --dry-run=client -o yaml > nutanix-csi-namespace.yaml
helm template nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system > nutanix-csi-snapshot.yaml
helm template nutanix-csi-storage nutanix/nutanix-csi-storage -n ntnx-system > nutanix-csi-storage.yaml

kubectl create configmap nutanix-csi-crs --from-file=nutanix-csi-namespace.yaml --from-file=nutanix-csi-snapshot.yaml --from-file=nutanix-csi-storage.yaml
```

#### Create the `ClusterResourceSet`

Next, create the `ClusterResourceSet` resource that will map the `ConfigMap` defined above to clusters using a `clusterSelector`.

The `ClusterResourceSet` needs to be created inside the management cluster. See an example below:

```yaml
---
apiVersion: addons.cluster.x-k8s.io/v1alpha3
kind: ClusterResourceSet
metadata:
  name: nutanix-csi-crs
spec:
  clusterSelector:
    matchLabels:
      csi: nutanix
  resources:
    - kind: ConfigMap
      name: nutanix-csi-crs
```

The `clusterSelector` field controls how Cluster API will match this `ClusterResourceSet` on one or more workload clusters. In this example, the `matchLabels` approach is used: the `ClusterResourceSet` will be applied to all workload clusters that have the `csi: nutanix` label present. If the label isn't present, the `ClusterResourceSet` won't apply to that workload cluster.

The `resources` field references the `ConfigMap` created above, which contains the manifests for installing the Nutanix CSI driver.

#### Assign the `ClusterResourceSet` to a workload cluster

Assign this `ClusterResourceSet` to the workload cluster by adding the correct label to the `Cluster` resource.

This can be done before workload cluster creation by editing the output of the `clusterctl generate cluster` command, or by modifying an already deployed workload cluster. In both cases, the `Cluster` resource should look like this:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: workload-cluster-name
  namespace: workload-cluster-namespace
  labels:
    csi: nutanix
# ...
```
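
For a workload cluster that is already deployed, the same label can also be applied without editing any YAML; a minimal sketch, assuming the hypothetical cluster name and namespace from the example above:

```shell
# The ClusterResourceSet's clusterSelector matches on this label
kubectl label cluster workload-cluster-name -n workload-cluster-namespace csi=nutanix
```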

!!! warning
    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.

## Install the Nutanix CSI Driver with a CAPX flavor

The CAPX provider can utilize a flavor to automatically deploy the Nutanix CSI driver using a `ClusterResourceSet`.

### Prerequisites

The following requirements must be met:

- The operating system must meet the [Nutanix CSI OS prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
- The management cluster must be installed with the [`CLUSTER_RESOURCE_SET` feature gate](#enabling-the-clusterresourceset-feature).

### Installation

Specify the `csi` flavor during workload cluster creation. See an example below:

```shell
clusterctl generate cluster my-cluster -f csi
```

Additional environment variables are required:

- `WEBHOOK_CA`: Base64 encoded CA certificate used to sign the webhook certificate
- `WEBHOOK_CERT`: Base64 certificate for the webhook validation component
- `WEBHOOK_KEY`: Base64 key for the webhook validation component

The three components referenced above can be automatically created and referenced using [this script](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/main/scripts/gen-self-cert.sh){target=_blank}:

```shell
source scripts/gen-self-cert.sh
```

The certificate must reference the following names:

- csi-snapshot-webhook
- csi-snapshot-webhook.ntnx-system
- csi-snapshot-webhook.ntnx-system.svc

!!! warning
    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.

## Nutanix CSI Driver Configuration

After the driver is installed, it must be configured for use by minimally defining a `Secret` and `StorageClass`.

This can be done manually in the workload clusters or by using a `ClusterResourceSet` in the management cluster as explained above.

See the official [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:CSI-Volume-Driver-v2_6){target=_blank} on the Nutanix Portal for more configuration information.

diff --git a/docs/capx/v1.4.x/credential_management.md b/docs/capx/v1.4.x/credential_management.md
new file mode 100644
index 00000000..bebbc5a0
--- /dev/null
+++ b/docs/capx/v1.4.x/credential_management.md
@@ -0,0 +1,93 @@
# Credential Management
Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs to manage the required Kubernetes cluster infrastructure resources.

PC credentials are required to authenticate to the PC APIs. CAPX currently supports two mechanisms to supply the required credentials:

- Credentials injected into the CAPX manager deployment
- Workload cluster specific credentials

## Credentials injected into the CAPX manager deployment
By default, credentials will be injected into the CAPX manager deployment when CAPX is initialized. See the [getting started guide](./getting_started.md) for more information on the initialization.

Upon initialization, a `nutanix-creds` secret will automatically be created in the `capx-system` namespace. This secret will contain the values supplied via the `NUTANIX_USER` and `NUTANIX_PASSWORD` parameters.

The `nutanix-creds` secret will be used for workload cluster deployment if no other credential is supplied.

### Example
An example of the automatically created `nutanix-creds` secret can be found below:
```yaml
---
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: nutanix-creds
  namespace: capx-system
stringData:
  credentials: |
    [
      {
        "type": "basic_auth",
        "data": {
          "prismCentral":{
            "username": "<username>",
            "password": "<password>"
          },
          "prismElements": null
        }
      }
    ]
```

## Workload cluster specific credentials
Users can override the [credentials injected into the CAPX manager deployment](#credentials-injected-into-the-capx-manager-deployment) by supplying a credential specific to a workload cluster. The credentials can be supplied by creating a secret in the same namespace as the `NutanixCluster` resource.

The secret can be referenced by adding a `credentialRef` inside the `prismCentral` attribute contained in the `NutanixCluster`.
The secret will also be deleted when the `NutanixCluster` is deleted.

Note: There is a 1:1 relation between the secret and the `NutanixCluster` object.

### Example
Create a secret in the namespace of the `NutanixCluster`:

```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: "<secret-name>"
  namespace: "<nutanixcluster-namespace>"
stringData:
  credentials: |
    [
      {
        "type": "basic_auth",
        "data": {
          "prismCentral":{
            "username": "<username>",
            "password": "<password>"
          },
          "prismElements": null
        }
      }
    ]
```

Add a `prismCentral` and corresponding `credentialRef` to the `NutanixCluster`:

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: "<nutanixcluster-name>"
  namespace: "<nutanixcluster-namespace>"
spec:
  prismCentral:
    ...
    credentialRef:
      name: "<secret-name>"
      kind: Secret
...
```

See the [NutanixCluster](./types/nutanix_cluster.md) documentation for all supported configuration parameters for the `prismCentral` and `credentialRef` attributes.
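
The credentials secret can also be created directly from a file instead of hand-written YAML; a minimal sketch, assuming the JSON credentials array above is saved as `credentials.json` and using hypothetical names:

```shell
# The key name must be "credentials", matching the stringData key shown above
kubectl create secret generic my-workload-cluster-creds \
  -n my-workload-cluster-namespace \
  --from-file=credentials=credentials.json
```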

diff --git a/docs/capx/v1.4.x/experimental/autoscaler.md b/docs/capx/v1.4.x/experimental/autoscaler.md
new file mode 100644
index 00000000..2af57213
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/autoscaler.md
@@ -0,0 +1,129 @@
# Using Autoscaler in combination with CAPX

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

[Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank} can be used in combination with Cluster API to automatically add or remove machines in a cluster.

Autoscaler can be used in different deployment scenarios. This page will provide an overview of multiple Autoscaler deployment scenarios in combination with CAPX.
See the [Testing](#testing) section to see how scale-up/scale-down events can be triggered to validate the Autoscaler behaviour.

More in-depth information on Autoscaler functionality can be found in the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank}.

All Autoscaler configuration parameters can be found [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank}.

## Scenario 1: Management cluster managing an external workload cluster
In this scenario, Autoscaler will be running on a management cluster and it will manage an external workload cluster. See the management cluster managing an external workload cluster section of the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster){target=_blank} for more information.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.

    !!! note
        Make sure a CNI is installed in the workload cluster.

2. Download the example [Autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
3. Modify the `deployment.yaml` file:
    - Change the namespace of all resources to the namespace of the workload cluster.
    - Choose an Autoscaler image.
    - Change the following parameters in the `Deployment` resource:
```YAML
    spec:
      containers:
      - name: cluster-autoscaler
        command:
        - /cluster-autoscaler
        args:
        - --cloud-provider=clusterapi
        - --kubeconfig=/mnt/kubeconfig/kubeconfig.yml
        - --clusterapi-cloud-config-authoritative
        - -v=1
        volumeMounts:
        - mountPath: /mnt/kubeconfig
          name: kubeconfig
          readOnly: true
      ...
      volumes:
      - name: kubeconfig
        secret:
          secretName: <workload-cluster-name>-kubeconfig
          items:
          - key: value
            path: kubeconfig.yml
```
4. Apply the `deployment.yaml` file.
```bash
kubectl apply -f deployment.yaml
```
5. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
6. Test Autoscaler. Go to the [Testing](#testing) section.

## Scenario 2: Autoscaler running on workload cluster
In this scenario, Autoscaler will be deployed [on top of the workload cluster](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-a-joined-cluster-using-service-account-credentials){target=_blank} directly. In order for Autoscaler to work, it is required that the workload cluster resources are moved from the management cluster to the workload cluster.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
2. Get the kubeconfig file for the workload cluster and use this kubeconfig to log in to the workload cluster.
```bash
clusterctl get kubeconfig <workload-cluster-name> -n <workload-cluster-namespace> > /path/to/kubeconfig
```
3. Install a CNI in the workload cluster.
4. Initialise the CAPX components on top of the workload cluster:
```bash
clusterctl init --infrastructure nutanix
```
5. Migrate the workload cluster custom resources to the workload cluster. Run the following command from the management cluster:
```bash
clusterctl move -n <workload-cluster-namespace> --to-kubeconfig /path/to/kubeconfig
```
6. Verify that the cluster has been migrated by running the following command on the workload cluster:
```bash
kubectl get cluster -A
```
7. Download the example [autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
8. Create the Autoscaler namespace:
```bash
kubectl create ns autoscaler
```
9. Apply the `deployment.yaml` file:
```bash
kubectl apply -f deployment.yaml
```
10. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
11. Test Autoscaler. Go to the [Testing](#testing) section.

## Testing

1. Deploy an example Kubernetes application. For example, the one used in the [Kubernetes HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
```bash
kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
```
2. Increase the number of replicas of the application to trigger a scale-up event:
```
kubectl scale deployment php-apache --replicas 100
```
3. Decrease the number of replicas of the application again to trigger a scale-down event.

    !!! note
        In case of issues, check the logs of the Autoscaler pods.

4. After a while, CAPX will add more machines. Refer to the [Autoscaler configuration parameters](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank} to tweak the behaviour and timeouts.

## Autoscaler node group annotations
Autoscaler uses the following annotations to define the upper and lower boundaries of the managed machines:

| Annotation                                                  | Example Value | Description                                   |
|-------------------------------------------------------------|---------------|-----------------------------------------------|
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size | 5             | Maximum number of machines in this node group |
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size | 1             | Minimum number of machines in this node group |

These annotations must be applied to the `MachineDeployment` resources of a CAPX cluster.

### Example
```YAML
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
```
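
The annotations can also be added to a live `MachineDeployment` without editing its manifest; a minimal sketch, assuming a hypothetical MachineDeployment named `mytestcluster1-wmd` in namespace `mytestnamespace`:

```bash
kubectl annotate machinedeployment mytestcluster1-wmd -n mytestnamespace \
  cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size="1" \
  cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size="5"
```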

diff --git a/docs/capx/v1.4.x/experimental/capx_multi_pe.md b/docs/capx/v1.4.x/experimental/capx_multi_pe.md
new file mode 100644
index 00000000..bd52ccd7
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/capx_multi_pe.md
@@ -0,0 +1,30 @@
# Creating a workload CAPX cluster spanning Prism Element clusters

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

This page will explain how to deploy CAPX-based Kubernetes clusters where worker nodes are spanning multiple Prism Element (PE) clusters.

!!! note
    All the PE clusters must be managed by the same Prism Central (PC) instance.

The topology will look like this:

- One PC managing multiple PEs
- One CAPI management cluster
- One CAPI workload cluster with multiple `MachineDeployment` resources

Refer to the [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to get started with CAPX.

To create workload clusters spanning multiple Prism Element clusters, it is required to create a `MachineDeployment` and `NutanixMachineTemplate` resource for each Prism Element cluster. The Prism Element specific parameters (name/UUID, subnet, ...) are referenced in the `NutanixMachineTemplate`.

## Steps
1. Create a management cluster that has the CAPX infrastructure provider deployed.
2. Create a `cluster.yaml` file containing the workload cluster definition. Refer to the steps defined in the [CAPI quickstart guide](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to create an example `cluster.yaml` file.
3. Add additional `MachineDeployment` and `NutanixMachineTemplate` resources.

    By default, there is only one machine template and machine deployment defined. To add nodes residing on another Prism Element cluster, a new `MachineDeployment` and `NutanixMachineTemplate` resource needs to be added to the YAML file. The autogenerated `MachineDeployment` and `NutanixMachineTemplate` resource definitions can be used as a baseline.

    Make sure to modify the `MachineDeployment` and `NutanixMachineTemplate` parameters.

4. Apply the modified `cluster.yaml` file to the management cluster.

diff --git a/docs/capx/v1.4.x/experimental/oidc.md b/docs/capx/v1.4.x/experimental/oidc.md
new file mode 100644
index 00000000..0c274121
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/oidc.md
@@ -0,0 +1,31 @@
# OIDC integration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

Kubernetes allows users to authenticate using various authentication mechanisms. One of these mechanisms is OIDC. Information on how Kubernetes interacts with OIDC providers can be found in the [OpenID Connect Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens){target=_blank} section of the official Kubernetes documentation.

Follow the steps below to configure a CAPX cluster to use an OIDC identity provider.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
3. Modify/add the `spec.kubeadmConfigSpec.clusterConfiguration.apiServer.extraArgs` attribute and add the required [API server parameters](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server){target=_blank}. See the [example](#example) below.
4. Apply the `cluster.yaml` file.
5. Log in with the OIDC provider once the cluster is provisioned.

## Example
```YAML
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          ...
          oidc-client-id: <oidc-client-id>
          oidc-issuer-url: <oidc-issuer-url>
          ...
```
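
With the API server configured, clients still need a way to obtain and present ID tokens when calling the cluster. A sketch of a kubeconfig user entry using the community `kubelogin` (`kubectl oidc-login`) plugin; the plugin and both placeholder values are assumptions, not something the CAPX templates provide:

```YAML
users:
- name: oidc-user
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: kubectl
      args:
      - oidc-login
      - get-token
      - --oidc-issuer-url=<oidc-issuer-url>
      - --oidc-client-id=<oidc-client-id>
```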

diff --git a/docs/capx/v1.4.x/experimental/proxy.md b/docs/capx/v1.4.x/experimental/proxy.md
new file mode 100644
index 00000000..c8f940d4
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/proxy.md
@@ -0,0 +1,62 @@
# Proxy configuration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

CAPX can be configured to use a proxy to connect to external networks. This proxy configuration needs to be applied to control plane and worker nodes.

Follow the steps below to configure a CAPX cluster to use a proxy.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration.
    1. `KubeadmControlPlane`:
        * Add the proxy configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
        * Add `systemctl` commands to apply the proxy config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list.
    2. `KubeadmConfigTemplate`:
        * Add the proxy configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
        * Add `systemctl` commands to apply the proxy config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list.
3. Apply the `cluster.yaml` file.

## Example

```YAML
---
# controlplane proxy settings
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        [Service]
        Environment="HTTP_PROXY=<proxy-url>"
        Environment="HTTPS_PROXY=<proxy-url>"
        Environment="NO_PROXY=<no-proxy-list>"
      owner: root:root
      path: /etc/systemd/system/containerd.service.d/http-proxy.conf
    ...
    preKubeadmCommands:
    - sudo systemctl daemon-reload
    - sudo systemctl restart containerd
    ...
---
# worker proxy settings
kind: KubeadmConfigTemplate
spec:
  template:
    spec:
      files:
      - content: |
          [Service]
          Environment="HTTP_PROXY=<proxy-url>"
          Environment="HTTPS_PROXY=<proxy-url>"
          Environment="NO_PROXY=<no-proxy-list>"
        owner: root:root
        path: /etc/systemd/system/containerd.service.d/http-proxy.conf
      ...
      preKubeadmCommands:
      - sudo systemctl daemon-reload
      - sudo systemctl restart containerd
      ...
```
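
After the nodes come up, it is easy to confirm on a node that containerd picked up the drop-in; a quick sketch:

```shell
# Should list the HTTP_PROXY/HTTPS_PROXY/NO_PROXY values from the drop-in file
systemctl show containerd --property=Environment
```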

diff --git a/docs/capx/v1.4.x/experimental/registry_mirror.md b/docs/capx/v1.4.x/experimental/registry_mirror.md
new file mode 100644
index 00000000..307a9425
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/registry_mirror.md
@@ -0,0 +1,96 @@
# Registry Mirror configuration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

CAPX can be configured to use a private registry to act as a mirror of an external public registry. This registry mirror configuration needs to be applied to control plane and worker nodes.

Follow the steps below to configure a CAPX cluster to use a registry mirror.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the registry mirror configuration.
    1. `KubeadmControlPlane`:
        * Add the registry mirror configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
        * Update `/etc/containerd/config.toml` in `spec.kubeadmConfigSpec.preKubeadmCommands` to apply the registry mirror config. Do not modify other items in the list.
    2. `KubeadmConfigTemplate`:
        * Add the registry mirror configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
        * Update `/etc/containerd/config.toml` in `spec.template.spec.preKubeadmCommands` to apply the registry mirror config. Do not modify other items in the list.
3. Apply the `cluster.yaml` file.

## Example

This example will configure a registry mirror for the following namespaces:

* registry.k8s.io
* ghcr.io
* quay.io

and redirect them to corresponding projects of the `<mirror-registry>` registry.

```YAML
---
# controlplane registry mirror settings
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        [host."https://<mirror-registry>/v2/registry.k8s.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
    - content: |
        [host."https://<mirror-registry>/v2/ghcr.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/ghcr.io/hosts.toml
    - content: |
        [host."https://<mirror-registry>/v2/quay.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/quay.io/hosts.toml
    ...
    preKubeadmCommands:
    - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n   config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
    ...
---
# worker registry mirror settings
kind: KubeadmConfigTemplate
spec:
  template:
    spec:
      files:
      - content: |
          [host."https://<mirror-registry>/v2/registry.k8s.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
      - content: |
          [host."https://<mirror-registry>/v2/ghcr.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/ghcr.io/hosts.toml
      - content: |
          [host."https://<mirror-registry>/v2/quay.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/quay.io/hosts.toml
      ...
      preKubeadmCommands:
      - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n   config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
      ...
```
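
To check that a node actually resolves images through the mirror, inspect the generated `hosts.toml` and attempt a test pull; a sketch, assuming `crictl` is available on the node:

```shell
cat /etc/containerd/certs.d/registry.k8s.io/hosts.toml
# A successful pull shows the mirror path works end to end
crictl pull registry.k8s.io/pause:3.9
```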

diff --git a/docs/capx/v1.4.x/experimental/vpc.md b/docs/capx/v1.4.x/experimental/vpc.md
new file mode 100644
index 00000000..3513e47e
--- /dev/null
+++ b/docs/capx/v1.4.x/experimental/vpc.md
@@ -0,0 +1,40 @@
# Creating a workload CAPX cluster in a Nutanix Flow VPC

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

!!! note
    Nutanix Flow VPCs are only validated with CAPX 1.1.3+

[Nutanix Flow Virtual Networking](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9:Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9){target=_blank} allows users to create Virtual Private Clouds (VPCs) with Overlay networking.
The steps below will illustrate how a CAPX cluster can be deployed inside an overlay subnet (NAT) inside a VPC while the management cluster resides outside of the VPC.

## Steps
1. [Request a floating IP](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Networking-Guide:ear-flow-nw-request-floating-ip-pc-t.html){target=_blank}.
2. Link the floating IP to an internal IP address inside the overlay subnet that will be used to deploy the CAPX cluster. This address will be assigned to the CAPX loadbalancer. To prevent IP conflicts, make sure the IP address is not part of the IP-pool defined in the subnet.
3. Generate a `cluster.yaml` file with the required CAPX cluster configuration where the `CONTROL_PLANE_ENDPOINT_IP` is set to the floating IP requested in the first step. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
4. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
5. Modify the `spec.kubeadmConfigSpec.files.*.content` attribute and change the `kube-vip` definition similar to the [example](#example) below.
6. Apply the `cluster.yaml` file.
7. When the CAPX workload cluster is deployed, it will be reachable via the floating IP.

## Example
```YAML
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - env:
            - name: address
              value: "<internal-ip-address>"
```

diff --git a/docs/capx/v1.4.x/getting_started.md b/docs/capx/v1.4.x/getting_started.md
new file mode 100644
index 00000000..c1643abd
--- /dev/null
+++ b/docs/capx/v1.4.x/getting_started.md
@@ -0,0 +1,159 @@
# Getting Started

This is a guide on getting started with Cluster API Provider Nutanix Cloud Infrastructure (CAPX). To learn about Cluster API in more depth, check out the [Cluster API book](https://cluster-api.sigs.k8s.io/){target=_blank}.

For more information on how to install the Nutanix CSI Driver on a CAPX cluster, visit [Nutanix CSI Driver installation with CAPX](./addons/install_csi_driver.md).

For more information on how CAPX handles credentials, visit [Credential Management](./credential_management.md).

For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md).

!!! note
    [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.4.x Upgrade Procedure](./tasks/capx_v14x_upgrade_procedure.md).

## Production Workflow

### Build OS image for NutanixMachineTemplate resource
Cluster API Provider Nutanix Cloud Infrastructure (CAPX) uses the [Image Builder](https://image-builder.sigs.k8s.io/){target=_blank} project to build OS images used for the Nutanix machines.

Follow the steps detailed in [Building CAPI Images for Nutanix Cloud Platform (NCP)](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#building-capi-images-for-nutanix-cloud-platform-ncp){target=_blank} to use Image Builder on the Nutanix Cloud Platform.

For a list of operating systems visit the OS image [Configuration](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#configuration){target=_blank} page.

### Prerequisites for using Cluster API Provider Nutanix Cloud Infrastructure
The [Cluster API installation](https://cluster-api.sigs.k8s.io/user/quick-start.html#installation){target=_blank} section provides an overview of all required prerequisites:

- [Common Prerequisites](https://cluster-api.sigs.k8s.io/user/quick-start.html#common-prerequisites){target=_blank}
- [Install and/or configure a Kubernetes cluster](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-andor-configure-a-kubernetes-cluster){target=_blank}
- [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl){target=_blank}
- (Optional) [Enabling Feature Gates](https://cluster-api.sigs.k8s.io/user/quick-start.html#enabling-feature-gates){target=_blank}

Make sure these prerequisites have been met before moving to the [Configure and Install Cluster API Provider Nutanix Cloud Infrastructure](#configure-and-install-cluster-api-provider-nutanix-cloud-infrastructure) step.
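
A quick way to confirm the core tooling is in place before continuing; a sketch, assuming `clusterctl` is installed and the bootstrap/management cluster is reachable:

```shell
clusterctl version
kubectl cluster-info
```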

### Configure and Install Cluster API Provider Nutanix Cloud Infrastructure
To initialize Cluster API Provider Nutanix Cloud Infrastructure, `clusterctl` requires the following variables, which should be set in either `~/.cluster-api/clusterctl.yaml` or as environment variables.
```
NUTANIX_ENDPOINT: ""  # IP or FQDN of Prism Central
NUTANIX_USER: ""      # Prism Central user
NUTANIX_PASSWORD: ""  # Prism Central password
NUTANIX_INSECURE: false # or true

KUBERNETES_VERSION: "v1.22.9"
WORKER_MACHINE_COUNT: 3
NUTANIX_SSH_AUTHORIZED_KEY: ""

NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: ""
NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME: ""
NUTANIX_SUBNET_NAME: ""

EXP_CLUSTER_RESOURCE_SET: true # Required for Nutanix CCM installation
```

You can also see the required list of variables by running the following:
```
clusterctl generate cluster mycluster -i nutanix --list-variables
Required Variables:
  - CONTROL_PLANE_ENDPOINT_IP
  - KUBERNETES_VERSION
  - NUTANIX_ENDPOINT
  - NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME
  - NUTANIX_PASSWORD
  - NUTANIX_PRISM_ELEMENT_CLUSTER_NAME
  - NUTANIX_SSH_AUTHORIZED_KEY
  - NUTANIX_SUBNET_NAME
  - NUTANIX_USER

Optional Variables:
  - CONTROL_PLANE_ENDPOINT_PORT (defaults to "6443")
  - CONTROL_PLANE_MACHINE_COUNT (defaults to 1)
  - KUBEVIP_LB_ENABLE (defaults to "false")
  - KUBEVIP_SVC_ENABLE (defaults to "false")
  - NAMESPACE (defaults to current Namespace in the KubeConfig file)
  - NUTANIX_INSECURE (defaults to "false")
  - NUTANIX_MACHINE_BOOT_TYPE (defaults to "legacy")
  - NUTANIX_MACHINE_MEMORY_SIZE (defaults to "4Gi")
  - NUTANIX_MACHINE_VCPU_PER_SOCKET (defaults to "1")
  - NUTANIX_MACHINE_VCPU_SOCKET (defaults to "2")
  - NUTANIX_PORT (defaults to "9440")
  - NUTANIX_SYSTEMDISK_SIZE (defaults to "40Gi")
  - WORKER_MACHINE_COUNT (defaults to 0)
```

!!! note
    To prevent duplicate IP assignments, it is required to assign an IP-address to the `CONTROL_PLANE_ENDPOINT_IP` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.

!!! warning
    Make sure [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled before running `clusterctl init`.

Now you can instantiate Cluster API with the following:
```
clusterctl init -i nutanix
```

### Deploy a workload cluster on Nutanix Cloud Infrastructure
```
export TEST_CLUSTER_NAME=mytestcluster1
export TEST_NAMESPACE=mytestnamespace
CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster ${TEST_CLUSTER_NAME} \
    -i nutanix \
    --target-namespace ${TEST_NAMESPACE} \
    --kubernetes-version v1.22.9 \
    --control-plane-machine-count 1 \
    --worker-machine-count 3 > ./cluster.yaml
kubectl create ns ${TEST_NAMESPACE}
kubectl apply -f ./cluster.yaml -n ${TEST_NAMESPACE}
```
To customize the configuration of the default `cluster.yaml` file generated by CAPX, visit the [NutanixCluster](./types/nutanix_cluster.md) and [NutanixMachineTemplate](./types/nutanix_machine_template.md) documentation.

### Access a workload cluster
To access resources on the cluster, you can get the kubeconfig with the following:
```
clusterctl get kubeconfig ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} > ${TEST_CLUSTER_NAME}.kubeconfig
kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig get nodes
```
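
Provisioning takes a few minutes; while waiting for the kubeconfig and nodes, progress can be followed from the management cluster. A sketch using standard Cluster API tooling:

```shell
# High-level readiness of the cluster, control plane and machines
clusterctl describe cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE}
# Individual machines and their phases
kubectl get machines -n ${TEST_NAMESPACE}
```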

### Install CNI on a workload cluster

You must deploy a Container Network Interface (CNI) based pod network add-on so that your pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed.

!!! note
    Take care that your pod network must not overlap with any of the host networks. You are likely to see problems if there is any overlap. If you find a collision between your network plugin's preferred pod network and some of your host networks, you must choose a suitable alternative CIDR block to use instead. It can be configured inside the `cluster.yaml` generated by `clusterctl generate cluster` before applying it.

Several external projects provide Kubernetes pod networks using CNI, some of which also support [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/){target=_blank}.

See a list of add-ons that implement the [Kubernetes networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model){target=_blank}. At the time of writing, the most common are [Calico](https://www.tigera.io/project-calico/){target=_blank} and [Cilium](https://cilium.io){target=_blank}.

Follow the specific install guide for your selected CNI and install only one pod network per cluster.

Once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS pod is running in the output of `kubectl get pods --all-namespaces`.

### Kube-vip settings

Kube-vip is a true load balancing solution for the Kubernetes control plane. It distributes API requests across control plane nodes. It also has the capability to provide load balancing for Kubernetes services.

You can tweak kube-vip settings by using the following properties:

- `KUBEVIP_LB_ENABLE`

This setting allows control plane load balancing using IPVS. See the
[Control Plane Load-Balancing documentation](https://kube-vip.io/docs/about/architecture/#control-plane-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ENABLE`

This setting enables a service of type LoadBalancer. See the
[Kubernetes Service Load Balancing documentation](https://kube-vip.io/docs/about/architecture/#kubernetes-service-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ELECTION`

This setting enables Load Balancing of Load Balancers. See [Load Balancing Load Balancers](https://kube-vip.io/docs/usage/kubernetes-services/#load-balancing-load-balancers-when-using-arp-mode-yes-you-read-that-correctly-kube-vip-v050){target=_blank} for further information.

### Delete a workload cluster
To remove a workload cluster from your management cluster, remove the cluster object and the provider will clean up all resources.

```
kubectl delete cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE}
```
!!! note
    Deleting the entire cluster template with `kubectl delete -f ./cluster.yaml` may lead to pending resources requiring manual cleanup.

diff --git a/docs/capx/v1.4.x/pc_certificates.md b/docs/capx/v1.4.x/pc_certificates.md
new file mode 100644
index 00000000..f3fe1699
--- /dev/null
+++ b/docs/capx/v1.4.x/pc_certificates.md
@@ -0,0 +1,149 @@
# Certificate Trust

CAPX invokes Prism Central APIs using the HTTPS protocol. CAPX has different methods to handle the trust of the Prism Central certificates:

- Enable certificate verification (default)
- Configure an additional trust bundle
- Disable certificate verification

See the respective sections below for more information.

!!! note
    For more information about replacing Prism Central certificates, see the [Nutanix AOS Security Guide](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_5:mul-security-ssl-certificate-pc-t.html){target=_blank}.

## Enable certificate verification (default)
By default, CAPX will perform certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
No additional configuration is required in CAPX.

## Configure an additional trust bundle
CAPX allows users to configure an additional trust bundle. This will allow CAPX to verify certificates that are not issued by a publicly trusted certificate authority.

To configure an additional trust bundle, the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable needs to be set. The value of the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable contains the trust bundle (PEM format) in base64 encoded format. See the [Configuring the trust bundle environment variable](#configuring-the-trust-bundle-environment-variable) section for more information.

It is also possible to configure the additional trust bundle manually by creating a custom `cluster-template`. See the [Configuring the additional trust bundle manually](#configuring-the-additional-trust-bundle-manually) section for more information.

The `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable can be set when initializing the CAPX provider or when creating a workload cluster. If the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` is configured when the CAPX provider is initialized, the additional trust bundle will be used for every CAPX workload cluster. If it is only configured when creating a workload cluster, it will only be applicable for that specific workload cluster.

### Configuring the trust bundle environment variable

Create a PEM encoded file containing the root certificate and all intermediate certificates. Example:
```
$ cat cert.crt
-----BEGIN CERTIFICATE-----
<certificate-content>
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
<certificate-content>
-----END CERTIFICATE-----
```

Use a `base64` tool to encode these contents in base64. The command below will provide a `base64` string.
```
$ cat cert.crt | base64
<base64-string>
```
!!! note
    Make sure the `base64` string does not contain any newlines (`\n`). If the output string contains newlines, remove them manually or check the manual of the `base64` tool on how to generate a `base64` string without newlines.

Use the `base64` string as value for the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable.
```
$ export NUTANIX_ADDITIONAL_TRUST_BUNDLE="<base64-string>"
```
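
On Linux, the newline-free string can usually be produced in one step; a sketch, assuming GNU coreutils `base64` (the `-w0` flag disables line wrapping):

```shell
export NUTANIX_ADDITIONAL_TRUST_BUNDLE="$(base64 -w0 cert.crt)"
```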

### Configuring the additional trust bundle manually

To configure the additional trust bundle manually without using the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable present in the default `cluster-template` files, it is required to:

- Create a `ConfigMap` containing the additional trust bundle.
- Configure the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec.

#### Creating the additional trust bundle ConfigMap

CAPX supports two different formats for the `ConfigMap` containing the additional trust bundle. The first is to add the additional trust bundle as a multi-line string in the `ConfigMap`; the second is to add the trust bundle in `base64` encoded format. See the examples below.

Multi-line string example:
```YAML
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-ca-bundle
  namespace: ${NAMESPACE}
data:
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    <certificate-content>
    -----END CERTIFICATE-----
    -----BEGIN CERTIFICATE-----
    <certificate-content>
    -----END CERTIFICATE-----
```

`base64` example:

```YAML
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-ca-bundle
  namespace: ${NAMESPACE}
binaryData:
  ca.crt: <base64-string>
```

!!! note
    The `base64` string needs to be added as `binaryData`.

#### Configuring the NutanixCluster spec

When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `NutanixCluster` spec. Add the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced.

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  ...
  prismCentral:
    ...
    additionalTrustBundle:
      kind: ConfigMap
      name: user-ca-bundle
    insecure: false
```

!!! note
    The default value of the `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured.

    If the `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled.

## Disable certificate verification

!!! note
    Disabling certificate verification is not recommended for production purposes and should only be used for testing.

Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `NutanixCluster` spec. Certificate verification will be disabled even if an additional trust bundle is configured.

Disabled certificate verification example:

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  controlPlaneEndpoint:
    host: ${CONTROL_PLANE_ENDPOINT_IP}
    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
  prismCentral:
    ...
    insecure: true
    ...
```
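
Before choosing a trust method, it can help to inspect the certificate chain Prism Central actually presents; a quick probe, assuming `openssl` is available and substituting your Prism Central address:

```shell
openssl s_client -connect <prism-central-address>:9440 -showcerts </dev/null
```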
+
+## Management cluster
+
+| Source             | Destination         | Protocol | Port | Description                                                                                       |
+|--------------------|---------------------|----------|------|---------------------------------------------------------------------------------------------------|
+| Management cluster | External Registries | TCP      | 443  | Pull container images from [CAPX public registries](#public-registries-utilized-when-using-capx)  |
+| Management cluster | Prism Central       | TCP      | 9440 | Management cluster communication to Prism Central                                                 |
+
+## Public registries utilized when using CAPX
+
+| Registry name |
+|---------------|
+| ghcr.io       |
diff --git a/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md b/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md
new file mode 100644
index 00000000..a64c990c
--- /dev/null
+++ b/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md
@@ -0,0 +1,83 @@
+# CAPX v1.4.x Upgrade Procedure
+
+Starting with CAPX v1.3.0, all CAPX-managed Kubernetes clusters are required to use the Nutanix Cloud Controller Manager (CCM).
+
+Before upgrading CAPX instances to v1.3.0 or later, it is required to follow the [steps](#steps) detailed below for each of the CAPX-managed Kubernetes clusters that don't use Nutanix CCM.
+
+
+## Steps
+
+This procedure uses a [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} to install Nutanix CCM, but it can also be installed using the [Nutanix CCM Helm chart](https://artifacthub.io/packages/helm/nutanix/nutanix-cloud-provider){target=_blank}.
+
+!!! warning
+    Make sure [CRS](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled on the management cluster before following the procedure.
+
+Perform the following steps for each of the CAPX-managed Kubernetes clusters that are not configured to use Nutanix CCM:
+
+1. Add the `cloud-provider: external` configuration in the `KubeadmConfigTemplate` resources:
+    ```YAML
+    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+    kind: KubeadmConfigTemplate
+    spec:
+      template:
+        spec:
+          joinConfiguration:
+            nodeRegistration:
+              kubeletExtraArgs:
+                cloud-provider: external
+    ```
+2. Add the `cloud-provider: external` configuration in the `KubeadmControlPlane` resource:
+    ```YAML
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    spec:
+      kubeadmConfigSpec:
+        clusterConfiguration:
+          apiServer:
+            extraArgs:
+              cloud-provider: external
+          controllerManager:
+            extraArgs:
+              cloud-provider: external
+        initConfiguration:
+          nodeRegistration:
+            kubeletExtraArgs:
+              cloud-provider: external
+        joinConfiguration:
+          nodeRegistration:
+            kubeletExtraArgs:
+              cloud-provider: external
+    ```
+3. Add the Nutanix CCM CRS resources:
+
+    - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm-crs.yaml){target=_blank}
+    - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm-secret.yaml){target=_blank}
+    - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm.yaml){target=_blank}
+
+    Make sure to update each of the variables before applying the `YAML` files.
+
+4. 
Add the `ccm: nutanix` label to the `Cluster` resource:
+    ```YAML
+    apiVersion: cluster.x-k8s.io/v1beta1
+    kind: Cluster
+    metadata:
+      labels:
+        ccm: nutanix
+    ```
+5. Verify that the Nutanix CCM pod is up and running:
+    ```
+    kubectl get pod -A -l k8s-app=nutanix-cloud-controller-manager
+    ```
+6. Trigger a new rollout of the Kubernetes nodes by performing a Kubernetes upgrade or by using `clusterctl alpha rollout restart`. See the [clusterctl alpha rollout](https://cluster-api.sigs.k8s.io/clusterctl/commands/alpha-rollout#restart){target=_blank} documentation for more information.
+7. Upgrade CAPX to v1.4.0 by following the [clusterctl upgrade](https://cluster-api.sigs.k8s.io/clusterctl/commands/upgrade.html?highlight=clusterctl%20upgrade%20pla#clusterctl-upgrade){target=_blank} documentation.
\ No newline at end of file
diff --git a/docs/capx/v1.4.x/tasks/modify_machine_configuration.md b/docs/capx/v1.4.x/tasks/modify_machine_configuration.md
new file mode 100644
index 00000000..04a43a95
--- /dev/null
+++ b/docs/capx/v1.4.x/tasks/modify_machine_configuration.md
@@ -0,0 +1,11 @@
+# Modifying Machine Configurations
+
+Since all attributes of the `NutanixMachineTemplate` resources are immutable, follow the [Updating Infrastructure Machine Templates](https://cluster-api.sigs.k8s.io/tasks/updating-machine-templates.html?highlight=machine%20template#updating-infrastructure-machine-templates){target=_blank} procedure to modify the configuration of machines in an existing CAPX cluster.
+See the [NutanixMachineTemplate](../types/nutanix_machine_template.md) documentation for all supported configuration parameters.
+
+!!! note
+    Manually modifying existing and linked `NutanixMachineTemplate` resources will not trigger a rolling update of the machines.
+
+!!! note
+    Do not modify the virtual machine configuration of CAPX cluster nodes manually in Prism/Prism Central.
+    CAPX will not automatically revert the configuration change, but performing scale-up/scale-down/upgrade operations will override manual modifications. Only use the `Updating Infrastructure Machine Templates` procedure referenced above to perform configuration changes.
\ No newline at end of file
diff --git a/docs/capx/v1.4.x/troubleshooting.md b/docs/capx/v1.4.x/troubleshooting.md
new file mode 100644
index 00000000..c023d13e
--- /dev/null
+++ b/docs/capx/v1.4.x/troubleshooting.md
@@ -0,0 +1,13 @@
+# Troubleshooting
+
+## Clusterctl failed with GitHub rate limit error
+
+By design, clusterctl fetches artifacts from repositories hosted on GitHub; this operation is subject to [GitHub API rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting){target=_blank}.
+
+While this is generally okay for the majority of users, there is still a chance that some users (especially developers or CI tools) hit this limit:
+
+```
+Error: failed to get repository client for the XXX with name YYY: error creating the GitHub repository client: failed to get GitHub latest version: failed to get the list of versions: rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable
+```
+
+As explained in the error message, you can increase your API rate limit by [creating a GitHub personal token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token){target=_blank} and setting a `GITHUB_TOKEN` environment variable using the token.
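+
+For example, assuming a personal access token has already been created, it can be exported for the current shell session before running `clusterctl` (the token value below is a placeholder):
+
+```
+export GITHUB_TOKEN=<your-personal-access-token>
+clusterctl init --infrastructure nutanix
+```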
diff --git a/docs/capx/v1.4.x/types/nutanix_cluster.md b/docs/capx/v1.4.x/types/nutanix_cluster.md new file mode 100644 index 00000000..09325cab --- /dev/null +++ b/docs/capx/v1.4.x/types/nutanix_cluster.md @@ -0,0 +1,64 @@ +# NutanixCluster + +The `NutanixCluster` resource defines the configuration of a CAPX Kubernetes cluster. + +Example of a `NutanixCluster` resource: + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + prismCentral: + address: ${NUTANIX_ENDPOINT} + additionalTrustBundle: + kind: ConfigMap + name: user-ca-bundle + credentialRef: + kind: Secret + name: ${CLUSTER_NAME} + insecure: ${NUTANIX_INSECURE=false} + port: ${NUTANIX_PORT=9440} +``` + +## NutanixCluster spec +The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixCluster` resource. + +### Configuration parameters + +| Key |Type |Description | +|--------------------------------------------|------|----------------------------------------------------------------------------------| +|controlPlaneEndpoint |object|Defines the host IP and port of the CAPX Kubernetes cluster. | +|controlPlaneEndpoint.host |string|Host IP to be assigned to the CAPX Kubernetes cluster. | +|controlPlaneEndpoint.port |int |Port of the CAPX Kubernetes cluster. Default: `6443` | +|prismCentral |object|(Optional) Prism Central endpoint definition. | +|prismCentral.address |string|IP/FQDN of Prism Central. | +|prismCentral.port |int |Port of Prism Central. Default: `9440` | +|prismCentral.insecure |bool |Disable Prism Central certificate checking. Default: `false` | +|prismCentral.credentialRef |object|Reference to credentials used for Prism Central connection. | +|prismCentral.credentialRef.kind |string|Kind of the credentialRef. Allowed value: `Secret` | +|prismCentral.credentialRef.name |string|Name of the secret containing the Prism Central credentials. | +|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret containing the Prism Central credentials. | +|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for Prism Central connection. | +|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` | +|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. | +|prismCentral.additionalTrustBundle.namespace|string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle.| +|failureDomains |list |(Optional) Failure domains for the Kubernetes nodes | +|failureDomains.[].name |string|Name of the failure domain | +|failureDomains.[].cluster |object|Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed | +|failureDomains.[].cluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` | +|failureDomains.[].cluster.name |string|Name of the Prism Element cluster. | +|failureDomains.[].cluster.uuid |string|UUID of the Prism Element cluster. | +|failureDomains.[].subnets |list |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs. | +|failureDomains.[].subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` | +|failureDomains.[].subnets.[].name |string|Name of the subnet. 
|
+|failureDomains.[].subnets.[].uuid |string|UUID of the subnet. |
+|failureDomains.[].controlPlane |bool |Indicates if a failure domain is suited for control plane nodes. |
+
+!!! note
+    To prevent duplicate IP assignments, it is required to assign an IP address to the `controlPlaneEndpoint.host` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
\ No newline at end of file
diff --git a/docs/capx/v1.4.x/types/nutanix_machine_template.md b/docs/capx/v1.4.x/types/nutanix_machine_template.md
new file mode 100644
index 00000000..516d1eea
--- /dev/null
+++ b/docs/capx/v1.4.x/types/nutanix_machine_template.md
@@ -0,0 +1,84 @@
+# NutanixMachineTemplate
+The `NutanixMachineTemplate` resource defines the configuration of a CAPX Kubernetes VM.
+
+Example of a `NutanixMachineTemplate` resource:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-mt-0"
+  namespace: "${NAMESPACE}"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://${CLUSTER_NAME}-m1"
+      # Supported options for boot type: legacy and uefi
+      # Defaults to legacy if not set
+      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
+      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
+      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
+      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
+      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
+      image:
+        type: name
+        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
+      cluster:
+        type: name
+        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
+      subnet:
+        - type: name
+          name: "${NUTANIX_SUBNET_NAME}"
+      # Adds additional categories to the virtual machines.
+      # Note: Categories must already be present in Prism Central
+      # additionalCategories:
+      #   - key: AppType
+      #     value: Kubernetes
+      # Adds the cluster virtual machines to a project defined in Prism Central.
+      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
+      # Note: Project must already be present in Prism Central.
+      # project:
+      #   type: name
+      #   name: "NUTANIX_PROJECT_NAME"
+      # gpus:
+      #   - type: name
+      #     name: "GPU NAME"
+```
+
+## NutanixMachineTemplate spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixMachineTemplate` resource.
+
+### Configuration parameters
+| Key |Type |Description|
+|------------------------------------|------|--------------------------------------------------------------------------------------------------------|
+|bootType |string|Boot type of the VM. Depends on the OS image used. Allowed values: `legacy`, `uefi`. Default: `legacy` |
+|vcpusPerSocket |int |Number of vCPUs per socket. Default: `1` |
+|vcpuSockets |int |Number of vCPU sockets. Default: `2` |
+|memorySize |string|Amount of memory. Default: `4Gi` |
+|systemDiskSize |string|Amount of storage assigned to the system disk. Default: `40Gi` |
+|image |object|Reference (name or uuid) to the OS image used for the system disk. |
+|image.type |string|Type to identify the OS image. Allowed values: `name` and `uuid` |
+|image.name |string|Name of the image. |
+|image.uuid |string|UUID of the image. |
+|cluster |object|(Optional) Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed. |
+|cluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` |
+|cluster.name |string|Name of the Prism Element cluster. |
+|cluster.uuid |string|UUID of the Prism Element cluster. 
|
+|subnets |list |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs. |
+|subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` |
+|subnets.[].name |string|Name of the subnet. |
+|subnets.[].uuid |string|UUID of the subnet. |
+|additionalCategories |list |Reference to the categories to be assigned to the VMs. These categories must already exist in Prism Central. |
+|additionalCategories.[].key |string|Key of the category. |
+|additionalCategories.[].value |string|Value of the category. |
+|project |object|Reference (name or uuid) to the project. This project must already exist in Prism Central. |
+|project.type |string|Type to identify the project. Allowed values: `name` and `uuid` |
+|project.name |string|Name of the project. |
+|project.uuid |string|UUID of the project. |
+|gpus |list |Reference (name or deviceID) to the GPUs to be assigned to the VMs. Can be vGPU or Passthrough. |
+|gpus.[].type |string|Type to identify the GPU. Allowed values: `name` and `deviceID` |
+|gpus.[].name |string|Name of the GPU or the vGPU profile. |
+|gpus.[].deviceID |string|DeviceID of the GPU or the vGPU profile. |
+
+!!! note
+    The `cluster` or `subnets` configuration parameters are optional when failure domains are defined on the `NutanixCluster` and `MachineDeployment` resources.
\ No newline at end of file
diff --git a/docs/capx/v1.4.x/user_requirements.md b/docs/capx/v1.4.x/user_requirements.md
new file mode 100644
index 00000000..5a4b8604
--- /dev/null
+++ b/docs/capx/v1.4.x/user_requirements.md
@@ -0,0 +1,36 @@
+# User Requirements
+
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account.
+
+CAPX supports two types of PC users:
+
+- Local users: must be assigned the `Prism Central Admin` role.
+- Domain users: must be assigned a role that at least has the [Minimum required CAPX permissions for domain users](#minimum-required-capx-permissions-for-domain-users) assigned.
+
+See [Credential Management](./credential_management.md){target=_blank} for more information on how to pass the user credentials to CAPX.
+
+## Minimum required CAPX permissions for domain users
+
+The following permissions are required for Prism Central domain users:
+
+- Create Category Mapping
+- Create Image
+- Create Or Update Name Category
+- Create Or Update Value Category
+- Create Virtual Machine
+- Delete Category Mapping
+- Delete Image
+- Delete Name Category
+- Delete Value Category
+- Delete Virtual Machine
+- View Category Mapping
+- View Cluster
+- View Image
+- View Name Category
+- View Project
+- View Subnet
+- View Value Category
+- View Virtual Machine
+
+!!! note
+    The list of permissions has been validated on PC 2022.6 and above.
diff --git a/docs/capx/v1.4.x/validated_integrations.md b/docs/capx/v1.4.x/validated_integrations.md
new file mode 100644
index 00000000..5d61d932
--- /dev/null
+++ b/docs/capx/v1.4.x/validated_integrations.md
@@ -0,0 +1,62 @@
+# Validated Integrations
+
+Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers use or deploy with CAPX. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process.
+
+## Integration Validation Policy
+
+Nutanix follows the version validation policies below:
+
+- Validate at least one active AOS LTS (long term support) version. The validated AOS LTS version for a specific CAPX version is listed in the [AOS](#aos) section.
+
+    !!! note
+
+        Typically this is the latest LTS release at the time of the CAPX release, except when the latest release is the initial release in a train (e.g., x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate the latest AOS STS (short term support) release at the time of the CAPX release.
+- Validate at least one active Prism Central (PC) version. The validated PC version for a specific CAPX version is listed in the [Prism Central](#prism-central) section.
+
+    !!! note
+
+        Typically this is the latest PC release at the time of the CAPX release, except when the latest release is the initial release in a train (e.g., x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate at least one active Cluster-API (CAPI) version. The validated CAPI version for a specific CAPX version is listed in the [Cluster-API](#cluster-api) section.
+ + !!! note + + Typically the the latest Cluster-API release at time of CAPX release except when latest is initial release in train (eg x.y.0). Exact version depends on timing and customer adoption. + +## Validated versions +### Cluster-API +| CAPX | CAPI v1.2.x | CAPI v1.3.x | CAPI v1.4.x | CAPI v1.5.x | CAPI v1.6.x | CAPI v1.7.x | +|--------|-------------|-------------|-------------|-------------|-------------|-------------| +| v1.4.x | No | Yes | Yes | Yes | Yes | Yes | +| v1.3.x | No | Yes | Yes | Yes | Yes | No | +| v1.2.x | No | Yes | Yes | Yes | No | No | +| v1.1.x | Yes | Yes | No | No | No | No | +| v1.0.x | Yes | No | No | No | No | No | +| v0.5.x | Yes | No | No | No | No | No | + +See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions. + +### AOS + +| CAPX | 5.20.4.5 (LTS) | 6.1.1.5 (STS) | 6.5.x (LTS) | 6.6 (STS) | 6.7 (STS) | 6.8 (STS) | +|--------|----------------|---------------|-------------|-----------|-----------|-----------| +| v1.4.x | No | No | Yes | No | No | Yes | +| v1.3.x | No | No | Yes | Yes | Yes | No | +| v1.2.x | No | No | Yes | Yes | Yes | No | +| v1.1.x | No | No | Yes | No | No | No | +| v1.0.x | Yes | Yes | No | No | No | No | +| v0.5.x | Yes | Yes | No | No | No | No | + + +### Prism Central + +| CAPX | 2022.1.0.2 | pc.2022.6 | pc.2022.9 | pc.2023.x | pc.2024.x | +|--------|------------|-----------|-----------|-----------|-----------| +| v1.4.x | No | Yes | No | Yes | Yes | +| v1.3.x | No | Yes | No | Yes | No | +| v1.2.x | No | Yes | Yes | Yes | No | +| v1.1.x | No | Yes | No | No | No | +| v1.0.x | Yes | Yes | No | No | No | +| v0.5.x | Yes | Yes | No | No | No | diff --git a/mkdocs.yml b/mkdocs.yml index dd0265cb..617d6548 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,200 +1,223 @@ site_name: opendocs.nutanix.com theme: - name: material - logo: images/nutanix_x_white.png - features: - - navigation.instant - - content.code.annotate - - navigation.tabs - - navigation.top - favicon: images/favicon.png - icon: - admonition: - note: material/note + name: material + logo: images/nutanix_x_white.png + features: + - navigation.instant + - content.code.annotate + - navigation.tabs + - navigation.top + favicon: images/favicon.png + icon: + admonition: + note: material/note extra_css: - - stylesheets/extra.css + - stylesheets/extra.css nav: - - "Solutions": - - "Cloud Native": - - "Overview": "index.md" - - "Cluster API Provider: Nutanix (CAPX)": - - "v1.3.x (Latest)": - - "Getting Started": "capx/v1.3.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.3.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.3.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.3.x/pc_certificates.md" - - "Credential Management": "capx/v1.3.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.3.x/tasks/modify_machine_configuration.md" - - "CAPX v1.3.x Upgrade Procedure": "capx/v1.3.x/tasks/capx_v13x_upgrade_procedure.md" - - "Port Requirements": "capx/v1.3.x/port_requirements.md" - - "User Requirements": "capx/v1.3.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.3.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.3.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.3.x/experimental/capx_multi_pe.md" - - "Autoscaler": 
"capx/v1.3.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.3.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.3.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.3.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.3.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.3.x/troubleshooting.md" - - "v1.2.x": - - "Getting Started": "capx/v1.2.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.2.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.2.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.2.x/pc_certificates.md" - - "Credential Management": "capx/v1.2.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.2.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.2.x/port_requirements.md" - - "User Requirements": "capx/v1.2.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.2.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.2.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.2.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.2.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.2.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.2.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.2.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.2.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.2.x/troubleshooting.md" - - "v1.1.x": - - "Getting Started": "capx/v1.1.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.1.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.1.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.1.x/pc_certificates.md" - - "Credential Management": "capx/v1.1.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.1.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.1.x/port_requirements.md" - - "User Requirements": "capx/v1.1.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.1.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.1.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.1.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.1.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.1.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.1.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.1.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.1.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.1.x/troubleshooting.md" - - "v1.0.x": - - "Getting Started": "capx/v1.0.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.0.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.0.x/types/nutanix_machine_template.md" - - "Credential Management": "capx/v1.0.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.0.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.0.x/port_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.0.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.0.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.0.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.0.x/experimental/autoscaler.md" - - "Troubleshooting": 
"capx/v1.0.x/troubleshooting.md" - - "v0.5.x": - - "Getting Started": "capx/v0.5.x/getting_started.md" - - "Credential Management": "capx/v0.5.x/credential_management.md" - - "Addons": - - "CSI Driver Installation": "capx/v0.5.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v0.5.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v0.5.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v0.5.x/experimental/autoscaler.md" - - "Troubleshooting": "capx/v0.5.x/troubleshooting.md" - - "Nutanix Cloud Controller Manager (CCM)": - - "v0.3.x (Latest)": - - "Overview": "ccm/v0.3.x/overview.md" - - "Requirements": "ccm/v0.3.x/requirements.md" - - "Configuration": "ccm/v0.3.x/ccm_configuration.md" - - "Certificate Trust": "ccm/v0.3.x/pc_certificates.md" - - "Credentials": "ccm/v0.3.x/ccm_credentials.md" - - "Topology Discovery": "ccm/v0.3.x/topology_discovery.md" - - "Custom Labeling": "ccm/v0.3.x/custom_labeling.md" - - "v0.2.0": - - "Overview": "ccm/v0.2.x/overview.md" - - "Requirements": "ccm/v0.2.x/requirements.md" - - "Configuration": "ccm/v0.2.x/ccm_configuration.md" - - "Credentials": "ccm/v0.2.x/ccm_credentials.md" - - "Topology Discovery": "ccm/v0.2.x/topology_discovery.md" - - "Custom Labeling": "ccm/v0.2.x/custom_labeling.md" - - "Red Hat OpenShift": - - "Install": - - "Agnostic": "openshift/install/agnostic/index.md" - - "IPI": "openshift/install/ipi/index.md" - - "Assisted Installer": "openshift/install/assisted_installer/index.md" - - "Post Install": "openshift/post-install/index.md" - - Operators: - - "CSI": "openshift/operators/csi/index.md" - - "Google Anthos": - - "Architecture": "anthos/architecture/index.md" - - "Install": - - "Manual": "anthos/install/manual/index.md" - - "Amazon EKS Anywhere": - - "Install": "eksa/install/index.md" - - "GPT-in-a-Box 1.0": - - "Overview": "gpt-in-a-box/overview.md" - - "Deploy on Virtual Machine": - - "v0.3": - - "Getting Started": "gpt-in-a-box/vm/v0.3/getting_started.md" - - "Validated Models": "gpt-in-a-box/vm/v0.3/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.3/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/vm/v0.3/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.3/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.3/model_version.md" - - "HuggingFace Model Support": "gpt-in-a-box/vm/v0.3/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.3/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.3/management_requests.md" - - "v0.2": - - "Getting Started": "gpt-in-a-box/vm/v0.2/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/vm/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.2/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.2/model_version.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.2/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.2/management_requests.md" - - "Deploy on Kubernetes": - - "v0.2": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.2/getting_started.md" - - "Validated Models": "gpt-in-a-box/kubernetes/v0.2/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.2/inference_requests.md" - - "HuggingFace 
Model Support": "gpt-in-a-box/kubernetes/v0.2/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.2/custom_model.md" - - "v0.1": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.1/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.1/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.1/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.1/inference_requests.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.1/custom_model.md" - - "Support": "gpt-in-a-box/support.md" - - "Guides": - - "Cloud Native": - - "Red Hat OpenShift": - - "Install": - - "IPI": "guides/openshift/install/ipi/index.md" - - "Custom Cloud Native Role": "guides/cloud_native_role/index.md" + - "Solutions": + - "Cloud Native": + - "Overview": "index.md" + - "Cluster API Provider: Nutanix (CAPX)": + - "v1.4.x (latest)": + - "Getting Started": "capx/v1.4.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.4.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.4.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.4.x/pc_certificates.md" + - "Credential Management": "capx/v1.4.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.4.x/tasks/modify_machine_configuration.md" + - "CAPX v1.4.x Upgrade Procedure": "capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md" + - "Port Requirements": "capx/v1.4.x/port_requirements.md" + - "User Requirements": "capx/v1.4.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.4.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.4.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.4.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.4.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.4.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.4.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.4.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.4.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.4.x/troubleshooting.md" + - "v1.3.x": + - "Getting Started": "capx/v1.3.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.3.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.3.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.3.x/pc_certificates.md" + - "Credential Management": "capx/v1.3.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.3.x/tasks/modify_machine_configuration.md" + - "CAPX v1.3.x Upgrade Procedure": "capx/v1.3.x/tasks/capx_v13x_upgrade_procedure.md" + - "Port Requirements": "capx/v1.3.x/port_requirements.md" + - "User Requirements": "capx/v1.3.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.3.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.3.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.3.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.3.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.3.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.3.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.3.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.3.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.3.x/troubleshooting.md" + - "v1.2.x": + - "Getting Started": "capx/v1.2.x/getting_started.md" + - "Types": + - 
"NutanixCluster": "capx/v1.2.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.2.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.2.x/pc_certificates.md" + - "Credential Management": "capx/v1.2.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.2.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.2.x/port_requirements.md" + - "User Requirements": "capx/v1.2.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.2.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.2.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.2.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.2.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.2.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.2.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.2.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.2.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.2.x/troubleshooting.md" + - "v1.1.x": + - "Getting Started": "capx/v1.1.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.1.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.1.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.1.x/pc_certificates.md" + - "Credential Management": "capx/v1.1.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.1.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.1.x/port_requirements.md" + - "User Requirements": "capx/v1.1.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.1.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.1.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.1.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.1.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.1.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.1.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.1.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.1.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.1.x/troubleshooting.md" + - "v1.0.x": + - "Getting Started": "capx/v1.0.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.0.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.0.x/types/nutanix_machine_template.md" + - "Credential Management": "capx/v1.0.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.0.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.0.x/port_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.0.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.0.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.0.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.0.x/experimental/autoscaler.md" + - "Troubleshooting": "capx/v1.0.x/troubleshooting.md" + - "v0.5.x": + - "Getting Started": "capx/v0.5.x/getting_started.md" + - "Credential Management": "capx/v0.5.x/credential_management.md" + - "Addons": + - "CSI Driver Installation": "capx/v0.5.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v0.5.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v0.5.x/experimental/capx_multi_pe.md" + - "Autoscaler": 
"capx/v0.5.x/experimental/autoscaler.md" + - "Troubleshooting": "capx/v0.5.x/troubleshooting.md" + - "Nutanix Cloud Controller Manager (CCM)": + - "v0.3.x (Latest)": + - "Overview": "ccm/v0.3.x/overview.md" + - "Requirements": "ccm/v0.3.x/requirements.md" + - "Configuration": "ccm/v0.3.x/ccm_configuration.md" + - "Certificate Trust": "ccm/v0.3.x/pc_certificates.md" + - "Credentials": "ccm/v0.3.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.3.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.3.x/custom_labeling.md" + - "v0.2.0": + - "Overview": "ccm/v0.2.x/overview.md" + - "Requirements": "ccm/v0.2.x/requirements.md" + - "Configuration": "ccm/v0.2.x/ccm_configuration.md" + - "Credentials": "ccm/v0.2.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.2.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.2.x/custom_labeling.md" + - "Red Hat OpenShift": + - "Install": + - "Agnostic": "openshift/install/agnostic/index.md" + - "IPI": "openshift/install/ipi/index.md" + - "Assisted Installer": "openshift/install/assisted_installer/index.md" + - "Post Install": "openshift/post-install/index.md" + - Operators: + - "CSI": "openshift/operators/csi/index.md" + - "Google Anthos": + - "Architecture": "anthos/architecture/index.md" + - "Install": + - "Manual": "anthos/install/manual/index.md" + - "Amazon EKS Anywhere": + - "Install": "eksa/install/index.md" + - "GPT-in-a-Box 1.0": + - "Overview": "gpt-in-a-box/overview.md" + - "Deploy on Virtual Machine": + - "v0.3": + - "Getting Started": "gpt-in-a-box/vm/v0.3/getting_started.md" + - "Validated Models": "gpt-in-a-box/vm/v0.3/validated_models.md" + - "Generating Model Archive File": "gpt-in-a-box/vm/v0.3/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/vm/v0.3/inference_server.md" + - "Inference Requests": "gpt-in-a-box/vm/v0.3/inference_requests.md" + - "Model Version Support": "gpt-in-a-box/vm/v0.3/model_version.md" + - "HuggingFace Model Support": "gpt-in-a-box/vm/v0.3/huggingface_model.md" + - "Custom Model Support": "gpt-in-a-box/vm/v0.3/custom_model.md" + - "Management Requests": "gpt-in-a-box/vm/v0.3/management_requests.md" + - "v0.2": + - "Getting Started": "gpt-in-a-box/vm/v0.2/getting_started.md" + - "Generating Model Archive File": "gpt-in-a-box/vm/v0.2/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/vm/v0.2/inference_server.md" + - "Inference Requests": "gpt-in-a-box/vm/v0.2/inference_requests.md" + - "Model Version Support": "gpt-in-a-box/vm/v0.2/model_version.md" + - "Custom Model Support": "gpt-in-a-box/vm/v0.2/custom_model.md" + - "Management Requests": "gpt-in-a-box/vm/v0.2/management_requests.md" + - "Deploy on Kubernetes": + - "v0.2": + - "Getting Started": "gpt-in-a-box/kubernetes/v0.2/getting_started.md" + - "Validated Models": "gpt-in-a-box/kubernetes/v0.2/validated_models.md" + - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.2/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.2/inference_server.md" + - "Inference Requests": "gpt-in-a-box/kubernetes/v0.2/inference_requests.md" + - "HuggingFace Model Support": "gpt-in-a-box/kubernetes/v0.2/huggingface_model.md" + - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.2/custom_model.md" + - "v0.1": + - "Getting Started": "gpt-in-a-box/kubernetes/v0.1/getting_started.md" + - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.1/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.1/inference_server.md" + - "Inference Requests": 
"gpt-in-a-box/kubernetes/v0.1/inference_requests.md" + - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.1/custom_model.md" + - "Support": "gpt-in-a-box/support.md" + - "Guides": + - "Cloud Native": + - "Red Hat OpenShift": + - "Install": + - "IPI": "guides/openshift/install/ipi/index.md" + - "Custom Cloud Native Role": "guides/cloud_native_role/index.md" markdown_extensions: - - attr_list - - admonition - - pymdownx.details - - pymdownx.superfences - - tables - - toc: - permalink: true + - attr_list + - admonition + - pymdownx.details + - pymdownx.superfences + - tables + - toc: + permalink: true copyright: Copyright © 2021 - 2023 Nutanix, Inc. extra: - generator: false + generator: false repo_url: https://github.com/nutanix-cloud-native/opendocs repo_name: nutanix-cloud-native/opendocs edit_uri: "" From a7ed6678a8df8c8207395de8e6797f0bb17f892f Mon Sep 17 00:00:00 2001 From: Christophe Jauffret Date: Tue, 27 Aug 2024 21:23:46 +0200 Subject: [PATCH 03/15] add CAPX 1.5.x doc (#66) --- docs/capx/latest | 2 +- docs/capx/v1.5.x/addons/install_csi_driver.md | 215 +++++++++ docs/capx/v1.5.x/credential_management.md | 93 ++++ docs/capx/v1.5.x/experimental/autoscaler.md | 129 +++++ .../capx/v1.5.x/experimental/capx_multi_pe.md | 30 ++ docs/capx/v1.5.x/experimental/oidc.md | 31 ++ docs/capx/v1.5.x/experimental/proxy.md | 62 +++ .../v1.5.x/experimental/registry_mirror.md | 96 ++++ docs/capx/v1.5.x/experimental/vpc.md | 40 ++ docs/capx/v1.5.x/getting_started.md | 159 +++++++ docs/capx/v1.5.x/pc_certificates.md | 149 ++++++ docs/capx/v1.5.x/port_requirements.md | 19 + .../tasks/capx_v14x_upgrade_procedure.md | 83 ++++ .../tasks/modify_machine_configuration.md | 11 + docs/capx/v1.5.x/troubleshooting.md | 13 + docs/capx/v1.5.x/types/nutanix_cluster.md | 64 +++ .../v1.5.x/types/nutanix_machine_template.md | 84 ++++ docs/capx/v1.5.x/user_requirements.md | 36 ++ docs/capx/v1.5.x/validated_integrations.md | 65 +++ mkdocs.yml | 449 +++++++++--------- 20 files changed, 1616 insertions(+), 214 deletions(-) create mode 100644 docs/capx/v1.5.x/addons/install_csi_driver.md create mode 100644 docs/capx/v1.5.x/credential_management.md create mode 100644 docs/capx/v1.5.x/experimental/autoscaler.md create mode 100644 docs/capx/v1.5.x/experimental/capx_multi_pe.md create mode 100644 docs/capx/v1.5.x/experimental/oidc.md create mode 100644 docs/capx/v1.5.x/experimental/proxy.md create mode 100644 docs/capx/v1.5.x/experimental/registry_mirror.md create mode 100644 docs/capx/v1.5.x/experimental/vpc.md create mode 100644 docs/capx/v1.5.x/getting_started.md create mode 100644 docs/capx/v1.5.x/pc_certificates.md create mode 100644 docs/capx/v1.5.x/port_requirements.md create mode 100644 docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md create mode 100644 docs/capx/v1.5.x/tasks/modify_machine_configuration.md create mode 100644 docs/capx/v1.5.x/troubleshooting.md create mode 100644 docs/capx/v1.5.x/types/nutanix_cluster.md create mode 100644 docs/capx/v1.5.x/types/nutanix_machine_template.md create mode 100644 docs/capx/v1.5.x/user_requirements.md create mode 100644 docs/capx/v1.5.x/validated_integrations.md diff --git a/docs/capx/latest b/docs/capx/latest index 25f6cdf3..39f865cc 120000 --- a/docs/capx/latest +++ b/docs/capx/latest @@ -1 +1 @@ -v1.4.x \ No newline at end of file +v1.5.x \ No newline at end of file diff --git a/docs/capx/v1.5.x/addons/install_csi_driver.md b/docs/capx/v1.5.x/addons/install_csi_driver.md new file mode 100644 index 00000000..afb4bdc8 --- /dev/null +++ 
b/docs/capx/v1.5.x/addons/install_csi_driver.md
@@ -0,0 +1,215 @@
+# Nutanix CSI Driver installation with CAPX
+
+The Nutanix CSI driver is fully supported on CAPI/CAPX deployed clusters where all the nodes meet the [Nutanix CSI driver prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+
+There are three methods to install the Nutanix CSI driver on a CAPI/CAPX cluster:
+
+- Helm
+- ClusterResourceSet
+- CAPX Flavor
+
+Each method is described in the sections below.
+
+## CAPI Workload cluster prerequisites for the Nutanix CSI Driver
+
+Kubernetes workers need the following prerequisites to use the Nutanix CSI driver:
+
+- iSCSI initiator package (for Volumes based block storage)
+- NFS client package (for Files based storage)
+
+These packages may already be present in the image you use with your infrastructure provider, or you can rely on your bootstrap provider to install them. More info is available in the [Prerequisites docs](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-plugin-prerequisites-r.html){target=_blank}.
+
+The package names and installation method will also vary depending on the operating system you plan to use.
+
+In the example below, the `kubeadm` bootstrap provider is used to deploy these packages on top of an Ubuntu 20.04 image. The `kubeadm` bootstrap provider allows defining `preKubeadmCommands` that will be launched before Kubernetes cluster creation. These `preKubeadmCommands` can be defined both in `KubeadmControlPlane` for master nodes and in `KubeadmConfigTemplate` for worker nodes.
+
+For an Ubuntu 20.04 image, both `KubeadmControlPlane` and `KubeadmConfigTemplate` must be modified as in the example below:
+
+```yaml
+spec:
+  template:
+    spec:
+      # .......
+      preKubeadmCommands:
+      - echo "before kubeadm call" > /var/log/prekubeadm.log
+      - apt update
+      - apt install -y nfs-common open-iscsi
+      - systemctl enable --now iscsid
+```
+
+## Install the Nutanix CSI Driver with Helm
+
+A recent [Helm](https://helm.sh){target=_blank} version is needed (tested with Helm v3.10.1).
+
+The example below must be applied to a provisioned workload cluster. The workload cluster's kubeconfig can be retrieved and used to connect with the following commands:
+
+```shell
+clusterctl get kubeconfig $CLUSTER_NAME -n $CLUSTER_NAMESPACE > $CLUSTER_NAME-KUBECONFIG
+export KUBECONFIG=$(pwd)/$CLUSTER_NAME-KUBECONFIG
+```
+
+Once connected to the cluster, follow the [CSI documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-driver-install-t.html){target=_blank}.
+
+First, install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot){target=_blank} chart followed by the [nutanix-csi-storage](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage){target=_blank} chart.
+
+See an example below:
+
+```shell
+# Add the official Nutanix Helm repo and get the latest updates
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+# Install the nutanix-csi-snapshot chart
+helm install nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system --create-namespace
+
+# Install the nutanix-csi-storage chart
+helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --set createSecret=false
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
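+
+As a quick sanity check, assuming the default release names and the `ntnx-system` namespace used above, verify that the CSI driver pods reach the `Running` state:
+
+```shell
+kubectl -n ntnx-system get pods
+```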
+
+## Install the Nutanix CSI Driver with `ClusterResourceSet`
+
+The `ClusterResourceSet` feature was introduced to automatically apply a set of resources (such as CNI/CSI) defined by administrators to matching created/existing workload clusters.
+
+### Enabling the `ClusterResourceSet` feature
+
+At the time of writing, `ClusterResourceSet` is an experimental feature that must be enabled during the initialization of a management cluster with the `EXP_CLUSTER_RESOURCE_SET` feature gate.
+
+To do this, add `EXP_CLUSTER_RESOURCE_SET: "true"` in the `clusterctl` configuration file or simply `export EXP_CLUSTER_RESOURCE_SET=true` before initializing the management cluster with `clusterctl init`.
+
+If the management cluster is already initialized, the `ClusterResourceSet` feature can be enabled by changing the configuration of the `capi-controller-manager` deployment in the `capi-system` namespace:
+
+```shell
+kubectl edit deployment -n capi-system capi-controller-manager
+```
+
+Locate the section below:
+
+```yaml
+      - args:
+        - --leader-elect
+        - --metrics-bind-addr=localhost:8080
+        - --feature-gates=MachinePool=false,ClusterResourceSet=false,ClusterTopology=false
+```
+
+Then replace `ClusterResourceSet=false` with `ClusterResourceSet=true`.
+
+!!! note
+    Editing the `deployment` resource will cause Kubernetes to automatically start new versions of the containers with the feature enabled.
+
+
+
+### Prepare the Nutanix CSI `ClusterResourceSet`
+
+#### Create the `ConfigMap` for the CSI Plugin
+
+First, create a `ConfigMap` that contains a YAML manifest with all resources to install the Nutanix CSI driver.
+
+Since the Nutanix CSI Driver is provided as a Helm chart, use `helm` to extract it before creating the `ConfigMap`. See an example below:
+
+```shell
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+kubectl create ns ntnx-system --dry-run=client -o yaml > nutanix-csi-namespace.yaml
+helm template nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system > nutanix-csi-snapshot.yaml
+helm template nutanix-csi-storage nutanix/nutanix-csi-storage -n ntnx-system > nutanix-csi-storage.yaml
+
+kubectl create configmap nutanix-csi-crs --from-file=nutanix-csi-namespace.yaml --from-file=nutanix-csi-snapshot.yaml --from-file=nutanix-csi-storage.yaml
+```
+
+#### Create the `ClusterResourceSet`
+
+Next, create the `ClusterResourceSet` resource that will map the `ConfigMap` defined above to clusters using a `clusterSelector`.
+
+The `ClusterResourceSet` needs to be created inside the management cluster. See an example below:
+
+```yaml
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+  name: nutanix-csi-crs
+spec:
+  clusterSelector:
+    matchLabels:
+      csi: nutanix
+  resources:
+    - kind: ConfigMap
+      name: nutanix-csi-crs
+```
+
+The `clusterSelector` field controls how Cluster API will match this `ClusterResourceSet` on one or more workload clusters. In this example, the `matchLabels` approach is used: the `ClusterResourceSet` will be applied to all workload clusters that have the `csi: nutanix` label present. If the label isn't present, the `ClusterResourceSet` won't apply to that workload cluster.
+
+The `resources` field references the `ConfigMap` created above, which contains the manifests for installing the Nutanix CSI driver.
+
+#### Assign the `ClusterResourceSet` to a workload cluster
+
+Assign this `ClusterResourceSet` to the workload cluster by adding the correct label to the `Cluster` resource.
+
+This can be done before workload cluster creation by editing the output of the `clusterctl generate cluster` command or by modifying an already deployed workload cluster.
+
+In both cases, `Cluster` resources should look like this:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: workload-cluster-name
+  namespace: workload-cluster-namespace
+  labels:
+    csi: nutanix
+# ...
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Install the Nutanix CSI Driver with a CAPX flavor
+
+The CAPX provider can utilize a flavor to automatically deploy the Nutanix CSI driver using a `ClusterResourceSet`.
+
+### Prerequisites
+
+The following requirements must be met:
+
+- The operating system must meet the [Nutanix CSI OS prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+- The management cluster must be initialized with the [`EXP_CLUSTER_RESOURCE_SET` feature gate](#enabling-the-clusterresourceset-feature).
+
+### Installation
+
+Specify the `csi` flavor during workload cluster creation. See an example below:
+
+```shell
+clusterctl generate cluster my-cluster -f csi
+```
+
+Additional environment variables are required:
+
+- `WEBHOOK_CA`: Base64 encoded CA certificate used to sign the webhook certificate
+- `WEBHOOK_CERT`: Base64 encoded certificate for the webhook validation component
+- `WEBHOOK_KEY`: Base64 encoded key for the webhook validation component
+
+The three components referenced above can be automatically created and referenced using [this script](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/main/scripts/gen-self-cert.sh){target=_blank}:
+
+```
+source scripts/gen-self-cert.sh
+```
+
+The certificate must reference the following names:
+
+- csi-snapshot-webhook
+- csi-snapshot-webhook.ntnx-system
+- csi-snapshot-webhook.ntnx-system.svc
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Nutanix CSI Driver Configuration
+
+After the driver is installed, it must be configured for use by defining, at a minimum, a `Secret` and a `StorageClass`.
+
+This can be done manually in the workload clusters or by using a `ClusterResourceSet` in the management cluster as explained above.
+
+See the official [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:CSI-Volume-Driver-v2_6){target=_blank} on the Nutanix Portal for more configuration information.
diff --git a/docs/capx/v1.5.x/credential_management.md b/docs/capx/v1.5.x/credential_management.md
new file mode 100644
index 00000000..bebbc5a0
--- /dev/null
+++ b/docs/capx/v1.5.x/credential_management.md
@@ -0,0 +1,93 @@
+# Credential Management
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs to manage the required Kubernetes cluster infrastructure resources.
+
+PC credentials are required to authenticate to the PC APIs. CAPX currently supports two mechanisms to supply the required credentials:
+
+- Credentials injected into the CAPX manager deployment
+- Workload cluster specific credentials
+
+## Credentials injected into the CAPX manager deployment
+By default, credentials will be injected into the CAPX manager deployment when CAPX is initialized. See the [getting started guide](./getting_started.md) for more information on the initialization.
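+
+As an illustration, a minimal initialization could look like the sketch below; the variable names are the ones referenced in this document, and the values are placeholders:
+
+```shell
+export NUTANIX_USER=<prism-central-user>
+export NUTANIX_PASSWORD=<prism-central-password>
+clusterctl init --infrastructure nutanix
+```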
+
+Upon initialization, a `nutanix-creds` secret is automatically created in the `capx-system` namespace. This secret contains the values supplied via the `NUTANIX_USER` and `NUTANIX_PASSWORD` parameters.
+
+The `nutanix-creds` secret will be used for workload cluster deployment if no other credential is supplied.
+
+### Example
+An example of the automatically created `nutanix-creds` secret can be found below:
+```yaml
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: nutanix-creds
+  namespace: capx-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+## Workload cluster specific credentials
+Users can override the [credentials injected into the CAPX manager deployment](#credentials-injected-into-the-capx-manager-deployment) by supplying a credential specific to a workload cluster. The credentials can be supplied by creating a secret in the same namespace as the `NutanixCluster` resource.
+
+The secret can be referenced by adding a `credentialRef` inside the `prismCentral` attribute contained in the `NutanixCluster`.
+The secret will also be deleted when the `NutanixCluster` is deleted.
+
+!!! note
+    There is a 1:1 relation between the secret and the `NutanixCluster` object.
+
+### Example
+Create a secret in the namespace of the `NutanixCluster`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ""
+  namespace: ""
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+Add a `prismCentral` and corresponding `credentialRef` to the `NutanixCluster`:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ""
+  namespace: ""
+spec:
+  prismCentral:
+    ...
+    credentialRef:
+      name: ""
+      kind: Secret
+...
+```
+
+See the [NutanixCluster](./types/nutanix_cluster.md) documentation for all supported configuration parameters of the `prismCentral` and `credentialRef` attributes.
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/experimental/autoscaler.md b/docs/capx/v1.5.x/experimental/autoscaler.md
new file mode 100644
index 00000000..2af57213
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/autoscaler.md
@@ -0,0 +1,129 @@
+# Using Autoscaler in combination with CAPX
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+[Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank} can be used in combination with Cluster API to automatically add or remove machines in a cluster.
+
+Autoscaler can be used in different deployment scenarios; this page provides an overview of several of them in combination with CAPX.
+See the [Testing](#testing) section for how to trigger scale-up/scale-down events and validate the autoscaler behaviour.
+
+More in-depth information on Autoscaler functionality can be found in the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank}.
+
+All Autoscaler configuration parameters can be found [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank}.

## Scenario 1: Management cluster managing an external workload cluster
In this scenario, Autoscaler runs on a management cluster and manages an external workload cluster. See the management cluster managing an external workload cluster section of the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster){target=_blank} for more information.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.

    !!! note
        Make sure a CNI is installed in the workload cluster.

2. Download the example [Autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
3. Modify the `deployment.yaml` file:
    - Change the namespace of all resources to the namespace of the workload cluster.
    - Choose an autoscaler image.
    - Change the following parameters in the `Deployment` resource:
```YAML
    spec:
      containers:
      - name: cluster-autoscaler
        command:
        - /cluster-autoscaler
        args:
        - --cloud-provider=clusterapi
        - --kubeconfig=/mnt/kubeconfig/kubeconfig.yml
        - --clusterapi-cloud-config-authoritative
        - -v=1
        volumeMounts:
        - mountPath: /mnt/kubeconfig
          name: kubeconfig
          readOnly: true
      ...
      volumes:
      - name: kubeconfig
        secret:
          secretName: -kubeconfig
          items:
          - key: value
            path: kubeconfig.yml
```
4. Apply the `deployment.yaml` file:
```bash
kubectl apply -f deployment.yaml
```
5. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
6. Test Autoscaler. Go to the [Testing](#testing) section.

## Scenario 2: Autoscaler running on workload cluster
In this scenario, Autoscaler is deployed directly [on top of the workload cluster](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-a-joined-cluster-using-service-account-credentials){target=_blank}. For Autoscaler to work, the workload cluster resources must be moved from the management cluster to the workload cluster.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
2. Get the kubeconfig file for the workload cluster and use it to log in to the workload cluster.
```bash
clusterctl get kubeconfig -n /path/to/kubeconfig
```
3. Install a CNI in the workload cluster.
4. Initialize the CAPX components on top of the workload cluster:
```bash
clusterctl init --infrastructure nutanix
```
5. Migrate the workload cluster custom resources to the workload cluster. Run the following command from the management cluster:
```bash
clusterctl move -n --to-kubeconfig /path/to/kubeconfig
```
6. Verify that the cluster has been migrated by running the following command on the workload cluster:
```bash
kubectl get cluster -A
```
7. Download the example [autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
8. 
Create the Autoscaler namespace:
```bash
kubectl create ns autoscaler
```
9. Apply the `deployment.yaml` file:
```bash
kubectl apply -f deployment.yaml
```
10. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
11. Test Autoscaler. Go to the [Testing](#testing) section.

## Testing

1. Deploy an example Kubernetes application. For example, the one used in the [Kubernetes HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
```bash
kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
```
2. Increase the number of replicas of the application to trigger a scale-up event:
```bash
kubectl scale deployment php-apache --replicas 100
```
3. Decrease the number of replicas of the application again to trigger a scale-down event.

    !!! note
        In case of issues, check the logs of the Autoscaler pods.

4. After a while, CAPX will add more machines. Refer to the [Autoscaler configuration parameters](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank} to tweak the behaviour and timeouts.

## Autoscaler node group annotations
Autoscaler uses the following annotations to define the upper and lower boundaries of the managed machines:

| Annotation                                                   | Example Value | Description                                   |
|--------------------------------------------------------------|---------------|-----------------------------------------------|
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size  | 5             | Maximum number of machines in this node group |
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size  | 1             | Minimum number of machines in this node group |

These annotations must be applied to the `MachineDeployment` resources of a CAPX cluster.

### Example
```YAML
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
```
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/experimental/capx_multi_pe.md b/docs/capx/v1.5.x/experimental/capx_multi_pe.md
new file mode 100644
index 00000000..bd52ccd7
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/capx_multi_pe.md
@@ -0,0 +1,30 @@
# Creating a workload CAPX cluster spanning Prism Element clusters

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

This page explains how to deploy CAPX-based Kubernetes clusters whose worker nodes span multiple Prism Element (PE) clusters.

!!! note
    All the PE clusters must be managed by the same Prism Central (PC) instance.

The topology will look like this:

- One PC managing multiple PEs
- One CAPI management cluster
- One CAPI workload cluster with multiple `MachineDeployment` resources

Refer to the [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to get started with CAPX.

To create workload clusters spanning multiple Prism Element clusters, it is required to create a `MachineDeployment` and a `NutanixMachineTemplate` resource for each Prism Element cluster. The Prism Element specific parameters (name/UUID, subnet, ...) are referenced in the `NutanixMachineTemplate`.

## Steps
1. Create a management cluster that has the CAPX infrastructure provider deployed.
2. 
Create a `cluster.yml` file containing the workload cluster definition. Refer to the steps defined in the [CAPI quickstart guide](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to create an example `cluster.yml` file.
3. Add additional `MachineDeployment` and `NutanixMachineTemplate` resources.

    By default, only one machine template and machine deployment are defined. To add nodes residing on another Prism Element cluster, a new `MachineDeployment` and `NutanixMachineTemplate` resource needs to be added to the YAML file. The autogenerated `MachineDeployment` and `NutanixMachineTemplate` resource definitions can be used as a baseline.

    Make sure to modify the `MachineDeployment` and `NutanixMachineTemplate` parameters.

4. Apply the modified `cluster.yml` file to the management cluster.

diff --git a/docs/capx/v1.5.x/experimental/oidc.md b/docs/capx/v1.5.x/experimental/oidc.md
new file mode 100644
index 00000000..0c274121
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/oidc.md
@@ -0,0 +1,31 @@
# OIDC integration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

Kubernetes allows users to authenticate using various authentication mechanisms. One of these mechanisms is OIDC. Information on how Kubernetes interacts with OIDC providers can be found in the [OpenID Connect Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens){target=_blank} section of the official Kubernetes documentation.

Follow the steps below to configure a CAPX cluster to use an OIDC identity provider.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
3. Modify/add the `spec.kubeadmConfigSpec.clusterConfiguration.apiServer.extraArgs` attribute and add the required [API server parameters](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server){target=_blank}. See the [example](#example) below.
4. Apply the `cluster.yaml` file.
5. Log in with the OIDC provider once the cluster is provisioned.

## Example
```YAML
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          ...
          oidc-client-id: 
          oidc-issuer-url: 
          ...
```

diff --git a/docs/capx/v1.5.x/experimental/proxy.md b/docs/capx/v1.5.x/experimental/proxy.md
new file mode 100644
index 00000000..c8f940d4
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/proxy.md
@@ -0,0 +1,62 @@
# Proxy configuration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

CAPX can be configured to use a proxy to connect to external networks. This proxy configuration needs to be applied to control plane and worker nodes.

Follow the steps below to configure a CAPX cluster to use a proxy.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. 
Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration.
    1. `KubeadmControlPlane`:
        * Add the proxy configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
        * Add `systemctl` commands to apply the proxy config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list.
    2. `KubeadmConfigTemplate`:
        * Add the proxy configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
        * Add `systemctl` commands to apply the proxy config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list.
3. Apply the `cluster.yaml` file.

## Example

```YAML
---
# controlplane proxy settings
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        [Service]
        Environment="HTTP_PROXY="
        Environment="HTTPS_PROXY="
        Environment="NO_PROXY="
      owner: root:root
      path: /etc/systemd/system/containerd.service.d/http-proxy.conf
    ...
    preKubeadmCommands:
    - sudo systemctl daemon-reload
    - sudo systemctl restart containerd
    ...
---
# worker proxy settings
kind: KubeadmConfigTemplate
spec:
  template:
    spec:
      files:
      - content: |
          [Service]
          Environment="HTTP_PROXY="
          Environment="HTTPS_PROXY="
          Environment="NO_PROXY="
        owner: root:root
        path: /etc/systemd/system/containerd.service.d/http-proxy.conf
      ...
      preKubeadmCommands:
      - sudo systemctl daemon-reload
      - sudo systemctl restart containerd
      ...
```

diff --git a/docs/capx/v1.5.x/experimental/registry_mirror.md b/docs/capx/v1.5.x/experimental/registry_mirror.md
new file mode 100644
index 00000000..307a9425
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/registry_mirror.md
@@ -0,0 +1,96 @@
# Registry Mirror configuration

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

CAPX can be configured to use a private registry to act as a mirror of an external public registry. This registry mirror configuration needs to be applied to control plane and worker nodes.

Follow the steps below to configure a CAPX cluster to use a registry mirror.

## Steps
1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the registry mirror configuration.
    1. `KubeadmControlPlane`:
        * Add the registry mirror configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
        * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list.
    2. `KubeadmConfigTemplate`:
        * Add the registry mirror configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
        * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list.
3. Apply the `cluster.yaml` file.

## Example

This example will configure a registry mirror for the following namespaces:

* registry.k8s.io
* ghcr.io
* quay.io

and redirect them to the corresponding projects of the `` registry. 

```YAML
---
# controlplane registry mirror settings
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        [host."https:///v2/registry.k8s.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
    - content: |
        [host."https:///v2/ghcr.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/ghcr.io/hosts.toml
    - content: |
        [host."https:///v2/quay.io"]
          capabilities = ["pull", "resolve"]
          skip_verify = false
          override_path = true
      owner: root:root
      path: /etc/containerd/certs.d/quay.io/hosts.toml
    ...
    preKubeadmCommands:
    - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n   config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
    ...
---
# worker registry mirror settings
kind: KubeadmConfigTemplate
spec:
  template:
    spec:
      files:
      - content: |
          [host."https:///v2/registry.k8s.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
      - content: |
          [host."https:///v2/ghcr.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/ghcr.io/hosts.toml
      - content: |
          [host."https:///v2/quay.io"]
            capabilities = ["pull", "resolve"]
            skip_verify = false
            override_path = true
        owner: root:root
        path: /etc/containerd/certs.d/quay.io/hosts.toml
      ...
      preKubeadmCommands:
      - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n   config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
      ...
```

diff --git a/docs/capx/v1.5.x/experimental/vpc.md b/docs/capx/v1.5.x/experimental/vpc.md
new file mode 100644
index 00000000..3513e47e
--- /dev/null
+++ b/docs/capx/v1.5.x/experimental/vpc.md
@@ -0,0 +1,40 @@
# Creating a workload CAPX cluster in a Nutanix Flow VPC

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

!!! note
    Nutanix Flow VPCs are only validated with CAPX 1.1.3+

[Nutanix Flow Virtual Networking](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9:Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9){target=_blank} allows users to create Virtual Private Clouds (VPCs) with Overlay networking.
The steps below illustrate how a CAPX cluster can be deployed in an overlay subnet (NAT) inside a VPC while the management cluster resides outside of the VPC.

## Steps
1. [Request a floating IP](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Networking-Guide:ear-flow-nw-request-floating-ip-pc-t.html){target=_blank}.
2. Link the floating IP to an internal IP address inside the overlay subnet that will be used to deploy the CAPX cluster. This address will be assigned to the CAPX load balancer. To prevent IP conflicts, make sure the IP address is not part of the IP pool defined in the subnet.
3. Generate a `cluster.yaml` file with the required CAPX cluster configuration where the `CONTROL_PLANE_ENDPOINT_IP` is set to the floating IP requested in the first step. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
4. 
Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
5. Modify the `spec.kubeadmConfigSpec.files.*.content` attribute and change the `kube-vip` definition as shown in the [example](#example) below.
6. Apply the `cluster.yaml` file.
7. When the CAPX workload cluster is deployed, it will be reachable via the floating IP.

## Example
```YAML
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - env:
            - name: address
              value: ""
```

diff --git a/docs/capx/v1.5.x/getting_started.md b/docs/capx/v1.5.x/getting_started.md
new file mode 100644
index 00000000..536e5243
--- /dev/null
+++ b/docs/capx/v1.5.x/getting_started.md
@@ -0,0 +1,159 @@
# Getting Started

This is a guide on getting started with Cluster API Provider Nutanix Cloud Infrastructure (CAPX). To learn about Cluster API in more depth, check out the [Cluster API book](https://cluster-api.sigs.k8s.io/){target=_blank}.

For more information on how to install the Nutanix CSI Driver on a CAPX cluster, visit [Nutanix CSI Driver installation with CAPX](./addons/install_csi_driver.md).

For more information on how CAPX handles credentials, visit [Credential Management](./credential_management.md).

For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md).

!!! note
    [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.5.x Upgrade Procedure](./tasks/capx_v14x_upgrade_procedure.md).

## Production Workflow

### Build OS image for NutanixMachineTemplate resource
Cluster API Provider Nutanix Cloud Infrastructure (CAPX) uses the [Image Builder](https://image-builder.sigs.k8s.io/){target=_blank} project to build OS images used for the Nutanix machines.

Follow the steps detailed in [Building CAPI Images for Nutanix Cloud Platform (NCP)](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#building-capi-images-for-nutanix-cloud-platform-ncp){target=_blank} to use Image Builder on the Nutanix Cloud Platform.

For a list of operating systems, visit the OS image [Configuration](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#configuration){target=_blank} page.

### Prerequisites for using Cluster API Provider Nutanix Cloud Infrastructure
The [Cluster API installation](https://cluster-api.sigs.k8s.io/user/quick-start.html#installation){target=_blank} section provides an overview of all required prerequisites:

- [Common Prerequisites](https://cluster-api.sigs.k8s.io/user/quick-start.html#common-prerequisites){target=_blank}
- [Install and/or configure a Kubernetes cluster](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-andor-configure-a-kubernetes-cluster){target=_blank}
- [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl){target=_blank}
- (Optional) [Enabling Feature Gates](https://cluster-api.sigs.k8s.io/user/quick-start.html#enabling-feature-gates){target=_blank}

Make sure these prerequisites have been met before moving to the [Configure and Install Cluster API Provider Nutanix Cloud Infrastructure](#configure-and-install-cluster-api-provider-nutanix-cloud-infrastructure) step.
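
As a quick sanity check before proceeding, you can confirm that the `clusterctl` CLI is installed and available on your `PATH`:

```
clusterctl version
```
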

### Configure and Install Cluster API Provider Nutanix Cloud Infrastructure
To initialize Cluster API Provider Nutanix Cloud Infrastructure, `clusterctl` requires the following variables, which should be set either in `~/.cluster-api/clusterctl.yaml` or as environment variables.
```
NUTANIX_ENDPOINT: ""  # IP or FQDN of Prism Central
NUTANIX_USER: ""      # Prism Central user
NUTANIX_PASSWORD: ""  # Prism Central password
NUTANIX_INSECURE: false # or true

KUBERNETES_VERSION: "v1.22.9"
WORKER_MACHINE_COUNT: 3
NUTANIX_SSH_AUTHORIZED_KEY: ""

NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: ""
NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME: ""
NUTANIX_SUBNET_NAME: ""

EXP_CLUSTER_RESOURCE_SET: true # Required for Nutanix CCM installation
```

You can also see the list of required variables by running the following:
```
clusterctl generate cluster mycluster -i nutanix --list-variables
Required Variables:
  - CONTROL_PLANE_ENDPOINT_IP
  - KUBERNETES_VERSION
  - NUTANIX_ENDPOINT
  - NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME
  - NUTANIX_PASSWORD
  - NUTANIX_PRISM_ELEMENT_CLUSTER_NAME
  - NUTANIX_SSH_AUTHORIZED_KEY
  - NUTANIX_SUBNET_NAME
  - NUTANIX_USER

Optional Variables:
  - CONTROL_PLANE_ENDPOINT_PORT (defaults to "6443")
  - CONTROL_PLANE_MACHINE_COUNT (defaults to 1)
  - KUBEVIP_LB_ENABLE (defaults to "false")
  - KUBEVIP_SVC_ENABLE (defaults to "false")
  - NAMESPACE (defaults to current Namespace in the KubeConfig file)
  - NUTANIX_INSECURE (defaults to "false")
  - NUTANIX_MACHINE_BOOT_TYPE (defaults to "legacy")
  - NUTANIX_MACHINE_MEMORY_SIZE (defaults to "4Gi")
  - NUTANIX_MACHINE_VCPU_PER_SOCKET (defaults to "1")
  - NUTANIX_MACHINE_VCPU_SOCKET (defaults to "2")
  - NUTANIX_PORT (defaults to "9440")
  - NUTANIX_SYSTEMDISK_SIZE (defaults to "40Gi")
  - WORKER_MACHINE_COUNT (defaults to 0)
```

!!! note
    To prevent duplicate IP assignments, it is required to assign an IP address to the `CONTROL_PLANE_ENDPOINT_IP` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.

!!! warning
    Make sure [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled before running `clusterctl init`.

Now you can instantiate Cluster API with the following:
```
clusterctl init -i nutanix
```

### Deploy a workload cluster on Nutanix Cloud Infrastructure
```
export TEST_CLUSTER_NAME=mytestcluster1
export TEST_NAMESPACE=mytestnamespace
CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster ${TEST_CLUSTER_NAME} \
    -i nutanix \
    --target-namespace ${TEST_NAMESPACE} \
    --kubernetes-version v1.22.9 \
    --control-plane-machine-count 1 \
    --worker-machine-count 3 > ./cluster.yaml
kubectl create ns ${TEST_NAMESPACE}
kubectl apply -f ./cluster.yaml -n ${TEST_NAMESPACE}
```
To customize the configuration of the default `cluster.yaml` file generated by CAPX, visit the [NutanixCluster](./types/nutanix_cluster.md) and [NutanixMachineTemplate](./types/nutanix_machine_template.md) documentation.

### Access a workload cluster
To access resources on the cluster, you can get the kubeconfig with the following:
```
clusterctl get kubeconfig ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} > ${TEST_CLUSTER_NAME}.kubeconfig
kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig get nodes
```

### Install CNI on a workload cluster

You must deploy a Container Network Interface (CNI) based pod network add-on so that your pods can communicate with each other. 
Cluster DNS (CoreDNS) will not start up before a network is installed.

!!! note
    Take care that your pod network must not overlap with any of the host networks. You are likely to see problems if there is any overlap. If you find a collision between your network plugin's preferred pod network and some of your host networks, you must choose a suitable alternative CIDR block to use instead. It can be configured inside the `cluster.yaml` generated by `clusterctl generate cluster` before applying it.

Several external projects provide Kubernetes pod networks using CNI, some of which also support [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/){target=_blank}.

See a list of add-ons that implement the [Kubernetes networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model){target=_blank}. At the time of writing, the most common are [Calico](https://www.tigera.io/project-calico/){target=_blank} and [Cilium](https://cilium.io){target=_blank}.

Follow the specific install guide for your selected CNI and install only one pod network per cluster.

Once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS pod is running in the output of `kubectl get pods --all-namespaces`.

### Kube-vip settings

Kube-vip is a load balancing solution for the Kubernetes control plane: it distributes API requests across control plane nodes. It can also provide load balancing for Kubernetes services.

You can tweak kube-vip settings by using the following properties:

- `KUBEVIP_LB_ENABLE`

This setting allows control plane load balancing using IPVS. See the
[Control Plane Load-Balancing documentation](https://kube-vip.io/docs/about/architecture/#control-plane-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ENABLE`

This setting enables a service of type LoadBalancer. See the
[Kubernetes Service Load Balancing documentation](https://kube-vip.io/docs/about/architecture/#kubernetes-service-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ELECTION`

This setting enables load balancing of load balancers. See
[Load Balancing Load Balancers](https://kube-vip.io/docs/usage/kubernetes-services/#load-balancing-load-balancers-when-using-arp-mode-yes-you-read-that-correctly-kube-vip-v050){target=_blank} for further information.

### Delete a workload cluster
To remove a workload cluster from your management cluster, delete the cluster object and the provider will clean up all resources.

```
kubectl delete cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE}
```
!!! note
    Deleting the entire cluster template with `kubectl delete -f ./cluster.yaml` may lead to pending resources that require manual cleanup.

diff --git a/docs/capx/v1.5.x/pc_certificates.md b/docs/capx/v1.5.x/pc_certificates.md
new file mode 100644
index 00000000..f3fe1699
--- /dev/null
+++ b/docs/capx/v1.5.x/pc_certificates.md
@@ -0,0 +1,149 @@
# Certificate Trust

CAPX invokes Prism Central APIs using the HTTPS protocol. CAPX has different methods to handle the trust of the Prism Central certificates:

- Enable certificate verification (default)
- Configure an additional trust bundle
- Disable certificate verification

See the respective sections below for more information.

!!! 
note
    For more information about replacing Prism Central certificates, see the [Nutanix AOS Security Guide](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_5:mul-security-ssl-certificate-pc-t.html){target=_blank}.

## Enable certificate verification (default)
By default, CAPX performs certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a certificate issued by a publicly trusted certificate authority.
No additional configuration is required in CAPX.

## Configure an additional trust bundle
CAPX allows users to configure an additional trust bundle. This allows CAPX to verify certificates that are not issued by a publicly trusted certificate authority.

To configure an additional trust bundle, the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable needs to be set. The value of the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable contains the trust bundle (PEM format) in base64 encoded format. See the [Configuring the trust bundle environment variable](#configuring-the-trust-bundle-environment-variable) section for more information.

It is also possible to configure the additional trust bundle manually by creating a custom `cluster-template`. See the [Configuring the additional trust bundle manually](#configuring-the-additional-trust-bundle-manually) section for more information.

The `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable can be set when initializing the CAPX provider or when creating a workload cluster. If the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` is configured when the CAPX provider is initialized, the additional trust bundle will be used for every CAPX workload cluster. If it is only configured when creating a workload cluster, it will only be applicable for that specific workload cluster.

### Configuring the trust bundle environment variable

Create a PEM encoded file containing the root certificate and all intermediate certificates. Example:
```
$ cat cert.crt
-----BEGIN CERTIFICATE-----

-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----

-----END CERTIFICATE-----
```

Use a `base64` tool to encode these contents in base64. The command below will provide a `base64` string:
```
$ cat cert.crt | base64

```
!!! note
    Make sure the `base64` string does not contain any newlines (`\n`). If the output string contains newlines, remove them manually or check the manual of the `base64` tool on how to generate a `base64` string without newlines (with GNU coreutils, for example, `base64 -w 0 cert.crt` produces a single-line string).

Use the `base64` string as the value for the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable.
```
$ export NUTANIX_ADDITIONAL_TRUST_BUNDLE=""
```

### Configuring the additional trust bundle manually

To configure the additional trust bundle manually without using the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable present in the default `cluster-template` files, it is required to:

- Create a `ConfigMap` containing the additional trust bundle.
- Configure the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec.

#### Creating the additional trust bundle ConfigMap

CAPX supports two different formats for the ConfigMap containing the additional trust bundle: the first is to add the additional trust bundle as a multi-line string in the `ConfigMap`; the second is to add the trust bundle in `base64` encoded format. See the examples below. 

Multi-line string example:
```YAML
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-ca-bundle
  namespace: ${NAMESPACE}
data:
  ca.crt: |
    -----BEGIN CERTIFICATE-----

    -----END CERTIFICATE-----
    -----BEGIN CERTIFICATE-----

    -----END CERTIFICATE-----
```

`base64` example:

```YAML
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-ca-bundle
  namespace: ${NAMESPACE}
binaryData:
  ca.crt: 
```

!!! note
    The `base64` string needs to be added as `binaryData`.

#### Configuring the NutanixCluster spec

When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `NutanixCluster` spec. Add the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced.

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  ...
  prismCentral:
    ...
    additionalTrustBundle:
      kind: ConfigMap
      name: user-ca-bundle
    insecure: false
```

!!! note
    The default value of the `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured.

    If the `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled.

## Disable certificate verification

!!! note
    Disabling certificate verification is not recommended for production purposes and should only be used for testing.

Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `NutanixCluster` spec. In that case, certificate verification will be disabled even if an additional trust bundle is configured.

Disabled certificate verification example:

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  controlPlaneEndpoint:
    host: ${CONTROL_PLANE_ENDPOINT_IP}
    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
  prismCentral:
    ...
    insecure: true
    ...
```
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/port_requirements.md b/docs/capx/v1.5.x/port_requirements.md
new file mode 100644
index 00000000..af182abb
--- /dev/null
+++ b/docs/capx/v1.5.x/port_requirements.md
@@ -0,0 +1,19 @@
# Port Requirements

CAPX uses the ports documented below to create workload clusters.

!!! note
    This page only documents the ports specifically required by CAPX and does not provide a full overview of all ports required in the CAPI framework. 

## Management cluster

| Source             | Destination         | Protocol | Port | Description                                                                                       |
|--------------------|---------------------|----------|------|---------------------------------------------------------------------------------------------------|
| Management cluster | External Registries | TCP      | 443  | Pull container images from [CAPX public registries](#public-registries-utilized-when-using-capx)  |
| Management cluster | Prism Central       | TCP      | 9440 | Management cluster communication to Prism Central                                                 |

## Public registries utilized when using CAPX

| Registry name |
|---------------|
| ghcr.io       |

diff --git a/docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md b/docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md
new file mode 100644
index 00000000..4c74a29a
--- /dev/null
+++ b/docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md
@@ -0,0 +1,83 @@
# CAPX v1.5.x Upgrade Procedure

Starting from CAPX v1.3.0, it is required for all CAPX-managed Kubernetes clusters to use the Nutanix Cloud Controller Manager (CCM).

Before upgrading CAPX instances to v1.3.0 or later, it is required to follow the [steps](#steps) detailed below for each of the CAPX-managed Kubernetes clusters that don't use Nutanix CCM.

## Steps

This procedure uses [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} to install Nutanix CCM, but it can also be installed using the [Nutanix CCM Helm chart](https://artifacthub.io/packages/helm/nutanix/nutanix-cloud-provider){target=_blank}.

!!! warning
    Make sure [CRS](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled on the management cluster before following the procedure.

Perform the following steps for each of the CAPX-managed Kubernetes clusters that are not configured to use Nutanix CCM:

1. Add the `cloud-provider: external` configuration in the `KubeadmConfigTemplate` resources:
    ```YAML
    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
    kind: KubeadmConfigTemplate
    spec:
      template:
        spec:
          joinConfiguration:
            nodeRegistration:
              kubeletExtraArgs:
                cloud-provider: external
    ```
2. Add the `cloud-provider: external` configuration in the `KubeadmControlPlane` resource:
```YAML
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            cloud-provider: external
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          cloud-provider: external
      controllerManager:
        extraArgs:
          cloud-provider: external
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
```
3. Add the Nutanix CCM CRS resources:

    - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm-crs.yaml){target=_blank}
    - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm-secret.yaml){target=_blank}
    - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm.yaml){target=_blank}

    Make sure to update each of the variables before applying the `YAML` files.

4. 
Add the `ccm: nutanix` label to the `Cluster` resource:
    ```YAML
    apiVersion: cluster.x-k8s.io/v1beta1
    kind: Cluster
    metadata:
      labels:
        ccm: nutanix
    ```
5. Verify that the Nutanix CCM pod is up and running:
```
kubectl get pod -A -l k8s-app=nutanix-cloud-controller-manager
```
6. Trigger a new rollout of the Kubernetes nodes by performing a Kubernetes upgrade or by using `clusterctl alpha rollout restart`. See the [clusterctl alpha rollout](https://cluster-api.sigs.k8s.io/clusterctl/commands/alpha-rollout#restart){target=_blank} documentation for more information.
7. Upgrade CAPX to v1.5.0 by following the [clusterctl upgrade](https://cluster-api.sigs.k8s.io/clusterctl/commands/upgrade.html?highlight=clusterctl%20upgrade%20pla#clusterctl-upgrade){target=_blank} documentation.
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/tasks/modify_machine_configuration.md b/docs/capx/v1.5.x/tasks/modify_machine_configuration.md
new file mode 100644
index 00000000..04a43a95
--- /dev/null
+++ b/docs/capx/v1.5.x/tasks/modify_machine_configuration.md
@@ -0,0 +1,11 @@
# Modifying Machine Configurations

Since all attributes of the `NutanixMachineTemplate` resources are immutable, follow the [Updating Infrastructure Machine Templates](https://cluster-api.sigs.k8s.io/tasks/updating-machine-templates.html?highlight=machine%20template#updating-infrastructure-machine-templates){target=_blank} procedure to modify the configuration of machines in an existing CAPX cluster.
See the [NutanixMachineTemplate](../types/nutanix_machine_template.md) documentation for all supported configuration parameters.

!!! note
    Manually modifying existing and linked `NutanixMachineTemplate` resources will not trigger a rolling update of the machines.

!!! note
    Do not modify the virtual machine configuration of CAPX cluster nodes manually in Prism/Prism Central.
    CAPX will not automatically revert the configuration change, but performing scale-up/scale-down/upgrade operations will override manual modifications. Only use the Updating Infrastructure Machine Templates procedure referenced above to perform configuration changes.
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/troubleshooting.md b/docs/capx/v1.5.x/troubleshooting.md
new file mode 100644
index 00000000..c023d13e
--- /dev/null
+++ b/docs/capx/v1.5.x/troubleshooting.md
@@ -0,0 +1,13 @@
# Troubleshooting

## Clusterctl failed with GitHub rate limit error

By design, `clusterctl` fetches artifacts from repositories hosted on GitHub; this operation is subject to [GitHub API rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting){target=_blank}.

While this is generally okay for the majority of users, there is still a chance that some users (especially developers or CI tools) hit this limit:

```
Error: failed to get repository client for the XXX with name YYY: error creating the GitHub repository client: failed to get GitHub latest version: failed to get the list of versions: rate limit for github api has been reached. Please wait one hour or get a personal API tokens a assign it to the GITHUB_TOKEN environment variable
```

As explained in the error message, you can increase your API rate limit by [creating a GitHub personal token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token){target=_blank} and setting a `GITHUB_TOKEN` environment variable using the token.
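
For example (a sketch; the token value is a placeholder for a token you generate in your GitHub account settings):

```
export GITHUB_TOKEN="<your-github-token>"
```
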
diff --git a/docs/capx/v1.5.x/types/nutanix_cluster.md b/docs/capx/v1.5.x/types/nutanix_cluster.md new file mode 100644 index 00000000..09325cab --- /dev/null +++ b/docs/capx/v1.5.x/types/nutanix_cluster.md @@ -0,0 +1,64 @@ +# NutanixCluster + +The `NutanixCluster` resource defines the configuration of a CAPX Kubernetes cluster. + +Example of a `NutanixCluster` resource: + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + prismCentral: + address: ${NUTANIX_ENDPOINT} + additionalTrustBundle: + kind: ConfigMap + name: user-ca-bundle + credentialRef: + kind: Secret + name: ${CLUSTER_NAME} + insecure: ${NUTANIX_INSECURE=false} + port: ${NUTANIX_PORT=9440} +``` + +## NutanixCluster spec +The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixCluster` resource. + +### Configuration parameters + +| Key |Type |Description | +|--------------------------------------------|------|----------------------------------------------------------------------------------| +|controlPlaneEndpoint |object|Defines the host IP and port of the CAPX Kubernetes cluster. | +|controlPlaneEndpoint.host |string|Host IP to be assigned to the CAPX Kubernetes cluster. | +|controlPlaneEndpoint.port |int |Port of the CAPX Kubernetes cluster. Default: `6443` | +|prismCentral |object|(Optional) Prism Central endpoint definition. | +|prismCentral.address |string|IP/FQDN of Prism Central. | +|prismCentral.port |int |Port of Prism Central. Default: `9440` | +|prismCentral.insecure |bool |Disable Prism Central certificate checking. Default: `false` | +|prismCentral.credentialRef |object|Reference to credentials used for Prism Central connection. | +|prismCentral.credentialRef.kind |string|Kind of the credentialRef. Allowed value: `Secret` | +|prismCentral.credentialRef.name |string|Name of the secret containing the Prism Central credentials. | +|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret containing the Prism Central credentials. | +|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for Prism Central connection. | +|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` | +|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. | +|prismCentral.additionalTrustBundle.namespace|string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle.| +|failureDomains |list |(Optional) Failure domains for the Kubernetes nodes | +|failureDomains.[].name |string|Name of the failure domain | +|failureDomains.[].cluster |object|Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed | +|failureDomains.[].cluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` | +|failureDomains.[].cluster.name |string|Name of the Prism Element cluster. | +|failureDomains.[].cluster.uuid |string|UUID of the Prism Element cluster. | +|failureDomains.[].subnets |list |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs. | +|failureDomains.[].subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` | +|failureDomains.[].subnets.[].name |string|Name of the subnet. 
|
|failureDomains.[].subnets.[].uuid |string|UUID of the subnet. |
|failureDomains.[].controlPlane |bool |Indicates if a failure domain is suited for control plane nodes. |

!!! note
    To prevent duplicate IP assignments, it is required to assign an IP address to the `controlPlaneEndpoint.host` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/types/nutanix_machine_template.md b/docs/capx/v1.5.x/types/nutanix_machine_template.md
new file mode 100644
index 00000000..516d1eea
--- /dev/null
+++ b/docs/capx/v1.5.x/types/nutanix_machine_template.md
@@ -0,0 +1,84 @@
# NutanixMachineTemplate
The `NutanixMachineTemplate` resource defines the configuration of a CAPX Kubernetes VM.

Example of a `NutanixMachineTemplate` resource:

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
metadata:
  name: "${CLUSTER_NAME}-mt-0"
  namespace: "${NAMESPACE}"
spec:
  template:
    spec:
      providerID: "nutanix://${CLUSTER_NAME}-m1"
      # Supported options for boot type: legacy and uefi
      # Defaults to legacy if not set
      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
      image:
        type: name
        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
      cluster:
        type: name
        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
      subnet:
      - type: name
        name: "${NUTANIX_SUBNET_NAME}"
      # Adds additional categories to the virtual machines.
      # Note: Categories must already be present in Prism Central
      # additionalCategories:
      # - key: AppType
      #   value: Kubernetes
      # Adds the cluster virtual machines to a project defined in Prism Central.
      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
      # Note: Project must already be present in Prism Central.
      # project:
      #   type: name
      #   name: "NUTANIX_PROJECT_NAME"
      # gpus:
      # - type: name
      #   name: "GPU NAME"
```

## NutanixMachineTemplate spec
The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixMachineTemplate` resource.

### Configuration parameters
| Key |Type |Description|
|------------------------------------|------|--------------------------------------------------------------------------------------------------------|
|bootType |string|Boot type of the VM. Depends on the OS image used. Allowed values: `legacy`, `uefi`. Default: `legacy` |
|vcpusPerSocket |int |Amount of vCPUs per socket. Default: `1` |
|vcpuSockets |int |Amount of vCPU sockets. Default: `2` |
|memorySize |string|Amount of Memory. Default: `4Gi` |
|systemDiskSize |string|Amount of storage assigned to the system disk. Default: `40Gi` |
|image |object|Reference (name or uuid) to the OS image used for the system disk. |
|image.type |string|Type to identify the OS image. Allowed values: `name` and `uuid` |
|image.name |string|Name of the image. |
|image.uuid |string|UUID of the image. |
|cluster |object|(Optional) Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed |
|cluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` |
|cluster.name |string|Name of the Prism Element cluster. |
|cluster.uuid |string|UUID of the Prism Element cluster. 
|
|subnets |list |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs. |
|subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` |
|subnets.[].name |string|Name of the subnet. |
|subnets.[].uuid |string|UUID of the subnet. |
|additionalCategories |list |Reference to the categories to be assigned to the VMs. These categories must already exist in Prism Central. |
|additionalCategories.[].key |string|Key of the category. |
|additionalCategories.[].value |string|Value of the category. |
|project |object|Reference (name or uuid) to the project. This project must already exist in Prism Central. |
|project.type |string|Type to identify the project. Allowed values: `name` and `uuid` |
|project.name |string|Name of the project. |
|project.uuid |string|UUID of the project. |
|gpus |object|Reference (name or deviceID) to the GPUs to be assigned to the VMs. Can be vGPU or Passthrough. |
|gpus.[].type |string|Type to identify the GPU. Allowed values: `name` and `deviceID` |
|gpus.[].name |string|Name of the GPU or the vGPU profile. |
|gpus.[].deviceID |string|DeviceID of the GPU or the vGPU profile. |

!!! note
    The `cluster` or `subnets` configuration parameters are optional in case failure domains are defined on the `NutanixCluster` and `MachineDeployment` resources.
\ No newline at end of file
diff --git a/docs/capx/v1.5.x/user_requirements.md b/docs/capx/v1.5.x/user_requirements.md
new file mode 100644
index 00000000..5a4b8604
--- /dev/null
+++ b/docs/capx/v1.5.x/user_requirements.md
@@ -0,0 +1,36 @@
# User Requirements

Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account.

CAPX supports two types of PC users:

- Local users: must be assigned the `Prism Central Admin` role.
- Domain users: must be assigned a role that includes at least the [Minimum required CAPX permissions for domain users](#minimum-required-capx-permissions-for-domain-users).

See [Credential Management](./credential_management.md){target=_blank} for more information on how to pass the user credentials to CAPX.

## Minimum required CAPX permissions for domain users

The following permissions are required for Prism Central domain users:

- Create Category Mapping
- Create Image
- Create Or Update Name Category
- Create Or Update Value Category
- Create Virtual Machine
- Delete Category Mapping
- Delete Image
- Delete Name Category
- Delete Value Category
- Delete Virtual Machine
- View Category Mapping
- View Cluster
- View Image
- View Name Category
- View Project
- View Subnet
- View Value Category
- View Virtual Machine

!!! note
    The list of permissions has been validated on PC 2022.6 and above.

diff --git a/docs/capx/v1.5.x/validated_integrations.md b/docs/capx/v1.5.x/validated_integrations.md
new file mode 100644
index 00000000..c90d43a8
--- /dev/null
+++ b/docs/capx/v1.5.x/validated_integrations.md
@@ -0,0 +1,65 @@
# Validated Integrations

Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers use or deploy with CAPX. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process. 

## Integration Validation Policy

Nutanix follows the version validation policies below:

- Validate at least one active AOS LTS (long term support) version. The validated AOS LTS version for a specific CAPX version is listed in the [AOS](#aos) section.

    !!! note

        Typically the latest LTS release at the time of the CAPX release, except when the latest is the initial release in a train (e.g. x.y.0). The exact version depends on timing and customer adoption.

- Validate the latest AOS STS (short term support) release at the time of the CAPX release.
- Validate at least one active Prism Central (PC) version. The validated PC version for a specific CAPX version is listed in the [Prism Central](#prism-central) section.

    !!! note

        Typically the latest PC release at the time of the CAPX release, except when the latest is the initial release in a train (e.g. x.y.0). The exact version depends on timing and customer adoption.

- Validate at least one active Cluster-API (CAPI) version. The validated CAPI version for a specific CAPX version is listed in the [Cluster-API](#cluster-api) section.
+ + !!! note + + Typically the the latest Cluster-API release at time of CAPX release except when latest is initial release in train (eg x.y.0). Exact version depends on timing and customer adoption. + +## Validated versions +### Cluster-API +| CAPX | CAPI v1.3.x | CAPI v1.4.x | CAPI v1.5.x | CAPI v1.6.x | CAPI v1.7.x | CAPI v1.8.x | +|--------|-------------|-------------|-------------|-------------|-------------|-------------| +| v1.5.x | Yes | Yes | Yes | Yes | Yes | Yes | +| v1.4.x | Yes | Yes | Yes | Yes | Yes | No | +| v1.3.x | Yes | Yes | Yes | Yes | No | No | +| v1.2.x | Yes | Yes | Yes | No | No | No | +| v1.1.x | Yes | No | No | No | No | No | +| v1.0.x | No | No | No | No | No | No | +| v0.5.x | No | No | No | No | No | No | + +See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions. + +### AOS + +| CAPX | 5.20.4.5 (LTS) | 6.1.1.5 (STS) | 6.5.x (LTS) | 6.6 (STS) | 6.7 (STS) | 6.8 (STS) | +|--------|----------------|---------------|-------------|-----------|-----------|-----------| +| v1.5.x | No | No | Yes | No | No | Yes | +| v1.4.x | No | No | Yes | No | No | Yes | +| v1.3.x | No | No | Yes | Yes | Yes | No | +| v1.2.x | No | No | Yes | Yes | Yes | No | +| v1.1.x | No | No | Yes | No | No | No | +| v1.0.x | Yes | Yes | No | No | No | No | +| v0.5.x | Yes | Yes | No | No | No | No | + + +### Prism Central + +| CAPX | 2022.1.0.2 | pc.2022.6 | pc.2022.9 | pc.2023.x | pc.2024.x | +|--------|------------|-----------|-----------|-----------|-----------| +| v1.5.x | No | Yes | No | Yes | Yes | +| v1.4.x | No | Yes | No | Yes | Yes | +| v1.3.x | No | Yes | No | Yes | No | +| v1.2.x | No | Yes | Yes | Yes | No | +| v1.1.x | No | Yes | No | No | No | +| v1.0.x | Yes | Yes | No | No | No | +| v0.5.x | Yes | Yes | No | No | No | diff --git a/mkdocs.yml b/mkdocs.yml index 617d6548..6f94729c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,223 +1,246 @@ site_name: opendocs.nutanix.com theme: - name: material - logo: images/nutanix_x_white.png - features: - - navigation.instant - - content.code.annotate - - navigation.tabs - - navigation.top - favicon: images/favicon.png - icon: - admonition: - note: material/note + name: material + logo: images/nutanix_x_white.png + features: + - navigation.instant + - content.code.annotate + - navigation.tabs + - navigation.top + favicon: images/favicon.png + icon: + admonition: + note: material/note extra_css: - - stylesheets/extra.css + - stylesheets/extra.css nav: - - "Solutions": - - "Cloud Native": - - "Overview": "index.md" - - "Cluster API Provider: Nutanix (CAPX)": - - "v1.4.x (latest)": - - "Getting Started": "capx/v1.4.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.4.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.4.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.4.x/pc_certificates.md" - - "Credential Management": "capx/v1.4.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.4.x/tasks/modify_machine_configuration.md" - - "CAPX v1.4.x Upgrade Procedure": "capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md" - - "Port Requirements": "capx/v1.4.x/port_requirements.md" - - "User Requirements": "capx/v1.4.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.4.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.4.x/validated_integrations.md" - - "Experimental": - 
- "Multi-PE CAPX cluster": "capx/v1.4.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.4.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.4.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.4.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.4.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.4.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.4.x/troubleshooting.md" - - "v1.3.x": - - "Getting Started": "capx/v1.3.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.3.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.3.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.3.x/pc_certificates.md" - - "Credential Management": "capx/v1.3.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.3.x/tasks/modify_machine_configuration.md" - - "CAPX v1.3.x Upgrade Procedure": "capx/v1.3.x/tasks/capx_v13x_upgrade_procedure.md" - - "Port Requirements": "capx/v1.3.x/port_requirements.md" - - "User Requirements": "capx/v1.3.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.3.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.3.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.3.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.3.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.3.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.3.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.3.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.3.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.3.x/troubleshooting.md" - - "v1.2.x": - - "Getting Started": "capx/v1.2.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.2.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.2.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.2.x/pc_certificates.md" - - "Credential Management": "capx/v1.2.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.2.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.2.x/port_requirements.md" - - "User Requirements": "capx/v1.2.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.2.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.2.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.2.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.2.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.2.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.2.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.2.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.2.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.2.x/troubleshooting.md" - - "v1.1.x": - - "Getting Started": "capx/v1.1.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.1.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.1.x/types/nutanix_machine_template.md" - - "Certificate Trust": "capx/v1.1.x/pc_certificates.md" - - "Credential Management": "capx/v1.1.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.1.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.1.x/port_requirements.md" - - "User Requirements": "capx/v1.1.x/user_requirements.md" - - "Addons": - - "CSI Driver Installation": 
"capx/v1.1.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.1.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.1.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.1.x/experimental/autoscaler.md" - - "OIDC Integration": "capx/v1.1.x/experimental/oidc.md" - - "Flow VPC": "capx/v1.1.x/experimental/vpc.md" - - "Proxy Configuration": "capx/v1.1.x/experimental/proxy.md" - - "Registry Mirror Configuration": "capx/v1.1.x/experimental/registry_mirror.md" - - "Troubleshooting": "capx/v1.1.x/troubleshooting.md" - - "v1.0.x": - - "Getting Started": "capx/v1.0.x/getting_started.md" - - "Types": - - "NutanixCluster": "capx/v1.0.x/types/nutanix_cluster.md" - - "NutanixMachineTemplate": "capx/v1.0.x/types/nutanix_machine_template.md" - - "Credential Management": "capx/v1.0.x/credential_management.md" - - "Tasks": - - "Modifying Machine Configuration": "capx/v1.0.x/tasks/modify_machine_configuration.md" - - "Port Requirements": "capx/v1.0.x/port_requirements.md" - - "Addons": - - "CSI Driver Installation": "capx/v1.0.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v1.0.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v1.0.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v1.0.x/experimental/autoscaler.md" - - "Troubleshooting": "capx/v1.0.x/troubleshooting.md" - - "v0.5.x": - - "Getting Started": "capx/v0.5.x/getting_started.md" - - "Credential Management": "capx/v0.5.x/credential_management.md" - - "Addons": - - "CSI Driver Installation": "capx/v0.5.x/addons/install_csi_driver.md" - - "Validated Integrations": "capx/v0.5.x/validated_integrations.md" - - "Experimental": - - "Multi-PE CAPX cluster": "capx/v0.5.x/experimental/capx_multi_pe.md" - - "Autoscaler": "capx/v0.5.x/experimental/autoscaler.md" - - "Troubleshooting": "capx/v0.5.x/troubleshooting.md" - - "Nutanix Cloud Controller Manager (CCM)": - - "v0.3.x (Latest)": - - "Overview": "ccm/v0.3.x/overview.md" - - "Requirements": "ccm/v0.3.x/requirements.md" - - "Configuration": "ccm/v0.3.x/ccm_configuration.md" - - "Certificate Trust": "ccm/v0.3.x/pc_certificates.md" - - "Credentials": "ccm/v0.3.x/ccm_credentials.md" - - "Topology Discovery": "ccm/v0.3.x/topology_discovery.md" - - "Custom Labeling": "ccm/v0.3.x/custom_labeling.md" - - "v0.2.0": - - "Overview": "ccm/v0.2.x/overview.md" - - "Requirements": "ccm/v0.2.x/requirements.md" - - "Configuration": "ccm/v0.2.x/ccm_configuration.md" - - "Credentials": "ccm/v0.2.x/ccm_credentials.md" - - "Topology Discovery": "ccm/v0.2.x/topology_discovery.md" - - "Custom Labeling": "ccm/v0.2.x/custom_labeling.md" - - "Red Hat OpenShift": - - "Install": - - "Agnostic": "openshift/install/agnostic/index.md" - - "IPI": "openshift/install/ipi/index.md" - - "Assisted Installer": "openshift/install/assisted_installer/index.md" - - "Post Install": "openshift/post-install/index.md" - - Operators: - - "CSI": "openshift/operators/csi/index.md" - - "Google Anthos": - - "Architecture": "anthos/architecture/index.md" - - "Install": - - "Manual": "anthos/install/manual/index.md" - - "Amazon EKS Anywhere": - - "Install": "eksa/install/index.md" - - "GPT-in-a-Box 1.0": - - "Overview": "gpt-in-a-box/overview.md" - - "Deploy on Virtual Machine": - - "v0.3": - - "Getting Started": "gpt-in-a-box/vm/v0.3/getting_started.md" - - "Validated Models": "gpt-in-a-box/vm/v0.3/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.3/generating_mar.md" - - "Deploying Inference Server": 
"gpt-in-a-box/vm/v0.3/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.3/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.3/model_version.md" - - "HuggingFace Model Support": "gpt-in-a-box/vm/v0.3/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.3/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.3/management_requests.md" - - "v0.2": - - "Getting Started": "gpt-in-a-box/vm/v0.2/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/vm/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.2/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.2/model_version.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.2/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.2/management_requests.md" - - "Deploy on Kubernetes": - - "v0.2": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.2/getting_started.md" - - "Validated Models": "gpt-in-a-box/kubernetes/v0.2/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.2/inference_requests.md" - - "HuggingFace Model Support": "gpt-in-a-box/kubernetes/v0.2/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.2/custom_model.md" - - "v0.1": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.1/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.1/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.1/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.1/inference_requests.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.1/custom_model.md" - - "Support": "gpt-in-a-box/support.md" - - "Guides": - - "Cloud Native": - - "Red Hat OpenShift": - - "Install": - - "IPI": "guides/openshift/install/ipi/index.md" - - "Custom Cloud Native Role": "guides/cloud_native_role/index.md" + - "Solutions": + - "Cloud Native": + - "Overview": "index.md" + - "Cluster API Provider: Nutanix (CAPX)": + - "v1.5.x (latest)": + - "Getting Started": "capx/v1.5.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.5.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.5.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.5.x/pc_certificates.md" + - "Credential Management": "capx/v1.5.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.5.x/tasks/modify_machine_configuration.md" + - "CAPX v1.5.x Upgrade Procedure": "capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md" + - "Port Requirements": "capx/v1.5.x/port_requirements.md" + - "User Requirements": "capx/v1.5.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.5.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.5.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.5.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.5.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.5.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.5.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.5.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.5.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.5.x/troubleshooting.md" + 
- "v1.4.x": + - "Getting Started": "capx/v1.4.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.4.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.4.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.4.x/pc_certificates.md" + - "Credential Management": "capx/v1.4.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.4.x/tasks/modify_machine_configuration.md" + - "CAPX v1.4.x Upgrade Procedure": "capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md" + - "Port Requirements": "capx/v1.4.x/port_requirements.md" + - "User Requirements": "capx/v1.4.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.4.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.4.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.4.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.4.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.4.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.4.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.4.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.4.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.4.x/troubleshooting.md" + - "v1.3.x": + - "Getting Started": "capx/v1.3.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.3.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.3.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.3.x/pc_certificates.md" + - "Credential Management": "capx/v1.3.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.3.x/tasks/modify_machine_configuration.md" + - "CAPX v1.3.x Upgrade Procedure": "capx/v1.3.x/tasks/capx_v13x_upgrade_procedure.md" + - "Port Requirements": "capx/v1.3.x/port_requirements.md" + - "User Requirements": "capx/v1.3.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.3.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.3.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.3.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.3.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.3.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.3.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.3.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.3.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.3.x/troubleshooting.md" + - "v1.2.x": + - "Getting Started": "capx/v1.2.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.2.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.2.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.2.x/pc_certificates.md" + - "Credential Management": "capx/v1.2.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.2.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.2.x/port_requirements.md" + - "User Requirements": "capx/v1.2.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.2.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.2.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.2.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.2.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.2.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.2.x/experimental/vpc.md" + - 
"Proxy Configuration": "capx/v1.2.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.2.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.2.x/troubleshooting.md" + - "v1.1.x": + - "Getting Started": "capx/v1.1.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.1.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.1.x/types/nutanix_machine_template.md" + - "Certificate Trust": "capx/v1.1.x/pc_certificates.md" + - "Credential Management": "capx/v1.1.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.1.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.1.x/port_requirements.md" + - "User Requirements": "capx/v1.1.x/user_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.1.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.1.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.1.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.1.x/experimental/autoscaler.md" + - "OIDC Integration": "capx/v1.1.x/experimental/oidc.md" + - "Flow VPC": "capx/v1.1.x/experimental/vpc.md" + - "Proxy Configuration": "capx/v1.1.x/experimental/proxy.md" + - "Registry Mirror Configuration": "capx/v1.1.x/experimental/registry_mirror.md" + - "Troubleshooting": "capx/v1.1.x/troubleshooting.md" + - "v1.0.x": + - "Getting Started": "capx/v1.0.x/getting_started.md" + - "Types": + - "NutanixCluster": "capx/v1.0.x/types/nutanix_cluster.md" + - "NutanixMachineTemplate": "capx/v1.0.x/types/nutanix_machine_template.md" + - "Credential Management": "capx/v1.0.x/credential_management.md" + - "Tasks": + - "Modifying Machine Configuration": "capx/v1.0.x/tasks/modify_machine_configuration.md" + - "Port Requirements": "capx/v1.0.x/port_requirements.md" + - "Addons": + - "CSI Driver Installation": "capx/v1.0.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v1.0.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v1.0.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v1.0.x/experimental/autoscaler.md" + - "Troubleshooting": "capx/v1.0.x/troubleshooting.md" + - "v0.5.x": + - "Getting Started": "capx/v0.5.x/getting_started.md" + - "Credential Management": "capx/v0.5.x/credential_management.md" + - "Addons": + - "CSI Driver Installation": "capx/v0.5.x/addons/install_csi_driver.md" + - "Validated Integrations": "capx/v0.5.x/validated_integrations.md" + - "Experimental": + - "Multi-PE CAPX cluster": "capx/v0.5.x/experimental/capx_multi_pe.md" + - "Autoscaler": "capx/v0.5.x/experimental/autoscaler.md" + - "Troubleshooting": "capx/v0.5.x/troubleshooting.md" + - "Nutanix Cloud Controller Manager (CCM)": + - "v0.3.x (Latest)": + - "Overview": "ccm/v0.3.x/overview.md" + - "Requirements": "ccm/v0.3.x/requirements.md" + - "Configuration": "ccm/v0.3.x/ccm_configuration.md" + - "Certificate Trust": "ccm/v0.3.x/pc_certificates.md" + - "Credentials": "ccm/v0.3.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.3.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.3.x/custom_labeling.md" + - "v0.2.0": + - "Overview": "ccm/v0.2.x/overview.md" + - "Requirements": "ccm/v0.2.x/requirements.md" + - "Configuration": "ccm/v0.2.x/ccm_configuration.md" + - "Credentials": "ccm/v0.2.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.2.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.2.x/custom_labeling.md" + - "Red Hat OpenShift": + - "Install": + - "Agnostic": 
"openshift/install/agnostic/index.md" + - "IPI": "openshift/install/ipi/index.md" + - "Assisted Installer": "openshift/install/assisted_installer/index.md" + - "Post Install": "openshift/post-install/index.md" + - Operators: + - "CSI": "openshift/operators/csi/index.md" + - "Google Anthos": + - "Architecture": "anthos/architecture/index.md" + - "Install": + - "Manual": "anthos/install/manual/index.md" + - "Amazon EKS Anywhere": + - "Install": "eksa/install/index.md" + - "GPT-in-a-Box 1.0": + - "Overview": "gpt-in-a-box/overview.md" + - "Deploy on Virtual Machine": + - "v0.3": + - "Getting Started": "gpt-in-a-box/vm/v0.3/getting_started.md" + - "Validated Models": "gpt-in-a-box/vm/v0.3/validated_models.md" + - "Generating Model Archive File": "gpt-in-a-box/vm/v0.3/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/vm/v0.3/inference_server.md" + - "Inference Requests": "gpt-in-a-box/vm/v0.3/inference_requests.md" + - "Model Version Support": "gpt-in-a-box/vm/v0.3/model_version.md" + - "HuggingFace Model Support": "gpt-in-a-box/vm/v0.3/huggingface_model.md" + - "Custom Model Support": "gpt-in-a-box/vm/v0.3/custom_model.md" + - "Management Requests": "gpt-in-a-box/vm/v0.3/management_requests.md" + - "v0.2": + - "Getting Started": "gpt-in-a-box/vm/v0.2/getting_started.md" + - "Generating Model Archive File": "gpt-in-a-box/vm/v0.2/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/vm/v0.2/inference_server.md" + - "Inference Requests": "gpt-in-a-box/vm/v0.2/inference_requests.md" + - "Model Version Support": "gpt-in-a-box/vm/v0.2/model_version.md" + - "Custom Model Support": "gpt-in-a-box/vm/v0.2/custom_model.md" + - "Management Requests": "gpt-in-a-box/vm/v0.2/management_requests.md" + - "Deploy on Kubernetes": + - "v0.2": + - "Getting Started": "gpt-in-a-box/kubernetes/v0.2/getting_started.md" + - "Validated Models": "gpt-in-a-box/kubernetes/v0.2/validated_models.md" + - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.2/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.2/inference_server.md" + - "Inference Requests": "gpt-in-a-box/kubernetes/v0.2/inference_requests.md" + - "HuggingFace Model Support": "gpt-in-a-box/kubernetes/v0.2/huggingface_model.md" + - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.2/custom_model.md" + - "v0.1": + - "Getting Started": "gpt-in-a-box/kubernetes/v0.1/getting_started.md" + - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.1/generating_mar.md" + - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.1/inference_server.md" + - "Inference Requests": "gpt-in-a-box/kubernetes/v0.1/inference_requests.md" + - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.1/custom_model.md" + - "Support": "gpt-in-a-box/support.md" + - "Guides": + - "Cloud Native": + - "Red Hat OpenShift": + - "Install": + - "IPI": "guides/openshift/install/ipi/index.md" + - "Custom Cloud Native Role": "guides/cloud_native_role/index.md" markdown_extensions: - - attr_list - - admonition - - pymdownx.details - - pymdownx.superfences - - tables - - toc: - permalink: true + - attr_list + - admonition + - pymdownx.details + - pymdownx.superfences + - tables + - toc: + permalink: true copyright: Copyright © 2021 - 2023 Nutanix, Inc. 
extra: - generator: false + generator: false repo_url: https://github.com/nutanix-cloud-native/opendocs repo_name: nutanix-cloud-native/opendocs edit_uri: "" From fe8fc8fa4f6ca1b974485cf28ea2fdc16e6f1880 Mon Sep 17 00:00:00 2001 From: Laura Jordana Date: Sun, 24 Nov 2024 23:00:24 -0800 Subject: [PATCH 04/15] Remove GPT-in-a-Box 1.0 docs (#67) * update k8s release link to v0.2.2 * HF token no longer needed in run.sh * add version to solution name * remove GPT-in-a-Box 1.0 docs * remove GIAB 1.0 docs * remove GIAB1.0 files --- .gitignore | 1 + .../kubernetes/v0.1/custom_model.md | 31 ---- .../kubernetes/v0.1/generating_mar.md | 28 ---- .../kubernetes/v0.1/getting_started.md | 85 ----------- docs/gpt-in-a-box/kubernetes/v0.1/image1.png | Bin 150201 -> 0 bytes .../kubernetes/v0.1/inference_requests.md | 58 -------- .../kubernetes/v0.1/inference_server.md | 45 ------ .../kubernetes/v0.2/custom_model.md | 33 ----- .../kubernetes/v0.2/generating_mar.md | 28 ---- .../kubernetes/v0.2/getting_started.md | 85 ----------- .../kubernetes/v0.2/huggingface_model.md | 46 ------ docs/gpt-in-a-box/kubernetes/v0.2/image1.png | Bin 150201 -> 0 bytes .../kubernetes/v0.2/inference_requests.md | 59 -------- .../kubernetes/v0.2/inference_server.md | 43 ------ .../kubernetes/v0.2/validated_models.md | 16 --- docs/gpt-in-a-box/overview.md | 11 -- docs/gpt-in-a-box/support.md | 14 -- docs/gpt-in-a-box/vm/v0.2/custom_model.md | 29 ---- docs/gpt-in-a-box/vm/v0.2/generating_mar.md | 38 ----- docs/gpt-in-a-box/vm/v0.2/getting_started.md | 49 ------- .../vm/v0.2/inference_requests.md | 82 ----------- docs/gpt-in-a-box/vm/v0.2/inference_server.md | 37 ----- .../vm/v0.2/management_requests.md | 133 ------------------ docs/gpt-in-a-box/vm/v0.2/model_version.md | 8 -- docs/gpt-in-a-box/vm/v0.3/custom_model.md | 31 ---- docs/gpt-in-a-box/vm/v0.3/generating_mar.md | 36 ----- docs/gpt-in-a-box/vm/v0.3/getting_started.md | 49 ------- .../gpt-in-a-box/vm/v0.3/huggingface_model.md | 45 ------ .../vm/v0.3/inference_requests.md | 82 ----------- docs/gpt-in-a-box/vm/v0.3/inference_server.md | 36 ----- .../vm/v0.3/management_requests.md | 133 ------------------ docs/gpt-in-a-box/vm/v0.3/model_version.md | 12 -- docs/gpt-in-a-box/vm/v0.3/validated_models.md | 16 --- mkdocs.yml | 39 +---- 34 files changed, 2 insertions(+), 1436 deletions(-) delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/custom_model.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/generating_mar.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/image1.png delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/inference_requests.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.1/inference_server.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/custom_model.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/generating_mar.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/huggingface_model.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/image1.png delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/inference_requests.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/inference_server.md delete mode 100644 docs/gpt-in-a-box/kubernetes/v0.2/validated_models.md delete mode 100644 docs/gpt-in-a-box/overview.md delete mode 100644 docs/gpt-in-a-box/support.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/custom_model.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/generating_mar.md 
delete mode 100644 docs/gpt-in-a-box/vm/v0.2/getting_started.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/inference_requests.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/inference_server.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/management_requests.md delete mode 100644 docs/gpt-in-a-box/vm/v0.2/model_version.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/custom_model.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/generating_mar.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/getting_started.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/huggingface_model.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/inference_requests.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/inference_server.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/management_requests.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/model_version.md delete mode 100644 docs/gpt-in-a-box/vm/v0.3/validated_models.md diff --git a/.gitignore b/.gitignore index 8b03c06b..fabcc5d1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ site/ .DS_Store +venv \ No newline at end of file diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/custom_model.md b/docs/gpt-in-a-box/kubernetes/v0.1/custom_model.md deleted file mode 100644 index 8e1be37d..00000000 --- a/docs/gpt-in-a-box/kubernetes/v0.1/custom_model.md +++ /dev/null @@ -1,31 +0,0 @@ -# Custom Model Support -We provide the capability to generate a MAR file with custom models and start an inference server using Kubeflow serving.
-
-!!! note
-    A model is recognised as a custom model if its model name is not present in the model_config file.
-
-## Generate Model Archive File for Custom Models
-To generate the MAR file, run the following:
-```
-python3 $WORK_DIR/llm/download.py --no_download [--repo_version <REPO_VERSION> --handler <HANDLER_PATH>] --model_name <MODEL_NAME> --model_path <MODEL_PATH> --output <NFS_LOCAL_MOUNT_LOCATION>
-```
-
-* **no_download**: Set flag to skip downloading the model files; must be set for custom models
-* **model_name**: Name of the custom model; this name must not be in model_config
-* **repo_version**: Any model version, defaults to "1.0" (optional)
-* **model_path**: Absolute path of custom model files (should be non-empty)
-* **output**: Mount path to your NFS server to be used in the kube PV, where config.properties and the model archive file will be stored
-* **handler**: Path to custom handler, defaults to llm/handler.py (optional)
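For illustration, a concrete invocation of the template above could look like the following; the model name `my_custom_model` and both paths are hypothetical placeholders, and the NFS share is assumed to be mounted at `/mnt/llm` as elsewhere in this guide:

```
# Hypothetical example: "my_custom_model" must not appear in model_config,
# and /mnt/llm is assumed to be the local NFS mount used in this guide.
python3 $WORK_DIR/llm/download.py --no_download \
    --model_name my_custom_model \
    --model_path /mnt/llm/model_files/my_custom_model \
    --output /mnt/llm
```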
-
-## Start Inference Server with Custom Model Archive File
-Run the following command to start Kubeflow serving and run inference on the given input with a custom MAR file:
-```
-bash $WORK_DIR/llm/run.sh -n <MODEL_NAME> -g <NUM_GPUS> -f <NFS_ADDRESS> -m <NFS_LOCAL_MOUNT_LOCATION> -e <DEPLOYMENT_NAME> [OPTIONAL -d <INPUT_PATH>]
-```
-
-* **n**: Name of the custom model; this name must not be in model_config
-* **d**: Absolute path of input data folder (optional)
-* **g**: Number of GPUs to be used for execution (set 0 to use CPU)
-* **f**: NFS server address with share path information
-* **m**: Mount path to your NFS server to be used in the kube PV, where model files and the model archive file will be stored
-* **e**: Name of the deployment metadata
-
diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/generating_mar.md b/docs/gpt-in-a-box/kubernetes/v0.1/generating_mar.md
deleted file mode 100644
index b2172a6d..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.1/generating_mar.md
+++ /dev/null
@@ -1,28 +0,0 @@
-## Download model files and generate MAR file
-Run the following command to download the model files and generate the MAR file:
-```
-python3 $WORK_DIR/llm/download.py [--repo_version <REPO_VERSION>] --model_name <MODEL_NAME> --output <NFS_LOCAL_MOUNT_LOCATION> --hf_token <HF_TOKEN>
-```
-
-* **model_name**: Name of the model
-* **output**: Mount path to your NFS server to be used in the kube PV, where model files and the model archive file will be stored
-* **repo_version**: Commit id of the model's repo from HuggingFace (optional; if not provided, the default set in model_config will be used)
-* **hf_token**: Your HuggingFace token. Needed to download LLAMA(2) models.
-
-The available LLMs are mpt_7b (mosaicml/mpt-7b), falcon_7b (tiiuae/falcon-7b) and llama2_7b (meta-llama/Llama-2-7b-hf).
-
-### Examples
-The following are example commands to generate the model archive file.
-
-Download MPT-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/download.py --model_name mpt_7b --output /mnt/llm
-```
-Download Falcon-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/download.py --model_name falcon_7b --output /mnt/llm
-```
-Download Llama2-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/download.py --model_name llama2_7b --output /mnt/llm --hf_token <HF_TOKEN>
-```
diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md b/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md
deleted file mode 100644
index 3dcbb811..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.1/getting_started.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Getting Started
-This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).
-
-## Setup
-
-Inference experiments are done on a single NKE Cluster with Kubernetes version 1.25.6-0. The NKE Cluster has 3 non-GPU worker nodes with 12 vCPUs, 16G memory and 120 GB storage. The cluster includes at least 1 GPU worker node with 12 vCPUs, 40G memory, 120 GB storage and 1 A100-40G GPU passthrough.
-
-!!! note
-    Tested with Python 3.10; a Python virtual environment is preferred to manage dependencies.
-
-### Spec
-**Jump node:**
-OS: Ubuntu 22.04
-Resources: 1 VM with 8 CPUs, 16G memory and 300 GB storage
-
-**NKE:**
-NKE Version: 2.8
-K8s version: 1.25.6-0
-Resources: 3 cpu nodes with 12 vCPUs, 16G memory and 120 GB storage.
- At least 1 gpu node with 12 vCPUs, 40G memory and 120 GB storage (1 A100-40G GPU passthrough)
-
-**NFS Server:**
-Resources: 3 FSVMs with 4 vCPUs, 12 GB memory and 1 TB storage
-
-
-| Software Dependency Matrix (Installed) | Version |
-| --- | --- |
-| Istio | 1.17.2 |
-| Knative serving | 1.10.1 |
-| Cert manager (Jetstack) | 1.3.0 |
-| Kserve | 0.11.1 |
-
-### Jump machine setup
-All commands are executed inside the jump machine.
-Prerequisites are kubectl and helm. Both are required to orchestrate and set up the necessary items in the NKE cluster.
-
-* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
-* [helm](https://helm.sh/docs/intro/install/)
-
-Have an NFS export mounted on your jump machine at a specific location. This mount location must be supplied as a parameter to the execution scripts.
-
-Command to mount the NFS export to a local folder:
-```
-mount -t nfs <NFS_SERVER>:<EXPORT_PATH> <LOCAL_MOUNT_LOCATION>
-```
-![Screenshot of a Jump Machine Setup.](image1.png)
-
-
-**Follow the steps below to install the necessary prerequisites.**
-
-### Download and set up KubeConfig
-Download and set up KubeConfig by following the steps outlined in [Downloading the Kubeconfig](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Kubernetes-Engine-v2_5:top-download-kubeconfig-t.html) on the Nutanix Support Portal.

-### Configure Nvidia Driver in the cluster using helm commands
-For NKE 2.8, run the following command as per the [official documentation](https://portal.nutanix.com/page/documents/details?targetId=Release-Notes-Nutanix-Kubernetes-Engine-v2_8:top-validated-config-r.html):
-```
-helm repo add nvidia https://nvidia.github.io/gpu-operator && helm repo update
-helm install --wait -n gpu-operator --create-namespace gpu-operator nvidia/gpu-operator --version=v23.3.1 --set toolkit.version=v1.13.1-centos7
-```
-
-For NKE 2.9, refer to the [official documentation](https://portal.nutanix.com/page/documents/details?targetId=Release-Notes-Nutanix-Kubernetes-Engine-v2_9:top-validated-config-r.html) for the validated config.
-
-### Download Nutanix package and install Python libraries
-Download the **v0.1** release version from [NAI-LLM-K8s Releases](https://github.com/nutanix/nai-llm-k8s/releases/tag/v0.1) and untar the release. Set the working directory to the root folder containing the extracted release.
-```
-export WORK_DIR=absolute_path_to_empty_release_directory
-mkdir $WORK_DIR
-tar -xvf <DOWNLOADED_RELEASE_TAR> -C $WORK_DIR --strip-components=1
-```
-
-### Kubeflow serving installation into the cluster
-```
-curl -s "https://raw.githubusercontent.com/kserve/kserve/v0.11.1/hack/quick_install.sh" | bash
-```
-Now we have our cluster ready for inference.
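As a quick sanity check that the serving stack came up before moving on, the pods installed by `quick_install.sh` can be listed with `kubectl`. The namespaces below are the upstream defaults for Istio, Knative Serving and KServe (an assumption; the guide does not name them explicitly):

```
# Assumed default namespaces for the components installed by quick_install.sh
kubectl get pods -n istio-system     # Istio control plane
kubectl get pods -n knative-serving  # Knative Serving controllers
kubectl get pods -n kserve           # KServe controller manager
```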
-
-### Install pip3
-```
-sudo apt-get install python3-pip
-```
-
-### Install required packages
-```
-pip install -r $WORK_DIR/llm/requirements.txt
-```
diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/image1.png b/docs/gpt-in-a-box/kubernetes/v0.1/image1.png
deleted file mode 100644
index 5be8e71b1c19f2f9c5e566701a6dc2800da2cbd5..0000000000000000000000000000000000000000
Binary files a/docs/gpt-in-a-box/kubernetes/v0.1/image1.png and /dev/null differ
z_>|S4u3iGhd@qzZ5ARh%n%QVG>W_!eMos9o@l~lqg6Mbpt$T^7Z>vCiScmtkI>QA` z&S4jS@DfyWhC8)CV5+A(%-0Q%T}4E_jiudD0ct0~Xax>PCzYguLH8m3=rzJQKlc5t zFiF=_va&E!X9qTK@|Zn#gV~y||FcGp zS_fW&L{}a!O3pkIHbLN2UNJwKxnAK_7Uo-pfP6$tssU#N+I_pEhIJ3#)QK{Jl$j{W z8vg0BpTksdAK$fg>Oo)$#{*@2aY3@KMjDdKoGwc*y$JVZ-=JZF^T@gU7hrJQSz2)# zIChrE)6#!3RYcw6x#S7nfuhZi#CouR&gUe_#BwT7FwF2#F(gz@>L8~Ls7XlCM8fFL zSFlm)oEVY?lc%ol`_raJ@|+Xliq?32w@V3xue_4|d`PdtL0r^AHQYjeeJQ0MfcMLa z9)ktIEO|J+n|&OLIJ6I7v)e`X9lB0uh~sQN{H^fQIMSyuR-}O42IrB^XW=fidOHZ8cRwy@}Q%o@eK9 z-$atOBiVdYqt>CH@6m9##DpP|{2#x7EyOMNP*nl3{hE@le;N2lxW~`sO&{PC58=n) z3cU)S3CRdqhXvRU*XcQr^_@>?EO9+ekZE|{^*d56H*4z?)q)~W)wc33J@*F-aJb}F z)=p^J3P>L=&p0}Y8mQ)UHs0l^J*>+OuSzK{L-kR-XC&|bo}~3{l@<@ATi-i>$=lFv z`eIP0Ha@ofZ4_!tx;B*@rq8?MW1~@jc>1$enW-LNiQOp5@K}hkruVQjow(!B8tXLC z!hq`WKQhkiu<>hH9>+P!xA0~zHmTE0a7yu=&LF}-wHtV)rOYdTDP980_0mWANk}PN zz&|8RZe#le+{3wL2~?OL zE+KX9Cm3}u_ehmi-@ahbw?b1r-LH?Sh%&ngmrmx)|J zMiEZ3pPPD@&eLDwO>_B3U~p=Nw(@foh0c=*_&T_vA$05lZj@Tk*!#Yw!HvYG(E`hY zJS+I_@ra15m%5n%Ipk*EjZ$CU*HQAN8)nfR7n14lYR*za5+D(*igE8e??UFx9BTF7 zcJPIwMiwym>jHfOJ(HBE$d6-Oov0;BDP2AFWQ7CI$O`RPtDYom%+1Y?$}^B8#rQP_{UFt^#d%Yi3pK= zn>>PLUJ=y$loom4ZJBoH2m;#J7S&Mt>5 z4o1SyyCTYxkEI_$PixG=J(w|fdNxtDtzu2s*8(5dXnOQ_3^$WMR30d)0YI0FWud0L z0$q)6!(_8R+#8=|D{ZcTTC{^%=a<3&i|gpH zENYTJ9G91f5Sg2$J#t87xMi)yX>XS994hm!rDCd{7h z<+iL>9iwvmF`8!!bOnmFdweO~d~aiUP+<@GCOa9{!(-CmF-1>!^Po_)O3`z4-8csvEj7u21b@}#@tn*dvQIUH89JJCcoW?Fv0 zV(Zo|fjrnzm(hun+Le?!n6MuA# zJlm^9;Y|?eo4Wbv*YE%}m(6g@{71Y(C{g78yuRF8Y&0qngCF=st&F15-HDBcsuE?}h)+7O} zyCLUJ`->Xr4ACjoXrFnk7{lxYz&eQv^{D6BZ77egopzc)i*Fd7Knc zkr#dARe|3|ULb_5``v2^;MPtWyMIs#;^lXaM)EHa|75{|e=>(hYI)uT z{Ip`Qq%NK>PJi{DI@SAGffnp2bVeai${Y{rgGw83_bh5gP5Z^kFX)2Y0x{!QM}vA} zOOWDVeDPK3RnLUUEjzr`)6{Oc>B{Tuo|WL?_&%yUGwvh$GI5T+C*r}?i@)i@HYFTo zkpb+dYNqM+>9EN2iADattVNljgpD+ooq>X$yx8?;UvO|&!3(bwJ~5*eZ8Hvwg~`)% zQAL)hePHt*y**#(Y<^4sj`FDEorgJDGKZVp0*SiYqBkZmwEjX7sI!Y<#(|D~#3+pc zHxtk?YPE`~ew{_$xeD|5Cjkma9HywhExcJCjq$nvWe@p)HwM_KLK21FV&-5D5r4|- z{|I}u8VD%q++KkAL{4r!wqFDRL15S1RpYp_%@_ov?!308)G-Hyn03+BfOtTQ=6r{A zjc|gdg*DhA&pL5FK^7s={JN%Vvd3><5;s#^7fs70UF!U*TMt^^S|l*@xpq5;tu|zO zTGKv+lP1!LrTmkTkfZ}quHQ9H6x`}%u&ViKr%!RPoJXjqb;X5L2Cp)}EEd56tq)&N zDOLpn!z*~A*EZ0}*?0QBKKm+W^9S349zi`kDL`W9tu4ai$@sQw75!;#M@W3feFRTW zU<;Sj!oTQJ6669A2Us81QENK);Wr@3%V z-3*hq9fRgbbW*;p;uM`=Tyu;agz=Z=vmPD!ume@){i8-f%oP^iaY2}!tBU50wP#_X zh0g;vy7Zy|&+HDMFhzL|w3OE500<#jDdjnQ5{(0$8!>adm{8K6;dl0{UK}z3Bzxs* zF~OzNU{WswY;YyKKl8Y3ysyzoNow+$D_j3nXAUFk$W)`zwso6C#0{&$dLevIAyl(PNBG(?=J<(i1W75rSda94;1hYrFmo1~>*B zrgaGY{5LvkCg#jG$o~VY7|m^)H3~JO26u6X)@e9jcAqlVXk)#3BHH;}>sOMi^w8;Th?kmQ{bhNmfH$8%e$Qv>d zgn%YaKFX=rRe~fc zgaYE&M|yCBb`g<$)L*2B28VGV0Bl4Ys?;l*n_4!U5KYdR{UQ7_Hh0&y7)4XW-g7Mu z|J;d~Wu8Z5&Naj5Kai%%<)s0i7lut$2*se{q$W&Os4|U29;nvD+BEG?*gctza~#G^ zZ};m$QTrU>5^1%G*{BM5;*cuxVK=St1{q!pTjvY5_qR;P8Fuhqx1e{RArOguRJ~P7 zK2pgm2ZC$r-5#lWsy=z(J#T@SY{LnHK}vZ?1{e5ucz8rlznY@@Dt;Uoh=y?pGKyC) zn! zC88;uVjAp$J+2=5*tY)3JI3CY?g`pck?a6P%x^se9(|C`QGn!z-q`)NsZG#x41*6j$8&ld+^$9Wd<;lRaxdaUWqWB`|??aJ&tsjI2+v=OM z-0;{(6PEU#Gu1Bd!+_#txM~PNzKv9xHhlEM?&2OPUBXk^_z$*ATUVe%jQ89p7b#UM zH-GzA%(#Js@3GpSlRBcxo?E_rJMF;ILDe5iiVc>#v^}kBZ_t?YeN~H+u~qGc{oQUm zGPe%Un$C~7=Pnua+F)6H^3H0bCo9UT-$W`3$JiOHs`eVz@fSyDbpoz z@P+XJJp;?FSw%3@YWd)(xtH(sG9-9m{Q$5}>}l4Jil+kNe!NcVK7GbXuCYszlbT$c5FbnyM&!#gcJ%5X=fBm<&J)Dd0 z&N_uRfp#<6l7r{nY>m4-Y;eW3+0`HgAaSFBlmpwwm4xaf0Ze$Zk~_Bv=UkV-MnzILc! 
z2qKZcIMkLUzI-<8Iv{og%-lQ90DLW;cO3KO1-K7Yw$2MMSa412abBmEIY-uAwI1ikvDxL1AJe-uR>s?~c9h zRQophK5m$5_VYs<6Ikd)my=fC+7jN=6a38)Sy@P?u7X>&hZp~_Y39>UTf`!j7B;%M zt~8By>)H~S)gFA}`@5p=J4L_#idFiO^xW6>nvmlUskhGcEH01!vGA;X$r?=JXW6Hg z!Yuuyry9gnInmVNKqt6i{?5mnhd+JZuKC4dA);-j3ay+KeLY?v{ zQFX(iPIsG_n26Fdk8Nu&CGr`+O3LzFC$1eT@<8hF;3@f<Q$U&apNsjH-%EPlZr(8ny9O?sR)wGWTkk*;Uf*u<;=#x(dvv4eAk|J84@!Y{-0 zG`iKwnCpi+zlA@v)>-WJT`OGdX>q^h8Db}Nr|Zn7KOS{{*xgd|K0yI7s6_XJj!vx) zv(6_r4L><9;qZy&!=sL!+!76Zxk1l$x~V4h8{wlW)BI0yCZn-jHCIDq!f;Ii`AtTk zktEB_j+RNe0mSEnjjWb$PwO4H+ruLjVw=?4bN9rZX$(8o7V(yf#I-g_rtRF{%WUYn zHs>?7f-aUEZy)ijdHyn(H18uktah_369I$a3P;^|ds11T7*Ds)mJ&4azY}c*?~CNj zT&hlE_v5G3!4eclKHg#^zeiIiM)6R>3Lr})tUk&=QL^(^t89Km$>`F4ZPk|~SDb0q zu4HmgNl&iK(VoiWfOx6v$$!y2jJ|>z5F)-~hC>6+sg|F4UHYPezYS`KtrW7$?ee_A zk5x8Ss#9hg8SpjiTRi5n^Pa-qRpZwhZyqWOY$0vB-#WU#_xAF?nu;W#UyYNTeo1n_ zC?2duFx+eK@mJd|crdS_Q}`|GPM<#x+!%vzSD zJhj}{J1d#FYOmP=O~zdkLj9cMxO6-i`Q?TGb)P@^%FqTN6I=KGaH_Wn;G?`T%+OD@ zdb6}Byg0nW;_S6~P1&N4-KCDFEYl4Zto4;$Ly7KvrWP^MvFl+iV9Bq}m9-c82y4$^ z%ZrL8#^*?jVa9*lxs?d9zJNM-ewrCs-!Ux ztp_hOJE=c9(idWQ{G(jk#gdvqtpL%MEoedrK1oY>y&qfDZ&L)@WEG5bLFyqBZrHZXdZrT2H*Vzli4xj$8VCC*m#JL0P`Fzpy@VF*LhHIJ75?s54HenWcNmKpTbiIFt zItCz;9$8`iLWfI^;@R#ayW^L)V4ka8O{Q?vUiZDdZ6B->bMoM|^KLg%-5=Xp<$zJ4Vvv9FcoiEGc6$jTpL^_r{ng9lloLqpiZKc8$? zQ-A7Ar6%{emR#T@;3~!6v$_5c3`fbisMSuC;ogiDJ32q7uB(tcV!mX+{kc(>&C4aL zg)KxqPh2}6%51(yzi#GmVM369+=Cvz{Eh%((3;E+itfIrxRmFlB**Wo{l_i!(s}dx z=Tep^Iqfo18oOljq%*hX;mC~KH9@-{a6T{iPPN~aBC5+9zMzO}QTq(ViYe0{YiY{xB&Hf5cIr&?u?Z8@`LH}jA82vx0$7B05@ z5aQBV)Vri1QV6mVZ{sSxZVt{%V|JasL7nK|ed!Ll43ImbIwd0$4Dmy-t-J_n-*bA2 z(A2C-;0X4_zBuRiJ|3*}B$J%g53VW*i|X)7R#pzaEXmU=)U#;F;7mPHCi*Rjc_zKu zu~}qWv>o5E=gdQWul~_hTpyTOVJ9^m=xuG%>(b|k13v^02^pmXh{BOXul-cGFY zj%(HvCWnKLd;5z*`rV&Qgt2fs+M;5Vkwi`AO8flqedo7}8;S>a+|dz|{3aHToz7$C zHuf|{!mDjg*O2WjdfbK4mS0O90QS*0$*0-)!8d9u$9Ia?`MD zlYR@o3_GULYKcRa~Z5Mi731D3r~5gP~i-o1KF{gr03 z=hn6^>o4s>zOQ>8dEE7rI$E}VztZg0tM`%Pwe&y7kdO_(n6m1XvFcD^rtY4rimW{A zgG-XSgD)3xy~@y26%wRWsAns<)7>)2T58&J|Ky3_u1kW2Rq8U&eUNe(sMmH6p1 zb|wyw1z!l6o0`4Hwo=@`H>e{a30SuIs?_zpYyP%AXFt{%Is0}bZIt@@<+e5w zOM0;bj^d51YsQT{g^8^6^mb&Sr>o>=P2DK}U@3TexiW6hm6_|2`Dk5hiU~smyr`wz zl)>y95S;UL!I33zi#ik zuUC}hk|A@9lx^>XC(x)~_-Ul9!DwP<{zKGtrk~*d7lLgc+zi{|r}z1J4^k5Rh8wyZ zB`g_Zf$n0ax3k^*UVFXn@m)p9@%(wU`)7@M&Y07EJ?#>=W7@tYNm~Xk>~{b08rYLt zX6=ccuPfGk>v8@RtfOey8G#yV#U0s<<&#j2aq_2L1JCbUA$S@McmBW1i6n1diiKm7 zeX?o^Xaotf+iIT#45yCx%2e_vQu_PqdN&7^9R2#wnXYv|R+>CIUGDM2aK)aSRD_*P zN2TjB9ktUDcNSmm$Y~vLwDxaz_PiA&b+GEiWrf=ZZLkav94xtLpAho3rU&YY6Wc{^ z!mkx7k8w@G)6?(31S`~d#w=WNgp%!5XZyV>G2#=__H=6iQ!7;1_=Zhuo8POCDjw@k z&+Lw3xO6SJc;v&e0@H|x1(%D$1L6{bcopY27Bs~#UUI+pbobDkbM!jl*5i^HrT-fN zgxrQSws?h%1lm!P#fuD8#KL?Yvz#wKo6ayAYkoYv@ako4TjMP<@Lb&q%pMKl}GGtrPSCe_q@Qs|GOv278GvOz9`3bvZPatK_7pu8UxsHH2 zB!`g=amqz3p#Fkb-A${9gU7x%yj-O7nGGg!;3s2mMDMe#&Y$T?Ur^6d?2&*YziCw+ z*m>|wzI$f0pP(6bb*EeLEY5SJs{xo7jUia#e0oD z5vU#=x^<*_@D}gT&^gqlcId=xTmhs-=p1gzExo+5xB7iKMdduK^2xs+iF0L!E z`~<*DuO`L2+D$*kgZFbMVUf?-t(!d+3F&>1ECnxFiWagR2qyB=GHi$2wt8s1;5~?v zSO39@Ys#LaTa({ayK+Hi<1f-ILmBU8+O`{@O5w}?l{Zaf4b&tA{B92#6Tr!6g|A58 z8UBRR9+Dl&$K6y3t_-$&5xEunHSW#O_vxejpkLV}Nn%_y6I8sU`hVPQvZ*wH4e=Dz3}7rAx>8RZgm1$xveNqryE}x z;IzleB})#e?4G#0-@wd?g&+Uy?m5C1WkmZQ z0eUOJn*NSS#;^VVgP9^X^BWR8W@Q$r+#M2eqN6ysiqY@V)-9&a}~ANdg{{A zvdM|KW&03i{ABdTt8Dju(_}>8F-ux!@16*^%;B`V_Dxwj3iUr&@yS5u_w{s%?@#@? 
z8g02Fs4u0@3_Ha6KP=P)B6!MvItfg8m3Ga62WZ|r#tW_ar#Of%iL(;*Vd1@rXzVv4 z_`h87aYXRc&qQO7_F7gON&(r1B{?#0^*h0Or1Ivz8_xY60c<{lfSjl7hyUOx(ouXU z_WjqSo&R9P@tJ?WC3%9zE=+%T5O0YriHN~%^(pZoU2a<9-lr=u(>4S+vLb+!z5t9$ z8mt58W&q&03VP$xPaGQ0VlH7()a;u-F3#eWbJ_}xG~Omj%z_XA3~bIJ1^LyTv=EVP zE5XDLM=q`fN~bWC=@5KuEWEhX$MV&s&26ZbH;PY{%8mt=mFY={&a5UAoq1VSgfPRU3uG5zSPFXO>Q2P|28$Kbyw0)v7z^u#A zs||rr5XSYoErVL|F-W4k0tio>pieP^R0{;7s4U9LNrYGou==)jgj#MoTYr(N+aE&7zJkvX|{zQFs-2#!jV;FMdRE0+=}b5gbRKPvNbzy zsLgbN7U72dc|k;L)}p?pjTo z%tBaBZ&G&dgFu5GrmQa`A`R%ete>;{7bzezjF|{ADxcqNE0iY%(bba-44XP-k3*R3 zDzX!k!K^}`0N}r`XqktC3^q+cbet2wLKEGde#DRf z)ynVfE~*kPp0ozPfRs&sJoM%Vd<&Qu<~9lutkfyg3F>V4^({!L1c6crN1O5$Xj)9s zev8Kw$kqh~7$aVOe~)go>yH(8F^%>qy9F?*f4*N6Mnf)j&3trH#Ylp8sB!sXZvhwp zAKDiJUA;}@A}njOT~aa6-Y{kV8A-pJH4eXlc^vBZ8`J%>Jg~DWupnw`nBz$lQ^lgJ{d~j^Jd|MlFZGm*Vb=CgM2CK z`2H7nr_3Rb>)lVoSZV=ucju3oO&%}hFEHeqdvy&lkm^!0C_CwQFBvmi#ao!~gdpLT~>7GEig^1BWY&F^*fuEu5_*iLp{1ukH0 zUAUDcgBX@=H868p~ zHGKv0?$}`Wp|ZGM%m5LAeBt9^0P&AO$T~mA+BE2F+sCx~msak6CC)sBHfE11NAhvk zO~SAoq;=ZYcX5FL2WGq43mpCgAokJ)R}?)mF$Y2eW+VKf;)X=u?_v+pV%8>`048I{ zb!BQHG+;mBf+1+uCRb`waiH<^P^=Rq0~8^9uZsaRLg<^^@8Iu+NXO}Wr)e?o2;HtM z&XWf0K$|1q(?)cWDB%P z{3)B9LxGE4eI)IdIo=q9l_}H4VC=~VfwqaRhf#XXa^bnO5!$Z+28Ln8-r4)K=!v2Z zafJ1EuoN-oCPZLv8S=*BeC8?Xy5{WWEDwT%fjB>#OBA*O=LqJ!Z`pxSR*g?XCbn=r zZFIOxcK`?>f3HNlkB>Gxwb$}}I!*nA={VNks~;yhs3|f3^`MLoB+6)G$Bboq7B)-G zd+~efjFsg0jElRU;wp_(g;vdKD1orMikXQPT-%TY^P?SovSFsna7TWCbGO9!hHhO@ zg;{eD@YU&_;fVo7LO^b%^?8qN$jb5YV9=?ZK%e-2agnF}y|%zwm{-Q93X=!6RQ7UV)lJ-9;03(hpLWXr<2gwqYYv@S4L-2G~bpFSk-Q{`TN85z>P8StL1_1+(Wz^q_7!x7VhE{ZSnq15gMVG9_hKYl2}y z`>Ky$8K2q6aP`Le`;A^KMTgeEZ$`Cjt$WwD@9*veMFpAL{xv96CE^_!4Lk4~0Zv>^ zQE-ZPJiXf-(|3suO__z#&qV+)g~Ekt0Zy zd#%R3ty(m_ZI&m%|M15vi292$myCVAa+#97WliAf~s|_0LR*&8Z=j8WsaQipz>?Y)NecjjY zlU`pDLDLZZ1xyfqk}bt4f>2-|tCcK^%(ih*F<}1$!u|CYr1>{N9c;l0Oz`@8TymTv zN|z&Is;fbGxeNzlus3;n@nHMfNeiuf5Efds<@GzfoK*ZMl$D#n29Edx*+ByoF#UByi`6>;KDdv=4=~zm}=~ zfJJ7PInaWHxo@K@B}S^qqKexv-v6w@ac(2lm6D1gf~l0?+PMN*ytOk zh#7y^&$MMUvJp%z;z04{CvbXyE>J(-Tm&Nce|^(z^9}-nnliMaDmLp5ltM(I9W~2o zW!r?^Wr%ts@GHbz;&1QL&YMbsm~{x4tNq~}9)G@`LS2IU_c+}^R00_@@D`R_%%FL* z5NFG{ow^L=gwnoY=#Oo$NHksC9Y@X`x17_F(k}Q9r&-(%Iv`DTObSU$RyIH=D3mx2jC{;;e#~eWX-K6@N3zt^IBcF*$fuu?Nlh;LZR zaoC&4oV9nd_G&eQ)>MFqSJhOJ5FaNtoS+8oZog5X3N{-RR)y^#6Dak6Q6Qg0u79?J z$R0s(Z@N4R;O`b}p;K3V@S=Q`-KohHZ$U1~Rjh=NOHW4A!o4Rq5&m1qcR9qsdsjPu za?m{Gp8^#mvtJOI`e)M{Nlq3&WtevLx@&yJrOvYOuk>yT19-Ic1q?Cz{N`EjjgZfXI@*0$K7FLcY%?v)*V3n0q%GOmenAln7VAe!; zdz`rj5aO#{9^0>#wjZz1V+RS;Yv5KkI0anAUHyd`m?z;@pXL+?((g}$>WUcQ+XAD# zqTcafvz&DaZz0k^#<grq5jG_$Wj>PYS{*K<|bkDDU5OU^!DgsGS7>@4j~JX0aam z2pH#&odF>Gu5$>UK?CJU7{-?6@U6j|pYT$v;$xHmb+h{e<< zl9)c;uOjd;%)X=0fYM{mGA)kS8I>gwO-)FQqYS-#`4YF| zY~E5bW?p+{1QQ^%z(G~G{&Mc0hA{L`ke+X_BtY^%E^{)ro;j=8Av}j>*s*PVGc$`C zor2ESjTsE1CXm)v^UyFxdN<70fiOh~lvp&mt2{?D2o%!JV#zn>avsYa2Affz%w~+! 
z_nJQ5m~L6%ojW#K-B0xcnr>sgFpiILcfrZO;SYyDJzDMNSWOHtpM_8%2i!vV%6DED zacZ(qwF=ac>vv!U!`BC2~QJu?6CL zKlLw_9NCu!Go(S|_`|iQt0Ii!d&}}lS>Oe1%F^jg4lFW;=B-sJz(NMmwRtfi@I-s9 z!uZYKAvZ70cmmJz1;v3pG4mSkMbenrvj$~?9ogC`LX<#LGi2WZN{dz+B%3SWq%o)| z4xySfqDQ8y&~pg`xi?;4(Bk%@$5#tF$&%N7j}bWn;?y(rmA$HtBcPVp_YV#5j>D%~ zN(K6#_SH&y8od7r`~Ckd@PqVHXOn{B`qFja#6pF}`1<4HOPB(2mXiHIQ`waa)!k zM|}&4c59O@Yb zYj*RF%LDs-4Ri@ZVYsunpqP{PAP&kV_Sa#~n6Ig9W|`D|5uM%4xdX{;DD)NAU6*ia zvQs|t>5GJS74=uPOV>@ruisXJY2sGKkOVvCaW}gO;0DL4w>+3d#~^`y*^Mo*6%L1g z$kBT|UU;{z_2;9^cq_*Ga@PFi90d-F30VXv3UL+to#j<*y9pFWEEm#M?PN>Z)fY?MeKh?=XW_*Y*7L#P`A7YDL%hLSGLL(*Z3_fSU`orGP zXr~Q(ZddrBAH=W{s9!)e!_>D$8*C#FHZPV&c5KyJ{@&S9Jpl3}R?Le82 zw!%V5Y4GL7$88PbM556Oc4tuM1%~&%!a%*oJ@@y|U<~?$A~Ya9W1&#JG<9Ss5u=*& zblkmsMhA!yiH5&)Byrcyx#<+B0s&B~P*oKnW z3c6f&d+#gtKalgO!H}GqigiCw0TxA6DfdLK{4CuA*0z^Sj}Kgs0gVYHMl18|W_hg9 z4~qSm?efs3PUuU^#wTpv%}hDd8DME8b@(hO3))xF=kz^rAbuT40R!Okzc!lz-rG>N z)Takv%gWZl_1y$ZgD|nXY(NOQ3?Os=r(I0uF?TaKycaQ{W-@mq)=&9tG~4xjsfqd* z{Oqx5-Q^q}>O~ML@mD{bF@zRNe!sWgh$~2!Ul8@=NrQ+Wt<$slV6#`QZiK|k{%56- z*m{w%%d2#&*PGAjoBqHgWQdSiVNL9MS}PKX^fzGOOYgku#S;5VzQ5&_his7 z@)=dPBAmwk?&45~c$iZs1AaI70je( zT|Vn?k5#|g8~NlbRd$7CWMZ#LmeV#OnS*kfHF~q3y=fl9&0g; zqz{>n_d9GnqC(FW3|1m$nrHEhr}uF4{am|Bj;jANp72hdOp6IwL}~g4-4G=6{;#buPBO@h8-826})v!?Yi6xoLBG27R}}TS?XN-L713 zQ?V8HJmjD;%Xs!eo4cS?h`Wc~8L}?n&1os(rF_KBb_^-n{!T=_f6Z=Y_xy2JDR;Qj zPaYMH&I)6@LNJJJdGizE5Ygqe&jZbq%PxgbM(||shI9CVjICZ@wxn1Q^Mm|{x=&gw zd^2q7`|7LkT>OhM_+L5W3JU`DkWNb$-(uo3r8DF(ixe3l=iczR1X`nFNcp0IqnrED_Z9`bwdrtIB+R$bqeFg<4@flL!3e~0Up_;)T+!)x?zsGU-G zg8j;4w`=jrugi9k%u@hxe%;=V8)HwCviyaPqNQb|X~>S27VsqFlr8UHsC@api53;& zknjc#tA@pWSYc2WS$8L*n9XSLsp_v{R{?zwSiQ_Z%wpV=2T6Ug&CXOMXcDiheQARc z&FmEx^+ej`;aOx32Xiw-+3#ysy0HbJkRBP4u`*O<+j_y9DZ~-l#_vAt)Nzdw2Bq&UW}aM{fZBU~?CFw_{2Rt4hr@d92E! z_IK$JYIpWR7G;dy?!I&fgzj*#BPXl3P+EI*tzQ2L^;8l z<)F;sk!25XqeX!8-a=tV;z(7pnqT(GjvLTVERo-)Wrve{#=#3NO&DGE^Cg@kObUud z&T~Z5yQ-2lAI-V)TeFg?i{*E_SF@Vyk46}$*mbXypU|Yr-SYtuQWubXhR2VP8Xm(u z%>Yv;{2MFrIICS(A1P19BQ(w7mp351U&Wnc3``;FxzRq`&jzo$;m#H5Gdq>*W(=d) z`|@OXjCc=emyIj>C`tY4QPbUMo+CYbh#(F?3ad2AlCnE5yh%FnqcX|Ne~AEskf;Qs zv@M7ezAikSb5+Ae)~z=o#m2yl_13=BcQdHT@8s0bIG>b&nk+#(FbxJ?2ZxT7w}W_tRO|vxh-EwWLl;? 
z{*U=wCc=P3qx_@Vy9dS_4q=K#A`50xtgn$c*TYT+~?2`^bpCo-msMZ`T4=Ane?MbJXn7m>+n8-m-5pg}w#w)YknEz}1uX!8;>`%^~Q%+)A)D$wZ(Mrre7!uhwQlGY{jEon2X4^H!W!(X70zDRAi z?!XZpszIM|KpRl+xxt+rHTJL4h60<;l1)i%b zWWs3KXDR#?Yq;ren-Zd5e`YE~l-Zr}*7x_rXfdcXqIPib&gohFxLU=5t=O3N+TNk) z^GtZN+jOcE(zUC`h+KE;+oNLtlXZ7eqojO=3cHz&>U441|HIamfK$D$Uv|6LnYS`z zXq$(~P)V6nNF^0z3W-uEQ<14nk|I%P4r$P!BvT1n8e|AbR0<6Uv6Vup{_E3lPUrrg z=R94{z4vJE-|ze0VXgJ9cV*Zq0o`WctdEEJZ3N5994~_`)P|wc+TNShwA1s%DwH{j zd#4o5q$M)v($hKm_MfjZNJ6nWh53HgX)ao@d?dKYmLZ-;*J-H{PfYh3mRhicD4GkK z5b~-41bPeJwB)#V8dLvCgoeCK7Jh4Je^=r7`l|Ru==Q0cL;V0=2Y59KVQ_T;Hn<&r zl0Q!!{!C9}L8;?`&`NyF%FDPsNR1Csi!EK=La^@*ap*&Ko*~ohh5^92k40XAdbbR` zr!c@w#q^SjT~2xr&sawVm&ua>gi5Nq>*Z{eX&3=4J5n$_fF7PqwEKS_+c63R*tUBL z^HbT@cZ^$;_NeC?B6Rcl9eIgN~rnr2jHWr$j%4~XZ_G@=C?aGGT=>(R;vJ>I~PR7TJbxnCL>`l z@%uHjBj4ZMtM(Qc#ii{t*WHf3*46)^`3HD!g|Uan<}7iPFi4Ql@wVV>d3(Nm;$kKh z^Yjy4w;HbP-%pKFH0C@qz)xo{peRqs$5uj9y^4x%RCN;5OgGXQoIn40O|3WAM1l9A zy?lb{uEEf0Wmh--iVMb+1O;Z_GoY2>gT?cJDfR^EXm3uk|Ju_udL|a|6 zJI4twCCAl>MTG^~<={iHzk^)5;^=+O*WBK+PSO(E?y(HVr{13dx+!;v9(}@DyYgs_ z^CaJ%*_&z$)bE*+XLL@HUDuSZp}XvqzWw~bZr7d>aY-CYKb*z=vtD!4HRo`)8ec)% zk|BmeKci@e1n#%#>+Yq$AF!Y{C5xLGFDaLb+VTcC^(2`gg1R#Vpxpw~U$RF55iH#M5V0o( z4brBw7aMrQp57(uFx*v3Xf62a-uH0=SzdX20Q0#~YbVY+;OEfML?Lv_l7LIk!3m-j>s-SIQ#iRe5E22ZTgJwSz}|Xlc@X}*bQX6_$0IF?vk6j%$nQ%_0bsd*jN4#Q0+7P6vtEsf6D^qRlO`DJ9hG=o#Hg37@ja^)BrWBa50?KtbAw3i z_2aIRoOTVDoF;A8_$!;RB)E;`7p~5JTkL&AbDcX*-9eRAis!csfNIeZFG6q^$W!uR zG3pIzvRanrj=ZAR8J!Cr^HpI@^bC}3c|r>sG7IrLtzMjmNa*|64aG6p>SwXGm+e+W zV2QTvt7GCUv&x3UL*V-LhkUaH2(E+oD;^SQJ}>%H5b?{R_J~HFlJKY z(cjsD@t8`4m_!Abu>;voO<7?t*PGeDjuJbNqD*swoAu!EaC5;*Vk88Cz^cN}owX9; zMiIAe#_pahK`1!L5GUSHd=)(aqlF2z8y3ymn7obuA!7tn)Ybuuk7#B94>!k3_8I5< zn`{s5x}fk{v}E6YJ3Ph|wj&tFIo|c@Cb*;=XEes&eH6#Uc5BvaYr3Wu&by+aWQ-Zb zjGHJ%d{s6MO@?E5VUou`ak1T%QrWp^tBahD->Zf1073oh+BVXS1=hNAIzGqURRF1A zbzl_e<7Pga$CIlV8%}Gm!S&4%9a1qXbBn`mbsifVlLyCO@-X1X_wba7ycv((l=)s@ zvHJOe#-lvxT-O$jgtOZtGtR>FFeAE+Y}k&^%wLp2^d>4ly^;tT}Yv zH)v|o>z8xPO=*~ZKXwkZD#UNWojd&3F=kRv7;~J}7|dz;B9mL@Woa?=pnYfZ}PUPJZ*NM4eY-vIz^_Mb~degSf|sFO{BPM%uS5sBmsh1;M89uIe*V)ojC{e_2tX=h!2 zMU<_Pm_~vxMuICI`-e+TWWU10Y*l;qM!-l$(j9UvzN19-n z2!O#h_I2QWWA``{IQHzmtdsy_eU*+vki;Ra@5ZyluiPZK0N=-yJE6jYrAD>BQspV$ z1yM{DPHJuL#Cs;cQi9>~c0{{_YWCVYaQU`~z)fkYzS{YupJ9qwD9BXyRCy3@fYodZ zv2snY`!l2T)rZHZYob~W9TbquW-NS~4=~dDXj!U%E!?qIhwr`a{@`@>o=5kIYZpYB zM$hUJ8z!%tt!vuL)hm4;b}FmO8dt1-)~B=n9&dgb*xzdFEWr;lV>+Jota{ylVk8NZ z@wh69ey^43ntYM?lC)B$_%#Tio`-Fkqn-Nuy%iylw^`o<sa@4S@~{L}hX`Bi(kqO@zsM8wRy2;D*srHa zebLTa5`Yp_1)Roe`#WaT%?4k6lj}fbq_DkSq}kWC!ZyPCwuWlrKOF@=lGAb|lHQ{M zi|D>$ku+gNLPGM@?$M~8IC+ZZ?3jmt3Z^SE%ss5n(qdfkRDoh)d5iXA95*Ka%89y1 zS@1$T3(QC75_l~Ks8KH6tI@KI{D6Jm9qVaf@2VPKa=c!5%=;IdhL9P zZ(ve=B1BEYVz{MF$ml`Po^*`oPAdRMB2nKZ30Eb(y@E1Pn|8OMZBywOMyJ00sJ$K| zPW-mU#gxCkPpdh;&)1d!4>~uBDqq8Z&iMWvzL$ux;mvRFEPfYM?G6+)J1adH`&*D2 zns%+8_Uo$z^KjY}>B1zX;3NR!tW!7efRkZHaw#n{kBu;>08O8Zw}d^bTgh9cgO|&| z{sv4iZQV33`jkiYns}EeS1n3IE>`3Z`*8}omHSc;z)2>vLx;Lom|ioDzuzSkJMcS? zjTc74!l=@}&vK>83(8WuN7q0$Z@Ru2;!D`t%>C;PC|IAjlKRASyy^k7Y%%~)(m(V& zBsU5(k4#?C$2nA|3@DKI!^`Ew7ZNqk+^@Y(| zRkoSl+l)dY(4kUye6I{W45?O64?dw<0mD)YSS{$8b)u{`4ZA@`1T= zc%jRXHqU>522&$AQ7y?C$x;VoRl~CS|#^0dObWd=UR>Jq$ z@s!cdE1~_Qutz}}__Zg(v}VwiT>PH#n3{cL}u6u;|)jvy>viAg;mBp0JN2FK>7^>%;7)1m}C^yX`w zmh$bI6mXF|d3S7ld0NYMy1cMB_bcji(2k_B@GEUZlAPm&l@jzrhAgh?{RupB-02^t z-m&qTbuqlMdzcht-<&p7$1tSK-*ZA{7enk!;^3 zJdjF&kt3QJ&>4K{2R=L$>1giWC~V5p@H^p<7|qJSN7tL8q$J1LB^>;@@93R)@y7ZE zq@2Wlv}ynDFo)@$EjbyafDaO6yW9%Cwq@u1gTFHH3>r?OsRwitp@7eC$RbLlpnzCR z79qiE*~B@lr#@qs_lujhJXq|U*E3LA(BxILFub!yytagRqe3-mya!I9ixcN~pPVS! 
zI$wDLUqj2~&oF+gCJhY&(`-pMG&m_EUQfI?R7e1;`Q7@`*)k|1!n?5}dw`LHiH=Rr z@-Fn(qGnkD{lS%gU(!wnGxuOHM|SgRv;C??${XH97QMXj2SJlj;J6lqc^osC-U0=TK9xSRKgO)>`nrCUeA;-Dm8RzvF40y~0KgJh0Icz{$+ub6k(exHX z_Sf^UbZx8H%ujl-2ct*qcT&7gZGjM1VGMLt>shS;t$#i_gY&)+0-|o}iM>>(1uW0e zE@^|1$|G9qo#+yxGJl#$wMfh|R3g^}F5j1%fa9E6P0@!GE@0 zv3PoRiD-7)%vZ}!&d(>aOCEx~`GoaE*)uyRwD)vAWnnuih_GwaknfnXa)KK~M$nGK zqbH4aI@8KbJaoB4Eg(=%W*>+22`LBLCHN-^r1~18qz4JR5A< zWJ;il&}tl(eN-&^<#$5Uj+o4DOi@LA(~3p=7O5r3m4oiEYRK=P=_3&AI5eIt{0rnNxJA)(M;NjBZ|N=NX3j=geQHS&sQV6o|zm zsTO1b+wJto;s(QUw9#igq`|NRV4ncy63JRzJZ&$+0bnD)2! zH+h9|Ts}PIPYAw6#nxOdQ+os0_>OO~3o?z59}kXQ^)sdHrNTr#5Wi?IA-)^J)P}H7Z_kx{=$z48w|dI?D_CBmW#r zEna^0m+(HP$Zg%#PR=>$r{L>txL})KZs10RzXa%;(YI;;ndDn6+vwvCP<|a!V zfwOBdS;006(vK>H{a1B0i(DD~u{ zzZ9#Q_C!zP&x@CiM7Q!hPu6(vTVf}Pi29zd>C=2mhMeOtUg5E_2JObIoZSO8Fqb@p z6yD>|++=mtrR+(xi&LMrn_#QV7@EK*m6~6E<(Kg+6D)*V;T(9RbzhJ$>+mCVvAr+f z;N>X6(C^XDco-mLe6I};;tf`uq@udxsd|CGZ@IKlgjywq^?s-R(3i2%>(M--RvsD) z<3yIZ-HTU2fhFp`Kprb1AV^V1QMT%b6&?rMi9#mXP_6a8cMGojkxdkVn^bu7zrN9RQRKd&d(u$VQW)f?0wDsIc^iCbWLSn6FL1ZbRd5J4V<|lmIk}xgm!5r)N!EsnXMiF zPa}!9;B0{lC@QU5_$Hi+PpO1Y)Lf!9^l4r&JN;*vDoSG^n5=f1r&e~-&~^%P+rn7%qhNJnByN-MylHM-2qAgRKzo#kK(_Yamg#!X z=#@{8eQkCb`5tJ8L;l62b^qYH-+l3sGN-e?yc>GDz{a=OZ%+_7+OR#a9_-rL3uj0a#FL=STR6uYo3`D`#b6 z#^ly^SDPhgUX zmz6o4?N2FsQ08`ba#P(`ghP{lhzl-9Y41SmZmVzt%WAJ^PiK7Z(bAV(_^t$q(xnd# zgmQsIU=_L)Wn_rSf%~#nWM0e-3l6?h!)cHV4CacGHS}2arC~SvcMl5Q0v7S7Wpv#_ z5&S~gSEd|J2mw+}ed+U54E^!Fgub;4dqc9^g^t{f6*A4&C||VD$!T)m_05=aqm;DA zKT#$)MfM7{CqH4Jl%twdiE5}|5w;67%y@0X*%7Rb6MpxfKS5@y8Iz*fCMw^~NwqF$ zvY2P*&VlCP#f0tm1Q)aBa+7ftfI8{XZOj;FALXa3$?zn`f8SK4EJ=LVtF}us3=B=m z{>X!k-LoX?89H+}Jz5Bi>RhxZ{vPH$?sVP_z^S%z?fM>xXwdwpboxkVbrk@C-^6*E zUXyOJi_CjvTzuvK;mu;Ax}kd}h41@*oz#FnrtHu=D<)n040tSRr(1-3@2ll3%c*m> zzO*su!=IPc`KI?7fvR+gp3sJ??`d=Z9f{=pKnvR{t?S<~NN732*LOYG*Dq6w8toFc z`Zsj-vwSx8MP~T;&4i+;#5{OKdH7;8L)xS@12?khFbfZS;@J_laSy-^opMGar^n%> z?8uiM{b9ztdDkVUzXWxV{%rvSl`=+S>|Lr;Nig@-;2fG`)8@ zo60~|&}=z^PB%NlFHrGcB?bd&C`=%c`GCgQRbN@#kHEVGJ?gU388PORmy0v=ph)M&L3a`*&IrZN))X)f@|!rcm>AxLHtkrr?E!);NnP(}IOLljF@dEh+i@ z>MoDTfvrCuAakQI*($Nj_9(GH$QRIZD>9DOe)L3EK4jq4Z9g!Zc~7QSvUaCW({R?8 zYt(xzO3BaXFo6F3z^zzr8Hw8(&#sDHLye={(QMJnCHJv$oS*nr%(K1%8UOr!Rwuu~ z31d|IydFcR_1zJ948odu)2RA!EUZ0uZ)Zi4+@Ve!YcJA$x-)wp((5u5-VF?BfHxG@ zzm;16O0&8?!*t@;GvvG`a7f3zmML=9gGa-gx-^$R9r;pA4)uALH99+nIdcQ|+p)AC zfx$eCY4P(JBq(r-3K|<_A{%u(&;}|yu0-_@rJ+w2J=Uo*o7g^AIRvz zcV3Fg|HR|#$44g7i9C{IOzFpv_Ha-cY`ZNnFq%JzS3*=il~^U?F~6_qR+>vl{BVx!30F^<-}+ob&8xxUxC3;N`0QjcI))|P$yst+;; z1a-If9hstRZ9E(USWX6ets4GBzyNyi*vPfb<%7T+)mebNEg@R;rkIU(LZkp{WrcSH zs5@7(9{KZif>1)ywlx1K77fkF5dE6lCrtT-K%4jB@wv$@sz;ELl{ zTLKt`>H2qnc-?WBtQ`Gr%`g}3$PpIW0(p}t>P@+Y2UhfA8k*}7P$}cNocc!=&`L=Z zT@TT~dF{yX3u7qTJ`%ahhcBPJ?s6Wj>$ZP+X0h-KHG*-vp5O@if`qYl*mx*D?_q1R zLEL_x$9~i9dy*}v4yvQ0R4!tJSZ#8nCVU4K<( zrOQY*PcsVI>Q5np3b3H>66l3Dv#X=tNn4P9P~;El&_P)n>>u-Vvj`-M*mi<}-^f4FkdfwfMI z?Pm>kes`)5)y)ac9$p2b4wHiG`ONoKy1N{Cvc(uBCYY2;JxkZzS%(;{>@kJ^Qs&(@N+gxFb(+gT_t;L-T4pc9~2EG;rE}&$h<_})6`8@Oer#qdbo!Z zabkbc$U$i&i=V|n&hm6vpOb~owL(YfmDF?Li#hCLf{XGo4Dw0#O99D}*e_z`gzv~N zuL-fyyQkOB_lq~IJPp(Re~Lvb3gK(icUnj2rsFJH(QXL_CQX&Bx7N9dB#tF`X9c(N ze|&=V%`pPgzb~5ZQgR4I{p!1Hz_4@V!7y0bY&V~`*+46EjdpY!dY~S^!ol+0>R~## zB>5fJ{?QqY2apT}$O7`H^G;DuSa8fu&#n%fSU6?NhRib}KtEW1)MGk67Y+K#cdqaD zal|tF`|9myXF+MaTsxq1sZS6{Inf;QsB~jks3UA9r8rkCiU-X7Zrl0k(i&b{>V*1;5ck^ zVdcAosXwGnAD;IU!D0oB^NydsFEV>MDW2TrpIYu8m~Jb+A+|V6 zeyW@qZ<7`GD~)HtcD%BhRgB@DCV1(8T>id`Sj0e%-EChb7lyVmBig0+iAKl-OwMJ{ zBk;y91abd{EPlQ%);sdOq_jjCk%_`A(E)tgcgHwf=KIARN2C&*PYL$?45;hkV(pC& 
z)yR|up7!o7E2m4B0HFV5T;ry;sJCM4q%AMjQR|JgutfG|*H>Wl`9H3{lV11>uWb|} zV;}AQZX@gtTW@6JYbtl+FTeQqb(kl2mz0lyw0X4hyY(z=7eZJdFAe?f9Y1k_<_ey^`M&E>zJ5J-Q zM4#ZuRV2KOqQ!BY?4BRTfhs31uT0vy8^)I*1M+kspsSY$>Akxc)Gc19a+iCeP74*)sD8YO!e#_#e`*erpn zdFW>20jdRqgEKkw`cNeRm5iSKpJCK<=+G{1#-Whz`i#vw$%RI~?Hr@?W7W?OE~AY6 zXaoeh%}4qb3(+QU^wHj*$m1L;83e|-!6B3%&uqdiPwC$ndjEV43a9fk1f3|Sv_1B9 z#?c!_c1EDy*tUP}tJ$tBj!)uG-C8DCnuQ+h_>c_k$;Zz98TDmf`}GvYPEVQp8+QGf zUTTP1>FM&=!BMqH^yc^rb_5RW#A~xEewVGsJ^vtc54{CL3N}y=Et;<5ZI^PbYUP#$ zqZ-s-lfS!6oZ(cI6VguPC*0zkVP>q26AfYv_alck{CNXP!og_95+hX zEa5)$rvF88&j1SbWSkiKfkWKsPTxQ~Jnri*)CX&-lznH@VpcJY05HS5hG%bj#H|`v z|MQWxE@?+#ruc0~Xt+rs21k%zKDnZy&CGtzDf3m-o>e(%dopU;#o<;3M_#r_=0{j` zMYeAdifGvIAQscnmb9|&@TLDoF&Ui8iO8+dDt_<_FQ@(#PH}b^l8gKy zWUN>LjPbDm!|Ihl?G7sBg7V$w5N_M&6DOH%1}%Zk3r`%S>L22W=pwVWYuL(OTW`EM z?Zt8O=Vv~#fRSz;x5)Yfw!jP#LlD9b)ZRqfaR~kEVsuiw_ICjdQRNXch*W<97Y8z- znl&8x{kG@pYX}%VR}l9eAll0l0_IfPQDM92Haul>Ey^7ZQ_uFca8!xA;u-LAx_ zrVJEc0p(RrQPf*|jG?&qK>cx3Cwi%7@1yjalNx?qlng4P|8z(Nl!lLQ5sx^~bhar@hTqDwlg(@SD{W#%LNWYdDnmLIS{a$5nVs+#sV(nn26(VG zST%kxbs-tXj#o;cfm3}3FT^FG@LYPp=GjQi@j%AK$sIlGV;A{J>z zJC}JTMU7IXIZSqSspQbf1c$tgRtDkkfed!yI=+rVB}0 zIltM@-VdG5ho&_y6N@8>m>bLOlxn#Yj-djYfpv~N{wDwk;|(gd?2ucu@66{R_`tO6 zFCUFAbK)!$kQpO8kz!KuJBsNMaCz|dlsx%&86k`s^>g$pw|K+Ft{DF6SyJHq6-Ac9 z)|a=&=Nb%`xgrKTeBK0h&Ern2kYR1b+T-T)Op>qcBoI3DM%ZZX>PZc2wfxr7*;6QB z6NHn$KK4@X61O{d^yT0$xk$ZvJ34$-VMGzl*nwdg&*=&2e!EiQ_Cbay1)T}5=i zU*+V2Ad?YzRR1(`Y>yQ2juTNo<5Cu3GHJT?>;hd`uX#w;7bX2MZYK|2`hm1@ya{L9LfW~< z*{d?fRA-{4fO(uq?Ha$L@V4%t1TNnk;LTaud&#@}N=@9oyJunu-;X*|#b43`)+iM! zMv3l;rtT*kE~d4_Q9RPJmwc0p6McHCvWM>>V>YzQHUW_w`}@{~_48HLE4^%Mb8#*W zyD*a{W2@%EmG0k|@rZ%Snip`0Ly@P@qQVvr0vL8| zc{I0>jfn57J9|IP^ief(Uxw!wHjtP4tDOG0e5BX_%uf^_n2b{*oWFe$+_$pe2su7O0OzqA!D zDi5Fm$TOCJ<)iZZ%guP?Mk=c>9|2o#DecL`u;B8I;FAya@Jmy&w$%*-a=xh6jyyL}k87qLAi3rZKA75AjB2bu|Ha!PIAtk} z$vVr4Kqa_rWWSi!>7!>TR{n;-b-}FF*~egz;k^4g#Dm1P z!+ybf$scJfkYFD0`0Lq*lVV z*tk7K!`TVK-HiRDTMq=&&@#_3Vl3+knVkPn4M)oKGl3BL1YUi=**x~OU<7`}X3uwE zTQ=X;_1s; ztQ-tiRwD1-3a@URPK!PQk*Q$jYh&_>eN`eM2(l#lgE3`oVh&)5bnP(KTN?y?5&;# zUav=I_L2ee!v%FE2bo*&3AbgQmLr`$bum${lFDtzw=enfRwwM>iP<^jS|IX>Wlo-0 zE|UzdVc4=Ed_I7Z6!h3$-w7Mkv{GN|<0lAPEJyC%U7Gm>avM^z61&m=X~~uodC#i* zwY4F1yz*0l4)IMjRCdl~41}uQfgF$f%aw+jW$8pG0?*N@Z!jOF04Yr;|B;3yrUb!o zCIrsJq*MDGDt1X_TdJBGe*=YGpb@)Ix!DP**Pi~V_AE>=9ES75=8vl{tXxE_ieL2J z0>lS^L$RCYeGPTz66|ygtn1ivA7C}49T0lNn3~+172Sp#J?%fz4*7Nx>c76a5Kt#g zh)Huv;ax)d5X3JS%jRy+9%IifOVkp0XfIURe^XE`q*?Olp(e_H&zw(JKz@oFBNRmz-Hy z+t+U}O`Q|nhI=uMUs!f-w2RycUX9y-2~gzQcjHrrc4YW3c}YHH&78H6;u&C;$f{l2CB z`vipPBLh6+Vb+Y}Y%~^Jo}KizGRZV$r4@!Cb!ZOOL(sgwEZceY8?R_R4u3}uuj7ye zWQ4itmizA_Q7Qplom(J&CuDvT(~&(9RExBCf)|N67t{>!mA z?cZ7ea!8)+&~lg#dZlF#|8d7{lOtiFjIBY7AGg9SI0?5<*F(yxN0JgevhTx-k&Wml zi4Ds```GKGSH~&I;JOb3b!zK^b}$kb$FzCVG{|~km+((bqSwgCJ4!Y8j~}6aB;IS1 zB86#bZK5B2&^AJI^(tqMlR*Q}hGft_%G-SI;10=PeQ_qdmh1ncalr#`0bhD#tI)r4 zgcjkPE?d4Xw~oLFX6eTxE#lojgpfOT_xzBU=kLA)ZO1kIn|6d+DV#vw8eSIFB6+2u zZ#$Dnh)$ayxI%StZ$7>B0}_)v6hicwPP!&3Nhrdodx^rql2KrX!{Lr6Ojmc9y+`IM z66p!OAgkk7(WR;yF2Am{{FLWyjUovx_XVaS+uZm1$o2k3C}L*uB=W{qqjmjt>q#Ba zlDWQJQHH4S(1<4N>qJ{k2kdxg;XTbEI_n1)e1WgD4z{NVaDO_OnWiC#=gf1y#sIgm zMUH7HgDI_5B_T!;VUZlX80OBUvH?P+1%Qt?T)4GINp`KKN>PXETYx&{LK8 z29_n%%_-{M;6v`u*$W2cq~fKOJvlLS2S4pl7)u5Ia6m8oSBd_!@WIp;)HJ@_<$hu` z-Q)Nc8-+W^URp75g@IoyJUBMzQWbZ`4rxyJdkj6Q4$L_~ULNT3i4r=*JVb&t-OdX0 zq3v2!YpFAs6Z-&5AgV+FYQ?(^|ub6vSJXl=YTe zNZE&otO!NXw@~~g?!d(4U!#d{m%C0C*CXZ*^=mb~>Aw2NLkSzJ#BrYWSmOq_76St_powGP>QxXsYw- zu7-5^rnEt)FsEY}ByLRVqKTs1WvNTT|2^aO^FcH2;B)U0lj1ta3?N|TOqYlKFQ^4u 
z+pc(srmML^QdbgM;e4wY!^&BE^3mk4Ix9hRZ$B{TpxH*lPo+ya!oy@L9n=6;5_|S? zqZOPRkbnFE-AF7X0~k;=TrgyXcmr|Md3ze}Nf&dO4Zgucm5p4=5lb99cjEY%Wz1c+tU#rL?qGl~Xfw|CD%)rD8 ztYjysUj^9{f|fw%dg$;L6{N`^@AzUyq&1?Sq$N|*3Wr;WYW+(5aJesySG*|ega7M$ zd?vdE#d0IELq9oe?E(67adHjhWBJoFPjlyzDW|JmKe``42;f;e7hEziZXkg4mBCZp z(FUd?C9vDgUtF%?Ro5xrUl`30l54=$wvyB{DCZ|E5{3nPEul=c8SA z?%ZkWhrbOXV??95|3#_LAA_K3xe+!`?_U*rBAUntFqbBtXdn+H>4UBm3}PGv%X~>H z{qkJPxsMQ#S|%JBAi!%IgFX6jqe45{jup=}Lq<;DnU%}j!zZypbRi1u+^0qli#|z9f-4tV{Ee<|u*%J?WCRgq#*>MIXhttioH$d|fj3Qy zi*MQ)uUqyfiI14V*ePAV+t#soF~CkjH2!Wz@fMfhE$*m`Ny0?`WR~IgN$sCam$s+v z{7%wV)?l|(^`2ESW}A*hDwhOfQOPGZTy_asGA_^IppA$`0*HP#^kM(h8L9M{WOR<_ z4&-w_Gm{|6wDuT09iJzSz2Zs6vkSy>_h?DmZq%U_lD9<54Gg^P?X8%bis#K7 zpNIW=x&zkp8 z3Y$w@Eu`jO;aJvg|uO$F=C3WU1`3sP#>8u(TcjBN@ZD8VSOV<>~LsZlgi~%km%$`v3NC%`DTD3N;0;DheNn;IkM=onHs7+XbfiktuX*tB!a*H)Hdh%CwuUu zKW9{Qu9mXo@OYo7H5tO{j43l#*JOMQ&~6s)M$Tt=DFBZn3fpEb*3`wUXr!3qY-=$( zEs-Tkt=}1hc?ooOA|}TZ4oo-NYzRAsR170pf&(IJYPP?VLQ)%bkoeNlNwTz^E)nV0 zuYQHweQYQ0+gZ#9-Mk0H(F8TJ9MpcvkmoGHS2!E-?aR$-i(ZF2kye6XGGhxkble2H z)WyrtBx-3V|D_2*=29dD=)yha))uT|xNW7=Eg0@pR3oZHCAJIQe@4nK{-@rO9+N1_ z^I%49X;Nn3Ej;3Msu(9zJyjv;)8vzaxhp13OPfP*+R(LY=DQgQM)>V`7@ILYchSh*vwA<# z;oor~5z;eMV^6#8cb5QkaMdt$+EROgieo}_MJv40=Q@GjIC)cADaer1y(_)X4V)(R z>D+%^z9WY@w0!^ussZuWn&OUP8zUAfSkRNexDWDqX?TL{q9Z-hP=rjiaaas)TxrWmt-okmAP=~lnr+hmLNSJ@?0Ug(Ev_TO zSXFeyaKj`q{YXG;m%O(mJA{4vGRF^5mwOg-RFQyHN|Bh*js^BE;)J;3fyt}L&m)Ue z-lF!Z1{mX`v+MiKk&-k7N#nUdninf*J3hyOCqA<`)5l#f8Z?}5)sGbizLUkfcs%E& zniXtC_h42jnN@J-CAPIFjGePcx~=wno#bB+96}HRx0V~vlS0dklVP6pM!(d|72VRx zY==4d??J3AM%5P17uo!t)Q!Yg6p&N`<9aN4cW4BW-y+*->0ASl7g7zp@IjO-L$-ZB zTZAmZb5Yn%4Yif(j5$R$&!94^hNmBuawQ-6WodWu@6M`jTq^r8!(lw zNeGv9?ogWSW8txreGENqgRWO$8zuj`gIeDxF5rP)VAHCVk17i9vKPZ(z>YHCW<>W0 z^;Rke)z-#1(l~bMnmC!p0z3h$7jso}#sJPfn+AR^1$qGCF-=$08@+Af)~D89eRa|>1>iTK$cJGU6b;(!UvSBu%KN+F&}A@{cRx7sJJ zSn$!Cg4{(v(Tj=4OI7xya>}XR z@w9Mj+B74#JKPG#9G(Lhn;7n)4x4_xIrm~sYIbLN`HWaivC%s~#>!^ywSP|B%P}o= zGd28EPf8VsRkCu@%$;lWzM*qK2D`C2Gv;F?`z!X3h)G%Q{P%0Y&l>3DTbA%2e9kt+ zn<*JsdPA;R(D+>i1|wJiSq z#pmzZEk?7K80m;f=})6vA~Fl8;&Zn+B`PF;G7ChhK$c&%V#Y-x>QObPk;}xs3wrPf zHR0jPj9K~?xQSN<%rO)I>G1m>!$n}PL9T_kFSeK~hJ3SJx(0xAk8c+gN^k%BfsuIDIc9!6|CVo1=`Uxh$iGj^$te<+9 zVk~7d;96laZwCv7J6eByy_nUPtTUqM5xZ0m`FLvoH6D!`GTI1&u7<1YA z2PH+vj4@U10S~dwPjPR_V5XB4r8z*5b4Yt(r$!%InZvMUp6|KA%9L`7XjC_*P0v)~ zG#BPR66ux4HGp(zbHO9og@|6OiWUj9!ASa|wTz|}bQg(95=YcsN?Xm?wQvOnPNBH) zIe3=RFDJKZU3NqT&*;LGLm72J_xa^%fd^~M_wel+1A3rwk1`xFV5nv~i&+0Xg%81hF;_z00TaTR}N#OT4 zLBeW?fIlIdEel_{uB`dolj%D4pQPAx_G@` zLgE=cw!by*>ec;w9zdf*T(+xmeZr);hL=;O;i!GAMP=iqRlnv_86Fkb zH8#7j$nSL(GBRJ`tDKnz`yZ^n>a30dtGvQn7hKYy)S^dVz+;QDRbDBR?FcUVe^C&U z6i221eZEZ7f%lN9TtYTV>iV+QzuJ)gL(Z4oZbD>DUpPbNXQs&zKI;ckQKw!c=we1B zbWIi*KJkgTg#q&v@%Rr9M}me%_rs_It=gJmwLP3gfwqXzCW?N>#7Eh7xohb5w^dWg zz}S9A=$g)=oBJH|>{bH(A<@ekRoo;s*Fs``Ai2Ts6fB+S5e?^CVolgF?C0qw6aF(j zI$rlqBfagcMCM`yF`akinra7R>aiY69F^!kVow|896Y;DmU(I?dd}4*L_YXfLCPRb zE%VbK4w?Xfc_lfeBQZ-|hT=|Yt&h=1W^~Mf*74%N)qE2MCfQ?a>lmyB98bIe>4`s=zCnqh)?VDHJu@6#iv&Dosw;SdVQL(r7bdC+N-Fc+qW zfJZgsc$z!%P5i`eny{J2WXHo{F+Tv4olu3#@d@_By{dU{%++V>8M7A zp7)yq>)v&et0cA;8(aWUHjfb$$5H=@)EQx^6%-7~%)BCg4aF7+n{?%^?)-m^oQ^m9 zbI3<9`s<-H_Z&eCN&#ogoomi+n^GIVO>*#uuDYv~Ex_fgq>0?}H)DeDFGmJ?NzidB z4V!0etbDZSEAeSl6yL-<~CW8>}nm+n4JHph%Ah`@X6;h8g^4#2|=cp6|iip z9CJ@Rj0OeWZ#otEny}ePo#Irm*RrVKSCxZuS&6^!t$5@tOS&%^x1C9;^{`;=;)dax z1AK^-p;&fVcoIV?u*lDPq2}atVwXi2NGt18AlHhGp zq2lY6UhlWCK#SUec`7mK-iw@q=Vitmh8>?=*NOJwE+cb=udRORWB(}wxwY^NWz6nJ z%C|cq4xOw%eEsXBNQjr0plNH`kW_nH`}QfnepI)q7!tYQnxqizpG*pTM-sIX7@T!- 
zY(Vg&PN=Fc!6M$O;Qi>Ps{HQ`l;9CwD%*1hBCF}6P>xSjNMA>GdNSl%90X6z-!^Wa z&_8q}6O}lb9;q$Fl)3hX8nNQ_6XqK$OTmi|j=u|6`1YUu8#lzYh>j+XSJtn^=)RofoG@z?!lF1Vxia;q^M=2TAIkYi zGgfSUgNsGRc2slG=~!S!YMGdVsVwI(Ne#~5ola8Y1&iSH06nh>v6Ufudw=DKHWMe_ z3ns3&;QUBB65{Jn#RF8*{y3&50xg+lE-s-3FxpgD?Z9i#(%asbh#>G(;>+D$!O|pJ z{N;_;p1rB* zf%S`7dvVtEgl3-qtU?K9+Qa)Se`E6VUO-OE-?s_Rb;NA!C~BT$y8|zocxjOu1Q;!j zqSh?u_!da;Xta#9)g$?RRPln!JiZGhJ3ih!=Rt`$ESvcmmHD$|qLqbM>R~^syx^{LMpBcZoUXYo`t-HM4b1OBe4lyxR@aweGS2VrMvULU1LLfCaxH27{%05k z2PT0aA`J`YO{eLShLtPw&1`vwY;e}TJ$}k_j?CIc){o7HZl2M~8_f3u>xk znwBvODV?Num+WFGlR6mVUC2%_^~kHQS;P_b1lnjDYvrc3u(1f3Sm|lOT8P(jyjxRjGH7O& z!YubF2VU7;bbn_NU$(rCrZ?hCi41+pZvnx426FW6>u|B%h?0l+wXdt#Sv z;VyfbI?+dr`d@v-5d!M2nl7;GqX$l5_~_7$pp8N~4=K_p&aTvmZoGSYhYt<*VM~db zR75Z=hCXo+d6?jAeC)8GMCK^><+a7DX}d6^+J;ZJ1c=HD={~?>XA#~$eE$}zo(f8e z$6;nS)Z-T)0G^z^re-RA(5BAe;gHF4ckZ&RN$Vez@u!D#L4KR-z;xfxu4>I;z7$r=oCUElO$lBd@L06cMBaAVUlj&RzF(-NJSP=+S=kwI#L zeETK*8((`K1hidIWcNLtMdS!%7cS}p88jx7XGbFeU#zJ|34Ebh4lD`>*@IjiELWq)b!f2;qeUD7HiX)ax`7CnUFxgU# z-@`6rT0)#JatYvxL`Gy4C1Hxn!52F)W~8jVD?YyC7l}`OUF(S_2pZ0CBjE3H>saL= zh|_^o{eRV1%|aZWqgK1F{i)BHAGy=r*MYuo8ds+S2AsE5>!GAPr@NWVaqd)%#NB{j zDmeF;J9w>u(0W{+KwnG7*}x3TC{Op+8oFdIv6l&iN+wi{+x4}lQtb-J6sg)D5sZ}EzQkarEk+k7wmZhoY$ z@0{2Ykq%MW9H9#Pj=reg0dIH1|7|wJ@rrdSR5T~ixlv)cma1E@ZcweKVGgj*|LbR; z{Cb>}vp@qLu1&t~MYWEi8%$j%n5*WIzBd*_5YutKzUVf@Q5P)XIQ7b0E(^QiSrA-)EMNtXWO9~_$GspO4h9tf6KDd&Az*75;&TEzI=sO zJ>3}zcN`U-`7E+|uqcwv9OM+pC_>E@G_J0?fKFX;db{Y438)IJ#jX?6Em!~rBi&?3 zU%j$H3UFI>3#18|zJhD_-Nocc&*=sG=09+=rR1_5OyBxh9C&_v^ABfMCfYjXirM zpX||{?NxY^)$W4oHzSWRy1Zn1#=7tnKmNAp*|#ySp%m45(fYHeSTAu1_&z+uar4fN z7ieu#gAskm7(zlsVS>;1<{V+|dzatXmBJ{;qJ@A109cH&E!XV4-s0Pb$LRJqfF*)V zPlRssnGRdxpk$9ZKHG8))^EgIs1?U$-D6myez<;GLNA?fxdxEcqj*Z|rZB@|;cF$} z{dU~dQ$^{gulI6{fKTo^HK7M<46e^_1!gS`$y^ zPYHdwCBAN1V0B9rAQAO6B|&sGo-=N)Q18cvmSZh4 znV5%=RgrZq(GPP@OaU?r^3}b&i-U8*S2pzHKIZ*9DgRwOj88mdk8$%$(+)2YMG8+{ zS8;c@P^1b+7xR-uvf$97Jko2Dsk5BpD|2$9fH>wuBA{Q{cBn7i%=m*Ho3jeHZk#hd z$Gju%a5n(G7})Z(H#VaD-eHxOA%3uz(cuPe%|$*Tp0xEwS#{KhiBVe`mfuGThcD+>A45rXq~Zfo^bQeonOvpqvO~a3s1ZT zDMpZJufsd$$6}1W3qozvnzad7xmvFy54)y-)qL`&V+zA5T9T-(Em?9M9UPD^b4XFa zwHsTbT$T`XF<1=9xE`EZF$GqNGW~!RfUiqiE^5;$+Stw3jzw{sr0)9VEvZA(%4&_~ zc)4ZN*vq@fk@}$%NH9uSCu=jJG zZ3%}RP*-JDSjYdkGeGvoDc6>$0)9%#8mcjf%9o%0f8StyH?DVM5)1XjIz`Hfcjpf4 zZWCbNOyvXBG`&!ciS7K$3!S1owQd)HOI#x=1XOwJe{8p>vY+4ONx50e#Hcm*1lmBT z1y|yz+v?Obq!7j&NPRmR@~163+@6y~?j0Ou`UHT%`97bV%_o^K%-~0s(LWh1>DxN} zE4q@TH_);`VS>;OJHN|zA3TjJLvW-zjz9i7#~Dx?b2QL_M+xd!}kSoRSNun{66T_^V@nljG9L%?&EPp`BUcePy2-Sk9!!tpI}r zn-BR;+Ok;^BPQ&bJ2fo}FfU4lhilE=ymu<{TAG?Qx{r@u7@Z`AOY~L9H8{Ra9f`YR z#C(UouFE6gI+*wwK21;_8d#_r{`UvF|F-$J5H`?2(-nXlmQ$qv9*oqtBG36A{&L`$t%{10_uP9M+ z1a|4i@;n*tjN$*zEV-?qpzpEaB<6MKTGwSP*HZ(D@vX*jp~Vefe^El2`{S zgj1A%qf*j+=d<&??3sHpV7QCnkd}tg?G^pKTPDSHaVS@wxWCOB^vwEN{`7S)58VdX ztgS-a&{{&eT45kDI$n~!+9QnUPv4yWc*nc(8-5}2`>~LQK7`Jk3s1HuRY7n5_a5St zV)9&s*>Pdd(T@jljcIBHdEWb*F}=Q4Yvi(LP)YsgqO7}k_qVOs90({D_%_)cz+RCGLh?sbum+pqo}F zUa0m6W3~r|G9FI^+!E!RdfNO}xNpqw#n@G3!GYf4YGdanvEM>PUIs#&y|_NDnG2gp zb2kpKQ`gT0;->_!+wPM(nDZ`^FyDK91&@LIm08>!8WF>dE}96!$9KlbJQ&n?2^#$t zlfii$3}*OoGA@%#-C7w*`vsV|2;Xbv6zhB|HO^d4?(vq#yidl?-M><(+{K$aMmO58+{;jt%QaNgnYZn~kQ@Q{ai{Hchp081`%wA` zykWHe6JQhvk&>cqRMe=hh?3pfMq5>AI~g-lcKMiOyX+~R0 zE_8|3;g{>7z7%p+Iax%=HX#snq^lddem@8M8ioDo@iLzFLZ^-P+1B(}rYg?jjFbzE>&qTxrYvPaq~&Nk@cIo~pW z<1c1j;Yzc~olFb&eWT0H;>dsn%f_E0Ln9l%$9d#-^weLW53ci$5Ul|3VgM6XJC2Mj zY?l&)q5ocm+VQX00kH677^>M~D72K$<&6!e_vlxf`RdrsON^rtu?ewNqOvY@Og-p) zHl=52jrNY`J)QqNoF&Jk=VL5q|H8iwGhk$i>FJl2#Pf}n%pT32tR4E#y76BLZzTtA 
zew){S>x;Qw8yWGY1VVK82H)SHP_ZCv$S4=Vl<9JCsV!#fL=P~03o%WIw0QuTs3o!? zEQ}iXhaBFhZYqtX!eB#$yULf%9nr*cdlT793ujT@15)?g@K;))bLz&1arHbrh;S5f+QG5bqAkz9Onn1E0y_R(C1V`c-y)%*@4S&(pSf?ZuzECxdI_} z4PJHrMb#%ZL>h~ZVFBs(e+JrujW_EE?{MI;b>#dAf+=xsm=tJ;AxdsF_=T;sE6;7Q zqgxm4dt!MY#X#qyA2IX34#S0X549xua?=OV+UVMZi1`Rn4ihg^viG(=+oiUCc6{wx zLJd?6TE8qj(XDZ(=C+Dvoq(=1ef_5=t-YIIfTm}y>h{}@o{NmKBsun?rvlFCuUe{4;WAqe8#U9+W;h8Fjtqvi%JNE;)`j&RYX9vn&Fow7z<%sfxQ}=fPC} z2y>3d9fk!6=WO;(i9L?hGQEb%1=b_@Ekw!h8A*u$VS(&x3&f zTe`5&*x7Jq%p)tP3?5h#Ij$69Mr&S9+$+Oe&PXb<>46gk51(*7$j^-&aqC7Z^
kC#QQZSk_PQ{Hu;UMRLPCoh#p&<539biHdN}UCGeE5wEhV)6L5{-n8hd zq}4&POYZ%R{lKiqhN7lpgGH|~b$*RxbPNPtOM|`9-rh&MV{Q z=)U+;VZ@aVTO6bYWvox?>@lEU_HDS)_1Xu6MxIVpUqF|;k@#gT);Zrz=^10wlaQ@p ze}BO45Q=`u=@g{?_DkAc`cq?F8|Zy;R!3txf*T}8>^miw9&G=u0?YHqE9G!#@vF0* z=h-O(Vv4j=R6BY?V~h-}Fy5o-;^e#pZ9^79?>?|dZNJ=RN(yYyS!x)9SF9Lmcol{= z;=}XtT}hw(l>-{9#@*2S@iwSb%x9pwlAh7jBx5);&+N9FAShCF1)bie>J36|Xg4Z# z2jz{kYike4PmP#X7{*2{Ve^z^nXiYP5W;vPq;iL#3JcAB_fx_;SZsuJ`90M|rRtsO zGea47G_e*IC1WFpxQ4?A%XE zbd*kk;KQo<^K2ogxZt817e^GZ{HSm@W%>>&naTNU(U*2Hr)3y?2~e22f&*TJHno*d zjsSpM^v&h?)>yINz5RU+guuI~k?wimQtAv)4K;QZbi~ia_`Ix}5#r*>6AY0GZ=&i|XjapXpHaRr5Wb^(fZB%KSZ$WU(y z^nJ9+eGUBTDDqtUn)WLzoX4G)kd9?bkXFE*nue_zmo9iy7)wdWFV)|)uWUA$?=1QKi0=BIdZE6&{G_;>Z~jbM~L;@O*=IMbZN)P`7xsK^>Q*a zcs(>$-wLU8ria{}BNHRvwd8G(f7(l(zbRc}N+ab3SVrsLq;VWzak-Uh^gdGND)YRh zCK`?9VyBuF3Dqj~HpfS{>Vsc^@NH33!qD?FtNTzZ`){wB=Oixw+f%{IEDUJOO6Fpj z^?tze5GLj|YgYV-n*R+&ZsZWD1n)Q&TwOy6Zu9){7(Zg{}m)x6!vLl40q!E;N{ zRh?Pb0K5cwdof2WBi_bMcOJ%>cVwW{A2@ce@8XnP?DWYR)36~`st%FtXTeu(({XO_ z70{?OH3UqB$CXvJ+(L@PKVXWw}}Mgmq?>e5s zIJKBY`H>{0sg!4m^AhkN_WaMdtyoTQl!mL!Z_i4;s5Z=l3zL1>=u0l+>%jRG4kx4A zsxZu8UcfhRNId(UQNf;u**x!}4)>zG5s+2YpH+bolkB~K?*`+bgf0!k?>BJ1XT*Ca z-v%-Rktfk&|9%w|yO9(M{be+jX{vaXYc9mj6sC&hb8s2oDw$utAiF7SvWJ3P61iJJ z>!>_r7TJc%qg}TH78mFG%blI)*`W_qrq6eYGiSkO6%X6Hc6C=Vt|VlGr+jTG?y#0J z^=?8M&~@I4CTmaMM-apR06A#vnFx)Ky2{>hoZpM?pNsj|weDDj$805VWwssHDI5wV zUd=<8pmXl9?8}6$tL;47qN{{QviZbKvN=~s!nqaum&qXGt8tQH`QH7#(|fRmSTo}5 zKN?)?b%A7|)7I@2)r^pRm5HW3=!4d>e`OoRJU(2=4_)S+UU{7Ol(mNa7uvOwgtp~g zqLDK0SOvss8JZDSYd@UW#f(w!4e(R0zr*dv<{J}+Imxh^8jXzpm2b{|3VC+m$jmoWyw2mLce18t zcrs-ZsWHlovoh5%3IJ8qTAdyHH9{4?^f|g4yN|}Oy3-+jKD|8QCbA;HMYaYJ`|ba3 zl_?A#@!&rmU%Qlr@ebmZlk%@{rnrOr;LYCKh=Q^b+?T!DdoZp}wXA+qKNN;i%fzJ5 z+#mLYV)?yjJG->pbYft5auf~iM8RR`X(sB1pbz_ge-F0cW|Zm)@8mv;58Kk3)wyO~lwW0Gr#(YMU?x<6@8QV& zELsXkeLG(x<|=9=H({J@!K9q zsiY@84&PAA$L0`V2lJT$)oG26qo4{3ju?XX8uQ6gx_4_QF(LZPPku&zvZ2gQzGi%2 z$jMV*@bf2oEhT}15^%3#A@S#l0mY{u5oMu zr42dfABMDeBxc5=3r6y4dj{*%k7sic8&c|nn0{*&OA(A6evEAzqu|5*=9kATXa2wo z!~0i;0dU(3#C{qiu)c1ctWzvg+4?5)enE~U&xm>f>ot#G792%P-i5%` zhQ6T39iuFMvZ8@FK6Z}dWr_OK0)}owt}$>tJrJg`%7M1CrTi7qgbvTod}CWCCxl~t z?DF^%ZDu&}r?9%)52sdF!i1cYa1d)PwL5jymiMs$yP{O~WnT2({e=to&(dMu<-DLr zzM|%$i+()e$W|aZf|MNFmL~Blw*Dvhvz#C|%gFfn7ZV&m<_&yGGp|8jAs)IslY`?h z_Z(kYZYOlH`+ygoG8@8lgk_c?+d(9QmXr50wSm}v3M_@h#mfJV!(z9$Iiu|$?*9{i zwdYg~?<@Xd0JG}N-FKt-pE2bUd|I|!$H?g=H8)1}#^XOvLq0!(8c$crdS22j=P8>O z&76jj-3X+b<@4Px^X1mZY6%Q?%w4pGhL}9(i5gb9hP9g$x4^L5F?+&(TzK!lbklYu zV7y}(%BZ65UU^^(v}ahq(bzr0_OrVK87BJ$6pgPQ;w`-fj39HI9A|LzUvXr5lChn) z@#GM!O4ODB?%I@})br-vZ{;tIJ%k|r3;u`n&s=MrBCeI`lF0!7Pr^!XQ!gK}#EZd# zgm&HIZ1zJ z@Y{%uoy7=`yxGv@_(@wl@~{o;1xhn>ttB`prW{TD0r-+9tcHflO1f-&hL(OoIyoD9 zuCXF>Mue&t+vG9Y7JLi^Q2Q|U?Vez4PA2QqfS~l`v1LX};#P3o8u|ZHw8G3 z_*t9*8C59a?r>WU03xt=0@tX(yk*{Kv(JM1A?W&7lC3AGvd>z^WqM1FST|-pdnPQc zTMUMg`iKk0B5G7(ft7EAn6mzls+z*cfm|@~K{4+hc`AlWeb3EY2OFN9bu!Cl-sgZG zkM%X@N6Bav2t$T&9c7MFs*+Ccbag`(#|>0ey8tEc2)L*LC}WSZtHSyQ4pfi8ensTs z@n8f;2!i*wZ`Q&$tE_%e#&Lc%UTt0PFOOypqBJ>(43`$EDaoQYAyBKEtS!cEN1+VA z!}tj#^pJ)Ot;KACfPU6~Fe^fnw-tulm&l$#yx2SkqUG}f;!D-iSQENu^I!L zkcL~8}r_=^FblHrY38k@mku-YDDA4$b`40|;I z7@Iy@AR}>8&Lxf29Rwsx3u)(xG^Sicv!{P{Uo5(4ztYz2bfZq7a_kl|=ku9)LbJ%5 zl5XzK(+{=6_HQL}r$3)XEu{j;t(6a(Y6R{gzi9cB5eHdeOm@m=o&?xHa1Kb5udex+ zxejr2C+HMk6O-t3RUEQX3wg&MeTI~9PoE3OvTB<6SxKz!m>i#Yzm%iLx+rQ}kID+SRb}$yC(_+8 z^iYD|-v%@{XQpKVUPr0yQh@h5pPbZY;jf}*Z0?edPxlR;$3O*P!a8P|{qOg&FBGp< z?V7JF485o~Txzd1&b)Fgt8gq7_22e=IWyFGphCtL1TC(%qmt@)-ic%wSDArcMEp}+ z+|iPo6=ySu=D2?u4&zmb)=|i-Y_G0%aV-wsumRCqEHkhLGXym0l6M~Pjk{vROrOLw 
zE!41JLssAR>t&bT&dHF`^Jzj()8qDaw(N_e^I#wl>@8C(8y7pioR4}X8W*bv_jyzq$e-L6Yzswzi0<>ScRB9 z@UwYyrsMY`^l;49UxU0Oz~-;y;E0tY%OowY6VfDhsqwUJM^@B}!n|Bj z0wV#;sY4aDd)xnsod@St?BeX5dgObP+gDI6s6;HDPbfhq5EN5u|Dc*vsi5!yY$@GU zXfnFZ=tSf&r&V93>*yJ^?9P+Nb^5~im6+75AN-q6Li7CJJR_L~W(LEhhcj15MHP3d zS!4tS^HDHudoBQL*o}j@qHV{}$nor8siPs%r=zOi;q*KWz)%p#Y8RP`5=n`Z5-ok@ zl5ZFH1Ul*${SstV^7AHyLj99!vwoYZP$$TI-sW_KpQj%lAm4T^wwWfAwg_*0mNHlr znd9?65Z*PHu`55u;Q@A0P;XY~xnPV|a&3++o;)iFhRu(Pj zkDG6_7Q?t@9g6kE*@aT5<~aN((Rz~j33ryL$&T;k95+DjQ3Erw6%&k}b(&zFOmHL|Nve&X{~e8Uy{2t3#ms^Hj1s|?SD3@sQV@Qc1-iT(AqWxLUtC{fsPqEwdqtDiFgf_zEzeQ?6}<$#g*jT7cc?Em|=SKs&XEvp(YuNdV|DE(|(3*Y+XBM)PQ+ zHByfZg}>68u2%=ofukITz!jgS{~^uV%G;Rj-S5+87HLBy%CEvjBeM?TqD+E=F3eQW z?7&%@mvTC1&tfwo=8Ep;vai(+CF5mMgWfR)l}BJt37gF+3<$X7f^-8sYOZdzw}e9e z7VPya3@5)mZQ4v{&Lka_`cmHaqRr?r3NM?X^8Xat;ZOES6hp#u(y7Eq#XnN1VKItn zXI&9=nj7+hzLM;q3Q=S)zE(f zuBT@-vH|atL-RJ@#0MXXCn3T!Kc)u%ncy_~_y4_KV+B9eC4 z))~a!=7PkS_`YNmY5ROClNM*}h1*uOiiy<@E0C z0}KHMR~uX&s}V>x%Yo_4VF|S{!G*1JMyZI3P_^%c~ z2!SSI&1jClZzcg&7?R>(nz(nc$8gf7=rP%#)0CSlQ&fHX>tvR`)Zaod@qae#u6(#H8-6l#{YJj9j3JizL!1)?RojTZ`DOJmxRmcG`bBN(hcRr^ky}nNA~+Tg|fFF`JUKu zewrF38W{|WKh;blu8=yk=XN$&ca^ArcNIE7nfCxP|M7J5g!s&JPrQ%84t!MW%4-ZSV0Uf2t_E1ltd54Xs~Q983Y2uL%`9yu#Tvl z5>mfOL)`I*D*ZKbGYZMf-Ku4()8~R(Of4V*vQ`A1kw2lMd_i2$3E@&#Ui6yXH)D=O z&IFzja@dDkNqGj61KD&I12&JQY#*^10VV1!>@~87Bc9EzyS7lQu6m4^E`Vb-bm1QO z<<}6^Lz*;U9SSE`naUnG*Bx=-?2^lY4HcQ=-~4X%`U*eUH7@zQ;dR92kGJhn(po)l z0yd@Vd;(n%n5K{%|2fG3x=rGArKMP~Cn@4$+m4FOT(IK-TyWeiO}jL>?j^%4S;d*QEPJ}S&bvwf2eBzOBx;|mA zkXoJWV6K1Nb@Dv|YKUQxJ{|F1Ft3I;Umv_(8XDBANC$78I&SJVER02mA8Z{1a{LEs zuCvge9rS-A(bEK!FxJ7oUcaJe7tT+P0b_&HL;j;hk0Gw8hz?Z>!pwsV*x%Z`L>Y&D zLOh>%GeOk61`Ch#jl(WziuO(dS!y~P)SETnzY6lqs3&KOg$sM!wniA&5c2`i6j1%*g_==6QPZ{H)Ng8aH@9&R%omNFo@9z z5Z@CP=@?vZ&bKKtO&76O{C)mHWhAp#Pp;qxTgdGCeOF<}ObgQ&qqvbdkI-ir;GXW% z3bmM^E*W4EgJU}>bCj&nZY7VH9w*Wc(1$v+vj}E~dQJY(Es|_ZPc62--%=hWa|ZR< z$>$$X)1I@8<3EYAJO+Krryt87KWZNX76YzS;Mx0?7SD;FLg z7PD!TMUdsXrLx&UPSn81xWQ}B??H{?)rVl_bnGu!RQ^&5S#s50+JKz!8gitSht5IQ zUj(sX^r094Q6TBJl+3ROF)wuEGX#fwhTc?Gvci2Iz$lh+Rwrue!OHOX#q0KOr1y4a zXL6VNuIFBZtYHkD(Xq9W1eUXC*-a?8vWZ>0J#u2B3-_b)g*7Cq|Av=9psjAtfZqSh zVid+eGE}3 zFe<0%y`L=r0CR_XDVZxdbV8{vR+Tc;2ych3P?OCVFKYQKWX<6g4S)wHD*i<$$ASNb z%6I}pgM%%VpOnb@M|Db-EJ*!@aCt$a`zEo zgQ2fkVd(~Xk&U-b!?%ai^rABZ?R&^l0RSnhDkZZ)$=T8~3WnUhh_tn2i}Y5zBA%9s z_hz@1M?=%mFI|U012%M(33bBGqX7|3C+SAiwlx3MkE4d?TGf%+z;JlM!H7>u8Hk*c zNX@k;Vmb^2;m{7dk#hEKpBv)1sSN`{D)?qFQ67#CZZL$psvjuZ6 z*6IY`Kpc^Q5iR&KUjw}2`!I<3N6aMrt5<$ za-y>Glw^m)>|DT-DZdk=$v3kJ#m-l5P-0<~3XAi$Dl+ z1CxES z&deJqy&acn(y%8k)6$R_MT+5U-%hxM$^`5f`ZSv0V8}3bVyxKnT^zw>E$+x;B4NAp z!Q8w7^{TBpj2acJ852psp7&(OS&l0rYHKJIrjR3}{~Or8wVHa#sUJtuEW}69r(XH~5|r)?0g_ za_f=z)?0DZo+~+xm}hiUR6}n$j~?&b`e=!pQC*b6a(UNum`Oc18@Oyy2<9z{XxQA{5&afj)V0r?k1TXe023 zyI?={sF<$-Jto6^g+ctCe!|29s&b~iWSQ+?3e$(#a~53z#dEPzpf_jbfKH{v(Fgx?CDz`ZdeAIiFkf5?qKgx##b{i-p?p9 z6H4*`&M=qk6AM&CLYbes_?Y~!GFp?i7l&na7-Tkk_kxUYgJ;Vb#|0a%Mi5^otdKub zdmb!Gg2eZ?zwHYCQusulfE^(5wgyYOA;ne2>1dJ-8p8v2 zI(-0zC;DAI_tmFKu=ZnaE~pPac3p7Y%XMRzVar6hgUNyFd0yu5a_ZF928yc_*e&j|CB- z%N`WZ9)G|lG!tWV&f|$}0&~ycsL?zaHvS?*ujIg4bA%pF3?ljAZushmKxjpLs7Jp^ z-l0#Uw5giRBIIRIE+&7t8q4kqsCD2Snt*k^uJhTR!D=Y0rvAW474xQ$(0&*T>|A;m zA0B1)Ij{eW04k87wo_qab%O#K{}O@|2K$p^{C3@{eRg;RdA1C0O?KJzPQ$1yy7Ni1 zC&zI&Fg#w!c1S4*Q>@+KLMMnRXHMZp$KYD;eDDAzgpByihnI!16<%Iu>Y(K(tN-!Z z_m0!`s9p?n|Lh}9`Qu%;t+^P;1Uxj^hHC;02+||GMqR%iBMq*}MxD=G#pka+mR@_Al$@`$W-U%Mb*Mk+grkF-r=j?Azic-;pK&*UHx?#2q zM5G>ajlyLFkY#m8G`D(?i+{P1nu{Zq@|Tep&13k#@Q*X{1iH-)OXjhc# z{ovKyY<{jo;u#^Dt~E%D-ygeo+rxtimJVz`M;7;4CxAut0|(1!k{R+Cf7sz-1{%%w 
zZ2CRb8*g+1Y+ZA`_$YqaemY$00UNMyoLmURdm4x6h<_cN#XNK5_erEegYp1>TvEsvzn0j>f(kz>S;84SQJ(6rv= zg^Nv1cl+C$o1*Vxb+Ck8xnYiTlL>Tatw^0f!Z8R`4)CinJe8rX8?aB7-g67O04bms z)I@A)0`i{Mz369=n})ZR^XA}HTrz1g44K;}hkfXfZL7?onpR&Lw_@s*onNAF9=BMR zMhj9PSp%iMk;jP8YY9eJ`S5@+r!;xOUUf_AU|+IcKXn((q%HlQFG%Vbzc*Fc5P%>l z-W!g3$G@W7us1w|6Fy=E?dc8Y{U5r0I>%Mt8_q4w?7})o(XAZ@2pR-1)K9;Y49w6@ znr2O4x-IFUV^t9yAt}L@)D64wiIDOQD@O)KEaEt^8zeErXKKsieS=NgxF=xbU7;%e zM;+|x`4+#`I!AHd21k65Gj?eRk-^BGY@?pQAVpIGyO|>QKit2Yta;(4b(BhYEXvl_ zRl0!(A11JW8hm*nzBup?m2?{Te*$z9Ya@l0r#H$BnNK7u``o$G8;(iOP;s9GW&UX6 z10m6~B4e|;-FoA}`Zj)bw9t>LM2YV?+{;+gfn-sBqQ>JsuO18AiAQo5q=wc>3=Eh*Qfa$K+=o0Zz^dwOUSPUdf zNwVC~GQ;3L;J)L*p}ZG;^PJZMv5LFR;xm*;^dB!eW?9RW1a3l}n4CR0c?MKMLjS86 zuDGI1AI7Px&7va$)-JhL;i8L2cDR5noKXWdhm?CS)ySL# z*XqQm!5ouA%~>j?KGh3l3#TC+@KxEkD2!Md+75jaXVMU0wk z7L*C=mbH21i$Ez9N$*2LpidOwwn9m99S>NX%O2#=J2^%jL~*$iz2Rf$La&hRBJJ70 z9)leCa&!uptY=7$`YKuruVKIbcX>irfh(E~{NLt2E(3)@SD=cm0DT>F%1!5i zS#2d>Rc*LUV;$@th>3kMTnCicGSM8i7xdt<$yjW?yqE!v$wt_w4KzAo%O68fLl$=?eM*TD@5Zj3%~PR@zl1X)E)4iA*|ft+DXsq@EZfo^2{{NILEaT#-J%(wLp9YzJ_+mz2a8` zfN%`7R)~j|+}8~JF5s!So~0vjVC>jcqGG!8Pgg z;8N@Dco#0rPwyb({d&XuneMX^I=)|bzW*IRre6Eqg407qT@W}>KiGvVKpD8J89FF2 znW^nPH><)u#~MdJSS5{iJa|<54TW=w%YDAZ-Rro!zq*(KgFa$oBJX+zKT35JbW~gV z5E|^a)4UD)Qq8pn&czWP$N#AN>!_IevyBg<#0#N5pOr zn)8xT;2drLNHr4n*fTC2rM=m?5M=D|deVIOIC$E86#@pB3w@p14P4s7TPI`D#ftTYgNHOP_5rggrCUD_ zt#EPq9VMlPwyxKoJ9Bx7`3suJ)NiMY#y+5+)0BU-?=v)S^WE`vRxxY;;9n@xCIMN%Zoz8MeCf}~{CrLa z$1`rv3{|xHZ(YnV$nc(Hg$k6YEQ%4&-)2d&zZ)(^fMdR_1hh2cJi$w4a=^I%@#1^w zVJ0A*)nP5>mkpK@W|8!zIlu{{WFih7!{*-pMwedY6qQhZ zPsK>qTe41-C>S0+N{qU{8Vdu2%kOo5cZIlJ7_*55_H;|ot=DE`0n=^(C zN#Jy_2#tv*IS|cE$OiScw_!T4EBxtCgl?+t%zFY>dpBJv40hSNx8ZA2z74{&ob`6O znYQGAzk!`)A*tW>%1iJv);O%PjpEh3SQj2UPQ6)Y+AeN`?ODL6_xXrhiX5OsILDUj1e~oI*2os9dwq^l z_c~4pJi5c2695}(`*+xVL&?(l;Qso*p&nE}at!0eQ|uxYaX`ix+`#s25U$4fWSN|E zDkcNvUU6SC`8|dk=w#E!Gso{yPUn4FGOTvZT7gqdy{%Z6D@?-P3fDd_G9(_^xH9BB z$Syd8T}G|w>+yz9Mg?lmWk?p+(V(xj>LVp`F=xkrMZ14(uG7(F`B-I#B=}Ih*HUT> zbD#}(Nb+wy3`ka!IU&fy&lx)1ifBX0^75$RWu4((=mD}kn4iCcyT!rT5*^j!ZidRe z8nwpP`({HtxDITq)5e>wATka#64Q8N$1z6Sa zrmOg;mSn3u1D>|T<<3?{7U|t4*mT?Pa?42As z5|wyO;jv>BYTm)r31c12u5yo-4z7>!OD4tqm~t^#PWy2E&m!2<6D3spIpXNTC`aJ~ zCf~u_rw@ty>aE7|9hVy~eTPau2o+t(tBf#fj6eM4DkYp5Q@7I`sn|ud4(E|R?NmEl zN+j6}Jh{xao~)OyFxDTOD?!X!kh`6}v*vueNKk-^Z<7!sZUotB3iPXEQPg_PrLmee={VHQ3<2brGU_6rKn~%eeu=O384vHlXSG5g7MoBLwj3;hw*a)Gj1@yDwg(a?c80a!`mq*0v2b$6&ze_EV+%@hK@~bP{UHFahtV8 z)bC_$gyCF*_l4hLUfCR?NMp)_PG`QoW=v_|0y#MsIY)gfScN<`uUM%)NfdGDyq!N> zcV@x-$qRaK(W;6U&R>$IM(;UlngI6{++Q2zvBA1Ny}~7YJXVO&WJSJn-8UTX`utv$ z*m5U^nk<2{g2WrX)&6f(TAU~x#^n3^s+97|7`>KIZYy{g0gVuV!H@XW41*`AIo%Jk z`4`%wW#KV>Nakxj(RX}If0QhQGzp7$E;{uRwrxlqh@7jicq&$@@+(mOTpVF~LGRqq6=X87^{Nti#= z4h#h;JOc9E0?jgBigP^u<}EwokYMq^Ff(mDLci&2#Q-`^vdfTZB|w44Hcj$t^;6eu z#NEYe`KGF2O%Xz5M5(vce6W}SQ8>kcMW^Erqa)ia^}YIr~xrC{>4_2^(JUpw#>QNj+rwg&JB zmX;R;ajvBNn0(y9%FR6iK4X1l0i0EjcsCJerN0m?0ACpZ>uS7ucLg{-cKz95oJq92ke8DiC(rHG}9ksv-3w_Wi{sKBcxmARSc$BB7?J@&bb8MYp1OWI#^uX^V1 z<$vxD^_u`fk!j+3X{>|aR0qo#pxmzx^5J(I`(xt;w1P=m$@?Ol+Lr-|lxz4CepAew zkGtfcJNuQVqBNt%0#7m|^)Ia=c$!|WyIdEK8YAX;HwfY3er>#-Hn2~2HGxl{y0hQN zLp|AGT2} z?ilc3^ds>S(V~BOqliM}Hs=6^8YR>Wq{C+?n`r8(LEAy2S&b*Eu~T9c=-8fMOk-Oe zRc$_3axoe$iZyte*b%%Le=LRTaP<2FMW%x&QIE&*Mo{xA4pp(g%^B+y=A6*a{;~xV zCJbhB4gLgXJw16*Nq$5KA~B!h2_liX}=# zQTvFoB7MreFQ551jj(kP3SLe{0_DPP@7dl=xrv;rvfl`q5TZ@AKM|Xw|8n}-tq7vL zv&@I%{Gq%D41MUl-X^XRAsz=SZtm+A0PSJ+79fD=8~33$x0+Hz6W+y|PIUj)yfbG|zvD_e;FA=M#p zuP9<;ms%c$oy8beatcih(La%qFU%h9_jx_YHu{8Lehrmd;XGCPqpv47quhOwi;iSM=8?^a-b5DMB2Uy#P$=s2H+U=& 
zsrX&djCg$l!=n?|&Y3J_IlK7O5sEA{n`F;z2-*DpcEiO<>Y!*d;SmFqB|^oh6?VV` zX+NlgmzEKfVi`q9=tbk!9Yd|aGm0@GKCYpWjv~FIr@U@GHIJif8q&-efoa?7GY#Lv zz-#(zEy+@+jLC|qaK1D5BV@J{3wYr61Q+mP)y};W%xKPrd z*H3MtEvw*UBun!!*(MlS8}OGPbW5*MTD4)x)ItcCDqPmu7iLdAQ^Q+(6q_~Cq6XZZ zMa2y=bC`T}fwK_3*%KR2D`{Q7S2290eJTt5vUG0lK66?c$jH zLXwVP##cSqm}YITY}1jm0TDZ%HnNuwmx2JML}*@PW|EudGW1P%tpG=Yn#)~jAd=B# zro>y!Bx4qV5stZ=g55M*Brb&+@k|&{@X7s~Ec4)fJ}Jz$1DF%=$h1W*fPD zlN{|%DJxlkK2!vRDBm})7`erM7K*1(4ekT*sk>Z+A-M<}b<}~z?bQ_tm!WUYPjcVp z`zL>gg$h~6smO#i1FTCKgl6ZViL~uqQ#F3f@bJ#O^*bpVJLorRVzmlN)aksav!p0y z7ENp2Q2&sD7?^=DpFYL^+^f?tIzxERtPco{Hxdw-6Mbk6+K@$v zz&W(#j=kS!$hFm*kODK&-iz&I(8Th)&)e9xYez-0kagRM*1BnC=7L4UB!0f=VS)I80t^X^ebfEWml0Md#V zlDN}J1|i3@2a84iQjE}a2}E!#Cggq#_0~elZLBhfx!Yk^!Vp7ae)^Vu@_gM{inSVR z49UruEqt!_iyK_LQ zcrG?mfsn%B>=LkLe&|ARFPLPOQ! z#+a`Gf(35>-)k-Qy*L02Pj0CTULm(!%K8I#z!;yDr%vO2-*aUbE}L&s7N)Y|&FCG} zF!l}?cMSVy5Y_ZG^bf^Ah#Lfop@K`bsx7U*A4i|qngGIPInM$<2YPx>{JUBJXY@k$(^zoV@sd92(BN&bh~CEWQjd|$tg{RBiZLa$ zkhi<9%UK7WG^_WZsE$rWv>P^RF0`Wi=!b2qdZKwtnqT~Q2_U7Qvk&9xyTB)0*Ev7- z``Mp{@3@q9#}8cbfKQ1F6_Z}TxoWMN4j~KmAai0KfDH*cxQGU?(}zcALAaur%``-rCF8$6FFQu}p2nNa_&aZdsi_@3#tpO8Pe%ZGm{p(o6uU`8 zkuSb?t-8fW>B{9FbZdpOGpK&B>{3o`r`F(K;>Z7`$>LHW6cStE-lb6po7 z?sXJE^6_Ti9S$p4zh#Atn7sf-9f(|;xgrP^kQW&?bMg4lcxb$u~t@$h^o$NcqIG^(eIUX7&$1Z!O#hzD+bxuSd2Zy{od3w+yC+2_5OMm@~Cr0|VCHMQ-6Mz>Qg`Ne1zKA5j|q9gw5Zwo^*t zh%rW;9HmBPFIqv1LE?7^uYlwSs|xyeM8Z1uxK0q?Ct^FG8A{y~j#fqu^R-rl@#Ce? z%*MNpeDaI>L|vVlhm!B@xK~`?Id36jeo|#ZM|X2Y2k+yIxLcnOdHrN3r(kFbaBM6E z-x#6bk;CeaJr3px-snw!&|&BRKnuOJqbWq6&sBA-V%-+_UhWyh~OTKgED5n(t*tr z7F&Q@025VQRe|XWIA#6*_G-L)e{=HriB}QoNXaFJU-y~!3YwU-ijP@NDY8%&Z@zI< zH-aS#J~~lJTSQ>Hd3qYaZB!E4o+tM@&QRcMKBJ!wDq&pljJW$~_qI3Z7}Ib?zwV zw=FrnF*x~C5|hJN=zT}gKKO<1CR3Kf*w}mHC^#mhQY@2U%?l-d-h|FwqCOoTgYJZ4 zwoFz#RLUlz!>v{yTV;gNee`{7{kD(-bB)7BC~8Ij0LQW3&wy-P|mTCy7AASxso-|I<3 zb^3ljfBgRYegEm<{dk}8I?r*B>%Q*m#`?TDI|O2C-ciMPT+1&!Jrl_Bc7PvHBI4E2wOJqM*Frx1+PWbCW8|)4c7&Gjz?Pw&Cm` z8NS!@m$=e!d86SnkllMGCK~i&%I=AQ(Asc4;r~?iTaIfj!*=G`CMpfVzs-Wv71`)v z{xVQz6L9j8D(UA;By3>_3@I7isz0sp9FtDp!tQ!>OsM+i7Aa>X`1yp;%iEAW+WMj9 z>1Ma?$3k~sP(Ygy99VfO0u^IDkOc-7oqNt5xUdstR5m*q&0xS=z2kerSTe4!`>roF zI)Xb4E}HwUs;&D1XR+fHL`s`JnCbZy-JWL(mdz~_EI*Haloko4Ao%66?@l~JEx?)} zK^UI8Xo9zi8#3zH!mLHp24FED2(yUD5-BABIV%Aw=d7qQG=a=@%Z6Pc413HCdRjd3 zy_3z9av9zQRnHQhiC#A_>g?NhtEIdig`bbeUWC6oCyDEwx-Znfv-pOcH|)?(l+1fgfC$|EtU6P@E`fFd0{`bcH;}o zvHenb_DS;*$wtl%h2ExU3!jSyi+Rh1P$BGUeWuWy(WS4{ci<>0^<%mXn$<1523{Yo zbG!I%-$b>OXkV#uGyZJGa>L`j0e`d+5-J1^d|F^Z9MhY}&W>Q-j#pE`7}&RX5V`r`3f0Vl_>u1gHz zJvrUUFkdy1kIU)QhYwl@4MJw_m{YQ`IW*$Ksq%B0yov1ZihGKN&PAw6qkxZ=y6vy6cnO!{I_t5elY?Y-Z}din|< zDi(Z*{y8-c>nyt!XW(UuZ`oN-1lU>=DPN~06~tIly3<9>PWk!2hC?%$thsRBlfeZKJUaG*B(LmQU$ zn%Mx-I>%&b@2(jNQ|=ybM;+gRR<5owDYvu<)vtKkgrInop0Y9WC-!2P|E_q0Gnk*V zfy5?o{yXlw)+~M$=ijHxJw-l)|0-h!9Tlg_`T)|2S~`QwrnK%Q8Xu*brwtpkKmKW0 zhFL9jSGG8?i%Qt_P;iFx-k~^v@maeEPDnj^Nl6n)sw*-@cbgNRS(l}1`p}6YFvc;M zn76hCNZeCRov03Wm1@VER^Occ)1pp2fMYaJJ$hp3{Xa8`9Uy`T4aF zT_*%xyL(+Nmiu*&*$%9RDw?Kj3QldSRQIfDr=(5wFt8@kCuRFu=z(o+^?ekx0lA<( zs4gmZK+3!il9NM|yO@v_!lkZYS(q>xSi!?xzN;H-S-bsSBxv#oY3S%2CT;1Cta+9! 
zMG&EZH`kgHfCA+QqZm|f%MaL6_r`)n_t$gV`Qvaj)r*PlN(p?yL&qTqy1wWW@}Nm# zTH_9VIJKmYWPje3-R`vX`{cS`iB-C$f64>DGC&AUiPC?W@9#FLMxIIJyhZ%c35#_9!g zQc>XP7I`ZjqjKc?4;Fj0RoJNzlJuWX-Ae{X*Ed^Cr40$SehEgtu4&;G-_<-+c(^yzdAR#wKw+cB=1tW82==wsjcMry z=Z)l&&`0+!uhqAzh&bv4B`TJ36w*dW#HMo>h4}f!MZ}zHB73uyjhvB zpRFH+LTWb9+c@l~)ms4kcFB*MscZ<2i2Y}w%bwoomx->S_1Rq1xJ^Nnz zUdt_V{xCVv7O=2xxODjb0|yExZv${hZOlbYv1m2z8Z!fXLmY-dKWp_-b$bpNSfa^{ zu43C{6a)RB0jXne>~LciN0n14vip|8aopUWHjCF;Sr!IN8(c57+_LkmhrI)IGI9vh z-J84MEX5zA07TITYPc%{ z9W&(z>fx5ZF4!%|Y&kp0a(477s#08uImnn~>}H14%7(OLGAxaw7IIWwD`50lSDVaF zL)GHx>bC;x(sP$o*F-WVcLGRvK6Ac03_U`zDV4=(*WBuDaMm2VPYuo=B#pD+UU#Lg zqx#Kz6h&cZbAuG~^k(|vjN1WruhX4h$+4vOR*e@<>rD6GwSw8hWyRs)zO~Pt_CkZs z&0*I)L*UVjJ z^$wdPummDNxPhx&>u#jhC*@cfOSMCYM1i4VW2$j%`ogqYqg@(mU?~zZG4=!X6aJW)ba`CM7C{l z%6eW4++b)gV0_lSa#kJSlUU|Mvs@mGR(qbTi(zPU7atwzZ~M*mL`;HxPi+@9m*O%% zHyl=ymYO<215?W{l@{R8`GtEz4dg9&l26yXwG|x#5|#)Vw7#|I@RdbXD%bkNZhakV znE*f$T%4Tc$+Y&TRjT($jNz0j^UQR1u){JM8EA^7*0gRR%0qEdIO*@-%8uy0{$>fy zwP01Yh?oY%QfM5`*%C|KBip$$+* zYNaLGrI7(vyUu+n9|upT+4ce)aWU@R-Fo04?cWKwnvA*w0qC(o>JrgNFrdijjd+g% z)NY8hrrP6ZO4j2Ni|^gXqZ5>@V>nx-&H^-*HFRm5Et~gH1?uvNWr&~ht_*b$qBS(w zwY<2JJi*-hjWx36R(3a$3LKbLnTu$FkRnQsKsKjN)tnMQaswl2la%<|on|{8)r^5B z&s%Ev(ZQ0eyeWojxVTKa10g-rkTX6lt0 zv*Z1bxx)<(WTtuBR=ci=)iX=3G>!f~6dS9l5{Gqcoxx%94XhdobT+F!j{M&(R0D3H z9!e}-J&;o(gE%1GQChhcsY z^L!Y(r{Am`)Z9?i1N4am9?W=7>Ekjk)bHdInu*5uG?Z7`D@YZJOz|pC@&|H}S)O}; z0;D#Ks-j!cQgZ{U7rq%ZRlXRgEYyck9bb2j#G_XfBFurBqQ+PYl%z1==3E}zZn1n6g$Wwl zr9&;Gs;dXixH9if9EI@sK{jpWt)hM=(6XbluaE=h(YN5DdE`e3LdlWGsWAHx$L6>DoW8?wp>1Xc^CE|$GLB)Y-cDGG{4hd#tC ztXQ9%12LBPeb(Ozq|<2yyh$p{@&U~3`TS@xNf+dgC8k@j<2j@ddsj6QZXoElb`j-E z1rK?vF6$pVmGL$+Z10yq2@-Y&T3A%+>((0_PSy>hAzIaN()+RZru8|BM2ie|KjnQ>Ug(GH>i)EgB z<6c`2JdK&Z;2)aKB!ycoho`BkS>xWJb%*{NX-%^0TGi%giu;ywe{sIqMAo0 z6rFGb(}}bzSN_LvwIkRn=$wp3;y3TEf`>=tL4s&1YzE{O% zN1b%qL5+q=AMxB|HYjo)s+C|g>BR<2`LuV%iWNo}sPx%|op0JCe9=CDsv8otj!Q!^ zE_}|qgsIaVlMF~X2@lhJ5=xS?h>x!s^_~d21~*MEM-T>k0+L;0tI+XSi}iV2U&e@r z(r@1Av=zKj-j_qoGiB}W!W(4*PQpDf69*}WU`#=Ra#8y{pZKOK*xXQledm&N1@`cj z?+R~`1aw#E*xALObpjW@@3YR83v~2461h2*UO0Y6^cX;d;kS@EhAyu2S_SI99Jm2F zo1-EgIiYi;0x*$Kv2+Ct<;#vdIPY(QyuUr!NJMUD3Cn+>b~?o)KhTtijzaB|fF;6% zk#A${m7Ym0=~uBfJw1_!Ck!2?r)AV9Y8u1ZlG*6;{b`hWkMb!{o(x4xAp@E1RJ24n z)!Gk=eFDK*4=sGVg3-ULJ$Z)9nGB9FfNe1`YJQg5gP?e{%u@YYu#TnY%Y>}I)V{z| zwIR7z?F>*SfX)!bT}o%%%1h7NFtkchpPQ8!t~ZphtUimper(HYdoWC2nqB4O6X*Gz1n{SuC% zMC?XF*#*()XMY4OCM%s_LZ5`$1+q+tRG)2Qmr0ZxavHM^FeEoF{2}k5tM}< zL(Ey8g2sWsBrMuD(iVYDs8F3YbEfPp|K8aJ3~g5GYcdC3g+i@%#?JLmJ?2{1$BwN^dbz=PaLs# zhf>cfa|VokK5mCGMk&klVqD0d>7Jbd+0s{25?0bbA@*@i_I6u;4GnVlfpEPrUCssZ zC{s14>J@s^C>-`k<@}G_SzHRL=mrXf(zS{4@+rdnZw?#Re~eN;z%B3=P+jO^H=D0$ zbSca^F->w1F=bbfaWyWysl z&I^YbF?JbyQ=~J1vc8C-vh*knI*}po8Y2FAKq1PF14)WJ9M_YMBWV{51O?|B-N{|i z>ve4k&sI`ZB~;LRd|5Hc995>D7>9U3w9brK9#B_V8SSdpZH>{7UP6&77YT2!@^16~ zzz0x91)Ncl3a;-kbvNzm-2c_%?ViIzgrBk<9!aBE@-ztZwba#m8`yGQ9HKDT+mNq! zA3;_dj%etsJ9S7l$zb*R%ncR_YG8^~3b^#2-_&keT~mxWr@F%|U8bnL&VOjYbD(yb zO&gh&lmrxa_u3khuV7W{pq!Sp+31;CdTZRHr&v7~?Zac4tX?oN!fhNOBAuauLfmxe z+01>oo_-C7seq8gRKgKCuakU=_^pe!Wrn)b}W%@?ewgurhY>A zK)288p~++`U3pe8NmMRi3cB+V7tCB5l($3p`5*%~pD#JtWWE>5ZO(a-997|ufj5T3 zV==<3y7LU??oX=>#Y&X#Q2;v>xZ|5GE*HM!sG~o03vR(AG#KpZ>q^ayRC?4e-O*|M zjbZW)gqk({(bjCFH3tiY$-~FKt_LF)7vDD8OZWr3>(0xW7X1`0|6aGgy za{aH))=IJ_2^w7R3r8nt#_lJ0-?UrVF5{3noJ3&N+;8&H+B(+lDg0Q5>Z%QY-QU2# zA&xmL-vTBcW}yX4&az_dh2~!004Nh34Bu+qoK8>6ubf`%)dddYu%$|&n_$Ns)rhm! 
[binary image data omitted]
-```
-SERVICE_HOSTNAME=$(kubectl get inferenceservice <deployment_name> -o jsonpath='{.status.url}' | cut -d "/" -f 3)
-```
-#### Example:
-```
-SERVICE_HOSTNAME=$(kubectl get inferenceservice llm-deploy -o jsonpath='{.status.url}' | cut -d "/" -f 3)
-```
-
-### Curl request to get inference
-In the next step, inference can be run against the deployed model.
-The following is the template command for inferencing with a JSON file:
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/{model_name}/infer -d @{input_file_path}
-```
-#### Examples:
-Curl request for the MPT-7B model:
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/mpt_7b/infer -d @$WORK_DIR/data/qa/sample_test1.json
-```
-Curl request for the Falcon-7B model:
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/falcon_7b/infer -d @$WORK_DIR/data/summarize/sample_test1.json
-```
-Curl request for the Llama2-7B model:
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/llama2_7b/infer -d @$WORK_DIR/data/translate/sample_test1.json
-```
-
-### Input data format
-Input data should be in **JSON** format. The input should be a '.json' file containing the prompt in the format below:
-```
-{
-  "id": "42",
-  "inputs": [
-    {
-      "name": "input0",
-      "shape": [-1],
-      "datatype": "BYTES",
-      "data": ["Capital of India?"]
-    }
-  ]
-}
-```
\ No newline at end of file
diff --git a/docs/gpt-in-a-box/kubernetes/v0.1/inference_server.md b/docs/gpt-in-a-box/kubernetes/v0.1/inference_server.md
deleted file mode 100644
index 3ea3166d..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.1/inference_server.md
+++ /dev/null
@@ -1,45 +0,0 @@
-## Start and run Kubeflow Serving
-
-Run the following command to start Kubeflow serving and run inference on the given input:
-```
-bash $WORK_DIR/llm/run.sh -n <model_name> -g <num_gpus> -f <nfs_address_with_share_path> -m <mount_path> -e <deployment_name> [OPTIONAL -d <input_data_path> -v <repo_version> -t <hf_token>]
-```
-
-* **n**: Name of the model
-* **d**: Absolute path of the input data folder (optional)
-* **g**: Number of GPUs to be used to execute (set 0 to use CPU)
-* **f**: NFS server address with share path information
-* **m**: Mount path to your NFS server to be used in the kube PV, where the model files and model archive file will be stored
-* **e**: Name of the deployment metadata
-* **v**: Commit ID of the model's repo from HuggingFace (optional; if not provided, the default set in model_config will be used)
-* **t**: Your HuggingFace token. Needed for the LLAMA(2) model.
-
-The available LLM model names are mpt_7b (mosaicml/mpt_7b), falcon_7b (tiiuae/falcon-7b), llama2_7b (meta-llama/Llama-2-7b-hf).
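For reference, the curl inference request shown above maps directly to a short Python client. This is an editorial sketch, not part of the original docs: it assumes the `requests` package is installed and that `INGRESS_HOST`, `INGRESS_PORT` and `SERVICE_HOSTNAME` are exported as in the hostname step above.

```
import os
import requests

ingress_host = os.environ["INGRESS_HOST"]
ingress_port = os.environ["INGRESS_PORT"]
service_hostname = os.environ["SERVICE_HOSTNAME"]

# Same payload shape as the "Input data format" section above.
payload = {
    "id": "42",
    "inputs": [
        {
            "name": "input0",
            "shape": [-1],
            "datatype": "BYTES",
            "data": ["Capital of India?"],
        }
    ],
}

# Equivalent of the curl request against /v2/models/mpt_7b/infer,
# with the Host header routing through the Istio ingress.
resp = requests.post(
    f"http://{ingress_host}:{ingress_port}/v2/models/mpt_7b/infer",
    headers={"Host": service_hostname},
    json=payload,
    timeout=120,
)
resp.raise_for_status()
print(resp.json())
```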
-Should print "Inference Run Successful" as a message once the Inference Server has successfully started. - -### Examples -The following are example commands to start the Inference Server. - -For 1 GPU Inference with official MPT-7B model and keep inference server alive: -``` -bash $WORK_DIR/llm/run.sh -n mpt_7b -d data/translate -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm -``` -For 1 GPU Inference with official Falcon-7B model and keep inference server alive: -``` -bash $WORK_DIR/llm/run.sh -n falcon_7b -d data/qa -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm -``` -For 1 GPU Inference with official Llama2-7B model and keep inference server alive: -``` -bash $WORK_DIR/llm/run.sh -n llama2_7b -d data/summarize -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm -t -``` - -### Cleanup Inference deployment - -Run the following command to stop the inference server and unmount PV and PVC. -``` -python3 $WORK_DIR/llm/cleanup.py --deploy_name -``` -Example: -``` -python3 $WORK_DIR/llm/cleanup.py --deploy_name llm-deploy -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/custom_model.md b/docs/gpt-in-a-box/kubernetes/v0.2/custom_model.md deleted file mode 100644 index 57096966..00000000 --- a/docs/gpt-in-a-box/kubernetes/v0.2/custom_model.md +++ /dev/null @@ -1,33 +0,0 @@ -# Custom Model Support -In some cases you may want to use a custom model, e.g. a custom fine-tuned model. We provide the capability to generate a MAR file with custom models and start an inference server using Kubeflow serving.
-
-## Generate Model Archive File for Custom Models
-
-!!! note
-    The model files should be placed in an NFS share accessible by the Nutanix package. This directory will be passed to the --model_path argument. You'll also need to provide the --output path where you want the model archive export to be stored.
-
-To generate the MAR file, run the following:
-```
-python3 $WORK_DIR/llm/generate.py --skip_download [--repo_version <repo_version> --handler <handler_path>] --model_name <model_name> --model_path <model_path> --output <output_path>
-```
-
-* **skip_download**: Set this flag to skip downloading the model files; it must be set for custom models
-* **model_name**: Name of the custom model
-* **repo_version**: Any model version, defaults to "1.0" (optional)
-* **model_path**: Absolute path of the custom model files (should be non-empty)
-* **output**: Mount path to your NFS server to be used in the kube PV, where config.properties and the model archive file will be stored
-* **handler**: Path to a custom handler, defaults to llm/handler.py (optional)
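Before invoking generate.py with --skip_download, the path constraints in the list above can be verified up front. An editorial sketch; the NFS paths here are hypothetical placeholders for your own mount layout.

```
from pathlib import Path

# Hypothetical NFS locations; substitute your own layout.
model_path = Path("/mnt/llm/custom_model/model_files")
output = Path("/mnt/llm")

# --model_path must be a non-empty directory for custom models.
if not (model_path.is_dir() and any(model_path.iterdir())):
    raise SystemExit(f"--model_path {model_path} must be a non-empty directory")

# --output is the NFS mount path used by the kube PV.
if not output.is_dir():
    raise SystemExit(f"--output {output} must be an existing mount path")

print(f"ok: {model_path} -> {output}")
```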
-
-## Start Inference Server with Custom Model Archive File
-Run the following command to start Kubeflow serving and run inference on the given input with a custom MAR file:
-```
-bash $WORK_DIR/llm/run.sh -n <model_name> -g <num_gpus> -f <nfs_address_with_share_path> -m <mount_path> -e <deployment_name> [OPTIONAL -d <input_data_path>]
-```
-
-* **n**: Name of the custom model; this name must not be in model_config
-* **d**: Absolute path of the input data folder (optional)
-* **g**: Number of GPUs to be used to execute (set 0 to use CPU)
-* **f**: NFS server address with share path information
-* **m**: Mount path to your NFS server to be used in the kube PV, where the model files and model archive file will be stored
-* **e**: Name of the deployment metadata
-
diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/generating_mar.md b/docs/gpt-in-a-box/kubernetes/v0.2/generating_mar.md
deleted file mode 100644
index 1e8ccd68..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.2/generating_mar.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Generate PyTorch Model Archive File
-We will download the model files and generate a Model Archive file for the desired LLM, which will be used by TorchServe to load the model. Find out more about Torch Model Archiver [here](https://github.com/pytorch/serve/blob/master/model-archiver/README.md).
-
-Run the following command to download the model files and generate the MAR file:
-```
-python3 $WORK_DIR/llm/generate.py [--hf_token <hf_token> --repo_version <repo_version>] --model_name <model_name> --output <output_path>
-```
-
-* **model_name**: Name of a [validated model](validated_models.md)
-* **output**: Mount path to your NFS server to be used in the kube PV, where the model files and model archive file will be stored
-* **repo_version**: Commit ID of the model's HuggingFace repository (optional; if not provided, the default set in model_config will be used)
-* **hf_token**: Your HuggingFace token. Needed to download LLAMA(2) models. (It can alternatively be set using the environment variable 'HF_TOKEN')
-
-### Examples
-The following are example commands to generate the model archive file.
-
-Download MPT-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/generate.py --model_name mpt_7b --output /mnt/llm
-```
-Download Falcon-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/generate.py --model_name falcon_7b --output /mnt/llm
-```
-Download Llama2-7B model files and generate a model archive for it:
-```
-python3 $WORK_DIR/llm/generate.py --model_name llama2_7b --output /mnt/llm --hf_token <hf_token>
-```
diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md b/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md
deleted file mode 100644
index e3c52af2..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.2/getting_started.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Getting Started
-This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Kubernetes Cluster. You can find the open source repository for the K8s version [here](https://github.com/nutanix/nai-llm-k8s).
-
-## Setup
-
-Inference experiments are done on a single NKE Cluster with Kubernetes version 1.25.6-0. The NKE Cluster has 3 non-GPU worker nodes with 12 vCPUs, 16G memory and 120 GB storage each. The cluster includes at least 1 GPU worker node with 12 vCPUs, 40G memory, 120 GB storage and 1 A100-40G GPU passthrough.
-
-!!! note
-    Tested with Python 3.10. A Python virtual environment is preferred to manage dependencies.
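The three generate.py examples above differ only in the model name and, for Llama2, the HuggingFace token. An editorial sketch that batches them, assuming `WORK_DIR` is exported and, for llama2_7b, that `HF_TOKEN` is set in the environment as the docs allow:

```
import os
import subprocess

WORK_DIR = os.environ["WORK_DIR"]

for model in ("mpt_7b", "falcon_7b", "llama2_7b"):
    # generate.py reads HF_TOKEN from the environment for gated models,
    # so no --hf_token flag is passed here.
    subprocess.run(
        ["python3", f"{WORK_DIR}/llm/generate.py",
         "--model_name", model, "--output", "/mnt/llm"],
        check=True,
    )
```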
-
-### Spec
-**Jump node:**
-OS: 22.04
-Resources: 1 VM with 8 CPUs, 16G memory and 300 GB storage
-
-**NKE:**
-NKE Version: 2.8
-K8s version: 1.25.6-0
-Resources: 3 CPU nodes with 12 vCPUs, 16G memory and 120 GB storage.
-           At least 1 GPU node with 12 vCPUs, 40G memory and 120 GB storage (1 A100-40G GPU passthrough)
-
-**NFS Server:**
-Resources: 3 FSVMs with 4 vCPUs, 12 GB memory and 1 TB storage
-
-
-| Software Dependency Matrix (Installed) | Version |
-| --- | --- |
-| Istio | 1.17.2 |
-| Knative serving | 1.10.1 |
-| Cert manager (Jetstack) | 1.3.0 |
-| Kserve | 0.11.1 |
-
-### Jump machine setup
-All commands are executed inside the jump machine.
-The prerequisites are kubectl and helm. Both are required to orchestrate and set up the necessary items in the NKE cluster.
-
-* [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
-* [helm](https://helm.sh/docs/intro/install/)
-
-Have an NFS share mounted on your jump machine at a specific location. This mount location must be supplied as a parameter to the execution scripts.
-
-Command to mount the NFS share to a local folder:
-```
-mount -t nfs <nfs_server_address>:<nfs_share_path> <local_mount_path>
-```
-![Screenshot of a Jump Machine Setup.](image1.png)
-
-
-**Follow the steps below to install the necessary prerequisites.**
-
-### Download and set up KubeConfig
-Download and set up KubeConfig by following the steps outlined in [Downloading the Kubeconfig](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Kubernetes-Engine-v2_5:top-download-kubeconfig-t.html) on the Nutanix Support Portal.
-
-### Configure Nvidia Driver in the cluster using helm commands
-For NKE 2.8, run the following commands as per the [official documentation](https://portal.nutanix.com/page/documents/details?targetId=Release-Notes-Nutanix-Kubernetes-Engine-v2_8:top-validated-config-r.html):
-```
-helm repo add nvidia https://nvidia.github.io/gpu-operator && helm repo update
-helm install --wait -n gpu-operator --create-namespace gpu-operator nvidia/gpu-operator --version=v23.3.1 --set toolkit.version=v1.13.1-centos7
-```
-
-For NKE 2.9, refer to the [official documentation](https://portal.nutanix.com/page/documents/details?targetId=Release-Notes-Nutanix-Kubernetes-Engine-v2_9:top-validated-config-r.html) for the validated config.
-
-### Download nutanix package and install python libraries
-Download the **v0.2.2** release version from [NAI-LLM-K8s Releases](https://github.com/nutanix/nai-llm-k8s/releases/tag/v0.2.2) and untar the release. Set the working directory to the root folder containing the extracted release.
-```
-export WORK_DIR=absolute_path_to_empty_release_directory
-mkdir $WORK_DIR
-tar -xvf <release_tarball> -C $WORK_DIR --strip-components=1
-```
-
-### Kubeflow serving installation into the cluster
-```
-curl -s "https://raw.githubusercontent.com/kserve/kserve/v0.11.1/hack/quick_install.sh" | bash
-```
-Now we have our cluster ready for inference.
-
-### Install pip3
-```
-sudo apt-get install python3-pip
-```
-
-### Install required packages
-```
-pip install -r $WORK_DIR/llm/requirements.txt
-```
diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/huggingface_model.md b/docs/gpt-in-a-box/kubernetes/v0.2/huggingface_model.md
deleted file mode 100644
index 9c2f5be6..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.2/huggingface_model.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# HuggingFace Model Support
-!!! Note
-    To start the inference server for the [**Validated Models**](validated_models.md), refer to the [**Deploying Inference Server**](inference_server.md) documentation.
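The jump-machine prerequisites from the setup steps above can be checked quickly before proceeding. An editorial sketch, not part of the original docs:

```
import shutil
import subprocess

# kubectl and helm are the documented jump-machine prerequisites.
for tool in ("kubectl", "helm"):
    if shutil.which(tool) is None:
        raise SystemExit(f"{tool} not found on PATH; see the jump machine setup above")

# Confirms the downloaded kubeconfig can reach the NKE cluster.
subprocess.run(["kubectl", "get", "nodes"], check=True)
```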
-
-We provide the capability to download model files from any HuggingFace repository and generate a MAR file to start an inference server using Kubeflow serving.
-To start the Inference Server for any other HuggingFace model, follow the steps below.
-
-## Generate Model Archive File for HuggingFace Models
-Run the following command to download the HuggingFace model files and generate the Model Archive File (MAR):
-```
-python3 $WORK_DIR/llm/generate.py [--hf_token <hf_token> --repo_version <repo_version> --handler <handler_path>] --model_name <model_name> --repo_id <repo_id> --model_path <model_path> --output <output_path>
-```
-
-* **model_name**: Name of the HuggingFace model
-* **repo_id**: HuggingFace repository ID of the model
-* **repo_version**: Commit ID of the model's HuggingFace repository, defaults to the latest HuggingFace commit ID (optional)
-* **model_path**: Absolute path of the custom model files (should be empty)
-* **output**: Mount path to your NFS server to be used in the kube PV, where config.properties and the model archive file will be stored
-* **handler**: Path to a custom handler, defaults to llm/handler.py (optional)
-* **hf_token**: Your HuggingFace token. Needed to download and verify LLAMA(2) models.
-
-### Example
-Download model files and generate a model archive for codellama/CodeLlama-7b-hf:
-```
-python3 $WORK_DIR/llm/generate.py --model_name codellama_7b_hf --repo_id codellama/CodeLlama-7b-hf --model_path /models/codellama_7b_hf/model_files --output /mnt/llm
-```
-
-## Start Inference Server with HuggingFace Model Archive File
-Run the following command to start Kubeflow serving and run inference on the given input with a custom MAR file:
-```
-bash $WORK_DIR/llm/run.sh -n <model_name> -g <num_gpus> -f <nfs_address_with_share_path> -m <mount_path> -e <deployment_name> [OPTIONAL -d <input_data_path>]
-```
-
-* **n**: Name of the HuggingFace model
-* **d**: Absolute path of the input data folder (optional)
-* **g**: Number of GPUs to be used to execute (set 0 to use CPU)
-* **f**: NFS server address with share path information
-* **m**: Mount path to your NFS server to be used in the kube PV, where the model files and model archive file will be stored
-* **e**: Name of the deployment metadata
-
-### Example
-To start the Inference Server with codellama/CodeLlama-7b-hf:
-```
-bash $WORK_DIR/llm/run.sh -n codellama_7b_hf -d data/qa -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm
-```
diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/image1.png b/docs/gpt-in-a-box/kubernetes/v0.2/image1.png
deleted file mode 100644
index 5be8e71b1c19f2f9c5e566701a6dc2800da2cbd5..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 150201
[binary image data omitted]
zQZYe6Lh8UbS9{F)ld*Hfk1n32*JCRKzRfw-uR~xnPw7~|VQ8!H#Z-l*H%D?k@mub( zzMN%Lc-ymi;UD503s@tnri}+pu8>%5pHsm;TeV#|aeoXj0=s4ckH=J}=;i@BWVJJP zFDj2yvtC&3m`+lB`0C30-~efPWTl9WHw+nX+{A8Vyf0QZCpVv6e@Momou=`053^VG zH0xJ+cnZxv*P(B&0%QL1{qvL5_0`pp@Dw<`sZ-iW93k@Jn}`=>wDtkRJGNiMwheI| zHa@{V3X3n!-BRyLJBbAB13}N89R(u{%`_^kcgM7+UVKvI*!MhJQ<3O9smRM=utT8} zBrsT>mJ(OX*5pf#kvi#@KTr4Mr<}V*cKwa^9vBb#eK`oK*WTUUo4U?UDt1wk`1@`` z^2tbh{Va@8a-WFvfXQfcqO+adoqco>-V38D^WQ#R>_|I=3{;hb0xL=`W-LzGn@PJw zY`%XP8)r+lsbq|dKQ2867U|+)9s`ybVQVqnL{$V^h?Db}l^ZNZ-tt``#4t302mOBM zh?#(HH1b;_F^NhXcD}l&O$bfit@FaXDnQ8YE2nmT`$&SQ!b{|d$DFwVn?57mYXC9U z83U3O?q9>>JixcnC3>V2dn5c`5DhraI9^^>ES6$7z(x*fpG5)D{3u>y2F%uFm|>9e zVZgL^zh>xbUcGwNPlFW>+}kVf$+!%1Uo_)%h-^9XP&K5>dupHRfm;YhVBz&g=iFYLH1!R$d6Pf1uJKB8L6e<($9_qbE>mGa`CyyVw7bZ8 zkWW*3Dbl3GrDkky4<#cfgus8@GP; z;=hKwKhwBaKjB!9TZTA!ZEmPWW=acAH?ip-;*&$g zxyT!VcXSD%meY((x;KL;7Rw|Fh3Q^B*qpY9NA6k>`tWkr&pHm z16uAT@>m{eXSOM~sNL7OiJ$e{IiDJ0g#9~{5mU6V4jK&HHU-;cP18(VKHCUcp2?z( z|3Bm-qAjM~QsbbpSNiWlv_w)hw` z=-)Nweh_dgII=&_1QFw;k_E4_S~^b)Uggrzr^l?9U&xUp3j40l6kAneN&ei2dtyOv zt-jPR&-7Jjlm%?AB}oQz{ezb}ig>A#w$l)wUdVwfx2n0h+f2y}ypUm4fIn$d*a;*O z2Oa6gd1R!wQy=CvIz@3z?7#+goalxJaniD&!rm!b*ORl%IR}9sRhj86^lzK)&XZpZ zv)EaFxqkGnJ(Q6U>b!n%85O*SbARMKQRAT^$3o~pU9vp{$*SfH@QpC7!#TT-i`YCG zXR4vyd1w|58GF>NJqLZVM6SFeo#-v(jwC)=3xGelA76;v1?S5l5V+-MJ5XNQ6*Psf zzI*U^SovqctMEb=QZ(c(G!;`_#-F2jV>hT*<42dB0!~AB`bmzk#5C*Gc4gO(-FX3f(o{2z~2_v*_ zVzlje^K@r~+jaFhLZ~K5~J>cl%fyI4p%{)!;k&ofs+(oEc_5ixIa>m;z8(VIdpC3lP&mK4+#vEpZ~|X^y!slvU}$wD zm-Z{Su_m))>)#&oe=i+N$WN6w0_a3a^(ggXY(1o^<*IR#7!9FopRk9CYzut;6s)8; ziI)^a;>TP7qj&cdImXu2$gYhi-vHnLDULB2NhIItSin)goP2>IC%K(}+`p$EV=#LMNL_^r}B270b zee2L)Lj4ch4Ms`gf}ZoDMLdNCl8cYVI{PrKBFtXTTdRMYWy5aB(WNIkGNQY*CjDyw zF;XnV2uKlGNSKj6Ox&+4`0)v>9E$V-9XC5*zoqUQn`=u}6KyFZ<1y?z2M#fGAn#W8 z3HmJBOyJAT{KM&{(u7%iy_-J@`<;M|8A+4{rV5Gotj;$=jv~uK6@>Gcs)xWh<63|a zau6z%J~rKdD3en6iHK8xgXCCW02cr!AY69K zz^K@1hVCRgd+nIUl~Y6=g&d0$)L{k=z(5#+AltH}r2Nx!50Ubjl(0Z3 zoa?QDVEf~#zS-V>78`nEh6ou16k)tvV;F!jOT-eUFGlml>qme`Pz6761xZk-$aCeu zW~|_lF#8Vgmmo1iJy$~hAJD>o{>Uu^50(`j{TY0$yajCMWr$5VEY{5lPXVCb1f&MA z#3E=f4ZAXA5K_wk&JRm?OzL2j&JoYcy0yVMBuv$3!qP>_tq|~58>{oorl)n^oC2Qo z9i)Nq3v%>bN~VbOclLSD*mh-`UIv8D5)#b!_rdLqHTOS!?4BSYtNKr2Op;P-2k3L)6)HE5Q(D>_4jm=(tr4T+u1W zZ_WHWvy3?)s(z%}8KN>8DBUR7-EK=~gxUmi-fb)5ehXQUzk6A%KjL8XqZ$A+OCdyQ zjeSY1xa(Dfw*bQMzz)9}#Im;`PC^@Kgps^rgqi}!!}zMTg@VWnQRro)@Y!r6L<2k? 
z%N!gml^nAQskKOcb2C73f6kg}hmBKwKs(<%GSQyK24u@QMmp1-u=w{uQvCu7;c-=P z1MI&ad4Wyq`O&8MsXRScek;fe#gW{eoYas7Qu`aj?{xGgUVZxy@}kNDi%<6Qr5#Cz&E4Zx?Dkq`R}7nY6F{^cHE$|tvzMDs-(d4UCtG~t)*#fsV%^)1#GC)yx44UgB$ zHZc@)>Ob0luJstv=~4sSW*pxCX%41aRt;zPmFG+^Gxv`#(VAh55@^<);gOR99diok zx>t1xz-#jX=X>HPz3I;4{>P3KH?Cg(mlj~hn5asT-6``7VI(0{C-d&VFkUPch1HN7D9W2T{{TiO0^CtXS*rAZ9_N3rf&(|?FMTOiSIJpkpf}f7 zq93y=`Fv<`@dDg#v9tKMm05t(OgfR1_7j-AUsCn53i4^jL|^vqGI(pA=sV}YN$8?s zcOxX+M!g&e@tA?ffPoD=m}Yh+x5{-X*Wz63qb_a3VyAeWETfKQy>tNnzn>VnMaVRi zW_yL*NR!SFO{678Iah6Vj+lVo;f2gMWZ!;vh+pKlljVTvy#`T%`K}m(D&h2qQ^pGt zwH~S{Si>u&JO?0nXCC4-eEVv8prX&x4H)V}6!-r?=`X2UTrhycnI#i}kEFbBlzL1>)o~!T2E(`4=93lQ?AVDLOJBo{N_<#)3ue((B>mS z$LG$8h_)%OX*JLXH%%oS`H^$QcDV=Ah=L_FTlaT(8sgol3+h z!7i4UH~Xa ze!0TFu4NrbhJBHRP1vkXf-H}P>Lh>=s+x9QBI|&41b5_ER-k@>1RmHC zS_I7<0m^Ax^y=;3&&S`E4aO&%`x;q~+knQz73SO#@cJJP<&Sw7Bkq%rmj)c-pIQl) z4fyQ)z|f;7e*bf<+_k;1l$}0;Mi6A=AtoN=*|C@t0COp+rSQb2-mM3JSETmk1C`vr z6gT97Fzq>W)(gbyM^uK0V=FWZfqLx*XkeCiJ$!@s@q4fe@}!!@zfA&WQbY$fIMXI1 zNcx9phUF~j&#`8HNF`h~mRnxRM@V`9#HyREOWgQ=ufGhVxsR5U1@?ZgvTW;DP7)W9 zEHO7?2ZlTtc1<910;D?U5u)>u=UGLTadgc-EWddr53fMKd}WA2X^o7`_lqsfVb|!+vN;wX@hcU$n{WCa6ph$rEddrG=88V z_DVicW}qT*IhA$TlGy0(k-{n}8?e>FD$XCuRYPrjf1+7`JaA)JWhA#gpBcLp ztqdCB+cwswa-4Qe;>(KN$6HPe+}eQRR|-T|r)HpLHk+6-=AFbFjvvrNWuBV_#MUnY z2VzR;(N+n52BV=3pi1 zw|&?pdW1+`E*@VuD{?R_bsY)rOD3(}QuWCWTp^g!OZyb?nBsI6s}g+)60B4jAQAA0 zSGb}^TCG(wu1*XbL%1WYCi#)#3R{u_Tf(_C(HD(PLm7l)JCk2*^7@FlnqXQFq1L%o zHZ1@~?=g2Ex&|ME8S3pWTwEO|2>UdF9Q=CM8oQ!|(Q4a|E*G6hXBRI8A zR$Xs>qsxutLh~W^zNFOwf|n&oVHhXm($t6rRAU6b`*_DNqFWIRg`YZ3$j4A%ck=wP zW(=kG@Vh&gfhud)7IzyBK`t!zj1~LYBsqJuiXA1^sQi%%T*O`S1Seuy z&3|AFMC(P>gw7^$K}sS}eGi zk)u;Hp||kMxLTdp#;WTG!8A+1#N`SU+QI<+h_YNsvPbp5QlasNYF3Kk!^2H^jgSJo z(MkX2mQc05Ri=}n*J!9&p`UHSl^C#=KJPof7b_Yv0ED3Hk$Fwo%~PP9IV64EsINYP z?O|R3BHM`M)Ot6zTCQ8-_v>w@X_;PUjyXSYY+`FAo-@z$9!h|htSwLQ9;Hv#bVd+w z(@4$}_KS8QJz9V#tQ)KVr~h1NmOW;l>ERyAt@GM~bkzIjcR}fcU6qS~U?^37e*Y{j zGiwz1^|e+T7)4;HBa{n%LP_D2mxC+CRd55f>o3eX#l3&NGO6&_ z6yJpWR$Y23e2cp@&%Zo>CuAh@AuZR{365G(ROfbrl=oYxr|3a~ZLzqpvCu5}-ou$` z$_-Ah@SE|b!I{2dHKaTS%yI4PFB%w-gncK^FZjbBPzaJ4GcI;Ar5C(`B^~;|E|Dy^PkOCYK^DVEgN1_0y-(qIXl2s^{;1t-CRwXDnUF! 
z%<=jBVC940a3~F|=r%g;$<-SGUzNx04cSlXTC26lsdlDP+(O5jZw#m@4IQQ@!B278 zx(|cj;OyJ=OD2262w{5fL3KpG;9ijLJSda%UIs&m1AgEFQpD{2&}tDQWEGMZyLV(M z22sL5av#DnkeIa0pi$QmZqFGDf*DrQuAx9kG@IgUD%aY6b>)~3>WjjvY#_BTsJ(}b zlrG^b+{VJ1w+e3dH!#`L&Sk_GC zyM}{7oB(0BtmkjhqwHUcUi(Bf^65G+oJg@S!KN7DH0lFWu3voOa_w6!Z;N_;tyT@c z>KO@aff`tODAg30pex~P$B@>0ZRd|3dvZJ5lbuCxyGJrg+&EhH=(yk9xl-SiY}W&` z&qeLGL^UX`dgy1#I4inRI6XYV&j;clYW!wqnJJT^wCMP<4v1we7(Y&PB9I%-lRSfn zjm5M}=hf?ba>+7q#$OA^oor2a;eSnwpIj{-40rGfF--5BOzesqby_Q8cn+Bzftc%aS{-v<42!zqL~C zGalBtQ<5*1M8pSjuiP4^yz0z$f+ROPh542Ycm-_8#bX`Km$$bz@u`o*9F0cA-I&Un zwEb6R2l;`fxv_r(2}aHpOHs`p1voTGZbwo9aQnZZHL4j1bFEus0)bF~mP2hceQj>%7Tgcu*w@4jw7Ocsyo)tl^jasr8t7pmPs2LCwGx4*-8DhgyRbL$(ZgE#>#ZCMZBVnW#u zWhnJSERtzDqxrrA)FMJntVzd2?M^`*cfl(zcgnxGwli$Oa{5xjV;}#%(FQ zr0G1|De^@d;zevDsP)!QGjzYXlXgfSm05;ea{68}%{Vh2W`Z{*a5CJSk{r!4phoSv z>fqymBm9&G{O-02QCP52eK>1eW5{P|Qle`7h2$p>4sU_|0M-j9_7jUD43In|UwInC ziq-kMVtbeS#W)#kJ|qkcwknh4G+0J*p&Q&oa)5s4BlV z#&%g^^48m!(r~P8>PXT|{noB3tPSU3PY>mM8 z>YZN)d|d&`h<-!O*~@fzx+yq zjz5$TH)q6}*7D>HDLg&SFtp4WEvYw z3}hZ_n(U_eI=1M|%g`_-d9*9+irQiBunXkaU2JM)znKrf?N$UnWpOu`Q{O`aSPDEW z@r)#wnIg09^!n*Mc@2Ec6wa-+ZvlWsL7(6|T!s!nB*ZHerI#t2V(!>=XTEeh{}8Y; z=7t^i7!8f3yH=_?yX;>(Wi6Vsfy z_Xe|)O%N2%(}#{(-hJzH=7x!qY8^p6Jh^k_vlgO@1DvvaHuW}Hbr3^rfQp=~epr(I zOZ%&-<613iAIX$l``H{v4Y?<&P{uEnj}e<7u-zbTb>0OfTF66J6H_RzqZ~XjSp%ZR zP}LeM8`A`{GZXc5kDMQ=dq{Y*sB;J+nkg>3j&_PtMf$WMrjz}Z7sShp6}|@Ht({4d zNun>Hs#umUiI2%#DKNvie_pKs+T$3e^sFk(5+bG=Q0VdghfnJ$SFTNFh^x34=L~30 zL-F4Iu@fH0cDs4TFi_sG?)?#dvq+dEt}ulpm#ik?L6!e6V_6x?_U){8@A3Ibqec8z z$wL1btmk41hPN!>eiGn@TEU5$)1=ik5vj+CtoT=!!GH<$E_@v*?6 zAzGrdW$o)XMHf9Hk@_*X+PJEsv#3rBEzFlm|5Bgg+~6vuaAr}0xhHLfJqAhnPPXRa zT!!B@xm@OkTCKu^Bm4nSh1vG{^!q-4l zdTcCt?FSaj?Q1~}b9(k{KC7HlZ$bKj<1+n=3wyJaQE`{fB@n~J8aT{A>bDo#5VqYSlu6gOv42oA1cbkM{>Y%^dJ~^o!=OVwAzLZ2g=B zVT%rw(X5+Cq*;ZJL?BaFlQB2ig2GSK28oGCZ7kcgY}BJNfxsXGaMm^at-XZ%_#+UZ zOPveIKFD$ET4+VlFfA&UDK@x5uU~COz<)Hx`r5Y-_oK4HbAtaZ+5T%u1wm{ZG(dGU zKkNlk=p1r!t;tHp9dXT`FzQ49p#P{sxU6qUpsP}dK zWr4?Mo|pmpFwW6-HM1-QQYIFtny={}zrJe-0lOf6?oW_~P)2QLteYf_>TE6xATXss zECV^?ir2#Ak$sRO(aw%-`U6lf7n~$oupZhmc&Hyaa9czJS+On+2-__lgbh4ws*!Ug zPIFwH%2O<$HM$4SpUP=>96ZHtC;bCw>^E*A*TRXOcj5;#zl45CcL zczFeAxpRr+5ouRoVc0!Qg_@eL0v(PJ!QUeij;$KRKq6GSZZI#=cX1z-NkA z8?+yDRWV4z0TPoJ_TU!uF({6LXM3?Z=2d(^G0^x&7XtbGu5?{L++=GuaGRS;J9VS$ z5=;eZTHH|lbSrup^w$3MtZl;eT^9akyGB8rCK=l{a%bt;l?+vw$@co-c1V0L1b6ps zz(o;!re`zm?dqijBG20vDlxzDLM;1`{MDs3MhM+{(&8>^6~c5BpjU}#_3*|HN!^is zn~YD{FFrp7aRVchd~RgVaX$;Yp1oLnj0m<1Ulqnwyw(nkzaiN{W{3KAV>V%uT$#aQ zXY*Fm!8|pj_}%bK(A9Go!q{+8aDQ0R6%&X){!Pt^s8uMoP&;;MEqYy%?lqEk;6^53 zXHe$pSZvy_cXwJZ^GTW*K!bF(K4Ouxl@p*rXhu^U^}RzuE)>|RwMp)tO8>G0$HW@O zR6kHQEq0qo%XAqVg6agcWPOG|kZj(ex8U|lVQFSEQej*ICMAEjg#Sg7My-ZybN1*CdqdbZ6 zP)WMLi5uxBc(YRNMh_oGfy|+|yK#cNPsG4)sY72n`vV-`SI{?vv<_J_RUGVG%5E-U zmhqZso1uLdNk!Rg+g`bLvY`&Q zj=amz&0uF}kbJ;QS9R_4A(dFMEX|Qidcdcq$oLa;Cf!=V(T8Q!$3cCYio6)L3h8aF zYg0B!=s=Oj79Pj%&OG7r z_@rWgf^^_kIdyOVuw=D_1}MAV+e%387DRQH;z3zg%T$0{7tQv)jQA9wxn1|0iVkf+ z>i%Q+?(w4UG}pyYolWjcsT%Q<)B1JUN*g}N(&U9>^}-=pIuARtlry>{oKkn+o;&~| zd~xFXK5Y1LSw5et+yF{O#c-i6ZxP2{ktU`|KqE92?MCwI&PvWSsBKo<^+pC(o37@! 
z$>Xklb|yJy(keiX#pQ&M{Q~jpA-pJ~Z(SK1e_1#n=36h-j}tu>r{Gvz%xE;Bz9$sq zvLE}@9Ah*Yrq-2I;{=q(DeI=VS|{_0l@(x2=_&#{3*Bc2OrhN8U`NNH5ywfxuI`eN z@zNSZO0S^xrgrwYw2uxV4?W}5FA$I=Dd?e}Hnlpy8Z+oR;IlZh) zgLh>p8}~2sna$Sy2*(?#1EwVg^!60ySiar_8c@Vxu(G1Bpe;?4A4g*_mQm)9nd!Ca zm#%QapDhKhPfP#SH$>?TQQvl>7xFES#r+3*^`|kCK!esk?gqhvXzy42PuZ>2<>{{F>7)E30$(tE%VL6W75|*fG4aCyRjJ|pBo;^ zjw+fA)U`o0L=V;$cl^Y3BPY~wYVmJC11TbJiwgsgH*sM* zP%#i~;eTpbx;ku2`&0)LE}L(CAR)hPKGRyd&fPZBo^7z^+Mf*nupGbl+_t z8zONd8sSI7?%$t3ol)iq?NWf1zbCj)r#ia2j$*=v&Xs%s@ckqkBjL3g z9>R*@kM%9P*5fOr^`Q-h^;zI_UR{HTvn>(}0il-~CPkrRO0{ zrXT*nw8$aGl{SMpC8KU?(!bhg`wf{8L;Ww@LRa5DyW>D`MIi*$@cX-=ra8DVlaAI0=>0ddOV338ITHAawc;DjI>&cw*iD5y+Ney+&N=gNO0!&xAh*q>{p3*869~KBiP6GnrU5A`@e!m3pFmU%HlB#; zZmkBL0DFdhj#wDJ^U^9*6E5V&fh1`}I#Vk-IFD5tQSun}kW9yn0h3qTiU;miw9@69 zK<9epQnxqWFsl|~f!O+_ky$vZ`^IGkVwJFaH(+s;e(kSbq|VKkE7?=+S#&V1^kXHB zE=HnqYi(*+kucZ55h11s9OIi0ib&g zb+?@$o_hfz6ob+qYRSJl7ATvc8p{5^I`EpPAljm;C!hQ?iDo*bzs6#YUA zOOqxU!t9>wZ*Pxkpc;I!a$ZnvySuU`aF)m^00Aqc*yW;@A3$9LDwpn4k&PMDs6M}- z7_$%XoYKZ2A!4T|NC2Lr%-?-Q9G;7ZZS#nF+^9a=3pFz{Zi*!i>SEtzTM=$U@-v}!l1XMX?fxryB;1g1i zzo1zMYm2od$tQ7vZh{}+(Vb?l*GGOPzqxdr;?7dnAG+|rFJuxVFXFv}sL^n2C~T2q zd5JpG?!{`szGL7>m1``{Ln)V)iVv!2&1*V;-}0RVx;hLpWls~`QJ6QB>IL|bZUd?< z^mX(Q6#PHE)=J)Z9x^~T-A$jltq~)XV&1GxtN;v+I>4TWeSK=#56yDgu%{fNy(@_CmWvdVko1CLlFZX4*m9`gh70wy*}-!Xoia z7+(u1X!h(jK2{dr03@Xn#bek~tDv;=5CM(!k^!;hdkui)eIA3{>R$`c9d+y+Y|7T) z_Leo5Eodg=gIu5p#_JeMT*34g1qCGhyebJ&rCpUC(YZN~#82Hy(}pEvQ6)vX?|>eg z7w?NU^HGuc^_?bMb`XTk!c<6_W}j{ZBa2f~?0TcCCUm{Q#&%s~M`j9>`O5^MUjXu% zm#VQ~1!8Di*$NDwxz25E(e^X2jNKv8LCA8nvd@7;UllOFltvj8uZ8^A+_iZ?cL#+g zoc!v~A74@vgpDU4t#>DT2dT8c`q3qa(=KyE=OD0zfUH6T9+!!2im?67>&l%%q(Oks)IX(V&`q44k{S!y=;f2lBBlqaCta zNz9((;w~&^j$S_FFYhu7g+1sDObA%BRe5~!4(zDG>i)%LRg(7j{7$0re#Ir zuA^Z$#+sOZ!Eh93Uw0;dKxB+Qr}q-+!TL{gV84`UAlL)F_;p)Qs$zw?AM9Bj-%(zB zl2nXf2Fkse>5;ZNXvE8poLF0$g8f0*ct&ZTuWifwkMK<&xD6e3wzhpDhndkn8*cE2 z@wX;glkb)<_u9X;ZD&VP)A2-pgtFTxWv0$5yKP)(#|9kafp8UpeZMW)pDLE9`yjX5 zXH!xJusQrF!&wpg&I3E`7ofG1TKcM;q$KKlI??aveH&%QPf2O#Y^X9<$ejC2THCS# zsgLY<2&8U^Y?lpbs(Q>x(Aju+4iq^W_dq+~c-i-?Q>u~5WCxD2{RS|Dy?)cw+UjryKU&_IRn!{G9S2LsY@C&B9Ytt+*N5j`V+rI~2P zXI4J$D#AcH^|=1!X#i0kWbZ)8RP25fT1Py}B*nqtqlz>wgQ#0p3e{tTl2$fwhfOsp zf}ySVF0+rad+{vPF%e2f2!dKM>&vBzF8nA^lb-hSeNvv&&*IK{s1{T}RaN-`Ef-pt z+UR#pR3DtaK2O^i@O!VTKisE*+PkLWc7!JoUlu!O^|ss)DCI$mdOb(lzB>w(w^Sj7 zLDK|r*LO37A<$R!wYlo1U;ao>4%~b{IOc}4K_J&v19~W)Vp3xegp%}iXQxc7T19Dx) z)DNp0(4PgDE-*;CO3~qUM`y;hNOxEZGzU_9`vi^hguGj@W}ktZ+jRy@T9jF*ElHpR zI>G{N>=q~m>~m9cl07ss>zENGpP$*lUboAZT2inn81* z0$u7{l>^63x6cfC$wNQ*Wd8A;*1x~m;U8DISlEM_f)cn;MX4r1hy^#mjT;{mKwAD; zOm;6C^aIi3&6p7UK0C;;q)EQcV|DIIF}}tXzs@i?&!H0W0SUiSn~UuckZ-MueRSJP z+v3r&av75&Sr2z#?6d;)`~;QWk0+WkuAS}p3Mlnsd^M^TUrc40`;|cTysvptc>+qS zEUz(F*W+lB;QZIcWZ{8G`~HUEQP6xFuz{q=5rSDv*Qk3}V{*8Roo;l&jtt-8@(bs| zN+IYB2`@A*a^M0_CDvoKssl2J=L-WC(rYWA4lQ*LuNbduKEOP9=!y}!ekSVCerQ)9)zY(QUHM#Ed8X22oLFV70<*Bc-MVx#SM$) zbdR)RFWs_`=JSC79Yv`>tK>O-pfeEM0={k8w!zc_T0w)kZ)s#tn?{pCO zo+y94lohW1_i5_NHFrE1%IdJ$G3$~wWDz2w$D7iS;$+>rr`&EgIW zV>GvZ6FqsXM3i`Sd!gw9j17-4SPpN0KTcEiVrqP5sw1WHKSw6KJ`b&d!_Xra+%S4Zyt zAurrX1;eF&-s`$zGoGR}&|B&N)k%8t+5Xb@wsgDd?3c-wcU`~*@jA-(GL*Rt-vroD zJL5C}0`q%k*H42|vN)2*@G@xYMl#&1IBi0 zeF>P}2WH_fCF=lSbFSTiJA?K;b9~_Qw=)L5{sj1X>bI8;fd1(l|BJeZlY+?!ssQ8^ zwy#F*sQ9U*g1|SbL8p$i=QCC%%3t6SZk~A|Fg^iId`_4>_Ab^|>CiJFOcelWe5S+M z)4gpYv8K1W0FC=fzw4({3`Ygd6n4c`Ew#uW{0y3?i=`ejy}FzCr%~(EC zjsypDY=zt$ikg<2&_1bF7siwVrG1&cR|vHJE8bE-)UnqxX_BG$f|PvRH5MB5mwVsA z338skxzkloPO2pLJ{SID(9(Kosw*1Sh|F5xO7FudtyWO{ymgPnt@kbwhOT{%Fzl^l zIyMEj%kUT%HAoafqr`nP)+?!tQ(et7XZPVd*5MwAXWlvq{>z50rrUy+wf6#K%ITz2 
zjc!@u=j{Q6O!?TUf~>ni=KSyN4dmK@g#=8G27`#Ab000&Wd)Gg4CS>#f$JgR^}mMt z0{Y95J8#0{q0*rWRzMS1qaer6fWncz)1x#5$wzV$Y=OPiLXnNy>^HE9Ola0?k~oh4 z=(POB65>QOP}UNOKwCuVWSjJ)H3FZjuzeu8PE7Ivfvj^RR{ANxO%+gp;S*YMP!dA= znN;AEB3TpCQlT4(x%pdRn29u!o`Vz?}aZ-_jcGf#gLAgFghKpV{N-UXm7qk_Y3HYg^ zDKSV&YU6RxSiST)dANe`%_9<)=o9$5@6g7+@M{{#7*41Mx^%V&cwn5NmO0|5SC_7o zQASo~R1A+Lfd#HkaT-}8N5}tPQ3dFdH4#YaWqE%N+P*bvQCrqV}D%j{Mn4Wp|eLJ3_VEW6J=8 z2|sqGY1Loz2N1PSq4ZXIr`m^hamNjX=zy&Mujnvnjs}vZD$y&@j7o(HdcvWpx;))e zQIxr|CcV9}Kr$ZxNB5!0ZiI-NmRi(->!`RvZpCIZPbKcZR?tTnf?!jdOxGMPwY56Z zB-cGXUl&Emq7XX4XMzu%;shWs;Mwtxk zCuwC@+2M+B?>zS8rD=ecl@(II)P*`Ycu`<$svw#z57Wl%J1%iU0sO~FpN7H!?^`=A zCrU{9bEp)$#jch zmYY|DnfYfpaqR{eMqlo=j}mpJ13u3m5F$D;K}6;Q={seXYz2@KRJ);2#)I4`(QOGy z<9FymImbx7)8N+1kqgBT+wR<#UH7*a+chI(1GmQC@0|+?hb|qd(?+ugD**}18U&1{>Z~cDV(3UqUy^-Qh=q3FoH2KJU?0WN!>U;|bub_rHu8$bn2~8!_;}lm<63FiX}0bVODr zbySJ%ul9z7Dvswd>goQT3=VzSxeE7@K zE<-j|%<4QNyfF9tez0d)e9f9O1wM-hC}5fjj;=g;1I*nEb`t{?WqpsoGo5vR1Xw=y zxdvdvYfwCD!h!un!SydOK6>)jAi7i3Jl(qd?KC(!kuV2O+CT!Qr&O|}w`tBhF94&*;p>T+3RHk3Zun=R3en+m$<{bARUM#y80&(o1D96oUgE~@A$aZRqPp@r;;JezZvE!ThD z)0$=4ry-Xv`76_KSaOmmZPRnn47$c(#bdbl9#Fh)${T0;3{X~BvNW1ZvB~DDzbNXIBJw@_Tn1xcSi*Z%?OY>L4 z+P%fjvz{ge%$Uty`$g_V(Ei4x2f#4%WdQGd%(4erVs~BqnH7L0#=O3MA1{^Pj zF9EYvAtrwH1Np>YRh374Tf`5vPh8#L%wMNFLN**Mn zB07(;aM*J8_B72i9cj!YIb|Dgs3tRWPIy93hqO#iS$cDvge7B;qu4bu(W9sT&N*2C zwy}ui<7tPO)r?+?ehsk%(lm5x9=|)Ry^@)d2y~Oal*9)cdL7_4a5nC^q?M zro@V0qp?5R)c9j2FORjowNkUjLpuHiiozT|?uBXJjv7O1{~c;TM>g|KUVH&CngUl_ z)h6y)dX3x++W&PPAH;kFxaYs}eile=Amoq-?5G~vC;AG&^NJ=F%s6saklw;|Aus{C z*Q8CLKVqE8IPokkDX0ycI`x|qZ8^Kq2pkwVmbtQLu*CKKO}*`$`$(Y?v&XlO*UmQW zut@64_en}GJR1}u$K=8lJR^|{1%V@!mchZLYG5Z17slp$NZ?jbt5$qhcG(T-8G}v> z9Zj`T;K}keH&4pJWg89Z&E(FoeiNB}FV5{kdX(y{3y;reu6JApIYtb08yIiKo^gIa z(C8|QfV(to>=-LggGW_E`Z_aHXWc<}=OjEo6T(H{l4VKv`ExCZ|po9_{+&R_9;i6JLj1 zE?RCKxG{fM_)%y4H=hsg&{+}6f55BHaN4m21g=kz#yVZN1cV*ZbU|sQ(~sD>vR9vCf1$m8G1KRq%C$EKjtW%MJ|QdYElRd&DYvYnMAQ~_V@ugrD%`rYAl2ev z*cZF^+84O|2x+G9@+(Tyu)I6zlehq@>S!1C8`flD1YY3%6}qFKS-q|B0w8bg;z6sZ_$lP*_4B(IORj95jAEsN^0SJ~6Q|~A z=>4H5FAENh@;vmSHru?)i7~t!ZaPDF*H`K>ZL9gJz&KPauR;UYCxFXw9-wPEfixea zYt;^HW*oejC?nm(n&Dwxy!8vYBB7}Wft_%1m_SFgS&ks&>DHzcHb;02hL#NRpQmP; z?Q!q*<4wYIJRJo99Mz`t!&Abd7qVUj<&!msC9O!55#Ea7sdjGJ`#0tZ+O8Xj#~O=n zZEpwK(>97kzk*TOp1K%88zI7Cd7bNlOzQ1Zw2G>uAtzL*FF$`GVtbh4dHXBx%iB|h zl~om6zKL$jGE;H+O%lGd3*|W(Q+@?qxULH3DZj}A^YzV2YB`oCu&Tm)6TUzf%;^5{ z&^FL4D4E<_{tEYg8I1Br(IFvBkn@GVMBx~2dGx@U0_!mVu{-HF0{%KB4){`gC z<#h|*4C$r>SSY;N(k*m?9<6@MX7P~&9C3|snZyk>ceAc$)0&y|rJ8VGLMg{|X|Jr| zN}|iQ%bilYRz#5$yR6(4FIjAxKb%3zG3<$dq${P4Cx*Je>5Oi31a{Fl>SGtIvcvgk?!Q(R1W}01Zpzdgpgf#Mh zy>V86R(#O4j!DCYi6GDVCP`0V0~i$C7#)v63*0K0_}#4aja;IT)dSGCeKCBvyg37v za9=05{c9TNkfzslpCADIHE)}PB6atX3+!+^k^$W%4|NEvC;7HhW1WNw&s;+`oZRck zrAKK8dpJY(p81N?9ae*+yj-*4v7xG5)}viBNzpup-=->}fdtArpvTELMVIgfSK;LV%)fo@q}D_lK<|2)85mt>#uT(9H(n{kRWU)gbxF`sZuEey=*Qk@)2@NGga^1=A%_FQ3>8g=G%A&fWXN=yM-&^$Sjv!DhHPXP)oE0sWJ-f! 
zZ`&+Gk<^={NamRenH9Do^;-{h4(Iw_zkkkquJ;sse?HH159?lQ-HPI{tYM#aWUB`C zhZ^dZS@ReQV@q`4%nsV*5g{^-i*0^3M!`wPf|c z&j#LGB-ur%Lnc_DP;UUDOWA9s_2oYgQNMJS2&}sghze$gYQyjF9N!O8XB1%E+>I(_ zBjq+@i*o%dXaveu7)~E~=vJD{@^1Oxm81VTo*%a2`>91Nbw~2H7Je7%fm!ZF#Y<<7 zP%74s|IsCU7(J~yAX@8Z8SB~&6JyTh9JxGJO_dsw@d@HSS25$x{40s02x)Re$vgY4 zD@K$t6$7ZCZPE3)HRrJp8=}=XDeVq?+Xnr^P}(^^yC9cOuUdX|e_B3m|9pwn`zlbm zwDM7V&`C9zoN5&r9x9S~{00Mp4r?ZFt_X{MQE~n8w|7L68-Ac;?1`ZECX+~5CQn|6 zh5F|Y)=LEb3ztZWBiNR)2dzTfWlbwTs7)Q)v9MOu5ggn?y;m<98&Z$pW#_50uQL`& zYwtsn>_M~`?#qAUm-lPVHvooP4Yxbgr2J1mlyBXWcl>^EZ~M?y-yt7z<{czrie(`7 z)j`{K`@`P2+6+ig7q?^NQc?^I+oF9Ts;i;CrY6+{7>%$n#oeaTm)(_+ib$PRK(aNe zgs|@PlfGN(L66QXkhanras-hvKO$1M$hs#yUXzQyLm)w*@;ryIS<#LE?j_OKNJhLL zbiPYkIQ`H*ctE}i4EQ;eCjceiCEzMv=&FYIxi27+lC$9Y!1nAy15pd#N# zU`%Wlj~~E_kL~3LHt2))Z40_`uOVsWh;*3(xpV0VxUK4K^P4R1vNB>|$zai_V8GEw3aQ#{aE3sFIcoANE7wxx}NE?S3n51W(Xo+Q2dsCO*iYvd&fXVF?tMIld;LuSmQ^lU!4k8~s7m!^8=^bjGgRxW(XIiSjzG~-@bUs;Qg@Sd1cV7;bQqIv4$28E_awnr1CJFlg+ux z=O%(h`v^0t3wu28>xgWF$rv(cDgc!`^wt}qBLP=fJjW%4u5j3x|3s$9r1c|_dmA5@ zTxQphoj#DdV~v3Sd5q)CF4CPCx^YK<;GT5QAN^*d-sHHI-5T`eut!C-sIfEY4oSD7 zq^p{nA9;NFun8=h3o$DyoDG}!P(b!O7Yd7~qKATnIh$TX6X468}#&d}Hv}sKqlj$2oSG{c7buHwd8TO&qX4hEG$ z`7KAe9*GH`b;8Svh^NqixFyEX?Ru`q)7x!pQfdHz^)M21Q!uUwr9UZA?pkxr{ki#+ zNAO3$@j0Ra7Yr(u>N9$P>Nhsm!eU1R_E1Ea!*=Yf1-u4ehcWh0W! z$YCFcZmd%7#gBC7u)Bh}M;%6qVspINzJSV{0^2F6J6b*WC>8O2*UK;COV@&!&=ye( z8^jJi(;ZlD5CQUE1l*$Sis++Vw#hTGs=6Dp;+v1;3d5>{!U6ck})7w&2 z$iC_pRDpH0RQltyryH>u;g(JmN z79Lg}YzbYw^<3up)zx}=Z~s$^UvNOf{>>-19@cOV4m88TDj8g_RW*l@{VhT3*`HD{ zUe^i_OLm(RDqRDJL?56c1?^h#|OFujew#eMos6s z`p^lcHd57B25Cy^H+?{ys;{HNqH@*7A!Sl}mqjYkqBa#==dcvNmDD=8{Fc!X-Wz2{ z;j3Cub3(yGc%^opeRjtROZ7(JahdC^)tjL-$UoEsQ~r-XbI*JLE_uiH$>rwYGUcDd zwG(xNhf>gg>$xKasOQctlGwYGl%>S1dVjdGe9ff>G&Q=q%NQ>ZgoL^{2UpiSs`472J)+B!D zjpHeXCs%cmwH+#`ncwrS6raLCDNrQ_a=c=-+)nNLtoPYZHHxgm*B^a3b2Wv2vniv% z>pwx}zzXmHjtTm;BAO1)uu@m*;h$&ZJspSaShtWnO|qWLeB z@ake_)M9`U+U+FftD!f`0Lt4}_^MGxhWx_c;%Y=Q0!vHG}3)@>U+W=a;&D=Y#90)cjc}CkIFpxi83OrT!^T`;v77FgB&Y z@SV(5lu;*!6?E{^p1b+%c=5bUD$ouc&>(w{OyR5U= zelfH$PEN>)w^TI&8d%k41R@UY=8N4Uue~5X5m{^~h>oym zFBvar_o7z2%ho@GHw|aG*ZMorPg_52$1M=fBc|)&;9WoJ z7ZbOdW1HZnt5r5rT)EtuOML!_*#~rwH$EG=#{XUZ)zCbZ!!^CXY8_YpM8f{3VWNE_ zM$aYfuQBJ+Q0DpkRw0qOZ)k-DhCA=gMfMS2Jc}oJpf`1S@vO+QQD#C}a-`U48sCCn zVVOs|c}d5^6ku)kEsuCidu`@@;XY8OWIhq|R?Mr@Ar;Qv8M{RFo84I_nTW9Kmc6NDEiBK@3k_p~ICfZNs@WD8VK% zepAu)9r=~7JJNq8DzA+Fx+$|KLv7{#O%*0DQK{KSdb2{P=x*x+Y%>=0h{une4iv?~ z%Sv^Z{m-sGNwR$DW7jEXxP=C=b6~i4-y345h=f7VD^3YqR^9*VFH&$!=Uu6LV0##S zPR`79(~8LJcy0Y!8bM(+w^A@^haHZ{t%l|Hoax*wK{(AMB@S4YTkvkK=sjdAlClX7 zd=JD=X1d0XrP2|e#f?aaYPzLrp6?eRBMWFG7vZmPz%?PO?M^^W6|D5F(8GDZ7?|@Y zOs~2LX(mJY8)}VU_0v8;Wt(o;rskX)IDR?GM$Aeuk+ZPXYTdD+0sXtY6jZvQ0E@ibnLB?s>v;q}C(vn)IZI(Js{SwTx2;J(Adq$`fx-w*bg_J4K~JL`W1e|Sl#kRxKao88 zZEqB`H&oX>Z2Ce!I-eOa0oxLV$-`_Zq?Vt`7Axb;SrI47i`aNDV(ez2$m)BkGV z@wQMBM>{HcU<*L;q0loeeoF}>?mSo8Q(4OM2#G~FJd+`l5Ja&s>p>%MO^+PXaCiyQ z`|INX=Q}nfWPacsMU@wiQE$Z6c1cn>$Z@^Aw#Wf4-RWpFPZ^$*?km4{6E}as>bqkm zBGVXKJkUVrnmj?9fi%12DJX5D%BMH(i)IxNwP^jrFt=K37Nm_(UiNq$ZUjCSkIwWF zsw;QRv3;>k1F~sSgLpx`5_IoM;EQv4nL>syZWVgrt`FyJ4CF5uEI1i%B#s?H@ z=M!d62R`|s2)aB=`*EWN!^&F~qC|&zdUZ{tBBJ{{sm%1gt3?fVsC%^blN;76^!~W4 zkL^+I-6!eMRE_w+D|*M_nZm4SVhr$2z3>2U`PnxDlb*J!0#E|#L}%`{%PK=(HP z{q-p9ektbWT5<)fFU8T7+6snMTMSOFmv~Fzk_Xydq#7x+b zFoO2ZU3Kt9++Drc{}fIih&Cl6Qf3$<<^_OkskQ2M;3h>RR>2&K3cqUAMI|&6Y&>}Z z5*fQU97D=sc`cwkR0UT#;##}LG@(~2G=hH69QXK`d`Tf2?EKR%${@wxD!bP?zsH|J zH|$}CCUHChA)SRe~Or+or;&lvz zA|aNT_uT`-3t|_-L%Yllq{V2rvKi;SP)G9tV2-q2XU^~cg7l!>hURjVcGhwO^ zl0_zB>$yOezhLccNrO;L$s+R6r(Q~smWSzlhHuJ-A3Oap%O1>h 
zj8dSUHU#m1>FgD@N=&y>8&PrVSb8Q{>Aj0h>P&upDOjYJ>vgJ3qO??zkjcBhiyBp^ zLPEP5UvZ<%E%~Y8GZz5Z=<7lY96H|`3<~aiU=`~dy9Sj-XpS;Bkn(SEt~OVQVJV|0 zuIj*D4fgSfib}#nt3?stc>_BoXSzQ3A{CE!Rhz9U<3oTpqarkIzxuMUeAQoz9nE4Z zelESH^1(U|_AhSvMa{i4VMw=!W%0xGEv#(L`h`r z7gcn;J4aG({7VRFA1)(P=bEP*mfr`*S@U5(*F2+u8)b@MlO8eBFn5#`G654P(f$5< z8Ex_SM2LF7y8e62vidDbilu*<4vhq$HFRI~Q78D={lyVbPjf zjDAr3&-#*Bn;L>4*PJ*+ea~%pyo%;ZQhE`NB8wu6-F&4 z-;}2RM#%9o@oyJSp?1A}+OHl5XB)#;dS-dH13V2^86JV7!G4E)kOO75x5I%4XJ>bF zh-E}F($B;B$U~aNCIQgQ@BWe%h6MR#X_8HabOvP*9bjMAmAKbBm~hb?VZGoLt}eIY zFlPF33l-KA{LUu5JKdNt7RBG+Go{uD%wI40fw|09$r#FZ9g+B3QEr7coJ>Jpt{otj zgfsVaRwecwyQnLEUwYuzSl{WwM=yixP-vM!NS{b;mCp+`07m@HVee+r9EMTmD}iu8FPb1CZb|)arl8i)Q%+(Y8m+6XEo7!6VcZnT(CYGYd$#{-+)ZXhkiVTEDdY9Kdq9 zX%SvucVh(De$cf8k)g(OxOab7g>Hw_Y>q?v&8a17sX&oyPCeiJ@IH{CXgyZ1k+h*zdRfnpHV)HD4*W3j|H|<2#0x(b=NW2KUT^-?v@?artDgW zeU-vT&WY|$539`WS;cIme2&k#h$)y_uGu{wQT2gj1c%d(5 zMa4k{nJQP9;FO6d8CLumhd!EoJHAgv z)mY8>HuwLNkWxeaHNAehm2sngYcW4qqr0-rdv-6MQZ7Dyw>_VDHI;TM6=IWBp z@;BQaw+0_xlX=6?yj$G8ewNd2l%K9@KjzGL-o3l1P76_l z5%jkfwNZGqybJ7n{(X6pLVF3kHkf9x8qL6!Jk;jhs1edZs(mmLA+4WaYa{6W@rr^W zdVL*LK`9hg-MsrE)f&`(^lzo2187jl)^SmQye}CI<8}4D2hSELfs}MegUZF7Z{|&T z7#DdTFGN!^-jsLVQPnNB-bW3X9yT&VEDncQRul(N;UH<3$eKzo~oLV|d zu13FU`7q{f4nDK)jL-au1rpRZ)A<+PY;(0v+?Z474$gcn0%l0t_uFKGs&9r%y$SBH z?Qgq5_l~iS0_plB{qYZ;8Sm4CG_i?h&8oXXib6bzSjMN%$rb&q;#r)ve|OiJgBwDm z4KPhUGS9@r{6(sJUt>@AkSLFiYZ9w!h(Sp`Q`433xR^-v5P2Nl{)mr`d_RD=FvlE-gETWU*Qd<;!v*m9pNPJ} z{T)}rY|N+bT?f84E$>Uea27W*hc$ao4V0tmmWxu;1ahn&R=%^D!&Fd>o2kiI*}har zoTGNB6POsqK9}h)zaITu5>EUkuEpIZHQ26HU$y#sd-SB0a<%~wp z)fNI{_4L;t(tiB#B>BMKxxcZj(=qC)`q?@gsdRhSs#4YeIS+XlMsT-J?p8It@D>ZI z20q!N|2fA6x%~+$hDLTEHRU$=OYZ=k>o_{)WMI*Chb+Te>+1JG;L;(LFX8NE7t3-Wvu0SFPoO(2+hO3 zmeXcdrj|XHrU;t4`*Un-WFMn>)^SbHJV+gvI32mvp{rH=_v9N$kOZ7m$xXM&r1c4p z)w&Xboql?|eqH-1r5z%RYq^;yQ1r0wr|fjsl}&i#7FT_xgUVUD%&?)hZ0VH2#Q``* z1;bp%AZD>%Exl#{T9#L2=nOJL9~AzE_Oo8YVA0htF0s6uGASp)`o5zmHJz6z4F8PWF2cZ*QY#u5B>!mxd zo|K+7ABV+v9u56>H3}eW8%|I3&K^qs1fizg7S=vyW7gM{;`MsYL9T&ZeeWEAN^$`*pT%W{e(PGNr|n zybnIHoMBiOX)EG{Z^LdSPTIiXOve>z^0))EHS8KFtq*Ks@N3wK7R_Q6J0-venZ#bw z3s{gT_B$5^+e?ZGE=pK{^=NVLjl4}cqrm>Yi(rhF0Dt4 zP6g9uYvswtQ|&bn(T4SIy#5lL>+q}dXs8Rew~sz6(tgE^Oy%6lya)H#91E?AI~6rs zo2Z>%qkdItul_=8yv3&SsW9xur97uxMI1riOWsv2QUi8Pnw_vtK5_-^*wd~0%Hskd zwW>hXl@fpDSp(s&{?gLu8A7BS0gJ}Q)J|AtdY?Ep^SYU~m&6vP369OtQZYB7$E1tw zx+bC8jR|>1ndauT4Z*R3s`N|$gBY{^#7ak)w#CPdDAs?5r9TkbVsE$%_f~>?tJPJm z_KFY#sJ7rn%})0hh}c!}k}Q~c8zNUQ`1Coy9pgQ;$X)eWsj02JQ=!dQD^aOkzR^Z# z)8k0;8*m(7pGrM}Xxib(6%DDEmGHZWdJs6%Q`Mttb=&Y@7TzknLrz-kEQ4IHti$5q zrC69&J=}9B{em8nI)Uk3e_p=5Pkkpw=j|+Dt>>mC(oO(d{mz0BWZnPfnTE|ON`^YD z7LC~alwkBHA*>~uHnCszsGGR^W-z_IMLu~hpey?zy4G(Ua_`30l>7)fy#vFeP_^3~ z$3Am%@IIgpb&70w!IGULt$OT`} z*fXsdfM`s@nbv zF8ObPtAwyxDw_VX7U$g$E4>zLbyMTj<`jDXjNPc_cRMfOy_}fUws2U!$NUM1;U(q_ z>8mmOLOw(!?rCp!d%xRQ_pr>g>@T1DEPDQqzl-wiS2_feQdz~T6EgF$yQ)*HU~*u@ zv4dNMqN;0%D6-Y7W$Zc6AZgy%zCR9qNF6G_9;j+V(iX~Ksu(xxU1$%mx07_HOwQoJ zGSQ}qV(1r_52(5dLV9~m)z7n-{JPv-j4k=`ioz+35ej=h!V(jWOQEcox=(%i$ah?t z;$+@K7&u-3bc)_c5pgg}1AJPeRo!J(pP8I`-ofk%f~i~K@>|!mxKJRQkc$ ztqm9s)WYzr8@c8YlI*!R1Jy^b)ezv*)mm&SMDkz{z81wcjNJYg5qUsl!uMPNm8qJ* zatik`jb2Dq$iF!2f*gC#ee57#F4bHS`id^bfQ^0-Jo$=yZ`KiunR}*WNwht~%9VmA zaxL+a-n;oce(SP7F9F(6St#7R=|trlaDP6m`I67K?&O(LB#v!HDfH_iY%u03SL($S zUm^Xgrj6BRzh>{t7;!XrKkhmfTbqRl)EiZ6X`n90xTwh7I{fqV{cE*oS9&xbj{XGr zTAjO?nHhF%avL-YVek8N^088q^1!9CLwo)RT--WHCI()HHGV7qY3=#k1(>(y=u-rC z#fo{JORrx4!a?^vu=&}89k_z0I5&J;v^zy#S0sSd`ZiGNS}UB1PX!*Pn86V)8IK9p zau2)1T#B7of3Jb;AD?n@A}f4{?ox0cEg?2S?%*Dr-K0a zwlr~1R6Egk3LN6>89>;tJ{rFw-+3( 
z`-MSBV@;L9dg0@4-PNT8_}%tbQho_ycol-pqTz1@!m)qc^JrWBjOWK(E4~3VR8bFZ z&hc`+A?=nfl@xg&)j0NeDZ1}jXLna50MyiaB`FLNqrI;V1x;;aJz%fPy7vO}Dbm1O z+(Re0;)9Fz#P_)&^bHX27Q^j{Se>gc<$wDbmAhbX%mqK{(DZj{eck|4y1P?L9>Eby z$voyV=hlk%fVEQOiWQe$9{|lp?ER5LEzB)O>pnurQ~ZeUnR#x)eO2!0#i@M785Y*1W$WJ0qh z9^XXRKz+>Pd-PjN@=5ZMY4Pw)3X_kJb0-r*NukGN*{Ta?3)oqORidSh8PAyaM1o&_ zy$v5%RWeo1anHGo33>H}SxdtLZZ*4wyA0ybRTY9vViZ%CGqNX<`@U2f_`Nh?E{4*QyI8l6Ba8aROvXL1G4qRyzfu%EqK* z#k6$^tbs9@3x@Bhbi9x2z=HbZ5N=YS3Pi`t-H=bR6Rj7u!V|;*>seHiSL#;yJfTHN z*Rpd?UqB5%m4_^qs@^Rm<=Ujcfb-A!=ltV9-Bgzho`{AT(`W>! zpe4~w$HaKY&M?kS5C#{>Rv|O=_&u^2^8(3JXF21Hj(?pEpSU#{9K$}XP2%388v2!X-{?RH`D%M6XwoQaYT<;_*KxJ{jG@R&S`@tn zmAtaZA zD#F?#TQY!r7He+qNn6%l595`x+7V!=F1hW(CXDn)tULA~I_o6AnVe8*-)3W$me^j{ zF#d*Kz(dyil0R#Z)pdqT&{be$a^<*#Gi(cofMgLn0vN$G;(lvi~!`yz*MBx`T!nC zQoN9XT{IkaL+*W7A~rhze6f4_xGbKL{HrI{9_As4wTVtxx6!`MBQC#NSORK3KQa>) znCpRF=)(b}DogIDELWRNkeF3VJoOGNg4!R}I~T3p1b}hagX=D?gVxDhjyCXd&Zk|h zOo!^WTld`x1>NiMbE{~iB|CIW#yCGB`qbGvgGj)in%@{nO3Sy7@~C4hajWmk6Yw28niSW%Q$RWi63s zQy`@ab|{~#eSG32$n%%v)j!r|@GUB$fvc_gfTzP(kV30IoU za^`jTd5_}e?2k&Vin3`zO{@ILF+EOCK8b$e?C=b|OIBRDHnYU79x^4p5u>{Kx<$f1 z{(CrtE*!*XUg^Dl&BWpanlDMne7{4KkkC7cRJsQLw0ultheUP;CDSy%-PkMh#kJQe zWFLFnnfH0xdyl6d*R-iezkAi)>^H%>yF(AJ;;7C4#OB*M+9$z?R3~6vhtU)YKW!4DfRv6r;{gNK`s1hO;L^J}MnoGUV|?e%s{Dxi zIcLr?&JKJk{MhxQR5pZFNO5&EyJp1POL>ABK^p3XWrl4?K(_F9Wuf@Gv!yNXofv(i zfP!uogSRp7UGCEn)gI37W3h%F&>!|j0W8TNlVul?J}85Z1E_K?(U+{xbUM23R@;=? zabm%j*UB{$@tJ0Z*>Uco-)CEr$x*v#Q<6hc?)1Jnp_N%$n|n59>`!9}B1JeG#$2Wk z5WG%Q#l!K{9Y{hfa+=TZ#p4l1=Y*iUY4Ll+OPh1`6H9<>J+sA9lwRv9BbWX6PMq7S zTGi7!$&iV9aBW3Mm&L-6^Tl)%dv>gG6Fon-C?qUVkZ7eZ+I=O9ZG zB4JbFp^Cm#v5{E(HpeP8DOa<0Vr3EQ35QZL( ze6^7`PO&IVYF}*&gPfqzk=2P$X20K}{N5h~lA{U@Cv{$=W4UgFb)7hm-_YeV1+Z4= z!NYgM@|NKyh4dd&=Ajz4fENqeT#iV@Su3STPb~uz-7XqfFioB^{d#F-N^@t zA-TVx|MH2PAOy|Rn_-9~#~-c5%{+`XP<(@|^0sO(A@cJ3RRk-}EkJ+XnnpEw>XU;) zu4smhNU$F6Pfe;jOuaPsw&i_>CBjfookBPB>H@i!64dSu!$WxNHE3m2%LsmrtJW@& zc)}RC)+Sg5g4%Rvjsau;$;_zEX0-e@C~LyELiJ^hD(A6_!_C`1Iu0MGV{TA?kcD=3wP{YjM)5i1n7hrBwzS}s?hJd3;nZSU zO1r<~)PQ;g0kvF(ckYCrhQ@oGLodtU=Tu`O53G@tEc%*3$sWyK)1xKI;|GdkPgnf{ z)eyR$NT#%g{-rh`j?1$Az``PM@)qfnl(!6_%C1aQz}?70&S<5)GUw3Gi_$v<{GQRr z4a;6dfh8mB|9a|UGA577+bw?FLSOO{0oJZ_zqR}Qn2vJril=8Vebyq_RaIcIW7c9S zH@cLL+~P%R1Bvt}+pVWlCWhSJCY{yRiCcT_SL{$;4~xACN@6z0XAM_84^lMDI1i_$ zo(`}7ggnBaEN;)IPecz#(yJf;9j0kk^8iz+Z=s(hJny<2BIl;pZtLno&hD$L4^b@z z$YO`f-YX090SBV0yBItKL)L1-c8|EnePmFdy+AYXI8${^@VJ|B zfIJ>zYMV=Y!tccE#W)-eq+6HA=BHvP zeF6DZ5Iw6QUdLLz)KW*j_$ltmUg$hnGj3QJHBLXmcOLA-`RP`uAoKAFrsem_)39qr`{ly4bsISufhg%Wo&Ga``p zZN+TE76+6tvM25Sc`@TZ<~`|iyuCvRcjT2U$Dev>n2Hy17 zYLbJsBqt8KoKWaq^4Pu*DXx#!EjGn5+mJtQ!(ahGvS{y`WVro=mXZ2S2#CNw0cpWU z^R3b3^XKd9fXkSBKz0@mU_d&4ZW-Ax856ly?$0Oo;(HUORL1W|H1}O^gCX#jLC8@M zf+_Lc!~wYllP{!XfFaIN`D-VM@zFgx2vMgJ2BrybL7}z8mq`JPQ8&(yNIQ+0>~p=wdcahS|4)8NOvD!66gZS3|Ok=Ld=hps79LUeH^ZI$-l@o4uqF#*U`#$7O#X z)rsRJxg6hPRp|!;O91bn>nhOL!Q8ZJb2@BHqEC6nt1b%-aunF@oSd} zYoGW$PNj8Ci`?211+=wx2jQ0eKKA6_p5hgc%{``u$D~B_V^89!C+zud?;imF(Nfro zLUg#+IC~@3;?<;XZYMZpo7nqoGmhpo-gD}aQ%;%CE=@r7T!cuG8nXC8>>n<`@wa<= z`-O8%?rPA$o&gPQSLt`Le?H~-MP0U>JW#Ij2YPdQ-ZL{|0YS8L(b|gZp{unG1p*+l z>fo}yHMl=bA6KwxsvkZ+9*fD>Ik==BG00Dso3S?_@;+>&$Y($uKkQ=Dwq?@SrLDn> z3-g!Qlz|MfoA1GLw#v(pBRwK$Ey7fX$z1}`fr4{Q#2!T5S0WWl5-`=2 z(4RGWeipus+_I_P=ErOUREuN%&jm4X1XA%d+O5qR$5xaF?5KvStf`0b!(K$4+z2Vc z=7|&PU&=<1l(1Cc6inqNM(Gp1{~9Im&jh);qjtV-7&v1`S2zt=!@gLj3%5#3hC5aL<#+ zei*0{;Y{;Rw)kV0;c;f-*|FSxJGh0(nA#X1X#^TSJkyBowHn_}A|=xCpp^7&Z@6pi zr}9fFoZ)On@m|X*sA7g+kqk0Ok@W%bIO)Ye;oYi?-D?nb97dW7ucS@S)!@vo$kW$ZbdWIj6wz)U+IkCU{{*o*f0nPIf7)d)-q!ihDfp(Kw@Qjlb>u&Iz 
z4z<`jQ@G;zeJMku0L+wz18+pp&HQ^YF65=&>2*b5>Fq-OodLpm>A<%KGLUI?5`#9$u&fHH`l#oJgH4nhqY2I1aQ0Ox=qKoF^oa_v_^ zIHdWSvL{7TLR-j|)0})FgMsk;uz?GOgBaWiNT__4xX-Jf?!qukr-!=~D@7~#;jp%* zN$73>NfHR|BnxC>H{a>Ja98Sh}>FNI4$EM z45~B#`(C%j;l18en;#5k&;?S>mTdKp-5qo)Fw}&#gT(Pz>=5AVcn(n%3a1lTDwDjG8ho;%Jh8#mx+RxfM3bA zo8;LY^B>h@n}7!tHn$4+f`EUS_sC!}T=p{8#qkef*cxcd&9vzTs#-@Neu=*Hi|znL zh_)uX9bIab?yRr{NS$)!Su6e|`hYLv{1n6OUAG?XDm}k(Obo!GGiM8CGvAfuU4GYc z<2P1Am{3ScpPQ{hnp5KNnVy%J!@5J%JH1z8TWjkao`!$Ag1B8mm<+@6{<(BbGM;(9 zC6>d0)<)!zV)xnfDI9da{Wi;H`qrgcf1DOeUvqWi;Epo+Dx;(DuyPaBaV3ydenjwl zHUpOo>|49KNWHiC6u`GSD8EFU?35ExHa>zg+;HZuzaF!ceV}4!INFypY(j;nT;SMo zKKn)9wbn5;Ag{+`Oj8(fOM+X7E#|zm%S1zn@Kd^OJ$i8kArr8Y6ZxQJ&_yx4+ncSd zicfY*31k80p5wU#E{l<2_1M)pn~9C|1Bx?-uRRD8`%ZS*SjRgm3i%L@uI6UTb)!Y} zRwu_@^yYNXl6HSxG1He)Au6**Q}M5S>VejNOw|M97glDi;`dzytUy%m_+{#$^jM&R zk!#`7Qs~2Yg_V)=&C||=h;)T5+C3pAzGYo%mHxH-0{+Gj8|%1W;2Hl^(cSOhWMOwfeWo? z_+piF-?W%#qXLZlLzEZ0uiTuL=b5@^l+|g%aUTVb8%Um>BLbxo|MNU+gMmix7m%QD zseXO6bazCgW28C7gwV3O(+A%WR;@1cZ5jWAeo2mI;WJJ|c|M*qgxWbqP8on#&b;^Q z+ebmuK7(IZ-<{vbIm&%v@BH6Q*Tf%vaTV{aTV=H#Dus?^xS8?Z&guHLW}mMF!c}a! zVV2=s?!H#WMLh~1Rh;)Y=h^^D+z9xq%TL)Da+vL(UqAiOFY#3t`**X&yS+w6VLWbw zQtS?E7%Z3=eU7~bGmK$ZSeBc!`wGTK)%z71KINXoIItGfOJSCw%`E0cTYk#C6L!0M zZgrnZwpIyXQ@r9EhHX5*KVMcJ$O!cs-vD%c9kNf~zF(53vI^bycV+d!$#>587c%6} zP}!v@!EgIUviJWn?pPh5t4M`q3tnb$IE*NjG?_sFRXelfQ;W%9l<4k@zqimY4;dO_ zspO^JRqG8O*;?1?D8yaA!RGNQV6kS==e!HTN6wGB8G;m>I?u8HU9Q#c9^+^m>mCKM z*!g-zJqW8IxhuqMM3_U}Ai1B4!^302tmjy#JEBqaR~M&0MX+S_wDLkJB4x!bX1R`y zG}}lDm43CrL@Xh1eo=-SP1qz95KK_Q#4Ns(KQ1CV;JL|Z2B=iwb=Q2v!6?} zJR8v++Cl$^EzrPpK|xwp_%LP}n_f^F{UPT^V#c%ugped<)Gwx+D7KbufC2V-Cye_Vk`wjfk*}KKLLR@01qCHhGD?_O z154~|AAB=qUv?bO9nIxpF%Xgd;Q59%?@iUss;9vi`j2_ud)BaE7-;N4b2@;kCUS zTSqw*P#kw}L`Gh(>TrdmEEqE~VcOCA1%r@Cm)!g#{5hvY>gmc4y9T;Euw7|c$iFMz zBn_HT*12)EpxFC%5u%}xfSj{(qg#V=8^tEy1))}4g#$l*4bYB0c#6`aAJJ{!+Q_p7 z5Q3ONna%S(2ax4qK&jzm9gCc^g5Pj%x?u<6c)E*_)^YdB#`^p!YO?hkT#gHJ zG{5oTFv5PFiD!G^G`k(;zFsNxxiE4a7>dTakk;w>ooil9?%8OAb8wo*+aKEeS^cZo zDu1TbT5eiTlt)m0jg$4Z9Ra$$*zO|b?`$x9V;Yj0mNMBLNyz{nnA0uS-^noOldR5c zwx_scM4;#%v5U%gUXGY&!v1;jr&bKW4PhI zqrn#W=<(X+MscgB*?3QQ?y_POOYET%5J|M0%UROn(MyR`;I+NMLdl zH7g?!rK9gEFPzTqxKV|x@}Gvn!k7jgV0O?moT4uxO8~eoHh09B)O%`w_v#`}(xESL zE+yctEFo=&sx4Vi>Tbdn0^&sm2ujQ1h{=~I+@MD95>}lK=Mj zvj;+5Ed!%FKKs=%opogC<`PAyV2z#rC$cz*hxg_$Y&2L-&E4YJQz4egA{`3B-Vx3Y zBhJ(hfGesh#dhu7Uu3y#sNY-}XVY8Xf}!u_U;1>8VTOHA+NG<(kp4^4MdEoEi566zX>&*C|0UL^Mk-F~q_l?v5l{sKSM@Ytw6WxX&5B zB%X57qeHU7-!wt+Qqrw>%4mRL6W#~-fFhkJ1Z|P17T=QAiN3r)OtCcW zyzK)gNY53Y9W}E`oY1}9s!Y7To{jjM_n>gt`>4V>U)3{Rf}#kEPPy*)8qOhXDA?%*l$3o8S%anYM)(5 z7AchsJRGJ0R#^My$Dg)|jl+kB@Ko$Xm>R^n>gryZtQR1tj9XVtP9&2_NRsj#C;>$b zYy=PchXWVd`D(ey2aF_QW}SVXH-ny#({|?sF@@|EgDfxwplE2md5?ckd3A#Vo?=d+#2#kfhSf?TzI8^^#n3LkTlKi&Ts{9Jwr>y|j=@3C6fPIemoeWp&- zKJ(cKtLKDenA47XU2B0|O89&;*YOaWPo_p!Apf*|+~asjps_Cly`ttf37-2X+g&b2 zE8OJq)fxC@Tl_C@`O21tPCuL0k0DZTmvb>6J)i!b_I@w#u_~L_31%ZgK>@5YMq(n z4+cpgfUTJ|n+<+Vhc;sSNwYq)vCU^Du>v*puuiS`wN@k*Yw34NnzMy1oXZtv!^K=L z|L`a<2scy-wV3|sc+yF+AN{({aj7C(rPK$JZ<^!&4dq|IL-8Zel~Q;Gz5Zd`-Tjmo z&$ik{z!c|gQq1ZfdI91cYB!V*(MI9xt79C;czps!oL9y9LVm)vH4TJ?jahYX-&M`< zmAe^wY0||__02a;zaY>p{sUkByXSVUyl1qe#K0*K@dimi{+xn;FGn>o!V5r7FP*I8 zWI`=qZTJaC=O`+DxpWCl1OBk`7xdRTAV2k7T%B;iAnwk3HWMt^N>xv0eQSX6Xtg{e zg$Ui;6J{ldxE_>}s(7CT9}&$Vd*`hsf7zv@I}rNv4zL{=Jr^GIv?;c>RX<*7bZ-8vLu8V5!knmD!yO{9zOR4#+a`3GZ-Lr8lgMA=LS-n zauv#=xR|;Bd&cev8erI_l7+k>u1$nPMrMv#no(Q)!>Bs98xUU*S9R&VcEsWo#C65d zrHm^nwFX_oOL{0`nc8NjTd`F9HfyvZyS&$M^hf8FJ5Y=(M!6%0>7+mMlll{wev;*^ z;hU;Uv4Z=4&)-S^%HU~(v$bO)nmf(LL<>s}WY`uT;|mS-BM!s0epj$C?)^LFS$IM&^kqVx;Vppt}fYr5#Vjt 
z*_g>w*$;o=)=NYz879fuk6e2qYb$RI`Er`WW+`AF)5l_nF+<9AgHZf_$`^d1?ieK~t5&FcEl=hEkd)Sjyd51A97uH97IE{A1+-W%8aJ4TV5 zK8?7UP0t5TJ>dF3^ojh|BF54C7X0U#9bAfQxln#QZ5-K#-)H-+zXeNW7F#Bo=6sQ} ze!q;*p4*-1KcT6*Kl1l%ww*&C&7J;zavPQdwv({#kZ z47^KwNtM$-y-fQa;AUfbG!HJ9Q>RBXM^uhp4y00ZS`$#g*^EZ54$`KZpXS)tq&(!u z8pQIHwYA*rj%n(PEB`@aB%zxmrZ%h3QWg`^t*HY8_&Sszkn#QJt?(crw2hMC?uDbY zcnLN}L#nhFCI@ylL(y8zgqf0%!!kTk9(&-8$|)%KPwB1^{&OQVB(IWXn0PJVWepD% zgWovspg49(*iU&awD`4nE9sHy?&Asrs9N9>*B@XozFcww=`-T{y!9dMBhHHQL;_sM z7<~&v+QSRH&`5`>y(;?m5^6}U#ox9r{b)3Csbn>9kT?UfAhrLeoR#20RGj@@+7p9I ziIPFLY8W9jPtUK-J~ufN?!m}KNBZTLiN9x&gmQ5DSr@V=?$3=sD4Aqcx>$g+701oZleR7I_fnt z5jk>;-%gsFBxBqrcDUqH#WWY+xdvc}VYsecT>=`OfP0gf|1;vh z>EbzQHuwLf(vctcN|^(&wev8)9^*4A)o-2y=+xagTjs7c{HClhsmtGX9#86|3b)?G z(aB(emU1-S_%2M)oEEbc;JeZV4`R6fXaw!S@ zeb0h;y;8a`%jM?E14}IW%qV=5rnE;$K;INGhcUkR`&B+^JioQLOlEm{2){FV#(PGD zr>hFa5#t|7!*=X-BT?z(-qr6Upmjt=(~`!jnlmkhVw%_Ts3skC34vnH0*~)e6f`JGczL zd5Mg`KQmi$Hl}e9>(33t7zR9VFI1)28{_kJH!@(^mF%#kpw5o|FproFB-i#aZeaflvm;?Gt+xVl> zNZ}wuFL?j#ROy8Xh%AF!zIh(E z4`V(m5*fFKWDiHDi3bvDjQ5h`_}a0vLH42-JUfE_w2DUT=RF5Mx8#-s*}x=v_#W@x z$lD|tU^8yMaTQ{n8PPkI{XB(Q_z`eUYG+)`8VG0)q`S?XGlo}D`s=a*oCE)fmtQ24 z;65~pIDokr!ln7RtQKzDKzG&@3yNiP zfyr3xVthu(8GT)jB0LleI1^naCOvLE@fQj`+7kI-I!3sTSW6ASr?TjDX9zsz>q;Y>W(Sz#4VIWtdB zdhJK_&7fw&CkM`B;qWOgCc^nD5S-wPocLkvrbtuX<`%Q3-P6Ql?Ub0ow}p-7M_#&? zg)4r1{7>;bY*6_(pf2JdXbLHEoe=-}uJXB11mnOd&;C|0Ir%BJI3zWDGz4Lhb8E^j7LGRu>*2aocRAJdlPUf*R~CKEmoz$)Sw87&`24I zjG0T31{tG}N*O9-UQ(${p-35$29-=jLacUWh(rk`LsS%zdFDTFD)#q(|M$P|aqPVh z+hMKedG6=Fukk$33#D@rx1E`{duIenajiBmCv-Uksvlsdw*9;XX3Y^ORQbcA6n{)V zY6H14-Cm=O=7>+%Ly)Vf3Vru#90}EucS`B`{@fO0OWgXXeLF|SyoRlU{4GUx9XJqt z@p7=`?PWp|vkil{=T+*Ys>IGuP3>DQw7Kr!DocIA50a@Pc1c}hrcG|9;WqAhvN3OD zW4cb(+1BKkRXUaoW!Lsq-g(yLnv0y-?F;hD8=8CYBWE!$3dc4)qJ>XDjy#d^cwv zt7YTj2nj2RQ*B6;2H($qN<dlH|y!A z$(@X!#O4uM3;(EGKoXWwoDAES11%bfz`3<`bUPoD6AoIna8Wi!9Y!c8Q|)0k1Cn)} z89&-8PBXo1R!1zUrQyGbvuR_(9C}(hu&G5bKOXDhn6GX{C9^7Raost7N{e*nyFTo) zyu=SEf4y+ImUC^hb8a5L)B8nolnP2FBHXtRt#upkFg&=is?z^7K=gX(*prm%EC|x4 zLWCqHF(`96j;{oG8Ww#HCeOu$Jh-UIW)B+v{dS*FW*{7}@RK=FH{_DnH;drB##^I`{bUFHXoG`cYOeSP~2JHILUze%IpTpgr0R4T6{W z1vJyV%AP9?s7_t0Y)ZlsV`DPdV4fOi>&{wdGC1VtOv zo^T8gJb|o-%TYwQ7~8kBBQ=kW-ed3`dP0Rbo&j!;!tmtW&+eXm!{dLL!R=OMXwgc^ zKpI2_A`hUr{aWILCIH)Kgn@t{V^=Mjj|RQfaP?>hC+MY&r&W;CVoNbNueYvp{!}9 zdO*iC#$e!n)*qq5+FVT^yO+11CH8{ljr3}OKGm~5c^ZI=TA$a4u9`3+Jl*cbc%}Il zQG2RRoP8!(@e)O*jLU&K^*yQ)Ksil16zF*6Dg;K)Ynf#`V2?%UTCi<}^cz5wPW zhsX5*n7Ge0iIm}u*u&j%VLb(3pf!-^W+s5NP5QgTn?e5SjKGC~BlQ4}U8qeG!XWb- znsWI)wY3>c_3-7_>_fw$kOOdpk8SQvffduI~P`qvcmX z^N{a&F;DUR9ZfzGV`b|*Q)~|MxxH9G30k7G7iPEd-EkE?oU?w=4=%wU4(*D`3EK0# zAwIa&jouUWk87lJ_d2l38lUDq^WVPckOz(N?8JGu$s`sxNKy;imPI{q2ccNkyITGUJqmQ-%yr)0&^hOGa?NNt*xmqcQFI{h8|6&S|mrZ1t<#Ep%2YKf< zlb9!%l_myJg>}qd`U4dM`6!R=>h|Jwf5i&#wpdI3g^slV(mRRDI1vEy&mH%GrbsYJ zC5EFn#%rz>+NEJ6M73WV)$j+xKkldP$(>Yh)Od=$ivpy%sI%`!j%38VRf^+qT0fc! 
zA0-#%-c*N1jfbxHnax?6QcCpRj6MDP-uJ3o?@8 zo@MhX;O?rk@olu++wvyxS3g6=>gF3!O7C}zBK4`O#4$c{{H<0Pa`ZrqKXFo9yqwAV zBEnqX$Vg<^^jxN3E($atHWBiZxN3GKiucZP-d-ySMujHqPWjiOs`;89pb`1TTpuntQkNGNqGf zLZj#fqKV7OH>WXDDtDswZkTNDz1!rQ{4;#b?ts`?bVO!{1^^Y?c5QTINE@NVDGBz= zoK*YmM;qX)ri5}}o+r<~rcsD}LJBDP>1Y4pNZyToqQ6%J0l+Gn;2H{<60<9Q`?H&y zg9Xn3H?mCvlOv~v%WAIu{ZvFhJp#mrLpwnujM(USczbQE#9P4+RNh_pGyZTB#;d$1r=%!%b8$m`ezW=W$i@=i=@o1pUQ-2Uoruo>nfg^bvuDRYw z1exW1n&3LhD6rR7h~q`{(7UmM7}<^t`+fJTmQ_(?=1ZSlNy)*mU|^xufh@Eg3==ao zLb+w(CFkYXB0h&XH%!TB==9kzVy0D6TkzfapgGE=OV2+`Ckb?Vrv%238!Y8GPyGdK z0x?j>TC3KDF2~Hjyu|W3+!6-QxK2=bFYu@?psZ>}jAw;FXWbD(8$ATc<{Pz;rwt?c zMD*|Y8b5Ad^qu(wHS(Yz^nItiOF_~+J$1Ek2ST(~Ty~!wb(#PN7mR;$a@Teke@%h8 z%;u(Jm<4?cC;J#Cw}c9zDS0D*6F@o2-B%hi@?bx$fZMr|byk|;Z%4~wCd(i}$#Lzu zCxtK-$|syY3251M0;ozR*W1Nm$YW+lVm&5TYypDA5;@|H_9KCct!;4#n5tsD_S>(S zvZYxv2q*O?-%U>hOd)tdp_!Z3u$(4IGL zMj5uyc5ETv^=DH#l1X{+fcb3<->9D^C`-|gLWo#|Dy+KmwLv-H>6ZcAbgpateI|1` zn>mh@1RGg&o*&s%>JZ7}g^#Cq0C5rwTEI(}7*mgO5CewZuN0eF>Y#?hK4xWmUqa12 z5EUe+U2@B(dtb!C_w~Oz1JJVTOu?x37a^RsLUEsj$T3&$=|5PBTu9m}A#Vfnh@bb? z@lul$zruD(prq0GlmW!wmQqr8c#9V5_lgJCQsiV}_h~NaaiK93M~*)GZ|(M8bOpB( zt_rW*FfxHcB2UG;J3&*y=GT=~d&Cg?`QzqPp_F%ZdE9$yGN1I>Rg^%ZA{1l&?H$n! z%Io+jsvCU?g+VPek(L30gO|PFEtNr9y*GU?c0rv&^~GPy zWpBFVOW%eQw*HyK@6D}`mt#ByAW!b0emb_yh7;#`VAlS64|8BwlGCD#ClIxCf>R3% zm6%%6W0Aplm#Sy4W2JCJqVU%(MjFpp3{cNKGE*pY!sAWxYm@Gx1Xk2(uWmdlk)M!( zwAR+@$ud1d&OIY8|F^3=t&D?4E0F&~u^%_dylA|&UeVu1n)>noYjDr|J9TQNP_xbLdZQ+q0~xT=RqC&A?3#LuEO3P~2UXx3sn}}+a|G&< zo|p%DQ4AEx!qL0%O;L41E5|zn6Dvfaw%|`6JhT+S7t#3`?$bGb-NtU&FjR7H{Dw?W zhi|=cKyT&LrKcf|;FVfBk8C6S_1Y!VY1!l`yv})ec>IPIVk2k88(tbG*8W+FO<1-W z+fV_($Ea&#s5E)Jrq&L2N)aLi?9v=ux64iK%*0C|-;L;mY+c8&Vxppv%zWhyg{c#Zy5ONZ_oeRCC6tZ>GXVjjlw9^d|>>B zO7QZWJcHt)^gp?M6Xqf0C<%!mzj2~;aVs;fVaL_B%H1Eg#P;CmCHq}fzdQAjI38g$ zm!95Ap6?3&+I`V~yX5!`UK8AQa=sE}^*9`qF@SF>Y3E!prQJlQJK~oWh7$9zR17wA zMP`#&b}-vjxCuNG{`v|#^MASI_zYeXKRLkt zGw5H54UC;D<1zSX7+{-Y=xS(#P@vjVb?zh^)n4sUu=>QCp4cVyAl$N|iUZ+9)cc@< zbDyB&HU;;7DCMh$cEST3rpI;-BL^!$#O$j+JgJ_^%% z9KdS&t4sJkL!5>;>CtE0$oXzuLJ1??=&Ci>W9<-Ie?{Q_EpEq6t)&D;b5lFKq%B|b z#V~3VWg1*l+Qt+nMTBKTEHABa4LVULRupiU$;3(29=x|%ZH9rlLbM&eKyLmrz+8pU zja=bsnbM~J8YAm1-$a{&XSE7N;7cg1&FG~B)uIaNwHb|5JL9KD7T>FVPf@Bih$c?9 zd{M$KK++TFub4Uu$K)*DyX2tq0+!R{xR*D`qu|afnv9YZH+o29`m*ACeaNbPe}6xG z7l_YyTAq5AAfSZ%>;TULWgKqbm66?U0hpLWpZ(GeO z6hTALMc~O-k#xsN>3x0vK6!(Gt|cIW(<>AglmJH(iKbnvzvA|3OtEAGrpdg4q0MK( zU56fh0ZP1@f&zJ-p9F9Yw)VROhy!=jxH;JDZb*_{;6GZByXA6zT@tWLAnzInqd-1^DK} z*738cZusV(JyuTm=IacSf1n()ZY7agT!yHcEg>v~gJUV>8}4+<;oA!f)4SubA*9%= z;4_XqUfU&9zZHvN-FpVo6$}k;^bE9s5!f&P8LyH{Vfx1Tji@80qH77UqbSe=88TZo zqE-=t4A=#w_^&9Lg=6jx*#Yh=o4~x_U`=*}#xY6v57pk;_c-VANx(ix-dAQOsvWi! 
z_>|S4u3iGhd@qzZ5ARh%n%QVG>W_!eMos9o@l~lqg6Mbpt$T^7Z>vCiScmtkI>QA` z&S4jS@DfyWhC8)CV5+A(%-0Q%T}4E_jiudD0ct0~Xax>PCzYguLH8m3=rzJQKlc5t zFiF=_va&E!X9qTK@|Zn#gV~y||FcGp zS_fW&L{}a!O3pkIHbLN2UNJwKxnAK_7Uo-pfP6$tssU#N+I_pEhIJ3#)QK{Jl$j{W z8vg0BpTksdAK$fg>Oo)$#{*@2aY3@KMjDdKoGwc*y$JVZ-=JZF^T@gU7hrJQSz2)# zIChrE)6#!3RYcw6x#S7nfuhZi#CouR&gUe_#BwT7FwF2#F(gz@>L8~Ls7XlCM8fFL zSFlm)oEVY?lc%ol`_raJ@|+Xliq?32w@V3xue_4|d`PdtL0r^AHQYjeeJQ0MfcMLa z9)ktIEO|J+n|&OLIJ6I7v)e`X9lB0uh~sQN{H^fQIMSyuR-}O42IrB^XW=fidOHZ8cRwy@}Q%o@eK9 z-$atOBiVdYqt>CH@6m9##DpP|{2#x7EyOMNP*nl3{hE@le;N2lxW~`sO&{PC58=n) z3cU)S3CRdqhXvRU*XcQr^_@>?EO9+ekZE|{^*d56H*4z?)q)~W)wc33J@*F-aJb}F z)=p^J3P>L=&p0}Y8mQ)UHs0l^J*>+OuSzK{L-kR-XC&|bo}~3{l@<@ATi-i>$=lFv z`eIP0Ha@ofZ4_!tx;B*@rq8?MW1~@jc>1$enW-LNiQOp5@K}hkruVQjow(!B8tXLC z!hq`WKQhkiu<>hH9>+P!xA0~zHmTE0a7yu=&LF}-wHtV)rOYdTDP980_0mWANk}PN zz&|8RZe#le+{3wL2~?OL zE+KX9Cm3}u_ehmi-@ahbw?b1r-LH?Sh%&ngmrmx)|J zMiEZ3pPPD@&eLDwO>_B3U~p=Nw(@foh0c=*_&T_vA$05lZj@Tk*!#Yw!HvYG(E`hY zJS+I_@ra15m%5n%Ipk*EjZ$CU*HQAN8)nfR7n14lYR*za5+D(*igE8e??UFx9BTF7 zcJPIwMiwym>jHfOJ(HBE$d6-Oov0;BDP2AFWQ7CI$O`RPtDYom%+1Y?$}^B8#rQP_{UFt^#d%Yi3pK= zn>>PLUJ=y$loom4ZJBoH2m;#J7S&Mt>5 z4o1SyyCTYxkEI_$PixG=J(w|fdNxtDtzu2s*8(5dXnOQ_3^$WMR30d)0YI0FWud0L z0$q)6!(_8R+#8=|D{ZcTTC{^%=a<3&i|gpH zENYTJ9G91f5Sg2$J#t87xMi)yX>XS994hm!rDCd{7h z<+iL>9iwvmF`8!!bOnmFdweO~d~aiUP+<@GCOa9{!(-CmF-1>!^Po_)O3`z4-8csvEj7u21b@}#@tn*dvQIUH89JJCcoW?Fv0 zV(Zo|fjrnzm(hun+Le?!n6MuA# zJlm^9;Y|?eo4Wbv*YE%}m(6g@{71Y(C{g78yuRF8Y&0qngCF=st&F15-HDBcsuE?}h)+7O} zyCLUJ`->Xr4ACjoXrFnk7{lxYz&eQv^{D6BZ77egopzc)i*Fd7Knc zkr#dARe|3|ULb_5``v2^;MPtWyMIs#;^lXaM)EHa|75{|e=>(hYI)uT z{Ip`Qq%NK>PJi{DI@SAGffnp2bVeai${Y{rgGw83_bh5gP5Z^kFX)2Y0x{!QM}vA} zOOWDVeDPK3RnLUUEjzr`)6{Oc>B{Tuo|WL?_&%yUGwvh$GI5T+C*r}?i@)i@HYFTo zkpb+dYNqM+>9EN2iADattVNljgpD+ooq>X$yx8?;UvO|&!3(bwJ~5*eZ8Hvwg~`)% zQAL)hePHt*y**#(Y<^4sj`FDEorgJDGKZVp0*SiYqBkZmwEjX7sI!Y<#(|D~#3+pc zHxtk?YPE`~ew{_$xeD|5Cjkma9HywhExcJCjq$nvWe@p)HwM_KLK21FV&-5D5r4|- z{|I}u8VD%q++KkAL{4r!wqFDRL15S1RpYp_%@_ov?!308)G-Hyn03+BfOtTQ=6r{A zjc|gdg*DhA&pL5FK^7s={JN%Vvd3><5;s#^7fs70UF!U*TMt^^S|l*@xpq5;tu|zO zTGKv+lP1!LrTmkTkfZ}quHQ9H6x`}%u&ViKr%!RPoJXjqb;X5L2Cp)}EEd56tq)&N zDOLpn!z*~A*EZ0}*?0QBKKm+W^9S349zi`kDL`W9tu4ai$@sQw75!;#M@W3feFRTW zU<;Sj!oTQJ6669A2Us81QENK);Wr@3%V z-3*hq9fRgbbW*;p;uM`=Tyu;agz=Z=vmPD!ume@){i8-f%oP^iaY2}!tBU50wP#_X zh0g;vy7Zy|&+HDMFhzL|w3OE500<#jDdjnQ5{(0$8!>adm{8K6;dl0{UK}z3Bzxs* zF~OzNU{WswY;YyKKl8Y3ysyzoNow+$D_j3nXAUFk$W)`zwso6C#0{&$dLevIAyl(PNBG(?=J<(i1W75rSda94;1hYrFmo1~>*B zrgaGY{5LvkCg#jG$o~VY7|m^)H3~JO26u6X)@e9jcAqlVXk)#3BHH;}>sOMi^w8;Th?kmQ{bhNmfH$8%e$Qv>d zgn%YaKFX=rRe~fc zgaYE&M|yCBb`g<$)L*2B28VGV0Bl4Ys?;l*n_4!U5KYdR{UQ7_Hh0&y7)4XW-g7Mu z|J;d~Wu8Z5&Naj5Kai%%<)s0i7lut$2*se{q$W&Os4|U29;nvD+BEG?*gctza~#G^ zZ};m$QTrU>5^1%G*{BM5;*cuxVK=St1{q!pTjvY5_qR;P8Fuhqx1e{RArOguRJ~P7 zK2pgm2ZC$r-5#lWsy=z(J#T@SY{LnHK}vZ?1{e5ucz8rlznY@@Dt;Uoh=y?pGKyC) zn! zC88;uVjAp$J+2=5*tY)3JI3CY?g`pck?a6P%x^se9(|C`QGn!z-q`)NsZG#x41*6j$8&ld+^$9Wd<;lRaxdaUWqWB`|??aJ&tsjI2+v=OM z-0;{(6PEU#Gu1Bd!+_#txM~PNzKv9xHhlEM?&2OPUBXk^_z$*ATUVe%jQ89p7b#UM zH-GzA%(#Js@3GpSlRBcxo?E_rJMF;ILDe5iiVc>#v^}kBZ_t?YeN~H+u~qGc{oQUm zGPe%Un$C~7=Pnua+F)6H^3H0bCo9UT-$W`3$JiOHs`eVz@fSyDbpoz z@P+XJJp;?FSw%3@YWd)(xtH(sG9-9m{Q$5}>}l4Jil+kNe!NcVK7GbXuCYszlbT$c5FbnyM&!#gcJ%5X=fBm<&J)Dd0 z&N_uRfp#<6l7r{nY>m4-Y;eW3+0`HgAaSFBlmpwwm4xaf0Ze$Zk~_Bv=UkV-MnzILc! 
z2qKZcIMkLUzI-<8Iv{og%-lQ90DLW;cO3KO1-K7Yw$2MMSa412abBmEIY-uAwI1ikvDxL1AJe-uR>s?~c9h zRQophK5m$5_VYs<6Ikd)my=fC+7jN=6a38)Sy@P?u7X>&hZp~_Y39>UTf`!j7B;%M zt~8By>)H~S)gFA}`@5p=J4L_#idFiO^xW6>nvmlUskhGcEH01!vGA;X$r?=JXW6Hg z!Yuuyry9gnInmVNKqt6i{?5mnhd+JZuKC4dA);-j3ay+KeLY?v{ zQFX(iPIsG_n26Fdk8Nu&CGr`+O3LzFC$1eT@<8hF;3@f<Q$U&apNsjH-%EPlZr(8ny9O?sR)wGWTkk*;Uf*u<;=#x(dvv4eAk|J84@!Y{-0 zG`iKwnCpi+zlA@v)>-WJT`OGdX>q^h8Db}Nr|Zn7KOS{{*xgd|K0yI7s6_XJj!vx) zv(6_r4L><9;qZy&!=sL!+!76Zxk1l$x~V4h8{wlW)BI0yCZn-jHCIDq!f;Ii`AtTk zktEB_j+RNe0mSEnjjWb$PwO4H+ruLjVw=?4bN9rZX$(8o7V(yf#I-g_rtRF{%WUYn zHs>?7f-aUEZy)ijdHyn(H18uktah_369I$a3P;^|ds11T7*Ds)mJ&4azY}c*?~CNj zT&hlE_v5G3!4eclKHg#^zeiIiM)6R>3Lr})tUk&=QL^(^t89Km$>`F4ZPk|~SDb0q zu4HmgNl&iK(VoiWfOx6v$$!y2jJ|>z5F)-~hC>6+sg|F4UHYPezYS`KtrW7$?ee_A zk5x8Ss#9hg8SpjiTRi5n^Pa-qRpZwhZyqWOY$0vB-#WU#_xAF?nu;W#UyYNTeo1n_ zC?2duFx+eK@mJd|crdS_Q}`|GPM<#x+!%vzSD zJhj}{J1d#FYOmP=O~zdkLj9cMxO6-i`Q?TGb)P@^%FqTN6I=KGaH_Wn;G?`T%+OD@ zdb6}Byg0nW;_S6~P1&N4-KCDFEYl4Zto4;$Ly7KvrWP^MvFl+iV9Bq}m9-c82y4$^ z%ZrL8#^*?jVa9*lxs?d9zJNM-ewrCs-!Ux ztp_hOJE=c9(idWQ{G(jk#gdvqtpL%MEoedrK1oY>y&qfDZ&L)@WEG5bLFyqBZrHZXdZrT2H*Vzli4xj$8VCC*m#JL0P`Fzpy@VF*LhHIJ75?s54HenWcNmKpTbiIFt zItCz;9$8`iLWfI^;@R#ayW^L)V4ka8O{Q?vUiZDdZ6B->bMoM|^KLg%-5=Xp<$zJ4Vvv9FcoiEGc6$jTpL^_r{ng9lloLqpiZKc8$? zQ-A7Ar6%{emR#T@;3~!6v$_5c3`fbisMSuC;ogiDJ32q7uB(tcV!mX+{kc(>&C4aL zg)KxqPh2}6%51(yzi#GmVM369+=Cvz{Eh%((3;E+itfIrxRmFlB**Wo{l_i!(s}dx z=Tep^Iqfo18oOljq%*hX;mC~KH9@-{a6T{iPPN~aBC5+9zMzO}QTq(ViYe0{YiY{xB&Hf5cIr&?u?Z8@`LH}jA82vx0$7B05@ z5aQBV)Vri1QV6mVZ{sSxZVt{%V|JasL7nK|ed!Ll43ImbIwd0$4Dmy-t-J_n-*bA2 z(A2C-;0X4_zBuRiJ|3*}B$J%g53VW*i|X)7R#pzaEXmU=)U#;F;7mPHCi*Rjc_zKu zu~}qWv>o5E=gdQWul~_hTpyTOVJ9^m=xuG%>(b|k13v^02^pmXh{BOXul-cGFY zj%(HvCWnKLd;5z*`rV&Qgt2fs+M;5Vkwi`AO8flqedo7}8;S>a+|dz|{3aHToz7$C zHuf|{!mDjg*O2WjdfbK4mS0O90QS*0$*0-)!8d9u$9Ia?`MD zlYR@o3_GULYKcRa~Z5Mi731D3r~5gP~i-o1KF{gr03 z=hn6^>o4s>zOQ>8dEE7rI$E}VztZg0tM`%Pwe&y7kdO_(n6m1XvFcD^rtY4rimW{A zgG-XSgD)3xy~@y26%wRWsAns<)7>)2T58&J|Ky3_u1kW2Rq8U&eUNe(sMmH6p1 zb|wyw1z!l6o0`4Hwo=@`H>e{a30SuIs?_zpYyP%AXFt{%Is0}bZIt@@<+e5w zOM0;bj^d51YsQT{g^8^6^mb&Sr>o>=P2DK}U@3TexiW6hm6_|2`Dk5hiU~smyr`wz zl)>y95S;UL!I33zi#ik zuUC}hk|A@9lx^>XC(x)~_-Ul9!DwP<{zKGtrk~*d7lLgc+zi{|r}z1J4^k5Rh8wyZ zB`g_Zf$n0ax3k^*UVFXn@m)p9@%(wU`)7@M&Y07EJ?#>=W7@tYNm~Xk>~{b08rYLt zX6=ccuPfGk>v8@RtfOey8G#yV#U0s<<&#j2aq_2L1JCbUA$S@McmBW1i6n1diiKm7 zeX?o^Xaotf+iIT#45yCx%2e_vQu_PqdN&7^9R2#wnXYv|R+>CIUGDM2aK)aSRD_*P zN2TjB9ktUDcNSmm$Y~vLwDxaz_PiA&b+GEiWrf=ZZLkav94xtLpAho3rU&YY6Wc{^ z!mkx7k8w@G)6?(31S`~d#w=WNgp%!5XZyV>G2#=__H=6iQ!7;1_=Zhuo8POCDjw@k z&+Lw3xO6SJc;v&e0@H|x1(%D$1L6{bcopY27Bs~#UUI+pbobDkbM!jl*5i^HrT-fN zgxrQSws?h%1lm!P#fuD8#KL?Yvz#wKo6ayAYkoYv@ako4TjMP<@Lb&q%pMKl}GGtrPSCe_q@Qs|GOv278GvOz9`3bvZPatK_7pu8UxsHH2 zB!`g=amqz3p#Fkb-A${9gU7x%yj-O7nGGg!;3s2mMDMe#&Y$T?Ur^6d?2&*YziCw+ z*m>|wzI$f0pP(6bb*EeLEY5SJs{xo7jUia#e0oD z5vU#=x^<*_@D}gT&^gqlcId=xTmhs-=p1gzExo+5xB7iKMdduK^2xs+iF0L!E z`~<*DuO`L2+D$*kgZFbMVUf?-t(!d+3F&>1ECnxFiWagR2qyB=GHi$2wt8s1;5~?v zSO39@Ys#LaTa({ayK+Hi<1f-ILmBU8+O`{@O5w}?l{Zaf4b&tA{B92#6Tr!6g|A58 z8UBRR9+Dl&$K6y3t_-$&5xEunHSW#O_vxejpkLV}Nn%_y6I8sU`hVPQvZ*wH4e=Dz3}7rAx>8RZgm1$xveNqryE}x z;IzleB})#e?4G#0-@wd?g&+Uy?m5C1WkmZQ z0eUOJn*NSS#;^VVgP9^X^BWR8W@Q$r+#M2eqN6ysiqY@V)-9&a}~ANdg{{A zvdM|KW&03i{ABdTt8Dju(_}>8F-ux!@16*^%;B`V_Dxwj3iUr&@yS5u_w{s%?@#@? 
z8g02Fs4u0@3_Ha6KP=P)B6!MvItfg8m3Ga62WZ|r#tW_ar#Of%iL(;*Vd1@rXzVv4 z_`h87aYXRc&qQO7_F7gON&(r1B{?#0^*h0Or1Ivz8_xY60c<{lfSjl7hyUOx(ouXU z_WjqSo&R9P@tJ?WC3%9zE=+%T5O0YriHN~%^(pZoU2a<9-lr=u(>4S+vLb+!z5t9$ z8mt58W&q&03VP$xPaGQ0VlH7()a;u-F3#eWbJ_}xG~Omj%z_XA3~bIJ1^LyTv=EVP zE5XDLM=q`fN~bWC=@5KuEWEhX$MV&s&26ZbH;PY{%8mt=mFY={&a5UAoq1VSgfPRU3uG5zSPFXO>Q2P|28$Kbyw0)v7z^u#A zs||rr5XSYoErVL|F-W4k0tio>pieP^R0{;7s4U9LNrYGou==)jgj#MoTYr(N+aE&7zJkvX|{zQFs-2#!jV;FMdRE0+=}b5gbRKPvNbzy zsLgbN7U72dc|k;L)}p?pjTo z%tBaBZ&G&dgFu5GrmQa`A`R%ete>;{7bzezjF|{ADxcqNE0iY%(bba-44XP-k3*R3 zDzX!k!K^}`0N}r`XqktC3^q+cbet2wLKEGde#DRf z)ynVfE~*kPp0ozPfRs&sJoM%Vd<&Qu<~9lutkfyg3F>V4^({!L1c6crN1O5$Xj)9s zev8Kw$kqh~7$aVOe~)go>yH(8F^%>qy9F?*f4*N6Mnf)j&3trH#Ylp8sB!sXZvhwp zAKDiJUA;}@A}njOT~aa6-Y{kV8A-pJH4eXlc^vBZ8`J%>Jg~DWupnw`nBz$lQ^lgJ{d~j^Jd|MlFZGm*Vb=CgM2CK z`2H7nr_3Rb>)lVoSZV=ucju3oO&%}hFEHeqdvy&lkm^!0C_CwQFBvmi#ao!~gdpLT~>7GEig^1BWY&F^*fuEu5_*iLp{1ukH0 zUAUDcgBX@=H868p~ zHGKv0?$}`Wp|ZGM%m5LAeBt9^0P&AO$T~mA+BE2F+sCx~msak6CC)sBHfE11NAhvk zO~SAoq;=ZYcX5FL2WGq43mpCgAokJ)R}?)mF$Y2eW+VKf;)X=u?_v+pV%8>`048I{ zb!BQHG+;mBf+1+uCRb`waiH<^P^=Rq0~8^9uZsaRLg<^^@8Iu+NXO}Wr)e?o2;HtM z&XWf0K$|1q(?)cWDB%P z{3)B9LxGE4eI)IdIo=q9l_}H4VC=~VfwqaRhf#XXa^bnO5!$Z+28Ln8-r4)K=!v2Z zafJ1EuoN-oCPZLv8S=*BeC8?Xy5{WWEDwT%fjB>#OBA*O=LqJ!Z`pxSR*g?XCbn=r zZFIOxcK`?>f3HNlkB>Gxwb$}}I!*nA={VNks~;yhs3|f3^`MLoB+6)G$Bboq7B)-G zd+~efjFsg0jElRU;wp_(g;vdKD1orMikXQPT-%TY^P?SovSFsna7TWCbGO9!hHhO@ zg;{eD@YU&_;fVo7LO^b%^?8qN$jb5YV9=?ZK%e-2agnF}y|%zwm{-Q93X=!6RQ7UV)lJ-9;03(hpLWXr<2gwqYYv@S4L-2G~bpFSk-Q{`TN85z>P8StL1_1+(Wz^q_7!x7VhE{ZSnq15gMVG9_hKYl2}y z`>Ky$8K2q6aP`Le`;A^KMTgeEZ$`Cjt$WwD@9*veMFpAL{xv96CE^_!4Lk4~0Zv>^ zQE-ZPJiXf-(|3suO__z#&qV+)g~Ekt0Zy zd#%R3ty(m_ZI&m%|M15vi292$myCVAa+#97WliAf~s|_0LR*&8Z=j8WsaQipz>?Y)NecjjY zlU`pDLDLZZ1xyfqk}bt4f>2-|tCcK^%(ih*F<}1$!u|CYr1>{N9c;l0Oz`@8TymTv zN|z&Is;fbGxeNzlus3;n@nHMfNeiuf5Efds<@GzfoK*ZMl$D#n29Edx*+ByoF#UByi`6>;KDdv=4=~zm}=~ zfJJ7PInaWHxo@K@B}S^qqKexv-v6w@ac(2lm6D1gf~l0?+PMN*ytOk zh#7y^&$MMUvJp%z;z04{CvbXyE>J(-Tm&Nce|^(z^9}-nnliMaDmLp5ltM(I9W~2o zW!r?^Wr%ts@GHbz;&1QL&YMbsm~{x4tNq~}9)G@`LS2IU_c+}^R00_@@D`R_%%FL* z5NFG{ow^L=gwnoY=#Oo$NHksC9Y@X`x17_F(k}Q9r&-(%Iv`DTObSU$RyIH=D3mx2jC{;;e#~eWX-K6@N3zt^IBcF*$fuu?Nlh;LZR zaoC&4oV9nd_G&eQ)>MFqSJhOJ5FaNtoS+8oZog5X3N{-RR)y^#6Dak6Q6Qg0u79?J z$R0s(Z@N4R;O`b}p;K3V@S=Q`-KohHZ$U1~Rjh=NOHW4A!o4Rq5&m1qcR9qsdsjPu za?m{Gp8^#mvtJOI`e)M{Nlq3&WtevLx@&yJrOvYOuk>yT19-Ic1q?Cz{N`EjjgZfXI@*0$K7FLcY%?v)*V3n0q%GOmenAln7VAe!; zdz`rj5aO#{9^0>#wjZz1V+RS;Yv5KkI0anAUHyd`m?z;@pXL+?((g}$>WUcQ+XAD# zqTcafvz&DaZz0k^#<grq5jG_$Wj>PYS{*K<|bkDDU5OU^!DgsGS7>@4j~JX0aam z2pH#&odF>Gu5$>UK?CJU7{-?6@U6j|pYT$v;$xHmb+h{e<< zl9)c;uOjd;%)X=0fYM{mGA)kS8I>gwO-)FQqYS-#`4YF| zY~E5bW?p+{1QQ^%z(G~G{&Mc0hA{L`ke+X_BtY^%E^{)ro;j=8Av}j>*s*PVGc$`C zor2ESjTsE1CXm)v^UyFxdN<70fiOh~lvp&mt2{?D2o%!JV#zn>avsYa2Affz%w~+! 
z_nJQ5m~L6%ojW#K-B0xcnr>sgFpiILcfrZO;SYyDJzDMNSWOHtpM_8%2i!vV%6DED zacZ(qwF=ac>vv!U!`BC2~QJu?6CL zKlLw_9NCu!Go(S|_`|iQt0Ii!d&}}lS>Oe1%F^jg4lFW;=B-sJz(NMmwRtfi@I-s9 z!uZYKAvZ70cmmJz1;v3pG4mSkMbenrvj$~?9ogC`LX<#LGi2WZN{dz+B%3SWq%o)| z4xySfqDQ8y&~pg`xi?;4(Bk%@$5#tF$&%N7j}bWn;?y(rmA$HtBcPVp_YV#5j>D%~ zN(K6#_SH&y8od7r`~Ckd@PqVHXOn{B`qFja#6pF}`1<4HOPB(2mXiHIQ`waa)!k zM|}&4c59O@Yb zYj*RF%LDs-4Ri@ZVYsunpqP{PAP&kV_Sa#~n6Ig9W|`D|5uM%4xdX{;DD)NAU6*ia zvQs|t>5GJS74=uPOV>@ruisXJY2sGKkOVvCaW}gO;0DL4w>+3d#~^`y*^Mo*6%L1g z$kBT|UU;{z_2;9^cq_*Ga@PFi90d-F30VXv3UL+to#j<*y9pFWEEm#M?PN>Z)fY?MeKh?=XW_*Y*7L#P`A7YDL%hLSGLL(*Z3_fSU`orGP zXr~Q(ZddrBAH=W{s9!)e!_>D$8*C#FHZPV&c5KyJ{@&S9Jpl3}R?Le82 zw!%V5Y4GL7$88PbM556Oc4tuM1%~&%!a%*oJ@@y|U<~?$A~Ya9W1&#JG<9Ss5u=*& zblkmsMhA!yiH5&)Byrcyx#<+B0s&B~P*oKnW z3c6f&d+#gtKalgO!H}GqigiCw0TxA6DfdLK{4CuA*0z^Sj}Kgs0gVYHMl18|W_hg9 z4~qSm?efs3PUuU^#wTpv%}hDd8DME8b@(hO3))xF=kz^rAbuT40R!Okzc!lz-rG>N z)Takv%gWZl_1y$ZgD|nXY(NOQ3?Os=r(I0uF?TaKycaQ{W-@mq)=&9tG~4xjsfqd* z{Oqx5-Q^q}>O~ML@mD{bF@zRNe!sWgh$~2!Ul8@=NrQ+Wt<$slV6#`QZiK|k{%56- z*m{w%%d2#&*PGAjoBqHgWQdSiVNL9MS}PKX^fzGOOYgku#S;5VzQ5&_his7 z@)=dPBAmwk?&45~c$iZs1AaI70je( zT|Vn?k5#|g8~NlbRd$7CWMZ#LmeV#OnS*kfHF~q3y=fl9&0g; zqz{>n_d9GnqC(FW3|1m$nrHEhr}uF4{am|Bj;jANp72hdOp6IwL}~g4-4G=6{;#buPBO@h8-826})v!?Yi6xoLBG27R}}TS?XN-L713 zQ?V8HJmjD;%Xs!eo4cS?h`Wc~8L}?n&1os(rF_KBb_^-n{!T=_f6Z=Y_xy2JDR;Qj zPaYMH&I)6@LNJJJdGizE5Ygqe&jZbq%PxgbM(||shI9CVjICZ@wxn1Q^Mm|{x=&gw zd^2q7`|7LkT>OhM_+L5W3JU`DkWNb$-(uo3r8DF(ixe3l=iczR1X`nFNcp0IqnrED_Z9`bwdrtIB+R$bqeFg<4@flL!3e~0Up_;)T+!)x?zsGU-G zg8j;4w`=jrugi9k%u@hxe%;=V8)HwCviyaPqNQb|X~>S27VsqFlr8UHsC@api53;& zknjc#tA@pWSYc2WS$8L*n9XSLsp_v{R{?zwSiQ_Z%wpV=2T6Ug&CXOMXcDiheQARc z&FmEx^+ej`;aOx32Xiw-+3#ysy0HbJkRBP4u`*O<+j_y9DZ~-l#_vAt)Nzdw2Bq&UW}aM{fZBU~?CFw_{2Rt4hr@d92E! z_IK$JYIpWR7G;dy?!I&fgzj*#BPXl3P+EI*tzQ2L^;8l z<)F;sk!25XqeX!8-a=tV;z(7pnqT(GjvLTVERo-)Wrve{#=#3NO&DGE^Cg@kObUud z&T~Z5yQ-2lAI-V)TeFg?i{*E_SF@Vyk46}$*mbXypU|Yr-SYtuQWubXhR2VP8Xm(u z%>Yv;{2MFrIICS(A1P19BQ(w7mp351U&Wnc3``;FxzRq`&jzo$;m#H5Gdq>*W(=d) z`|@OXjCc=emyIj>C`tY4QPbUMo+CYbh#(F?3ad2AlCnE5yh%FnqcX|Ne~AEskf;Qs zv@M7ezAikSb5+Ae)~z=o#m2yl_13=BcQdHT@8s0bIG>b&nk+#(FbxJ?2ZxT7w}W_tRO|vxh-EwWLl;? 
z{*U=wCc=P3qx_@Vy9dS_4q=K#A`50xtgn$c*TYT+~?2`^bpCo-msMZ`T4=Ane?MbJXn7m>+n8-m-5pg}w#w)YknEz}1uX!8;>`%^~Q%+)A)D$wZ(Mrre7!uhwQlGY{jEon2X4^H!W!(X70zDRAi z?!XZpszIM|KpRl+xxt+rHTJL4h60<;l1)i%b zWWs3KXDR#?Yq;ren-Zd5e`YE~l-Zr}*7x_rXfdcXqIPib&gohFxLU=5t=O3N+TNk) z^GtZN+jOcE(zUC`h+KE;+oNLtlXZ7eqojO=3cHz&>U441|HIamfK$D$Uv|6LnYS`z zXq$(~P)V6nNF^0z3W-uEQ<14nk|I%P4r$P!BvT1n8e|AbR0<6Uv6Vup{_E3lPUrrg z=R94{z4vJE-|ze0VXgJ9cV*Zq0o`WctdEEJZ3N5994~_`)P|wc+TNShwA1s%DwH{j zd#4o5q$M)v($hKm_MfjZNJ6nWh53HgX)ao@d?dKYmLZ-;*J-H{PfYh3mRhicD4GkK z5b~-41bPeJwB)#V8dLvCgoeCK7Jh4Je^=r7`l|Ru==Q0cL;V0=2Y59KVQ_T;Hn<&r zl0Q!!{!C9}L8;?`&`NyF%FDPsNR1Csi!EK=La^@*ap*&Ko*~ohh5^92k40XAdbbR` zr!c@w#q^SjT~2xr&sawVm&ua>gi5Nq>*Z{eX&3=4J5n$_fF7PqwEKS_+c63R*tUBL z^HbT@cZ^$;_NeC?B6Rcl9eIgN~rnr2jHWr$j%4~XZ_G@=C?aGGT=>(R;vJ>I~PR7TJbxnCL>`l z@%uHjBj4ZMtM(Qc#ii{t*WHf3*46)^`3HD!g|Uan<}7iPFi4Ql@wVV>d3(Nm;$kKh z^Yjy4w;HbP-%pKFH0C@qz)xo{peRqs$5uj9y^4x%RCN;5OgGXQoIn40O|3WAM1l9A zy?lb{uEEf0Wmh--iVMb+1O;Z_GoY2>gT?cJDfR^EXm3uk|Ju_udL|a|6 zJI4twCCAl>MTG^~<={iHzk^)5;^=+O*WBK+PSO(E?y(HVr{13dx+!;v9(}@DyYgs_ z^CaJ%*_&z$)bE*+XLL@HUDuSZp}XvqzWw~bZr7d>aY-CYKb*z=vtD!4HRo`)8ec)% zk|BmeKci@e1n#%#>+Yq$AF!Y{C5xLGFDaLb+VTcC^(2`gg1R#Vpxpw~U$RF55iH#M5V0o( z4brBw7aMrQp57(uFx*v3Xf62a-uH0=SzdX20Q0#~YbVY+;OEfML?Lv_l7LIk!3m-j>s-SIQ#iRe5E22ZTgJwSz}|Xlc@X}*bQX6_$0IF?vk6j%$nQ%_0bsd*jN4#Q0+7P6vtEsf6D^qRlO`DJ9hG=o#Hg37@ja^)BrWBa50?KtbAw3i z_2aIRoOTVDoF;A8_$!;RB)E;`7p~5JTkL&AbDcX*-9eRAis!csfNIeZFG6q^$W!uR zG3pIzvRanrj=ZAR8J!Cr^HpI@^bC}3c|r>sG7IrLtzMjmNa*|64aG6p>SwXGm+e+W zV2QTvt7GCUv&x3UL*V-LhkUaH2(E+oD;^SQJ}>%H5b?{R_J~HFlJKY z(cjsD@t8`4m_!Abu>;voO<7?t*PGeDjuJbNqD*swoAu!EaC5;*Vk88Cz^cN}owX9; zMiIAe#_pahK`1!L5GUSHd=)(aqlF2z8y3ymn7obuA!7tn)Ybuuk7#B94>!k3_8I5< zn`{s5x}fk{v}E6YJ3Ph|wj&tFIo|c@Cb*;=XEes&eH6#Uc5BvaYr3Wu&by+aWQ-Zb zjGHJ%d{s6MO@?E5VUou`ak1T%QrWp^tBahD->Zf1073oh+BVXS1=hNAIzGqURRF1A zbzl_e<7Pga$CIlV8%}Gm!S&4%9a1qXbBn`mbsifVlLyCO@-X1X_wba7ycv((l=)s@ zvHJOe#-lvxT-O$jgtOZtGtR>FFeAE+Y}k&^%wLp2^d>4ly^;tT}Yv zH)v|o>z8xPO=*~ZKXwkZD#UNWojd&3F=kRv7;~J}7|dz;B9mL@Woa?=pnYfZ}PUPJZ*NM4eY-vIz^_Mb~degSf|sFO{BPM%uS5sBmsh1;M89uIe*V)ojC{e_2tX=h!2 zMU<_Pm_~vxMuICI`-e+TWWU10Y*l;qM!-l$(j9UvzN19-n z2!O#h_I2QWWA``{IQHzmtdsy_eU*+vki;Ra@5ZyluiPZK0N=-yJE6jYrAD>BQspV$ z1yM{DPHJuL#Cs;cQi9>~c0{{_YWCVYaQU`~z)fkYzS{YupJ9qwD9BXyRCy3@fYodZ zv2snY`!l2T)rZHZYob~W9TbquW-NS~4=~dDXj!U%E!?qIhwr`a{@`@>o=5kIYZpYB zM$hUJ8z!%tt!vuL)hm4;b}FmO8dt1-)~B=n9&dgb*xzdFEWr;lV>+Jota{ylVk8NZ z@wh69ey^43ntYM?lC)B$_%#Tio`-Fkqn-Nuy%iylw^`o<sa@4S@~{L}hX`Bi(kqO@zsM8wRy2;D*srHa zebLTa5`Yp_1)Roe`#WaT%?4k6lj}fbq_DkSq}kWC!ZyPCwuWlrKOF@=lGAb|lHQ{M zi|D>$ku+gNLPGM@?$M~8IC+ZZ?3jmt3Z^SE%ss5n(qdfkRDoh)d5iXA95*Ka%89y1 zS@1$T3(QC75_l~Ks8KH6tI@KI{D6Jm9qVaf@2VPKa=c!5%=;IdhL9P zZ(ve=B1BEYVz{MF$ml`Po^*`oPAdRMB2nKZ30Eb(y@E1Pn|8OMZBywOMyJ00sJ$K| zPW-mU#gxCkPpdh;&)1d!4>~uBDqq8Z&iMWvzL$ux;mvRFEPfYM?G6+)J1adH`&*D2 zns%+8_Uo$z^KjY}>B1zX;3NR!tW!7efRkZHaw#n{kBu;>08O8Zw}d^bTgh9cgO|&| z{sv4iZQV33`jkiYns}EeS1n3IE>`3Z`*8}omHSc;z)2>vLx;Lom|ioDzuzSkJMcS? zjTc74!l=@}&vK>83(8WuN7q0$Z@Ru2;!D`t%>C;PC|IAjlKRASyy^k7Y%%~)(m(V& zBsU5(k4#?C$2nA|3@DKI!^`Ew7ZNqk+^@Y(| zRkoSl+l)dY(4kUye6I{W45?O64?dw<0mD)YSS{$8b)u{`4ZA@`1T= zc%jRXHqU>522&$AQ7y?C$x;VoRl~CS|#^0dObWd=UR>Jq$ z@s!cdE1~_Qutz}}__Zg(v}VwiT>PH#n3{cL}u6u;|)jvy>viAg;mBp0JN2FK>7^>%;7)1m}C^yX`w zmh$bI6mXF|d3S7ld0NYMy1cMB_bcji(2k_B@GEUZlAPm&l@jzrhAgh?{RupB-02^t z-m&qTbuqlMdzcht-<&p7$1tSK-*ZA{7enk!;^3 zJdjF&kt3QJ&>4K{2R=L$>1giWC~V5p@H^p<7|qJSN7tL8q$J1LB^>;@@93R)@y7ZE zq@2Wlv}ynDFo)@$EjbyafDaO6yW9%Cwq@u1gTFHH3>r?OsRwitp@7eC$RbLlpnzCR z79qiE*~B@lr#@qs_lujhJXq|U*E3LA(BxILFub!yytagRqe3-mya!I9ixcN~pPVS! 
zI$wDLUqj2~&oF+gCJhY&(`-pMG&m_EUQfI?R7e1;`Q7@`*)k|1!n?5}dw`LHiH=Rr z@-Fn(qGnkD{lS%gU(!wnGxuOHM|SgRv;C??${XH97QMXj2SJlj;J6lqc^osC-U0=TK9xSRKgO)>`nrCUeA;-Dm8RzvF40y~0KgJh0Icz{$+ub6k(exHX z_Sf^UbZx8H%ujl-2ct*qcT&7gZGjM1VGMLt>shS;t$#i_gY&)+0-|o}iM>>(1uW0e zE@^|1$|G9qo#+yxGJl#$wMfh|R3g^}F5j1%fa9E6P0@!GE@0 zv3PoRiD-7)%vZ}!&d(>aOCEx~`GoaE*)uyRwD)vAWnnuih_GwaknfnXa)KK~M$nGK zqbH4aI@8KbJaoB4Eg(=%W*>+22`LBLCHN-^r1~18qz4JR5A< zWJ;il&}tl(eN-&^<#$5Uj+o4DOi@LA(~3p=7O5r3m4oiEYRK=P=_3&AI5eIt{0rnNxJA)(M;NjBZ|N=NX3j=geQHS&sQV6o|zm zsTO1b+wJto;s(QUw9#igq`|NRV4ncy63JRzJZ&$+0bnD)2! zH+h9|Ts}PIPYAw6#nxOdQ+os0_>OO~3o?z59}kXQ^)sdHrNTr#5Wi?IA-)^J)P}H7Z_kx{=$z48w|dI?D_CBmW#r zEna^0m+(HP$Zg%#PR=>$r{L>txL})KZs10RzXa%;(YI;;ndDn6+vwvCP<|a!V zfwOBdS;006(vK>H{a1B0i(DD~u{ zzZ9#Q_C!zP&x@CiM7Q!hPu6(vTVf}Pi29zd>C=2mhMeOtUg5E_2JObIoZSO8Fqb@p z6yD>|++=mtrR+(xi&LMrn_#QV7@EK*m6~6E<(Kg+6D)*V;T(9RbzhJ$>+mCVvAr+f z;N>X6(C^XDco-mLe6I};;tf`uq@udxsd|CGZ@IKlgjywq^?s-R(3i2%>(M--RvsD) z<3yIZ-HTU2fhFp`Kprb1AV^V1QMT%b6&?rMi9#mXP_6a8cMGojkxdkVn^bu7zrN9RQRKd&d(u$VQW)f?0wDsIc^iCbWLSn6FL1ZbRd5J4V<|lmIk}xgm!5r)N!EsnXMiF zPa}!9;B0{lC@QU5_$Hi+PpO1Y)Lf!9^l4r&JN;*vDoSG^n5=f1r&e~-&~^%P+rn7%qhNJnByN-MylHM-2qAgRKzo#kK(_Yamg#!X z=#@{8eQkCb`5tJ8L;l62b^qYH-+l3sGN-e?yc>GDz{a=OZ%+_7+OR#a9_-rL3uj0a#FL=STR6uYo3`D`#b6 z#^ly^SDPhgUX zmz6o4?N2FsQ08`ba#P(`ghP{lhzl-9Y41SmZmVzt%WAJ^PiK7Z(bAV(_^t$q(xnd# zgmQsIU=_L)Wn_rSf%~#nWM0e-3l6?h!)cHV4CacGHS}2arC~SvcMl5Q0v7S7Wpv#_ z5&S~gSEd|J2mw+}ed+U54E^!Fgub;4dqc9^g^t{f6*A4&C||VD$!T)m_05=aqm;DA zKT#$)MfM7{CqH4Jl%twdiE5}|5w;67%y@0X*%7Rb6MpxfKS5@y8Iz*fCMw^~NwqF$ zvY2P*&VlCP#f0tm1Q)aBa+7ftfI8{XZOj;FALXa3$?zn`f8SK4EJ=LVtF}us3=B=m z{>X!k-LoX?89H+}Jz5Bi>RhxZ{vPH$?sVP_z^S%z?fM>xXwdwpboxkVbrk@C-^6*E zUXyOJi_CjvTzuvK;mu;Ax}kd}h41@*oz#FnrtHu=D<)n040tSRr(1-3@2ll3%c*m> zzO*su!=IPc`KI?7fvR+gp3sJ??`d=Z9f{=pKnvR{t?S<~NN732*LOYG*Dq6w8toFc z`Zsj-vwSx8MP~T;&4i+;#5{OKdH7;8L)xS@12?khFbfZS;@J_laSy-^opMGar^n%> z?8uiM{b9ztdDkVUzXWxV{%rvSl`=+S>|Lr;Nig@-;2fG`)8@ zo60~|&}=z^PB%NlFHrGcB?bd&C`=%c`GCgQRbN@#kHEVGJ?gU388PORmy0v=ph)M&L3a`*&IrZN))X)f@|!rcm>AxLHtkrr?E!);NnP(}IOLljF@dEh+i@ z>MoDTfvrCuAakQI*($Nj_9(GH$QRIZD>9DOe)L3EK4jq4Z9g!Zc~7QSvUaCW({R?8 zYt(xzO3BaXFo6F3z^zzr8Hw8(&#sDHLye={(QMJnCHJv$oS*nr%(K1%8UOr!Rwuu~ z31d|IydFcR_1zJ948odu)2RA!EUZ0uZ)Zi4+@Ve!YcJA$x-)wp((5u5-VF?BfHxG@ zzm;16O0&8?!*t@;GvvG`a7f3zmML=9gGa-gx-^$R9r;pA4)uALH99+nIdcQ|+p)AC zfx$eCY4P(JBq(r-3K|<_A{%u(&;}|yu0-_@rJ+w2J=Uo*o7g^AIRvz zcV3Fg|HR|#$44g7i9C{IOzFpv_Ha-cY`ZNnFq%JzS3*=il~^U?F~6_qR+>vl{BVx!30F^<-}+ob&8xxUxC3;N`0QjcI))|P$yst+;; z1a-If9hstRZ9E(USWX6ets4GBzyNyi*vPfb<%7T+)mebNEg@R;rkIU(LZkp{WrcSH zs5@7(9{KZif>1)ywlx1K77fkF5dE6lCrtT-K%4jB@wv$@sz;ELl{ zTLKt`>H2qnc-?WBtQ`Gr%`g}3$PpIW0(p}t>P@+Y2UhfA8k*}7P$}cNocc!=&`L=Z zT@TT~dF{yX3u7qTJ`%ahhcBPJ?s6Wj>$ZP+X0h-KHG*-vp5O@if`qYl*mx*D?_q1R zLEL_x$9~i9dy*}v4yvQ0R4!tJSZ#8nCVU4K<( zrOQY*PcsVI>Q5np3b3H>66l3Dv#X=tNn4P9P~;El&_P)n>>u-Vvj`-M*mi<}-^f4FkdfwfMI z?Pm>kes`)5)y)ac9$p2b4wHiG`ONoKy1N{Cvc(uBCYY2;JxkZzS%(;{>@kJ^Qs&(@N+gxFb(+gT_t;L-T4pc9~2EG;rE}&$h<_})6`8@Oer#qdbo!Z zabkbc$U$i&i=V|n&hm6vpOb~owL(YfmDF?Li#hCLf{XGo4Dw0#O99D}*e_z`gzv~N zuL-fyyQkOB_lq~IJPp(Re~Lvb3gK(icUnj2rsFJH(QXL_CQX&Bx7N9dB#tF`X9c(N ze|&=V%`pPgzb~5ZQgR4I{p!1Hz_4@V!7y0bY&V~`*+46EjdpY!dY~S^!ol+0>R~## zB>5fJ{?QqY2apT}$O7`H^G;DuSa8fu&#n%fSU6?NhRib}KtEW1)MGk67Y+K#cdqaD zal|tF`|9myXF+MaTsxq1sZS6{Inf;QsB~jks3UA9r8rkCiU-X7Zrl0k(i&b{>V*1;5ck^ zVdcAosXwGnAD;IU!D0oB^NydsFEV>MDW2TrpIYu8m~Jb+A+|V6 zeyW@qZ<7`GD~)HtcD%BhRgB@DCV1(8T>id`Sj0e%-EChb7lyVmBig0+iAKl-OwMJ{ zBk;y91abd{EPlQ%);sdOq_jjCk%_`A(E)tgcgHwf=KIARN2C&*PYL$?45;hkV(pC& 
z)yR|up7!o7E2m4B0HFV5T;ry;sJCM4q%AMjQR|JgutfG|*H>Wl`9H3{lV11>uWb|} zV;}AQZX@gtTW@6JYbtl+FTeQqb(kl2mz0lyw0X4hyY(z=7eZJdFAe?f9Y1k_<_ey^`M&E>zJ5J-Q zM4#ZuRV2KOqQ!BY?4BRTfhs31uT0vy8^)I*1M+kspsSY$>Akxc)Gc19a+iCeP74*)sD8YO!e#_#e`*erpn zdFW>20jdRqgEKkw`cNeRm5iSKpJCK<=+G{1#-Whz`i#vw$%RI~?Hr@?W7W?OE~AY6 zXaoeh%}4qb3(+QU^wHj*$m1L;83e|-!6B3%&uqdiPwC$ndjEV43a9fk1f3|Sv_1B9 z#?c!_c1EDy*tUP}tJ$tBj!)uG-C8DCnuQ+h_>c_k$;Zz98TDmf`}GvYPEVQp8+QGf zUTTP1>FM&=!BMqH^yc^rb_5RW#A~xEewVGsJ^vtc54{CL3N}y=Et;<5ZI^PbYUP#$ zqZ-s-lfS!6oZ(cI6VguPC*0zkVP>q26AfYv_alck{CNXP!og_95+hX zEa5)$rvF88&j1SbWSkiKfkWKsPTxQ~Jnri*)CX&-lznH@VpcJY05HS5hG%bj#H|`v z|MQWxE@?+#ruc0~Xt+rs21k%zKDnZy&CGtzDf3m-o>e(%dopU;#o<;3M_#r_=0{j` zMYeAdifGvIAQscnmb9|&@TLDoF&Ui8iO8+dDt_<_FQ@(#PH}b^l8gKy zWUN>LjPbDm!|Ihl?G7sBg7V$w5N_M&6DOH%1}%Zk3r`%S>L22W=pwVWYuL(OTW`EM z?Zt8O=Vv~#fRSz;x5)Yfw!jP#LlD9b)ZRqfaR~kEVsuiw_ICjdQRNXch*W<97Y8z- znl&8x{kG@pYX}%VR}l9eAll0l0_IfPQDM92Haul>Ey^7ZQ_uFca8!xA;u-LAx_ zrVJEc0p(RrQPf*|jG?&qK>cx3Cwi%7@1yjalNx?qlng4P|8z(Nl!lLQ5sx^~bhar@hTqDwlg(@SD{W#%LNWYdDnmLIS{a$5nVs+#sV(nn26(VG zST%kxbs-tXj#o;cfm3}3FT^FG@LYPp=GjQi@j%AK$sIlGV;A{J>z zJC}JTMU7IXIZSqSspQbf1c$tgRtDkkfed!yI=+rVB}0 zIltM@-VdG5ho&_y6N@8>m>bLOlxn#Yj-djYfpv~N{wDwk;|(gd?2ucu@66{R_`tO6 zFCUFAbK)!$kQpO8kz!KuJBsNMaCz|dlsx%&86k`s^>g$pw|K+Ft{DF6SyJHq6-Ac9 z)|a=&=Nb%`xgrKTeBK0h&Ern2kYR1b+T-T)Op>qcBoI3DM%ZZX>PZc2wfxr7*;6QB z6NHn$KK4@X61O{d^yT0$xk$ZvJ34$-VMGzl*nwdg&*=&2e!EiQ_Cbay1)T}5=i zU*+V2Ad?YzRR1(`Y>yQ2juTNo<5Cu3GHJT?>;hd`uX#w;7bX2MZYK|2`hm1@ya{L9LfW~< z*{d?fRA-{4fO(uq?Ha$L@V4%t1TNnk;LTaud&#@}N=@9oyJunu-;X*|#b43`)+iM! zMv3l;rtT*kE~d4_Q9RPJmwc0p6McHCvWM>>V>YzQHUW_w`}@{~_48HLE4^%Mb8#*W zyD*a{W2@%EmG0k|@rZ%Snip`0Ly@P@qQVvr0vL8| zc{I0>jfn57J9|IP^ief(Uxw!wHjtP4tDOG0e5BX_%uf^_n2b{*oWFe$+_$pe2su7O0OzqA!D zDi5Fm$TOCJ<)iZZ%guP?Mk=c>9|2o#DecL`u;B8I;FAya@Jmy&w$%*-a=xh6jyyL}k87qLAi3rZKA75AjB2bu|Ha!PIAtk} z$vVr4Kqa_rWWSi!>7!>TR{n;-b-}FF*~egz;k^4g#Dm1P z!+ybf$scJfkYFD0`0Lq*lVV z*tk7K!`TVK-HiRDTMq=&&@#_3Vl3+knVkPn4M)oKGl3BL1YUi=**x~OU<7`}X3uwE zTQ=X;_1s; ztQ-tiRwD1-3a@URPK!PQk*Q$jYh&_>eN`eM2(l#lgE3`oVh&)5bnP(KTN?y?5&;# zUav=I_L2ee!v%FE2bo*&3AbgQmLr`$bum${lFDtzw=enfRwwM>iP<^jS|IX>Wlo-0 zE|UzdVc4=Ed_I7Z6!h3$-w7Mkv{GN|<0lAPEJyC%U7Gm>avM^z61&m=X~~uodC#i* zwY4F1yz*0l4)IMjRCdl~41}uQfgF$f%aw+jW$8pG0?*N@Z!jOF04Yr;|B;3yrUb!o zCIrsJq*MDGDt1X_TdJBGe*=YGpb@)Ix!DP**Pi~V_AE>=9ES75=8vl{tXxE_ieL2J z0>lS^L$RCYeGPTz66|ygtn1ivA7C}49T0lNn3~+172Sp#J?%fz4*7Nx>c76a5Kt#g zh)Huv;ax)d5X3JS%jRy+9%IifOVkp0XfIURe^XE`q*?Olp(e_H&zw(JKz@oFBNRmz-Hy z+t+U}O`Q|nhI=uMUs!f-w2RycUX9y-2~gzQcjHrrc4YW3c}YHH&78H6;u&C;$f{l2CB z`vipPBLh6+Vb+Y}Y%~^Jo}KizGRZV$r4@!Cb!ZOOL(sgwEZceY8?R_R4u3}uuj7ye zWQ4itmizA_Q7Qplom(J&CuDvT(~&(9RExBCf)|N67t{>!mA z?cZ7ea!8)+&~lg#dZlF#|8d7{lOtiFjIBY7AGg9SI0?5<*F(yxN0JgevhTx-k&Wml zi4Ds```GKGSH~&I;JOb3b!zK^b}$kb$FzCVG{|~km+((bqSwgCJ4!Y8j~}6aB;IS1 zB86#bZK5B2&^AJI^(tqMlR*Q}hGft_%G-SI;10=PeQ_qdmh1ncalr#`0bhD#tI)r4 zgcjkPE?d4Xw~oLFX6eTxE#lojgpfOT_xzBU=kLA)ZO1kIn|6d+DV#vw8eSIFB6+2u zZ#$Dnh)$ayxI%StZ$7>B0}_)v6hicwPP!&3Nhrdodx^rql2KrX!{Lr6Ojmc9y+`IM z66p!OAgkk7(WR;yF2Am{{FLWyjUovx_XVaS+uZm1$o2k3C}L*uB=W{qqjmjt>q#Ba zlDWQJQHH4S(1<4N>qJ{k2kdxg;XTbEI_n1)e1WgD4z{NVaDO_OnWiC#=gf1y#sIgm zMUH7HgDI_5B_T!;VUZlX80OBUvH?P+1%Qt?T)4GINp`KKN>PXETYx&{LK8 z29_n%%_-{M;6v`u*$W2cq~fKOJvlLS2S4pl7)u5Ia6m8oSBd_!@WIp;)HJ@_<$hu` z-Q)Nc8-+W^URp75g@IoyJUBMzQWbZ`4rxyJdkj6Q4$L_~ULNT3i4r=*JVb&t-OdX0 zq3v2!YpFAs6Z-&5AgV+FYQ?(^|ub6vSJXl=YTe zNZE&otO!NXw@~~g?!d(4U!#d{m%C0C*CXZ*^=mb~>Aw2NLkSzJ#BrYWSmOq_76St_powGP>QxXsYw- zu7-5^rnEt)FsEY}ByLRVqKTs1WvNTT|2^aO^FcH2;B)U0lj1ta3?N|TOqYlKFQ^4u 
z+pc(srmML^QdbgM;e4wY!^&BE^3mk4Ix9hRZ$B{TpxH*lPo+ya!oy@L9n=6;5_|S? zqZOPRkbnFE-AF7X0~k;=TrgyXcmr|Md3ze}Nf&dO4Zgucm5p4=5lb99cjEY%Wz1c+tU#rL?qGl~Xfw|CD%)rD8 ztYjysUj^9{f|fw%dg$;L6{N`^@AzUyq&1?Sq$N|*3Wr;WYW+(5aJesySG*|ega7M$ zd?vdE#d0IELq9oe?E(67adHjhWBJoFPjlyzDW|JmKe``42;f;e7hEziZXkg4mBCZp z(FUd?C9vDgUtF%?Ro5xrUl`30l54=$wvyB{DCZ|E5{3nPEul=c8SA z?%ZkWhrbOXV??95|3#_LAA_K3xe+!`?_U*rBAUntFqbBtXdn+H>4UBm3}PGv%X~>H z{qkJPxsMQ#S|%JBAi!%IgFX6jqe45{jup=}Lq<;DnU%}j!zZypbRi1u+^0qli#|z9f-4tV{Ee<|u*%J?WCRgq#*>MIXhttioH$d|fj3Qy zi*MQ)uUqyfiI14V*ePAV+t#soF~CkjH2!Wz@fMfhE$*m`Ny0?`WR~IgN$sCam$s+v z{7%wV)?l|(^`2ESW}A*hDwhOfQOPGZTy_asGA_^IppA$`0*HP#^kM(h8L9M{WOR<_ z4&-w_Gm{|6wDuT09iJzSz2Zs6vkSy>_h?DmZq%U_lD9<54Gg^P?X8%bis#K7 zpNIW=x&zkp8 z3Y$w@Eu`jO;aJvg|uO$F=C3WU1`3sP#>8u(TcjBN@ZD8VSOV<>~LsZlgi~%km%$`v3NC%`DTD3N;0;DheNn;IkM=onHs7+XbfiktuX*tB!a*H)Hdh%CwuUu zKW9{Qu9mXo@OYo7H5tO{j43l#*JOMQ&~6s)M$Tt=DFBZn3fpEb*3`wUXr!3qY-=$( zEs-Tkt=}1hc?ooOA|}TZ4oo-NYzRAsR170pf&(IJYPP?VLQ)%bkoeNlNwTz^E)nV0 zuYQHweQYQ0+gZ#9-Mk0H(F8TJ9MpcvkmoGHS2!E-?aR$-i(ZF2kye6XGGhxkble2H z)WyrtBx-3V|D_2*=29dD=)yha))uT|xNW7=Eg0@pR3oZHCAJIQe@4nK{-@rO9+N1_ z^I%49X;Nn3Ej;3Msu(9zJyjv;)8vzaxhp13OPfP*+R(LY=DQgQM)>V`7@ILYchSh*vwA<# z;oor~5z;eMV^6#8cb5QkaMdt$+EROgieo}_MJv40=Q@GjIC)cADaer1y(_)X4V)(R z>D+%^z9WY@w0!^ussZuWn&OUP8zUAfSkRNexDWDqX?TL{q9Z-hP=rjiaaas)TxrWmt-okmAP=~lnr+hmLNSJ@?0Ug(Ev_TO zSXFeyaKj`q{YXG;m%O(mJA{4vGRF^5mwOg-RFQyHN|Bh*js^BE;)J;3fyt}L&m)Ue z-lF!Z1{mX`v+MiKk&-k7N#nUdninf*J3hyOCqA<`)5l#f8Z?}5)sGbizLUkfcs%E& zniXtC_h42jnN@J-CAPIFjGePcx~=wno#bB+96}HRx0V~vlS0dklVP6pM!(d|72VRx zY==4d??J3AM%5P17uo!t)Q!Yg6p&N`<9aN4cW4BW-y+*->0ASl7g7zp@IjO-L$-ZB zTZAmZb5Yn%4Yif(j5$R$&!94^hNmBuawQ-6WodWu@6M`jTq^r8!(lw zNeGv9?ogWSW8txreGENqgRWO$8zuj`gIeDxF5rP)VAHCVk17i9vKPZ(z>YHCW<>W0 z^;Rke)z-#1(l~bMnmC!p0z3h$7jso}#sJPfn+AR^1$qGCF-=$08@+Af)~D89eRa|>1>iTK$cJGU6b;(!UvSBu%KN+F&}A@{cRx7sJJ zSn$!Cg4{(v(Tj=4OI7xya>}XR z@w9Mj+B74#JKPG#9G(Lhn;7n)4x4_xIrm~sYIbLN`HWaivC%s~#>!^ywSP|B%P}o= zGd28EPf8VsRkCu@%$;lWzM*qK2D`C2Gv;F?`z!X3h)G%Q{P%0Y&l>3DTbA%2e9kt+ zn<*JsdPA;R(D+>i1|wJiSq z#pmzZEk?7K80m;f=})6vA~Fl8;&Zn+B`PF;G7ChhK$c&%V#Y-x>QObPk;}xs3wrPf zHR0jPj9K~?xQSN<%rO)I>G1m>!$n}PL9T_kFSeK~hJ3SJx(0xAk8c+gN^k%BfsuIDIc9!6|CVo1=`Uxh$iGj^$te<+9 zVk~7d;96laZwCv7J6eByy_nUPtTUqM5xZ0m`FLvoH6D!`GTI1&u7<1YA z2PH+vj4@U10S~dwPjPR_V5XB4r8z*5b4Yt(r$!%InZvMUp6|KA%9L`7XjC_*P0v)~ zG#BPR66ux4HGp(zbHO9og@|6OiWUj9!ASa|wTz|}bQg(95=YcsN?Xm?wQvOnPNBH) zIe3=RFDJKZU3NqT&*;LGLm72J_xa^%fd^~M_wel+1A3rwk1`xFV5nv~i&+0Xg%81hF;_z00TaTR}N#OT4 zLBeW?fIlIdEel_{uB`dolj%D4pQPAx_G@` zLgE=cw!by*>ec;w9zdf*T(+xmeZr);hL=;O;i!GAMP=iqRlnv_86Fkb zH8#7j$nSL(GBRJ`tDKnz`yZ^n>a30dtGvQn7hKYy)S^dVz+;QDRbDBR?FcUVe^C&U z6i221eZEZ7f%lN9TtYTV>iV+QzuJ)gL(Z4oZbD>DUpPbNXQs&zKI;ckQKw!c=we1B zbWIi*KJkgTg#q&v@%Rr9M}me%_rs_It=gJmwLP3gfwqXzCW?N>#7Eh7xohb5w^dWg zz}S9A=$g)=oBJH|>{bH(A<@ekRoo;s*Fs``Ai2Ts6fB+S5e?^CVolgF?C0qw6aF(j zI$rlqBfagcMCM`yF`akinra7R>aiY69F^!kVow|896Y;DmU(I?dd}4*L_YXfLCPRb zE%VbK4w?Xfc_lfeBQZ-|hT=|Yt&h=1W^~Mf*74%N)qE2MCfQ?a>lmyB98bIe>4`s=zCnqh)?VDHJu@6#iv&Dosw;SdVQL(r7bdC+N-Fc+qW zfJZgsc$z!%P5i`eny{J2WXHo{F+Tv4olu3#@d@_By{dU{%++V>8M7A zp7)yq>)v&et0cA;8(aWUHjfb$$5H=@)EQx^6%-7~%)BCg4aF7+n{?%^?)-m^oQ^m9 zbI3<9`s<-H_Z&eCN&#ogoomi+n^GIVO>*#uuDYv~Ex_fgq>0?}H)DeDFGmJ?NzidB z4V!0etbDZSEAeSl6yL-<~CW8>}nm+n4JHph%Ah`@X6;h8g^4#2|=cp6|iip z9CJ@Rj0OeWZ#otEny}ePo#Irm*RrVKSCxZuS&6^!t$5@tOS&%^x1C9;^{`;=;)dax z1AK^-p;&fVcoIV?u*lDPq2}atVwXi2NGt18AlHhGp zq2lY6UhlWCK#SUec`7mK-iw@q=Vitmh8>?=*NOJwE+cb=udRORWB(}wxwY^NWz6nJ z%C|cq4xOw%eEsXBNQjr0plNH`kW_nH`}QfnepI)q7!tYQnxqizpG*pTM-sIX7@T!- 
zY(Vg&PN=Fc!6M$O;Qi>Ps{HQ`l;9CwD%*1hBCF}6P>xSjNMA>GdNSl%90X6z-!^Wa z&_8q}6O}lb9;q$Fl)3hX8nNQ_6XqK$OTmi|j=u|6`1YUu8#lzYh>j+XSJtn^=)RofoG@z?!lF1Vxia;q^M=2TAIkYi zGgfSUgNsGRc2slG=~!S!YMGdVsVwI(Ne#~5ola8Y1&iSH06nh>v6Ufudw=DKHWMe_ z3ns3&;QUBB65{Jn#RF8*{y3&50xg+lE-s-3FxpgD?Z9i#(%asbh#>G(;>+D$!O|pJ z{N;_;p1rB* zf%S`7dvVtEgl3-qtU?K9+Qa)Se`E6VUO-OE-?s_Rb;NA!C~BT$y8|zocxjOu1Q;!j zqSh?u_!da;Xta#9)g$?RRPln!JiZGhJ3ih!=Rt`$ESvcmmHD$|qLqbM>R~^syx^{LMpBcZoUXYo`t-HM4b1OBe4lyxR@aweGS2VrMvULU1LLfCaxH27{%05k z2PT0aA`J`YO{eLShLtPw&1`vwY;e}TJ$}k_j?CIc){o7HZl2M~8_f3u>xk znwBvODV?Num+WFGlR6mVUC2%_^~kHQS;P_b1lnjDYvrc3u(1f3Sm|lOT8P(jyjxRjGH7O& z!YubF2VU7;bbn_NU$(rCrZ?hCi41+pZvnx426FW6>u|B%h?0l+wXdt#Sv z;VyfbI?+dr`d@v-5d!M2nl7;GqX$l5_~_7$pp8N~4=K_p&aTvmZoGSYhYt<*VM~db zR75Z=hCXo+d6?jAeC)8GMCK^><+a7DX}d6^+J;ZJ1c=HD={~?>XA#~$eE$}zo(f8e z$6;nS)Z-T)0G^z^re-RA(5BAe;gHF4ckZ&RN$Vez@u!D#L4KR-z;xfxu4>I;z7$r=oCUElO$lBd@L06cMBaAVUlj&RzF(-NJSP=+S=kwI#L zeETK*8((`K1hidIWcNLtMdS!%7cS}p88jx7XGbFeU#zJ|34Ebh4lD`>*@IjiELWq)b!f2;qeUD7HiX)ax`7CnUFxgU# z-@`6rT0)#JatYvxL`Gy4C1Hxn!52F)W~8jVD?YyC7l}`OUF(S_2pZ0CBjE3H>saL= zh|_^o{eRV1%|aZWqgK1F{i)BHAGy=r*MYuo8ds+S2AsE5>!GAPr@NWVaqd)%#NB{j zDmeF;J9w>u(0W{+KwnG7*}x3TC{Op+8oFdIv6l&iN+wi{+x4}lQtb-J6sg)D5sZ}EzQkarEk+k7wmZhoY$ z@0{2Ykq%MW9H9#Pj=reg0dIH1|7|wJ@rrdSR5T~ixlv)cma1E@ZcweKVGgj*|LbR; z{Cb>}vp@qLu1&t~MYWEi8%$j%n5*WIzBd*_5YutKzUVf@Q5P)XIQ7b0E(^QiSrA-)EMNtXWO9~_$GspO4h9tf6KDd&Az*75;&TEzI=sO zJ>3}zcN`U-`7E+|uqcwv9OM+pC_>E@G_J0?fKFX;db{Y438)IJ#jX?6Em!~rBi&?3 zU%j$H3UFI>3#18|zJhD_-Nocc&*=sG=09+=rR1_5OyBxh9C&_v^ABfMCfYjXirM zpX||{?NxY^)$W4oHzSWRy1Zn1#=7tnKmNAp*|#ySp%m45(fYHeSTAu1_&z+uar4fN z7ieu#gAskm7(zlsVS>;1<{V+|dzatXmBJ{;qJ@A109cH&E!XV4-s0Pb$LRJqfF*)V zPlRssnGRdxpk$9ZKHG8))^EgIs1?U$-D6myez<;GLNA?fxdxEcqj*Z|rZB@|;cF$} z{dU~dQ$^{gulI6{fKTo^HK7M<46e^_1!gS`$y^ zPYHdwCBAN1V0B9rAQAO6B|&sGo-=N)Q18cvmSZh4 znV5%=RgrZq(GPP@OaU?r^3}b&i-U8*S2pzHKIZ*9DgRwOj88mdk8$%$(+)2YMG8+{ zS8;c@P^1b+7xR-uvf$97Jko2Dsk5BpD|2$9fH>wuBA{Q{cBn7i%=m*Ho3jeHZk#hd z$Gju%a5n(G7})Z(H#VaD-eHxOA%3uz(cuPe%|$*Tp0xEwS#{KhiBVe`mfuGThcD+>A45rXq~Zfo^bQeonOvpqvO~a3s1ZT zDMpZJufsd$$6}1W3qozvnzad7xmvFy54)y-)qL`&V+zA5T9T-(Em?9M9UPD^b4XFa zwHsTbT$T`XF<1=9xE`EZF$GqNGW~!RfUiqiE^5;$+Stw3jzw{sr0)9VEvZA(%4&_~ zc)4ZN*vq@fk@}$%NH9uSCu=jJG zZ3%}RP*-JDSjYdkGeGvoDc6>$0)9%#8mcjf%9o%0f8StyH?DVM5)1XjIz`Hfcjpf4 zZWCbNOyvXBG`&!ciS7K$3!S1owQd)HOI#x=1XOwJe{8p>vY+4ONx50e#Hcm*1lmBT z1y|yz+v?Obq!7j&NPRmR@~163+@6y~?j0Ou`UHT%`97bV%_o^K%-~0s(LWh1>DxN} zE4q@TH_);`VS>;OJHN|zA3TjJLvW-zjz9i7#~Dx?b2QL_M+xd!}kSoRSNun{66T_^V@nljG9L%?&EPp`BUcePy2-Sk9!!tpI}r zn-BR;+Ok;^BPQ&bJ2fo}FfU4lhilE=ymu<{TAG?Qx{r@u7@Z`AOY~L9H8{Ra9f`YR z#C(UouFE6gI+*wwK21;_8d#_r{`UvF|F-$J5H`?2(-nXlmQ$qv9*oqtBG36A{&L`$t%{10_uP9M+ z1a|4i@;n*tjN$*zEV-?qpzpEaB<6MKTGwSP*HZ(D@vX*jp~Vefe^El2`{S zgj1A%qf*j+=d<&??3sHpV7QCnkd}tg?G^pKTPDSHaVS@wxWCOB^vwEN{`7S)58VdX ztgS-a&{{&eT45kDI$n~!+9QnUPv4yWc*nc(8-5}2`>~LQK7`Jk3s1HuRY7n5_a5St zV)9&s*>Pdd(T@jljcIBHdEWb*F}=Q4Yvi(LP)YsgqO7}k_qVOs90({D_%_)cz+RCGLh?sbum+pqo}F zUa0m6W3~r|G9FI^+!E!RdfNO}xNpqw#n@G3!GYf4YGdanvEM>PUIs#&y|_NDnG2gp zb2kpKQ`gT0;->_!+wPM(nDZ`^FyDK91&@LIm08>!8WF>dE}96!$9KlbJQ&n?2^#$t zlfii$3}*OoGA@%#-C7w*`vsV|2;Xbv6zhB|HO^d4?(vq#yidl?-M><(+{K$aMmO58+{;jt%QaNgnYZn~kQ@Q{ai{Hchp081`%wA` zykWHe6JQhvk&>cqRMe=hh?3pfMq5>AI~g-lcKMiOyX+~R0 zE_8|3;g{>7z7%p+Iax%=HX#snq^lddem@8M8ioDo@iLzFLZ^-P+1B(}rYg?jjFbzE>&qTxrYvPaq~&Nk@cIo~pW z<1c1j;Yzc~olFb&eWT0H;>dsn%f_E0Ln9l%$9d#-^weLW53ci$5Ul|3VgM6XJC2Mj zY?l&)q5ocm+VQX00kH677^>M~D72K$<&6!e_vlxf`RdrsON^rtu?ewNqOvY@Og-p) zHl=52jrNY`J)QqNoF&Jk=VL5q|H8iwGhk$i>FJl2#Pf}n%pT32tR4E#y76BLZzTtA 
zew){S>x;Qw8yWGY1VVK82H)SHP_ZCv$S4=Vl<9JCsV!#fL=P~03o%WIw0QuTs3o!? zEQ}iXhaBFhZYqtX!eB#$yULf%9nr*cdlT793ujT@15)?g@K;))bLz&1arHbrh;S5f+QG5bqAkz9Onn1E0y_R(C1V`c-y)%*@4S&(pSf?ZuzECxdI_} z4PJHrMb#%ZL>h~ZVFBs(e+JrujW_EE?{MI;b>#dAf+=xsm=tJ;AxdsF_=T;sE6;7Q zqgxm4dt!MY#X#qyA2IX34#S0X549xua?=OV+UVMZi1`Rn4ihg^viG(=+oiUCc6{wx zLJd?6TE8qj(XDZ(=C+Dvoq(=1ef_5=t-YIIfTm}y>h{}@o{NmKBsun?rvlFCuUe{4;WAqe8#U9+W;h8Fjtqvi%JNE;)`j&RYX9vn&Fow7z<%sfxQ}=fPC} z2y>3d9fk!6=WO;(i9L?hGQEb%1=b_@Ekw!h8A*u$VS(&x3&f zTe`5&*x7Jq%p)tP3?5h#Ij$69Mr&S9+$+Oe&PXb<>46gk51(*7$j^-&aqC7Z^
kC#QQZSk_PQ{Hu;UMRLPCoh#p&<539biHdN}UCGeE5wEhV)6L5{-n8hd zq}4&POYZ%R{lKiqhN7lpgGH|~b$*RxbPNPtOM|`9-rh&MV{Q z=)U+;VZ@aVTO6bYWvox?>@lEU_HDS)_1Xu6MxIVpUqF|;k@#gT);Zrz=^10wlaQ@p ze}BO45Q=`u=@g{?_DkAc`cq?F8|Zy;R!3txf*T}8>^miw9&G=u0?YHqE9G!#@vF0* z=h-O(Vv4j=R6BY?V~h-}Fy5o-;^e#pZ9^79?>?|dZNJ=RN(yYyS!x)9SF9Lmcol{= z;=}XtT}hw(l>-{9#@*2S@iwSb%x9pwlAh7jBx5);&+N9FAShCF1)bie>J36|Xg4Z# z2jz{kYike4PmP#X7{*2{Ve^z^nXiYP5W;vPq;iL#3JcAB_fx_;SZsuJ`90M|rRtsO zGea47G_e*IC1WFpxQ4?A%XE zbd*kk;KQo<^K2ogxZt817e^GZ{HSm@W%>>&naTNU(U*2Hr)3y?2~e22f&*TJHno*d zjsSpM^v&h?)>yINz5RU+guuI~k?wimQtAv)4K;QZbi~ia_`Ix}5#r*>6AY0GZ=&i|XjapXpHaRr5Wb^(fZB%KSZ$WU(y z^nJ9+eGUBTDDqtUn)WLzoX4G)kd9?bkXFE*nue_zmo9iy7)wdWFV)|)uWUA$?=1QKi0=BIdZE6&{G_;>Z~jbM~L;@O*=IMbZN)P`7xsK^>Q*a zcs(>$-wLU8ria{}BNHRvwd8G(f7(l(zbRc}N+ab3SVrsLq;VWzak-Uh^gdGND)YRh zCK`?9VyBuF3Dqj~HpfS{>Vsc^@NH33!qD?FtNTzZ`){wB=Oixw+f%{IEDUJOO6Fpj z^?tze5GLj|YgYV-n*R+&ZsZWD1n)Q&TwOy6Zu9){7(Zg{}m)x6!vLl40q!E;N{ zRh?Pb0K5cwdof2WBi_bMcOJ%>cVwW{A2@ce@8XnP?DWYR)36~`st%FtXTeu(({XO_ z70{?OH3UqB$CXvJ+(L@PKVXWw}}Mgmq?>e5s zIJKBY`H>{0sg!4m^AhkN_WaMdtyoTQl!mL!Z_i4;s5Z=l3zL1>=u0l+>%jRG4kx4A zsxZu8UcfhRNId(UQNf;u**x!}4)>zG5s+2YpH+bolkB~K?*`+bgf0!k?>BJ1XT*Ca z-v%-Rktfk&|9%w|yO9(M{be+jX{vaXYc9mj6sC&hb8s2oDw$utAiF7SvWJ3P61iJJ z>!>_r7TJc%qg}TH78mFG%blI)*`W_qrq6eYGiSkO6%X6Hc6C=Vt|VlGr+jTG?y#0J z^=?8M&~@I4CTmaMM-apR06A#vnFx)Ky2{>hoZpM?pNsj|weDDj$805VWwssHDI5wV zUd=<8pmXl9?8}6$tL;47qN{{QviZbKvN=~s!nqaum&qXGt8tQH`QH7#(|fRmSTo}5 zKN?)?b%A7|)7I@2)r^pRm5HW3=!4d>e`OoRJU(2=4_)S+UU{7Ol(mNa7uvOwgtp~g zqLDK0SOvss8JZDSYd@UW#f(w!4e(R0zr*dv<{J}+Imxh^8jXzpm2b{|3VC+m$jmoWyw2mLce18t zcrs-ZsWHlovoh5%3IJ8qTAdyHH9{4?^f|g4yN|}Oy3-+jKD|8QCbA;HMYaYJ`|ba3 zl_?A#@!&rmU%Qlr@ebmZlk%@{rnrOr;LYCKh=Q^b+?T!DdoZp}wXA+qKNN;i%fzJ5 z+#mLYV)?yjJG->pbYft5auf~iM8RR`X(sB1pbz_ge-F0cW|Zm)@8mv;58Kk3)wyO~lwW0Gr#(YMU?x<6@8QV& zELsXkeLG(x<|=9=H({J@!K9q zsiY@84&PAA$L0`V2lJT$)oG26qo4{3ju?XX8uQ6gx_4_QF(LZPPku&zvZ2gQzGi%2 z$jMV*@bf2oEhT}15^%3#A@S#l0mY{u5oMu zr42dfABMDeBxc5=3r6y4dj{*%k7sic8&c|nn0{*&OA(A6evEAzqu|5*=9kATXa2wo z!~0i;0dU(3#C{qiu)c1ctWzvg+4?5)enE~U&xm>f>ot#G792%P-i5%` zhQ6T39iuFMvZ8@FK6Z}dWr_OK0)}owt}$>tJrJg`%7M1CrTi7qgbvTod}CWCCxl~t z?DF^%ZDu&}r?9%)52sdF!i1cYa1d)PwL5jymiMs$yP{O~WnT2({e=to&(dMu<-DLr zzM|%$i+()e$W|aZf|MNFmL~Blw*Dvhvz#C|%gFfn7ZV&m<_&yGGp|8jAs)IslY`?h z_Z(kYZYOlH`+ygoG8@8lgk_c?+d(9QmXr50wSm}v3M_@h#mfJV!(z9$Iiu|$?*9{i zwdYg~?<@Xd0JG}N-FKt-pE2bUd|I|!$H?g=H8)1}#^XOvLq0!(8c$crdS22j=P8>O z&76jj-3X+b<@4Px^X1mZY6%Q?%w4pGhL}9(i5gb9hP9g$x4^L5F?+&(TzK!lbklYu zV7y}(%BZ65UU^^(v}ahq(bzr0_OrVK87BJ$6pgPQ;w`-fj39HI9A|LzUvXr5lChn) z@#GM!O4ODB?%I@})br-vZ{;tIJ%k|r3;u`n&s=MrBCeI`lF0!7Pr^!XQ!gK}#EZd# zgm&HIZ1zJ z@Y{%uoy7=`yxGv@_(@wl@~{o;1xhn>ttB`prW{TD0r-+9tcHflO1f-&hL(OoIyoD9 zuCXF>Mue&t+vG9Y7JLi^Q2Q|U?Vez4PA2QqfS~l`v1LX};#P3o8u|ZHw8G3 z_*t9*8C59a?r>WU03xt=0@tX(yk*{Kv(JM1A?W&7lC3AGvd>z^WqM1FST|-pdnPQc zTMUMg`iKk0B5G7(ft7EAn6mzls+z*cfm|@~K{4+hc`AlWeb3EY2OFN9bu!Cl-sgZG zkM%X@N6Bav2t$T&9c7MFs*+Ccbag`(#|>0ey8tEc2)L*LC}WSZtHSyQ4pfi8ensTs z@n8f;2!i*wZ`Q&$tE_%e#&Lc%UTt0PFOOypqBJ>(43`$EDaoQYAyBKEtS!cEN1+VA z!}tj#^pJ)Ot;KACfPU6~Fe^fnw-tulm&l$#yx2SkqUG}f;!D-iSQENu^I!L zkcL~8}r_=^FblHrY38k@mku-YDDA4$b`40|;I z7@Iy@AR}>8&Lxf29Rwsx3u)(xG^Sicv!{P{Uo5(4ztYz2bfZq7a_kl|=ku9)LbJ%5 zl5XzK(+{=6_HQL}r$3)XEu{j;t(6a(Y6R{gzi9cB5eHdeOm@m=o&?xHa1Kb5udex+ zxejr2C+HMk6O-t3RUEQX3wg&MeTI~9PoE3OvTB<6SxKz!m>i#Yzm%iLx+rQ}kID+SRb}$yC(_+8 z^iYD|-v%@{XQpKVUPr0yQh@h5pPbZY;jf}*Z0?edPxlR;$3O*P!a8P|{qOg&FBGp< z?V7JF485o~Txzd1&b)Fgt8gq7_22e=IWyFGphCtL1TC(%qmt@)-ic%wSDArcMEp}+ z+|iPo6=ySu=D2?u4&zmb)=|i-Y_G0%aV-wsumRCqEHkhLGXym0l6M~Pjk{vROrOLw 
zE!41JLssAR>t&bT&dHF`^Jzj()8qDaw(N_e^I#wl>@8C(8y7pioR4}X8W*bv_jyzq$e-L6Yzswzi0<>ScRB9 z@UwYyrsMY`^l;49UxU0Oz~-;y;E0tY%OowY6VfDhsqwUJM^@B}!n|Bj z0wV#;sY4aDd)xnsod@St?BeX5dgObP+gDI6s6;HDPbfhq5EN5u|Dc*vsi5!yY$@GU zXfnFZ=tSf&r&V93>*yJ^?9P+Nb^5~im6+75AN-q6Li7CJJR_L~W(LEhhcj15MHP3d zS!4tS^HDHudoBQL*o}j@qHV{}$nor8siPs%r=zOi;q*KWz)%p#Y8RP`5=n`Z5-ok@ zl5ZFH1Ul*${SstV^7AHyLj99!vwoYZP$$TI-sW_KpQj%lAm4T^wwWfAwg_*0mNHlr znd9?65Z*PHu`55u;Q@A0P;XY~xnPV|a&3++o;)iFhRu(Pj zkDG6_7Q?t@9g6kE*@aT5<~aN((Rz~j33ryL$&T;k95+DjQ3Erw6%&k}b(&zFOmHL|Nve&X{~e8Uy{2t3#ms^Hj1s|?SD3@sQV@Qc1-iT(AqWxLUtC{fsPqEwdqtDiFgf_zEzeQ?6}<$#g*jT7cc?Em|=SKs&XEvp(YuNdV|DE(|(3*Y+XBM)PQ+ zHByfZg}>68u2%=ofukITz!jgS{~^uV%G;Rj-S5+87HLBy%CEvjBeM?TqD+E=F3eQW z?7&%@mvTC1&tfwo=8Ep;vai(+CF5mMgWfR)l}BJt37gF+3<$X7f^-8sYOZdzw}e9e z7VPya3@5)mZQ4v{&Lka_`cmHaqRr?r3NM?X^8Xat;ZOES6hp#u(y7Eq#XnN1VKItn zXI&9=nj7+hzLM;q3Q=S)zE(f zuBT@-vH|atL-RJ@#0MXXCn3T!Kc)u%ncy_~_y4_KV+B9eC4 z))~a!=7PkS_`YNmY5ROClNM*}h1*uOiiy<@E0C z0}KHMR~uX&s}V>x%Yo_4VF|S{!G*1JMyZI3P_^%c~ z2!SSI&1jClZzcg&7?R>(nz(nc$8gf7=rP%#)0CSlQ&fHX>tvR`)Zaod@qae#u6(#H8-6l#{YJj9j3JizL!1)?RojTZ`DOJmxRmcG`bBN(hcRr^ky}nNA~+Tg|fFF`JUKu zewrF38W{|WKh;blu8=yk=XN$&ca^ArcNIE7nfCxP|M7J5g!s&JPrQ%84t!MW%4-ZSV0Uf2t_E1ltd54Xs~Q983Y2uL%`9yu#Tvl z5>mfOL)`I*D*ZKbGYZMf-Ku4()8~R(Of4V*vQ`A1kw2lMd_i2$3E@&#Ui6yXH)D=O z&IFzja@dDkNqGj61KD&I12&JQY#*^10VV1!>@~87Bc9EzyS7lQu6m4^E`Vb-bm1QO z<<}6^Lz*;U9SSE`naUnG*Bx=-?2^lY4HcQ=-~4X%`U*eUH7@zQ;dR92kGJhn(po)l z0yd@Vd;(n%n5K{%|2fG3x=rGArKMP~Cn@4$+m4FOT(IK-TyWeiO}jL>?j^%4S;d*QEPJ}S&bvwf2eBzOBx;|mA zkXoJWV6K1Nb@Dv|YKUQxJ{|F1Ft3I;Umv_(8XDBANC$78I&SJVER02mA8Z{1a{LEs zuCvge9rS-A(bEK!FxJ7oUcaJe7tT+P0b_&HL;j;hk0Gw8hz?Z>!pwsV*x%Z`L>Y&D zLOh>%GeOk61`Ch#jl(WziuO(dS!y~P)SETnzY6lqs3&KOg$sM!wniA&5c2`i6j1%*g_==6QPZ{H)Ng8aH@9&R%omNFo@9z z5Z@CP=@?vZ&bKKtO&76O{C)mHWhAp#Pp;qxTgdGCeOF<}ObgQ&qqvbdkI-ir;GXW% z3bmM^E*W4EgJU}>bCj&nZY7VH9w*Wc(1$v+vj}E~dQJY(Es|_ZPc62--%=hWa|ZR< z$>$$X)1I@8<3EYAJO+Krryt87KWZNX76YzS;Mx0?7SD;FLg z7PD!TMUdsXrLx&UPSn81xWQ}B??H{?)rVl_bnGu!RQ^&5S#s50+JKz!8gitSht5IQ zUj(sX^r094Q6TBJl+3ROF)wuEGX#fwhTc?Gvci2Iz$lh+Rwrue!OHOX#q0KOr1y4a zXL6VNuIFBZtYHkD(Xq9W1eUXC*-a?8vWZ>0J#u2B3-_b)g*7Cq|Av=9psjAtfZqSh zVid+eGE}3 zFe<0%y`L=r0CR_XDVZxdbV8{vR+Tc;2ych3P?OCVFKYQKWX<6g4S)wHD*i<$$ASNb z%6I}pgM%%VpOnb@M|Db-EJ*!@aCt$a`zEo zgQ2fkVd(~Xk&U-b!?%ai^rABZ?R&^l0RSnhDkZZ)$=T8~3WnUhh_tn2i}Y5zBA%9s z_hz@1M?=%mFI|U012%M(33bBGqX7|3C+SAiwlx3MkE4d?TGf%+z;JlM!H7>u8Hk*c zNX@k;Vmb^2;m{7dk#hEKpBv)1sSN`{D)?qFQ67#CZZL$psvjuZ6 z*6IY`Kpc^Q5iR&KUjw}2`!I<3N6aMrt5<$ za-y>Glw^m)>|DT-DZdk=$v3kJ#m-l5P-0<~3XAi$Dl+ z1CxES z&deJqy&acn(y%8k)6$R_MT+5U-%hxM$^`5f`ZSv0V8}3bVyxKnT^zw>E$+x;B4NAp z!Q8w7^{TBpj2acJ852psp7&(OS&l0rYHKJIrjR3}{~Or8wVHa#sUJtuEW}69r(XH~5|r)?0g_ za_f=z)?0DZo+~+xm}hiUR6}n$j~?&b`e=!pQC*b6a(UNum`Oc18@Oyy2<9z{XxQA{5&afj)V0r?k1TXe023 zyI?={sF<$-Jto6^g+ctCe!|29s&b~iWSQ+?3e$(#a~53z#dEPzpf_jbfKH{v(Fgx?CDz`ZdeAIiFkf5?qKgx##b{i-p?p9 z6H4*`&M=qk6AM&CLYbes_?Y~!GFp?i7l&na7-Tkk_kxUYgJ;Vb#|0a%Mi5^otdKub zdmb!Gg2eZ?zwHYCQusulfE^(5wgyYOA;ne2>1dJ-8p8v2 zI(-0zC;DAI_tmFKu=ZnaE~pPac3p7Y%XMRzVar6hgUNyFd0yu5a_ZF928yc_*e&j|CB- z%N`WZ9)G|lG!tWV&f|$}0&~ycsL?zaHvS?*ujIg4bA%pF3?ljAZushmKxjpLs7Jp^ z-l0#Uw5giRBIIRIE+&7t8q4kqsCD2Snt*k^uJhTR!D=Y0rvAW474xQ$(0&*T>|A;m zA0B1)Ij{eW04k87wo_qab%O#K{}O@|2K$p^{C3@{eRg;RdA1C0O?KJzPQ$1yy7Ni1 zC&zI&Fg#w!c1S4*Q>@+KLMMnRXHMZp$KYD;eDDAzgpByihnI!16<%Iu>Y(K(tN-!Z z_m0!`s9p?n|Lh}9`Qu%;t+^P;1Uxj^hHC;02+||GMqR%iBMq*}MxD=G#pka+mR@_Al$@`$W-U%Mb*Mk+grkF-r=j?Azic-;pK&*UHx?#2q zM5G>ajlyLFkY#m8G`D(?i+{P1nu{Zq@|Tep&13k#@Q*X{1iH-)OXjhc# z{ovKyY<{jo;u#^Dt~E%D-ygeo+rxtimJVz`M;7;4CxAut0|(1!k{R+Cf7sz-1{%%w 
zZ2CRb8*g+1Y+ZA`_$YqaemY$00UNMyoLmURdm4x6h<_cN#XNK5_erEegYp1>TvEsvzn0j>f(kz>S;84SQJ(6rv= zg^Nv1cl+C$o1*Vxb+Ck8xnYiTlL>Tatw^0f!Z8R`4)CinJe8rX8?aB7-g67O04bms z)I@A)0`i{Mz369=n})ZR^XA}HTrz1g44K;}hkfXfZL7?onpR&Lw_@s*onNAF9=BMR zMhj9PSp%iMk;jP8YY9eJ`S5@+r!;xOUUf_AU|+IcKXn((q%HlQFG%Vbzc*Fc5P%>l z-W!g3$G@W7us1w|6Fy=E?dc8Y{U5r0I>%Mt8_q4w?7})o(XAZ@2pR-1)K9;Y49w6@ znr2O4x-IFUV^t9yAt}L@)D64wiIDOQD@O)KEaEt^8zeErXKKsieS=NgxF=xbU7;%e zM;+|x`4+#`I!AHd21k65Gj?eRk-^BGY@?pQAVpIGyO|>QKit2Yta;(4b(BhYEXvl_ zRl0!(A11JW8hm*nzBup?m2?{Te*$z9Ya@l0r#H$BnNK7u``o$G8;(iOP;s9GW&UX6 z10m6~B4e|;-FoA}`Zj)bw9t>LM2YV?+{;+gfn-sBqQ>JsuO18AiAQo5q=wc>3=Eh*Qfa$K+=o0Zz^dwOUSPUdf zNwVC~GQ;3L;J)L*p}ZG;^PJZMv5LFR;xm*;^dB!eW?9RW1a3l}n4CR0c?MKMLjS86 zuDGI1AI7Px&7va$)-JhL;i8L2cDR5noKXWdhm?CS)ySL# z*XqQm!5ouA%~>j?KGh3l3#TC+@KxEkD2!Md+75jaXVMU0wk z7L*C=mbH21i$Ez9N$*2LpidOwwn9m99S>NX%O2#=J2^%jL~*$iz2Rf$La&hRBJJ70 z9)leCa&!uptY=7$`YKuruVKIbcX>irfh(E~{NLt2E(3)@SD=cm0DT>F%1!5i zS#2d>Rc*LUV;$@th>3kMTnCicGSM8i7xdt<$yjW?yqE!v$wt_w4KzAo%O68fLl$=?eM*TD@5Zj3%~PR@zl1X)E)4iA*|ft+DXsq@EZfo^2{{NILEaT#-J%(wLp9YzJ_+mz2a8` zfN%`7R)~j|+}8~JF5s!So~0vjVC>jcqGG!8Pgg z;8N@Dco#0rPwyb({d&XuneMX^I=)|bzW*IRre6Eqg407qT@W}>KiGvVKpD8J89FF2 znW^nPH><)u#~MdJSS5{iJa|<54TW=w%YDAZ-Rro!zq*(KgFa$oBJX+zKT35JbW~gV z5E|^a)4UD)Qq8pn&czWP$N#AN>!_IevyBg<#0#N5pOr zn)8xT;2drLNHr4n*fTC2rM=m?5M=D|deVIOIC$E86#@pB3w@p14P4s7TPI`D#ftTYgNHOP_5rggrCUD_ zt#EPq9VMlPwyxKoJ9Bx7`3suJ)NiMY#y+5+)0BU-?=v)S^WE`vRxxY;;9n@xCIMN%Zoz8MeCf}~{CrLa z$1`rv3{|xHZ(YnV$nc(Hg$k6YEQ%4&-)2d&zZ)(^fMdR_1hh2cJi$w4a=^I%@#1^w zVJ0A*)nP5>mkpK@W|8!zIlu{{WFih7!{*-pMwedY6qQhZ zPsK>qTe41-C>S0+N{qU{8Vdu2%kOo5cZIlJ7_*55_H;|ot=DE`0n=^(C zN#Jy_2#tv*IS|cE$OiScw_!T4EBxtCgl?+t%zFY>dpBJv40hSNx8ZA2z74{&ob`6O znYQGAzk!`)A*tW>%1iJv);O%PjpEh3SQj2UPQ6)Y+AeN`?ODL6_xXrhiX5OsILDUj1e~oI*2os9dwq^l z_c~4pJi5c2695}(`*+xVL&?(l;Qso*p&nE}at!0eQ|uxYaX`ix+`#s25U$4fWSN|E zDkcNvUU6SC`8|dk=w#E!Gso{yPUn4FGOTvZT7gqdy{%Z6D@?-P3fDd_G9(_^xH9BB z$Syd8T}G|w>+yz9Mg?lmWk?p+(V(xj>LVp`F=xkrMZ14(uG7(F`B-I#B=}Ih*HUT> zbD#}(Nb+wy3`ka!IU&fy&lx)1ifBX0^75$RWu4((=mD}kn4iCcyT!rT5*^j!ZidRe z8nwpP`({HtxDITq)5e>wATka#64Q8N$1z6Sa zrmOg;mSn3u1D>|T<<3?{7U|t4*mT?Pa?42As z5|wyO;jv>BYTm)r31c12u5yo-4z7>!OD4tqm~t^#PWy2E&m!2<6D3spIpXNTC`aJ~ zCf~u_rw@ty>aE7|9hVy~eTPau2o+t(tBf#fj6eM4DkYp5Q@7I`sn|ud4(E|R?NmEl zN+j6}Jh{xao~)OyFxDTOD?!X!kh`6}v*vueNKk-^Z<7!sZUotB3iPXEQPg_PrLmee={VHQ3<2brGU_6rKn~%eeu=O384vHlXSG5g7MoBLwj3;hw*a)Gj1@yDwg(a?c80a!`mq*0v2b$6&ze_EV+%@hK@~bP{UHFahtV8 z)bC_$gyCF*_l4hLUfCR?NMp)_PG`QoW=v_|0y#MsIY)gfScN<`uUM%)NfdGDyq!N> zcV@x-$qRaK(W;6U&R>$IM(;UlngI6{++Q2zvBA1Ny}~7YJXVO&WJSJn-8UTX`utv$ z*m5U^nk<2{g2WrX)&6f(TAU~x#^n3^s+97|7`>KIZYy{g0gVuV!H@XW41*`AIo%Jk z`4`%wW#KV>Nakxj(RX}If0QhQGzp7$E;{uRwrxlqh@7jicq&$@@+(mOTpVF~LGRqq6=X87^{Nti#= z4h#h;JOc9E0?jgBigP^u<}EwokYMq^Ff(mDLci&2#Q-`^vdfTZB|w44Hcj$t^;6eu z#NEYe`KGF2O%Xz5M5(vce6W}SQ8>kcMW^Erqa)ia^}YIr~xrC{>4_2^(JUpw#>QNj+rwg&JB zmX;R;ajvBNn0(y9%FR6iK4X1l0i0EjcsCJerN0m?0ACpZ>uS7ucLg{-cKz95oJq92ke8DiC(rHG}9ksv-3w_Wi{sKBcxmARSc$BB7?J@&bb8MYp1OWI#^uX^V1 z<$vxD^_u`fk!j+3X{>|aR0qo#pxmzx^5J(I`(xt;w1P=m$@?Ol+Lr-|lxz4CepAew zkGtfcJNuQVqBNt%0#7m|^)Ia=c$!|WyIdEK8YAX;HwfY3er>#-Hn2~2HGxl{y0hQN zLp|AGT2} z?ilc3^ds>S(V~BOqliM}Hs=6^8YR>Wq{C+?n`r8(LEAy2S&b*Eu~T9c=-8fMOk-Oe zRc$_3axoe$iZyte*b%%Le=LRTaP<2FMW%x&QIE&*Mo{xA4pp(g%^B+y=A6*a{;~xV zCJbhB4gLgXJw16*Nq$5KA~B!h2_liX}=# zQTvFoB7MreFQ551jj(kP3SLe{0_DPP@7dl=xrv;rvfl`q5TZ@AKM|Xw|8n}-tq7vL zv&@I%{Gq%D41MUl-X^XRAsz=SZtm+A0PSJ+79fD=8~33$x0+Hz6W+y|PIUj)yfbG|zvD_e;FA=M#p zuP9<;ms%c$oy8beatcih(La%qFU%h9_jx_YHu{8Lehrmd;XGCPqpv47quhOwi;iSM=8?^a-b5DMB2Uy#P$=s2H+U=& 
zsrX&djCg$l!=n?|&Y3J_IlK7O5sEA{n`F;z2-*DpcEiO<>Y!*d;SmFqB|^oh6?VV` zX+NlgmzEKfVi`q9=tbk!9Yd|aGm0@GKCYpWjv~FIr@U@GHIJif8q&-efoa?7GY#Lv zz-#(zEy+@+jLC|qaK1D5BV@J{3wYr61Q+mP)y};W%xKPrd z*H3MtEvw*UBun!!*(MlS8}OGPbW5*MTD4)x)ItcCDqPmu7iLdAQ^Q+(6q_~Cq6XZZ zMa2y=bC`T}fwK_3*%KR2D`{Q7S2290eJTt5vUG0lK66?c$jH zLXwVP##cSqm}YITY}1jm0TDZ%HnNuwmx2JML}*@PW|EudGW1P%tpG=Yn#)~jAd=B# zro>y!Bx4qV5stZ=g55M*Brb&+@k|&{@X7s~Ec4)fJ}Jz$1DF%=$h1W*fPD zlN{|%DJxlkK2!vRDBm})7`erM7K*1(4ekT*sk>Z+A-M<}b<}~z?bQ_tm!WUYPjcVp z`zL>gg$h~6smO#i1FTCKgl6ZViL~uqQ#F3f@bJ#O^*bpVJLorRVzmlN)aksav!p0y z7ENp2Q2&sD7?^=DpFYL^+^f?tIzxERtPco{Hxdw-6Mbk6+K@$v zz&W(#j=kS!$hFm*kODK&-iz&I(8Th)&)e9xYez-0kagRM*1BnC=7L4UB!0f=VS)I80t^X^ebfEWml0Md#V zlDN}J1|i3@2a84iQjE}a2}E!#Cggq#_0~elZLBhfx!Yk^!Vp7ae)^Vu@_gM{inSVR z49UruEqt!_iyK_LQ zcrG?mfsn%B>=LkLe&|ARFPLPOQ! z#+a`Gf(35>-)k-Qy*L02Pj0CTULm(!%K8I#z!;yDr%vO2-*aUbE}L&s7N)Y|&FCG} zF!l}?cMSVy5Y_ZG^bf^Ah#Lfop@K`bsx7U*A4i|qngGIPInM$<2YPx>{JUBJXY@k$(^zoV@sd92(BN&bh~CEWQjd|$tg{RBiZLa$ zkhi<9%UK7WG^_WZsE$rWv>P^RF0`Wi=!b2qdZKwtnqT~Q2_U7Qvk&9xyTB)0*Ev7- z``Mp{@3@q9#}8cbfKQ1F6_Z}TxoWMN4j~KmAai0KfDH*cxQGU?(}zcALAaur%``-rCF8$6FFQu}p2nNa_&aZdsi_@3#tpO8Pe%ZGm{p(o6uU`8 zkuSb?t-8fW>B{9FbZdpOGpK&B>{3o`r`F(K;>Z7`$>LHW6cStE-lb6po7 z?sXJE^6_Ti9S$p4zh#Atn7sf-9f(|;xgrP^kQW&?bMg4lcxb$u~t@$h^o$NcqIG^(eIUX7&$1Z!O#hzD+bxuSd2Zy{od3w+yC+2_5OMm@~Cr0|VCHMQ-6Mz>Qg`Ne1zKA5j|q9gw5Zwo^*t zh%rW;9HmBPFIqv1LE?7^uYlwSs|xyeM8Z1uxK0q?Ct^FG8A{y~j#fqu^R-rl@#Ce? z%*MNpeDaI>L|vVlhm!B@xK~`?Id36jeo|#ZM|X2Y2k+yIxLcnOdHrN3r(kFbaBM6E z-x#6bk;CeaJr3px-snw!&|&BRKnuOJqbWq6&sBA-V%-+_UhWyh~OTKgED5n(t*tr z7F&Q@025VQRe|XWIA#6*_G-L)e{=HriB}QoNXaFJU-y~!3YwU-ijP@NDY8%&Z@zI< zH-aS#J~~lJTSQ>Hd3qYaZB!E4o+tM@&QRcMKBJ!wDq&pljJW$~_qI3Z7}Ib?zwV zw=FrnF*x~C5|hJN=zT}gKKO<1CR3Kf*w}mHC^#mhQY@2U%?l-d-h|FwqCOoTgYJZ4 zwoFz#RLUlz!>v{yTV;gNee`{7{kD(-bB)7BC~8Ij0LQW3&wy-P|mTCy7AASxso-|I<3 zb^3ljfBgRYegEm<{dk}8I?r*B>%Q*m#`?TDI|O2C-ciMPT+1&!Jrl_Bc7PvHBI4E2wOJqM*Frx1+PWbCW8|)4c7&Gjz?Pw&Cm` z8NS!@m$=e!d86SnkllMGCK~i&%I=AQ(Asc4;r~?iTaIfj!*=G`CMpfVzs-Wv71`)v z{xVQz6L9j8D(UA;By3>_3@I7isz0sp9FtDp!tQ!>OsM+i7Aa>X`1yp;%iEAW+WMj9 z>1Ma?$3k~sP(Ygy99VfO0u^IDkOc-7oqNt5xUdstR5m*q&0xS=z2kerSTe4!`>roF zI)Xb4E}HwUs;&D1XR+fHL`s`JnCbZy-JWL(mdz~_EI*Haloko4Ao%66?@l~JEx?)} zK^UI8Xo9zi8#3zH!mLHp24FED2(yUD5-BABIV%Aw=d7qQG=a=@%Z6Pc413HCdRjd3 zy_3z9av9zQRnHQhiC#A_>g?NhtEIdig`bbeUWC6oCyDEwx-Znfv-pOcH|)?(l+1fgfC$|EtU6P@E`fFd0{`bcH;}o zvHenb_DS;*$wtl%h2ExU3!jSyi+Rh1P$BGUeWuWy(WS4{ci<>0^<%mXn$<1523{Yo zbG!I%-$b>OXkV#uGyZJGa>L`j0e`d+5-J1^d|F^Z9MhY}&W>Q-j#pE`7}&RX5V`r`3f0Vl_>u1gHz zJvrUUFkdy1kIU)QhYwl@4MJw_m{YQ`IW*$Ksq%B0yov1ZihGKN&PAw6qkxZ=y6vy6cnO!{I_t5elY?Y-Z}din|< zDi(Z*{y8-c>nyt!XW(UuZ`oN-1lU>=DPN~06~tIly3<9>PWk!2hC?%$thsRBlfeZKJUaG*B(LmQU$ zn%Mx-I>%&b@2(jNQ|=ybM;+gRR<5owDYvu<)vtKkgrInop0Y9WC-!2P|E_q0Gnk*V zfy5?o{yXlw)+~M$=ijHxJw-l)|0-h!9Tlg_`T)|2S~`QwrnK%Q8Xu*brwtpkKmKW0 zhFL9jSGG8?i%Qt_P;iFx-k~^v@maeEPDnj^Nl6n)sw*-@cbgNRS(l}1`p}6YFvc;M zn76hCNZeCRov03Wm1@VER^Occ)1pp2fMYaJJ$hp3{Xa8`9Uy`T4aF zT_*%xyL(+Nmiu*&*$%9RDw?Kj3QldSRQIfDr=(5wFt8@kCuRFu=z(o+^?ekx0lA<( zs4gmZK+3!il9NM|yO@v_!lkZYS(q>xSi!?xzN;H-S-bsSBxv#oY3S%2CT;1Cta+9! 
zMG&EZH`kgHfCA+QqZm|f%MaL6_r`)n_t$gV`Qvaj)r*PlN(p?yL&qTqy1wWW@}Nm# zTH_9VIJKmYWPje3-R`vX`{cS`iB-C$f64>DGC&AUiPC?W@9#FLMxIIJyhZ%c35#_9!g zQc>XP7I`ZjqjKc?4;Fj0RoJNzlJuWX-Ae{X*Ed^Cr40$SehEgtu4&;G-_<-+c(^yzdAR#wKw+cB=1tW82==wsjcMry z=Z)l&&`0+!uhqAzh&bv4B`TJ36w*dW#HMo>h4}f!MZ}zHB73uyjhvB zpRFH+LTWb9+c@l~)ms4kcFB*MscZ<2i2Y}w%bwoomx->S_1Rq1xJ^Nnz zUdt_V{xCVv7O=2xxODjb0|yExZv${hZOlbYv1m2z8Z!fXLmY-dKWp_-b$bpNSfa^{ zu43C{6a)RB0jXne>~LciN0n14vip|8aopUWHjCF;Sr!IN8(c57+_LkmhrI)IGI9vh z-J84MEX5zA07TITYPc%{ z9W&(z>fx5ZF4!%|Y&kp0a(477s#08uImnn~>}H14%7(OLGAxaw7IIWwD`50lSDVaF zL)GHx>bC;x(sP$o*F-WVcLGRvK6Ac03_U`zDV4=(*WBuDaMm2VPYuo=B#pD+UU#Lg zqx#Kz6h&cZbAuG~^k(|vjN1WruhX4h$+4vOR*e@<>rD6GwSw8hWyRs)zO~Pt_CkZs z&0*I)L*UVjJ z^$wdPummDNxPhx&>u#jhC*@cfOSMCYM1i4VW2$j%`ogqYqg@(mU?~zZG4=!X6aJW)ba`CM7C{l z%6eW4++b)gV0_lSa#kJSlUU|Mvs@mGR(qbTi(zPU7atwzZ~M*mL`;HxPi+@9m*O%% zHyl=ymYO<215?W{l@{R8`GtEz4dg9&l26yXwG|x#5|#)Vw7#|I@RdbXD%bkNZhakV znE*f$T%4Tc$+Y&TRjT($jNz0j^UQR1u){JM8EA^7*0gRR%0qEdIO*@-%8uy0{$>fy zwP01Yh?oY%QfM5`*%C|KBip$$+* zYNaLGrI7(vyUu+n9|upT+4ce)aWU@R-Fo04?cWKwnvA*w0qC(o>JrgNFrdijjd+g% z)NY8hrrP6ZO4j2Ni|^gXqZ5>@V>nx-&H^-*HFRm5Et~gH1?uvNWr&~ht_*b$qBS(w zwY<2JJi*-hjWx36R(3a$3LKbLnTu$FkRnQsKsKjN)tnMQaswl2la%<|on|{8)r^5B z&s%Ev(ZQ0eyeWojxVTKa10g-rkTX6lt0 zv*Z1bxx)<(WTtuBR=ci=)iX=3G>!f~6dS9l5{Gqcoxx%94XhdobT+F!j{M&(R0D3H z9!e}-J&;o(gE%1GQChhcsY z^L!Y(r{Am`)Z9?i1N4am9?W=7>Ekjk)bHdInu*5uG?Z7`D@YZJOz|pC@&|H}S)O}; z0;D#Ks-j!cQgZ{U7rq%ZRlXRgEYyck9bb2j#G_XfBFurBqQ+PYl%z1==3E}zZn1n6g$Wwl zr9&;Gs;dXixH9if9EI@sK{jpWt)hM=(6XbluaE=h(YN5DdE`e3LdlWGsWAHx$L6>DoW8?wp>1Xc^CE|$GLB)Y-cDGG{4hd#tC ztXQ9%12LBPeb(Ozq|<2yyh$p{@&U~3`TS@xNf+dgC8k@j<2j@ddsj6QZXoElb`j-E z1rK?vF6$pVmGL$+Z10yq2@-Y&T3A%+>((0_PSy>hAzIaN()+RZru8|BM2ie|KjnQ>Ug(GH>i)EgB z<6c`2JdK&Z;2)aKB!ycoho`BkS>xWJb%*{NX-%^0TGi%giu;ywe{sIqMAo0 z6rFGb(}}bzSN_LvwIkRn=$wp3;y3TEf`>=tL4s&1YzE{O% zN1b%qL5+q=AMxB|HYjo)s+C|g>BR<2`LuV%iWNo}sPx%|op0JCe9=CDsv8otj!Q!^ zE_}|qgsIaVlMF~X2@lhJ5=xS?h>x!s^_~d21~*MEM-T>k0+L;0tI+XSi}iV2U&e@r z(r@1Av=zKj-j_qoGiB}W!W(4*PQpDf69*}WU`#=Ra#8y{pZKOK*xXQledm&N1@`cj z?+R~`1aw#E*xALObpjW@@3YR83v~2461h2*UO0Y6^cX;d;kS@EhAyu2S_SI99Jm2F zo1-EgIiYi;0x*$Kv2+Ct<;#vdIPY(QyuUr!NJMUD3Cn+>b~?o)KhTtijzaB|fF;6% zk#A${m7Ym0=~uBfJw1_!Ck!2?r)AV9Y8u1ZlG*6;{b`hWkMb!{o(x4xAp@E1RJ24n z)!Gk=eFDK*4=sGVg3-ULJ$Z)9nGB9FfNe1`YJQg5gP?e{%u@YYu#TnY%Y>}I)V{z| zwIR7z?F>*SfX)!bT}o%%%1h7NFtkchpPQ8!t~ZphtUimper(HYdoWC2nqB4O6X*Gz1n{SuC% zMC?XF*#*()XMY4OCM%s_LZ5`$1+q+tRG)2Qmr0ZxavHM^FeEoF{2}k5tM}< zL(Ey8g2sWsBrMuD(iVYDs8F3YbEfPp|K8aJ3~g5GYcdC3g+i@%#?JLmJ?2{1$BwN^dbz=PaLs# zhf>cfa|VokK5mCGMk&klVqD0d>7Jbd+0s{25?0bbA@*@i_I6u;4GnVlfpEPrUCssZ zC{s14>J@s^C>-`k<@}G_SzHRL=mrXf(zS{4@+rdnZw?#Re~eN;z%B3=P+jO^H=D0$ zbSca^F->w1F=bbfaWyWysl z&I^YbF?JbyQ=~J1vc8C-vh*knI*}po8Y2FAKq1PF14)WJ9M_YMBWV{51O?|B-N{|i z>ve4k&sI`ZB~;LRd|5Hc995>D7>9U3w9brK9#B_V8SSdpZH>{7UP6&77YT2!@^16~ zzz0x91)Ncl3a;-kbvNzm-2c_%?ViIzgrBk<9!aBE@-ztZwba#m8`yGQ9HKDT+mNq! zA3;_dj%etsJ9S7l$zb*R%ncR_YG8^~3b^#2-_&keT~mxWr@F%|U8bnL&VOjYbD(yb zO&gh&lmrxa_u3khuV7W{pq!Sp+31;CdTZRHr&v7~?Zac4tX?oN!fhNOBAuauLfmxe z+01>oo_-C7seq8gRKgKCuakU=_^pe!Wrn)b}W%@?ewgurhY>A zK)288p~++`U3pe8NmMRi3cB+V7tCB5l($3p`5*%~pD#JtWWE>5ZO(a-997|ufj5T3 zV==<3y7LU??oX=>#Y&X#Q2;v>xZ|5GE*HM!sG~o03vR(AG#KpZ>q^ayRC?4e-O*|M zjbZW)gqk({(bjCFH3tiY$-~FKt_LF)7vDD8OZWr3>(0xW7X1`0|6aGgy za{aH))=IJ_2^w7R3r8nt#_lJ0-?UrVF5{3noJ3&N+;8&H+B(+lDg0Q5>Z%QY-QU2# zA&xmL-vTBcW}yX4&az_dh2~!004Nh34Bu+qoK8>6ubf`%)dddYu%$|&n_$Ns)rhm! 
zcW+#I6t~=#+(WhvAPv4nukDSSlg`X$?LN;(z2=D`y=Gx>YvOKngX0Vi_FV2;p^d>z z20J@7GaJp0r(dh`)G;(XsSrW+8P833t(?y+vns@%*SO8+vNWZ;a@bv)m^LRhQ)0aw zCf!e(IWvQ2#)8LOL?G7T;puZyr2Uyt(mleg>?Nt~9;mhLy2?;##$a&^@twE&7qfOp zT=T9}vAN;36Dqv-Rvb8ZkZ?uBJ~er?e+{OiMZq43us2bvZr%R5I>!1-ck^d$M~G*P ziL4s2CgX=ndr;xf!5wbTIiU7PTR;U9pf5Z-CZw9O1T zxpE%eV-gQgBO5AXD4&IqO3`@_E2nH$wlVinz@**>+kp>{im@LuNp|_>KOFiFD2J~? z;%R@ziVf!G!y65CCMVGexlXi$3@1A+lA~<{%lb2LOzY9fkr;+S*hwkAraKNb<PIk1B~wU9Mh@cr_y$8vxsdkRIEm*PtAFRTMTu>{k&$RnAO*y$0#-s)fgEwV66D4yx?Ygy`fe-H* zg9>CgXgHS2Ggg{W7-?@%?J-4oHJ4BQE+hbo}Ho{I;)yKkLd)zoG!=hkw=F)$u zxwpx9jR%wcJM>wfcUY;jFk3=g=IGtqIUlxA4Cb(;FZ2JXkcH4Mbu9KJqS89 z1m>F_1;jP@!6eS}WjY52QH!Ghf26q=z^ooR^lg`w+b_d2IIrSl=21d$q4{0LoD=Cx z2sDN*e2*$iORnYZXzw@KuxY*`gPNpxmCuX1hf1v&@Pxju=?6X!u^CI1pd2^{6xMJs z-acDoXVDRi5@1bbAMRXp>l>#b{^siRXZp{I(`$Q`5)gK}AqlJ#6xDbAX|&4^pSe;0WbMQ1skVtO zd2U2^f;kK#gC`d_KR1Ms_Qb+u48pBGa14czmpg;4*K9*Li0VSe;f*M*hQ`N>5avL- z@r$D3zK?{aW<26)ydEQXS z^=$5dN)O<0M(A8PEh`KVn27vcJI_DqSPWPKUR+r;tLx~kLDu=5x5J?^;puqF!zR{xjgIlbDp;xP!+o8k~H zo?q*}q+L8&+I|B1k#J&@^SRXh z2CIX9=tgE!5$gAP$Ppw^5fZkzEggo@@+$*8K~P+Es2}b1 zEw|i2ZORC}CG#ecrC(2!S&iicYHw}NFG}sW!lRtWGP7tgsf}I*YCdKE?IDV7AvJZd zqUJ+8AOuB7E)Wn#BaKk%aaVNUecI0!X@AlmDM0Arq;&hx7qkw~)T(-Kv#^_uw=QR4 z&LLEBPfZX~xsM1$aDF^ustVBWH}jice+CG*+cVi)LFI-3`d0!xNk^+-SzE_BW0sui z-J#?DP81r}AyE*94lMgpK)k&H5ex-1|F=t{nIV||2xW44dqSh~XplV9f;3BU&nJ6y zU-s$q9mo70TtnB;IT!h~5He94&0_n}+I&nSSh#jNjepC2!XFeha07!lD*c9auXBoV zCu6LVf=xgSW_S$LT42QU6o+iBXCMZ&b{J4s-8#BvV5|Co7yZO>N|umkc+99f_M@=< zm=l@xF|jKm4Lk7M3Snuj@#rsBGrXmXEQ zU$dAWV?I!}q6sCRS^@9jmRF|G;cv_nX5YpjpVU@@o*>6x|wr zZ;>$ZJ$YLeYXu&XJgkS(iv1xxji9P07%WYn^(aNu-OG46uTecwk2v~@A}PFv_4%FX zRZJ?g7&3y8ZV#KeFfD*To~7S&8dabxe#Jg}xJ`wDjnJGX9Oa0m)w{uLSjQ0s!%=9` zYibsbsMHM3sARdgD|d=((Xa)p zY%|8eoBb6Ic7MlChIKjo!9aV|Thf_;SS}I#$WLXe9oO|c!gkdAeTgM)U5H7)R(PX6 zUTtO*(DTAkE$H@#qbXlPi98^KQG<$O%`$7Nce$vUe}zU4)qtT5Mq*c%ssEskRxD*b zGVopfM=|!pp07TZV2|2c4?cr(V?oi=zPpaBRdxco`JYt!tBM=BWxxzs1+BS= z*Kc%r|629Hz_}z|*lESd&HAL{X4>NE$8(U#=|ai%*zR1@VqgUi;7{yfFcJ*L4qX$N zW2kPj!hFEiR*qEIh z;O~iO{iEA<5X;u%hgr>vc{ztgmHNH}t#f}~TCZzl?Pok;!&q)ZmfgXAfH;sP$dW=I zQl$ZxQ0Pb;_BJyS*jt@iLzqOiLepRLpv{(a!Vv@unuwV^>lsxqmtVuLW_+33irM6h zuIY~oDBI%J#;QnwoltexE9xsp6}1*+_b0S8&+V`ZE`#xM!sk@1XAIVUMO(^{Tr@5> zHYF{+y4o{2KU*!k{Zf4_IsuJ9bS zo6jxk^h7u(vE2D-De-lO$8m8zF*{U;NYhs87+^|&C1uo)(6}?nZ_8Eb$@ic_z3IPW zbKz0LfWwxebWJ+L5)6`4zxoe;MZtf*tnBKc`k`%U4H*XCAibv6SvyB@ zEX4+ye49m8=l0V?;~#4q4!?Hy3Qzx*9Mq;ui1;XD*&`xi`8F}ej4kKmTZUz4Cndp~ z-_m-IzS1Q+EKS#P)wER48`UKyQA6xi3mFXVZ=D?m-M6Q>Euv=k-t8!Z;ojr1ME@}i zXhn^7CzESohNbrjk1XMHw*@TtZMT+IT5DAG!t)r1eF`lGN4z(Gf}Yc4Q{TSi^z%bb zWx&k!L2c;bQZ}_SIdBw&!j{GQ)pIvR?P0H)QHlVen$-n)9l|KPvkiyeO3>(Zb)s80 z__)gS1G-Bz?rTsO8a>5+o@rr4T9&;yHG5XzKW05l*`8FbdfV4?I$yu-EN=RL7n?`q zZ+}BDl?gqha(9fc97#|DtdcT4*FQUzmVTB5a6GqwNQZ!ktB>qhQ$WEaWaWTQ)IzLG z17o^H_4Gx~a~PIe0nS$~-%dhEvyAlIY%(pnNF=D)mZR#619=?1J}jK_j@O61wNG~# zJoXF30+ofH`STLOEEaGXvgFj92n}{YI#=T$qkBi3!mxaS+^l&k!LkuYD(Twq=aY}G z+e)S&4F=%UN`V(M774?kW*}wWtnq)}2A~sgWe*+MSi`stSr)bY#~ZPQWADCwNf5$T z8AOt(gSwJz^id(UA!uoK*IXhDW~kQ@NS+W>uWU|xyC3v4Jof$&T_%rhg^$g7mlJwQ zh&=YjDJ3_ZDGcvMRIyvLkSmJ9$t<)weB!txC^3qNmScxf)}2oT+IHfa$PV^nFgc47 zG-mz%;Pf-NzOi{7_Aab&ae~jqS6+>wBLGZ3hcDix^Su(Hbm0Ac zb9he)k}i}DpgrOg*bJ%_v_w94EO(ZSWfzk|6=cj% zCvh`4m<0%0Zy9~8ku$$GE6(>*kU=_nBS>dSL6VLdrx*EPGxIZ(|Gw;52XNV~oW*pX zufuCd`1Me<9C773x3aXSKvA*K$1PTRY?z6iiy-;Y4Ro-`q6^M5hLuaK;RN38SvB>~ z0Ufvx%B%2sNuhg_eN{okKnrwrVN?;aqHHtT@Rs|b_!Ivdc}`566G+9+LK?K zt|LQeck3Qs%G;TbPrk%|<+q#ly_RBut&6Btq2w0eH3c2>zJ9eL!U;wYFcs25JVnTB zgA+>DwIUm_MwKw$!vGDqGl6o$8n?`03)K`+Bv$k)eVX?jAMsh;xmafb_ 
zrj7)FsKKk$p$4M|!iF4wK{%VKRF;fsnmB=no%ZkFuLJbzV$7t=VhtX6qnecC3ML;; zzE|#wDLZ2UUSm+R<5Tw`LIdb(smH&JcO)-GC(t10-F4g3ZkcY~no^brz*W(=*IoPF z+H6j*l~`1%$!C7d2ML^l2A{o52q?K|+wS}ib!VOXt!^p@XFpC?2qZR#@k~^XXzma$LZ`bQsvP20V;Ok+#QU9lSb~RR26B!^sA@f zs|S{evqf_Q&{S6&?A?6(1;#I09GRlvdVRSNpL#dJN?XGL z2u%_o%i)ZoWL<8yd90X^u?nMrkK6o2;;BdZhR8tXfzp;ss11F&BvJ&WG zCgJ>iSbJZG9qd1aLA!UR;z&AA!H>T)t?l(-_TAC2o;f92p$7+s>+K}@i8<|k#bjX1 zJPgJXm5M(i4u)MgvBg1E6J@}Oo}Q;Gq+%J{kCoY@@i7a8lgMAfTf7>B-dLORF#o|Nukk3J3MHiFV$2;B&Pdk{}e3&l7ffPoP zXrchCX(_@@tJJik9+!S}I?(&`7xY;kz21|D+>Q*w+Ptp?^DQq;o`ZuAANwYJz~m5# zFJS{*QWRoGU74R*Lk6J%^!ezFStOwB-+!H*oqcMV{JeRF5R0}S+2vvm=rzk@0z*iO zaYh=10j!A-0Q|zi4+0d17R9u4yjx8FJF$I%?>VZ5v-LUWPinxwtUfJmDt&tzdR>MKts~!Ajhkahz1BwXDYUZ-qqzQ!<>(+>?0G*eqUz#G^}v!<;V6;D`+@+dMF-@ z*RIRnqJL(QOH||FErieNE8ES#L-!o1$=8?3>csw!=_M0vGd&wE6yT%tjfkW#%s9>sVg z2biz83x4?1y5$MEdFlYX#X$vgk3b?ch3c{Z2h>+A3yqiSl5HSm@a-@MG2EC59RU#e zt0d>FOwv_)2GkXOw=m2$d;bXjM||on<1v{s!VsUw{*z zfJ7bb`_d?fX_17NJ}^NMYt}DzQo&Y@uz}9q1zAKC{WgpL;k$ZB88+BYoF|$al_4Ss zzDZe?YH410TNfp?Aue=9n`=P|4k}6ka}NXU(*<1g_I$8fD9(4U0^Ka5pNMLp)ZbNX z;|yk9h1<+@nM-@Nzq(qZZ8w1rbKz+z#)Vpz->C#z`4voR4C9|KB9mj z6h3wSre0N(r`q(7!{j!E951t0k-@HIF-!inBR@Ti2km?7()Kx}pg0`OKNx|F+r(!$ zv^q^ROPUTt^U;k)tUO_Nq|J!|2O>y7CgRyX!qFkEc)QzQO5nCef?C_}vZi|m4v~?r{FjBG3%-o70~r&xVB4x!dADxBy?-3ER_Xh1r1HVLU>V0-xx}5bxie z7ADvtAb+AbpK_M38O9iu-l|S}2DcgYnk)|HLm46@{{ZJ&Q(%8}b!nQ;H`_JVg&F?O zKlaqP=_9O{z+IW7y4%@L)0Z%4ftT@`wwY>f>6ULMCLC!+)sVL-Pw@%M*f|(K_a#05 zGp=<5j2zqe?p2e{x1&tfcTXHf|EJ&n`PX}bF_qbbrD}4Jz>^HVn>j$Hx}j=1vWoo* z?%A1!4h-K+C%m2;E58-s6_puw{pk!DPH0wXgL zrNL?)v%Wu}QbN&wt^*@`d{LN5+gx{yv>>(E#pObV_zQF0>Ypb2qTgZ;BH&<7HArK90zagbH13c)Cv_0BG?Ffz#EiOqMHLo> za}98LTe+dFtWO5KE3-(U`^WL}A+g*&wa^2PK)Seu)mV5c$y)}k9qA)sScY6@ zSBtSV`w3D-oPvF9lgrvxEe#?4BaPy!yP8Fa&f~D3_>GG`Y;J!U7#>V;vs~o*YOAt7 zb=%1>|ATqoeG!PfB-I5ieDTcf;1^V3&f;+R%EVKVd~sP%h^#Z9h%TW;&Ziq?i=jtG zdVNOLaCqZC^Q6ecR{Jz+MSE+lES$EQ2^)adJpxD7v3>CiOA~sv%q+C*&Ox>0+SZ)? zKbV670$v!2ap#0k&Af4IS5CcPL#=j+Z;6Z0e67Z21c2f zhjLkHo*}~y8%0nOo{eh5{C}+&@knun;{4G(Z!sPV#TbF80F@hCZoZBmN*>P!F~Fn;z2~v_g@>UkBu$t z33GOqbsrk^BLlVcr=g}~hH>Ey6;pq7SOw;>!-k6EEH9jCLR#}t2OG7@Kd(AY0o?|! 
zwf$P0{bmmugct84LmKD!75=i6$UY1Q&m%6_mxAOa3|!S|6P%$>OhjJ)WruwTPWH?{ z^!OtVtrIgzH~qYrEs&@n{G(Bi@lc`@fL1B=N861IVLC!cGwXtbWq+L>T$i37yFHc@ z6g&;0(xY4nfkz!vFJU#UpznA=cEgZ0vb9MmCS$ZC+0J zJ%k8(lOF=9kwEbyY5p#)-yOuTRnCeeC`7x|3uSe6>sIp=!`YFFg=h$bL~_-rw+wM( z8k}YR?)GiUhk*gvR(bF-wd;;ZgLn4~h81dZU@)7)^8U_!jm-ok0grRkuJkK_IqbT` zt24e)eayJ2BFJ49|LY1)#ua?*Cm|Ef1JAe$d+nqQX&#UKt||8qN?rJ22fQ@xAVEr) z%aKiLyW37f_x;hR=V!VhOVm*yVhfd8D;%jlycne`7xy=RCMv!+&p9JSHwBZjuA z6A|!VXZ5$WY$JT8A|HK+MN_`F6xF*&AUX?0kTms|rSA-;qoq+AaY4>fzrHGW78vVr z7?n{k`OQ^+BcLj#>Yafzu}TmQaLzuoJf(3=O1 z$l(=%=YHD)Q%E-e)Ej-Tb14Ai$S9}lSGRs|Jsq{szirMXoX2k;|aT0Q2UVnx82+=|jsTS4H#SbeNzjC5kG} z{1_)J=35-pQTOMDOcu(>b3q@`KT5{KB=~b`F>|cy4Vv(YzQVj&GaaLNftClRHbv~ZhAs+3RuU>0s6ZQjy{Ge-I*PsT;A7<1=b$vL(~H})Sq znEwy|RfbYSTgHH-)mYAy#ePtVZ8Ew5q030%my!RK|M=CfrV!r%ZOHr3^0SU2%}+)* zympLw#cx;Kf?!bB>g`)uXeb5eQ=>eI>>|1y#&U*;jvVyCDZLmSxCJ@^@*GCzwZ?-F z5cIEKy_Edw8V|+YfrN@{k1$7m%XD5|5rCefZX2CSQt(uq$x=3G(kc*Y;GUUG#S?xO zS|$uvE69{v*ZQJ%qyJEe4+k=n_iM2lqSTRuy#+ESKOR0ae08i!^(d1vdf(kIkcjky zs)ee$9fo04fb1Trgu?8lx22Jf-|hGzC_yvhx$dE_4HeyY7}*jE@WV_&ed6d3`f-5n z5y#lC&BCI}gVUi6v)f>F1dRPqM5?3Wk2tg!}j+Vs3UxQ2V_~EkAt24KJNFwPWeyo&BjjZ znkazDJEj@T1xKlB)Nd~4+12)Z9iYbdijNSuBw;B>ecf-@+XJRo)w84I)ocnu&(H~~ zE3D?*Z1f-F&Jz&M#4%1>{`mPH_VR{o)~*&KDEk~Nl?A!{B+99IOHVM>9VtVJp{;E| z%I6U0cc|$7&KxLZo&@Yz@asLD%W0@u=@11AJgF)*uqWPJP+1`=&=KmI9{JfK>E1rJLN|`WeDpYNd zc2vJRR_#M4ToRO*um6;3QC|0>!WL;93?zgU)5v`I$a{+a_s``gMuzD4yvx`Q5gGXH zoj-4$dlWrHO#3a2)G6H8ycn>jE0dr>^N;Okp)|sHe3cbxm`6iCr%RfhK`@ketb_Q% zG$f3rZ>El9se#7qXa`WheVEkUwKx4Ds?v%W6JU1e$=^d(uoARLxQ)*=bvlg#L?r}U zTM)iPD(uy!&Ah}SVYM-LP8vJEYe@|#;HBiKp-ovnc*~DVG5VCUP!tenHGRhWIH1lX zk#@S(fQO4K1brF#N~h+J813ilWUgVpX(i&as zg2HDgIUK4nJ4U%2fGZTYcHWO9X7>16p0=13uLS{-z4nLoUw zbZ!daRw3;x5Mm_9`AdTtv)gAW8?%6u*9)Ck4Q5=x&6QW%5s!@$NA8s}$QQ_3<*ded z%Eq3V<3=d=^kWpfAPgaFo{%LD9Z8B^gB?Y_2c^HCd@Wq08S4@zQYP@5XW7wwF}cmv z5tE!8h!02Q|8Dz|sUua2z%xJ6eX43A5Z|D_5K|%;es&*?>Do>u$+Sk08mF@1k3w^< z5=hIbvKy`&g6#A9*8`8EFbeC)H4xGRD|idq#zMh7&3$k8k7tHcnGv2ai!xy{K;`{i zZ7(X@9}-0np!_GQtb$w>%G5^=*iw;BbOx2e1k|1-BP0nXQgTKqxyEB2lXe-nYBMZH zL4u|&`^-7zm?AW7sh4}A;>2;|PQhttZksS>L|mppp9U_hKGKsdfGb=Tz5)MhT^zte zj_;3M_`}H3O%eBG9)UR<3u-8xvB2FvD&N4eFivMAY^S$TP~nRJ@8kghq*oIb;;c-X zKAi|0d-_aZekFc#IRe=@(EmIeEtip(_oSMj+g&~c4Os1jmeJ;33E>_l|gME;S}$!kH& z%9=+A-$mI=6x-1o5rG2s$0j#>-f4f&Ld4VtC#-IWq&)>x0p6)o=Rj&dXxD4fu?7Vk z4~bog5}3n^$Dv!(Q1`zN+*=-f{X0Fmm&t=9546mv1b!{CY2!Smmm~zXcc#UP|HK*$ zY3Q2PgIN)c^~1DcdlWoDVyV@6e3yOX5?mLBn#WjDeUn)=c-f6`TM*0PIOqR*&F?pm zQ)7J~#oeA`T5#Y#4p2Zdd2&I&4Z6A#mv{>T%Y-_E#P4Ck8X6e6`hYp!0h9`{xj=;X zh^FC5krj#WH&K|ZH!J(n*+^CSk36K|(}AdoA=?Rq+0n9`4a}Y3M}!E!Kvw{)Sz=i#SS)=;mNd;&rzX=2n?EJJ&A(Wg9h8b$a_Ff;NS z%Gw9<(QYY=^x+y_B{Zh$wDB}9yu!932)k{8(*s$p|FamMp+n|83@i zgq*blTDQqe=6g+8PG%2(gY4ssw1=QClG2(kAvnRX3s-N2pWoF{XN?MV%ac84#tonI zJ_=z+yC(Fa{dZ~9F=@&4S+k!o`&izry?Ah$wn_6vu_k~oT8z!lIuphtIkITPC1tI% zF;RKLZ>)^(UJLq2*A2pCuY>gSX{(u5(n&$NPSC+0WR~Q>>XKSiQf{7~ zj#@S$%j=_ofs}rPW1~xkKmVaZJa-Yl9$TUlO3<`guogS z0C$<%Vt!_>T%>^q4_{>H06OJiUg8}w(Z&KJ+86@i2n~-&j|u2{qHS{~PIuVG_KUeg zre{Db0V4SKai|I>?eFWDKn>r8bpD3o^05EuE4*|YK)NF05MLSTmu^UgVF1Qgq}7C7 zK-57lUA%o~*v8ZeJcpbr3T|dZvjIh@FAp{`qTXXo$)Wi7SLu^qQ{^BB>X)t^a`=jL zGX=C?UpL!Q6_knMp82+CneElo)5BB{x)KEhfJxNN`|}xJVQtQy>9Tcjs!9nNH%n~; z$Wu)}UigQfV_~%gF5eTrpj9Lgy`4l14ESyXbvE@@I_?B%(j|qLXU{f|IE6P96vQK8 z+)Dt@5YPYL*!&kt$Yw~(RH{j8XtdJ2;UoRm_jn-AUi7|*SAcSYU)iT8L!OdeX;ZY7 zNYj$;;wgJk4N(Bvny(PC1v`kOVG29)U@=^bNs}gtf}7tHv<4H%x&rD`k~x{Qfgl9` zNwMV3%A!N~*dAw$a9iRW)`5mc+$Rd7D@iU_dCh`6ruz{(pltjcx%}q^ps+2$Z0fbf zZngQ>y3iW}MDKMab@+upV1ZwU+7`d3Cuz}k6ni(4SiEKNG$?MkcpXbtNvpgG#fAuY 
z@CStbLM#>nAFplRO@ya3Q_TI{1A!O7Cfe!>t-t}9jph5O|A8^S7xA|qlCJ)2wWG|B z->);_ZefScNy5y{+tm_Ux_Q|W5rB`ON{$BW+_c7^d<|TSRMgIH*gI?s^m)+5Zv)D| zJp54@Fx67<*hrzx|B-HCM|!5WJoKP!-`MT=^$SP+IX<-!r?IVVPzyhs{ql#2;PpC9BHqHPopbPaQsP0|2}dHjabG2 z#u;=^^r-U8mhMl>^@S#8TjcXWrza#?$77OFzZZgn)R`K>PNH>St9xYvh0{x;flNAt zz7GitmMX?AmI|7tbU{rb2I`cg97bRS(s>W*^iW(5eE@X8C+F0Sqva7>u!;iyOEN~| zttJuzd%BuKKmt&&@+m|)Hym0R_b~#}d8;t#$jHrv4b`%3T+0-_rKb|`E&ub|bp$nvUsU8 z@g;&aI+Hk+&FsL~P=o{m+M{y-U){;$wI|J;%Bw}-vcT2mC*|CtEi`7<9eLCcVU

-```
-SERVICE_HOSTNAME=$(kubectl get inferenceservice <deployment_name> -o jsonpath='{.status.url}' | cut -d "/" -f 3)
-```
-#### Example:
-```
-SERVICE_HOSTNAME=$(kubectl get inferenceservice llm-deploy -o jsonpath='{.status.url}' | cut -d "/" -f 3)
-```
-
-### Curl request to get inference
-In the next step inference can be done on the deployed model.
-The following is the template command for inferencing with a json file:
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/{model_name}/infer -d @{input_file_path}
-```
-#### Examples:
-Curl request for MPT-7B model
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/mpt_7b/infer -d @$WORK_DIR/data/qa/sample_text1.json
-```
-Curl request for Falcon-7B model
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/falcon_7b/infer -d @$WORK_DIR/data/summarize/sample_text1.json
-```
-Curl request for Llama2-7B model
-```
-curl -v -H "Host: ${SERVICE_HOSTNAME}" -H "Content-Type: application/json" http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/llama2_7b/infer -d @$WORK_DIR/data/translate/sample_text1.json
-```
-
-### Input data format
-Input data should be in **JSON** format. The input should be a '.json' file containing the prompt in the format below:
-```
-{
-    "id": "42",
-    "inputs": [
-        {
-            "name": "input0",
-            "shape": [-1],
-            "datatype": "BYTES",
-            "data": ["Capital of India?"]
-        }
-    ]
-}
-```
\ No newline at end of file
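For convenience, the same v2 inference request can be issued from Python instead of curl. The sketch below is illustrative only (it is not part of the repository) and assumes the `requests` package plus the `INGRESS_HOST`, `INGRESS_PORT` and `SERVICE_HOSTNAME` values resolved as shown above, exported as environment variables:

```python
# Illustrative only: POST the documented KServe v2 payload to the deployed model.
# Assumes INGRESS_HOST, INGRESS_PORT and SERVICE_HOSTNAME were resolved as in the guide.
import os
import requests

payload = {
    "id": "42",
    "inputs": [
        {
            "name": "input0",
            "shape": [-1],
            "datatype": "BYTES",
            "data": ["Capital of India?"],
        }
    ],
}

url = f"http://{os.environ['INGRESS_HOST']}:{os.environ['INGRESS_PORT']}/v2/models/mpt_7b/infer"
resp = requests.post(
    url,
    json=payload,
    # The Host header routes the request through the ingress to the InferenceService.
    headers={"Host": os.environ["SERVICE_HOSTNAME"]},
)
resp.raise_for_status()
print(resp.json())
```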
diff --git a/docs/gpt-in-a-box/kubernetes/v0.2/inference_server.md b/docs/gpt-in-a-box/kubernetes/v0.2/inference_server.md
deleted file mode 100644
index 58cb9b06..00000000
--- a/docs/gpt-in-a-box/kubernetes/v0.2/inference_server.md
+++ /dev/null
@@ -1,43 +0,0 @@
-## Start and run Kubeflow Serving
-
-Run the following command for starting Kubeflow serving and running inference on the given input:
-```
-bash $WORK_DIR/llm/run.sh -n <model_name> -g <num_gpus> -f <nfs_address_with_share_path> -m <nfs_mount_path> -e <deployment_name> [OPTIONAL -d <input_data_folder> -v <repo_commit_id>]
-```
-
-* **n**: Name of a [validated model](validated_models.md)
-* **d**: Absolute path of input data folder (Optional)
-* **g**: Number of gpus to be used to execute (Set 0 to use cpu)
-* **f**: NFS server address with share path information
-* **m**: Mount path to your nfs server to be used in the kube PV where model files and model archive file will be stored
-* **e**: Desired name of the deployment metadata (will be created)
-* **v**: Commit ID of model's HuggingFace repository (optional, if not provided default set in model_config will be used)
-
-Should print "Inference Run Successful" as a message once the Inference Server has successfully started.
-
-### Examples
-The following are example commands to start the Inference Server.
-
-For 1 GPU Inference with official MPT-7B model and keep inference server alive:
-```
-bash $WORK_DIR/llm/run.sh -n mpt_7b -d data/translate -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm
-```
-For 1 GPU Inference with official Falcon-7B model and keep inference server alive:
-```
-bash $WORK_DIR/llm/run.sh -n falcon_7b -d data/qa -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm
-```
-For 1 GPU Inference with official Llama2-7B model and keep inference server alive:
-```
-bash $WORK_DIR/llm/run.sh -n llama2_7b -d data/summarize -g 1 -e llm-deploy -f '1.1.1.1:/llm' -m /mnt/llm
-```
-
-### Cleanup Inference deployment
-
-Run the following command to stop the inference server and unmount PV and PVC.
-```
-python3 $WORK_DIR/llm/cleanup.py --deploy_name <deployment_name>
-```
-Example:
-```
-python3 $WORK_DIR/llm/cleanup.py --deploy_name llm-deploy
-```
\ No newline at end of file
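The deploy and cleanup steps above are a natural pair to script together. The following is a minimal sketch, not part of the repository, that simply shells out to the documented `run.sh` and `cleanup.py` entry points; the flag values mirror the examples above, and `WORK_DIR` is assumed to be exported as in the guide:

```python
# Minimal lifecycle wrapper around the documented entry points (illustrative only).
# Assumes $WORK_DIR is set as in the guide; values below are the documented examples.
import os
import subprocess

WORK_DIR = os.environ["WORK_DIR"]

def deploy(model_name: str, num_gpus: int, nfs_share: str, mount_path: str, deploy_name: str) -> None:
    """Start Kubeflow serving via run.sh with the flags documented above."""
    subprocess.run(
        ["bash", f"{WORK_DIR}/llm/run.sh",
         "-n", model_name, "-g", str(num_gpus),
         "-f", nfs_share, "-m", mount_path, "-e", deploy_name],
        check=True,
    )

def cleanup(deploy_name: str) -> None:
    """Stop the inference server and unmount the PV and PVC via cleanup.py."""
    subprocess.run(
        ["python3", f"{WORK_DIR}/llm/cleanup.py", "--deploy_name", deploy_name],
        check=True,
    )

if __name__ == "__main__":
    deploy("llama2_7b", 1, "1.1.1.1:/llm", "/mnt/llm", "llm-deploy")
    cleanup("llm-deploy")
```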
Support is handled directly via the repository. Issues and enhancement requests can be submitted in the Issues tab of the relevant repository. Search for and review existing open issues before submitting a new issue. To report a new issue, navigate to the GitHub repository:
-
-[GitHub - nutanix/nai-llm](https://github.com/nutanix/nai-llm)
-
-This is the official repository for the virtual machine version of Nutanix GPT-in-a-Box 1.0.
-
-[GitHub - nutanix/nai-llm-k8s](https://github.com/nutanix/nai-llm-k8s)
-
-This is the official repository for the Kubernetes version of Nutanix GPT-in-a-Box 1.0.
-
-The support procedure is documented in [KB 16159](https://portal.nutanix.com/page/documents/kbs/details?targetId=kA0VO0000000dJ70AI).
-
diff --git a/docs/gpt-in-a-box/vm/v0.2/custom_model.md b/docs/gpt-in-a-box/vm/v0.2/custom_model.md
deleted file mode 100644
index 997a4bbe..00000000
--- a/docs/gpt-in-a-box/vm/v0.2/custom_model.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Custom Model Support
-We provide the capability to generate a MAR file with custom models and start an inference server using it with TorchServe.
-
-!!! note
-    A model is recognised as a custom model if its model name is not present in the model_config file.
-
-## Generate Model Archive File for Custom Models
-Run the following command for generating the Model Archive File (MAR) with the Custom Model files :
-```
-python3 $WORK_DIR/llm/download.py --no_download [--repo_version <repo_version> --handler <handler_path>] --model_name <custom_model_name> --model_path <model_files_path> --mar_output <mar_output_path>
-```
-Where the arguments are :
-
-- **model_name**: Name of custom model
-- **repo_version**: Any model version, defaults to "1.0" (optional)
-- **model_path**: Absolute path of custom model files (should be a non-empty folder)
-- **mar_output**: Absolute path of export of MAR file (.mar)
-- **no_download**: Flag to skip downloading the model files, must be set for custom models
-- **handler**: Path to custom handler, defaults to llm/handler.py (optional)
-
-## Start Inference Server with Custom Model Archive File
-Run the following command to start TorchServe (Inference Server) and run inference on the provided input for custom models:
-```
-bash $WORK_DIR/llm/run.sh -n <custom_model_name> -a <model_store_path> [OPTIONAL -d <input_data_folder>]
-```
-Where the arguments are :
-
-- **n**: Name of custom model
-- **d**: Absolute path of input data folder (optional)
-- **a**: Absolute path to the Model Store directory
\ No newline at end of file
diff --git a/docs/gpt-in-a-box/vm/v0.2/generating_mar.md b/docs/gpt-in-a-box/vm/v0.2/generating_mar.md
deleted file mode 100644
index 4aed2fb0..00000000
--- a/docs/gpt-in-a-box/vm/v0.2/generating_mar.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Generate PyTorch Model Archive File
-We will download the model files and generate a Model Archive file for the desired LLM, which will be used by TorchServe to load the model. Find out more about Torch Model Archiver [here](https://github.com/pytorch/serve/blob/master/model-archiver/README.md).
-
-Make two new directories, one to store the model files (model_path) and another to store the Model Archive files (mar_output).
-
-!!! note
-    The model store directory (i.e., mar_output) can be the same for multiple Model Archive files. But the model files directory (i.e., model_path) should be empty if you're downloading the model.
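-
-For example, the two directories could be created as follows (paths are illustrative and match the download examples below):
-```
-mkdir -p /home/ubuntu/models/mpt_7b/model_files
-mkdir -p /home/ubuntu/models/model_store
-```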
- -Run the following command for downloading model files and generating the Model Archive File (MAR) of the desired LLM: -``` -python3 $WORK_DIR/llm/download.py [--no_download --repo_version ] --model_name --model_path --mar_output --hf_token -``` -Where the arguments are : - -- **model_name**: Name of model -- **repo_version**: Commit ID of model's HuggingFace repository (optional, if not provided default set in model_config will be used) -- **model_path**: Absolute path of model files (should be empty if downloading) -- **mar_output**: Absolute path of export of MAR file (.mar) -- **no_download**: Flag to skip downloading the model files -- **hf_token**: Your HuggingFace token. Needed to download and verify LLAMA(2) models. - -The available LLMs are mpt_7b (mosaicml/mpt_7b), falcon_7b (tiiuae/falcon-7b), llama2_7b (meta-llama/Llama-2-7b-hf). - -## Examples -The following are example commands to generate the model archive file. - -Download MPT-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/download.py --model_name mpt_7b --model_path /home/ubuntu/models/mpt_7b/model_files --mar_output /home/ubuntu/models/model_store -``` -Download Falcon-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/download.py --model_name falcon_7b --model_path /home/ubuntu/models/falcon_7b/model_files --mar_output /home/ubuntu/models/model_store -``` -Download Llama2-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/download.py --model_name llama2_7b --model_path /home/ubuntu/models/llama2_7b/model_files --mar_output /home/ubuntu/models/model_store --hf_token -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.2/getting_started.md b/docs/gpt-in-a-box/vm/v0.2/getting_started.md deleted file mode 100644 index d5ac8d3e..00000000 --- a/docs/gpt-in-a-box/vm/v0.2/getting_started.md +++ /dev/null @@ -1,49 +0,0 @@ -# Getting Started -This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm). - -Tested Specifications: - -| Specification | Tested Version | -| --- | --- | -| Python | 3.10 | -| Operating System | Ubuntu 20.04 | -| GPU | NVIDIA A100 40G | -| CPU | 8 vCPUs | -| System Memory | 32 GB | - -Follow the steps below to install the necessary prerequisites. - -### Install openjdk, pip3 -Run the following command to install pip3 and openjdk -``` -sudo apt-get install openjdk-17-jdk python3-pip -``` - -### Install NVIDIA Drivers -To install the NVIDIA Drivers, refer to the official [Installation Reference](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html#runfile). - -Proceed to downloading the latest [Datacenter NVIDIA drivers](https://www.nvidia.com/download/index.aspx) for your GPU type. - -For NVIDIA A100, Select A100 in Datacenter Tesla for Linux 64 bit with CUDA toolkit 11.7, latest driver is 515.105.01. - -``` -curl -fSsl -O https://us.download.nvidia.com/tesla/515.105.01/NVIDIA-Linux-x86_64-515.105.01.run -sudo sh NVIDIA-Linux-x86_64-515.105.01.run -s -``` -!!! note - We don’t need to install CUDA toolkit separately as it is bundled with PyTorch installation. Just NVIDIA driver installation is enough. - -### Download Nutanix package -Download the **v0.2** release version from the [NAI-LLM Releases](https://github.com/nutanix/nai-llm/releases/tag/v0.2) and untar the release on the node. 
Set the working directory to the root folder containing the extracted release. - -``` -export WORK_DIR=absolute_path_to_empty_release_directory -mkdir $WORK_DIR -tar -xvf -C $WORK_DIR --strip-components=1 -``` - -### Install required packages -Run the following command to install the required python packages. -``` -pip install -r $WORK_DIR/llm/requirements.txt -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.2/inference_requests.md b/docs/gpt-in-a-box/vm/v0.2/inference_requests.md deleted file mode 100644 index b69243ab..00000000 --- a/docs/gpt-in-a-box/vm/v0.2/inference_requests.md +++ /dev/null @@ -1,82 +0,0 @@ -# Inference Requests -The Inference Server can be inferenced through the TorchServe Inference API. Find out more about it in the official [TorchServe Inference API](https://pytorch.org/serve/inference_api.html) documentation. - -**Server Configuration** - -| Variable | Value | -| --- | --- | -| inference_server_endpoint | localhost | -| inference_port | 8080 | - -The following are example cURL commands to send inference requests to the Inference Server. - -## Ping Request -To find out the status of a TorchServe server, you can use the ping API that TorchServe supports: -``` -curl http://{inference_server_endpoint}:{inference_port}/ping -``` -### Example -``` -curl http://localhost:8080/ping -``` -!!! note - This only provides information on whether the TorchServe server is running. To check whether a model is successfully registered, use the "List Registered Models" request in the [Management Requests](management_requests.md#list-registered-models) documentation. - -## Inference Requests -The following is the template command for inferencing with a text file: -``` -curl -v -H "Content-Type: application/text" http://{inference_server_endpoint}:{inference_port}/predictions/{model_name} -d @path/to/data.txt -``` - -The following is the template command for inferencing with a json file: -``` -curl -v -H "Content-Type: application/json" http://{inference_server_endpoint}:{inference_port}/predictions/{model_name} -d @path/to/data.json -``` - -Input data files can be found in the `$WORK_DIR/data` folder. - -### Examples - -For MPT-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/mpt_7b -d @$WORK_DIR/data/qa/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/mpt_7b -d @$WORK_DIR/data/qa/sample_text4.json -``` - -For Falcon-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/falcon_7b -d @$WORK_DIR/data/summarize/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/falcon_7b -d @$WORK_DIR/data/summarize/sample_text3.json -``` - -For Llama2-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/llama2_7b -d @$WORK_DIR/data/translate/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/llama2_7b -d @$WORK_DIR/data/translate/sample_text3.json -``` - -### Input data format -Input data can be in either **text** or **JSON** format. - -1. For text format, the input should be a '.txt' file containing the prompt - -2. 
For JSON format, the input should be a '.json' file containing the prompt in the format below:
-```
-{
-  "id": "42",
-  "inputs": [
-    {
-      "name": "input0",
-      "shape": [-1],
-      "datatype": "BYTES",
-      "data": ["Capital of India?"]
-    }
-  ]
-}
-```
\ No newline at end of file
diff --git a/docs/gpt-in-a-box/vm/v0.2/inference_server.md b/docs/gpt-in-a-box/vm/v0.2/inference_server.md
deleted file mode 100644
index a89a8079..00000000
--- a/docs/gpt-in-a-box/vm/v0.2/inference_server.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Deploying Inference Server
-Run the following command to start TorchServe (Inference Server) and run inference on the provided input:
-```
-bash $WORK_DIR/llm/run.sh -n <model_name> -a <model_store_path> [OPTIONAL -d <input_data_folder> -v <repo_version>]
-```
-Where the arguments are :
-
-- **n**: Name of model
-- **v**: Commit ID of model's HuggingFace repository (optional, if not provided the default set in model_config will be used)
-- **d**: Absolute path of input data folder (optional)
-- **a**: Absolute path to the Model Store directory
-
-The available LLM model names are mpt_7b (mosaicml/mpt-7b), falcon_7b (tiiuae/falcon-7b), llama2_7b (meta-llama/Llama-2-7b-hf).
-
-Once the Inference Server has successfully started, you should see a "Ready For Inferencing" message.
-
-### Examples
-The following are example commands to start the Inference Server.
-
-For Inference with official MPT-7B model:
-```
-bash $WORK_DIR/llm/run.sh -n mpt_7b -d $WORK_DIR/data/translate -a /home/ubuntu/models/model_store
-```
-For Inference with official Falcon-7B model:
-```
-bash $WORK_DIR/llm/run.sh -n falcon_7b -d $WORK_DIR/data/qa -a /home/ubuntu/models/model_store
-```
-For Inference with official Llama2-7B model:
-```
-bash $WORK_DIR/llm/run.sh -n llama2_7b -d $WORK_DIR/data/summarize -a /home/ubuntu/models/model_store
-```
-
-## Stop Inference Server and Cleanup
-Run the following command to stop the Inference Server and clean up temporarily generated files.
-```
-python3 $WORK_DIR/llm/cleanup.py
-```
\ No newline at end of file
diff --git a/docs/gpt-in-a-box/vm/v0.2/management_requests.md b/docs/gpt-in-a-box/vm/v0.2/management_requests.md
deleted file mode 100644
index cb9819c6..00000000
--- a/docs/gpt-in-a-box/vm/v0.2/management_requests.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# Management Requests
-The Inference Server can be managed through the TorchServe Management API. Find out more about it in the official [TorchServe Management API](https://pytorch.org/serve/management_api.html) documentation.
-
-**Server Configuration**
-
-| Variable | Value |
-| --- | --- |
-| inference_server_endpoint | localhost |
-| management_port | 8081 |
-
-The following are example cURL commands to send management requests to the Inference Server.
-
-## List Registered Models
-To list all registered models, the template command is:
-```
-curl http://{inference_server_endpoint}:{management_port}/models
-```
-
-### Example
-For all registered models:
-```
-curl http://localhost:8081/models
-```
-
-## Describe Registered Models
-Once a model is loaded on the Inference Server, we can use the following request to describe the model and its configuration.
- -The following is the template command for the same: -``` -curl http://{inference_server_endpoint}:{management_port}/models/{model_name} -``` -Example response of the describe models request: -``` -[ - { - "modelName": "llama2_7b", - "modelVersion": "6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9", - "modelUrl": "llama2_7b_6fdf2e6.mar", - "runtime": "python", - "minWorkers": 1, - "maxWorkers": 1, - "batchSize": 1, - "maxBatchDelay": 200, - "loadedAtStartup": false, - "workers": [ - { - "id": "9000", - "startTime": "2023-11-28T06:39:28.081Z", - "status": "READY", - "memoryUsage": 0, - "pid": 57379, - "gpu": true, - "gpuUsage": "gpuId::0 utilization.gpu [%]::0 % utilization.memory [%]::0 % memory.used [MiB]::13423 MiB" - } - ], - "jobQueueStatus": { - "remainingCapacity": 1000, - "pendingRequests": 0 - } - } -] -``` - -!!! note - From this request, you can validate if a model is ready for inferencing. You can do this by referring to the values under the "workers" -> "status" keys of the response. - -### Examples -For MPT-7B model -``` -curl http://localhost:8081/models/mpt_7b -``` -For Falcon-7B model -``` -curl http://localhost:8081/models/falcon_7b -``` -For Llama2-7B model -``` -curl http://localhost:8081/models/llama2_7b -``` - -## Register Additional Models -TorchServe allows the registering (loading) of multiple models simultaneously. To register multiple models, make sure that the Model Archive Files for the concerned models are stored in the same directory. - -The following is the template command for the same: -``` -curl -X POST "http://{inference_server_endpoint}:{management_port}/models?url={model_archive_file_name}.mar&initial_workers=1&synchronous=true" -``` - -### Examples -For MPT-7B model -``` -curl -X POST "http://localhost:8081/models?url=mpt_7b.mar&initial_workers=1&synchronous=true" -``` -For Falcon-7B model -``` -curl -X POST "http://localhost:8081/models?url=falcon_7b.mar&initial_workers=1&synchronous=true" -``` -For Llama2-7B model -``` -curl -X POST "http://localhost:8081/models?url=llama2_7b.mar&initial_workers=1&synchronous=true" -``` -!!! note - Make sure the Model Archive file name given in the cURL request is correct and is present in the model store directory. - -## Edit Registered Model Configuration -The model can be configured after registration using the Management API of TorchServe. - -The following is the template command for the same: -``` -curl -v -X PUT "http://{inference_server_endpoint}:{management_port}/models/{model_name}?min_workers={number}&max_workers={number}&batch_size={number}&max_batch_delay={delay_in_ms}" -``` - -### Examples -For MPT-7B model -``` -curl -v -X PUT "http://localhost:8081/models/mpt_7b?min_worker=2&max_worker=2" -``` -For Falcon-7B model -``` -curl -v -X PUT "http://localhost:8081/models/falcon_7b?min_worker=2&max_worker=2" -``` -For Llama2-7B model -``` -curl -v -X PUT "http://localhost:8081/models/llama2_7b?min_worker=2&max_worker=2" -``` -!!! note - Make sure to have enough GPU and System Memory before increasing number of workers, else the additional workers will fail to load. 
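-
-To confirm that a scaling request took effect, the model can be described again and the `workers` list checked in the response, for example:
-```
-curl http://localhost:8081/models/llama2_7b
-```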
-
-## Unregister a Model
-The following is the template command to unregister a model from the Inference Server:
-```
-curl -X DELETE "http://{inference_server_endpoint}:{management_port}/models/{model_name}/{repo_version}"
-```
diff --git a/docs/gpt-in-a-box/vm/v0.2/model_version.md b/docs/gpt-in-a-box/vm/v0.2/model_version.md
deleted file mode 100644
index 8816593b..00000000
--- a/docs/gpt-in-a-box/vm/v0.2/model_version.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Model Version Support
-We provide the capability to download and register various commits of a single model from HuggingFace. By specifying the commit ID as "repo_version", you can produce MAR files for multiple iterations of the same model and register them simultaneously. To transition between these versions, you can set a default version within TorchServe while it is running and run inference against the desired version.
-
-## Set Default Model Version
-If multiple versions of the same model are registered, we can set a particular version as the default for inferencing by running the following command:
-```
-curl -v -X PUT "http://{inference_server_endpoint}:{management_port}/models/{model_name}/{repo_version}/set-default"
-```
diff --git a/docs/gpt-in-a-box/vm/v0.3/custom_model.md b/docs/gpt-in-a-box/vm/v0.3/custom_model.md
deleted file mode 100644
index f6abf945..00000000
--- a/docs/gpt-in-a-box/vm/v0.3/custom_model.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Custom Model Support
-In some cases you may want to use a custom model, e.g. a custom fine-tuned model. We provide the capability to generate a MAR file with custom model files and start an inference server using it with TorchServe.
-
-## Generate Model Archive File for Custom Models
-
-!!! note
-    The custom model files should be placed in a directory accessible by the Nutanix package, e.g. /home/ubuntu/models/<custom_model_name>/model_files. This directory will be passed to the --model_path argument. You'll also need to provide the --mar_output path where you want the model archive export to be stored.
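-
-As an illustration, a HuggingFace-style model directory might contain files such as the following (the directory name and exact file set are placeholders and vary by model):
-```
-$ ls /home/ubuntu/models/my_custom_model/model_files
-config.json  generation_config.json  pytorch_model.bin
-tokenizer.json  tokenizer_config.json  special_tokens_map.json
-```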
- -Run the following command for generating the Model Archive File (MAR) with the Custom Model files : -``` -python3 $WORK_DIR/llm/generate.py --skip_download [--repo_version --handler ] --model_name --model_path --mar_output -``` -Where the arguments are : - -- **model_name**: Name of custom model -- **repo_version**: Any model version, defaults to "1.0" (optional) -- **model_path**: Absolute path of custom model files (should be a non empty folder) -- **mar_output**: Absolute path of export of MAR file (.mar) -- **skip_download**: Flag to skip downloading the model files, must be set for custom models -- **handler**: Path to custom handler, defaults to llm/handler.py (optional) - -## Start Inference Server with Custom Model Archive File -Run the following command to start TorchServe (Inference Server) and run inference on the provided input for custom models: -``` -bash $WORK_DIR/llm/run.sh -n -a [OPTIONAL -d ] -``` -Where the arguments are : - -- **n**: Name of custom model -- **d**: Absolute path of input data folder (optional) -- **a**: Absolute path to the Model Store directory \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.3/generating_mar.md b/docs/gpt-in-a-box/vm/v0.3/generating_mar.md deleted file mode 100644 index a1b6f495..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/generating_mar.md +++ /dev/null @@ -1,36 +0,0 @@ -# Generate PyTorch Model Archive File -We will download the model files and generate a Model Archive file for the desired LLM, which will be used by TorchServe to load the model. Find out more about Torch Model Archiver [here](https://github.com/pytorch/serve/blob/master/model-archiver/README.md). - -Make two new directories, one to store the model files (model_path) and another to store the Model Archive files (mar_output). - -!!! note - The model store directory (i.e, mar_output) can be the same for multiple Model Archive files. But model files directory (i.e, model_path) should be empty if you're downloading the model. - -Run the following command for downloading model files and generating the Model Archive File (MAR) of the desired LLM: -``` -python3 $WORK_DIR/llm/generate.py [--skip_download --repo_version --hf_token ] --model_name --model_path --mar_output -``` -Where the arguments are : - -- **model_name**: Name of a [validated model](validated_models.md) -- **repo_version**: Commit ID of model's HuggingFace repository (optional, if not provided default set in model_config will be used) -- **model_path**: Absolute path of model files (should be empty if downloading) -- **mar_output**: Absolute path of export of MAR file (.mar) -- **skip_download**: Flag to skip downloading the model files -- **hf_token**: Your HuggingFace token. Needed to download and verify LLAMA(2) models. (It can alternatively be set using the environment variable 'HF_TOKEN') - -## Examples -The following are example commands to generate the model archive file. 
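-
-Before running the Llama2 example below, the HuggingFace token can alternatively be exported once via the `HF_TOKEN` environment variable noted above (the token value shown is a placeholder):
-```
-export HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx
-```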
- -Download MPT-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/generate.py --model_name mpt_7b --model_path /home/ubuntu/models/mpt_7b/model_files --mar_output /home/ubuntu/models/model_store -``` -Download Falcon-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/generate.py --model_name falcon_7b --model_path /home/ubuntu/models/falcon_7b/model_files --mar_output /home/ubuntu/models/model_store -``` -Download Llama2-7B model files and generate model archive for it: -``` -python3 $WORK_DIR/llm/generate.py --model_name llama2_7b --model_path /home/ubuntu/models/llama2_7b/model_files --mar_output /home/ubuntu/models/model_store --hf_token -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.3/getting_started.md b/docs/gpt-in-a-box/vm/v0.3/getting_started.md deleted file mode 100644 index 2603c5fe..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/getting_started.md +++ /dev/null @@ -1,49 +0,0 @@ -# Getting Started -This is a guide on getting started with GPT-in-a-Box 1.0 deployment on a Virtual Machine. You can find the open source repository for the virtual machine version [here](https://github.com/nutanix/nai-llm). - -Tested Specifications: - -| Specification | Tested Version | -| --- | --- | -| Python | 3.10 | -| Operating System | Ubuntu 20.04 | -| GPU | NVIDIA A100 40G | -| CPU | 8 vCPUs | -| System Memory | 32 GB | - -Follow the steps below to install the necessary prerequisites. - -### Install openjdk, pip3 -Run the following command to install pip3 and openjdk -``` -sudo apt-get install openjdk-17-jdk python3-pip -``` - -### Install NVIDIA Drivers -To install the NVIDIA Drivers, refer to the official [Installation Reference](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes/index.html#runfile). - -Proceed to downloading the latest [Datacenter NVIDIA drivers](https://www.nvidia.com/download/index.aspx) for your GPU type. - -For NVIDIA A100, Select A100 in Datacenter Tesla for Linux 64 bit with CUDA toolkit 11.7, latest driver is 515.105.01. - -``` -curl -fSsl -O https://us.download.nvidia.com/tesla/515.105.01/NVIDIA-Linux-x86_64-515.105.01.run -sudo sh NVIDIA-Linux-x86_64-515.105.01.run -s -``` -!!! note - There is no need to install CUDA toolkit separately as it is bundled with PyTorch installation. The NVIDIA driver installation is sufficient. - -### Download Nutanix package -Download the **v0.3** release version from the [NAI-LLM Releases](https://github.com/nutanix/nai-llm/releases/tag/v0.3) and untar the release on the node. Set the working directory to the root folder containing the extracted release. - -``` -export WORK_DIR=absolute_path_to_empty_release_directory -mkdir $WORK_DIR -tar -xvf -C $WORK_DIR --strip-components=1 -``` - -### Install required packages -Run the following command to install the required python packages. -``` -pip install -r $WORK_DIR/llm/requirements.txt -``` diff --git a/docs/gpt-in-a-box/vm/v0.3/huggingface_model.md b/docs/gpt-in-a-box/vm/v0.3/huggingface_model.md deleted file mode 100644 index 6abf2836..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/huggingface_model.md +++ /dev/null @@ -1,45 +0,0 @@ -# HuggingFace Model Support -!!! Note - To start the inference server for the [**Validated Models**](validated_models.md), refer to the [**Deploying Inference Server**](inference_server.md) documentation. - -We provide the capability to download model files from any HuggingFace repository and generate a MAR file to start an inference server using it with Torchserve. 
- -To start the Inference Server for any other HuggingFace model, follow the steps below. - -## Generate Model Archive File for HuggingFace Models -Run the following command for downloading and generating the Model Archive File (MAR) with the HuggingFace Model files : -``` -python3 $WORK_DIR/llm/generate.py [--hf_token --repo_version --handler ] --model_name --repo_id --model_path --mar_output -``` -Where the arguments are : - -- **model_name**: Name of HuggingFace model -- **repo_id**: HuggingFace Repository ID of the model -- **repo_version**: Commit ID of model's HuggingFace repository, defaults to latest HuggingFace commit ID (optional) -- **model_path**: Absolute path of model files (should be an empty folder) -- **mar_output**: Absolute path of export of MAR file (.mar) -- **handler**: Path to custom handler, defaults to llm/handler.py (optional) -- **hf_token**: Your HuggingFace token. Needed to download and verify LLAMA(2) models. - -### Example -Download model files and generate model archive for codellama/CodeLlama-7b-hf: -``` -python3 $WORK_DIR/llm/generate.py --model_name codellama_7b_hf --repo_id codellama/CodeLlama-7b-hf --model_path /models/codellama_7b_hf/model_files --mar_output /models/model_store -``` - -## Start Inference Server with HuggingFace Model -Run the following command to start TorchServe (Inference Server) and run inference on the provided input for HuggingFace models: -``` -bash $WORK_DIR/llm/run.sh -n -a [OPTIONAL -d ] -``` -Where the arguments are : - -- **n**: Name of HuggingFace model -- **d**: Absolute path of input data folder (optional) -- **a**: Absolute path to the Model Store directory - -### Example -To start Inference Server with codellama/CodeLlama-7b-hf: -``` -bash $WORK_DIR/llm/run.sh -n codellama_7b_hf -a /models/model_store -d $WORK_DIR/data/summarize -``` diff --git a/docs/gpt-in-a-box/vm/v0.3/inference_requests.md b/docs/gpt-in-a-box/vm/v0.3/inference_requests.md deleted file mode 100644 index 22c6905d..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/inference_requests.md +++ /dev/null @@ -1,82 +0,0 @@ -# Inference Requests -The Inference Server can be inferenced through the TorchServe Inference API. Find out more about it in the official [TorchServe Inference API](https://pytorch.org/serve/inference_api.html) documentation. - -**Server Configuration** - -| Variable | Value | -| --- | --- | -| inference_server_endpoint | localhost | -| inference_port | 8080 | - -The following are example cURL commands to send inference requests to the Inference Server. - -## Ping Request -To find out the status of a TorchServe server, you can use the ping API that TorchServe supports: -``` -curl http://{inference_server_endpoint}:{inference_port}/ping -``` -### Example -``` -curl http://localhost:8080/ping -``` -!!! note - This only provides information on whether the TorchServe server is running. To check whether a model is successfully registered on TorchServe, you can [**list all models**](management_requests.md#list-registered-models) and [**describe a registered model**](management_requests.md#describe-registered-models). 
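-
-A healthy server responds with a small JSON status payload, typically:
-```
-{
-  "status": "Healthy"
-}
-```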
- -## Inference Requests -The following is the template command for inferencing with a text file: -``` -curl -v -H "Content-Type: application/text" http://{inference_server_endpoint}:{inference_port}/predictions/{model_name} -d @path/to/data.txt -``` - -The following is the template command for inferencing with a json file: -``` -curl -v -H "Content-Type: application/json" http://{inference_server_endpoint}:{inference_port}/predictions/{model_name} -d @path/to/data.json -``` - -Input data files can be found in the `$WORK_DIR/data` folder. - -### Examples - -For MPT-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/mpt_7b -d @$WORK_DIR/data/qa/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/mpt_7b -d @$WORK_DIR/data/qa/sample_text4.json -``` - -For Falcon-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/falcon_7b -d @$WORK_DIR/data/summarize/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/falcon_7b -d @$WORK_DIR/data/summarize/sample_text3.json -``` - -For Llama2-7B model -``` -curl -v -H "Content-Type: application/text" http://localhost:8080/predictions/llama2_7b -d @$WORK_DIR/data/translate/sample_text1.txt -``` -``` -curl -v -H "Content-Type: application/json" http://localhost:8080/predictions/llama2_7b -d @$WORK_DIR/data/translate/sample_text3.json -``` - -### Input data format -Input data can be in either **text** or **JSON** format. - -1. For text format, the input should be a '.txt' file containing the prompt - -2. For JSON format, the input should be a '.json' file containing the prompt in the format below: -``` -{ - "id": "42", - "inputs": [ - { - "name": "input0", - "shape": [-1], - "datatype": "BYTES", - "data": ["Capital of India?"] - } - ] -} -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.3/inference_server.md b/docs/gpt-in-a-box/vm/v0.3/inference_server.md deleted file mode 100644 index 4a899d9a..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/inference_server.md +++ /dev/null @@ -1,36 +0,0 @@ -# Deploying Inference Server - -Run the following command to start TorchServe (Inference Server) and run inference on the provided input: -``` -bash $WORK_DIR/llm/run.sh -n -a [OPTIONAL -d -v ] -``` -Where the arguments are : - -- **n**: Name of a [validated model](validated_models.md) -- **v**: Commit ID of model's HuggingFace repository (optional, if not provided default set in model_config will be used) -- **d**: Absolute path of input data folder (optional) -- **a**: Absolute path to the Model Store directory - -Once the Inference Server has successfully started, you should see a "Ready For Inferencing" message. - -### Examples -The following are example commands to start the Inference Server. - -For Inference with official MPT-7B model: -``` -bash $WORK_DIR/llm/run.sh -n mpt_7b -d $WORK_DIR/data/translate -a /home/ubuntu/models/model_store -``` -For Inference with official Falcon-7B model: -``` -bash $WORK_DIR/llm/run.sh -n falcon_7b -d $WORK_DIR/data/qa -a /home/ubuntu/models/model_store -``` -For Inference with official Llama2-7B model: -``` -bash $WORK_DIR/llm/run.sh -n llama2_7b -d $WORK_DIR/data/summarize -a /home/ubuntu/models/model_store -``` - -## Stop Inference Server and Cleanup -Run the following command to stop the Inference Server and clean up temporarily generate files. 
-``` -python3 $WORK_DIR/llm/cleanup.py -``` \ No newline at end of file diff --git a/docs/gpt-in-a-box/vm/v0.3/management_requests.md b/docs/gpt-in-a-box/vm/v0.3/management_requests.md deleted file mode 100644 index cb9819c6..00000000 --- a/docs/gpt-in-a-box/vm/v0.3/management_requests.md +++ /dev/null @@ -1,133 +0,0 @@ -# Management Requests -The Inference Server can be managed through the TorchServe Management API. Find out more about it in the official [TorchServe Management API](https://pytorch.org/serve/management_api.html) documentation - -**Server Configuration** - -| Variable | Value | -| --- | --- | -| inference_server_endpoint | localhost | -| management_port | 8081 | - -The following are example cURL commands to send management requests to the Inference Server. - -## List Registered Models -To describe all registered models, the template command is: -``` -curl http://{inference_server_endpoint}:{management_port}/models -``` - -### Example -For all registered models -``` -curl http://localhost:8081/models -``` - -## Describe Registered Models -Once a model is loaded on the Inference Server, we can use the following request to describe the model and it's configuration. - -The following is the template command for the same: -``` -curl http://{inference_server_endpoint}:{management_port}/models/{model_name} -``` -Example response of the describe models request: -``` -[ - { - "modelName": "llama2_7b", - "modelVersion": "6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9", - "modelUrl": "llama2_7b_6fdf2e6.mar", - "runtime": "python", - "minWorkers": 1, - "maxWorkers": 1, - "batchSize": 1, - "maxBatchDelay": 200, - "loadedAtStartup": false, - "workers": [ - { - "id": "9000", - "startTime": "2023-11-28T06:39:28.081Z", - "status": "READY", - "memoryUsage": 0, - "pid": 57379, - "gpu": true, - "gpuUsage": "gpuId::0 utilization.gpu [%]::0 % utilization.memory [%]::0 % memory.used [MiB]::13423 MiB" - } - ], - "jobQueueStatus": { - "remainingCapacity": 1000, - "pendingRequests": 0 - } - } -] -``` - -!!! note - From this request, you can validate if a model is ready for inferencing. You can do this by referring to the values under the "workers" -> "status" keys of the response. - -### Examples -For MPT-7B model -``` -curl http://localhost:8081/models/mpt_7b -``` -For Falcon-7B model -``` -curl http://localhost:8081/models/falcon_7b -``` -For Llama2-7B model -``` -curl http://localhost:8081/models/llama2_7b -``` - -## Register Additional Models -TorchServe allows the registering (loading) of multiple models simultaneously. To register multiple models, make sure that the Model Archive Files for the concerned models are stored in the same directory. - -The following is the template command for the same: -``` -curl -X POST "http://{inference_server_endpoint}:{management_port}/models?url={model_archive_file_name}.mar&initial_workers=1&synchronous=true" -``` - -### Examples -For MPT-7B model -``` -curl -X POST "http://localhost:8081/models?url=mpt_7b.mar&initial_workers=1&synchronous=true" -``` -For Falcon-7B model -``` -curl -X POST "http://localhost:8081/models?url=falcon_7b.mar&initial_workers=1&synchronous=true" -``` -For Llama2-7B model -``` -curl -X POST "http://localhost:8081/models?url=llama2_7b.mar&initial_workers=1&synchronous=true" -``` -!!! note - Make sure the Model Archive file name given in the cURL request is correct and is present in the model store directory. - -## Edit Registered Model Configuration -The model can be configured after registration using the Management API of TorchServe. 
-
-The following is the template command for the same:
-```
-curl -v -X PUT "http://{inference_server_endpoint}:{management_port}/models/{model_name}?min_worker={number}&max_worker={number}&batch_size={number}&max_batch_delay={delay_in_ms}"
-```
-
-### Examples
-For MPT-7B model
-```
-curl -v -X PUT "http://localhost:8081/models/mpt_7b?min_worker=2&max_worker=2"
-```
-For Falcon-7B model
-```
-curl -v -X PUT "http://localhost:8081/models/falcon_7b?min_worker=2&max_worker=2"
-```
-For Llama2-7B model
-```
-curl -v -X PUT "http://localhost:8081/models/llama2_7b?min_worker=2&max_worker=2"
-```
-!!! note
-    Make sure to have enough GPU and System Memory before increasing the number of workers, else the additional workers will fail to load.
-
-## Unregister a Model
-The following is the template command to unregister a model from the Inference Server:
-```
-curl -X DELETE "http://{inference_server_endpoint}:{management_port}/models/{model_name}/{repo_version}"
-```
diff --git a/docs/gpt-in-a-box/vm/v0.3/model_version.md b/docs/gpt-in-a-box/vm/v0.3/model_version.md
deleted file mode 100644
index 647199ca..00000000
--- a/docs/gpt-in-a-box/vm/v0.3/model_version.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Model Version Support
-We provide the capability to download and register various commits of a single model from HuggingFace. Follow the steps below for the same :
-
-- [Generate MAR files](generating_mar.md) for the required HuggingFace commits by passing its commit ID in the "--repo_version" argument
-- [Deploy TorchServe](inference_server.md) with any one of the versions passed through the "--repo_version" argument
-- Register the rest of the required versions through the [register additional models](management_requests.md#register-additional-models) request.
-
-## Set Default Model Version
-If multiple versions of the same model are registered, we can set a particular version as the default for inferencing by running the following command:
-```
-curl -v -X PUT "http://{inference_server_endpoint}:{management_port}/models/{model_name}/{repo_version}/set-default"
-```
diff --git a/docs/gpt-in-a-box/vm/v0.3/validated_models.md b/docs/gpt-in-a-box/vm/v0.3/validated_models.md
deleted file mode 100644
index 0f4aebd0..00000000
--- a/docs/gpt-in-a-box/vm/v0.3/validated_models.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Validated Models for Virtual Machine Version
-
-GPT-in-a-Box 1.0 has been validated on a curated set of HuggingFace models. Information pertaining to these models is stored in the ```llm/model_config.json``` file.
-
-The Validated Models are :
-
-| Model Name | HuggingFace Repository ID |
-| --- | --- |
-| mpt_7b | [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) |
-| falcon_7b | [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) |
-| llama2_7b | [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) |
-| codellama_7b_python | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf) |
-| llama2_7b_chat | [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) |
-
-!!! note
-    To start the inference server with any HuggingFace model, refer to [**HuggingFace Model Support**](huggingface_model.md) documentation.
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 6f94729c..1bc9ac0f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -187,43 +187,6 @@ nav: - "Manual": "anthos/install/manual/index.md" - "Amazon EKS Anywhere": - "Install": "eksa/install/index.md" - - "GPT-in-a-Box 1.0": - - "Overview": "gpt-in-a-box/overview.md" - - "Deploy on Virtual Machine": - - "v0.3": - - "Getting Started": "gpt-in-a-box/vm/v0.3/getting_started.md" - - "Validated Models": "gpt-in-a-box/vm/v0.3/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.3/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/vm/v0.3/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.3/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.3/model_version.md" - - "HuggingFace Model Support": "gpt-in-a-box/vm/v0.3/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.3/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.3/management_requests.md" - - "v0.2": - - "Getting Started": "gpt-in-a-box/vm/v0.2/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/vm/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/vm/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/vm/v0.2/inference_requests.md" - - "Model Version Support": "gpt-in-a-box/vm/v0.2/model_version.md" - - "Custom Model Support": "gpt-in-a-box/vm/v0.2/custom_model.md" - - "Management Requests": "gpt-in-a-box/vm/v0.2/management_requests.md" - - "Deploy on Kubernetes": - - "v0.2": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.2/getting_started.md" - - "Validated Models": "gpt-in-a-box/kubernetes/v0.2/validated_models.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.2/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.2/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.2/inference_requests.md" - - "HuggingFace Model Support": "gpt-in-a-box/kubernetes/v0.2/huggingface_model.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.2/custom_model.md" - - "v0.1": - - "Getting Started": "gpt-in-a-box/kubernetes/v0.1/getting_started.md" - - "Generating Model Archive File": "gpt-in-a-box/kubernetes/v0.1/generating_mar.md" - - "Deploying Inference Server": "gpt-in-a-box/kubernetes/v0.1/inference_server.md" - - "Inference Requests": "gpt-in-a-box/kubernetes/v0.1/inference_requests.md" - - "Custom Model Support": "gpt-in-a-box/kubernetes/v0.1/custom_model.md" - - "Support": "gpt-in-a-box/support.md" - "Guides": - "Cloud Native": - "Red Hat OpenShift": @@ -238,7 +201,7 @@ markdown_extensions: - tables - toc: permalink: true -copyright: Copyright © 2021 - 2023 Nutanix, Inc. +copyright: Copyright © 2021 - 2024 Nutanix, Inc. extra: generator: false repo_url: https://github.com/nutanix-cloud-native/opendocs From 90fc55314a24293374f7fd94ec8600577aa03b00 Mon Sep 17 00:00:00 2001 From: Greta Aquilina Date: Wed, 27 Nov 2024 16:47:59 +0100 Subject: [PATCH 05/15] fixed: Yaml indentation in example (#68) Incorrect Yaml indentation of the storageType parameter in the "Option B" example, causing the parameter to be misinterpreted. This change has been successfully tested on Red Hat Openshift Container Platform. 
--- docs/openshift/post-install/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/openshift/post-install/index.md b/docs/openshift/post-install/index.md index b8e55f96..59bc1df4 100644 --- a/docs/openshift/post-install/index.md +++ b/docs/openshift/post-install/index.md @@ -97,7 +97,7 @@ Based on requirements, choose one of the following options: #nfsServerName above is File Server Name in Prism without DNS suffix, not the FQDN. csi.storage.k8s.io/provisioner-secret-name: ntnx-secret csi.storage.k8s.io/provisioner-secret-namespace: openshift-cluster-csi-drivers - storageType: NutanixFiles + storageType: NutanixFiles 2. Create a PVC yaml file like the below example and apply in the openshift-image-registry namespace (`oc -n openshift-image-registry apply -f `). From 4bc9d78ccf21fe53930a980ae8963484f99d028c Mon Sep 17 00:00:00 2001 From: rohithkumar-nutanix Date: Mon, 21 Jul 2025 15:38:15 +0530 Subject: [PATCH 06/15] feat: docs(ccm) update v0.4.x and v0.5.x feature documentation based on changelog (#69) - Add security and performance enhancements documentation for v0.4.x - Update symbolic link to point latest -> v0.5.x - Add v0.4.x and v0.5.x sections to mkdocs navigation --- docs/ccm/latest | 2 +- docs/ccm/v0.4.x/ccm_configuration.md | 64 +++++++++++++ docs/ccm/v0.4.x/ccm_credentials.md | 29 ++++++ docs/ccm/v0.4.x/custom_labeling.md | 14 +++ docs/ccm/v0.4.x/overview.md | 22 +++++ docs/ccm/v0.4.x/pc_certificates.md | 104 +++++++++++++++++++++ docs/ccm/v0.4.x/requirements.md | 33 +++++++ docs/ccm/v0.4.x/topology_discovery.md | 124 ++++++++++++++++++++++++++ docs/ccm/v0.5.x/ccm_configuration.md | 64 +++++++++++++ docs/ccm/v0.5.x/ccm_credentials.md | 29 ++++++ docs/ccm/v0.5.x/custom_labeling.md | 14 +++ docs/ccm/v0.5.x/overview.md | 24 +++++ docs/ccm/v0.5.x/pc_certificates.md | 104 +++++++++++++++++++++ docs/ccm/v0.5.x/requirements.md | 33 +++++++ docs/ccm/v0.5.x/topology_discovery.md | 124 ++++++++++++++++++++++++++ mkdocs.yml | 18 +++- 16 files changed, 800 insertions(+), 2 deletions(-) create mode 100644 docs/ccm/v0.4.x/ccm_configuration.md create mode 100644 docs/ccm/v0.4.x/ccm_credentials.md create mode 100644 docs/ccm/v0.4.x/custom_labeling.md create mode 100644 docs/ccm/v0.4.x/overview.md create mode 100644 docs/ccm/v0.4.x/pc_certificates.md create mode 100644 docs/ccm/v0.4.x/requirements.md create mode 100644 docs/ccm/v0.4.x/topology_discovery.md create mode 100644 docs/ccm/v0.5.x/ccm_configuration.md create mode 100644 docs/ccm/v0.5.x/ccm_credentials.md create mode 100644 docs/ccm/v0.5.x/custom_labeling.md create mode 100644 docs/ccm/v0.5.x/overview.md create mode 100644 docs/ccm/v0.5.x/pc_certificates.md create mode 100644 docs/ccm/v0.5.x/requirements.md create mode 100644 docs/ccm/v0.5.x/topology_discovery.md diff --git a/docs/ccm/latest b/docs/ccm/latest index 7157a9c5..88fe387d 120000 --- a/docs/ccm/latest +++ b/docs/ccm/latest @@ -1 +1 @@ -v0.3.x \ No newline at end of file +v0.5.x \ No newline at end of file diff --git a/docs/ccm/v0.4.x/ccm_configuration.md b/docs/ccm/v0.4.x/ccm_configuration.md new file mode 100644 index 00000000..c7b7d2b7 --- /dev/null +++ b/docs/ccm/v0.4.x/ccm_configuration.md @@ -0,0 +1,64 @@ +# Nutanix CCM Configuration + +Nutanix CCM can be configured via a `JSON` formated file stored in a configmap called `nutanix-config`. This configmap is located in the same namespace as the Nutanix CCM deployment. See the `manifests/cloud-provider-nutanix-deployment.yaml` file for details on the Nutanix CCM deployment. 
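+
+An existing configuration can be inspected directly on the cluster with standard tooling, for example (illustrative):
+```
+kubectl -n kube-system get configmap nutanix-config -o yaml
+```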
+ +Example `nutanix-config` configmap: +```YAML +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nutanix-config + namespace: kube-system +data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "${NUTANIX_ENDPOINT}", + "port": ${NUTANIX_PORT}, + "insecure": ${NUTANIX_INSECURE}, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds" + }, + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Categories", + "topologyCategories": { + "regionCategory": "${NUTANIX_REGION_CATEGORY}", + "zoneCategory": "${NUTANIX_ZONE_CATEGORY}" + } + } + } + +``` + +The table below provides an overview of the supported configuration parameters. + +### Configuration parameters + +| Key |Type |Description | +|---------------------------------------------------|------|------------------------------------------------------------------------------------------------------------------------------------------------------| +|topologyDiscovery |object|(Optional) Configures the topology discovery mode.
`Prism` topology discovery is used by default if `topologyDiscovery` attribute is not passed. | +|topologyDiscovery.type |string|Topology Discovery mode. Can be `Prism` or `Categories`. See [Topology Discovery](./topology_discovery.md) for more information. | +|topologyDiscovery.topologyCategories |object|Required if topology discovery mode is `Categories`.
| +|topologyDiscovery.topologyCategories.regionCategory|string|Category key defining the region of the Kubernetes node. | +|topologyDiscovery.topologyCategories.zoneCategory |string|Category key defining the zone of the Kubernetes node. | +|enableCustomLabeling |bool |Boolean value to enable custom labeling. See [Custom Labeling](./custom_labeling.md) for more information.
Default: `false` | +|prismCentral |object|Prism Central endpoint configuration. | +|prismCentral.address |string|FQDN/IP of the Prism Central endpoint. | +|prismCentral.port |int |Port to connect to Prism Central.
Default: `9440` | +|prismCentral.insecure |bool |Disable Prism Central certificate checking.
Default: `false` | +|prismCentral.credentialRef |object|Prism Central credential configuration. See [Credentials](./ccm_credentials.md) for more information. | +|prismCentral.credentialRef.kind |string|Credential kind.
Allowed value: `secret` |
+|prismCentral.credentialRef.name |string|Name of the secret. |
+|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret. |
+|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for Prism Central connection. |
+|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` |
+|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. |
+|prismCentral.additionalTrustBundle.namespace |string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle. See [Certificate Trust](./pc_certificates.md) for more information.|
\ No newline at end of file
diff --git a/docs/ccm/v0.4.x/ccm_credentials.md b/docs/ccm/v0.4.x/ccm_credentials.md
new file mode 100644
index 00000000..7bda06e2
--- /dev/null
+++ b/docs/ccm/v0.4.x/ccm_credentials.md
@@ -0,0 +1,29 @@
+# Credentials
+
+Nutanix CCM requires credentials to connect to Prism Central. These credentials need to be stored in a secret in the following format:
+
+```YAML
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: nutanix-creds
+  namespace: kube-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "$NUTANIX_USERNAME",
+            "password": "$NUTANIX_PASSWORD"
+          },
+          "prismElements": null
+        }
+      }
+    ]
+
+```
+
+See [Requirements](./requirements.md) for more information on the required permissions.
\ No newline at end of file
diff --git a/docs/ccm/v0.4.x/custom_labeling.md b/docs/ccm/v0.4.x/custom_labeling.md
new file mode 100644
index 00000000..4db89462
--- /dev/null
+++ b/docs/ccm/v0.4.x/custom_labeling.md
@@ -0,0 +1,14 @@
+# Custom Labeling
+
+Enabling the Nutanix CCM custom labeling feature will add additional labels to the Kubernetes nodes. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on how to configure CCM to enable custom labeling.
+
+The following labels will be added:
+
+|Label |Description |
+|------------------------------|-----------------------------------------------------------------|
+|nutanix.com/prism-element-uuid|UUID of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-element-name|Name of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-host-uuid |UUID of the Prism AHV host hosting the Kubernetes node VM. |
+|nutanix.com/prism-host-name |Name of the Prism AHV host hosting the Kubernetes node VM. |
+
+Nutanix CCM will reconcile the labels periodically.
\ No newline at end of file
diff --git a/docs/ccm/v0.4.x/overview.md b/docs/ccm/v0.4.x/overview.md
new file mode 100644
index 00000000..02a426c7
--- /dev/null
+++ b/docs/ccm/v0.4.x/overview.md
@@ -0,0 +1,22 @@
+# Overview
+
+Nutanix CCM provides Cloud Controller Manager functionality to Kubernetes clusters running on the Nutanix AHV hypervisor. Visit the [Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) documentation for more information about the general design of a Kubernetes CCM.
+
+Nutanix CCM communicates with Prism Central (PC) to fetch all required information. See the [Requirements](./requirements.md) page for more details.
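+
+Once the CCM is running, the node labels it manages can be inspected with standard tooling, for example:
+```
+kubectl get nodes --show-labels
+```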
+ +## Nutanix CCM functionality + +|Version|Node Controller|Route Controller|Service Controller| +|-------|---------------|----------------|------------------| +|v0.4.x |Yes |No |No | +|v0.3.x |Yes |No |No | +|v0.2.x |Yes |No |No | + + +Nutanix CCM specific features: + +|Version|[Topology Discovery](./topology_discovery.md)|[Custom Labeling](./custom_labeling.md)| +|-------|---------------------------------------------|---------------------------------------| +|v0.4.x |Prism, Categories |Yes | +|v0.3.x |Prism, Categories |Yes | +|v0.2.x |Prism, Categories |Yes | \ No newline at end of file diff --git a/docs/ccm/v0.4.x/pc_certificates.md b/docs/ccm/v0.4.x/pc_certificates.md new file mode 100644 index 00000000..be9071bf --- /dev/null +++ b/docs/ccm/v0.4.x/pc_certificates.md @@ -0,0 +1,104 @@ +# Certificate Trust + +CCM invokes Prism Central APIs using the HTTPS protocol. CCM has different methods to handle the trust of the Prism Central certificates: + +- Enable certificate verification (default) +- Configure an additional trust bundle +- Disable certificate verification + +See the respective sections below for more information. + +## Enable certificate verification (default) +By default CCM will perform certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority. +No additional configuration is required in CCM. + +## Configure an additional trust bundle +CCM allows users to configure an additional trust bundle. This will allow CCM to verify certificates that are not issued by a publicy trusted certificate authority. + +To configure an additional trust bundle, see the [Configuring the additional trust bundle](#configuring-the-additional-trust-bundle) section for more information. + + +### Configuring the additional trust bundle + +To configure the additional trust bundle it is required to: + +- Create a `ConfigMap` containing the additional trust bundle +- Configure the `prismCentral.additionalTrustBundle` object in the CCM `ConfigMap` called `nutanix-config`. + +#### Creating the additional trust bundle ConfigMap + +CCM supports two different formats for the `ConfigMap` containing the additional trust bundle. The first one is to add the additional trust bundle as a multi-line string in the `ConfigMap`, the second option is to add the trust bundle in `base64` encoded format. See the examples below. + +Multi-line string example: +```YAML +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +data: + ca.crt: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- +``` + +`base64` example: + +```YAML +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +binaryData: + ca.crt: +``` + +!!! note + The `base64` string needs to be added as `binaryData`. + + +#### Configuring the CCM for an additional trust bundle + +When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `nutanix-config` `ConfigMap`. Add the `prismCentral.additionalTrustBundle` object as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced. + +```JSON + ... + "prismCentral": { + ... + "additionalTrustBundle": { + "kind": "ConfigMap", + "name": "user-ca-bundle" + } + }, + ... +``` + +!!! note + The default value of `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured. 
+ If `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled. + + +## Disable certificate verification + +!!! note + Disabling certificate verification is not recommended for production purposes and should only be used for testing. + + +Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `nutanix-config` `ConfigMap`. Certificate verification will be disabled even if an additional trust bundle is configured and the `prismCentral.insecure` attribute is set to `true`. + +Example of how to disable certificate verification: + +```JSON +... +"prismCentral": { + ... + "insecure": true +}, +... +``` \ No newline at end of file diff --git a/docs/ccm/v0.4.x/requirements.md b/docs/ccm/v0.4.x/requirements.md new file mode 100644 index 00000000..0bb867e2 --- /dev/null +++ b/docs/ccm/v0.4.x/requirements.md @@ -0,0 +1,33 @@ +# Requirements + +This section provides an overview of the requirements for Nutanix CCM: + +## Port requirements + +Nutanix CCM uses Prism Central APIs to fetch the required information for the Kubernetes nodes. As a result, the Kubernetes nodes need to have access to the Prism Central endpoint that is configured in the `nutanix-config` configmap. + +|Source |Destination |Protocol |Port |Description | +|------------------|--------------------|----------|-----|----------------------------------------| +|Kubernetes nodes |Prism Central |TCP |9440 |Nutanix CCM communication to Prism Central| + +## User permissions +Nutanix CCM will only perform read operations and requires a user account with an assigned `Viewer` role to consume Prism Central APIs. + +### Required roles: Local user + +|Role |Required| +|-------------------|--------| +|User Admin |No | +|Prism Central Admin|No | + +!!! note + + For local users, if no role is assigned, the local user will only get `Viewer` permissions + +### Required roles: Directory user + +Assign following role in the user role-mapping if a non-local user is required: + +|Role |Required| +|-------------------|--------| +|Viewer |Yes | diff --git a/docs/ccm/v0.4.x/topology_discovery.md b/docs/ccm/v0.4.x/topology_discovery.md new file mode 100644 index 00000000..7349e5b7 --- /dev/null +++ b/docs/ccm/v0.4.x/topology_discovery.md @@ -0,0 +1,124 @@ +# Topology Discovery + +One of the responsibilities of the CCM node controller is to annotate and label the nodes in a Kubernetes cluster with toplogy (region and zone) information. The Nutanix Cloud Controller Manager supports following topology discovery methods: + +- [Prism](#prism) +- [Categories](#categories) + +The topology discovery method can be configured via the `nutanix-config` configmap. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on the configuration parameters. + +## Prism + +Prism-based topology discovery is the default mode for Nutanix CCM. In this mode CCM will discover the Prism Element (PE) cluster and Prism Central (PC) instance that host the Kubernetes node VM. Prism Central is configured as the region for the node, while Prism Element is configured as the zone. 
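+
+The resulting assignment can later be verified per node with standard tooling, for example (illustrative):
+```
+kubectl get nodes -L topology.kubernetes.io/region -L topology.kubernetes.io/zone
+```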
+
+Prism-based topology discovery can be configured by omitting the `topologyDiscovery` attribute from the `nutanix-config` configmap or by passing the following object:
+```JSON
+  "topologyDiscovery": {
+    "type": "Prism"
+  }
+```
+
+### Example
+If a Kubernetes Node VM is hosted on PC `my-pc-instance` and PE `my-pe-cluster-1`, Nutanix CCM will assign the following labels to the Kubernetes node:
+
+|Key |Value |
+|-----------------------------|---------------|
+|topology.kubernetes.io/region|my-pc-instance |
+|topology.kubernetes.io/zone |my-pe-cluster-1|
+
+## Categories
+
+The category-based topology discovery mode allows users to assign categories to Prism Element clusters and Kubernetes Node VMs to define a custom topology. Nutanix CCM will hierarchically search for the required categories on the VM/PE.
+
+!!! note
+
+    Categories assigned to the VM object will take precedence over the categories assigned to the PE cluster.
+
+The categories must already exist in the PC environment; CCM will not create or assign them.
+Visit the [Prism Central documentation](https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html){target=_blank} for more information regarding categories.
+
+To enable the Categories topology discovery mode for Nutanix CCM, provide the following information in the `topologyDiscovery` attribute:
+
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "${NUTANIX_REGION_CATEGORY}",
+      "zoneCategory": "${NUTANIX_ZONE_CATEGORY}"
+    }
+  }
+```
+
+### Example
+
+Define a set of categories in PC that will be used for topology discovery:
+
+|Key |Value |
+|------------------|-----------------------|
+|my-region-category|region-1, region-2 |
+|my-zone-category |zone-1, zone-2, zone-3 |
+
+Assign the categories to the Nutanix entities:
+
+|Nutanix entity |Categories |
+|---------------|------------------------------------------------------|
+|my-pe-cluster-1|my-region-category:region-1<br>my-zone-category:zone-2|
+|my-pe-cluster-2|my-region-category:region-2<br>my-zone-category:zone-3|
+|k8s-node-3 |my-region-category:region-2<br>my-zone-category:zone-2|
+|k8s-node-4 |my-zone-category:zone-1 |
+
+
+Configure CCM to use categories for topology discovery:
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "my-region-category",
+      "zoneCategory": "my-zone-category"
+    }
+  }
+```
+
+!!! example "Scenario 1: Kubernetes node k8s-node-1 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-1`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 2: Kubernetes node k8s-node-2 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-2`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-3 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 3: Kubernetes node k8s-node-3 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-3`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the VM will be used.
+
+!!! example "Scenario 4: Kubernetes node k8s-node-4 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-4`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-1 |
+
+    In this scenario, Nutanix CCM will use the value of the `my-zone-category` category that is assigned to the VM. Since the `my-region-category` is not assigned to the VM, Nutanix CCM will search for the category on the PE and use the corresponding category value.
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/ccm_configuration.md b/docs/ccm/v0.5.x/ccm_configuration.md
new file mode 100644
index 00000000..c7b7d2b7
--- /dev/null
+++ b/docs/ccm/v0.5.x/ccm_configuration.md
@@ -0,0 +1,64 @@
+# Nutanix CCM Configuration
+
+Nutanix CCM can be configured via a `JSON`-formatted file stored in a configmap called `nutanix-config`. This configmap is located in the same namespace as the Nutanix CCM deployment. See the `manifests/cloud-provider-nutanix-deployment.yaml` file for details on the Nutanix CCM deployment.
+
+Example `nutanix-config` configmap:
+```YAML
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nutanix-config
+  namespace: kube-system
+data:
+  nutanix_config.json: |-
+    {
+      "prismCentral": {
+        "address": "${NUTANIX_ENDPOINT}",
+        "port": ${NUTANIX_PORT},
+        "insecure": ${NUTANIX_INSECURE},
+        "credentialRef": {
+          "kind": "secret",
+          "name": "nutanix-creds"
+        },
+        "additionalTrustBundle": {
+          "kind": "ConfigMap",
+          "name": "user-ca-bundle"
+        }
+      },
+      "enableCustomLabeling": false,
+      "topologyDiscovery": {
+        "type": "Categories",
+        "topologyCategories": {
+          "regionCategory": "${NUTANIX_REGION_CATEGORY}",
+          "zoneCategory": "${NUTANIX_ZONE_CATEGORY}"
+        }
+      }
+    }
+
+```
+
+The table below provides an overview of the supported configuration parameters.
+
+### Configuration parameters
+
+| Key |Type |Description |
+|---------------------------------------------------|------|------------------------------------------------------------------------------------------------------------------------------------------------------|
+|topologyDiscovery |object|(Optional) Configures the topology discovery mode.<br>`Prism` topology discovery is used by default if the `topologyDiscovery` attribute is not passed. |
+|topologyDiscovery.type |string|Topology Discovery mode. Can be `Prism` or `Categories`. See [Topology Discovery](./topology_discovery.md) for more information. |
+|topologyDiscovery.topologyCategories |object|Required if topology discovery mode is `Categories`. |
+|topologyDiscovery.topologyCategories.regionCategory|string|Category key defining the region of the Kubernetes node. |
+|topologyDiscovery.topologyCategories.zoneCategory |string|Category key defining the zone of the Kubernetes node. |
+|enableCustomLabeling |bool |Boolean value to enable custom labeling. See [Custom Labeling](./custom_labeling.md) for more information.<br>Default: `false` |
+|prismCentral |object|Prism Central endpoint configuration. |
+|prismCentral.address |string|FQDN/IP of the Prism Central endpoint. |
+|prismCentral.port |int |Port to connect to Prism Central.<br>Default: `9440` |
+|prismCentral.insecure |bool |Disable Prism Central certificate checking.<br>Default: `false` |
+|prismCentral.credentialRef |object|Prism Central credential configuration. See [Credentials](./ccm_credentials.md) for more information. |
+|prismCentral.credentialRef.kind |string|Credential kind.<br>Allowed value: `secret` |
+|prismCentral.credentialRef.name |string|Name of the secret. |
+|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret. |
+|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for the Prism Central connection. |
+|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` |
+|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. |
+|prismCentral.additionalTrustBundle.namespace |string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle. See [Certificate Trust](./pc_certificates.md) for more information.|
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/ccm_credentials.md b/docs/ccm/v0.5.x/ccm_credentials.md
new file mode 100644
index 00000000..7bda06e2
--- /dev/null
+++ b/docs/ccm/v0.5.x/ccm_credentials.md
@@ -0,0 +1,29 @@
+# Credentials
+
+Nutanix CCM requires credentials to connect to Prism Central. These credentials need to be stored in a secret in the following format:
+
+```YAML
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: nutanix-creds
+  namespace: kube-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "$NUTANIX_USERNAME",
+            "password": "$NUTANIX_PASSWORD"
+          },
+          "prismElements": null
+        }
+      }
+    ]
+
+```
+
+See [Requirements](./requirements.md) for more information on the required permissions.
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/custom_labeling.md b/docs/ccm/v0.5.x/custom_labeling.md
new file mode 100644
index 00000000..4db89462
--- /dev/null
+++ b/docs/ccm/v0.5.x/custom_labeling.md
@@ -0,0 +1,14 @@
+# Custom Labeling
+
+Enabling the Nutanix CCM custom labeling feature will add additional labels to the Kubernetes nodes. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on how to configure CCM to enable custom labeling.
+
+The following labels will be added:
+
+|Label |Description |
+|------------------------------|-----------------------------------------------------------------|
+|nutanix.com/prism-element-uuid|UUID of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-element-name|Name of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-host-uuid |UUID of the AHV host running the Kubernetes node VM. |
+|nutanix.com/prism-host-name |Name of the AHV host running the Kubernetes node VM. |
+
+Nutanix CCM will reconcile the labels periodically.
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/overview.md b/docs/ccm/v0.5.x/overview.md
new file mode 100644
index 00000000..698169aa
--- /dev/null
+++ b/docs/ccm/v0.5.x/overview.md
@@ -0,0 +1,24 @@
+# Overview
+
+Nutanix CCM provides Cloud Controller Manager functionality to Kubernetes clusters running on the Nutanix AHV hypervisor. Visit the [Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) documentation for more information about the general design of a Kubernetes CCM.
+
+Nutanix CCM communicates with Prism Central (PC) to fetch all required information. See the [Requirements](./requirements.md) page for more details.
+
+## Nutanix CCM functionality
+
+|Version|Node Controller|Route Controller|Service Controller|
+|-------|---------------|----------------|------------------|
+|v0.5.x |Yes |No |No |
+|v0.4.x |Yes |No |No |
+|v0.3.x |Yes |No |No |
+|v0.2.x |Yes |No |No |
+
+
+Nutanix CCM specific features:
+
+|Version|[Topology Discovery](./topology_discovery.md)|[Custom Labeling](./custom_labeling.md)|
+|-------|---------------------------------------------|---------------------------------------|
+|v0.5.x |Prism, Categories |Yes |
+|v0.4.x |Prism, Categories |Yes |
+|v0.3.x |Prism, Categories |Yes |
+|v0.2.x |Prism, Categories |Yes |
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/pc_certificates.md b/docs/ccm/v0.5.x/pc_certificates.md
new file mode 100644
index 00000000..be9071bf
--- /dev/null
+++ b/docs/ccm/v0.5.x/pc_certificates.md
@@ -0,0 +1,104 @@
+# Certificate Trust
+
+CCM invokes Prism Central APIs using the HTTPS protocol. CCM has different methods to handle the trust of the Prism Central certificates:
+
+- Enable certificate verification (default)
+- Configure an additional trust bundle
+- Disable certificate verification
+
+See the respective sections below for more information.
+
+## Enable certificate verification (default)
+By default, CCM will perform certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
+No additional configuration is required in CCM.
+
+## Configure an additional trust bundle
+CCM allows users to configure an additional trust bundle. This will allow CCM to verify certificates that are not issued by a publicly trusted certificate authority.
+
+To configure an additional trust bundle, see the [Configuring the additional trust bundle](#configuring-the-additional-trust-bundle) section for more information.
+
+
+### Configuring the additional trust bundle
+
+To configure the additional trust bundle, it is required to:
+
+- Create a `ConfigMap` containing the additional trust bundle
+- Configure the `prismCentral.additionalTrustBundle` object in the CCM `ConfigMap` called `nutanix-config`.
+
+#### Creating the additional trust bundle ConfigMap
+
+CCM supports two different formats for the `ConfigMap` containing the additional trust bundle. The first is to add the additional trust bundle as a multi-line string in the `ConfigMap`; the second is to add the trust bundle in `base64`-encoded format. See the examples below.
+
+Multi-line string example:
+```YAML
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+data:
+  ca.crt: |
+    -----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----
+    -----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----
+```
+
+`base64` example:
+
+```YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+binaryData:
+  ca.crt: 
+```
+
+!!! note
+    The `base64` string needs to be added as `binaryData`.
+
+
+#### Configuring the CCM for an additional trust bundle
+
+When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `nutanix-config` `ConfigMap`. Add the `prismCentral.additionalTrustBundle` object as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced.
+
+```JSON
+  ...
+  "prismCentral": {
+    ...
+    "additionalTrustBundle": {
+      "kind": "ConfigMap",
+      "name": "user-ca-bundle"
+    }
+  },
+  ...
+```
+
+!!! note
+    The default value of the `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured.
+    If the `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled.
+
+
+## Disable certificate verification
+
+!!! note
+    Disabling certificate verification is not recommended for production purposes and should only be used for testing.
+
+
+Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `nutanix-config` `ConfigMap`. When this attribute is set to `true`, certificate verification is disabled even if an additional trust bundle is configured.
+
+Example of how to disable certificate verification:
+
+```JSON
+...
+"prismCentral": {
+  ...
+  "insecure": true
+},
+...
+```
\ No newline at end of file
diff --git a/docs/ccm/v0.5.x/requirements.md b/docs/ccm/v0.5.x/requirements.md
new file mode 100644
index 00000000..0bb867e2
--- /dev/null
+++ b/docs/ccm/v0.5.x/requirements.md
@@ -0,0 +1,33 @@
+# Requirements
+
+This section provides an overview of the requirements for Nutanix CCM.
+
+## Port requirements
+
+Nutanix CCM uses Prism Central APIs to fetch the required information for the Kubernetes nodes. As a result, the Kubernetes nodes need to have access to the Prism Central endpoint that is configured in the `nutanix-config` configmap.
+
+|Source |Destination |Protocol |Port |Description |
+|------------------|--------------------|----------|-----|----------------------------------------|
+|Kubernetes nodes |Prism Central |TCP |9440 |Nutanix CCM communication to Prism Central|
+
+## User permissions
+Nutanix CCM will only perform read operations and requires a user account with an assigned `Viewer` role to consume Prism Central APIs.
+
+### Required roles: Local user
+
+|Role |Required|
+|-------------------|--------|
+|User Admin |No |
+|Prism Central Admin|No |
+
+!!! note
+
+    For local users, if no role is assigned, the user will only get `Viewer` permissions.
+
+### Required roles: Directory user
+
+Assign the following role in the user role mapping if a directory (non-local) user is used:
+
+|Role |Required|
+|-------------------|--------|
+|Viewer |Yes |
diff --git a/docs/ccm/v0.5.x/topology_discovery.md b/docs/ccm/v0.5.x/topology_discovery.md
new file mode 100644
index 00000000..7349e5b7
--- /dev/null
+++ b/docs/ccm/v0.5.x/topology_discovery.md
@@ -0,0 +1,124 @@
+# Topology Discovery
+
+One of the responsibilities of the CCM node controller is to annotate and label the nodes in a Kubernetes cluster with topology (region and zone) information. The Nutanix Cloud Controller Manager supports the following topology discovery methods:
+
+- [Prism](#prism)
+- [Categories](#categories)
+
+The topology discovery method can be configured via the `nutanix-config` configmap. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on the configuration parameters.
+
+## Prism
+
+Prism-based topology discovery is the default mode for Nutanix CCM. In this mode, CCM will discover the Prism Element (PE) cluster and Prism Central (PC) instance that host the Kubernetes node VM. Prism Central is configured as the region for the node, while Prism Element is configured as the zone.
+
+Prism-based topology discovery can be configured by omitting the `topologyDiscovery` attribute from the `nutanix-config` configmap or by passing the following object:
+```JSON
+  "topologyDiscovery": {
+    "type": "Prism"
+  }
+```
+
+### Example
+If a Kubernetes Node VM is hosted on PC `my-pc-instance` and PE `my-pe-cluster-1`, Nutanix CCM will assign the following labels to the Kubernetes node:
+
+|Key |Value |
+|-----------------------------|---------------|
+|topology.kubernetes.io/region|my-pc-instance |
+|topology.kubernetes.io/zone |my-pe-cluster-1|
+
+## Categories
+
+The category-based topology discovery mode allows users to assign categories to Prism Element clusters and Kubernetes Node VMs to define a custom topology. Nutanix CCM will hierarchically search for the required categories on the VM/PE.
+
+!!! note
+
+    Categories assigned to the VM object will take precedence over the categories assigned to the PE cluster.
+
+The categories must already exist in the PC environment; CCM will not create or assign them.
+Visit the [Prism Central documentation](https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html){target=_blank} for more information regarding categories.
+
+To enable the Categories topology discovery mode for Nutanix CCM, provide the following information in the `topologyDiscovery` attribute:
+
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "${NUTANIX_REGION_CATEGORY}",
+      "zoneCategory": "${NUTANIX_ZONE_CATEGORY}"
+    }
+  }
+```
+
+### Example
+
+Define a set of categories in PC that will be used for topology discovery:
+
+|Key |Value |
+|------------------|-----------------------|
+|my-region-category|region-1, region-2 |
+|my-zone-category |zone-1, zone-2, zone-3 |
+
+Assign the categories to the Nutanix entities:
+
+|Nutanix entity |Categories |
+|---------------|------------------------------------------------------|
+|my-pe-cluster-1|my-region-category:region-1<br>my-zone-category:zone-2|
+|my-pe-cluster-2|my-region-category:region-2<br>my-zone-category:zone-3|
+|k8s-node-3 |my-region-category:region-2<br>my-zone-category:zone-2|
+|k8s-node-4 |my-zone-category:zone-1 |
+
+
+Configure CCM to use categories for topology discovery:
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "my-region-category",
+      "zoneCategory": "my-zone-category"
+    }
+  }
+```
+
+!!! example "Scenario 1: Kubernetes node k8s-node-1 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-1`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 2: Kubernetes node k8s-node-2 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-2`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-3 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 3: Kubernetes node k8s-node-3 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-3`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the VM will be used.
+
+!!! example "Scenario 4: Kubernetes node k8s-node-4 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-4`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-1 |
+
+    In this scenario, Nutanix CCM will use the value of the `my-zone-category` category that is assigned to the VM. Since the `my-region-category` is not assigned to the VM, Nutanix CCM will search for the category on the PE and use the corresponding category value.
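+
+Whichever discovery mode is used, the resulting topology labels can be checked directly on the nodes. A minimal verification sketch (assuming `kubectl` is pointed at the workload cluster; node names are illustrative):
+
+```bash
+# List the region/zone labels that CCM assigned to each node
+kubectl get nodes -L topology.kubernetes.io/region -L topology.kubernetes.io/zone
+```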
\ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 1bc9ac0f..bc98a761 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -158,7 +158,23 @@ nav: - "Autoscaler": "capx/v0.5.x/experimental/autoscaler.md" - "Troubleshooting": "capx/v0.5.x/troubleshooting.md" - "Nutanix Cloud Controller Manager (CCM)": - - "v0.3.x (Latest)": + - "v0.5.x (Latest)": + - "Overview": "ccm/v0.5.x/overview.md" + - "Requirements": "ccm/v0.5.x/requirements.md" + - "Configuration": "ccm/v0.5.x/ccm_configuration.md" + - "Certificate Trust": "ccm/v0.5.x/pc_certificates.md" + - "Credentials": "ccm/v0.5.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.5.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.5.x/custom_labeling.md" + - "v0.4.x": + - "Overview": "ccm/v0.4.x/overview.md" + - "Requirements": "ccm/v0.4.x/requirements.md" + - "Configuration": "ccm/v0.4.x/ccm_configuration.md" + - "Certificate Trust": "ccm/v0.4.x/pc_certificates.md" + - "Credentials": "ccm/v0.4.x/ccm_credentials.md" + - "Topology Discovery": "ccm/v0.4.x/topology_discovery.md" + - "Custom Labeling": "ccm/v0.4.x/custom_labeling.md" + - "v0.3.x": - "Overview": "ccm/v0.3.x/overview.md" - "Requirements": "ccm/v0.3.x/requirements.md" - "Configuration": "ccm/v0.3.x/ccm_configuration.md" From da452c763a2611facefcd74ebeb6e5eb58b267c2 Mon Sep 17 00:00:00 2001 From: Atul Verma Date: Mon, 28 Jul 2025 16:54:00 +0530 Subject: [PATCH 07/15] docs: added for capx v1.6.x (latest) (#70) * docs: adds copy of v1.5.x * docs: rename v1.5.x copy to v1.6.x * docs: updated upgrade procedure and fixed resource links for v1.4 and v1.5 * docs: reference for upgrade_procedure in getting_started * docs: updated validated integrations * docs: updated symlink and mkdocs nav * docs: adds imageLookup and dataDisks to nutanix machine template * docs: fmt --- docs/capx/latest | 2 +- .../tasks/capx_v14x_upgrade_procedure.md | 6 +- docs/capx/v1.5.x/getting_started.md | 2 +- ...dure.md => capx_v15x_upgrade_procedure.md} | 6 +- docs/capx/v1.6.x/addons/install_csi_driver.md | 215 ++++++++++++++++++ docs/capx/v1.6.x/credential_management.md | 93 ++++++++ docs/capx/v1.6.x/experimental/autoscaler.md | 129 +++++++++++ .../capx/v1.6.x/experimental/capx_multi_pe.md | 30 +++ docs/capx/v1.6.x/experimental/oidc.md | 31 +++ docs/capx/v1.6.x/experimental/proxy.md | 62 +++++ .../v1.6.x/experimental/registry_mirror.md | 96 ++++++++ docs/capx/v1.6.x/experimental/vpc.md | 40 ++++ docs/capx/v1.6.x/getting_started.md | 159 +++++++++++++ docs/capx/v1.6.x/pc_certificates.md | 149 ++++++++++++ docs/capx/v1.6.x/port_requirements.md | 19 ++ .../tasks/capx_v16x_upgrade_procedure.md | 83 +++++++ .../tasks/modify_machine_configuration.md | 11 + docs/capx/v1.6.x/troubleshooting.md | 13 ++ docs/capx/v1.6.x/types/nutanix_cluster.md | 64 ++++++ .../v1.6.x/types/nutanix_machine_template.md | 124 ++++++++++ docs/capx/v1.6.x/user_requirements.md | 36 +++ docs/capx/v1.6.x/validated_integrations.md | 68 ++++++ mkdocs.yml | 27 ++- 23 files changed, 1455 insertions(+), 10 deletions(-) rename docs/capx/v1.5.x/tasks/{capx_v14x_upgrade_procedure.md => capx_v15x_upgrade_procedure.md} (92%) create mode 100644 docs/capx/v1.6.x/addons/install_csi_driver.md create mode 100644 docs/capx/v1.6.x/credential_management.md create mode 100644 docs/capx/v1.6.x/experimental/autoscaler.md create mode 100644 docs/capx/v1.6.x/experimental/capx_multi_pe.md create mode 100644 docs/capx/v1.6.x/experimental/oidc.md create mode 100644 docs/capx/v1.6.x/experimental/proxy.md create mode 100644 
docs/capx/v1.6.x/experimental/registry_mirror.md create mode 100644 docs/capx/v1.6.x/experimental/vpc.md create mode 100644 docs/capx/v1.6.x/getting_started.md create mode 100644 docs/capx/v1.6.x/pc_certificates.md create mode 100644 docs/capx/v1.6.x/port_requirements.md create mode 100644 docs/capx/v1.6.x/tasks/capx_v16x_upgrade_procedure.md create mode 100644 docs/capx/v1.6.x/tasks/modify_machine_configuration.md create mode 100644 docs/capx/v1.6.x/troubleshooting.md create mode 100644 docs/capx/v1.6.x/types/nutanix_cluster.md create mode 100644 docs/capx/v1.6.x/types/nutanix_machine_template.md create mode 100644 docs/capx/v1.6.x/user_requirements.md create mode 100644 docs/capx/v1.6.x/validated_integrations.md diff --git a/docs/capx/latest b/docs/capx/latest index 39f865cc..2d54a8c3 120000 --- a/docs/capx/latest +++ b/docs/capx/latest @@ -1 +1 @@ -v1.5.x \ No newline at end of file +v1.6.x \ No newline at end of file diff --git a/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md b/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md index a64c990c..14602f73 100644 --- a/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md +++ b/docs/capx/v1.4.x/tasks/capx_v14x_upgrade_procedure.md @@ -61,9 +61,9 @@ spec: ``` 3. Add the Nutanix CCM CRS resources: - - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm-crs.yaml){target=_blank} - - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm-secret.yaml) - - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/base/nutanix-ccm.yaml) + - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/ccm/nutanix-ccm-crs.yaml){target=_blank} + - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/ccm/nutanix-ccm-secret.yaml) + - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.4.0/templates/ccm/nutanix-ccm.yaml) Make sure to update each of the variables before applying the `YAML` files. diff --git a/docs/capx/v1.5.x/getting_started.md b/docs/capx/v1.5.x/getting_started.md index 536e5243..d8191883 100644 --- a/docs/capx/v1.5.x/getting_started.md +++ b/docs/capx/v1.5.x/getting_started.md @@ -9,7 +9,7 @@ For more information on how CAPX handles credentials, visit [Credential Manageme For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md). !!! note - [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.5.x Upgrade Procedure](./tasks/capx_v14x_upgrade_procedure.md). + [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.5.x Upgrade Procedure](./tasks/capx_v15x_upgrade_procedure.md). 
## Production Workflow diff --git a/docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md b/docs/capx/v1.5.x/tasks/capx_v15x_upgrade_procedure.md similarity index 92% rename from docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md rename to docs/capx/v1.5.x/tasks/capx_v15x_upgrade_procedure.md index 4c74a29a..5361700b 100644 --- a/docs/capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md +++ b/docs/capx/v1.5.x/tasks/capx_v15x_upgrade_procedure.md @@ -61,9 +61,9 @@ spec: ``` 3. Add the Nutanix CCM CRS resources: - - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm-crs.yaml){target=_blank} - - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm-secret.yaml) - - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/base/nutanix-ccm.yaml) + - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/ccm/nutanix-ccm-crs.yaml){target=_blank} + - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/ccm/nutanix-ccm-secret.yaml) + - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.5.0/templates/ccm/nutanix-ccm.yaml) Make sure to update each of the variables before applying the `YAML` files. diff --git a/docs/capx/v1.6.x/addons/install_csi_driver.md b/docs/capx/v1.6.x/addons/install_csi_driver.md new file mode 100644 index 00000000..afb4bdc8 --- /dev/null +++ b/docs/capx/v1.6.x/addons/install_csi_driver.md @@ -0,0 +1,215 @@ +# Nutanix CSI Driver installation with CAPX + +The Nutanix CSI driver is fully supported on CAPI/CAPX deployed clusters where all the nodes meet the [Nutanix CSI driver prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver). + +There are three methods to install the Nutanix CSI driver on a CAPI/CAPX cluster: + +- Helm +- ClusterResourceSet +- CAPX Flavor + +For more information, check the next sections. + +## CAPI Workload cluster prerequisites for the Nutanix CSI Driver + +Kubernetes workers need the following prerequisites to use the Nutanix CSI Drivers: + +- iSCSI initiator package (for Volumes based block storage) +- NFS client package (for Files based storage) + +These packages may already be present in the image you use with your infrastructure provider or you can also rely on your bootstrap provider to install them. More info is available in the [Prerequisites docs](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-plugin-prerequisites-r.html){target=_blank}. + +The package names and installation method will also vary depending on the operating system you plan to use. + +In the example below, `kubeadm` bootstrap provider is used to deploy these packages on top of an Ubuntu 20.04 image. The `kubeadm` bootstrap provider allows defining `preKubeadmCommands` that will be launched before Kubernetes cluster creation. These `preKubeadmCommands` can be defined both in `KubeadmControlPlane` for master nodes and in `KubeadmConfigTemplate` for worker nodes. + +In the example with an Ubuntu 20.04 image, both `KubeadmControlPlane` and `KubeadmConfigTemplate` must be modified as in the example below: + +```yaml +spec: + template: + spec: + # ....... 
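+      # The commands below run on every node before `kubeadm init`/`kubeadm join`,
+      # installing the iSCSI initiator and NFS client packages the CSI driver needs.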
+ preKubeadmCommands: + - echo "before kubeadm call" > /var/log/prekubeadm.log + - apt update + - apt install -y nfs-common open-iscsi + - systemctl enable --now iscsid +``` +## Install the Nutanix CSI Driver with Helm + +A recent [Helm](https://helm.sh){target=_blank} version is needed (tested with Helm v3.10.1). + +The example below must be applied on a ready workload cluster. The workload cluster's kubeconfig can be retrieved and used to connect with the following command: + +```shell +clusterctl get kubeconfig $CLUSTER_NAME -n $CLUSTER_NAMESPACE > $CLUSTER_NAME-KUBECONFIG +export KUBECONFIG=$(pwd)/$CLUSTER_NAME-KUBECONFIG +``` + +Once connected to the cluster, follow the [CSI documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-driver-install-t.html){target=_blank}. + +First, install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot){target=_blank} chart followed by the [nutanix-csi-storage](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage){target=_blank} chart. + +See an example below: + +```shell +#Add the official Nutanix Helm repo and get the latest update +helm repo add nutanix https://nutanix.github.io/helm/ +helm repo update + +# Install the nutanix-csi-snapshot chart +helm install nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system --create-namespace + +# Install the nutanix-csi-storage chart +helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --set createSecret=false +``` + +!!! warning + For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present. + +## Install the Nutanix CSI Driver with `ClusterResourceSet` + +The `ClusterResourceSet` feature was introduced to automatically apply a set of resources (such as CNI/CSI) defined by administrators to matching created/existing workload clusters. + +### Enabling the `ClusterResourceSet` feature + +At the time of writing, `ClusterResourceSet` is an experimental feature that must be enabled during the initialization of a management cluster with the `EXP_CLUSTER_RESOURCE_SET` feature gate. + +To do this, add `EXP_CLUSTER_RESOURCE_SET: "true"` in the `clusterctl` configuration file or just `export EXP_CLUSTER_RESOURCE_SET=true` before initializing the management cluster with `clusterctl init`. + +If the management cluster is already initialized, the `ClusterResourceSet` can be enabled by changing the configuration of the `capi-controller-manager` deployment in the `capi-system` namespace. + + ```shell + kubectl edit deployment -n capi-system capi-controller-manager + ``` + +Locate the section below: + +```yaml + - args: + - --leader-elect + - --metrics-bind-addr=localhost:8080 + - --feature-gates=MachinePool=false,ClusterResourceSet=true,ClusterTopology=false +``` + +Then replace `ClusterResourceSet=false` with `ClusterResourceSet=true`. + +!!! note + Editing the `deployment` resource will cause Kubernetes to automatically start new versions of the containers with the feature enabled. + + + +### Prepare the Nutanix CSI `ClusterResourceSet` + +#### Create the `ConfigMap` for the CSI Plugin + +First, create a `ConfigMap` that contains a YAML manifest with all resources to install the Nutanix CSI driver. + +Since the Nutanix CSI Driver is provided as a Helm chart, use `helm` to extract it before creating the `ConfigMap`. 
See an example below:
+
+```shell
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+kubectl create ns ntnx-system --dry-run=client -o yaml > nutanix-csi-namespace.yaml
+helm template nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system > nutanix-csi-snapshot.yaml
+helm template nutanix-csi-storage nutanix/nutanix-csi-storage -n ntnx-system > nutanix-csi-storage.yaml
+
+kubectl create configmap nutanix-csi-crs --from-file=nutanix-csi-namespace.yaml --from-file=nutanix-csi-snapshot.yaml --from-file=nutanix-csi-storage.yaml
+```
+
+#### Create the `ClusterResourceSet`
+
+Next, create the `ClusterResourceSet` resource that will map the `ConfigMap` defined above to clusters using a `clusterSelector`.
+
+The `ClusterResourceSet` needs to be created inside the management cluster. See an example below:
+
+```yaml
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+  name: nutanix-csi-crs
+spec:
+  clusterSelector:
+    matchLabels:
+      csi: nutanix
+  resources:
+  - kind: ConfigMap
+    name: nutanix-csi-crs
+```
+
+The `clusterSelector` field controls how Cluster API will match this `ClusterResourceSet` on one or more workload clusters. In the example scenario, the `matchLabels` approach is used: the `ClusterResourceSet` will be applied to all workload clusters that have the `csi: nutanix` label present. If the label isn't present, the `ClusterResourceSet` won't apply to that workload cluster.
+
+The `resources` field references the `ConfigMap` created above, which contains the manifests for installing the Nutanix CSI driver.
+
+#### Assign the `ClusterResourceSet` to a workload cluster
+
+Assign this `ClusterResourceSet` to the workload cluster by adding the correct label to the `Cluster` resource.
+
+This can be done before workload cluster creation by editing the output of the `clusterctl generate cluster` command or by modifying an already deployed workload cluster.
+
+In both cases, `Cluster` resources should look like this:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: workload-cluster-name
+  namespace: workload-cluster-namespace
+  labels:
+    csi: nutanix
+# ...
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Install the Nutanix CSI Driver with a CAPX flavor
+
+The CAPX provider can utilize a flavor to automatically deploy the Nutanix CSI driver using a `ClusterResourceSet`.
+
+### Prerequisites
+
+The following requirements must be met:
+
+- The operating system must meet the [Nutanix CSI OS prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+- The management cluster must be installed with the [`CLUSTER_RESOURCE_SET` feature gate](#enabling-the-clusterresourceset-feature).
+
+### Installation
+
+Specify the `csi` flavor during workload cluster creation.
See an example below:
+
+```shell
+clusterctl generate cluster my-cluster -f csi
+```
+
+Additional environment variables are required:
+
+- `WEBHOOK_CA`: Base64-encoded CA certificate used to sign the webhook certificate
+- `WEBHOOK_CERT`: Base64-encoded certificate for the webhook validation component
+- `WEBHOOK_KEY`: Base64-encoded key for the webhook validation component
+
+The three components referenced above can be automatically created and referenced using [this script](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/main/scripts/gen-self-cert.sh){target=_blank}:
+
+```
+source scripts/gen-self-cert.sh
+```
+
+The certificate must reference the following names:
+
+- csi-snapshot-webhook
+- csi-snapshot-webhook.ntnx-system
+- csi-snapshot-webhook.ntnx-system.svc
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Nutanix CSI Driver Configuration
+
+After the driver is installed, it must be configured for use by minimally defining a `Secret` and `StorageClass`.
+
+This can be done manually in the workload clusters or by using a `ClusterResourceSet` in the management cluster as explained above.
+
+See the official [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:CSI-Volume-Driver-v2_6){target=_blank} on the Nutanix Portal for more configuration information.
diff --git a/docs/capx/v1.6.x/credential_management.md b/docs/capx/v1.6.x/credential_management.md
new file mode 100644
index 00000000..bebbc5a0
--- /dev/null
+++ b/docs/capx/v1.6.x/credential_management.md
@@ -0,0 +1,93 @@
+# Credential Management
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs to manage the required Kubernetes cluster infrastructure resources.
+
+PC credentials are required to authenticate to the PC APIs. CAPX currently supports two mechanisms to supply the required credentials:
+
+- Credentials injected into the CAPX manager deployment
+- Workload cluster specific credentials
+
+## Credentials injected into the CAPX manager deployment
+By default, credentials will be injected into the CAPX manager deployment when CAPX is initialized. See the [getting started guide](./getting_started.md) for more information on the initialization.
+
+Upon initialization, a `nutanix-creds` secret will automatically be created in the `capx-system` namespace. This secret will contain the values supplied via the `NUTANIX_USER` and `NUTANIX_PASSWORD` parameters.
+
+The `nutanix-creds` secret will be used for workload cluster deployment if no other credential is supplied.
+
+### Example
+An example of the automatically created `nutanix-creds` secret can be found below:
+```yaml
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: nutanix-creds
+  namespace: capx-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+## Workload cluster specific credentials
+Users can override the [credentials injected into the CAPX manager deployment](#credentials-injected-into-the-capx-manager-deployment) by supplying a credential specific to a workload cluster. The credentials can be supplied by creating a secret in the same namespace as the `NutanixCluster` resource.
+
+The secret can be referenced by adding a `credentialRef` inside the `prismCentral` attribute contained in the `NutanixCluster`.
+The secret will also be deleted when the `NutanixCluster` is deleted.
+
+Note: There is a 1:1 relation between the secret and the `NutanixCluster` object.
+
+### Example
+Create a secret in the namespace of the `NutanixCluster`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ""
+  namespace: ""
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+Add a `prismCentral` and corresponding `credentialRef` to the `NutanixCluster`:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ""
+  namespace: ""
+spec:
+  prismCentral:
+    ...
+    credentialRef:
+      name: ""
+      kind: Secret
+...
+```
+
+See the [NutanixCluster](./types/nutanix_cluster.md) documentation for all supported configuration parameters for the `prismCentral` and `credentialRef` attributes.
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/experimental/autoscaler.md b/docs/capx/v1.6.x/experimental/autoscaler.md
new file mode 100644
index 00000000..2af57213
--- /dev/null
+++ b/docs/capx/v1.6.x/experimental/autoscaler.md
@@ -0,0 +1,129 @@
+# Using Autoscaler in combination with CAPX
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+[Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank} can be used in combination with Cluster API to automatically add or remove machines in a cluster.
+
+Autoscaler can be used in different deployment scenarios. This page provides an overview of multiple Autoscaler deployment scenarios in combination with CAPX.
+See the [Testing](#testing) section for how scale-up/scale-down events can be triggered to validate the Autoscaler behaviour.
+
+More in-depth information on Autoscaler functionality can be found in the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank}.
+
+All Autoscaler configuration parameters can be found [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank}.
+
+## Scenario 1: Management cluster managing an external workload cluster
+In this scenario, Autoscaler will run on a management cluster and manage an external workload cluster. See the management cluster managing an external workload cluster section of the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster){target=_blank} for more information.
+
+### Steps
+1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
+
+    !!! note
+        Make sure a CNI is installed in the workload cluster.
+
+2. Download the example [Autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
+3. Modify the `deployment.yaml` file:
+    - Change the namespace of all resources to the namespace of the workload cluster.
+    - Choose an Autoscaler image.
+    - Change the following parameters in the `Deployment` resource:
+```YAML
+    spec:
+      containers:
+        name: cluster-autoscaler
+        command:
+        - /cluster-autoscaler
+        args:
+        - --cloud-provider=clusterapi
+        - --kubeconfig=/mnt/kubeconfig/kubeconfig.yml
+        - --clusterapi-cloud-config-authoritative
+        - -v=1
+        volumeMounts:
+        - mountPath: /mnt/kubeconfig
+          name: kubeconfig
+          readOnly: true
+      ...
+      volumes:
+      - name: kubeconfig
+        secret:
+          secretName: -kubeconfig
+          items:
+          - key: value
+            path: kubeconfig.yml
+```
+4. Apply the `deployment.yaml` file.
+```bash
+kubectl apply -f deployment.yaml
+```
+5. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
+6. Test Autoscaler. Go to the [Testing](#testing) section.
+
+## Scenario 2: Autoscaler running on workload cluster
+In this scenario, Autoscaler will be deployed [on top of the workload cluster](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-a-joined-cluster-using-service-account-credentials){target=_blank} directly. In order for Autoscaler to work, it is required that the workload cluster resources are moved from the management cluster to the workload cluster.
+
+### Steps
+1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
+2. Get the kubeconfig file for the workload cluster and use this kubeconfig to log in to the workload cluster.
+```bash
+clusterctl get kubeconfig -n /path/to/kubeconfig
+```
+3. Install a CNI in the workload cluster.
+4. Initialise the CAPX components on top of the workload cluster:
+```bash
+clusterctl init --infrastructure nutanix
+```
+5. Migrate the workload cluster custom resources to the workload cluster. Run the following command from the management cluster:
+```bash
+clusterctl move -n --to-kubeconfig /path/to/kubeconfig
+```
+6. Verify that the cluster has been migrated by running the following command on the workload cluster:
+```bash
+kubectl get cluster -A
+```
+7. Download the example [autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
+8. Create the Autoscaler namespace:
+```bash
+kubectl create ns autoscaler
+```
+9. Apply the `deployment.yaml` file:
+```bash
+kubectl apply -f deployment.yaml
+```
+10. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
+11. Test Autoscaler. Go to the [Testing](#testing) section.
+
+## Testing
+
+1. Deploy an example Kubernetes application. For example, the one used in the [Kubernetes HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
+```bash
+kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
+```
+2. Increase the number of replicas of the application to trigger a scale-up event:
+```
+kubectl scale deployment php-apache --replicas 100
+```
+3. Decrease the number of replicas of the application again to trigger a scale-down event.
+
+    !!! note
+        In case of issues, check the logs of the Autoscaler pods.
+
+4. After a while, CAPX will add more machines; one way to watch this happen is shown in the sketch after this list. Refer to the [Autoscaler configuration parameters](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank} to tweak the behaviour and timeouts.
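+
+While the test runs, it can help to watch the CAPI machine objects react. A small sketch (assuming `kubectl` is pointed at the cluster that holds the CAPI resources):
+
+```bash
+# Watch MachineDeployment replica counts and Machine objects change as Autoscaler scales
+kubectl get machinedeployments,machines -A -w
+```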
+
+## Autoscaler node group annotations
+Autoscaler uses the following annotations to define the upper and lower boundaries of the managed machines:
+
+| Annotation | Example Value | Description |
+|-------------------------------------------------------------|---------------|-----------------------------------------------|
+| cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size | 5 | Maximum number of machines in this node group |
+| cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size | 1 | Minimum number of machines in this node group |
+
+These annotations must be applied to the `MachineDeployment` resources of a CAPX cluster.
+
+### Example
+```YAML
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  annotations:
+    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
+    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
```
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/experimental/capx_multi_pe.md b/docs/capx/v1.6.x/experimental/capx_multi_pe.md
new file mode 100644
index 00000000..bd52ccd7
--- /dev/null
+++ b/docs/capx/v1.6.x/experimental/capx_multi_pe.md
@@ -0,0 +1,30 @@
+# Creating a workload CAPX cluster spanning Prism Element clusters
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+This page explains how to deploy CAPX-based Kubernetes clusters whose worker nodes span multiple Prism Element (PE) clusters.
+
+!!! note
+    All the PE clusters must be managed by the same Prism Central (PC) instance.
+
+The topology will look like this:
+
+- One PC managing multiple PEs
+- One CAPI management cluster
+- One CAPI workload cluster with multiple `MachineDeployment` resources
+
+Refer to the [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to get started with CAPX.
+
+To create workload clusters spanning multiple Prism Element clusters, it is required to create a `MachineDeployment` and `NutanixMachineTemplate` resource for each Prism Element cluster. The Prism Element specific parameters (name/UUID, subnet,...) are referenced in the `NutanixMachineTemplate`.
+
+## Steps
+1. Create a management cluster that has the CAPX infrastructure provider deployed.
+2. Create a `cluster.yml` file containing the workload cluster definition. Refer to the steps defined in the [CAPI quickstart guide](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to create an example `cluster.yml` file.
+3. Add additional `MachineDeployment` and `NutanixMachineTemplate` resources.
+
+    By default, there is only one machine template and machine deployment defined. To add nodes residing on another Prism Element cluster, a new `MachineDeployment` and `NutanixMachineTemplate` resource needs to be added to the yaml file. The autogenerated `MachineDeployment` and `NutanixMachineTemplate` resource definitions can be used as a baseline.
+
+    Make sure to modify the `MachineDeployment` and `NutanixMachineTemplate` parameters.
+
+4. Apply the modified `cluster.yml` file to the management cluster.
diff --git a/docs/capx/v1.6.x/experimental/oidc.md b/docs/capx/v1.6.x/experimental/oidc.md
new file mode 100644
index 00000000..0c274121
--- /dev/null
+++ b/docs/capx/v1.6.x/experimental/oidc.md
@@ -0,0 +1,31 @@
+# OIDC integration
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+ +Kubernetes allows users to authenticate using various authentication mechanisms. One of these mechanisms is OIDC. Information on how Kubernetes interacts with OIDC providers can be found in the [OpenID Connect Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens){target=_blank} section of the official Kubernetes documentation. + + +Follow the steps below to configure a CAPX cluster to use an OIDC identity provider. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource. +3. Modify/add the `spec.kubeadmConfigSpec.clusterConfiguration.apiServer.extraArgs` attribute and add the required [API server parameters](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server){target=_blank}. See the [example](#example) below. +4. Apply the `cluster.yaml` file +5. Log in with the OIDC provider once the cluster is provisioned + +## Example +```YAML +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + ... + oidc-client-id: + oidc-issuer-url: + ... +``` + diff --git a/docs/capx/v1.6.x/experimental/proxy.md b/docs/capx/v1.6.x/experimental/proxy.md new file mode 100644 index 00000000..c8f940d4 --- /dev/null +++ b/docs/capx/v1.6.x/experimental/proxy.md @@ -0,0 +1,62 @@ +# Proxy configuration + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +CAPX can be configured to use a proxy to connect to external networks. This proxy configuration needs to be applied to control plane and worker nodes. + +Follow the steps below to configure a CAPX cluster to use a proxy. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration. + 1. `KubeadmControlPlane`: + * Add the proxy configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list. + * Add `systemctl` commands to apply the proxy config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list. + 2. `KubeadmConfigTemplate`: + * Add the proxy configuration to the `spec.template.spec.files` list. Do not modify other items in the list. + * Add `systemctl` commands to apply the proxy config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list. +4. Apply the `cluster.yaml` file + +## Example + +```YAML +--- +# controlplane proxy settings +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + files: + - content: | + [Service] + Environment="HTTP_PROXY=" + Environment="HTTPS_PROXY=" + Environment="NO_PROXY=" + owner: root:root + path: /etc/systemd/system/containerd.service.d/http-proxy.conf + ... + preKubeadmCommands: + - sudo systemctl daemon-reload + - sudo systemctl restart containerd + ... 
+--- +# worker proxy settings +kind: KubeadmConfigTemplate +spec: + template: + spec: + files: + - content: | + [Service] + Environment="HTTP_PROXY=" + Environment="HTTPS_PROXY=" + Environment="NO_PROXY=" + owner: root:root + path: /etc/systemd/system/containerd.service.d/http-proxy.conf + ... + preKubeadmCommands: + - sudo systemctl daemon-reload + - sudo systemctl restart containerd + ... +``` + diff --git a/docs/capx/v1.6.x/experimental/registry_mirror.md b/docs/capx/v1.6.x/experimental/registry_mirror.md new file mode 100644 index 00000000..307a9425 --- /dev/null +++ b/docs/capx/v1.6.x/experimental/registry_mirror.md @@ -0,0 +1,96 @@ +# Registry Mirror configuration + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +CAPX can be configured to use a private registry to act as a mirror of an external public registry. This registry mirror configuration needs to be applied to control plane and worker nodes. + +Follow the steps below to configure a CAPX cluster to use a registry mirror. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration. + 1. `KubeadmControlPlane`: + * Add the registry mirror configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list. + * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list. + 2. `KubeadmConfigTemplate`: + * Add the registry mirror configuration to the `spec.template.spec.files` list. Do not modify other items in the list. + * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list. +4. Apply the `cluster.yaml` file + +## Example + +This example will configure a registry mirror for the following namespace: + +* registry.k8s.io +* ghcr.io +* quay.io + +and redirect them to corresponding projects of the `` registry. + +```YAML +--- +# controlplane proxy settings +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + files: + - content: | + [host."https:///v2/registry.k8s.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml + - content: | + [host."https:///v2/ghcr.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/ghcr.io/hosts.toml + - content: | + [host."https:///v2/quay.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/quay.io/hosts.toml + ... + preKubeadmCommands: + - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml + ... 
+---
+# worker registry mirror settings
+kind: KubeadmConfigTemplate
+spec:
+  template:
+    spec:
+      files:
+      - content: |
+          [host."https://<registry-mirror>/v2/registry.k8s.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
+      - content: |
+          [host."https://<registry-mirror>/v2/ghcr.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/ghcr.io/hosts.toml
+      - content: |
+          [host."https://<registry-mirror>/v2/quay.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/quay.io/hosts.toml
+      ...
+      preKubeadmCommands:
+      - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
+      ...
+```
+
diff --git a/docs/capx/v1.6.x/experimental/vpc.md b/docs/capx/v1.6.x/experimental/vpc.md
new file mode 100644
index 00000000..3513e47e
--- /dev/null
+++ b/docs/capx/v1.6.x/experimental/vpc.md
@@ -0,0 +1,40 @@
+# Creating a workload CAPX cluster in a Nutanix Flow VPC
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+!!! note
+    Nutanix Flow VPCs are only validated with CAPX 1.1.3+
+
+[Nutanix Flow Virtual Networking](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9:Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9){target=_blank} allows users to create Virtual Private Clouds (VPCs) with Overlay networking.
+The steps below will illustrate how a CAPX cluster can be deployed inside an overlay subnet (NAT) inside a VPC while the management cluster resides outside of the VPC.
+
+
+## Steps
+1. [Request a floating IP](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Networking-Guide:ear-flow-nw-request-floating-ip-pc-t.html){target=_blank}.
+2. Link the floating IP to an internal IP address inside the overlay subnet that will be used to deploy the CAPX cluster. This address will be assigned to the CAPX load balancer. To prevent IP conflicts, make sure the IP address is not part of the IP pool defined in the subnet.
+3. Generate a `cluster.yaml` file with the required CAPX cluster configuration where the `CONTROL_PLANE_ENDPOINT_IP` is set to the floating IP requested in the first step (see the sketch after this list). Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
+4. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
+5. Modify the `spec.kubeadmConfigSpec.files.*.content` attribute and change the `kube-vip` definition similar to the [example](#example) below.
+6. Apply the `cluster.yaml` file.
+7. When the CAPX workload cluster is deployed, it will be reachable via the floating IP.
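+
+As a sketch of step 3, with an illustrative floating IP of `203.0.113.10` and cluster name `vpc-cluster` (both placeholder values, not from the original guide):
+
+```
+# The floating IP requested in step 1 becomes the control plane endpoint;
+# the kube-vip address is then changed to the linked internal IP in step 5.
+CONTROL_PLANE_ENDPOINT_IP=203.0.113.10 clusterctl generate cluster vpc-cluster \
+    -i nutanix > cluster.yaml
+```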
+
+## Example
+```YAML
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    files:
+    - content: |
+        apiVersion: v1
+        kind: Pod
+        metadata:
+          name: kube-vip
+          namespace: kube-system
+        spec:
+          containers:
+          - env:
+            - name: address
+              value: "<internal-ip-linked-to-the-floating-ip>"
+```
+
diff --git a/docs/capx/v1.6.x/getting_started.md b/docs/capx/v1.6.x/getting_started.md
new file mode 100644
index 00000000..3866492a
--- /dev/null
+++ b/docs/capx/v1.6.x/getting_started.md
@@ -0,0 +1,159 @@
+# Getting Started
+
+This is a guide on getting started with Cluster API Provider Nutanix Cloud Infrastructure (CAPX). To learn about Cluster API in more depth, check out the [Cluster API book](https://cluster-api.sigs.k8s.io/){target=_blank}.
+
+For more information on how to install the Nutanix CSI Driver on a CAPX cluster, visit [Nutanix CSI Driver installation with CAPX](./addons/install_csi_driver.md).
+
+For more information on how CAPX handles credentials, visit [Credential Management](./credential_management.md).
+
+For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md).
+
+!!! note
+    [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.6.x Upgrade Procedure](./tasks/capx_v16x_upgrade_procedure.md).
+
+## Production Workflow
+
+### Build OS image for NutanixMachineTemplate resource
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) uses the [Image Builder](https://image-builder.sigs.k8s.io/){target=_blank} project to build OS images used for the Nutanix machines.
+
+Follow the steps detailed in [Building CAPI Images for Nutanix Cloud Platform (NCP)](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#building-capi-images-for-nutanix-cloud-platform-ncp){target=_blank} to use Image Builder on the Nutanix Cloud Platform.
+
+For a list of operating systems, visit the OS image [Configuration](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#configuration){target=_blank} page.
+
+### Prerequisites for using Cluster API Provider Nutanix Cloud Infrastructure
+The [Cluster API installation](https://cluster-api.sigs.k8s.io/user/quick-start.html#installation){target=_blank} section provides an overview of all required prerequisites:
+
+- [Common Prerequisites](https://cluster-api.sigs.k8s.io/user/quick-start.html#common-prerequisites){target=_blank}
+- [Install and/or configure a Kubernetes cluster](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-andor-configure-a-kubernetes-cluster){target=_blank}
+- [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl){target=_blank}
+- (Optional) [Enabling Feature Gates](https://cluster-api.sigs.k8s.io/user/quick-start.html#enabling-feature-gates){target=_blank}
+
+Make sure these prerequisites have been met before moving to the [Configure and Install Cluster API Provider Nutanix Cloud Infrastructure](#configure-and-install-cluster-api-provider-nutanix-cloud-infrastructure) step.
+
+
+### Configure and Install Cluster API Provider Nutanix Cloud Infrastructure
+To initialize Cluster API Provider Nutanix Cloud Infrastructure, `clusterctl` requires the following variables, which should be set in either `~/.cluster-api/clusterctl.yaml` or as environment variables.
+```
+NUTANIX_ENDPOINT: "" # IP or FQDN of Prism Central
+NUTANIX_USER: "" # Prism Central user
+NUTANIX_PASSWORD: "" # Prism Central password
+NUTANIX_INSECURE: false # or true
+
+KUBERNETES_VERSION: "v1.22.9"
+WORKER_MACHINE_COUNT: 3
+NUTANIX_SSH_AUTHORIZED_KEY: ""
+
+NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: ""
+NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME: ""
+NUTANIX_SUBNET_NAME: ""
+
+EXP_CLUSTER_RESOURCE_SET: true # Required for Nutanix CCM installation
+```
+
+You can also see the required list of variables by running the following:
+```
+clusterctl generate cluster mycluster -i nutanix --list-variables
+Required Variables:
+  - CONTROL_PLANE_ENDPOINT_IP
+  - KUBERNETES_VERSION
+  - NUTANIX_ENDPOINT
+  - NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME
+  - NUTANIX_PASSWORD
+  - NUTANIX_PRISM_ELEMENT_CLUSTER_NAME
+  - NUTANIX_SSH_AUTHORIZED_KEY
+  - NUTANIX_SUBNET_NAME
+  - NUTANIX_USER
+
+Optional Variables:
+  - CONTROL_PLANE_ENDPOINT_PORT (defaults to "6443")
+  - CONTROL_PLANE_MACHINE_COUNT (defaults to 1)
+  - KUBEVIP_LB_ENABLE (defaults to "false")
+  - KUBEVIP_SVC_ENABLE (defaults to "false")
+  - NAMESPACE (defaults to current Namespace in the KubeConfig file)
+  - NUTANIX_INSECURE (defaults to "false")
+  - NUTANIX_MACHINE_BOOT_TYPE (defaults to "legacy")
+  - NUTANIX_MACHINE_MEMORY_SIZE (defaults to "4Gi")
+  - NUTANIX_MACHINE_VCPU_PER_SOCKET (defaults to "1")
+  - NUTANIX_MACHINE_VCPU_SOCKET (defaults to "2")
+  - NUTANIX_PORT (defaults to "9440")
+  - NUTANIX_SYSTEMDISK_SIZE (defaults to "40Gi")
+  - WORKER_MACHINE_COUNT (defaults to 0)
+```
+
+!!! note
+    To prevent duplicate IP assignments, it is required to assign an IP address to the `CONTROL_PLANE_ENDPOINT_IP` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
+
+!!! warning
+    Make sure [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled before running `clusterctl init`
+
+Now you can instantiate Cluster API with the following:
+```
+clusterctl init -i nutanix
+```
+
+### Deploy a workload cluster on Nutanix Cloud Infrastructure
+```
+export TEST_CLUSTER_NAME=mytestcluster1
+export TEST_NAMESPACE=mytestnamespace
+CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster ${TEST_CLUSTER_NAME} \
+    -i nutanix \
+    --target-namespace ${TEST_NAMESPACE}  \
+    --kubernetes-version v1.22.9 \
+    --control-plane-machine-count 1 \
+    --worker-machine-count 3 > ./cluster.yaml
+kubectl create ns ${TEST_NAMESPACE}
+kubectl apply -f ./cluster.yaml -n ${TEST_NAMESPACE}
+```
+To customize the configuration of the default `cluster.yaml` file generated by CAPX, visit the [NutanixCluster](./types/nutanix_cluster.md) and [NutanixMachineTemplate](./types/nutanix_machine_template.md) documentation.
+
+### Access a workload cluster
+To access resources on the cluster, you can get the kubeconfig with the following:
+```
+clusterctl get kubeconfig ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} > ${TEST_CLUSTER_NAME}.kubeconfig
+kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig get nodes
+```
+
+### Install CNI on a workload cluster
+
+You must deploy a Container Network Interface (CNI) based pod network add-on so that your pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed.
+
+!!! note
+    Take care that your pod network must not overlap with any of the host networks. You are likely to see problems if there is any overlap. If you find a collision between your network plugin's preferred pod network and some of your host networks, you must choose a suitable alternative CIDR block to use instead. It can be configured inside the `cluster.yaml` generated by `clusterctl generate cluster` before applying it.
+
+Several external projects provide Kubernetes pod networks using CNI, some of which also support [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/){target=_blank}.
+
+See a list of add-ons that implement the [Kubernetes networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model){target=_blank}. At the time of writing, the most common are [Calico](https://www.tigera.io/project-calico/){target=_blank} and [Cilium](https://cilium.io){target=_blank}.
+
+Follow the specific install guide for your selected CNI and install only one pod network per cluster.
+
+Once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS pod is running in the output of `kubectl get pods --all-namespaces`.
+
+
+### Kube-vip settings
+
+Kube-vip is a true load balancing solution for the Kubernetes control plane. It distributes API requests across control plane nodes. It also has the capability to provide load balancing for Kubernetes services.
+
+You can tweak kube-vip settings by using the following properties:
+
+- `KUBEVIP_LB_ENABLE`
+
+This setting allows control plane load balancing using IPVS. See
+[Control Plane Load-Balancing documentation](https://kube-vip.io/docs/about/architecture/#control-plane-load-balancing){target=_blank} for further information.
+
+- `KUBEVIP_SVC_ENABLE`
+
+This setting enables a service of type LoadBalancer. See
+[Kubernetes Service Load Balancing documentation](https://kube-vip.io/docs/about/architecture/#kubernetes-service-load-balancing){target=_blank} for further information.
+
+- `KUBEVIP_SVC_ELECTION`
+
+This setting enables Load Balancing of Load Balancers. See [Load Balancing Load Balancers](https://kube-vip.io/docs/usage/kubernetes-services/#load-balancing-load-balancers-when-using-arp-mode-yes-you-read-that-correctly-kube-vip-v050){target=_blank} for further information.
+
+### Delete a workload cluster
+To remove a workload cluster from your management cluster, remove the cluster object and the provider will clean up all resources.
+
+```
+kubectl delete cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE}
+```
+!!! note
+    Deleting the entire cluster template with `kubectl delete -f ./cluster.yaml` may lead to pending resources requiring manual cleanup.
diff --git a/docs/capx/v1.6.x/pc_certificates.md b/docs/capx/v1.6.x/pc_certificates.md
new file mode 100644
index 00000000..f3fe1699
--- /dev/null
+++ b/docs/capx/v1.6.x/pc_certificates.md
@@ -0,0 +1,149 @@
+# Certificate Trust
+
+CAPX invokes Prism Central APIs using the HTTPS protocol. CAPX has different methods to handle the trust of the Prism Central certificates:
+
+- Enable certificate verification (default)
+- Configure an additional trust bundle
+- Disable certificate verification
+
+See the respective sections below for more information.
+
+!!! note
+    For more information about replacing Prism Central certificates, see the [Nutanix AOS Security Guide](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_5:mul-security-ssl-certificate-pc-t.html){target=_blank}.
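+
+As a quick sanity check (a sketch, not part of the original procedure), the certificate chain Prism Central presents can be inspected with `openssl`; `<prism-central-address>` is a placeholder and 9440 is the default Prism Central port used throughout these docs:
+
+```
+openssl s_client -connect <prism-central-address>:9440 -showcerts </dev/null
+```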
+
+## Enable certificate verification (default)
+By default, CAPX will perform certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
+No additional configuration is required in CAPX.
+
+## Configure an additional trust bundle
+CAPX allows users to configure an additional trust bundle. This will allow CAPX to verify certificates that are not issued by a publicly trusted certificate authority.
+
+To configure an additional trust bundle, the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable needs to be set. The value of the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable contains the trust bundle (PEM format) in base64 encoded format. See the [Configuring the trust bundle environment variable](#configuring-the-trust-bundle-environment-variable) section for more information.
+
+It is also possible to configure the additional trust bundle manually by creating a custom `cluster-template`. See the [Configuring the additional trust bundle manually](#configuring-the-additional-trust-bundle-manually) section for more information.
+
+The `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable can be set when initializing the CAPX provider or when creating a workload cluster. If the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` is configured when the CAPX provider is initialized, the additional trust bundle will be used for every CAPX workload cluster. If it is only configured when creating a workload cluster, it will only be applicable for that specific workload cluster.
+
+
+### Configuring the trust bundle environment variable
+
+Create a PEM encoded file containing the root certificate and all intermediate certificates. Example:
+```
+$ cat cert.crt
+-----BEGIN CERTIFICATE-----
+<certificate content>
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+<certificate content>
+-----END CERTIFICATE-----
+```
+
+Use a `base64` tool to encode these contents in base64. The command below will provide a `base64` string.
+```
+$ cat cert.crt | base64
+<base64 string>
+```
+!!! note
+    Make sure the `base64` string does not contain any newlines (`\n`). If the output string contains newlines, remove them manually or check the manual of the `base64` tool on how to generate a `base64` string without newlines.
+
+Use the `base64` string as value for the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable.
+```
+$ export NUTANIX_ADDITIONAL_TRUST_BUNDLE="<base64 string>"
+```
+
+### Configuring the additional trust bundle manually
+
+To configure the additional trust bundle manually without using the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable present in the default `cluster-template` files, it is required to:
+
+- Create a `ConfigMap` containing the additional trust bundle.
+- Configure the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec.
+
+#### Creating the additional trust bundle ConfigMap
+
+CAPX supports two different formats for the ConfigMap containing the additional trust bundle: the additional trust bundle can be added as a multi-line string in the `ConfigMap`, or it can be added in `base64` encoded format. See the examples below.
+
+Multi-line string example:
+```YAML
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+data:
+  ca.crt: |
+    -----BEGIN CERTIFICATE-----
+    <certificate content>
+    -----END CERTIFICATE-----
+    -----BEGIN CERTIFICATE-----
+    <certificate content>
+    -----END CERTIFICATE-----
+```
+
+`base64` example:
+
+```YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+binaryData:
+  ca.crt: <base64 string>
+```
+
+!!! note
+    The `base64` string needs to be added as `binaryData`.
+
+
+#### Configuring the NutanixCluster spec
+
+When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `NutanixCluster` spec. Add the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced.
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: ${NAMESPACE}
+spec:
+  ...
+  prismCentral:
+    ...
+    additionalTrustBundle:
+      kind: ConfigMap
+      name: user-ca-bundle
+    insecure: false
+```
+
+!!! note
+    The default value of the `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured.
+
+    If the `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled.
+
+
+## Disable certificate verification
+
+!!! note
+    Disabling certificate verification is not recommended for production purposes and should only be used for testing.
+
+
+Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `NutanixCluster` spec. Certificate verification will be disabled even if an additional trust bundle is configured.
+
+Disabled certificate verification example:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: ${NAMESPACE}
+spec:
+  controlPlaneEndpoint:
+    host: ${CONTROL_PLANE_ENDPOINT_IP}
+    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
+  prismCentral:
+    ...
+    insecure: true
+    ...
+```
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/port_requirements.md b/docs/capx/v1.6.x/port_requirements.md
new file mode 100644
index 00000000..af182abb
--- /dev/null
+++ b/docs/capx/v1.6.x/port_requirements.md
@@ -0,0 +1,19 @@
+# Port Requirements
+
+CAPX uses the ports documented below to create workload clusters.
+
+!!! note
+    This page only documents the ports specifically required by CAPX and does not provide the full overview of all ports required in the CAPI framework.
+
+## Management cluster
+
+| Source             | Destination         | Protocol | Port | Description                                                                                        |
+|--------------------|---------------------|----------|------|----------------------------------------------------------------------------------------------------|
+| Management cluster | External Registries | TCP      | 443  | Pull container images from [CAPX public registries](#public-registries-utilized-when-using-capx)  |
+| Management cluster | Prism Central       | TCP      | 9440 | Management cluster communication to Prism Central                                                  |
+
+## Public registries utilized when using CAPX
+
+| Registry name |
+|---------------|
+| ghcr.io       |
diff --git a/docs/capx/v1.6.x/tasks/capx_v16x_upgrade_procedure.md b/docs/capx/v1.6.x/tasks/capx_v16x_upgrade_procedure.md
new file mode 100644
index 00000000..8e998c97
--- /dev/null
+++ b/docs/capx/v1.6.x/tasks/capx_v16x_upgrade_procedure.md
@@ -0,0 +1,83 @@
+# CAPX v1.6.x Upgrade Procedure
+
+Starting from CAPX v1.3.0, it is required for all CAPX-managed Kubernetes clusters to use the Nutanix Cloud Controller Manager (CCM).
+
+Before upgrading CAPX instances to v1.3.0 or later, it is required to follow the [steps](#steps) detailed below for each of the CAPX-managed Kubernetes clusters that don't use Nutanix CCM.
+
+
+## Steps
+
+This procedure uses [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} to install Nutanix CCM but it can also be installed using the [Nutanix CCM Helm chart](https://artifacthub.io/packages/helm/nutanix/nutanix-cloud-provider){target=_blank}.
+
+!!! warning
+    Make sure [CRS](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled on the management cluster before following the procedure.
+
+Perform the following steps for each of the CAPX-managed Kubernetes clusters that are not configured to use Nutanix CCM:
+
+1. Add the `cloud-provider: external` configuration in the `KubeadmConfigTemplate` resources:
+    ```YAML
+    apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+    kind: KubeadmConfigTemplate
+    spec:
+      template:
+        spec:
+          joinConfiguration:
+            nodeRegistration:
+              kubeletExtraArgs:
+                cloud-provider: external
+    ```
+2. Add the `cloud-provider: external` configuration in the `KubeadmControlPlane` resource:
+```YAML
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          kubeletExtraArgs:
+            cloud-provider: external
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          cloud-provider: external
+      controllerManager:
+        extraArgs:
+          cloud-provider: external
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-provider: external
+    joinConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-provider: external
+```
+3. Add the Nutanix CCM CRS resources:
+
+    - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.6.0/templates/ccm/nutanix-ccm-crs.yaml){target=_blank}
+    - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.6.0/templates/ccm/nutanix-ccm-secret.yaml)
+    - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.6.0/templates/ccm/nutanix-ccm.yaml)
+
+    Make sure to update each of the variables before applying the `YAML` files.
+
+4. Add the `ccm: nutanix` label to the `Cluster` resource:
+    ```YAML
+    apiVersion: cluster.x-k8s.io/v1beta1
+    kind: Cluster
+    metadata:
+      labels:
+        ccm: nutanix
+    ```
+5. Verify that the Nutanix CCM pod is up and running:
+```
+kubectl get pod -A -l k8s-app=nutanix-cloud-controller-manager
+```
+6. Trigger a new rollout of the Kubernetes nodes by performing a Kubernetes upgrade or by using `clusterctl alpha rollout restart`. See the [clusterctl alpha rollout](https://cluster-api.sigs.k8s.io/clusterctl/commands/alpha-rollout#restart){target=_blank} documentation for more information.
+7. Upgrade CAPX to v1.6.0 by following the [clusterctl upgrade](https://cluster-api.sigs.k8s.io/clusterctl/commands/upgrade.html?highlight=clusterctl%20upgrade%20pla#clusterctl-upgrade){target=_blank} documentation.
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/tasks/modify_machine_configuration.md b/docs/capx/v1.6.x/tasks/modify_machine_configuration.md
new file mode 100644
index 00000000..04a43a95
--- /dev/null
+++ b/docs/capx/v1.6.x/tasks/modify_machine_configuration.md
@@ -0,0 +1,11 @@
+# Modifying Machine Configurations
+
+Since all attributes of the `NutanixMachineTemplate` resources are immutable, follow the [Updating Infrastructure Machine Templates](https://cluster-api.sigs.k8s.io/tasks/updating-machine-templates.html?highlight=machine%20template#updating-infrastructure-machine-templates){target=_blank} procedure to modify the configuration of machines in an existing CAPX cluster.
+See the [NutanixMachineTemplate](../types/nutanix_machine_template.md) documentation for all supported configuration parameters.
+
+!!! note
+    Manually modifying existing and linked `NutanixMachineTemplate` resources will not trigger a rolling update of the machines.
+
+!!! note
+    Do not modify the virtual machine configuration of CAPX cluster nodes manually in Prism/Prism Central.
+    CAPX will not automatically revert the configuration change but performing scale-up/scale-down/upgrade operations will override manual modifications. Only use the `Updating Infrastructure Machine Templates` procedure referenced above to perform configuration changes.
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/troubleshooting.md b/docs/capx/v1.6.x/troubleshooting.md
new file mode 100644
index 00000000..c023d13e
--- /dev/null
+++ b/docs/capx/v1.6.x/troubleshooting.md
@@ -0,0 +1,13 @@
+# Troubleshooting
+
+## Clusterctl failed with GitHub rate limit error
+
+By design, Clusterctl fetches artifacts from repositories hosted on GitHub; this operation is subject to [GitHub API rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting){target=_blank}.
+
+While this is generally okay for the majority of users, there is still a chance that some users (especially developers or CI tools) hit this limit:
+
+```
+Error: failed to get repository client for the XXX with name YYY: error creating the GitHub repository client: failed to get GitHub latest version: failed to get the list of versions: rate limit for github api has been reached. Please wait one hour or get a personal API tokens a assign it to the GITHUB_TOKEN environment variable
+```
+
+As explained in the error message, you can increase your API rate limit by [creating a GitHub personal token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token){target=_blank} and setting a `GITHUB_TOKEN` environment variable using the token.
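+
+For example, a sketch of the workaround (the token value is a placeholder, not a real token):
+
+```
+# Create a personal access token in GitHub first, then expose it to clusterctl
+# via the GITHUB_TOKEN environment variable before re-running the command.
+export GITHUB_TOKEN=<your-github-personal-access-token>
+clusterctl init -i nutanix
+```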
diff --git a/docs/capx/v1.6.x/types/nutanix_cluster.md b/docs/capx/v1.6.x/types/nutanix_cluster.md
new file mode 100644
index 00000000..09325cab
--- /dev/null
+++ b/docs/capx/v1.6.x/types/nutanix_cluster.md
@@ -0,0 +1,64 @@
+# NutanixCluster
+
+The `NutanixCluster` resource defines the configuration of a CAPX Kubernetes cluster.
+
+Example of a `NutanixCluster` resource:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: ${NAMESPACE}
+spec:
+  controlPlaneEndpoint:
+    host: ${CONTROL_PLANE_ENDPOINT_IP}
+    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
+  prismCentral:
+    address: ${NUTANIX_ENDPOINT}
+    additionalTrustBundle:
+      kind: ConfigMap
+      name: user-ca-bundle
+    credentialRef:
+      kind: Secret
+      name: ${CLUSTER_NAME}
+    insecure: ${NUTANIX_INSECURE=false}
+    port: ${NUTANIX_PORT=9440}
+```
+
+## NutanixCluster spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixCluster` resource.
+
+### Configuration parameters
+
+| Key                                        |Type  |Description                                                                         |
+|--------------------------------------------|------|------------------------------------------------------------------------------------|
+|controlPlaneEndpoint                        |object|Defines the host IP and port of the CAPX Kubernetes cluster.                       |
+|controlPlaneEndpoint.host                   |string|Host IP to be assigned to the CAPX Kubernetes cluster.                             |
+|controlPlaneEndpoint.port                   |int   |Port of the CAPX Kubernetes cluster. Default: `6443`                               |
+|prismCentral                                |object|(Optional) Prism Central endpoint definition.                                      |
+|prismCentral.address                        |string|IP/FQDN of Prism Central.                                                          |
+|prismCentral.port                           |int   |Port of Prism Central. Default: `9440`                                             |
+|prismCentral.insecure                       |bool  |Disable Prism Central certificate checking. Default: `false`                       |
+|prismCentral.credentialRef                  |object|Reference to credentials used for Prism Central connection.                        |
+|prismCentral.credentialRef.kind             |string|Kind of the credentialRef. Allowed value: `Secret`                                 |
+|prismCentral.credentialRef.name             |string|Name of the secret containing the Prism Central credentials.                       |
+|prismCentral.credentialRef.namespace        |string|(Optional) Namespace of the secret containing the Prism Central credentials.       |
+|prismCentral.additionalTrustBundle          |object|Reference to the certificate trust bundle used for Prism Central connection.       |
+|prismCentral.additionalTrustBundle.kind     |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap`                      |
+|prismCentral.additionalTrustBundle.name     |string|Name of the `ConfigMap` containing the Prism Central trust bundle.                 |
+|prismCentral.additionalTrustBundle.namespace|string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle. |
+|failureDomains                              |list  |(Optional) Failure domains for the Kubernetes nodes.                               |
+|failureDomains.[].name                      |string|Name of the failure domain.                                                        |
+|failureDomains.[].cluster                   |object|Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed. |
+|failureDomains.[].cluster.type              |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid`      |
+|failureDomains.[].cluster.name              |string|Name of the Prism Element cluster.                                                 |
+|failureDomains.[].cluster.uuid              |string|UUID of the Prism Element cluster.                                                 |
+|failureDomains.[].subnets                   |list  |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs.      |
+|failureDomains.[].subnets.[].type           |string|Type to identify the subnet. Allowed values: `name` and `uuid`                     |
+|failureDomains.[].subnets.[].name           |string|Name of the subnet.                                                                |
+|failureDomains.[].subnets.[].uuid           |string|UUID of the subnet.                                                                |
+|failureDomains.[].controlPlane              |bool  |Indicates if a failure domain is suited for control plane nodes.                   |
+
+!!! note
+    To prevent duplicate IP assignments, it is required to assign an IP address to the `controlPlaneEndpoint.host` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/types/nutanix_machine_template.md b/docs/capx/v1.6.x/types/nutanix_machine_template.md
new file mode 100644
index 00000000..4aa613b8
--- /dev/null
+++ b/docs/capx/v1.6.x/types/nutanix_machine_template.md
@@ -0,0 +1,124 @@
+# NutanixMachineTemplate
+The `NutanixMachineTemplate` resource defines the configuration of a CAPX Kubernetes VM.
+
+Example of a `NutanixMachineTemplate` resource:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-mt-0"
+  namespace: "${NAMESPACE}"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://${CLUSTER_NAME}-m1"
+      # Supported options for boot type: legacy and uefi
+      # Defaults to legacy if not set
+      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
+      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
+      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
+      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
+      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
+      image:
+        type: name
+        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
+      cluster:
+        type: name
+        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
+      subnet:
+      - type: name
+        name: "${NUTANIX_SUBNET_NAME}"
+      # Adds additional categories to the virtual machines.
+      # Note: Categories must already be present in Prism Central
+      # additionalCategories:
+      # - key: AppType
+      #   value: Kubernetes
+      # Adds the cluster virtual machines to a project defined in Prism Central.
+      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
+      # Note: Project must already be present in Prism Central.
+      # project:
+      #   type: name
+      #   name: "NUTANIX_PROJECT_NAME"
+      # gpus:
+      # - type: name
+      #   name: "GPU NAME"
+      # Note: Either of `image` or `imageLookup` must be set, but not both.
+      # imageLookup:
+      #   format: "NUTANIX_IMAGE_LOOKUP_FORMAT"
+      #   baseOS: "NUTANIX_IMAGE_LOOKUP_BASE_OS"
+      # dataDisks:
+      # - diskSize:
+      #   deviceProperties:
+      #     deviceType: Disk
+      #     adapterType: SCSI
+      #     deviceIndex: 1
+      #   storageConfig:
+      #     diskMode: Standard
+      #     storageContainer:
+      #       type: name
+      #       name: "NUTANIX_VM_DISK_STORAGE_CONTAINER"
+      #   dataSource:
+      #     type: name
+      #     name: "NUTANIX_DATA_SOURCE_IMAGE_NAME"
+```
+
+## NutanixMachineTemplate spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixMachineTemplate` resource.
+
+### Configuration parameters
+| Key                                                |Type  |Description                                                                                               |
+|----------------------------------------------------|------|----------------------------------------------------------------------------------------------------------|
+|bootType                                            |string|Boot type of the VM. Depends on the OS image used. Allowed values: `legacy`, `uefi`. Default: `legacy`   |
+|vcpusPerSocket                                      |int   |Amount of vCPUs per socket. Default: `1`                                                                  |
+|vcpuSockets                                         |int   |Amount of vCPU sockets. Default: `2`                                                                      |
+|memorySize                                          |string|Amount of Memory. Default: `4Gi`                                                                          |
+|systemDiskSize                                      |string|Amount of storage assigned to the system disk. Default: `40Gi`                                            |
+|image                                               |object|Reference (name or uuid) to the OS image used for the system disk.                                        |
+|image.type                                          |string|Type to identify the OS image. Allowed values: `name` and `uuid`                                          |
+|image.name                                          |string|Name of the image.                                                                                        |
+|image.uuid                                          |string|UUID of the image.                                                                                        |
+|cluster                                             |object|(Optional) Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed.             |
+|cluster.type                                        |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid`                             |
+|cluster.name                                        |string|Name of the Prism Element cluster.                                                                        |
+|cluster.uuid                                        |string|UUID of the Prism Element cluster.                                                                        |
+|subnets                                             |list  |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs.                             |
+|subnets.[].type                                     |string|Type to identify the subnet. Allowed values: `name` and `uuid`                                            |
+|subnets.[].name                                     |string|Name of the subnet.                                                                                       |
+|subnets.[].uuid                                     |string|UUID of the subnet.                                                                                       |
+|additionalCategories                                |list  |Reference to the categories to be assigned to the VMs. These categories must already exist in Prism Central. |
+|additionalCategories.[].key                         |string|Key of the category.                                                                                      |
+|additionalCategories.[].value                       |string|Value of the category.                                                                                    |
+|project                                             |object|Reference (name or uuid) to the project. This project must already exist in Prism Central.                |
+|project.type                                        |string|Type to identify the project. Allowed values: `name` and `uuid`                                           |
+|project.name                                        |string|Name of the project.                                                                                      |
+|project.uuid                                        |string|UUID of the project.                                                                                      |
+|gpus                                                |object|Reference (name or deviceID) to the GPUs to be assigned to the VMs. Can be vGPU or Passthrough.           |
+|gpus.[].type                                        |string|Type to identify the GPU. Allowed values: `name` and `deviceID`                                           |
+|gpus.[].name                                        |string|Name of the GPU or the vGPU profile                                                                       |
+|gpus.[].deviceID                                    |string|DeviceID of the GPU or the vGPU profile                                                                   |
+|imageLookup                                         |object|(Optional) Reference to a container that holds how to look up OS images for the cluster.                  |
+|imageLookup.format                                  |string|Naming format to look up the image for the machine. Default: `capx-{{.BaseOS}}-{{.K8sVersion}}-*`         |
+|imageLookup.baseOS                                  |string|Name of the base operating system to use for image lookup.                                                |
+|dataDisks                                           |list  |(Optional) Reference to the data disks to be attached to the VM.                                          |
+|dataDisks.[].diskSize                               |string|Size (in Quantity format) of the disk attached to the VM. The minimum diskSize is `1GB`.                  |
+|dataDisks.[].deviceProperties                       |object|(Optional) Reference to the properties of the disk device.                                                |
+|dataDisks.[].deviceProperties.deviceType            |string|VM disk device type. Allowed values: `Disk` (default) and `CDRom`                                         |
+|dataDisks.[].deviceProperties.adapterType           |string|Adapter type of the disk address.                                                                         |
+|dataDisks.[].deviceProperties.deviceIndex           |int   |(Optional) Index of the disk address. Allowed values: non-negative integers (default: `0`)                |
+|dataDisks.[].storageConfig                          |object|(Optional) Reference to the storage configuration parameters of the VM disks.                             |
+|dataDisks.[].storageConfig.diskMode                 |string|Specifies the disk mode. Allowed values: `Standard` (default) and `Flash`                                 |
+|dataDisks.[].storageConfig.storageContainer         |object|(Optional) Reference (name or uuid) to the storage container used by the VM disk.                         |
+|dataDisks.[].storageConfig.storageContainer.type    |string|Type to identify the storage container. Allowed values: `name` and `uuid`                                 |
+|dataDisks.[].storageConfig.storageContainer.name    |string|Name of the storage container.                                                                            |
+|dataDisks.[].storageConfig.storageContainer.uuid    |string|UUID of the storage container.                                                                            |
+|dataDisks.[].dataSource                             |object|(Optional) Reference (name or uuid) to a data source image for the VM disk.                               |
+|dataDisks.[].dataSource.type                        |string|Type to identify the data source image. Allowed values: `name` and `uuid`                                 |
+|dataDisks.[].dataSource.name                        |string|Name of the data source image.                                                                            |
+|dataDisks.[].dataSource.uuid                        |string|UUID of the data source image.                                                                            |
+
+!!! note
+    - The `cluster` or `subnets` configuration parameters are optional in case failure domains are defined on the `NutanixCluster` and `MachineDeployment` resources.
+    - If the `deviceType` is `Disk`, the valid `adapterType` can be `SCSI`, `IDE`, `PCI`, `SATA` or `SPAPR`. If the `deviceType` is `CDRom`, the valid `adapterType` can be `IDE` or `SATA`.
+    - Either of `image` or `imageLookup` must be set, but not both.
+    - For a Machine VM, the `deviceIndex` for the disks with the same `deviceType.adapterType` combination should start from `0` and increase consecutively afterwards. Note that for each Machine VM, the `Disk.SCSI.0` and `CDRom.IDE.0` are reserved to be used by the VM's system. So for `dataDisks` of Disk.SCSI and CDRom.IDE, the `deviceIndex` should start from `1`.
\ No newline at end of file
diff --git a/docs/capx/v1.6.x/user_requirements.md b/docs/capx/v1.6.x/user_requirements.md
new file mode 100644
index 00000000..5a4b8604
--- /dev/null
+++ b/docs/capx/v1.6.x/user_requirements.md
@@ -0,0 +1,36 @@
+# User Requirements
+
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account.
+
+CAPX supports two types of PC users:
+
+- Local users: must be assigned the `Prism Central Admin` role.
+- Domain users: must be assigned a role that at least has the [Minimum required CAPX permissions for domain users](#minimum-required-capx-permissions-for-domain-users) assigned.
+
+See [Credential Management](./credential_management.md){target=_blank} for more information on how to pass the user credentials to CAPX.
+
+## Minimum required CAPX permissions for domain users
+
+The following permissions are required for Prism Central domain users:
+
+- Create Category Mapping
+- Create Image
+- Create Or Update Name Category
+- Create Or Update Value Category
+- Create Virtual Machine
+- Delete Category Mapping
+- Delete Image
+- Delete Name Category
+- Delete Value Category
+- Delete Virtual Machine
+- View Category Mapping
+- View Cluster
+- View Image
+- View Name Category
+- View Project
+- View Subnet
+- View Value Category
+- View Virtual Machine
+
+!!! note
+    The list of permissions has been validated on PC 2022.6 and above.
diff --git a/docs/capx/v1.6.x/validated_integrations.md b/docs/capx/v1.6.x/validated_integrations.md
new file mode 100644
index 00000000..066a9e00
--- /dev/null
+++ b/docs/capx/v1.6.x/validated_integrations.md
@@ -0,0 +1,68 @@
+# Validated Integrations
+
+Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers are using or deploying with CAPX. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process.
+
+## Integration Validation Policy
+
+Nutanix follows the version validation policies below:
+
+- Validate at least one active AOS LTS (long term support) version. Validated AOS LTS version for a specific CAPX version is listed in the [AOS](#aos) section.
+
+    !!! note
+
+        Typically the latest LTS release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+- Validate the latest AOS STS (short term support) release at time of CAPX release.
+- Validate at least one active Prism Central (PC) version. Validated PC version for a specific CAPX version is listed in the [Prism Central](#prism-central) section.
+
+    !!! note
+
+        Typically the latest PC release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+- Validate at least one active Cluster-API (CAPI) version. Validated CAPI version for a specific CAPX version is listed in the [Cluster-API](#cluster-api) section.
+
+    !!! note
+
+        Typically the latest Cluster-API release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+## Validated versions
+### Cluster-API
+| CAPX   | CAPI v1.3.x | CAPI v1.4.x | CAPI v1.5.x | CAPI v1.6.x | CAPI v1.7.x | CAPI v1.8.x | CAPI v1.9.x |
+|--------|-------------|-------------|-------------|-------------|-------------|-------------|-------------|
+| v1.6.x | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
+| v1.5.x | Yes | Yes | Yes | Yes | Yes | Yes | No |
+| v1.4.x | Yes | Yes | Yes | Yes | Yes | No | No |
+| v1.3.x | Yes | Yes | Yes | Yes | No | No | No |
+| v1.2.x | Yes | Yes | Yes | No | No | No | No |
+| v1.1.x | Yes | No | No | No | No | No | No |
+| v1.0.x | No | No | No | No | No | No | No |
+| v0.5.x | No | No | No | No | No | No | No |
+
+See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions.
+
+### AOS
+
+| CAPX   | 5.20.4.5 (LTS) | 6.1.1.5 (STS) | 6.5.x (LTS) | 6.6 (STS) | 6.7 (STS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
+|--------|----------------|---------------|-------------|-----------|-----------|-----------|------|-----|-----|
+| v1.6.x | No | No | Yes | No | No | Yes | Yes | Yes | Yes |
+| v1.5.x | No | No | Yes | No | No | Yes | Yes | Yes | Yes |
+| v1.4.x | No | No | Yes | No | No | Yes | No | No | No |
+| v1.3.x | No | No | Yes | Yes | Yes | No | No | No | No |
+| v1.2.x | No | No | Yes | Yes | Yes | No | No | No | No |
+| v1.1.x | No | No | Yes | No | No | No | No | No | No |
+| v1.0.x | Yes | Yes | No | No | No | No | No | No | No |
+| v0.5.x | Yes | Yes | No | No | No | No | No | No | No |
+
+
+### Prism Central
+
+| CAPX   | 2022.1.0.2 | pc.2022.6 | pc.2022.9 | pc.2023.x | pc.2024.x | pc.7.3 |
+|--------|------------|-----------|-----------|-----------|-----------|--------|
+| v1.6.x | No | Yes | No | Yes | Yes | Yes |
+| v1.5.x | No | Yes | No | Yes | Yes | Yes |
+| v1.4.x | No | Yes | No | Yes | Yes | No |
+| v1.3.x | No | Yes | No | Yes | No | No |
+| v1.2.x | No | Yes | Yes | Yes | No | No |
+| v1.1.x | No | Yes | No | No | No | No |
+| v1.0.x | Yes | Yes | No | No | No | No |
+| v0.5.x | Yes | Yes | No | No | No | No |
diff --git a/mkdocs.yml b/mkdocs.yml
index bc98a761..54c95fe8 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -18,7 +18,30 @@ nav:
   - "Cloud Native":
     - "Overview": "index.md"
    - "Cluster API Provider: Nutanix (CAPX)":
-      - "v1.5.x (latest)":
+      - "v1.6.x (latest)":
+          - "Getting Started": "capx/v1.6.x/getting_started.md"
+          - "Types":
+              - "NutanixCluster": "capx/v1.6.x/types/nutanix_cluster.md"
+              - "NutanixMachineTemplate": "capx/v1.6.x/types/nutanix_machine_template.md"
+          - "Certificate Trust": "capx/v1.6.x/pc_certificates.md"
+          - "Credential Management": "capx/v1.6.x/credential_management.md"
+          - "Tasks":
+              - "Modifying Machine Configuration": "capx/v1.6.x/tasks/modify_machine_configuration.md"
+              - "CAPX v1.6.x Upgrade Procedure": "capx/v1.6.x/tasks/capx_v16x_upgrade_procedure.md"
+          - "Port Requirements": "capx/v1.6.x/port_requirements.md"
+          - "User Requirements": "capx/v1.6.x/user_requirements.md"
+          - "Addons":
+              - "CSI Driver Installation": "capx/v1.6.x/addons/install_csi_driver.md"
+          - "Validated Integrations": "capx/v1.6.x/validated_integrations.md"
+          - "Experimental":
+              - "Multi-PE CAPX cluster": "capx/v1.6.x/experimental/capx_multi_pe.md"
+              - "Autoscaler": "capx/v1.6.x/experimental/autoscaler.md"
+              - "OIDC Integration": "capx/v1.6.x/experimental/oidc.md"
+              - "Flow VPC": "capx/v1.6.x/experimental/vpc.md"
+              - "Proxy Configuration": "capx/v1.6.x/experimental/proxy.md"
+              - "Registry Mirror Configuration": "capx/v1.6.x/experimental/registry_mirror.md"
+              - "Troubleshooting": "capx/v1.6.x/troubleshooting.md"
+      - "v1.5.x":
           - "Getting Started": "capx/v1.5.x/getting_started.md"
           - "Types":
               - "NutanixCluster": "capx/v1.5.x/types/nutanix_cluster.md"
@@ -27,7 +50,7 @@
           - "Credential Management": "capx/v1.5.x/credential_management.md"
           - "Tasks":
               - "Modifying Machine Configuration": "capx/v1.5.x/tasks/modify_machine_configuration.md"
-              - "CAPX v1.5.x Upgrade Procedure": "capx/v1.5.x/tasks/capx_v14x_upgrade_procedure.md"
+              - "CAPX v1.5.x Upgrade Procedure": "capx/v1.5.x/tasks/capx_v15x_upgrade_procedure.md"
           - "Port Requirements": "capx/v1.5.x/port_requirements.md"
           - "User Requirements": "capx/v1.5.x/user_requirements.md"
           - "Addons":

From 00832308eb4c8b021c9c68c63d719af0bddc6472 Mon Sep 17 00:00:00 2001
From: Atul Verma
Date: Wed, 30 Jul 2025 20:35:55 +0530
Subject: [PATCH 08/15] purge validated integrations table for capx v1.6.x
 (#72)

* purge validated integrations table for capx v1.6.x
* fix validated versions for v1.6.x
* tidy validated integrations
---
 docs/capx/v1.6.x/validated_integrations.md | 36 ++++++----------------
 1 file changed, 10 insertions(+), 26 deletions(-)

diff --git a/docs/capx/v1.6.x/validated_integrations.md b/docs/capx/v1.6.x/validated_integrations.md
index 066a9e00..6240f4b7 100644
--- a/docs/capx/v1.6.x/validated_integrations.md
+++ b/docs/capx/v1.6.x/validated_integrations.md
@@ -32,37 +32,21 @@ Nutanix follows the version validation policies below:
 | v1.6.x | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
 | v1.5.x | Yes | Yes | Yes | Yes | Yes | Yes | No |
 | v1.4.x | Yes | Yes | Yes | Yes | Yes | No | No |
-| v1.3.x | Yes | Yes | Yes | Yes | No | No | No |
-| v1.2.x | Yes | Yes | Yes | No | No | No | No |
-| v1.1.x | Yes | No | No | No | No | No | No |
-| v1.0.x | No | No | No | No | No | No | No |
-| v0.5.x | No | No | No | No | No | No | No |
 
 See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions.
 
 ### AOS
 
-| CAPX   | 5.20.4.5 (LTS) | 6.1.1.5 (STS) | 6.5.x (LTS) | 6.6 (STS) | 6.7 (STS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
-|--------|----------------|---------------|-------------|-----------|-----------|-----------|------|-----|-----|
-| v1.6.x | No | No | Yes | No | No | Yes | Yes | Yes | Yes |
-| v1.5.x | No | No | Yes | No | No | Yes | Yes | Yes | Yes |
-| v1.4.x | No | No | Yes | No | No | Yes | No | No | No |
-| v1.3.x | No | No | Yes | Yes | Yes | No | No | No | No |
-| v1.2.x | No | No | Yes | Yes | Yes | No | No | No | No |
-| v1.1.x | No | No | Yes | No | No | No | No | No | No |
-| v1.0.x | Yes | Yes | No | No | No | No | No | No | No |
-| v0.5.x | Yes | Yes | No | No | No | No | No | No | No |
-
+| CAPX   | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
+|--------|-------------|-----------|------|-----|-----|
+| v1.6.x | No | Yes | Yes | Yes | Yes |
+| v1.5.x | Yes | Yes | Yes | Yes | Yes |
+| v1.4.x | Yes | Yes | No | No | No |
 
 ### Prism Central
 
-| CAPX   | 2022.1.0.2 | pc.2022.6 | pc.2022.9 | pc.2023.x | pc.2024.x | pc.7.3 |
-|--------|------------|-----------|-----------|-----------|-----------|--------|
-| v1.6.x | No | Yes | No | Yes | Yes | Yes |
-| v1.5.x | No | Yes | No | Yes | Yes | Yes |
-| v1.4.x | No | Yes | No | Yes | Yes | No |
-| v1.3.x | No | Yes | No | Yes | No | No |
-| v1.2.x | No | Yes | Yes | Yes | No | No |
-| v1.1.x | No | Yes | No | No | No | No |
-| v1.0.x | Yes | Yes | No | No | No | No |
-| v0.5.x | Yes | Yes | No | No | No | No |
+| CAPX   | pc.2022.6 | pc.2023.x | pc.2024.x | pc.7.3 |
+|--------|-----------|-----------|-----------|--------|
+| v1.6.x | No | Yes | Yes | Yes |
+| v1.5.x | Yes | Yes | Yes | Yes |
+| v1.4.x | Yes | Yes | Yes | No |

From ac14472f6bda55bd3d0c456b11069586db79f18d Mon Sep 17 00:00:00 2001
From: rohithkumar-nutanix
Date: Wed, 30 Jul 2025 20:53:24 +0530
Subject: [PATCH 09/15] docs: adds docs for ccm v0.4x and 0.5x (#71)

* feat: docs(ccm) update v0.4.x and v0.5.x feature documentation based on changelog
* chore: update description of igonorednodeip's
---
 docs/ccm/v0.4.x/ccm_configuration.md | 2 ++
 docs/ccm/v0.5.x/ccm_configuration.md | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/docs/ccm/v0.4.x/ccm_configuration.md b/docs/ccm/v0.4.x/ccm_configuration.md
index c7b7d2b7..63e1b714 100644
--- a/docs/ccm/v0.4.x/ccm_configuration.md
+++ b/docs/ccm/v0.4.x/ccm_configuration.md
@@ -27,6 +27,7 @@ data:
       }
     },
     "enableCustomLabeling": false,
+    "ignoredNodeIPs": [],
     "topologyDiscovery": {
       "type": "Categories",
       "topologyCategories": {
@@ -50,6 +51,7 @@ The table below provides an overview of the supported configuration parameters.
 |topologyDiscovery.topologyCategories.regionCategory|string|Category key defining the region of the Kubernetes node.                                                  |
 |topologyDiscovery.topologyCategories.zoneCategory  |string|Category key defining the zone of the Kubernetes node.                                                    |
 |enableCustomLabeling                               |bool  |Boolean value to enable custom labeling. See [Custom Labeling](./custom_labeling.md) for more information.<br>Default: `false` |
+|ignoredNodeIPs                                     |array |List of node IPs to ignore. Optional.                                                                     |
 |prismCentral                                       |object|Prism Central endpoint configuration.                                                                     |
 |prismCentral.address                               |string|FQDN/IP of the Prism Central endpoint.                                                                    |
 |prismCentral.port                                  |int   |Port to connect to Prism Central.<br>Default: `9440` |
diff --git a/docs/ccm/v0.5.x/ccm_configuration.md b/docs/ccm/v0.5.x/ccm_configuration.md
index c7b7d2b7..1df8e394 100644
--- a/docs/ccm/v0.5.x/ccm_configuration.md
+++ b/docs/ccm/v0.5.x/ccm_configuration.md
@@ -27,6 +27,7 @@ data:
       }
     },
     "enableCustomLabeling": false,
+    "ignoredNodeIPs": [],
     "topologyDiscovery": {
       "type": "Categories",
      "topologyCategories": {
@@ -50,6 +51,7 @@ The table below provides an overview of the supported configuration parameters.
 |topologyDiscovery.topologyCategories.regionCategory|string|Category key defining the region of the Kubernetes node.                                                  |
 |topologyDiscovery.topologyCategories.zoneCategory  |string|Category key defining the zone of the Kubernetes node.                                                    |
 |enableCustomLabeling                               |bool  |Boolean value to enable custom labeling. See [Custom Labeling](./custom_labeling.md) for more information.<br>Default: `false` |
+|ignoredNodeIPs                                     |array |List of node IPs, IP ranges (e.g. "10.0.0.1-10.0.0.10"), or CIDR prefixes (e.g. "10.0.0.0/24") to ignore. Optional. |
 |prismCentral                                       |object|Prism Central endpoint configuration.                                                                     |
 |prismCentral.address                               |string|FQDN/IP of the Prism Central endpoint.                                                                    |
 |prismCentral.port                                  |int   |Port to connect to Prism Central.<br>Default: `9440` |

From 58c4f55c4149c445db1062bfbb6e3803ecd8eb90 Mon Sep 17 00:00:00 2001
From: Abhay Aggrawal
Date: Fri, 29 Aug 2025 11:42:02 +0530
Subject: [PATCH 10/15] NCN-109101: Opendocs for capx version 1.7 (#73)

* opendocs for capx version 1.7
* adding docs for NutanixFailureDomains
* adding example for failure domain
* Update nutanix_failure_domains.md
* adding steps to configure faillure domains
* adding steps to configure faillure domains
* adding steps to configure faillure domains
* adding steps to configure faillure domains
---
 docs/capx/latest                                       |   2 +-
 docs/capx/v1.7.x/addons/install_csi_driver.md          | 215 ++++++++++++++
 docs/capx/v1.7.x/credential_management.md              |  93 ++++++
 docs/capx/v1.7.x/experimental/autoscaler.md            | 129 ++++++++
 docs/capx/v1.7.x/experimental/capx_multi_pe.md         |  30 ++
 docs/capx/v1.7.x/experimental/oidc.md                  |  31 ++
 docs/capx/v1.7.x/experimental/proxy.md                 |  62 ++++
 docs/capx/v1.7.x/experimental/registry_mirror.md       |  96 ++++++
 docs/capx/v1.7.x/experimental/vpc.md                   |  40 +++
 docs/capx/v1.7.x/getting_started.md                    | 280 ++++++++++++++++++
 docs/capx/v1.7.x/pc_certificates.md                    | 149 ++++++++++
 docs/capx/v1.7.x/port_requirements.md                  |  19 ++
 docs/capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md  |  83 ++++++
 docs/capx/v1.7.x/tasks/modify_machine_configuration.md |  11 +
 docs/capx/v1.7.x/troubleshooting.md                    |  13 +
 docs/capx/v1.7.x/types/nutanix_cluster.md              |  55 ++++
 docs/capx/v1.7.x/types/nutanix_failure_domains.md      |  99 +++++++
 docs/capx/v1.7.x/types/nutanix_machine_template.md     | 124 ++++++++
 docs/capx/v1.7.x/user_requirements.md                  |  36 +++
 docs/capx/v1.7.x/validated_integrations.md             |  55 ++++
 mkdocs.yml                                             |  25 +-
 21 files changed, 1645 insertions(+), 2 deletions(-)
 create mode 100644 docs/capx/v1.7.x/addons/install_csi_driver.md
 create mode 100644 docs/capx/v1.7.x/credential_management.md
 create mode 100644 docs/capx/v1.7.x/experimental/autoscaler.md
 create mode 100644 docs/capx/v1.7.x/experimental/capx_multi_pe.md
 create mode 100644 docs/capx/v1.7.x/experimental/oidc.md
 create mode 100644 docs/capx/v1.7.x/experimental/proxy.md
 create mode 100644 docs/capx/v1.7.x/experimental/registry_mirror.md
 create mode 100644 docs/capx/v1.7.x/experimental/vpc.md
 create mode 100644 docs/capx/v1.7.x/getting_started.md
 create mode 100644 docs/capx/v1.7.x/pc_certificates.md
 create mode 100644 docs/capx/v1.7.x/port_requirements.md
 create mode 100644 docs/capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md
 create mode 100644 docs/capx/v1.7.x/tasks/modify_machine_configuration.md
 create mode 100644 docs/capx/v1.7.x/troubleshooting.md
 create mode 100644 docs/capx/v1.7.x/types/nutanix_cluster.md
 create mode 100644 docs/capx/v1.7.x/types/nutanix_failure_domains.md
 create mode 100644 docs/capx/v1.7.x/types/nutanix_machine_template.md
 create mode 100644 docs/capx/v1.7.x/user_requirements.md
 create mode 100644 docs/capx/v1.7.x/validated_integrations.md

diff --git a/docs/capx/latest b/docs/capx/latest
index 2d54a8c3..bbdaae4e 120000
--- a/docs/capx/latest
+++ b/docs/capx/latest
@@ -1 +1 @@
-v1.6.x
\ No newline at end of file
+v1.7.x
\ No newline at end of file
diff --git a/docs/capx/v1.7.x/addons/install_csi_driver.md b/docs/capx/v1.7.x/addons/install_csi_driver.md
new file mode 100644
index 00000000..afb4bdc8
--- /dev/null
+++ b/docs/capx/v1.7.x/addons/install_csi_driver.md
@@ -0,0 +1,215 @@
+# Nutanix CSI Driver installation with CAPX
+
+The Nutanix CSI driver is fully supported on CAPI/CAPX deployed clusters where all the nodes meet the [Nutanix CSI driver prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+
+There are three methods to install the Nutanix CSI driver on a CAPI/CAPX cluster:
+
+- Helm
+- ClusterResourceSet
+- CAPX Flavor
+
+For more information, check the next sections.
+
+## CAPI Workload cluster prerequisites for the Nutanix CSI Driver
+
+Kubernetes workers need the following prerequisites to use the Nutanix CSI Drivers:
+
+- iSCSI initiator package (for Volumes based block storage)
+- NFS client package (for Files based storage)
+
+These packages may already be present in the image you use with your infrastructure provider, or you can rely on your bootstrap provider to install them. More info is available in the [Prerequisites docs](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-plugin-prerequisites-r.html){target=_blank}.
+
+The package names and installation method will also vary depending on the operating system you plan to use.
+
+In the example below, the `kubeadm` bootstrap provider is used to deploy these packages on top of an Ubuntu 20.04 image. The `kubeadm` bootstrap provider allows defining `preKubeadmCommands` that will be launched before Kubernetes cluster creation. These `preKubeadmCommands` can be defined both in `KubeadmControlPlane` for master nodes and in `KubeadmConfigTemplate` for worker nodes.
+
+For an Ubuntu 20.04 image, both `KubeadmControlPlane` and `KubeadmConfigTemplate` must be modified as shown below:
+
+```yaml
+spec:
+  template:
+    spec:
+      # .......
+      preKubeadmCommands:
+      - echo "before kubeadm call" > /var/log/prekubeadm.log
+      - apt update
+      - apt install -y nfs-common open-iscsi
+      - systemctl enable --now iscsid
+```
+## Install the Nutanix CSI Driver with Helm
+
+A recent [Helm](https://helm.sh){target=_blank} version is needed (tested with Helm v3.10.1).
+
+The example below must be applied on a ready workload cluster. The workload cluster's kubeconfig can be retrieved and used to connect with the following command:
+
+```shell
+clusterctl get kubeconfig $CLUSTER_NAME -n $CLUSTER_NAMESPACE > $CLUSTER_NAME-KUBECONFIG
+export KUBECONFIG=$(pwd)/$CLUSTER_NAME-KUBECONFIG
+```
+
+Once connected to the cluster, follow the [CSI documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-driver-install-t.html){target=_blank}.
+
+First, install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot){target=_blank} chart followed by the [nutanix-csi-storage](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage){target=_blank} chart.
+
+See an example below:
+
+```shell
+# Add the official Nutanix Helm repo and get the latest update
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+# Install the nutanix-csi-snapshot chart
+helm install nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system --create-namespace
+
+# Install the nutanix-csi-storage chart
+helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --set createSecret=false
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Install the Nutanix CSI Driver with `ClusterResourceSet`
+
+The `ClusterResourceSet` feature was introduced to automatically apply a set of resources (such as CNI/CSI) defined by administrators to matching created/existing workload clusters.
### Enabling the `ClusterResourceSet` feature

At the time of writing, `ClusterResourceSet` is an experimental feature that must be enabled during the initialization of a management cluster with the `EXP_CLUSTER_RESOURCE_SET` feature gate.

To do this, add `EXP_CLUSTER_RESOURCE_SET: "true"` in the `clusterctl` configuration file or just `export EXP_CLUSTER_RESOURCE_SET=true` before initializing the management cluster with `clusterctl init`.

If the management cluster is already initialized, the `ClusterResourceSet` feature can be enabled by changing the configuration of the `capi-controller-manager` deployment in the `capi-system` namespace.

```shell
kubectl edit deployment -n capi-system capi-controller-manager
```

Locate the section below:

```yaml
  - args:
    - --leader-elect
    - --metrics-bind-addr=localhost:8080
    - --feature-gates=MachinePool=false,ClusterResourceSet=false,ClusterTopology=false
```

Then replace `ClusterResourceSet=false` with `ClusterResourceSet=true`.

!!! note
    Editing the `deployment` resource will cause Kubernetes to automatically start new versions of the containers with the feature enabled.



### Prepare the Nutanix CSI `ClusterResourceSet`

#### Create the `ConfigMap` for the CSI Plugin

First, create a `ConfigMap` that contains a YAML manifest with all resources to install the Nutanix CSI driver.

Since the Nutanix CSI Driver is provided as a Helm chart, use `helm` to extract it before creating the `ConfigMap`. See an example below:

```shell
helm repo add nutanix https://nutanix.github.io/helm/
helm repo update

kubectl create ns ntnx-system --dry-run=client -o yaml > nutanix-csi-namespace.yaml
helm template nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system > nutanix-csi-snapshot.yaml
helm template nutanix-csi-storage nutanix/nutanix-csi-storage -n ntnx-system > nutanix-csi-storage.yaml

kubectl create configmap nutanix-csi-crs --from-file=nutanix-csi-namespace.yaml --from-file=nutanix-csi-snapshot.yaml --from-file=nutanix-csi-storage.yaml
```

#### Create the `ClusterResourceSet`

Next, create the `ClusterResourceSet` resource that will map the `ConfigMap` defined above to clusters using a `clusterSelector`.

The `ClusterResourceSet` needs to be created inside the management cluster. See an example below:

```yaml
---
apiVersion: addons.cluster.x-k8s.io/v1alpha3
kind: ClusterResourceSet
metadata:
  name: nutanix-csi-crs
spec:
  clusterSelector:
    matchLabels:
      csi: nutanix
  resources:
    - kind: ConfigMap
      name: nutanix-csi-crs
```

The `clusterSelector` field controls how Cluster API will match this `ClusterResourceSet` on one or more workload clusters. In the example scenario, the `matchLabels` approach is used: the `ClusterResourceSet` will be applied to all workload clusters that have the `csi: nutanix` label. If the label isn't present, the `ClusterResourceSet` won't apply to that workload cluster.

The `resources` field references the `ConfigMap` created above, which contains the manifests for installing the Nutanix CSI driver.

#### Assign the `ClusterResourceSet` to a workload cluster

Assign this `ClusterResourceSet` to the workload cluster by adding the correct label to the `Cluster` resource.

This can be done before workload cluster creation by editing the output of the `clusterctl generate cluster` command or by modifying an already deployed workload cluster.
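For an already deployed cluster, the label can also be applied directly with `kubectl`; a minimal sketch, assuming a workload cluster named `my-cluster` in the `default` namespace:

```shell
# Label the existing Cluster resource so the ClusterResourceSet above matches it
kubectl label cluster my-cluster -n default csi=nutanix
```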
In both cases, `Cluster` resources should look like this:

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: workload-cluster-name
  namespace: workload-cluster-namespace
  labels:
    csi: nutanix
# ...
```

!!! warning
    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.

## Install the Nutanix CSI Driver with a CAPX flavor

The CAPX provider can utilize a flavor to automatically deploy the Nutanix CSI driver using a `ClusterResourceSet`.

### Prerequisites

The following requirements must be met:

- The operating system must meet the [Nutanix CSI OS prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
- The management cluster must be installed with the [`CLUSTER_RESOURCE_SET` feature gate](#enabling-the-clusterresourceset-feature).

### Installation

Specify the `csi` flavor during workload cluster creation. See an example below:

```shell
clusterctl generate cluster my-cluster -f csi
```

Additional environment variables are required:

- `WEBHOOK_CA`: Base64-encoded CA certificate used to sign the webhook certificate
- `WEBHOOK_CERT`: Base64-encoded certificate for the webhook validation component
- `WEBHOOK_KEY`: Base64-encoded key for the webhook validation component

The three components referenced above can be automatically created and referenced using [this script](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/main/scripts/gen-self-cert.sh){target=_blank}:

```
source scripts/gen-self-cert.sh
```

The certificate must reference the following names:

- csi-snapshot-webhook
- csi-snapshot-webhook.ntnx-system
- csi-snapshot-webhook.ntnx-system.svc

!!! warning
    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.

## Nutanix CSI Driver Configuration

After the driver is installed, it must be configured for use by defining, at a minimum, a `Secret` and a `StorageClass`.

This can be done manually in the workload clusters or by using a `ClusterResourceSet` in the management cluster as explained above.

See the official [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:CSI-Volume-Driver-v2_6){target=_blank} on the Nutanix Portal for more configuration information.
diff --git a/docs/capx/v1.7.x/credential_management.md b/docs/capx/v1.7.x/credential_management.md
new file mode 100644
index 00000000..bebbc5a0
--- /dev/null
+++ b/docs/capx/v1.7.x/credential_management.md
@@ -0,0 +1,93 @@
# Credential Management
Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs to manage the required Kubernetes cluster infrastructure resources.

PC credentials are required to authenticate to the PC APIs. CAPX currently supports two mechanisms to supply the required credentials:

- Credentials injected into the CAPX manager deployment
- Workload cluster specific credentials

## Credentials injected into the CAPX manager deployment
By default, credentials will be injected into the CAPX manager deployment when CAPX is initialized. See the [getting started guide](./getting_started.md) for more information on the initialization.

Upon initialization, a `nutanix-creds` secret will automatically be created in the `capx-system` namespace. This secret will contain the values supplied via the `NUTANIX_USER` and `NUTANIX_PASSWORD` parameters.
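To verify that the injected credentials exist, the secret can be inspected from the management cluster; a minimal sketch:

```shell
# Show the automatically created credentials secret (values are base64 encoded)
kubectl get secret nutanix-creds -n capx-system -o jsonpath='{.data.credentials}' | base64 -d
```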
+ +The `nutanix-creds` secret will be used for workload cluster deployment if no other credential is supplied. + +### Example +An example of the automatically created `nutanix-creds` secret can be found below: +```yaml +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: nutanix-creds + namespace: capx-system +stringData: + credentials: | + [ + { + "type": "basic_auth", + "data": { + "prismCentral":{ + "username": "", + "password": "" + }, + "prismElements": null + } + } + ] +``` + +## Workload cluster specific credentials +Users can override the [credentials injected in CAPX manager deployment](#credentials-injected-into-the-capx-manager-deployment) by supplying a credential specific to a workload cluster. The credentials can be supplied by creating a secret in the same namespace as the `NutanixCluster` namespace. + +The secret can be referenced by adding a `credentialRef` inside the `prismCentral` attribute contained in the `NutanixCluster`. +The secret will also be deleted when the `NutanixCluster` is deleted. + +Note: There is a 1:1 relation between the secret and the `NutanixCluster` object. + +### Example +Create a secret in the namespace of the `NutanixCluster`: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: "" + namespace: "" +stringData: + credentials: | + [ + { + "type": "basic_auth", + "data": { + "prismCentral":{ + "username": "", + "password": "" + }, + "prismElements": null + } + } + ] +``` + +Add a `prismCentral` and corresponding `credentialRef` to the `NutanixCluster`: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "" + namespace: "" +spec: + prismCentral: + ... + credentialRef: + name: "" + kind: Secret +... +``` + +See the [NutanixCluster](./types/nutanix_cluster.md) documentation for all supported configuration parameters for the `prismCentral` and `credentialRef` attribute. \ No newline at end of file diff --git a/docs/capx/v1.7.x/experimental/autoscaler.md b/docs/capx/v1.7.x/experimental/autoscaler.md new file mode 100644 index 00000000..2af57213 --- /dev/null +++ b/docs/capx/v1.7.x/experimental/autoscaler.md @@ -0,0 +1,129 @@ +# Using Autoscaler in combination with CAPX + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +[Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank} can be used in combination with Cluster API to automatically add or remove machines in a cluster. + +Autoscaler can be used in different deployment scenarios. This page will provide an overview of multiple autoscaler deployment scenarios in combination with CAPX. +See the [Testing](#testing) section to see how scale-up/scale-down events can be triggered to validate the autoscaler behaviour. + +More in-depth information on Autoscaler functionality can be found in the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank}. + +All Autoscaler configuration parameters can be found [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank}. + +## Scenario 1: Management cluster managing an external workload cluster +In this scenario, Autoscaler will be running on a management cluster and it will manage an external workload cluster. 
See the management cluster managing an external workload cluster section of the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster){target=_blank} for more information.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.

    !!! note
        Make sure a CNI is installed in the workload cluster.

2. Download the example [Autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
3. Modify the `deployment.yaml` file:
    - Change the namespace of all resources to the namespace of the workload cluster.
    - Choose an autoscaler image.
    - Change the following parameters in the `Deployment` resource:
```YAML
spec:
  containers:
  - name: cluster-autoscaler
    command:
    - /cluster-autoscaler
    args:
    - --cloud-provider=clusterapi
    - --kubeconfig=/mnt/kubeconfig/kubeconfig.yml
    - --clusterapi-cloud-config-authoritative
    - -v=1
    volumeMounts:
    - mountPath: /mnt/kubeconfig
      name: kubeconfig
      readOnly: true
  ...
  volumes:
  - name: kubeconfig
    secret:
      secretName: <cluster-name>-kubeconfig
      items:
      - key: value
        path: kubeconfig.yml
```
4. Apply the `deployment.yaml` file.
```bash
kubectl apply -f deployment.yaml
```
5. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
6. Test Autoscaler. Go to the [Testing](#testing) section.

## Scenario 2: Autoscaler running on workload cluster
In this scenario, Autoscaler will be deployed [on top of the workload cluster](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-a-joined-cluster-using-service-account-credentials){target=_blank} directly. In order for Autoscaler to work, it is required that the workload cluster resources are moved from the management cluster to the workload cluster.

### Steps
1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
2. Get the kubeconfig file for the workload cluster and use this kubeconfig to log in to the workload cluster.
```bash
clusterctl get kubeconfig <cluster-name> -n <namespace> > /path/to/kubeconfig
```
3. Install a CNI in the workload cluster.
4. Initialise the CAPX components on top of the workload cluster:
```bash
clusterctl init --infrastructure nutanix
```
5. Migrate the workload cluster custom resources to the workload cluster. Run the following command from the management cluster:
```bash
clusterctl move -n <namespace> --to-kubeconfig /path/to/kubeconfig
```
6. Verify that the cluster has been migrated by running the following command on the workload cluster:
```bash
kubectl get cluster -A
```
7. Download the example [autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
8. Create the Autoscaler namespace:
```bash
kubectl create ns autoscaler
```
9. Apply the `deployment.yaml` file:
```bash
kubectl apply -f deployment.yaml
```
10. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
11. Test Autoscaler. Go to the [Testing](#testing) section.

## Testing

1. Deploy an example Kubernetes application. For example, the one used in the [Kubernetes HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
```bash
kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
```
2. Increase the number of replicas of the application to trigger a scale-up event:
```
kubectl scale deployment php-apache --replicas 100
```
3. Decrease the number of replicas of the application again to trigger a scale-down event.

    !!! note
        In case of issues, check the logs of the Autoscaler pods.

4. After a while, CAPX will add more machines. Refer to the [Autoscaler configuration parameters](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank} to tweak the behaviour and timeouts.

## Autoscaler node group annotations
Autoscaler uses the following annotations to define the upper and lower boundaries of the managed machines:

| Annotation | Example Value | Description |
|-------------------------------------------------------------|---------------|------------------------------------------------|
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size | 5 | Maximum number of machines in this node group |
| cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size | 1 | Minimum number of machines in this node group |

These annotations must be applied to the `MachineDeployment` resources of a CAPX cluster.

### Example
```YAML
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  annotations:
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
```
diff --git a/docs/capx/v1.7.x/experimental/capx_multi_pe.md b/docs/capx/v1.7.x/experimental/capx_multi_pe.md
new file mode 100644
index 00000000..bd52ccd7
--- /dev/null
+++ b/docs/capx/v1.7.x/experimental/capx_multi_pe.md
@@ -0,0 +1,30 @@
# Creating a workload CAPX cluster spanning Prism Element clusters

!!! warning
    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.

This page will explain how to deploy CAPX-based Kubernetes clusters where worker nodes span multiple Prism Element (PE) clusters.

!!! note
    All the PE clusters must be managed by the same Prism Central (PC) instance.

The topology will look like this:

- One PC managing multiple PEs
- One CAPI management cluster
- One CAPI workload cluster with multiple `MachineDeployment` resources

Refer to the [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to get started with CAPX.

To create workload clusters spanning multiple Prism Element clusters, it is required to create a `MachineDeployment` and `NutanixMachineTemplate` resource for each Prism Element cluster. The Prism Element specific parameters (name/UUID, subnet, ...) are referenced in the `NutanixMachineTemplate`.

## Steps
1. Create a management cluster that has the CAPX infrastructure provider deployed.
2. Create a `cluster.yml` file containing the workload cluster definition, for example as sketched below.
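    A starting definition might be generated with `clusterctl` (a sketch; the cluster name and endpoint IP are illustrative):

    ```shell
    # Generate a baseline workload cluster definition; extend it afterwards with
    # one MachineDeployment/NutanixMachineTemplate pair per Prism Element cluster
    CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster multi-pe-cluster \
      -i nutanix > cluster.yml
    ```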
Refer to the steps defined in the [CAPI quickstart guide](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to create an example `cluster.yml` file. +3. Add additional `MachineDeployment` and `NutanixMachineTemplate` resources. + + By default there is only one machine template and machine deployment defined. To add nodes residing on another Prism Element cluster, a new `MachineDeployment` and `NutanixMachineTemplate` resource needs to be added to the yaml file. The autogenerated `MachineDeployment` and `NutanixMachineTemplate` resource definitions can be used as a baseline. + + Make sure to modify the `MachineDeployment` and `NutanixMachineTemplate` parameters. + +4. Apply the modified `cluster.yml` file to the management cluster. diff --git a/docs/capx/v1.7.x/experimental/oidc.md b/docs/capx/v1.7.x/experimental/oidc.md new file mode 100644 index 00000000..0c274121 --- /dev/null +++ b/docs/capx/v1.7.x/experimental/oidc.md @@ -0,0 +1,31 @@ +# OIDC integration + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +Kubernetes allows users to authenticate using various authentication mechanisms. One of these mechanisms is OIDC. Information on how Kubernetes interacts with OIDC providers can be found in the [OpenID Connect Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens){target=_blank} section of the official Kubernetes documentation. + + +Follow the steps below to configure a CAPX cluster to use an OIDC identity provider. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource. +3. Modify/add the `spec.kubeadmConfigSpec.clusterConfiguration.apiServer.extraArgs` attribute and add the required [API server parameters](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server){target=_blank}. See the [example](#example) below. +4. Apply the `cluster.yaml` file +5. Log in with the OIDC provider once the cluster is provisioned + +## Example +```YAML +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + ... + oidc-client-id: + oidc-issuer-url: + ... +``` + diff --git a/docs/capx/v1.7.x/experimental/proxy.md b/docs/capx/v1.7.x/experimental/proxy.md new file mode 100644 index 00000000..c8f940d4 --- /dev/null +++ b/docs/capx/v1.7.x/experimental/proxy.md @@ -0,0 +1,62 @@ +# Proxy configuration + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +CAPX can be configured to use a proxy to connect to external networks. This proxy configuration needs to be applied to control plane and worker nodes. + +Follow the steps below to configure a CAPX cluster to use a proxy. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. 
Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration. + 1. `KubeadmControlPlane`: + * Add the proxy configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list. + * Add `systemctl` commands to apply the proxy config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list. + 2. `KubeadmConfigTemplate`: + * Add the proxy configuration to the `spec.template.spec.files` list. Do not modify other items in the list. + * Add `systemctl` commands to apply the proxy config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list. +4. Apply the `cluster.yaml` file + +## Example + +```YAML +--- +# controlplane proxy settings +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + files: + - content: | + [Service] + Environment="HTTP_PROXY=" + Environment="HTTPS_PROXY=" + Environment="NO_PROXY=" + owner: root:root + path: /etc/systemd/system/containerd.service.d/http-proxy.conf + ... + preKubeadmCommands: + - sudo systemctl daemon-reload + - sudo systemctl restart containerd + ... +--- +# worker proxy settings +kind: KubeadmConfigTemplate +spec: + template: + spec: + files: + - content: | + [Service] + Environment="HTTP_PROXY=" + Environment="HTTPS_PROXY=" + Environment="NO_PROXY=" + owner: root:root + path: /etc/systemd/system/containerd.service.d/http-proxy.conf + ... + preKubeadmCommands: + - sudo systemctl daemon-reload + - sudo systemctl restart containerd + ... +``` + diff --git a/docs/capx/v1.7.x/experimental/registry_mirror.md b/docs/capx/v1.7.x/experimental/registry_mirror.md new file mode 100644 index 00000000..307a9425 --- /dev/null +++ b/docs/capx/v1.7.x/experimental/registry_mirror.md @@ -0,0 +1,96 @@ +# Registry Mirror configuration + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +CAPX can be configured to use a private registry to act as a mirror of an external public registry. This registry mirror configuration needs to be applied to control plane and worker nodes. + +Follow the steps below to configure a CAPX cluster to use a registry mirror. + +## Steps +1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration. + 1. `KubeadmControlPlane`: + * Add the registry mirror configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list. + * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list. + 2. `KubeadmConfigTemplate`: + * Add the registry mirror configuration to the `spec.template.spec.files` list. Do not modify other items in the list. + * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list. +4. Apply the `cluster.yaml` file + +## Example + +This example will configure a registry mirror for the following namespace: + +* registry.k8s.io +* ghcr.io +* quay.io + +and redirect them to corresponding projects of the `` registry. 
+ +```YAML +--- +# controlplane proxy settings +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + files: + - content: | + [host."https:///v2/registry.k8s.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml + - content: | + [host."https:///v2/ghcr.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/ghcr.io/hosts.toml + - content: | + [host."https:///v2/quay.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/quay.io/hosts.toml + ... + preKubeadmCommands: + - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml + ... +--- +# worker proxy settings +kind: KubeadmConfigTemplate +spec: + template: + spec: + files: + - content: | + [host."https:///v2/registry.k8s.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml + - content: | + [host."https:///v2/ghcr.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/ghcr.io/hosts.toml + - content: | + [host."https:///v2/quay.io"] + capabilities = ["pull", "resolve"] + skip_verify = false + override_path = true + owner: root:root + path: /etc/containerd/certs.d/quay.io/hosts.toml + ... + preKubeadmCommands: + - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml + ... +``` + diff --git a/docs/capx/v1.7.x/experimental/vpc.md b/docs/capx/v1.7.x/experimental/vpc.md new file mode 100644 index 00000000..3513e47e --- /dev/null +++ b/docs/capx/v1.7.x/experimental/vpc.md @@ -0,0 +1,40 @@ +# Creating a workload CAPX cluster in a Nutanix Flow VPC + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +!!! note + Nutanix Flow VPCs are only validated with CAPX 1.1.3+ + +[Nutanix Flow Virtual Networking](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9:Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9){target=_blank} allows users to create Virtual Private Clouds (VPCs) with Overlay networking. +The steps below will illustrate how a CAPX cluster can be deployed inside an overlay subnet (NAT) inside a VPC while the management cluster resides outside of the VPC. + + +## Steps +1. [Request a floating IP](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Networking-Guide:ear-flow-nw-request-floating-ip-pc-t.html){target=_blank} +2. Link the floating IP to an internal IP address inside the overlay subnet that will be used to deploy the CAPX cluster. This address will be assigned to the CAPX loadbalancer. To prevent IP conflicts, make sure the IP address is not part of the IP-pool defined in the subnet. +3. Generate a `cluster.yaml` file with the required CAPX cluster configuration where the `CONTROL_PLANE_ENDPOINT_IP` is set to the floating IP requested in the first step. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file. +4. 
Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
5. Modify the `spec.kubeadmConfigSpec.files.*.content` attribute and change the `kube-vip` definition as shown in the [example](#example) below.
6. Apply the `cluster.yaml` file.
7. When the CAPX workload cluster is deployed, it will be reachable via the floating IP.

## Example
```YAML
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    files:
    - content: |
        apiVersion: v1
        kind: Pod
        metadata:
          name: kube-vip
          namespace: kube-system
        spec:
          containers:
          - env:
            - name: address
              value: "<floating-ip>"
```
diff --git a/docs/capx/v1.7.x/getting_started.md b/docs/capx/v1.7.x/getting_started.md
new file mode 100644
index 00000000..ce39a649
--- /dev/null
+++ b/docs/capx/v1.7.x/getting_started.md
@@ -0,0 +1,280 @@
# Getting Started

This is a guide on getting started with Cluster API Provider Nutanix Cloud Infrastructure (CAPX). To learn more about Cluster API in depth, check out the [Cluster API book](https://cluster-api.sigs.k8s.io/){target=_blank}.

For more information on how to install the Nutanix CSI Driver on a CAPX cluster, visit [Nutanix CSI Driver installation with CAPX](./addons/install_csi_driver.md).

For more information on how CAPX handles credentials, visit [Credential Management](./credential_management.md).

For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md).

!!! note
    [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See the [CAPX v1.7.x Upgrade Procedure](./tasks/capx_v17x_upgrade_procedure.md).

## Production Workflow

### Build OS image for NutanixMachineTemplate resource
Cluster API Provider Nutanix Cloud Infrastructure (CAPX) uses the [Image Builder](https://image-builder.sigs.k8s.io/){target=_blank} project to build OS images used for the Nutanix machines.

Follow the steps detailed in [Building CAPI Images for Nutanix Cloud Platform (NCP)](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#building-capi-images-for-nutanix-cloud-platform-ncp){target=_blank} to use Image Builder on the Nutanix Cloud Platform.

For a list of supported operating systems, visit the OS image [Configuration](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#configuration){target=_blank} page.

### Prerequisites for using Cluster API Provider Nutanix Cloud Infrastructure
The [Cluster API installation](https://cluster-api.sigs.k8s.io/user/quick-start.html#installation){target=_blank} section provides an overview of all required prerequisites:

- [Common Prerequisites](https://cluster-api.sigs.k8s.io/user/quick-start.html#common-prerequisites){target=_blank}
- [Install and/or configure a Kubernetes cluster](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-andor-configure-a-kubernetes-cluster){target=_blank}
- [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl){target=_blank}
- (Optional) [Enabling Feature Gates](https://cluster-api.sigs.k8s.io/user/quick-start.html#enabling-feature-gates){target=_blank}

Make sure these prerequisites have been met before moving to the [Configure and Install Cluster API Provider Nutanix Cloud Infrastructure](#configure-and-install-cluster-api-provider-nutanix-cloud-infrastructure) step.
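Before continuing, it may help to confirm that the basic tooling is in place; the checks below are illustrative:

```shell
# Confirm the bootstrap/management Kubernetes cluster is reachable
kubectl cluster-info

# Confirm clusterctl is installed and report its version
clusterctl version
```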
+ +### Configure and Install Cluster API Provider Nutanix Cloud Infrastructure +To initialize Cluster API Provider Nutanix Cloud Infrastructure, `clusterctl` requires the following variables, which should be set in either `~/.cluster-api/clusterctl.yaml` or as environment variables. +``` +NUTANIX_ENDPOINT: "" # IP or FQDN of Prism Central +NUTANIX_USER: "" # Prism Central user +NUTANIX_PASSWORD: "" # Prism Central password +NUTANIX_INSECURE: false # or true + +KUBERNETES_VERSION: "v1.22.9" +WORKER_MACHINE_COUNT: 3 +NUTANIX_SSH_AUTHORIZED_KEY: "" + +NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: "" +NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME: "" +NUTANIX_SUBNET_NAME: "" + +EXP_CLUSTER_RESOURCE_SET: true # Required for Nutanix CCM installation +``` + +You can also see the required list of variables by running the following: +``` +clusterctl generate cluster mycluster -i nutanix --list-variables +Required Variables: + - CONTROL_PLANE_ENDPOINT_IP + - KUBERNETES_VERSION + - NUTANIX_ENDPOINT + - NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME + - NUTANIX_PASSWORD + - NUTANIX_PRISM_ELEMENT_CLUSTER_NAME + - NUTANIX_SSH_AUTHORIZED_KEY + - NUTANIX_SUBNET_NAME + - NUTANIX_USER + +Optional Variables: + - CONTROL_PLANE_ENDPOINT_PORT (defaults to "6443") + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBEVIP_LB_ENABLE (defaults to "false") + - KUBEVIP_SVC_ENABLE (defaults to "false") + - NAMESPACE (defaults to current Namespace in the KubeConfig file) + - NUTANIX_INSECURE (defaults to "false") + - NUTANIX_MACHINE_BOOT_TYPE (defaults to "legacy") + - NUTANIX_MACHINE_MEMORY_SIZE (defaults to "4Gi") + - NUTANIX_MACHINE_VCPU_PER_SOCKET (defaults to "1") + - NUTANIX_MACHINE_VCPU_SOCKET (defaults to "2") + - NUTANIX_PORT (defaults to "9440") + - NUTANIX_SYSTEMDISK_SIZE (defaults to "40Gi") + - WORKER_MACHINE_COUNT (defaults to 0) +``` + +!!! note + To prevent duplicate IP assignments, it is required to assign an IP-address to the `CONTROL_PLANE_ENDPOINT_IP` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster. + +!!! warning + Make sure [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled before running `clusterctl init` + +Now you can instantiate Cluster API with the following: +``` +clusterctl init -i nutanix +``` + +### Deploy a workload cluster on Nutanix Cloud Infrastructure +``` +export TEST_CLUSTER_NAME=mytestcluster1 +export TEST_NAMESPACE=mytestnamespace +CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster ${TEST_CLUSTER_NAME} \ + -i nutanix \ + --target-namespace ${TEST_NAMESPACE} \ + --kubernetes-version v1.22.9 \ + --control-plane-machine-count 1 \ + --worker-machine-count 3 > ./cluster.yaml +kubectl create ns ${TEST_NAMESPACE} +kubectl apply -f ./cluster.yaml -n ${TEST_NAMESPACE} +``` +To customize the configuration of the default `cluster.yaml` file generated by CAPX, visit the [NutanixCluster](./types/nutanix_cluster.md) and [NutanixMachineTemplate](./types/nutanix_machine_template.md) documentation. + +### Access a workload cluster +To access resources on the cluster, you can get the kubeconfig with the following: +``` +clusterctl get kubeconfig ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} > ${TEST_CLUSTER_NAME}.kubeconfig +kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig get nodes +``` + +### Install CNI on a workload cluster + +You must deploy a Container Network Interface (CNI) based pod network add-on so that your pods can communicate with each other. 
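For example, Calico might be applied to the workload cluster as follows (a sketch; the Calico version shown is illustrative, so check the Calico documentation for the current release):

```shell
# Install the Calico CNI into the workload cluster via its kubeconfig
kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig apply \
  -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/calico.yaml
```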
Cluster DNS (CoreDNS) will not start up before a network is installed. + +!!! note + Take care that your pod network must not overlap with any of the host networks. You are likely to see problems if there is any overlap. If you find a collision between your network plugin's preferred pod network and some of your host networks, you must choose a suitable alternative CIDR block to use instead. It can be configured inside the `cluster.yaml` generated by `clusterctl generate cluster` before applying it. + +Several external projects provide Kubernetes pod networks using CNI, some of which also support [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/){target=_blank}. + +See a list of add-ons that implement the [Kubernetes networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model){target=_blank}. At time of writing, the most common are [Calico](https://www.tigera.io/project-calico/){target=_blank} and [Cilium](https://cilium.io){target=_blank}. + +Follow the specific install guide for your selected CNI and install only one pod network per cluster. + +Once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS pod is running in the output of `kubectl get pods --all-namespaces`. + +### Add Failure Domain to Cluster + +To update your cluster to use new or modified failure domains after initial deployment, follow these steps: + +1. Create NutanixFailureDomain resource + + For example, define a failure domain in example.yaml: +``` +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixFailureDomain +metadata: + name: fd-domain-1 +spec: + prismElementCluster: + type: name + name: "PrismClusterA" + subnets: + - type: name + name: "SubnetA" + - type: name + name: "SubnetB" +``` + +2. Apply the resource + + ``` +kubectl apply -f example.yaml +``` + +3. Edit the NutanixCluster resource to reference the failure domain(s) + + ``` +kubectl edit nutanixcluster -n +``` + + In the spec section, add the controlPlaneFailureDomains field: + + ``` +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: +spec: + controlPlaneFailureDomains: # add controlPlaneFailureDomains + - name: "fd-domain-1" # failureDomain name + - name: "fd-domain-2" # failureDomain name + controlPlaneEndpoint: + prismCentral: +``` + +4. Verify the update + + Check that the failure domains are registered with the cluster: + + ``` +kubectl get cluster -n -o yaml +``` + + Look for the failureDomains in status section: + + ``` +failureDomains: + fd-domain-1: + controlPlane: true + fd-domain-2: + controlPlane: true +``` + +### Add Failure Domain to MachineDeployment + +To associate a MachineDeployment with a specific failure domain: + +1. Export the MachineDeployment definition + + ``` +kubectl get machinedeployments -n -o yaml > machinedeployment.yaml +``` + +2. Edit the manifest to add the failure domain + + Under spec.template.spec, add a failureDomain field: + + ``` +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: your-machinedeployment + namespace: your-namespace +spec: + replicas: 3 + selector: + matchLabels: + cluster.x-k8s.io/deployment-name: your-machinedeployment + template: + metadata: + labels: + cluster.x-k8s.io/deployment-name: your-machinedeployment + spec: + failureDomain: "fd-domain-1" + # other fields like bootstrap, infrastructureRef ... +``` + +3. 
Apply the changes

    ```
    kubectl apply -f machinedeployment.yaml
    ```

4. Verify the update

    Confirm that the failure domain field was updated:

    ```
    kubectl get machinedeployments -n <namespace> -o yaml | grep failureDomain
    ```

5. Check placement of machines

    Ensure new machines are placed in the specified failure domain:

    ```
    kubectl get machines -l cluster.x-k8s.io/deployment-name=<machinedeployment-name> -n <namespace> -o yaml
    ```

### Kube-vip settings

Kube-vip is a true load-balancing solution for the Kubernetes control plane: it distributes API requests across control plane nodes. It can also provide load balancing for Kubernetes services.

You can tweak kube-vip settings by using the following properties:

- `KUBEVIP_LB_ENABLE`

This setting allows control plane load balancing using IPVS. See the
[Control Plane Load-Balancing documentation](https://kube-vip.io/docs/about/architecture/#control-plane-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ENABLE`

This setting enables a service of type LoadBalancer. See the
[Kubernetes Service Load Balancing documentation](https://kube-vip.io/docs/about/architecture/#kubernetes-service-load-balancing){target=_blank} for further information.

- `KUBEVIP_SVC_ELECTION`

This setting enables load balancing of load balancers. See [Load Balancing Load Balancers](https://kube-vip.io/docs/usage/kubernetes-services/#load-balancing-load-balancers-when-using-arp-mode-yes-you-read-that-correctly-kube-vip-v050){target=_blank} for further information.

### Delete a workload cluster
To remove a workload cluster from your management cluster, remove the cluster object and the provider will clean up all resources.

```
kubectl delete cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE}
```
!!! note
    Deleting the entire cluster template with `kubectl delete -f ./cluster.yaml` may lead to pending resources requiring manual cleanup.
diff --git a/docs/capx/v1.7.x/pc_certificates.md b/docs/capx/v1.7.x/pc_certificates.md
new file mode 100644
index 00000000..f3fe1699
--- /dev/null
+++ b/docs/capx/v1.7.x/pc_certificates.md
@@ -0,0 +1,149 @@
# Certificate Trust

CAPX invokes Prism Central APIs using the HTTPS protocol. CAPX has different methods to handle the trust of the Prism Central certificates:

- Enable certificate verification (default)
- Configure an additional trust bundle
- Disable certificate verification

See the respective sections below for more information.

!!! note
    For more information about replacing Prism Central certificates, see the [Nutanix AOS Security Guide](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_5:mul-security-ssl-certificate-pc-t.html){target=_blank}.

## Enable certificate verification (default)
By default, CAPX will perform certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
No additional configuration is required in CAPX.

## Configure an additional trust bundle
CAPX allows users to configure an additional trust bundle. This allows CAPX to verify certificates that are not issued by a publicly trusted certificate authority.

To configure an additional trust bundle, the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable needs to be set. The value of the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable contains the trust bundle (PEM format) in base64 encoded format.
See the [Configuring the trust bundle environment variable](#configuring-the-trust-bundle-environment-variable) section for more information. + +It is also possible to configure the additional trust bundle manually by creating a custom `cluster-template`. See the [Configuring the additional trust bundle manually](#configuring-the-additional-trust-bundle-manually) section for more information + +The `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable can be set when initializing the CAPX provider or when creating a workload cluster. If the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` is configured when the CAPX provider is initialized, the additional trust bundle will be used for every CAPX workload cluster. If it is only configured when creating a workload cluster, it will only be applicable for that specific workload cluster. + + +### Configuring the trust bundle environment variable + +Create a PEM encoded file containing the root certificate and all intermediate certificates. Example: +``` +$ cat cert.crt +-----BEGIN CERTIFICATE----- + +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- + +-----END CERTIFICATE----- +``` + +Use a `base64` tool to encode these contents in base64. The command below will provide a `base64` string. +``` +$ cat cert.crt | base64 + +``` +!!! note + Make sure the `base64` string does not contain any newlines (`\n`). If the output string contains newlines, remove them manually or check the manual of the `base64` tool on how to generate a `base64` string without newlines. + +Use the `base64` string as value for the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable. +``` +$ export NUTANIX_ADDITIONAL_TRUST_BUNDLE="" +``` + +### Configuring the additional trust bundle manually + +To configure the additional trust bundle manually without using the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable present in the default `cluster-template` files, it is required to: + +- Create a `ConfigMap` containing the additional trust bundle. +- Configure the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec. + +#### Creating the additional trust bundle ConfigMap + +CAPX supports two different formats for the ConfigMap containing the additional trust bundle. The first one is to add the additional trust bundle as a multi-line string in the `ConfigMap`, the second option is to add the trust bundle in `base64` encoded format. See the examples below. + +Multi-line string example: +```YAML +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +data: + ca.crt: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- +``` + +`base64` example: + +```YAML +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +binaryData: + ca.crt: +``` + +!!! note + The `base64` string needs to be added as `binaryData`. + + +#### Configuring the NutanixCluster spec + +When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `NutanixCluster` spec. Add the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced. + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + ... + prismCentral: + ... + additionalTrustBundle: + kind: ConfigMap + name: user-ca-bundle + insecure: false +``` + +!!! 
note + the default value of `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured. + + If `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled. + + +## Disable certificate verification + +!!! note + Disabling certificate verification is not recommended for production purposes and should only be used for testing. + + +Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `NutanixCluster` spec. Certificate verification will be disabled even if an additional trust bundle is configured. + +Disabled certificate verification example: + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + prismCentral: + ... + insecure: true + ... +``` \ No newline at end of file diff --git a/docs/capx/v1.7.x/port_requirements.md b/docs/capx/v1.7.x/port_requirements.md new file mode 100644 index 00000000..af182abb --- /dev/null +++ b/docs/capx/v1.7.x/port_requirements.md @@ -0,0 +1,19 @@ +# Port Requirements + +CAPX uses the ports documented below to create workload clusters. + +!!! note + This page only documents the ports specifically required by CAPX and does not provide the full overview of all ports required in the CAPI framework. + +## Management cluster + +| Source | Destination | Protocol | Port | Description | +|--------------------|---------------------|----------|------|--------------------------------------------------------------------------------------------------| +| Management cluster | External Registries | TCP | 443 | Pull container images from [CAPX public registries](#public-registries-utilized-when-using-capx) | +| Management cluster | Prism Central | TCP | 9440 | Management cluster communication to Prism Central | + +## Public registries utilized when using CAPX + +| Registry name | +|---------------| +| ghcr.io | diff --git a/docs/capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md b/docs/capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md new file mode 100644 index 00000000..16a2c91a --- /dev/null +++ b/docs/capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md @@ -0,0 +1,83 @@ +# CAPX v1.7.x Upgrade Procedure + +Starting from CAPX v1.3.0, it is required for all CAPX-managed Kubernetes clusters to use the Nutanix Cloud Controller Manager (CCM). + +Before upgrading CAPX instances to v1.3.0 or later, it is required to follow the [steps](#steps) detailed below for each of the CAPX-managed Kubernetes clusters that don't use Nutanix CCM. + + +## Steps + +This procedure uses [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} to install Nutanix CCM but it can also be installed using the [Nutanix CCM Helm chart](https://artifacthub.io/packages/helm/nutanix/nutanix-cloud-provider){target=_blank}. + +!!! warning + Make sure [CRS](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled on the management cluster before following the procedure. + +Perform following steps for each of the CAPX-managed Kubernetes clusters that are not configured to use Nutanix CCM: + +1. 
Add the `cloud-provider: external` configuration in the `KubeadmConfigTemplate` resources: + ```YAML + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + ``` +2. Add the `cloud-provider: external` configuration in the `KubeadmControlPlane` resource: +```YAML +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +``` +3. Add the Nutanix CCM CRS resources: + + - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.7.0/templates/ccm/nutanix-ccm-crs.yaml){target=_blank} + - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.7.0/templates/ccm/nutanix-ccm-secret.yaml) + - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.7.0/templates/ccm/nutanix-ccm.yaml) + + Make sure to update each of the variables before applying the `YAML` files. + +4. Add the `ccm: nutanix` label to the `Cluster` resource: + ```YAML + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: nutanix + ``` +5. Verify if the Nutanix CCM pod is up and running: +``` +kubectl get pod -A -l k8s-app=nutanix-cloud-controller-manager +``` +6. Trigger a new rollout of the Kubernetes nodes by performing a Kubernetes upgrade or by using `clusterctl alpha rollout restart`. See the [clusterctl alpha rollout](https://cluster-api.sigs.k8s.io/clusterctl/commands/alpha-rollout#restart){target=_blank} for more information. +7. Upgrade CAPX to v1.7.0 by following the [clusterctl upgrade](https://cluster-api.sigs.k8s.io/clusterctl/commands/upgrade.html?highlight=clusterctl%20upgrade%20pla#clusterctl-upgrade){target=_blank} documentation \ No newline at end of file diff --git a/docs/capx/v1.7.x/tasks/modify_machine_configuration.md b/docs/capx/v1.7.x/tasks/modify_machine_configuration.md new file mode 100644 index 00000000..04a43a95 --- /dev/null +++ b/docs/capx/v1.7.x/tasks/modify_machine_configuration.md @@ -0,0 +1,11 @@ +# Modifying Machine Configurations + +Since all attributes of the `NutanixMachineTemplate` resources are immutable, follow the [Updating Infrastructure Machine Templates](https://cluster-api.sigs.k8s.io/tasks/updating-machine-templates.html?highlight=machine%20template#updating-infrastructure-machine-templates){target=_blank} procedure to modify the configuration of machines in an existing CAPX cluster. +See the [NutanixMachineTemplate](../types/nutanix_machine_template.md) documentation for all supported configuration parameters. + +!!! note + Manually modifying existing and linked `NutanixMachineTemplate` resources will not trigger a rolling update of the machines. + +!!! note + Do not modify the virtual machine configuration of CAPX cluster nodes manually in Prism/Prism Central. 
CAPX will not automatically revert the configuration change, but performing scale-up, scale-down, or upgrade operations will override manual modifications. Only use the `Updating Infrastructure Machine Templates` procedure referenced above to perform configuration changes.
diff --git a/docs/capx/v1.7.x/troubleshooting.md b/docs/capx/v1.7.x/troubleshooting.md
new file mode 100644
index 00000000..c023d13e
--- /dev/null
+++ b/docs/capx/v1.7.x/troubleshooting.md
@@ -0,0 +1,13 @@
# Troubleshooting

## Clusterctl failed with GitHub rate limit error

By design, `clusterctl` fetches artifacts from repositories hosted on GitHub; this operation is subject to [GitHub API rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting){target=_blank}.

While this is generally fine for the majority of users, there is still a chance that some users (especially developers or CI tools) hit this limit:

```
Error: failed to get repository client for the XXX with name YYY: error creating the GitHub repository client: failed to get GitHub latest version: failed to get the list of versions: rate limit for github api has been reached. Please wait one hour or get a personal API tokens a assign it to the GITHUB_TOKEN environment variable
```

As explained in the error message, you can increase your API rate limit by [creating a GitHub personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token){target=_blank} and setting the `GITHUB_TOKEN` environment variable to the token.
diff --git a/docs/capx/v1.7.x/types/nutanix_cluster.md b/docs/capx/v1.7.x/types/nutanix_cluster.md
new file mode 100644
index 00000000..daa8d8cc
--- /dev/null
+++ b/docs/capx/v1.7.x/types/nutanix_cluster.md
@@ -0,0 +1,55 @@
# NutanixCluster

The `NutanixCluster` resource defines the configuration of a CAPX Kubernetes cluster.

Example of a `NutanixCluster` resource:

```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  controlPlaneEndpoint:
    host: ${CONTROL_PLANE_ENDPOINT_IP}
    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
  prismCentral:
    address: ${NUTANIX_ENDPOINT}
    additionalTrustBundle:
      kind: ConfigMap
      name: user-ca-bundle
    credentialRef:
      kind: Secret
      name: ${CLUSTER_NAME}
    insecure: ${NUTANIX_INSECURE=false}
    port: ${NUTANIX_PORT=9440}
```

## NutanixCluster spec
The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixCluster` resource.

### Configuration parameters

| Key |Type |Description |
|--------------------------------------------|------|----------------------------------------------------------------------------------|
|controlPlaneEndpoint |object|Defines the host IP and port of the CAPX Kubernetes cluster. |
|controlPlaneEndpoint.host |string|Host IP to be assigned to the CAPX Kubernetes cluster. |
|controlPlaneEndpoint.port |int |Port of the CAPX Kubernetes cluster. Default: `6443` |
|prismCentral |object|(Optional) Prism Central endpoint definition. |
|prismCentral.address |string|IP/FQDN of Prism Central. |
|prismCentral.port |int |Port of Prism Central. Default: `9440` |
|prismCentral.insecure |bool |Disable Prism Central certificate checking. Default: `false` |
|prismCentral.credentialRef |object|Reference to credentials used for Prism Central connection. |
|prismCentral.credentialRef.kind |string|Kind of the credentialRef. Allowed value: `Secret` |
|prismCentral.credentialRef.name |string|Name of the secret containing the Prism Central credentials. |
|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret containing the Prism Central credentials. |
|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for Prism Central connection. |
|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` |
|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. |
|prismCentral.additionalTrustBundle.namespace|string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle.|
|controlPlaneFailureDomains |list |(Optional) List of local references to failure domains for control plane nodes. |
|controlPlaneFailureDomains.name |string|Name of the failure domain used for control plane nodes. |

!!! note
    To prevent duplicate IP assignments, it is required to assign an IP-address to the `controlPlaneEndpoint.host` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
diff --git a/docs/capx/v1.7.x/types/nutanix_failure_domains.md b/docs/capx/v1.7.x/types/nutanix_failure_domains.md
new file mode 100644
index 00000000..cefae92c
--- /dev/null
+++ b/docs/capx/v1.7.x/types/nutanix_failure_domains.md
@@ -0,0 +1,99 @@
# NutanixFailureDomain

The `NutanixFailureDomain` resource defines the configuration of a CAPX Kubernetes failure domain.

Example of a `NutanixFailureDomain` resource:
```YAML
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixFailureDomain
metadata:
  name: "${FAILURE_DOMAIN_NAME}"
  namespace: "${CLUSTER_NAMESPACE}"
spec:
  prismElementCluster:
    type: name
    name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
  subnets:
    - type: uuid
      uuid: "${NUTANIX_SUBNET_UUID}"
    - type: name
      name: "${NUTANIX_SUBNET_NAME}"
```

## NutanixFailureDomain spec
The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixFailureDomain` resource.

### Configuration parameters
| Key |Type |Description |
|--------------------------------------------|------|--------------------------------------------------------------------------------------------|
|prismElementCluster |object|Identifies the Prism Element cluster in Prism Central for the failure domain. |
|prismElementCluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` |
|prismElementCluster.name |string|Name of the Prism Element cluster. |
|prismElementCluster.uuid |string|UUID of the Prism Element cluster. |
|subnets |list |Reference (name or uuid) to the subnets to be assigned to the VMs. |
|subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` |
|subnets.[].name |string|Name of the subnet. |
|subnets.[].uuid |string|UUID of the subnet. |

!!! note
    The `NutanixFailureDomain` resource allows you to define logical groupings of Nutanix infrastructure for high availability and workload placement in Kubernetes clusters managed by CAPX. Each failure domain maps to a Prism Element cluster and a set of subnets, ensuring that workloads can be distributed across different infrastructure segments.
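Once applied, the registered failure domains can be listed from the management cluster; a minimal sketch, assuming the default lowercase-plural CRD naming:

```shell
# List NutanixFailureDomain resources across all namespaces
kubectl get nutanixfailuredomains -A
```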
+
+## Usage Notes
+
+- The `prismElementCluster` field is **required** and must specify either the `name` or `uuid` of the Prism Element cluster.
+- The `subnets` field is **required**. You can provide one or more subnets by `name` or `uuid`.
+- Failure domains are used by Cluster API to spread machines across different infrastructure segments for resilience.
+
+## Example Scenarios
+
+### Single Subnet by UUID
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-uuid
+spec:
+  prismElementCluster:
+    type: uuid
+    uuid: "00000000-0000-0000-0000-000000000000"
+  subnets:
+    - type: uuid
+      uuid: "11111111-1111-1111-1111-111111111111"
+```
+
+### Multiple Subnets by Name
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-names
+spec:
+  prismElementCluster:
+    type: name
+    name: "PrismClusterA"
+  subnets:
+    - type: name
+      name: "SubnetA"
+    - type: name
+      name: "SubnetB"
+```
+
+### Multiple Subnets by Name and UUID
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-mixed
+spec:
+  prismElementCluster:
+    type: name
+    name: "PrismClusterA"
+  subnets:
+    - type: name
+      name: "SubnetA"
+    - type: uuid
+      uuid: "11111111-1111-1111-1111-111111111111"
+```
\ No newline at end of file
diff --git a/docs/capx/v1.7.x/types/nutanix_machine_template.md b/docs/capx/v1.7.x/types/nutanix_machine_template.md
new file mode 100644
index 00000000..4aa613b8
--- /dev/null
+++ b/docs/capx/v1.7.x/types/nutanix_machine_template.md
@@ -0,0 +1,124 @@
+# NutanixMachineTemplate
+The `NutanixMachineTemplate` resource defines the configuration of a CAPX Kubernetes VM.
+
+Example of a `NutanixMachineTemplate` resource:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-mt-0"
+  namespace: "${NAMESPACE}"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://${CLUSTER_NAME}-m1"
+      # Supported options for boot type: legacy and uefi
+      # Defaults to legacy if not set
+      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
+      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
+      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
+      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
+      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
+      image:
+        type: name
+        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
+      cluster:
+        type: name
+        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
+      subnet:
+        - type: name
+          name: "${NUTANIX_SUBNET_NAME}"
+      # Adds additional categories to the virtual machines.
+      # Note: Categories must already be present in Prism Central
+      # additionalCategories:
+      #   - key: AppType
+      #     value: Kubernetes
+      # Adds the cluster virtual machines to a project defined in Prism Central.
+      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
+      # Note: Project must already be present in Prism Central.
+      # project:
+      #   type: name
+      #   name: "NUTANIX_PROJECT_NAME"
+      # gpus:
+      #   - type: name
+      #     name: "GPU NAME"
+      # Note: Either of `image` or `imageLookup` must be set, but not both.
+      # imageLookup:
+      #   format: "NUTANIX_IMAGE_LOOKUP_FORMAT"
+      #   baseOS: "NUTANIX_IMAGE_LOOKUP_BASE_OS"
+      # dataDisks:
+      #   - diskSize:
+      #     deviceProperties:
+      #       deviceType: Disk
+      #       adapterType: SCSI
+      #       deviceIndex: 1
+      #     storageConfig:
+      #       diskMode: Standard
+      #       storageContainer:
+      #         type: name
+      #         name: "NUTANIX_VM_DISK_STORAGE_CONTAINER"
+      #     dataSource:
+      #       type: name
+      #       name: "NUTANIX_DATA_SOURCE_IMAGE_NAME"
+```
+
+## NutanixMachineTemplate spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixMachineTemplate` resource.
+
+### Configuration parameters
+| Key                                                 |Type  |Description                                                                                               |
+|----------------------------------------------------|------|----------------------------------------------------------------------------------------------------------|
+|bootType                                             |string|Boot type of the VM. Depends on the OS image used. Allowed values: `legacy`, `uefi`. Default: `legacy`   |
+|vcpusPerSocket                                       |int   |Number of vCPUs per socket. Default: `1`                                                                 |
+|vcpuSockets                                          |int   |Number of vCPU sockets. Default: `2`                                                                     |
+|memorySize                                           |string|Amount of memory. Default: `4Gi`                                                                         |
+|systemDiskSize                                       |string|Amount of storage assigned to the system disk. Default: `40Gi`                                           |
+|image                                                |object|Reference (name or uuid) to the OS image used for the system disk.                                       |
+|image.type                                           |string|Type to identify the OS image. Allowed values: `name` and `uuid`                                         |
+|image.name                                           |string|Name of the image.                                                                                       |
+|image.uuid                                           |string|UUID of the image.                                                                                       |
+|cluster                                              |object|(Optional) Reference (name or uuid) to the Prism Element cluster.                                        |
+|cluster.type                                         |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid`                            |
+|cluster.name                                         |string|Name of the Prism Element cluster.                                                                       |
+|cluster.uuid                                         |string|UUID of the Prism Element cluster.                                                                       |
+|subnets                                              |list  |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs.                            |
+|subnets.[].type                                      |string|Type to identify the subnet. Allowed values: `name` and `uuid`                                           |
+|subnets.[].name                                      |string|Name of the subnet.                                                                                      |
+|subnets.[].uuid                                      |string|UUID of the subnet.                                                                                      |
+|additionalCategories                                 |list  |Reference to the categories to be assigned to the VMs. These categories must already exist in Prism Central.|
+|additionalCategories.[].key                          |string|Key of the category.                                                                                     |
+|additionalCategories.[].value                        |string|Value of the category.                                                                                   |
+|project                                              |object|Reference (name or uuid) to the project. This project must already exist in Prism Central.               |
+|project.type                                         |string|Type to identify the project. Allowed values: `name` and `uuid`                                          |
+|project.name                                         |string|Name of the project.                                                                                     |
+|project.uuid                                         |string|UUID of the project.                                                                                     |
+|gpus                                                 |list  |Reference (name or deviceID) to the GPUs to be assigned to the VMs. Can be vGPU or Passthrough.          |
+|gpus.[].type                                         |string|Type to identify the GPU. Allowed values: `name` and `deviceID`                                          |
+|gpus.[].name                                         |string|Name of the GPU or the vGPU profile                                                                      |
+|gpus.[].deviceID                                     |string|DeviceID of the GPU or the vGPU profile                                                                  |
+|imageLookup                                          |object|(Optional) Defines how to look up the OS image for the machine when `image` is not set.                  |
+|imageLookup.format                                   |string|Naming format to look up the image for the machine. Default: `capx-{{.BaseOS}}-{{.K8sVersion}}-*`        |
+|imageLookup.baseOS                                   |string|Name of the base operating system to use for image lookup.                                               |
+|dataDisks                                            |list  |(Optional) Reference to the data disks to be attached to the VM.                                         |
+|dataDisks.[].diskSize                                |string|Size (in Quantity format) of the disk attached to the VM. The minimum diskSize is `1GB`.                 |
+|dataDisks.[].deviceProperties                        |object|(Optional) Reference to the properties of the disk device.                                               |
+|dataDisks.[].deviceProperties.deviceType             |string|VM disk device type. Allowed values: `Disk` (default) and `CDRom`                                        |
+|dataDisks.[].deviceProperties.adapterType            |string|Adapter type of the disk address.                                                                        |
+|dataDisks.[].deviceProperties.deviceIndex            |int   |(Optional) Index of the disk address. Allowed values: non-negative integers (default: `0`)               |
+|dataDisks.[].storageConfig                           |object|(Optional) Reference to the storage configuration parameters of the VM disks.                            |
+|dataDisks.[].storageConfig.diskMode                  |string|Specifies the disk mode. Allowed values: `Standard` (default) and `Flash`                                |
+|dataDisks.[].storageConfig.storageContainer          |object|(Optional) Reference (name or uuid) to the storage container used by the VM disk.                        |
+|dataDisks.[].storageConfig.storageContainer.type     |string|Type to identify the storage container. Allowed values: `name` and `uuid`                                |
+|dataDisks.[].storageConfig.storageContainer.name     |string|Name of the storage container.                                                                           |
+|dataDisks.[].storageConfig.storageContainer.uuid     |string|UUID of the storage container.                                                                           |
+|dataDisks.[].dataSource                              |object|(Optional) Reference (name or uuid) to a data source image for the VM disk.                              |
+|dataDisks.[].dataSource.type                         |string|Type to identify the data source image. Allowed values: `name` and `uuid`                                |
+|dataDisks.[].dataSource.name                         |string|Name of the data source image.                                                                           |
+|dataDisks.[].dataSource.uuid                         |string|UUID of the data source image.                                                                           |
+
+!!! note
+    - The `cluster` or `subnets` configuration parameters are optional when failure domains are defined on the `NutanixCluster` and `MachineDeployment` resources.
+    - If the `deviceType` is `Disk`, the valid `adapterType` can be `SCSI`, `IDE`, `PCI`, `SATA` or `SPAPR`. If the `deviceType` is `CDRom`, the valid `adapterType` can be `IDE` or `SATA`.
+    - Either of `image` or `imageLookup` must be set, but not both.
+    - For a Machine VM, the `deviceIndex` for the disks with the same `deviceType.adapterType` combination should start from `0` and increase consecutively. Note that for each Machine VM, `Disk.SCSI.0` and `CDRom.IDE.0` are reserved for use by the VM's system. For `dataDisks` of type Disk.SCSI and CDRom.IDE, the `deviceIndex` should therefore start from `1`.
\ No newline at end of file
diff --git a/docs/capx/v1.7.x/user_requirements.md b/docs/capx/v1.7.x/user_requirements.md
new file mode 100644
index 00000000..5a4b8604
--- /dev/null
+++ b/docs/capx/v1.7.x/user_requirements.md
@@ -0,0 +1,36 @@
+# User Requirements
+
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account.
+
+CAPX supports two types of PC users:
+
+- Local users: must be assigned the `Prism Central Admin` role.
+- Domain users: must be assigned a role that at least has the [Minimum required CAPX permissions for domain users](#minimum-required-capx-permissions-for-domain-users) assigned.
+
+See [Credential Management](./credential_management.md){target=_blank} for more information on how to pass the user credentials to CAPX.
+ +## Minimum required CAPX permissions for domain users + +The following permissions are required for Prism Central domain users: + +- Create Category Mapping +- Create Image +- Create Or Update Name Category +- Create Or Update Value Category +- Create Virtual Machine +- Delete Category Mapping +- Delete Image +- Delete Name Category +- Delete Value Category +- Delete Virtual Machine +- View Category Mapping +- View Cluster +- View Image +- View Name Category +- View Project +- View Subnet +- View Value Category +- View Virtual Machine + +!!! note + The list of permissions has been validated on PC 2022.6 and above. diff --git a/docs/capx/v1.7.x/validated_integrations.md b/docs/capx/v1.7.x/validated_integrations.md new file mode 100644 index 00000000..83ee53a0 --- /dev/null +++ b/docs/capx/v1.7.x/validated_integrations.md @@ -0,0 +1,55 @@ +# Validated Integrations + +Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers are using or deploying with CAPX. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process. + +## Integration Validation Policy + +Nutanix follows the version validation policies below: + +- Validate at least one active AOS LTS (long term support) version. Validated AOS LTS version for a specific CAPX version is listed in the [AOS](#aos) section.
+
+    !!! note
+
+        Typically the latest LTS release at the time of the CAPX release, except when the latest release is the initial release in a train (e.g. x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate the latest AOS STS (short term support) release at the time of the CAPX release.
+- Validate at least one active Prism Central (PC) version. Validated PC version for a specific CAPX version is listed in the [Prism Central](#prism-central) section.
+
+    !!! note
+
+        Typically the latest PC release at the time of the CAPX release, except when the latest release is the initial release in a train (e.g. x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate at least one active Cluster-API (CAPI) version. Validated CAPI version for a specific CAPX version is listed in the [Cluster-API](#cluster-api) section.
+
+    !!! note
+
+        Typically the latest Cluster-API release at the time of the CAPX release, except when the latest release is the initial release in a train (e.g. x.y.0). The exact version depends on timing and customer adoption.
+
+## Validated versions
+### Cluster-API
+| CAPX   | CAPI v1.3.x | CAPI v1.4.x | CAPI v1.5.x | CAPI v1.6.x | CAPI v1.7.x | CAPI v1.8.x | CAPI v1.9.x |
+|--------|-------------|-------------|-------------|-------------|-------------|-------------|-------------|
+| v1.7.x | Yes         | Yes         | Yes         | Yes         | Yes         | Yes         | Yes         |
+| v1.6.x | Yes         | Yes         | Yes         | Yes         | Yes         | Yes         | Yes         |
+| v1.5.x | Yes         | Yes         | Yes         | Yes         | Yes         | Yes         | No          |
+| v1.4.x | Yes         | Yes         | Yes         | Yes         | Yes         | No          | No          |
+
+See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions.
+
+### AOS
+
+| CAPX   | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
+|--------|-------------|-----------|------|-----|-----|
+| v1.7.x | No          | Yes       | Yes  | Yes | Yes |
+| v1.6.x | No          | Yes       | Yes  | Yes | Yes |
+| v1.5.x | Yes         | Yes       | Yes  | Yes | Yes |
+| v1.4.x | Yes         | Yes       | No   | No  | No  |
+
+### Prism Central
+
+| CAPX   | pc.2022.6 | pc.2023.x | pc.2024.x | pc.7.3 |
+|--------|-----------|-----------|-----------|--------|
+| v1.7.x | No        | Yes       | Yes       | Yes    |
+| v1.6.x | No        | Yes       | Yes       | Yes    |
+| v1.5.x | Yes       | Yes       | Yes       | Yes    |
+| v1.4.x | Yes       | Yes       | Yes       | No     |
diff --git a/mkdocs.yml b/mkdocs.yml
index 54c95fe8..879458fe 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -18,7 +18,30 @@ nav:
   - "Cloud Native":
     - "Overview": "index.md"
    - "Cluster API Provider: Nutanix (CAPX)":
-      - "v1.6.x (latest)":
+      - "v1.7.x (latest)":
+        - "Getting Started": "capx/v1.7.x/getting_started.md"
+        - "Types":
+          - "NutanixCluster": "capx/v1.7.x/types/nutanix_cluster.md"
+          - "NutanixMachineTemplate": "capx/v1.7.x/types/nutanix_machine_template.md"
+        - "Certificate Trust": "capx/v1.7.x/pc_certificates.md"
+        - "Credential Management": "capx/v1.7.x/credential_management.md"
+        - "Tasks":
+          - "Modifying Machine Configuration": "capx/v1.7.x/tasks/modify_machine_configuration.md"
+          - "CAPX v1.7.x Upgrade Procedure": "capx/v1.7.x/tasks/capx_v17x_upgrade_procedure.md"
+        - "Port Requirements": "capx/v1.7.x/port_requirements.md"
+        - "User Requirements": "capx/v1.7.x/user_requirements.md"
+        - "Addons":
+          - "CSI Driver Installation": "capx/v1.7.x/addons/install_csi_driver.md"
+        - "Validated Integrations": "capx/v1.7.x/validated_integrations.md"
+        - "Experimental":
+          - "Multi-PE CAPX cluster": "capx/v1.7.x/experimental/capx_multi_pe.md"
+          - "Autoscaler": "capx/v1.7.x/experimental/autoscaler.md"
+          - "OIDC Integration": "capx/v1.7.x/experimental/oidc.md"
+          - "Flow VPC": "capx/v1.7.x/experimental/vpc.md"
+          - "Proxy Configuration": "capx/v1.7.x/experimental/proxy.md"
+          - "Registry Mirror Configuration": "capx/v1.7.x/experimental/registry_mirror.md"
+        - "Troubleshooting": "capx/v1.7.x/troubleshooting.md"
+      - "v1.6.x":
         - "Getting Started": "capx/v1.6.x/getting_started.md"
         - "Types":
           - "NutanixCluster": "capx/v1.6.x/types/nutanix_cluster.md"

From 0938cee5fb0add7526831e0bc6e45ca322ac6c2d Mon Sep 17 00:00:00 2001
From: Abhay Aggrawal
Date: Mon, 1 Sep 2025 12:49:35 +0530
Subject: [PATCH 11/15] Adding nutanixFailureDomain.md to mkdocs.yaml (#75)

* Adding nutanixFailureDomain.md to mkdocs.yaml

* Adding nutanixFailureDomain.md to mkdocs.yaml
---
 docs/capx/v1.7.x/getting_started.md | 20 ++++++++++----------
 mkdocs.yml                          |  1 +
 2 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/docs/capx/v1.7.x/getting_started.md b/docs/capx/v1.7.x/getting_started.md index ce39a649..f4dbc487 100644 --- a/docs/capx/v1.7.x/getting_started.md +++ b/docs/capx/v1.7.x/getting_started.md @@ -152,19 +152,19 @@ spec: 2. Apply the resource - ``` +``` kubectl apply -f example.yaml ``` 3. Edit the NutanixCluster resource to reference the failure domain(s) - ``` +``` kubectl edit nutanixcluster -n ``` In the spec section, add the controlPlaneFailureDomains field: - ``` +``` apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 kind: NutanixCluster metadata: @@ -180,13 +180,13 @@ spec: Check that the failure domains are registered with the cluster: - ``` +``` kubectl get cluster -n -o yaml ``` Look for the failureDomains in status section: - ``` +``` failureDomains: fd-domain-1: controlPlane: true @@ -200,7 +200,7 @@ To associate a MachineDeployment with a specific failure domain: 1. Export the MachineDeployment definition - ``` +``` kubectl get machinedeployments -n -o yaml > machinedeployment.yaml ``` @@ -208,7 +208,7 @@ kubectl get machinedeployments -n -o yaml > machinedeployment Under spec.template.spec, add a failureDomain field: - ``` +``` apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment metadata: @@ -230,7 +230,7 @@ spec: 3. Apply the changes - ``` +``` kubectl apply -f machinedeployment.yaml ``` @@ -238,7 +238,7 @@ kubectl apply -f machinedeployment.yaml Confirm that the failure domain field was updated: - ``` +``` kubectl get machinedeployments -n -o yaml | grep failureDomain ``` @@ -246,7 +246,7 @@ kubectl get machinedeployments -n -o yaml | grep failureDomai Ensure new machines are placed in the specified failure domain: - ``` +``` kubectl get machines -l cluster.x-k8s.io/deployment-name= -n -o yaml ``` diff --git a/mkdocs.yml b/mkdocs.yml index 879458fe..436fd323 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -23,6 +23,7 @@ nav: - "Types": - "NutanixCluster": "capx/v1.7.x/types/nutanix_cluster.md" - "NutanixMachineTemplate": "capx/v1.7.x/types/nutanix_machine_template.md" + - "NutanixFailureDomain": "capx/v1.7.x/types/nutanix_failure_domains.md" - "Certificate Trust": "capx/v1.7.x/pc_certificates.md" - "Credential Management": "capx/v1.7.x/credential_management.md" - "Tasks": From 286e345e9d3aed5537da2d7d2463fde559b2f905 Mon Sep 17 00:00:00 2001 From: narasimha-ntnx Date: Tue, 18 Nov 2025 17:32:16 +0530 Subject: [PATCH 12/15] [ENG-812744] Update documentation for nutanix csi operator 3.3.4 version (#74) * update doc for nutanix csi operator 3.3.4 version * Update docs/openshift/operators/csi/index.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update docs/openshift/operators/csi/index.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update docs/openshift/operators/csi/index.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update docs/openshift/operators/csi/index.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * add storage class example spec for pc mode csi * fix * add service account based auth info * added seperator * fomatting * Update section headers for clarity in index.md --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Christophe Jauffret --- docs/openshift/operators/csi/index.md | 177 ++++++++++++++++++++++++-- 1 file changed, 164 insertions(+), 13 deletions(-) diff --git a/docs/openshift/operators/csi/index.md b/docs/openshift/operators/csi/index.md index a069b841..02c099ab 100644 --- 
a/docs/openshift/operators/csi/index.md
+++ b/docs/openshift/operators/csi/index.md
@@ -26,29 +26,92 @@ With Nutanix CSI Provider you can:
 2. Install the Operator by using the "openshift-cluster-csi-drivers" namespace and selecting defaults.
 
 ### Installing the CSI Driver using the Operator
-
 1. In the OpenShift web console, navigate to the Operators → Installed Operators page.
 2. Select **Nutanix CSI Operator**.
 3. Select **Create instance** and then **Create**.
+4. To install the Nutanix CSI Driver in PC mode (interacting with Prism Central):
+
+        apiVersion: crd.nutanix.com/v1alpha1
+        kind: NutanixCsiStorage
+        metadata:
+          name: nutanixcsistorage
+          namespace: openshift-cluster-csi-drivers
+        spec:
+          ntnxInitConfigMap:
+            usePC : true
+
+5. To install the Nutanix CSI Driver in PE mode (interacting with Prism Element):
+
+        apiVersion: crd.nutanix.com/v1alpha1
+        kind: NutanixCsiStorage
+        metadata:
+          name: nutanixcsistorage
+          namespace: openshift-cluster-csi-drivers
+        spec:
+          ntnxInitConfigMap:
+            usePC : false
+
+CSI 3.3.8 supports PC service account-based authentication in Nutanix Volumes and Nutanix Files. Instead of using username and password secrets, Prism Central administrators can now create service accounts, configure RBAC, and use generated API keys for secure storage provisioning.
+
+6. To install the Nutanix CSI Driver with service account-based authentication:
+
+        apiVersion: crd.nutanix.com/v1alpha1
+        kind: NutanixCsiStorage
+        metadata:
+          name: nutanixcsistorage
+          namespace: openshift-cluster-csi-drivers
+        spec:
+          authType: "service-auth"
+          ntnxInitConfigMap:
+            usePC : true
+
 ### Configuring the K8s secret and storage class
 
-In order to use this driver, create the relevant storage classes and secrets using the OpenShift CLI, by followinig the below section:
+In order to use this driver, create the relevant storage classes and secrets using the OpenShift CLI, by following the below section:
 
-1. Create a secret yaml file like the below example and apply (`oc -n openshift-cluster-csi-drivers apply -f `).
+1. Depending on the mode of interaction of the CSI Driver (PC or PE), create a secret YAML file like the below example and apply it (`oc -n openshift-cluster-csi-drivers apply -f `).
 
+    ### Nutanix PC based secret
     apiVersion: v1
     kind: Secret
     metadata:
-      name: ntnx-secret
+      name: ntnx-pc-secret
       namespace: openshift-cluster-csi-drivers
     stringData:
-      # prism-element-ip:prism-port:admin:password
-      key: 10.0.0.14:9440:admin:password
+      # prism-central-ip:prism-port:username:password.
+      key: 1.2.3.4:9440:admin:password
+
+    ### Nutanix PE based secret
+    apiVersion: v1
+    kind: Secret
+    metadata:
+      name: ntnx-pe-secret
+      namespace: openshift-cluster-csi-drivers
+    stringData:
+      # prism-element-ip:prism-port:username:password.
+      key: 1.2.3.4:9440:admin:password
+      files-key: "fileserver01.sample.com:csi:password1" # For dynamic files mode
+
+    ### Nutanix PC secret with service account based authentication
+    apiVersion: v1
+    kind: Secret
+    metadata:
+      name: ntnx-pc-secret
+      namespace: openshift-cluster-csi-drivers
+    type: Opaque
+    stringData:
+      host: 1.2.3.4
+      port: 9440
+      key_type: "api-key"
+      key_value: "xxxxxxxxxxx"
+      auth_type: "service-auth"
+
+2. Depending on the mode of interaction of the CSI Driver (PC or PE) and the storageType (NutanixVolumes or NutanixFiles), create a storage class YAML file like the below example and apply it (`oc -n openshift-cluster-csi-drivers apply -f `).
+ + ### Nutanix Volumes on PE based installation kind: StorageClass apiVersion: storage.k8s.io/v1 @@ -56,25 +119,113 @@ In order to use this driver, create the relevant storage classes and secrets usi name: nutanix-volume provisioner: csi.nutanix.com parameters: - csi.storage.k8s.io/provisioner-secret-name: ntnx-secret + csi.storage.k8s.io/provisioner-secret-name: ntnx-pe-secret + csi.storage.k8s.io/provisioner-secret-namespace: openshift-cluster-csi-drivers + csi.storage.k8s.io/node-publish-secret-name: ntnx-pe-secret + csi.storage.k8s.io/node-publish-secret-namespace: openshift-cluster-csi-drivers + csi.storage.k8s.io/controller-expand-secret-name: ntnx-pe-secret + csi.storage.k8s.io/controller-expand-secret-namespace: openshift-cluster-csi-drivers + csi.storage.k8s.io/controller-publish-secret-name: ntnx-pe-secret + csi.storage.k8s.io/controller-publish-secret-namespace: openshift-cluster-csi-drivers + csi.storage.k8s.io/fstype: ext4 + storageContainer: default-container + storageType: NutanixVolumes + #description: "description added to each storage object created by the driver" + #isSegmentedIscsiNetwork: "false" + #whitelistIPMode: ENABLED + #chapAuth: ENABLED + #isLVMVolume: "false" + #numLVMDisks: 4 + allowVolumeExpansion: true + reclaimPolicy: Delete + + ### Nutanix dynamic files on PE based installation + + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: nutanix-dynfiles + provisioner: csi.nutanix.com + parameters: + dynamicProv: ENABLED + nfsServerName: fs + csi.storage.k8s.io/provisioner-secret-name: ntnx-pe-secret csi.storage.k8s.io/provisioner-secret-namespace: openshift-cluster-csi-drivers - csi.storage.k8s.io/node-publish-secret-name: ntnx-secret + csi.storage.k8s.io/node-publish-secret-name: ntnx-pe-secret csi.storage.k8s.io/node-publish-secret-namespace: openshift-cluster-csi-drivers - csi.storage.k8s.io/controller-expand-secret-name: ntnx-secret + csi.storage.k8s.io/controller-expand-secret-name: ntnx-pe-secret csi.storage.k8s.io/controller-expand-secret-namespace: openshift-cluster-csi-drivers + csi.storage.k8s.io/controller-publish-secret-name: ntnx-pe-secret + csi.storage.k8s.io/controller-publish-secret-namespace: openshift-cluster-csi-drivers + storageType: NutanixFiles + squashType: "none" + #description: "description added to each storage object created by the driver" + allowVolumeExpansion: true + + ### Nutanix Volumes on PC based installation + + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: nutanix-volume + provisioner: csi.nutanix.com + parameters: csi.storage.k8s.io/fstype: ext4 - dataServiceEndPoint: 10.0.0.15:3260 storageContainer: default-container storageType: NutanixVolumes + #description: "description added to each storage object created by the driver" + #isSegmentedIscsiNetwork: "false" #whitelistIPMode: ENABLED #chapAuth: ENABLED + #isLVMVolume: "false" + #numLVMDisks: 4 allowVolumeExpansion: true reclaimPolicy: Delete **Note:** By default, new RHCOS based nodes are provisioned with the required `scsi-initiator-utils` package installed, but with the `iscsid` service disabled. This can result in messages like `iscsiadm: can not connect to iSCSI daemon (111)!`. When this occurs, confirm that the `iscsid.service` is running on worker nodes. It can be enabled and started globally using the Machine Config Operator or directly on each node using systemctl (`sudo systemctl enable --now iscsid`). 
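+
+For reference, enabling the service fleet-wide with the Machine Config Operator can look like the following sketch. This example is illustrative rather than taken from the CSI documentation; the object name, role label, and Ignition version are assumptions to adapt to your cluster:
+
+    apiVersion: machineconfiguration.openshift.io/v1
+    kind: MachineConfig
+    metadata:
+      # Illustrative name; any unique MachineConfig name works
+      name: 99-worker-enable-iscsid
+      labels:
+        # Targets the worker pool; adjust for other node roles
+        machineconfiguration.openshift.io/role: worker
+    spec:
+      config:
+        ignition:
+          version: 3.2.0
+        systemd:
+          units:
+            # Enable and start iscsid on every matching node
+            - name: iscsid.service
+              enabled: true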
-See the Managing Storage section of [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_5:csi-csi-plugin-storage-c.html){target=_blank} on the Nutanix Portal for more information on configuring storage classes.
+See the Managing Storage section of [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v3_3:csi-csi-plugin-storage-c.html){target=_blank} on the Nutanix Portal for more information on configuring storage classes.
+
+### Upgrading Nutanix CSI Driver from 2.6.x to 3.3
+Please read the following instructions carefully before upgrading from 2.6.x to 3.3. For more information, please refer to the [documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v3_3:CSI-Volume-Driver-v3_3)
+
+1. Please do not upgrade to the CSI 3.x operator if:
+    * You are using LVM volumes.
+
+2. To upgrade from the CSI 2.6.x operator to the CSI 3.3 operator (interacting with Prism Central):
+    * Create a Nutanix Prism Central secret as explained above.
+    * Delete the csidriver object from the cluster:
+
+    ```
+    oc delete csidriver csi.nutanix.com
+    ```
+
+    * In the installed operators, go to Nutanix CSI Operator and change the subscription channel from stable to stable-3.x.
+    If you have installed the operator with automatic update approval, the operator will be automatically upgraded to CSI 3.3, and then the nutanixcsistorage resource will be upgraded.
+    An update plan will be generated for manual updates. Upon approval, the operator will be successfully upgraded.
+
+3. Direct upgrades from CSI 2.6.x to CSI 3.3 interacting with Prism Element are not supported.
+    The only solution is to recreate the nutanixcsistorage instance by following the below procedure:
+    - In the installed operators, go to Nutanix CSI Operator and delete the nutanixcsistorage instance.
+    - Next, change the subscription channel from stable to stable-3.x.
+    - Verify the following points:
+        - Ensure a Nutanix Prism Element secret is present in the namespace.
+        - Ensure that all the storage classes with provisioner: csi.nutanix.com have a controller publish secret as explained below.
+
+    ```
+    csi.storage.k8s.io/controller-publish-secret-name: ntnx-pe-secret
+    csi.storage.k8s.io/controller-publish-secret-namespace: openshift-cluster-csi-drivers
+    ```
+
+    If this secret is not present in the storage class, please delete and recreate the storage classes with the required secrets.
+    - Create a new instance of nutanixcsistorage from this operator by specifying `usePC: false` in the YAML spec section.
+    - Caution: Moving from a CSI driver interacting with Prism Central to a CSI driver interacting with Prism Element is not supported.
+
+4. Troubleshooting:
+
+    If the upgrade was unsuccessful and you want to revert to version CSI 2.6.x, please delete the csidriver object as explained above, uninstall the operator (no need to delete the nutanixcsistorage custom resource), and install version CSI 2.6.x from the stable channel.
+
 
 ### Using the Nutanix CSI Operator on restricted networks
 
@@ -82,4 +233,4 @@ For OpenShift Container Platform clusters that are installed on restricted netwo
 The Nutanix CSI Operator is fully compatible with a restricted networks architecture and supported in disconnected mode. Follow the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/operators/admin/olm-restricted-networks.html){target=_blank} to configure.
-You need to mirror the `certified-operator-index` and keep the `nutanixcsioperator` package in your pruned index.
\ No newline at end of file
+You need to mirror the `certified-operator-index` and keep the `nutanixcsioperator` package in your pruned index.

From e8b887ab4f15359f1db41e430c9268c08816791f Mon Sep 17 00:00:00 2001
From: Abhay Aggrawal
Date: Wed, 3 Dec 2025 13:35:37 +0530
Subject: [PATCH 13/15] NCN-111085: Update CAPX opendocs for release v1.8.x
 (#76)

* docs for capx v1.8.x
---
 docs/capx/latest                              |   2 +-
 docs/capx/v1.4.x/user_requirements.md         |   1 +
 docs/capx/v1.5.x/user_requirements.md         |   1 +
 docs/capx/v1.6.x/user_requirements.md         |   1 +
 docs/capx/v1.7.x/user_requirements.md         |   1 +
 docs/capx/v1.8.x/addons/install_csi_driver.md | 215 ++++++++++++++
 docs/capx/v1.8.x/credential_management.md     |  93 ++++++
 docs/capx/v1.8.x/experimental/autoscaler.md   | 129 ++++++++
 docs/capx/v1.8.x/experimental/oidc.md         |  31 ++
 docs/capx/v1.8.x/experimental/proxy.md        |  62 ++++
 .../v1.8.x/experimental/registry_mirror.md    |  96 ++++++
 docs/capx/v1.8.x/experimental/vpc.md          |  40 +++
 docs/capx/v1.8.x/getting_started.md           | 280 ++++++++++++++++++
 docs/capx/v1.8.x/pc_certificates.md           | 149 ++++++++++
 docs/capx/v1.8.x/port_requirements.md         |  19 ++
 .../tasks/capx_v18x_upgrade_procedure.md      |  83 ++++++
 .../tasks/modify_machine_configuration.md     |  11 +
 docs/capx/v1.8.x/topology/capx_multi_pe.md    |  30 ++
 docs/capx/v1.8.x/troubleshooting.md           |  13 +
 docs/capx/v1.8.x/types/nutanix_cluster.md     |  55 ++++
 .../v1.8.x/types/nutanix_failure_domains.md   |  99 +++++++
 .../v1.8.x/types/nutanix_machine_template.md  | 124 ++++++++
 docs/capx/v1.8.x/user_requirements.md         |  67 +++++
 docs/capx/v1.8.x/validated_integrations.md    |  56 ++++
 mkdocs.yml                                    |  27 +-
 25 files changed, 1683 insertions(+), 2 deletions(-)
 create mode 100644 docs/capx/v1.8.x/addons/install_csi_driver.md
 create mode 100644 docs/capx/v1.8.x/credential_management.md
 create mode 100644 docs/capx/v1.8.x/experimental/autoscaler.md
 create mode 100644 docs/capx/v1.8.x/experimental/oidc.md
 create mode 100644 docs/capx/v1.8.x/experimental/proxy.md
 create mode 100644 docs/capx/v1.8.x/experimental/registry_mirror.md
 create mode 100644 docs/capx/v1.8.x/experimental/vpc.md
 create mode 100644 docs/capx/v1.8.x/getting_started.md
 create mode 100644 docs/capx/v1.8.x/pc_certificates.md
 create mode 100644 docs/capx/v1.8.x/port_requirements.md
 create mode 100644 docs/capx/v1.8.x/tasks/capx_v18x_upgrade_procedure.md
 create mode 100644 docs/capx/v1.8.x/tasks/modify_machine_configuration.md
 create mode 100644 docs/capx/v1.8.x/topology/capx_multi_pe.md
 create mode 100644 docs/capx/v1.8.x/troubleshooting.md
 create mode 100644 docs/capx/v1.8.x/types/nutanix_cluster.md
 create mode 100644 docs/capx/v1.8.x/types/nutanix_failure_domains.md
 create mode 100644 docs/capx/v1.8.x/types/nutanix_machine_template.md
 create mode 100644 docs/capx/v1.8.x/user_requirements.md
 create mode 100644 docs/capx/v1.8.x/validated_integrations.md
diff --git a/docs/capx/latest b/docs/capx/latest
index bbdaae4e..3979bfc0 120000
--- a/docs/capx/latest
+++ b/docs/capx/latest
@@ -1 +1 @@
-v1.7.x
\ No newline at end of file
+v1.8.x
\ No newline at end of file
diff --git a/docs/capx/v1.4.x/user_requirements.md b/docs/capx/v1.4.x/user_requirements.md
index 5a4b8604..05e971a5 100644
--- a/docs/capx/v1.4.x/user_requirements.md
+++ b/docs/capx/v1.4.x/user_requirements.md
@@ -23,6 +23,7 @@ The following permissions are required for Prism Central domain users:
 - Delete Name Category
 - Delete Value Category
 - Delete Virtual Machine
+- Detach Volume Group From AHV VM
 - View Category Mapping
 - View Cluster
 - View Image
diff --git a/docs/capx/v1.5.x/user_requirements.md b/docs/capx/v1.5.x/user_requirements.md
index 5a4b8604..05e971a5 100644
--- a/docs/capx/v1.5.x/user_requirements.md
+++ b/docs/capx/v1.5.x/user_requirements.md
@@ -23,6 +23,7 @@ The following permissions are required for Prism Central domain users:
 - Delete Name Category
 - Delete Value Category
 - Delete Virtual Machine
+- Detach Volume Group From AHV VM
 - View Category Mapping
 - View Cluster
 - View Image
diff --git a/docs/capx/v1.6.x/user_requirements.md b/docs/capx/v1.6.x/user_requirements.md
index 5a4b8604..05e971a5 100644
--- a/docs/capx/v1.6.x/user_requirements.md
+++ b/docs/capx/v1.6.x/user_requirements.md
@@ -23,6 +23,7 @@ The following permissions are required for Prism Central domain users:
 - Delete Name Category
 - Delete Value Category
 - Delete Virtual Machine
+- Detach Volume Group From AHV VM
 - View Category Mapping
 - View Cluster
 - View Image
diff --git a/docs/capx/v1.7.x/user_requirements.md b/docs/capx/v1.7.x/user_requirements.md
index 5a4b8604..05e971a5 100644
--- a/docs/capx/v1.7.x/user_requirements.md
+++ b/docs/capx/v1.7.x/user_requirements.md
@@ -23,6 +23,7 @@ The following permissions are required for Prism Central domain users:
 - Delete Name Category
 - Delete Value Category
 - Delete Virtual Machine
+- Detach Volume Group From AHV VM
 - View Category Mapping
 - View Cluster
 - View Image
diff --git a/docs/capx/v1.8.x/addons/install_csi_driver.md b/docs/capx/v1.8.x/addons/install_csi_driver.md
new file mode 100644
index 00000000..afb4bdc8
--- /dev/null
+++ b/docs/capx/v1.8.x/addons/install_csi_driver.md
@@ -0,0 +1,215 @@
+# Nutanix CSI Driver installation with CAPX
+
+The Nutanix CSI driver is fully supported on CAPI/CAPX deployed clusters where all the nodes meet the [Nutanix CSI driver prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+
+There are three methods to install the Nutanix CSI driver on a CAPI/CAPX cluster:
+
+- Helm
+- ClusterResourceSet
+- CAPX Flavor
+
+For more information, check the next sections.
+
+## CAPI Workload cluster prerequisites for the Nutanix CSI Driver
+
+Kubernetes workers need the following prerequisites to use the Nutanix CSI Drivers:
+
+- iSCSI initiator package (for Volumes based block storage)
+- NFS client package (for Files based storage)
+
+These packages may already be present in the image you use with your infrastructure provider, or you can rely on your bootstrap provider to install them. More info is available in the [Prerequisites docs](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-plugin-prerequisites-r.html){target=_blank}.
+
+The package names and installation method will also vary depending on the operating system you plan to use.
+
+In the example below, the `kubeadm` bootstrap provider is used to deploy these packages on top of an Ubuntu 20.04 image. The `kubeadm` bootstrap provider allows defining `preKubeadmCommands` that will be launched before Kubernetes cluster creation. These `preKubeadmCommands` can be defined both in `KubeadmControlPlane` for master nodes and in `KubeadmConfigTemplate` for worker nodes.
+
+In the example with an Ubuntu 20.04 image, both `KubeadmControlPlane` and `KubeadmConfigTemplate` must be modified as in the example below:
+
+```yaml
+spec:
+  template:
+    spec:
+      # .......
+      preKubeadmCommands:
+        - echo "before kubeadm call" > /var/log/prekubeadm.log
+        - apt update
+        - apt install -y nfs-common open-iscsi
+        - systemctl enable --now iscsid
+```
+## Install the Nutanix CSI Driver with Helm
+
+A recent [Helm](https://helm.sh){target=_blank} version is needed (tested with Helm v3.10.1).
+
+The example below must be applied on a ready workload cluster. The workload cluster's kubeconfig can be retrieved and used to connect with the following command:
+
+```shell
+clusterctl get kubeconfig $CLUSTER_NAME -n $CLUSTER_NAMESPACE > $CLUSTER_NAME-KUBECONFIG
+export KUBECONFIG=$(pwd)/$CLUSTER_NAME-KUBECONFIG
+```
+
+Once connected to the cluster, follow the [CSI documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:csi-csi-driver-install-t.html){target=_blank}.
+
+First, install the [nutanix-csi-snapshot](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-snapshot){target=_blank} chart followed by the [nutanix-csi-storage](https://github.com/nutanix/helm/tree/master/charts/nutanix-csi-storage){target=_blank} chart.
+
+See an example below:
+
+```shell
+# Add the official Nutanix Helm repo and get the latest update
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+# Install the nutanix-csi-snapshot chart
+helm install nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system --create-namespace
+
+# Install the nutanix-csi-storage chart
+helm install nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system --set createSecret=false
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Install the Nutanix CSI Driver with `ClusterResourceSet`
+
+The `ClusterResourceSet` feature was introduced to automatically apply a set of resources (such as CNI/CSI) defined by administrators to matching created/existing workload clusters.
+
+### Enabling the `ClusterResourceSet` feature
+
+At the time of writing, `ClusterResourceSet` is an experimental feature that must be enabled during the initialization of a management cluster with the `EXP_CLUSTER_RESOURCE_SET` feature gate.
+
+To do this, add `EXP_CLUSTER_RESOURCE_SET: "true"` in the `clusterctl` configuration file or just `export EXP_CLUSTER_RESOURCE_SET=true` before initializing the management cluster with `clusterctl init`.
+
+If the management cluster is already initialized, the `ClusterResourceSet` can be enabled by changing the configuration of the `capi-controller-manager` deployment in the `capi-system` namespace.
+
+```shell
+kubectl edit deployment -n capi-system capi-controller-manager
+```
+
+Locate the section below:
+
+```yaml
+    - args:
+        - --leader-elect
+        - --metrics-bind-addr=localhost:8080
+        - --feature-gates=MachinePool=false,ClusterResourceSet=false,ClusterTopology=false
+```
+
+Then replace `ClusterResourceSet=false` with `ClusterResourceSet=true`.
+
+!!! note
+    Editing the `deployment` resource will cause Kubernetes to automatically start new versions of the containers with the feature enabled.
+
+
+
+### Prepare the Nutanix CSI `ClusterResourceSet`
+
+#### Create the `ConfigMap` for the CSI Plugin
+
+First, create a `ConfigMap` that contains a YAML manifest with all resources to install the Nutanix CSI driver.
+
+Since the Nutanix CSI Driver is provided as a Helm chart, use `helm` to extract it before creating the `ConfigMap`.
+See an example below:
+
+```shell
+helm repo add nutanix https://nutanix.github.io/helm/
+helm repo update
+
+kubectl create ns ntnx-system --dry-run=client -o yaml > nutanix-csi-namespace.yaml
+helm template nutanix-csi-snapshot nutanix/nutanix-csi-snapshot -n ntnx-system > nutanix-csi-snapshot.yaml
+helm template nutanix-storage nutanix/nutanix-csi-storage -n ntnx-system > nutanix-csi-storage.yaml
+
+kubectl create configmap nutanix-csi-crs --from-file=nutanix-csi-namespace.yaml --from-file=nutanix-csi-snapshot.yaml --from-file=nutanix-csi-storage.yaml
+```
+
+#### Create the `ClusterResourceSet`
+
+Next, create the `ClusterResourceSet` resource that will map the `ConfigMap` defined above to clusters using a `clusterSelector`.
+
+The `ClusterResourceSet` needs to be created inside the management cluster. See an example below:
+
+```yaml
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha3
+kind: ClusterResourceSet
+metadata:
+  name: nutanix-csi-crs
+spec:
+  clusterSelector:
+    matchLabels:
+      csi: nutanix
+  resources:
+    - kind: ConfigMap
+      name: nutanix-csi-crs
+```
+
+The `clusterSelector` field controls how Cluster API will match this `ClusterResourceSet` on one or more workload clusters. In the example scenario, the `matchLabels` approach is being used where the `ClusterResourceSet` will be applied to all workload clusters having the `csi: nutanix` label present. If the label isn't present, the `ClusterResourceSet` won't apply to that workload cluster.
+
+The `resources` field references the `ConfigMap` created above, which contains the manifests for installing the Nutanix CSI driver.
+
+#### Assign the `ClusterResourceSet` to a workload cluster
+
+Assign this `ClusterResourceSet` to the workload cluster by adding the correct label to the `Cluster` resource.
+
+This can be done before workload cluster creation by editing the output of the `clusterctl generate cluster` command or by modifying an already deployed workload cluster.
+
+In both cases, `Cluster` resources should look like this:
+
+```yaml
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: workload-cluster-name
+  namespace: workload-cluster-namespace
+  labels:
+    csi: nutanix
+# ...
+```
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Install the Nutanix CSI Driver with a CAPX flavor
+
+The CAPX provider can utilize a flavor to automatically deploy the Nutanix CSI using a `ClusterResourceSet`.
+
+### Prerequisites
+
+The following requirements must be met:
+
+- The operating system must meet the [Nutanix CSI OS prerequisites](#capi-workload-cluster-prerequisites-for-the-nutanix-csi-driver).
+- The management cluster must be initialized with the [`ClusterResourceSet` feature gate](#enabling-the-clusterresourceset-feature) enabled.
+
+### Installation
+
+Specify the `csi` flavor during workload cluster creation.
+See an example below:
+
+```shell
+clusterctl generate cluster my-cluster -f csi
+```
+
+Additional environment variables are required:
+
+- `WEBHOOK_CA`: Base64 encoded CA certificate used to sign the webhook certificate
+- `WEBHOOK_CERT`: Base64 certificate for the webhook validation component
+- `WEBHOOK_KEY`: Base64 key for the webhook validation component
+
+The three components referenced above can be automatically created and referenced using [this script](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/main/scripts/gen-self-cert.sh){target=_blank}:
+
+```
+source scripts/gen-self-cert.sh
+```
+
+The certificate must reference the following names:
+
+- csi-snapshot-webhook
+- csi-snapshot-webhook.ntnx-system
+- csi-snapshot-webhook.ntnx-system.svc
+
+!!! warning
+    For correct Nutanix CSI driver deployment, a fully functional CNI deployment must be present.
+
+## Nutanix CSI Driver Configuration
+
+After the driver is installed, it must be configured for use by minimally defining a `Secret` and `StorageClass`.
+
+This can be done manually in the workload clusters or by using a `ClusterResourceSet` in the management cluster as explained above.
+
+See the Official [CSI Driver documentation](https://portal.nutanix.com/page/documents/details?targetId=CSI-Volume-Driver-v2_6:CSI-Volume-Driver-v2_6){target=_blank} on the Nutanix Portal for more configuration information.
diff --git a/docs/capx/v1.8.x/credential_management.md b/docs/capx/v1.8.x/credential_management.md
new file mode 100644
index 00000000..bebbc5a0
--- /dev/null
+++ b/docs/capx/v1.8.x/credential_management.md
@@ -0,0 +1,93 @@
+# Credential Management
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs to manage the required Kubernetes cluster infrastructure resources.
+
+PC credentials are required to authenticate to the PC APIs. CAPX currently supports two mechanisms to supply the required credentials:
+
+- Credentials injected into the CAPX manager deployment
+- Workload cluster specific credentials
+
+## Credentials injected into the CAPX manager deployment
+By default, credentials will be injected into the CAPX manager deployment when CAPX is initialized. See the [getting started guide](./getting_started.md) for more information on the initialization.
+
+Upon initialization, a `nutanix-creds` secret will automatically be created in the `capx-system` namespace. This secret will contain the values supplied via the `NUTANIX_USER` and `NUTANIX_PASSWORD` parameters.
+
+The `nutanix-creds` secret will be used for workload cluster deployment if no other credential is supplied.
+
+### Example
+An example of the automatically created `nutanix-creds` secret can be found below:
+```yaml
+---
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: nutanix-creds
+  namespace: capx-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+## Workload cluster specific credentials
+Users can override the [credentials injected into the CAPX manager deployment](#credentials-injected-into-the-capx-manager-deployment) by supplying a credential specific to a workload cluster. The credentials can be supplied by creating a secret in the same namespace as the `NutanixCluster` resource.
+
+The secret can be referenced by adding a `credentialRef` inside the `prismCentral` attribute contained in the `NutanixCluster`.
+
+The secret will also be deleted when the `NutanixCluster` is deleted.
+
+Note: There is a 1:1 relation between the secret and the `NutanixCluster` object.
+
+### Example
+Create a secret in the namespace of the `NutanixCluster`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ""
+  namespace: ""
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "",
+            "password": ""
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+Add a `prismCentral` and corresponding `credentialRef` to the `NutanixCluster`:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ""
+  namespace: ""
+spec:
+  prismCentral:
+    ...
+    credentialRef:
+      name: ""
+      kind: Secret
+...
+```
+
+See the [NutanixCluster](./types/nutanix_cluster.md) documentation for all supported configuration parameters for the `prismCentral` and `credentialRef` attributes.
\ No newline at end of file
diff --git a/docs/capx/v1.8.x/experimental/autoscaler.md b/docs/capx/v1.8.x/experimental/autoscaler.md
new file mode 100644
index 00000000..2af57213
--- /dev/null
+++ b/docs/capx/v1.8.x/experimental/autoscaler.md
@@ -0,0 +1,129 @@
+# Using Autoscaler in combination with CAPX
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+[Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank} can be used in combination with Cluster API to automatically add or remove machines in a cluster.
+
+Autoscaler can be used in different deployment scenarios. This page will provide an overview of multiple autoscaler deployment scenarios in combination with CAPX.
+See the [Testing](#testing) section to see how scale-up/scale-down events can be triggered to validate the autoscaler behaviour.
+
+More in-depth information on Autoscaler functionality can be found in the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md){target=_blank}.
+
+All Autoscaler configuration parameters can be found [here](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank}.
+
+## Scenario 1: Management cluster managing an external workload cluster
+In this scenario, Autoscaler will be running on a management cluster and it will manage an external workload cluster. See the management cluster managing an external workload cluster section of the [Kubernetes documentation](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-management-cluster-using-service-account-credentials-with-separate-workload-cluster){target=_blank} for more information.
+
+### Steps
+1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
+
+    !!! note
+        Make sure a CNI is installed in the workload cluster.
+
+2. Download the example [Autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
+3. Modify the `deployment.yaml` file:
+    - Change the namespace of all resources to the namespace of the workload cluster.
+    - Choose an autoscaler image.
+    - Change the following parameters in the `Deployment` resource:
+    ```YAML
+    spec:
+      containers:
+      name: cluster-autoscaler
+      command:
+      - /cluster-autoscaler
+      args:
+      - --cloud-provider=clusterapi
+      - --kubeconfig=/mnt/kubeconfig/kubeconfig.yml
+      - --clusterapi-cloud-config-authoritative
+      - -v=1
+      volumeMounts:
+      - mountPath: /mnt/kubeconfig
+        name: kubeconfig
+        readOnly: true
+      ...
+      volumes:
+      - name: kubeconfig
+        secret:
+          secretName: -kubeconfig
+          items:
+          - key: value
+            path: kubeconfig.yml
+    ```
+4. Apply the `deployment.yaml` file.
+```bash
+kubectl apply -f deployment.yaml
+```
+5. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
+6. Test Autoscaler. Go to the [Testing](#testing) section.
+
+## Scenario 2: Autoscaler running on workload cluster
+In this scenario, Autoscaler will be deployed [on top of the workload cluster](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/README.md#autoscaler-running-in-a-joined-cluster-using-service-account-credentials){target=_blank} directly. In order for Autoscaler to work, it is required that the workload cluster resources are moved from the management cluster to the workload cluster.
+
+### Steps
+1. Deploy a management cluster and workload cluster. The [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} can be used as a starting point.
+2. Get the kubeconfig file for the workload cluster and use this kubeconfig to log in to the workload cluster.
+```bash
+clusterctl get kubeconfig -n /path/to/kubeconfig
+```
+3. Install a CNI in the workload cluster.
+4. Initialise the CAPX components on top of the workload cluster:
+```bash
+clusterctl init --infrastructure nutanix
+```
+5. Migrate the workload cluster custom resources to the workload cluster. Run the following command from the management cluster:
+```bash
+clusterctl move -n --to-kubeconfig /path/to/kubeconfig
+```
+6. Verify if the cluster has been migrated by running the following command on the workload cluster:
+```bash
+kubectl get cluster -A
+```
+7. Download the example [autoscaler deployment file](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/clusterapi/examples/deployment.yaml){target=_blank}.
+8. Create the Autoscaler namespace:
+```bash
+kubectl create ns autoscaler
+```
+9. Apply the `deployment.yaml` file:
+```bash
+kubectl apply -f deployment.yaml
+```
+10. Add the [annotations](#autoscaler-node-group-annotations) to the workload cluster `MachineDeployment` resource.
+11. Test Autoscaler. Go to the [Testing](#testing) section.
+
+## Testing
+
+1. Deploy an example Kubernetes application. For example, the one used in the [Kubernetes HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).
+```bash
+kubectl apply -f https://k8s.io/examples/application/php-apache.yaml
+```
+2. Increase the number of replicas of the application to trigger a scale-up event:
+```
+kubectl scale deployment php-apache --replicas 100
+```
+3. Decrease the number of replicas of the application again to trigger a scale-down event.
+
+    !!! note
+        In case of issues, check the logs of the Autoscaler pods.
+
+4. After a while, CAPX will add more machines. Refer to the [Autoscaler configuration parameters](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#what-are-the-parameters-to-ca){target=_blank} to tweak the behaviour and timeouts.
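+
+While a scale event is in progress, it can also help to watch the Cluster API machines from the cluster that hosts the CAPI resources. The sketch below is illustrative and not part of the upstream procedure:
+
+```bash
+# Watch Machine resources across all namespaces while Autoscaler
+# adds or removes capacity
+kubectl get machines -A -w
+```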
+
+## Autoscaler node group annotations
+Autoscaler uses the following annotations to define the upper and lower boundaries of the managed machines:
+
+| Annotation                                                  | Example Value | Description                                    |
+|-------------------------------------------------------------|---------------|-----------------------------------------------|
+| cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size | 5             | Maximum number of machines in this node group |
+| cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size | 1             | Minimum number of machines in this node group |
+
+These annotations must be applied to the `MachineDeployment` resources of a CAPX cluster.
+
+### Example
+```YAML
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  annotations:
+    cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "5"
+    cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
```
\ No newline at end of file
diff --git a/docs/capx/v1.8.x/experimental/oidc.md b/docs/capx/v1.8.x/experimental/oidc.md
new file mode 100644
index 00000000..0c274121
--- /dev/null
+++ b/docs/capx/v1.8.x/experimental/oidc.md
@@ -0,0 +1,31 @@
+# OIDC integration
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+Kubernetes allows users to authenticate using various authentication mechanisms. One of these mechanisms is OIDC. Information on how Kubernetes interacts with OIDC providers can be found in the [OpenID Connect Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens){target=_blank} section of the official Kubernetes documentation.
+
+
+Follow the steps below to configure a CAPX cluster to use an OIDC identity provider.
+
+## Steps
+1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
+2. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
+3. Modify/add the `spec.kubeadmConfigSpec.clusterConfiguration.apiServer.extraArgs` attribute and add the required [API server parameters](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server){target=_blank}. See the [example](#example) below.
+4. Apply the `cluster.yaml` file.
+5. Log in with the OIDC provider once the cluster is provisioned.
+
+## Example
+```YAML
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          ...
+          oidc-client-id: 
+          oidc-issuer-url: 
+          ...
+```
+
diff --git a/docs/capx/v1.8.x/experimental/proxy.md b/docs/capx/v1.8.x/experimental/proxy.md
new file mode 100644
index 00000000..c8f940d4
--- /dev/null
+++ b/docs/capx/v1.8.x/experimental/proxy.md
@@ -0,0 +1,62 @@
+# Proxy configuration
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+CAPX can be configured to use a proxy to connect to external networks. This proxy configuration needs to be applied to control plane and worker nodes.
+
+Follow the steps below to configure a CAPX cluster to use a proxy.
+
+## Steps
+1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
Do not apply the `cluster.yaml` file.
+2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the proxy configuration.
+    1. `KubeadmControlPlane`:
+        * Add the proxy configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
+        * Add `systemctl` commands to apply the proxy config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list.
+    2. `KubeadmConfigTemplate`:
+        * Add the proxy configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
+        * Add `systemctl` commands to apply the proxy config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list.
+3. Apply the `cluster.yaml` file.
+
+## Example
+
+```YAML
+---
+# controlplane proxy settings
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    files:
+    - content: |
+        [Service]
+        Environment="HTTP_PROXY="
+        Environment="HTTPS_PROXY="
+        Environment="NO_PROXY="
+      owner: root:root
+      path: /etc/systemd/system/containerd.service.d/http-proxy.conf
+    ...
+    preKubeadmCommands:
+    - sudo systemctl daemon-reload
+    - sudo systemctl restart containerd
+    ...
+---
+# worker proxy settings
+kind: KubeadmConfigTemplate
+spec:
+  template:
+    spec:
+      files:
+      - content: |
+          [Service]
+          Environment="HTTP_PROXY="
+          Environment="HTTPS_PROXY="
+          Environment="NO_PROXY="
+        owner: root:root
+        path: /etc/systemd/system/containerd.service.d/http-proxy.conf
+      ...
+      preKubeadmCommands:
+      - sudo systemctl daemon-reload
+      - sudo systemctl restart containerd
+      ...
+```
+
diff --git a/docs/capx/v1.8.x/experimental/registry_mirror.md b/docs/capx/v1.8.x/experimental/registry_mirror.md
new file mode 100644
index 00000000..307a9425
--- /dev/null
+++ b/docs/capx/v1.8.x/experimental/registry_mirror.md
@@ -0,0 +1,96 @@
+# Registry Mirror configuration
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+CAPX can be configured to use a private registry to act as a mirror of an external public registry. This registry mirror configuration needs to be applied to control plane and worker nodes.
+
+Follow the steps below to configure a CAPX cluster to use a registry mirror.
+
+## Steps
+1. Generate a `cluster.yaml` file with the required CAPX cluster configuration. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
+2. Edit the `cluster.yaml` file and modify the following resources as shown in the [example](#example) below to add the registry mirror configuration.
+    1. `KubeadmControlPlane`:
+        * Add the registry mirror configuration to the `spec.kubeadmConfigSpec.files` list. Do not modify other items in the list.
+        * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.kubeadmConfigSpec.preKubeadmCommands`. Do not modify other items in the list.
+    2. `KubeadmConfigTemplate`:
+        * Add the registry mirror configuration to the `spec.template.spec.files` list. Do not modify other items in the list.
+        * Update `/etc/containerd/config.toml` commands to apply the registry mirror config in `spec.template.spec.preKubeadmCommands`. Do not modify other items in the list.
+3. Apply the `cluster.yaml` file.
+
+## Example
+
+This example will configure a registry mirror for the following namespaces:
+
+* registry.k8s.io
+* ghcr.io
+* quay.io
+
+and redirect them to corresponding projects of the `` registry.
+
+```YAML
+---
+# controlplane registry mirror settings
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    files:
+    - content: |
+        [host."https:///v2/registry.k8s.io"]
+          capabilities = ["pull", "resolve"]
+          skip_verify = false
+          override_path = true
+      owner: root:root
+      path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
+    - content: |
+        [host."https:///v2/ghcr.io"]
+          capabilities = ["pull", "resolve"]
+          skip_verify = false
+          override_path = true
+      owner: root:root
+      path: /etc/containerd/certs.d/ghcr.io/hosts.toml
+    - content: |
+        [host."https:///v2/quay.io"]
+          capabilities = ["pull", "resolve"]
+          skip_verify = false
+          override_path = true
+      owner: root:root
+      path: /etc/containerd/certs.d/quay.io/hosts.toml
+    ...
+    preKubeadmCommands:
+    - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
+    ...
+---
+# worker registry mirror settings
+kind: KubeadmConfigTemplate
+spec:
+  template:
+    spec:
+      files:
+      - content: |
+          [host."https:///v2/registry.k8s.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/registry.k8s.io/hosts.toml
+      - content: |
+          [host."https:///v2/ghcr.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/ghcr.io/hosts.toml
+      - content: |
+          [host."https:///v2/quay.io"]
+            capabilities = ["pull", "resolve"]
+            skip_verify = false
+            override_path = true
+        owner: root:root
+        path: /etc/containerd/certs.d/quay.io/hosts.toml
+      ...
+      preKubeadmCommands:
+      - echo '\n[plugins."io.containerd.grpc.v1.cri".registry]\n config_path = "/etc/containerd/certs.d"' >> /etc/containerd/config.toml
+      ...
+```
+
diff --git a/docs/capx/v1.8.x/experimental/vpc.md b/docs/capx/v1.8.x/experimental/vpc.md
new file mode 100644
index 00000000..3513e47e
--- /dev/null
+++ b/docs/capx/v1.8.x/experimental/vpc.md
@@ -0,0 +1,40 @@
+# Creating a workload CAPX cluster in a Nutanix Flow VPC
+
+!!! warning
+    The scenario and features described on this page are experimental. It's important to note that they have not been fully validated.
+
+!!! note
+    Nutanix Flow VPCs are only validated with CAPX 1.1.3+
+
+[Nutanix Flow Virtual Networking](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9:Nutanix-Flow-Virtual-Networking-Guide-vpc_2022_9){target=_blank} allows users to create Virtual Private Clouds (VPCs) with Overlay networking.
+The steps below illustrate how a CAPX cluster can be deployed inside an overlay subnet (NAT) inside a VPC while the management cluster resides outside of the VPC.
+
+
+## Steps
+1. [Request a floating IP](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Flow-Networking-Guide:ear-flow-nw-request-floating-ip-pc-t.html){target=_blank}.
+2. Link the floating IP to an internal IP address inside the overlay subnet that will be used to deploy the CAPX cluster. This address will be assigned to the CAPX load balancer. To prevent IP conflicts, make sure the IP address is not part of the IP pool defined in the subnet.
+3. Generate a `cluster.yaml` file with the required CAPX cluster configuration where the `CONTROL_PLANE_ENDPOINT_IP` is set to the floating IP requested in the first step. Refer to the [Getting Started](../getting_started.md){target=_blank} page for more information on how to generate a `cluster.yaml` file. Do not apply the `cluster.yaml` file.
+4. Edit the `cluster.yaml` file and search for the `KubeadmControlPlane` resource.
+5. Modify the `spec.kubeadmConfigSpec.files.*.content` attribute and change the `kube-vip` definition as shown in the [example](#example) below.
+6. Apply the `cluster.yaml` file.
+7. When the CAPX workload cluster is deployed, it will be reachable via the floating IP.
+
+## Example
+```YAML
+kind: KubeadmControlPlane
+spec:
+  kubeadmConfigSpec:
+    files:
+    - content: |
+        apiVersion: v1
+        kind: Pod
+        metadata:
+          name: kube-vip
+          namespace: kube-system
+        spec:
+          containers:
+          - env:
+            - name: address
+              value: ""
+```
+
diff --git a/docs/capx/v1.8.x/getting_started.md b/docs/capx/v1.8.x/getting_started.md
new file mode 100644
index 00000000..5a002ed8
--- /dev/null
+++ b/docs/capx/v1.8.x/getting_started.md
@@ -0,0 +1,280 @@
+# Getting Started
+
+This is a guide on getting started with Cluster API Provider Nutanix Cloud Infrastructure (CAPX). To learn more about Cluster API, check out the [Cluster API book](https://cluster-api.sigs.k8s.io/){target=_blank}.
+
+For more information on how to install the Nutanix CSI Driver on a CAPX cluster, visit [Nutanix CSI Driver installation with CAPX](./addons/install_csi_driver.md).
+
+For more information on how CAPX handles credentials, visit [Credential Management](./credential_management.md).
+
+For more information on the port requirements for CAPX, visit [Port Requirements](./port_requirements.md).
+
+!!! note
+    [Nutanix Cloud Controller Manager (CCM)](../../ccm/latest/overview.md) is a mandatory component starting from CAPX v1.3.0. Ensure all CAPX-managed Kubernetes clusters are configured to use Nutanix CCM before upgrading to v1.3.0 or later. See [CAPX v1.8.x Upgrade Procedure](./tasks/capx_v18x_upgrade_procedure.md).
+
+## Production Workflow
+
+### Build OS image for NutanixMachineTemplate resource
+Cluster API Provider Nutanix Cloud Infrastructure (CAPX) uses the [Image Builder](https://image-builder.sigs.k8s.io/){target=_blank} project to build OS images used for the Nutanix machines.
+
+Follow the steps detailed in [Building CAPI Images for Nutanix Cloud Platform (NCP)](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#building-capi-images-for-nutanix-cloud-platform-ncp){target=_blank} to use Image Builder on the Nutanix Cloud Platform.
+
+For a list of operating systems, visit the OS image [Configuration](https://image-builder.sigs.k8s.io/capi/providers/nutanix.html#configuration){target=_blank} page.
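+
+As a rough sketch, an image build with Image Builder typically looks like the commands below. The make target shown is a placeholder: the available Nutanix targets and the required Packer variables depend on the Image Builder version, so consult the linked documentation for the authoritative list.
+```bash
+git clone https://github.com/kubernetes-sigs/image-builder.git
+cd image-builder/images/capi
+# Example target name only; inspect the Makefile for the Nutanix
+# targets available in your checkout
+make build-nutanix-ubuntu-2204
+```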
+ +### Prerequisites for using Cluster API Provider Nutanix Cloud Infrastructure +The [Cluster API installation](https://cluster-api.sigs.k8s.io/user/quick-start.html#installation){target=_blank} section provides an overview of all required prerequisites: + +- [Common Prerequisites](https://cluster-api.sigs.k8s.io/user/quick-start.html#common-prerequisites){target=_blank} +- [Install and/or configure a Kubernetes cluster](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-andor-configure-a-kubernetes-cluster){target=_blank} +- [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl){target=_blank} +- (Optional) [Enabling Feature Gates](https://cluster-api.sigs.k8s.io/user/quick-start.html#enabling-feature-gates){target=_blank} + +Make sure these prerequisites have been met before moving to the [Configure and Install Cluster API Provider Nutanix Cloud Infrastructure](#configure-and-install-cluster-api-provider-nutanix-cloud-infrastructure) step. + +### Configure and Install Cluster API Provider Nutanix Cloud Infrastructure +To initialize Cluster API Provider Nutanix Cloud Infrastructure, `clusterctl` requires the following variables, which should be set in either `~/.cluster-api/clusterctl.yaml` or as environment variables. +``` +NUTANIX_ENDPOINT: "" # IP or FQDN of Prism Central +NUTANIX_USER: "" # Prism Central user +NUTANIX_PASSWORD: "" # Prism Central password +NUTANIX_INSECURE: false # or true + +KUBERNETES_VERSION: "v1.22.9" +WORKER_MACHINE_COUNT: 3 +NUTANIX_SSH_AUTHORIZED_KEY: "" + +NUTANIX_PRISM_ELEMENT_CLUSTER_NAME: "" +NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME: "" +NUTANIX_SUBNET_NAME: "" + +EXP_CLUSTER_RESOURCE_SET: true # Required for Nutanix CCM installation +``` + +You can also see the required list of variables by running the following: +``` +clusterctl generate cluster mycluster -i nutanix --list-variables +Required Variables: + - CONTROL_PLANE_ENDPOINT_IP + - KUBERNETES_VERSION + - NUTANIX_ENDPOINT + - NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME + - NUTANIX_PASSWORD + - NUTANIX_PRISM_ELEMENT_CLUSTER_NAME + - NUTANIX_SSH_AUTHORIZED_KEY + - NUTANIX_SUBNET_NAME + - NUTANIX_USER + +Optional Variables: + - CONTROL_PLANE_ENDPOINT_PORT (defaults to "6443") + - CONTROL_PLANE_MACHINE_COUNT (defaults to 1) + - KUBEVIP_LB_ENABLE (defaults to "false") + - KUBEVIP_SVC_ENABLE (defaults to "false") + - NAMESPACE (defaults to current Namespace in the KubeConfig file) + - NUTANIX_INSECURE (defaults to "false") + - NUTANIX_MACHINE_BOOT_TYPE (defaults to "legacy") + - NUTANIX_MACHINE_MEMORY_SIZE (defaults to "4Gi") + - NUTANIX_MACHINE_VCPU_PER_SOCKET (defaults to "1") + - NUTANIX_MACHINE_VCPU_SOCKET (defaults to "2") + - NUTANIX_PORT (defaults to "9440") + - NUTANIX_SYSTEMDISK_SIZE (defaults to "40Gi") + - WORKER_MACHINE_COUNT (defaults to 0) +``` + +!!! note + To prevent duplicate IP assignments, it is required to assign an IP-address to the `CONTROL_PLANE_ENDPOINT_IP` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster. + +!!! 
warning + Make sure [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled before running `clusterctl init` + +Now you can instantiate Cluster API with the following: +``` +clusterctl init -i nutanix +``` + +### Deploy a workload cluster on Nutanix Cloud Infrastructure +``` +export TEST_CLUSTER_NAME=mytestcluster1 +export TEST_NAMESPACE=mytestnamespace +CONTROL_PLANE_ENDPOINT_IP=x.x.x.x clusterctl generate cluster ${TEST_CLUSTER_NAME} \ + -i nutanix \ + --target-namespace ${TEST_NAMESPACE} \ + --kubernetes-version v1.22.9 \ + --control-plane-machine-count 1 \ + --worker-machine-count 3 > ./cluster.yaml +kubectl create ns ${TEST_NAMESPACE} +kubectl apply -f ./cluster.yaml -n ${TEST_NAMESPACE} +``` +To customize the configuration of the default `cluster.yaml` file generated by CAPX, visit the [NutanixCluster](./types/nutanix_cluster.md) and [NutanixMachineTemplate](./types/nutanix_machine_template.md) documentation. + +### Access a workload cluster +To access resources on the cluster, you can get the kubeconfig with the following: +``` +clusterctl get kubeconfig ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} > ${TEST_CLUSTER_NAME}.kubeconfig +kubectl --kubeconfig ./${TEST_CLUSTER_NAME}.kubeconfig get nodes +``` + +### Install CNI on a workload cluster + +You must deploy a Container Network Interface (CNI) based pod network add-on so that your pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed. + +!!! note + Take care that your pod network must not overlap with any of the host networks. You are likely to see problems if there is any overlap. If you find a collision between your network plugin's preferred pod network and some of your host networks, you must choose a suitable alternative CIDR block to use instead. It can be configured inside the `cluster.yaml` generated by `clusterctl generate cluster` before applying it. + +Several external projects provide Kubernetes pod networks using CNI, some of which also support [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/){target=_blank}. + +See a list of add-ons that implement the [Kubernetes networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model){target=_blank}. At time of writing, the most common are [Calico](https://www.tigera.io/project-calico/){target=_blank} and [Cilium](https://cilium.io){target=_blank}. + +Follow the specific install guide for your selected CNI and install only one pod network per cluster. + +Once a pod network has been installed, you can confirm that it is working by checking that the CoreDNS pod is running in the output of `kubectl get pods --all-namespaces`. + +### Add Failure Domain to Cluster + +To update your cluster to use new or modified failure domains after initial deployment, follow these steps: + +1. Create NutanixFailureDomain resource + + For example, define a failure domain in example.yaml: +``` +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixFailureDomain +metadata: + name: fd-domain-1 +spec: + prismElementCluster: + type: name + name: "PrismClusterA" + subnets: + - type: name + name: "SubnetA" + - type: name + name: "SubnetB" +``` + +2. Apply the resource + +``` +kubectl apply -f example.yaml +``` + +3. 
Edit the NutanixCluster resource to reference the failure domain(s) + +``` +kubectl edit nutanixcluster -n +``` + + In the spec section, add the controlPlaneFailureDomains field: + +``` +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: +spec: + controlPlaneFailureDomains: # add controlPlaneFailureDomains + - name: "fd-domain-1" # failureDomain name + - name: "fd-domain-2" # failureDomain name + controlPlaneEndpoint: + prismCentral: +``` + +4. Verify the update + + Check that the failure domains are registered with the cluster: + +``` +kubectl get cluster -n -o yaml +``` + + Look for the failureDomains in status section: + +``` +failureDomains: + fd-domain-1: + controlPlane: true + fd-domain-2: + controlPlane: true +``` + +### Add Failure Domain to MachineDeployment + +To associate a MachineDeployment with a specific failure domain: + +1. Export the MachineDeployment definition + +``` +kubectl get machinedeployments -n -o yaml > machinedeployment.yaml +``` + +2. Edit the manifest to add the failure domain + + Under spec.template.spec, add a failureDomain field: + +``` +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: your-machinedeployment + namespace: your-namespace +spec: + replicas: 3 + selector: + matchLabels: + cluster.x-k8s.io/deployment-name: your-machinedeployment + template: + metadata: + labels: + cluster.x-k8s.io/deployment-name: your-machinedeployment + spec: + failureDomain: "fd-domain-1" + # other fields like bootstrap, infrastructureRef ... +``` + +3. Apply the changes + +``` +kubectl apply -f machinedeployment.yaml +``` + +4. Verify the Update + + Confirm that the failure domain field was updated: + +``` +kubectl get machinedeployments -n -o yaml | grep failureDomain +``` + +5. Check placement of machines + + Ensure new machines are placed in the specified failure domain: + +``` +kubectl get machines -l cluster.x-k8s.io/deployment-name= -n -o yaml +``` + +### Kube-vip settings + +Kube-vip is a true load balancing solution for the Kubernetes control plane. It distributes API requests across control plane nodes. It also has the capability to provide load balancing for Kubernetes services. + +You can tweak kube-vip settings by using the following properties: + +- `KUBEVIP_LB_ENABLE` + +This setting allows control plane load balancing using IPVS. See +[Control Plane Load-Balancing documentation](https://kube-vip.io/docs/about/architecture/#control-plane-load-balancing){target=_blank} for further information. + +- `KUBEVIP_SVC_ENABLE` + +This setting enables a service of type LoadBalancer. See +[Kubernetes Service Load Balancing documentation](https://kube-vip.io/docs/about/architecture/#kubernetes-service-load-balancing){target=_blank} for further information. + +- `KUBEVIP_SVC_ELECTION` + +This setting enables Load Balancing of Load Balancers. See [Load Balancing Load Balancers](https://kube-vip.io/docs/usage/kubernetes-services/#load-balancing-load-balancers-when-using-arp-mode-yes-you-read-that-correctly-kube-vip-v050){target=_blank} for further information. + +### Delete a workload cluster +To remove a workload cluster from your management cluster, remove the cluster object and the provider will clean-up all resources. + +``` +kubectl delete cluster ${TEST_CLUSTER_NAME} -n ${TEST_NAMESPACE} +``` +!!! note + Deleting the entire cluster template with `kubectl delete -f ./cluster.yaml` may lead to pending resources requiring manual cleanup. 
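+
+A quick way to confirm the deletion has completed, assuming the environment variables used earlier, is shown below; both commands should eventually return no resources.
+```bash
+kubectl get cluster -n ${TEST_NAMESPACE}
+kubectl get machines -n ${TEST_NAMESPACE}
+```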
diff --git a/docs/capx/v1.8.x/pc_certificates.md b/docs/capx/v1.8.x/pc_certificates.md
new file mode 100644
index 00000000..f3fe1699
--- /dev/null
+++ b/docs/capx/v1.8.x/pc_certificates.md
@@ -0,0 +1,149 @@
+# Certificate Trust
+
+CAPX invokes Prism Central APIs using the HTTPS protocol. CAPX has different methods to handle the trust of the Prism Central certificates:
+
+- Enable certificate verification (default)
+- Configure an additional trust bundle
+- Disable certificate verification
+
+See the respective sections below for more information.
+
+!!! note
+    For more information about replacing Prism Central certificates, see the [Nutanix AOS Security Guide](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_5:mul-security-ssl-certificate-pc-t.html){target=_blank}.
+
+## Enable certificate verification (default)
+By default, CAPX performs certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
+No additional configuration is required in CAPX.
+
+## Configure an additional trust bundle
+CAPX allows users to configure an additional trust bundle. This allows CAPX to verify certificates that are not issued by a publicly trusted certificate authority.
+
+To configure an additional trust bundle, the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable needs to be set. The value of the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable contains the trust bundle (PEM format) in base64 encoded format. See the [Configuring the trust bundle environment variable](#configuring-the-trust-bundle-environment-variable) section for more information.
+
+It is also possible to configure the additional trust bundle manually by creating a custom `cluster-template`. See the [Configuring the additional trust bundle manually](#configuring-the-additional-trust-bundle-manually) section for more information.
+
+The `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable can be set when initializing the CAPX provider or when creating a workload cluster. If the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` is configured when the CAPX provider is initialized, the additional trust bundle will be used for every CAPX workload cluster. If it is only configured when creating a workload cluster, it will only be applicable to that specific workload cluster.
+
+
+### Configuring the trust bundle environment variable
+
+Create a PEM encoded file containing the root certificate and all intermediate certificates. Example:
+```
+$ cat cert.crt
+-----BEGIN CERTIFICATE-----
+
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+
+-----END CERTIFICATE-----
+```
+
+Use a `base64` tool to encode these contents in base64. The command below will provide a `base64` string.
+```
+$ cat cert.crt | base64
+
+```
+!!! note
+    Make sure the `base64` string does not contain any newlines (`\n`). If the output string contains newlines, remove them manually or check the manual of the `base64` tool for how to generate a `base64` string without newlines.
+
+Use the `base64` string as the value for the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable.
+```
+$ export NUTANIX_ADDITIONAL_TRUST_BUNDLE=""
+```
+
+### Configuring the additional trust bundle manually
+
+To configure the additional trust bundle manually without using the `NUTANIX_ADDITIONAL_TRUST_BUNDLE` environment variable present in the default `cluster-template` files, it is required to:
+
+- Create a `ConfigMap` containing the additional trust bundle.
+- Configure the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec. + +#### Creating the additional trust bundle ConfigMap + +CAPX supports two different formats for the ConfigMap containing the additional trust bundle. The first one is to add the additional trust bundle as a multi-line string in the `ConfigMap`, the second option is to add the trust bundle in `base64` encoded format. See the examples below. + +Multi-line string example: +```YAML +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +data: + ca.crt: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- +``` + +`base64` example: + +```YAML +apiVersion: v1 +kind: ConfigMap +metadata: + name: user-ca-bundle + namespace: ${NAMESPACE} +binaryData: + ca.crt: +``` + +!!! note + The `base64` string needs to be added as `binaryData`. + + +#### Configuring the NutanixCluster spec + +When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `NutanixCluster` spec. Add the `prismCentral.additionalTrustBundle` object in the `NutanixCluster` spec as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced. + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + ... + prismCentral: + ... + additionalTrustBundle: + kind: ConfigMap + name: user-ca-bundle + insecure: false +``` + +!!! note + the default value of `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured. + + If `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled. + + +## Disable certificate verification + +!!! note + Disabling certificate verification is not recommended for production purposes and should only be used for testing. + + +Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `NutanixCluster` spec. Certificate verification will be disabled even if an additional trust bundle is configured. + +Disabled certificate verification example: + +```YAML +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + controlPlaneEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_IP} + port: ${CONTROL_PLANE_ENDPOINT_PORT=6443} + prismCentral: + ... + insecure: true + ... +``` \ No newline at end of file diff --git a/docs/capx/v1.8.x/port_requirements.md b/docs/capx/v1.8.x/port_requirements.md new file mode 100644 index 00000000..af182abb --- /dev/null +++ b/docs/capx/v1.8.x/port_requirements.md @@ -0,0 +1,19 @@ +# Port Requirements + +CAPX uses the ports documented below to create workload clusters. + +!!! note + This page only documents the ports specifically required by CAPX and does not provide the full overview of all ports required in the CAPI framework. 
+ +## Management cluster + +| Source | Destination | Protocol | Port | Description | +|--------------------|---------------------|----------|------|--------------------------------------------------------------------------------------------------| +| Management cluster | External Registries | TCP | 443 | Pull container images from [CAPX public registries](#public-registries-utilized-when-using-capx) | +| Management cluster | Prism Central | TCP | 9440 | Management cluster communication to Prism Central | + +## Public registries utilized when using CAPX + +| Registry name | +|---------------| +| ghcr.io | diff --git a/docs/capx/v1.8.x/tasks/capx_v18x_upgrade_procedure.md b/docs/capx/v1.8.x/tasks/capx_v18x_upgrade_procedure.md new file mode 100644 index 00000000..0f0e6154 --- /dev/null +++ b/docs/capx/v1.8.x/tasks/capx_v18x_upgrade_procedure.md @@ -0,0 +1,83 @@ +# CAPX v1.8.x Upgrade Procedure + +Starting from CAPX v1.3.0, it is required for all CAPX-managed Kubernetes clusters to use the Nutanix Cloud Controller Manager (CCM). + +Before upgrading CAPX instances to v1.3.0 or later, it is required to follow the [steps](#steps) detailed below for each of the CAPX-managed Kubernetes clusters that don't use Nutanix CCM. + + +## Steps + +This procedure uses [Cluster Resource Set (CRS)](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} to install Nutanix CCM but it can also be installed using the [Nutanix CCM Helm chart](https://artifacthub.io/packages/helm/nutanix/nutanix-cloud-provider){target=_blank}. + +!!! warning + Make sure [CRS](https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-resource-set){target=_blank} is enabled on the management cluster before following the procedure. + +Perform following steps for each of the CAPX-managed Kubernetes clusters that are not configured to use Nutanix CCM: + +1. Add the `cloud-provider: external` configuration in the `KubeadmConfigTemplate` resources: + ```YAML + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + ``` +2. Add the `cloud-provider: external` configuration in the `KubeadmControlPlane` resource: +```YAML +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external +``` +3. Add the Nutanix CCM CRS resources: + + - [nutanix-ccm-crs.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.8.0/templates/ccm/nutanix-ccm-crs.yaml){target=_blank} + - [nutanix-ccm-secret.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.8.0/templates/ccm/nutanix-ccm-secret.yaml) + - [nutanix-ccm.yaml](https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/blob/v1.8.0/templates/ccm/nutanix-ccm.yaml) + + Make sure to update each of the variables before applying the `YAML` files. + +4. 
Add the `ccm: nutanix` label to the `Cluster` resource: + ```YAML + apiVersion: cluster.x-k8s.io/v1beta1 + kind: Cluster + metadata: + labels: + ccm: nutanix + ``` +5. Verify if the Nutanix CCM pod is up and running: +``` +kubectl get pod -A -l k8s-app=nutanix-cloud-controller-manager +``` +6. Trigger a new rollout of the Kubernetes nodes by performing a Kubernetes upgrade or by using `clusterctl alpha rollout restart`. See the [clusterctl alpha rollout](https://cluster-api.sigs.k8s.io/clusterctl/commands/alpha-rollout#restart){target=_blank} for more information. +7. Upgrade CAPX to v1.8.0 by following the [clusterctl upgrade](https://cluster-api.sigs.k8s.io/clusterctl/commands/upgrade.html?highlight=clusterctl%20upgrade%20pla#clusterctl-upgrade){target=_blank} documentation \ No newline at end of file diff --git a/docs/capx/v1.8.x/tasks/modify_machine_configuration.md b/docs/capx/v1.8.x/tasks/modify_machine_configuration.md new file mode 100644 index 00000000..04a43a95 --- /dev/null +++ b/docs/capx/v1.8.x/tasks/modify_machine_configuration.md @@ -0,0 +1,11 @@ +# Modifying Machine Configurations + +Since all attributes of the `NutanixMachineTemplate` resources are immutable, follow the [Updating Infrastructure Machine Templates](https://cluster-api.sigs.k8s.io/tasks/updating-machine-templates.html?highlight=machine%20template#updating-infrastructure-machine-templates){target=_blank} procedure to modify the configuration of machines in an existing CAPX cluster. +See the [NutanixMachineTemplate](../types/nutanix_machine_template.md) documentation for all supported configuration parameters. + +!!! note + Manually modifying existing and linked `NutanixMachineTemplate` resources will not trigger a rolling update of the machines. + +!!! note + Do not modify the virtual machine configuration of CAPX cluster nodes manually in Prism/Prism Central. + CAPX will not automatically revert the configuration change but performing scale-up/scale-down/upgrade operations will override manual modifications. Only use the `Updating Infrastructure Machine` procedure referenced above to perform configuration changes. \ No newline at end of file diff --git a/docs/capx/v1.8.x/topology/capx_multi_pe.md b/docs/capx/v1.8.x/topology/capx_multi_pe.md new file mode 100644 index 00000000..bd52ccd7 --- /dev/null +++ b/docs/capx/v1.8.x/topology/capx_multi_pe.md @@ -0,0 +1,30 @@ +# Creating a workload CAPX cluster spanning Prism Element clusters + +!!! warning + The scenario and features described on this page are experimental. It's important to note that they have not been fully validated. + +This page will explain how to deploy CAPX-based Kubernetes clusters where worker nodes are spanning multiple Prism Element (PE) clusters. + +!!! note + All the PE clusters must be managed by the same Prism Central (PC) instance. + +The topology will look like this: + +- One PC managing multiple PE's +- One CAPI management cluster +- One CAPI workload cluster with multiple `MachineDeployment`resources + +Refer to the [CAPI quickstart](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to get started with CAPX. + +To create workload clusters spanning multiple Prism Element clusters, it is required to create a `MachineDeployment` and `NutanixMachineTemplate` resource for each Prism Element cluster. The Prism Element specific parameters (name/UUID, subnet,...) are referenced in the `NutanixMachineTemplate`. + +## Steps +1. Create a management cluster that has the CAPX infrastructure provider deployed. +2. 
Create a `cluster.yml` file containing the workload cluster definition. Refer to the steps defined in the [CAPI quickstart guide](https://cluster-api.sigs.k8s.io/user/quick-start.html){target=_blank} to create an example `cluster.yml` file.
+3. Add additional `MachineDeployment` and `NutanixMachineTemplate` resources.
+
+    By default, there is only one machine template and machine deployment defined. To add nodes residing on another Prism Element cluster, a new `MachineDeployment` and `NutanixMachineTemplate` resource needs to be added to the YAML file. The autogenerated `MachineDeployment` and `NutanixMachineTemplate` resource definitions can be used as a baseline.
+
+    Make sure to modify the `MachineDeployment` and `NutanixMachineTemplate` parameters.
+
+4. Apply the modified `cluster.yml` file to the management cluster.
diff --git a/docs/capx/v1.8.x/troubleshooting.md b/docs/capx/v1.8.x/troubleshooting.md
new file mode 100644
index 00000000..c023d13e
--- /dev/null
+++ b/docs/capx/v1.8.x/troubleshooting.md
@@ -0,0 +1,13 @@
+# Troubleshooting
+
+## Clusterctl failed with GitHub rate limit error
+
+By design, clusterctl fetches artifacts from repositories hosted on GitHub; this operation is subject to [GitHub API rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting){target=_blank}.
+
+While this is generally okay for the majority of users, there is still a chance that some users (especially developers or CI tools) hit this limit:
+
+```
+Error: failed to get repository client for the XXX with name YYY: error creating the GitHub repository client: failed to get GitHub latest version: failed to get the list of versions: rate limit for github api has been reached. Please wait one hour or get a personal API tokens a assign it to the GITHUB_TOKEN environment variable
+```
+
+As explained in the error message, you can increase your API rate limit by [creating a GitHub personal token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token){target=_blank} and setting a `GITHUB_TOKEN` environment variable using the token.
diff --git a/docs/capx/v1.8.x/types/nutanix_cluster.md b/docs/capx/v1.8.x/types/nutanix_cluster.md
new file mode 100644
index 00000000..daa8d8cc
--- /dev/null
+++ b/docs/capx/v1.8.x/types/nutanix_cluster.md
@@ -0,0 +1,55 @@
+# NutanixCluster
+
+The `NutanixCluster` resource defines the configuration of a CAPX Kubernetes cluster.
+
+Example of a `NutanixCluster` resource:
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: ${NAMESPACE}
+spec:
+  controlPlaneEndpoint:
+    host: ${CONTROL_PLANE_ENDPOINT_IP}
+    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
+  prismCentral:
+    address: ${NUTANIX_ENDPOINT}
+    additionalTrustBundle:
+      kind: ConfigMap
+      name: user-ca-bundle
+    credentialRef:
+      kind: Secret
+      name: ${CLUSTER_NAME}
+    insecure: ${NUTANIX_INSECURE=false}
+    port: ${NUTANIX_PORT=9440}
+```
+
+## NutanixCluster spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixCluster` resource.
+
+### Configuration parameters
+
+| Key |Type |Description |
+|--------------------------------------------|------|----------------------------------------------------------------------------------|
+|controlPlaneEndpoint |object|Defines the host IP and port of the CAPX Kubernetes cluster. |
+|controlPlaneEndpoint.host |string|Host IP to be assigned to the CAPX Kubernetes cluster. |
+|controlPlaneEndpoint.port |int |Port of the CAPX Kubernetes cluster. Default: `6443` |
+|prismCentral |object|(Optional) Prism Central endpoint definition. |
+|prismCentral.address |string|IP/FQDN of Prism Central. |
+|prismCentral.port |int |Port of Prism Central. Default: `9440` |
+|prismCentral.insecure |bool |Disable Prism Central certificate checking. Default: `false` |
+|prismCentral.credentialRef |object|Reference to credentials used for Prism Central connection. |
+|prismCentral.credentialRef.kind |string|Kind of the credentialRef. Allowed value: `Secret` |
+|prismCentral.credentialRef.name |string|Name of the secret containing the Prism Central credentials. |
+|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret containing the Prism Central credentials. |
+|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for Prism Central connection. |
+|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` |
+|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. |
+|prismCentral.additionalTrustBundle.namespace|string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle.|
+|controlPlaneFailureDomains |list |(Optional) List of local references to failure domains for control plane nodes. |
+|controlPlaneFailureDomains.Name |string|Name of the failure domain used for control plane nodes. |
+
+!!! note
+    To prevent duplicate IP assignments, it is required to assign an IP-address to the `controlPlaneEndpoint.host` variable that is not part of the Nutanix IPAM or DHCP range assigned to the subnet of the CAPX cluster.
\ No newline at end of file
diff --git a/docs/capx/v1.8.x/types/nutanix_failure_domains.md b/docs/capx/v1.8.x/types/nutanix_failure_domains.md
new file mode 100644
index 00000000..cefae92c
--- /dev/null
+++ b/docs/capx/v1.8.x/types/nutanix_failure_domains.md
@@ -0,0 +1,99 @@
+# NutanixFailureDomain
+
+The `NutanixFailureDomain` resource defines the configuration of a CAPX Kubernetes failure domain.
+
+Example of a `NutanixFailureDomain` resource:
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: "${FAILURE_DOMAIN_NAME}"
+  namespace: "${CLUSTER_NAMESPACE}"
+spec:
+  prismElementCluster:
+    type: name
+    name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
+  subnets:
+  - type: uuid
+    uuid: "${NUTANIX_SUBNET_UUID}"
+  - type: name
+    name: "${NUTANIX_SUBNET_NAME}"
+```
+
+## NutanixFailureDomain spec
+The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixFailureDomain` resource.
+
+### Configuration parameters
+| Key |Type |Description |
+|--------------------------------------------|------|--------------------------------------------------------------------------------------------|
+|prismElementCluster |object|Identifies the Prism Element cluster in Prism Central for the failure domain. |
+|prismElementCluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` |
+|prismElementCluster.name |string|Name of the Prism Element cluster. |
+|prismElementCluster.uuid |string|UUID of the Prism Element cluster. |
+|subnets |list |Reference (name or uuid) to the subnets to be assigned to the VMs. |
+|subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` |
+|subnets.[].name |string|Name of the subnet. |
+|subnets.[].uuid |string|UUID of the subnet. |
+
+!!! note
+    The `NutanixFailureDomain` resource allows you to define logical groupings of Nutanix infrastructure for high availability and workload placement in Kubernetes clusters managed by CAPX. Each failure domain maps to a Prism Element cluster and a set of subnets, ensuring that workloads can be distributed across different infrastructure segments.
+
+## Usage Notes
+
+- The `prismElementCluster` field is **required** and must specify either the `name` or `uuid` of the Prism Element cluster.
+- The `subnets` field is **required**. You can provide one or more subnets by `name` or `uuid`.
+- Failure domains are used by Cluster API to spread machines across different infrastructure segments for resilience.
+
+## Example Scenarios
+
+### Single Subnet by UUID
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-uuid
+spec:
+  prismElementCluster:
+    type: uuid
+    uuid: "00000000-0000-0000-0000-000000000000"
+  subnets:
+  - type: uuid
+    uuid: "11111111-1111-1111-1111-111111111111"
+```
+
+### Multiple Subnets by Name
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-names
+spec:
+  prismElementCluster:
+    type: name
+    name: "PrismClusterA"
+  subnets:
+  - type: name
+    name: "SubnetA"
+  - type: name
+    name: "SubnetB"
+```
+
+### Multiple Subnets by Name and UUID
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixFailureDomain
+metadata:
+  name: fd-mixed
+spec:
+  prismElementCluster:
+    type: name
+    name: "PrismClusterA"
+  subnets:
+  - type: name
+    name: "SubnetA"
+  - type: uuid
+    uuid: "11111111-1111-1111-1111-111111111111"
+```
\ No newline at end of file
diff --git a/docs/capx/v1.8.x/types/nutanix_machine_template.md b/docs/capx/v1.8.x/types/nutanix_machine_template.md
new file mode 100644
index 00000000..4aa613b8
--- /dev/null
+++ b/docs/capx/v1.8.x/types/nutanix_machine_template.md
@@ -0,0 +1,124 @@
+# NutanixMachineTemplate
+The `NutanixMachineTemplate` resource defines the configuration of a CAPX Kubernetes VM.
+
+Example of a `NutanixMachineTemplate` resource.
+
+```YAML
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: "${CLUSTER_NAME}-mt-0"
+  namespace: "${NAMESPACE}"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://${CLUSTER_NAME}-m1"
+      # Supported options for boot type: legacy and uefi
+      # Defaults to legacy if not set
+      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
+      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
+      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
+      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
+      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
+      image:
+        type: name
+        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
+      cluster:
+        type: name
+        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
+      subnet:
+      - type: name
+        name: "${NUTANIX_SUBNET_NAME}"
+      # Adds additional categories to the virtual machines.
+      # Note: Categories must already be present in Prism Central
+      # additionalCategories:
+      # - key: AppType
+      #   value: Kubernetes
+      # Adds the cluster virtual machines to a project defined in Prism Central.
+      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
+      # Note: Project must already be present in Prism Central.
+ # project: + # type: name + # name: "NUTANIX_PROJECT_NAME" + # gpus: + # - type: name + # name: "GPU NAME" + # Note: Either of `image` or `imageLookup` must be set, but not both. + # imageLookup: + # format: "NUTANIX_IMAGE_LOOKUP_FORMAT" + # baseOS: "NUTANIX_IMAGE_LOOKUP_BASE_OS" + # dataDisks: + # - diskSize: + # deviceProperties: + # deviceType: Disk + # adapterType: SCSI + # deviceIndex: 1 + # storageConfig: + # diskMode: Standard + # storageContainer: + # type: name + # name: "NUTANIX_VM_DISK_STORAGE_CONTAINER" + # dataSource: + # type: name + # name: "NUTANIX_DATA_SOURCE_IMAGE_NAME" +``` + +## NutanixMachineTemplate spec +The table below provides an overview of the supported parameters of the `spec` attribute of a `NutanixMachineTemplate` resource. + +### Configuration parameters +| Key |Type |Description | +|----------------------------------------------------|------|--------------------------------------------------------------------------------------------------------| +|bootType |string|Boot type of the VM. Depends on the OS image used. Allowed values: `legacy`, `uefi`. Default: `legacy` | +|vcpusPerSocket |int |Amount of vCPUs per socket. Default: `1` | +|vcpuSockets |int |Amount of vCPU sockets. Default: `2` | +|memorySize |string|Amount of Memory. Default: `4Gi` | +|systemDiskSize |string|Amount of storage assigned to the system disk. Default: `40Gi` | +|image |object|Reference (name or uuid) to the OS image used for the system disk. | +|image.type |string|Type to identify the OS image. Allowed values: `name` and `uuid` | +|image.name |string|Name of the image. | +|image.uuid |string|UUID of the image. | +|cluster |object|(Optional) Reference (name or uuid) to the Prism Element cluster. Name or UUID can be passed | +|cluster.type |string|Type to identify the Prism Element cluster. Allowed values: `name` and `uuid` | +|cluster.name |string|Name of the Prism Element cluster. | +|cluster.uuid |string|UUID of the Prism Element cluster. | +|subnets |list |(Optional) Reference (name or uuid) to the subnets to be assigned to the VMs. | +|subnets.[].type |string|Type to identify the subnet. Allowed values: `name` and `uuid` | +|subnets.[].name |string|Name of the subnet. | +|subnets.[].uuid |string|UUID of the subnet. | +|additionalCategories |list |Reference to the categories to be assigned to the VMs. These categories already exist in Prism Central. | +|additionalCategories.[].key |string|Key of the category. | +|additionalCategories.[].value |string|Value of the category. | +|project |object|Reference (name or uuid) to the project. This project must already exist in Prism Central. | +|project.type |string|Type to identify the project. Allowed values: `name` and `uuid` | +|project.name |string|Name of the project. | +|project.uuid |string|UUID of the project. | +|gpus |object|Reference (name or deviceID) to the GPUs to be assigned to the VMs. Can be vGPU or Passthrough. | +|gpus.[].type |string|Type to identify the GPU. Allowed values: `name` and `deviceID` | +|gpus.[].name |string|Name of the GPU or the vGPU profile | +|gpus.[].deviceID |string|DeviceID of the GPU or the vGPU profile | +|imageLookup |object|(Optional) Reference to a container that holds how to look up rhcos images for the cluster. | +|imageLookup.format |string|Naming format to look up the image for the machine. Default: `capx-{{.BaseOS}}-{{.K8sVersion}}-*` | +|imageLookup.baseOS |string|Name of the base operating system to use for image lookup. 
| +|dataDisks |list |(Optional) Reference to the data disks to be attached to the VM. | +|dataDisks.[].diskSize |string|Size (in Quantity format) of the disk attached to the VM. The minimum diskSize is `1GB`. | +|dataDisks.[].deviceProperties |object|(Optional) Reference to the properties of the disk device. | +|dataDisks.[].deviceProperties.deviceType |string|VM disk device type. Allowed values: `Disk` (default) and `CDRom` | +|dataDisks.[].deviceProperties.adapterType |string|Adapter type of the disk address. | +|dataDisks.[].deviceProperties.deviceIndex |int |(Optional) Index of the disk address. Allowed values: non-negative integers (default: `0`) | +|dataDisks.[].storageConfig |object|(Optional) Reference to the storage configuration parameters of the VM disks. | +|dataDisks.[].storageConfig.diskMode |string|Specifies the disk mode. Allowed values: `Standard` (default) and `Flash` | +|dataDisks.[].storageConfig.storageContainer |object|(Optional) Reference (name or uuid) to the storage_container used by the VM disk. | +|dataDisks.[].storageConfig.storageContainer.type |string|Type to identify the storage container. Allowed values: `name` and `uuid` | +|dataDisks.[].storageConfig.storageContainer.name |string|Name of the storage container. | +|dataDisks.[].storageConfig.storageContainer.uuid |string|UUID of the storage container. | +|dataDisks.[].dataSource |object|(Optional) Reference (name or uuid) to a data source image for the VM disk. | +|dataDisks.[].dataSource.type |string|Type to identify the data source image. Allowed values: `name` and `uuid` | +|dataDisks.[].dataSource.name |string|Name of the data source image. | +|dataDisks.[].dataSource.uuid |string|UUID of the data source image. | + +!!! note + - The `cluster` or `subnets` configuration parameters are optional in case failure domains are defined on the `NutanixCluster` and `MachineDeployment` resources. + - If the `deviceType` is `Disk`, the valid `adapterType` can be `SCSI`, `IDE`, `PCI`, `SATA` or `SPAPR`. If the `deviceType` is `CDRom`, the valid `adapterType` can be `IDE` or `SATA`. + - Either of `image` or `imageLookup` must be set, but not both. + - For a Machine VM, the `deviceIndex` for the disks with the same `deviceType.adapterType` combination should start from `0` and increase consecutively afterwards. Note that for each Machine VM, the `Disk.SCSI.0` and `CDRom.IDE.0` are reserved to be used by the VM's system. So for `dataDisks` of Disk.SCSI and CDRom.IDE, the `deviceIndex` should start from `1`. \ No newline at end of file diff --git a/docs/capx/v1.8.x/user_requirements.md b/docs/capx/v1.8.x/user_requirements.md new file mode 100644 index 00000000..6ee9b802 --- /dev/null +++ b/docs/capx/v1.8.x/user_requirements.md @@ -0,0 +1,67 @@ +# User Requirements + +Cluster API Provider Nutanix Cloud Infrastructure (CAPX) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account. + +CAPX supports two types of PC users: + +- Local users: must be assigned the `Prism Central Admin` role. +- Domain users: must be assigned a role that at least has the [Minimum required CAPX permissions for domain users](#minimum-required-capx-permissions-for-domain-users) assigned. + +See [Credential Management](./credential_management.md){target=_blank} for more information on how to pass the user credentials to CAPX. 
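+
+For illustration only, a CAPX credential is typically provided as a Kubernetes secret that the `credentialRef` of the `NutanixCluster` resource points to. The sketch below assumes the common `nutanix-creds`/`capx-system` naming and the basic-auth JSON layout; verify the authoritative format on the [Credential Management](./credential_management.md) page before use.
+```bash
+# Hypothetical values; replace the placeholders with real Prism Central credentials
+kubectl create secret generic nutanix-creds -n capx-system \
+  --from-literal=credentials='[{"type":"basic_auth","data":{"prismCentral":{"username":"<pc-user>","password":"<pc-password>"}}}]'
+```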
+ +## Minimum required CAPX permissions for domain users + +The following permissions are required for Prism Central domain users: + +- Create Category +- View Cluster Pgpu Profiles +- View Cluster Vgpu Profiles +- Create Image +- Create New Virtual Machine +- Delete Image +- Delete Category +- Delete Virtual Machine +- Detach Volume Group From AHV VM +- Power On Virtual Machine +- View Category +- View Cluster +- View Image +- View Project +- View Subnet +- View Virtual Machine + +!!! note + The list of permissions has been validated on PC 7.3 and above. + +## CAPX v1.8.x Upgrade Requirements + +When upgrading CAPX v1.7.x to v1.8.x, users must meet the following additional requirements: + +The following permissions are required for Prism Central domain users: + +- Create Category +- Create Category Mapping +- Create Image +- Create New Virtual Machine +- Create Or Update Name Category +- Create Or Update Value Category +- Create Virtual Machine +- Delete Category +- Delete Category Mapping +- Delete Image +- Delete Name Category +- Delete Value Category +- Delete Virtual Machine +- Detach Volume Group From AHV VM +- Power On Virtual Machine +- View Category +- View Category Mapping +- View Cluster +- View Cluster Pgpu Profiles +- View Cluster Vgpu Profiles +- View Image +- View Name Category +- View Project +- View Subnet +- View Value Category +- View Virtual Machine diff --git a/docs/capx/v1.8.x/validated_integrations.md b/docs/capx/v1.8.x/validated_integrations.md new file mode 100644 index 00000000..de5d4849 --- /dev/null +++ b/docs/capx/v1.8.x/validated_integrations.md @@ -0,0 +1,56 @@ +# Validated Integrations + +Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers are using or deploying with CAPX. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process. + +## Integration Validation Policy + +Nutanix follows the version validation policies below: + +- Validate at least one active AOS LTS (long term support) version. Validated AOS LTS version for a specific CAPX version is listed in the [AOS](#aos) section.
+
+    !!! note
+
+        Typically the latest LTS release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+- Validate the latest AOS STS (short term support) release at time of CAPX release.
+- Validate at least one active Prism Central (PC) version. Validated PC version for a specific CAPX version is listed in the [Prism Central](#prism-central) section.
+
+    !!! note
+
+        Typically the latest PC release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+- Validate at least one active Cluster-API (CAPI) version. Validated CAPI version for a specific CAPX version is listed in the [Cluster-API](#cluster-api) section.
+
+    !!! note
+
+        Typically the latest Cluster-API release at time of CAPX release except when latest is initial release in train (e.g. x.y.0). Exact version depends on timing and customer adoption.
+
+## Validated versions
+### Cluster-API
+| CAPX | CAPI v1.8.x | CAPI v1.9.x | CAPI v1.10.x |
+|--------|-------------|-------------|--------------|
+| v1.8.x | Yes | Yes | Yes |
+| v1.7.x | Yes | Yes | Yes |
+| v1.6.x | Yes | Yes | No |
+
+See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/reference/versions.html?highlight=version#supported-kubernetes-versions){target=_blank} page for more information on CAPI validated versions.
+
+### AOS
+
+| CAPX | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
+|--------|-------------|-----------|------|-----|-----|
+| v1.8.x | No | Yes | Yes | Yes | Yes |
+| v1.7.x | No | Yes | Yes | Yes | Yes |
+| v1.6.x | No | Yes | Yes | Yes | Yes |
+
+!!! warning "Cloud-Init Compatibility with AOS 7.3"
+
+    When using CAPX v1.8.x with AOS 7.3, operating systems that do not use cloud-config for cloud-init may experience issues. Ensure your OS images are configured to use cloud-config format for cloud-init to avoid compatibility problems.
+
+### Prism Central
+
+| CAPX | pc.2022.6 | pc.2023.x | pc.2024.x | pc.7.3 |
+|--------|-----------|-----------|-----------|--------|
+| v1.8.x | No | No | No | Yes |
+| v1.7.x | No | Yes | Yes | Yes |
+| v1.6.x | No | Yes | Yes | Yes |
diff --git a/mkdocs.yml b/mkdocs.yml
index 436fd323..f7bfe7c9 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -18,7 +18,32 @@ nav:
   - "Cloud Native":
     - "Overview": "index.md"
    - "Cluster API Provider: Nutanix (CAPX)":
-      - "v1.7.x (latest)":
+      - "v1.8.x (latest)":
+        - "Getting Started": "capx/v1.8.x/getting_started.md"
+        - "Types":
+          - "NutanixCluster": "capx/v1.8.x/types/nutanix_cluster.md"
+          - "NutanixMachineTemplate": "capx/v1.8.x/types/nutanix_machine_template.md"
+          - "NutanixFailureDomain": "capx/v1.8.x/types/nutanix_failure_domains.md"
+        - "Certificate Trust": "capx/v1.8.x/pc_certificates.md"
+        - "Credential Management": "capx/v1.8.x/credential_management.md"
+        - "Tasks":
+          - "Modifying Machine Configuration": "capx/v1.8.x/tasks/modify_machine_configuration.md"
+          - "CAPX v1.8.x Upgrade Procedure": "capx/v1.8.x/tasks/capx_v18x_upgrade_procedure.md"
+        - "Port Requirements": "capx/v1.8.x/port_requirements.md"
+        - "User Requirements": "capx/v1.8.x/user_requirements.md"
+        - "Addons":
+          - "CSI Driver Installation": "capx/v1.8.x/addons/install_csi_driver.md"
+        - "Validated Integrations": "capx/v1.8.x/validated_integrations.md"
+        - "Topology":
+          - "Multi-PE CAPX cluster": "capx/v1.8.x/topology/capx_multi_pe.md"
+        - "Experimental":
+          - "Autoscaler": "capx/v1.8.x/experimental/autoscaler.md"
+          - "OIDC Integration": "capx/v1.8.x/experimental/oidc.md"
+          - "Flow VPC": "capx/v1.8.x/experimental/vpc.md"
+          - "Proxy Configuration": "capx/v1.8.x/experimental/proxy.md"
+          - "Registry Mirror Configuration": "capx/v1.8.x/experimental/registry_mirror.md"
+        - "Troubleshooting": "capx/v1.8.x/troubleshooting.md"
+      - "v1.7.x":
         - "Getting Started": "capx/v1.7.x/getting_started.md"
         - "Types":
           - "NutanixCluster": "capx/v1.7.x/types/nutanix_cluster.md"
From a5be30d73fc088461227144b18adc3749b1d25fc Mon Sep 17 00:00:00 2001
From: Abhay Aggrawal
Date: Tue, 9 Dec 2025 16:27:05 +0530
Subject: [PATCH 14/15] Update CCM opendocs for release v0.6.x (#77)

* docs for ccm v0.6.x
---
 docs/ccm/v0.6.x/ccm_configuration.md      | 66 ++++++++++
 docs/ccm/v0.6.x/ccm_credentials.md        | 29 +++++
 docs/ccm/v0.6.x/custom_labeling.md        | 14 +++
 docs/ccm/v0.6.x/overview.md               |  37 +++++++
 docs/ccm/v0.6.x/pc_certificates.md        | 104 ++++++++++++++++++
 docs/ccm/v0.6.x/requirements.md           |  41 +++++++
 docs/ccm/v0.6.x/topology_discovery.md     | 124 ++++++++++++++++++++++
 docs/ccm/v0.6.x/validated_integrations.md |  52 +++++++++
 mkdocs.yml                                |  11 +-
 9 files changed, 477 insertions(+), 1 deletion(-)
 create mode 100644 docs/ccm/v0.6.x/ccm_configuration.md
 create mode 100644 docs/ccm/v0.6.x/ccm_credentials.md
 create mode 100644 docs/ccm/v0.6.x/custom_labeling.md
 create mode 100644 docs/ccm/v0.6.x/overview.md
 create mode 100644 docs/ccm/v0.6.x/pc_certificates.md
 create mode 100644 docs/ccm/v0.6.x/requirements.md
 create mode 100644 docs/ccm/v0.6.x/topology_discovery.md
 create mode 100644 docs/ccm/v0.6.x/validated_integrations.md

diff --git a/docs/ccm/v0.6.x/ccm_configuration.md b/docs/ccm/v0.6.x/ccm_configuration.md
new file mode 100644
index 00000000..1df8e394
--- /dev/null
+++ b/docs/ccm/v0.6.x/ccm_configuration.md
@@ -0,0 +1,66 @@
+# Nutanix CCM Configuration
+
+Nutanix CCM can be configured via a `JSON`-formatted file stored in a configmap called `nutanix-config`. This configmap is located in the same namespace as the Nutanix CCM deployment. See the `manifests/cloud-provider-nutanix-deployment.yaml` file for details on the Nutanix CCM deployment.
+
+Example `nutanix-config` configmap:
+```YAML
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nutanix-config
+  namespace: kube-system
+data:
+  nutanix_config.json: |-
+    {
+      "prismCentral": {
+        "address": "${NUTANIX_ENDPOINT}",
+        "port": ${NUTANIX_PORT},
+        "insecure": ${NUTANIX_INSECURE},
+        "credentialRef": {
+          "kind": "secret",
+          "name": "nutanix-creds"
+        },
+        "additionalTrustBundle": {
+          "kind": "ConfigMap",
+          "name": "user-ca-bundle"
+        }
+      },
+      "enableCustomLabeling": false,
+      "ignoredNodeIPs": [],
+      "topologyDiscovery": {
+        "type": "Categories",
+        "topologyCategories": {
+          "regionCategory": "${NUTANIX_REGION_CATEGORY}",
+          "zoneCategory": "${NUTANIX_ZONE_CATEGORY}"
+        }
+      }
+    }
+```
+
+The table below provides an overview of the supported configuration parameters.
+
+### Configuration parameters
+
+| Key |Type |Description |
+|---------------------------------------------------|------|------------------------------------------------------------------------------------------------------------------------------------------------------|
+|topologyDiscovery |object|(Optional) Configures the topology discovery mode.<br>`Prism` topology discovery is used by default if the `topologyDiscovery` attribute is not passed. |
+|topologyDiscovery.type |string|Topology discovery mode. Can be `Prism` or `Categories`. See [Topology Discovery](./topology_discovery.md) for more information. |
+|topologyDiscovery.topologyCategories |object|Required if the topology discovery mode is `Categories`. |
+|topologyDiscovery.topologyCategories.regionCategory|string|Category key defining the region of the Kubernetes node. |
+|topologyDiscovery.topologyCategories.zoneCategory |string|Category key defining the zone of the Kubernetes node. |
+|enableCustomLabeling |bool |Boolean value to enable custom labeling. See [Custom Labeling](./custom_labeling.md) for more information.<br>Default: `false` |
+|ignoredNodeIPs |array |(Optional) List of node IPs, IP ranges (e.g. "10.0.0.1-10.0.0.10"), or CIDR prefixes (e.g. "10.0.0.0/24") to ignore. |
+|prismCentral |object|Prism Central endpoint configuration. |
+|prismCentral.address |string|FQDN/IP of the Prism Central endpoint. |
+|prismCentral.port |int |Port to connect to Prism Central.<br>Default: `9440` |
+|prismCentral.insecure |bool |Disable Prism Central certificate checking.<br>Default: `false` |
+|prismCentral.credentialRef |object|Prism Central credential configuration. See [Credentials](./ccm_credentials.md) for more information. |
+|prismCentral.credentialRef.kind |string|Credential kind.<br>Allowed value: `secret` |
+|prismCentral.credentialRef.name |string|Name of the secret. |
+|prismCentral.credentialRef.namespace |string|(Optional) Namespace of the secret. |
+|prismCentral.additionalTrustBundle |object|Reference to the certificate trust bundle used for the Prism Central connection. |
+|prismCentral.additionalTrustBundle.kind |string|Kind of the additionalTrustBundle. Allowed value: `ConfigMap` |
+|prismCentral.additionalTrustBundle.name |string|Name of the `ConfigMap` containing the Prism Central trust bundle. |
+|prismCentral.additionalTrustBundle.namespace |string|(Optional) Namespace of the `ConfigMap` containing the Prism Central trust bundle. See [Certificate Trust](./pc_certificates.md) for more information.|
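+
+For instance, a minimal configuration that relies on the default `Prism` topology discovery mode might look like the sketch below. The endpoint address and the ignored IP range are illustrative values for this sketch, not defaults:
+
+```YAML
+---
+# Minimal sketch: topologyDiscovery is omitted, so the default Prism mode applies
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nutanix-config
+  namespace: kube-system
+data:
+  nutanix_config.json: |-
+    {
+      "prismCentral": {
+        "address": "pc.example.internal",
+        "port": 9440,
+        "insecure": false,
+        "credentialRef": {
+          "kind": "secret",
+          "name": "nutanix-creds"
+        }
+      },
+      "enableCustomLabeling": false,
+      "ignoredNodeIPs": ["10.0.0.1-10.0.0.10"]
+    }
+```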
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/ccm_credentials.md b/docs/ccm/v0.6.x/ccm_credentials.md
new file mode 100644
index 00000000..7bda06e2
--- /dev/null
+++ b/docs/ccm/v0.6.x/ccm_credentials.md
@@ -0,0 +1,29 @@
+# Credentials
+
+Nutanix CCM requires credentials to connect to Prism Central. These credentials need to be stored in a secret in the following format:
+
+```YAML
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: nutanix-creds
+  namespace: kube-system
+stringData:
+  credentials: |
+    [
+      {
+        "type": "basic_auth",
+        "data": {
+          "prismCentral":{
+            "username": "$NUTANIX_USERNAME",
+            "password": "$NUTANIX_PASSWORD"
+          },
+          "prismElements": null
+        }
+      }
+    ]
+```
+
+See [Requirements](./requirements.md) for more information on the required permissions.
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/custom_labeling.md b/docs/ccm/v0.6.x/custom_labeling.md
new file mode 100644
index 00000000..4db89462
--- /dev/null
+++ b/docs/ccm/v0.6.x/custom_labeling.md
@@ -0,0 +1,14 @@
+# Custom Labeling
+
+Enabling the Nutanix CCM custom labeling feature will add additional labels to the Kubernetes nodes. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on how to configure CCM to enable custom labeling.
+
+The following labels will be added:
+
+|Label |Description |
+|------------------------------|-----------------------------------------------------------------|
+|nutanix.com/prism-element-uuid|UUID of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-element-name|Name of the Prism Element cluster hosting the Kubernetes node VM.|
+|nutanix.com/prism-host-uuid |UUID of the Prism AHV host hosting the Kubernetes node VM. |
+|nutanix.com/prism-host-name |Name of the Prism AHV host hosting the Kubernetes node VM. |
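+
+For illustration, a node labeled by CCM could look like the excerpt below; all UUIDs and names here are placeholder values:
+
+```YAML
+apiVersion: v1
+kind: Node
+metadata:
+  name: k8s-worker-1
+  labels:
+    # Labels added by Nutanix CCM when enableCustomLabeling is set to true
+    nutanix.com/prism-element-uuid: 0005a2b3-c4d5-e6f7-0123-456789abcdef
+    nutanix.com/prism-element-name: my-pe-cluster-1
+    nutanix.com/prism-host-uuid: 1116b3c4-d5e6-f701-2345-6789abcdef01
+    nutanix.com/prism-host-name: ahv-host-01
+```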
+
+Nutanix CCM will reconcile the labels periodically.
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/overview.md b/docs/ccm/v0.6.x/overview.md
new file mode 100644
index 00000000..36d7ebce
--- /dev/null
+++ b/docs/ccm/v0.6.x/overview.md
@@ -0,0 +1,37 @@
+# Overview
+
+Nutanix CCM provides Cloud Controller Manager functionality to Kubernetes clusters running on the Nutanix AHV hypervisor. Visit the [Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) documentation for more information about the general design of a Kubernetes CCM.
+
+Nutanix CCM communicates with Prism Central (PC) to fetch all required information. See the [Requirements](./requirements.md) page for more details.
+
+## Nutanix CCM functionality
+
+|Version|Node Controller|Route Controller|Service Controller|
+|-------|---------------|----------------|------------------|
+|v0.6.x |Yes |No |No |
+|v0.5.x |Yes |No |No |
+|v0.4.x |Yes |No |No |
+|v0.3.x |Yes |No |No |
+|v0.2.x |Yes |No |No |
+
+Nutanix CCM specific features:
+
+|Version|[Topology Discovery](./topology_discovery.md)|[Custom Labeling](./custom_labeling.md)|
+|-------|---------------------------------------------|---------------------------------------|
+|v0.6.x |Prism, Categories |Yes |
+|v0.5.x |Prism, Categories |Yes |
+|v0.4.x |Prism, Categories |Yes |
+|v0.3.x |Prism, Categories |Yes |
+|v0.2.x |Prism, Categories |Yes |
+
+## What's New in v0.6.x
+
+CCM v0.6.x introduces the following enhancements:
+
+- **Enhanced Node Discovery**: Improved node discovery mechanisms for better cloud integration
+- **Performance Optimizations**: Optimized API calls to Prism Central for reduced latency
+- **Improved Logging**: Enhanced logging capabilities for better troubleshooting and monitoring
+- **Bug Fixes**: Various stability improvements and bug fixes from v0.5.x
+
+For detailed configuration examples and migration guidance, see the [Configuration](./ccm_configuration.md) page.
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/pc_certificates.md b/docs/ccm/v0.6.x/pc_certificates.md
new file mode 100644
index 00000000..be9071bf
--- /dev/null
+++ b/docs/ccm/v0.6.x/pc_certificates.md
@@ -0,0 +1,104 @@
+# Certificate Trust
+
+CCM invokes Prism Central APIs using the HTTPS protocol. CCM has different methods to handle the trust of the Prism Central certificates:
+
+- Enable certificate verification (default)
+- Configure an additional trust bundle
+- Disable certificate verification
+
+See the respective sections below for more information.
+
+## Enable certificate verification (default)
+By default, CCM performs certificate verification when invoking Prism Central API calls. This requires Prism Central to be configured with a publicly trusted certificate authority.
+No additional configuration is required in CCM.
+
+## Configure an additional trust bundle
+CCM allows users to configure an additional trust bundle. This allows CCM to verify certificates that are not issued by a publicly trusted certificate authority.
+
+See the [Configuring the additional trust bundle](#configuring-the-additional-trust-bundle) section for more information.
+
+### Configuring the additional trust bundle
+
+To configure the additional trust bundle, it is required to:
+
+- Create a `ConfigMap` containing the additional trust bundle
+- Configure the `prismCentral.additionalTrustBundle` object in the CCM `ConfigMap` called `nutanix-config`.
+
+#### Creating the additional trust bundle ConfigMap
+
+CCM supports two different formats for the `ConfigMap` containing the additional trust bundle: the trust bundle can be added either as a multi-line string or in `base64`-encoded format. See the examples below.
+
+Multi-line string example:
+```YAML
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+data:
+  ca.crt: |
+    -----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----
+    -----BEGIN CERTIFICATE-----
+
+    -----END CERTIFICATE-----
+```
+
+`base64` example:
+
+```YAML
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: user-ca-bundle
+  namespace: ${NAMESPACE}
+binaryData:
+  ca.crt: 
+```
+
+!!! note
+    The `base64` string needs to be added as `binaryData`.
+
+#### Configuring the CCM for an additional trust bundle
+
+When the additional trust bundle `ConfigMap` is created, it needs to be referenced in the `nutanix-config` `ConfigMap`. Add the `prismCentral.additionalTrustBundle` object as shown below. Make sure the correct additional trust bundle `ConfigMap` is referenced.
+
+```JSON
+  ...
+  "prismCentral": {
+    ...
+    "additionalTrustBundle": {
+      "kind": "ConfigMap",
+      "name": "user-ca-bundle"
+    }
+  },
+  ...
+```
+
+!!! note
+    The default value of the `prismCentral.insecure` attribute is `false`. It can be omitted when an additional trust bundle is configured.
+    If the `prismCentral.insecure` attribute is set to `true`, all certificate verification will be disabled.
+
+## Disable certificate verification
+
+!!! note
+    Disabling certificate verification is not recommended for production purposes and should only be used for testing.
+
+Certificate verification can be disabled by setting the `prismCentral.insecure` attribute to `true` in the `nutanix-config` `ConfigMap`. Certificate verification will be disabled even if an additional trust bundle is configured, as long as `prismCentral.insecure` is set to `true`.
+
+Example of how to disable certificate verification:
+
+```JSON
+...
+"prismCentral": {
+  ...
+  "insecure": true
+},
+...
+```
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/requirements.md b/docs/ccm/v0.6.x/requirements.md
new file mode 100644
index 00000000..8df2c94c
--- /dev/null
+++ b/docs/ccm/v0.6.x/requirements.md
@@ -0,0 +1,41 @@
+# Requirements
+
+Nutanix Cloud Controller Manager (CCM) interacts with Nutanix Prism Central (PC) APIs using a Prism Central user account to fetch the required information for Kubernetes nodes.
+
+CCM supports two types of PC users:
+
+- Local users: automatically get `Viewer` permissions when no role is assigned.
+- Domain users: must be assigned a role that includes the `Viewer` role.
+
+## Port requirements
+
+Nutanix CCM uses Prism Central APIs to communicate with the Prism Central endpoint configured in the `nutanix-config` configmap. The following network connectivity is required:
+
+|Source |Destination |Protocol |Port |Description |
+|------------------|--------------------|----------|-----|------------------------------------------|
+|Kubernetes nodes |Prism Central |TCP |9440 |Nutanix CCM communication to Prism Central |
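+
+If the cluster enforces a default-deny egress policy, this flow has to be allowed explicitly. The sketch below shows one possible way to do so with a standard `NetworkPolicy`; the pod selector and the Prism Central address are illustrative and depend on your deployment:
+
+```YAML
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-ccm-egress-to-prism-central
+  namespace: kube-system
+spec:
+  # Illustrative selector; match the labels of your CCM pods
+  podSelector:
+    matchLabels:
+      k8s-app: nutanix-cloud-controller-manager
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 10.10.10.10/32   # illustrative Prism Central address
+      ports:
+        - protocol: TCP
+          port: 9440
+```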
+
+## User permissions
+
+Nutanix CCM performs read-only operations and requires minimal permissions to consume Prism Central APIs.
+
+### Required permissions for local users
+
+Local users automatically receive the necessary permissions:
+
+- View Cluster
+- View Category
+- View Host
+- View Virtual Machine
+
+!!! note
+    For local users, if no role is assigned, the local user will only get `Viewer` permissions, which are sufficient for CCM operations.
+
+### Required permissions for domain users
+
+The following role must be assigned to Prism Central domain users:
+
+- Viewer
+
+!!! note
+    Domain users must be explicitly assigned the `Viewer` role in the user role-mapping configuration.
diff --git a/docs/ccm/v0.6.x/topology_discovery.md b/docs/ccm/v0.6.x/topology_discovery.md
new file mode 100644
index 00000000..7349e5b7
--- /dev/null
+++ b/docs/ccm/v0.6.x/topology_discovery.md
@@ -0,0 +1,124 @@
+# Topology Discovery
+
+One of the responsibilities of the CCM node controller is to annotate and label the nodes in a Kubernetes cluster with topology (region and zone) information. The Nutanix Cloud Controller Manager supports the following topology discovery methods:
+
+- [Prism](#prism)
+- [Categories](#categories)
+
+The topology discovery method can be configured via the `nutanix-config` configmap. See [Nutanix CCM Configuration](./ccm_configuration.md) for more information on the configuration parameters.
+
+## Prism
+
+Prism-based topology discovery is the default mode for Nutanix CCM. In this mode, CCM discovers the Prism Element (PE) cluster and the Prism Central (PC) instance that host the Kubernetes node VM. Prism Central is configured as the region for the node, while Prism Element is configured as the zone.
+
+Prism-based topology discovery can be configured by omitting the `topologyDiscovery` attribute from the `nutanix-config` configmap or by passing the following object:
+```JSON
+  "topologyDiscovery": {
+    "type": "Prism"
+  }
+```
+
+### Example
+If a Kubernetes node VM is hosted on PC `my-pc-instance` and PE `my-pe-cluster-1`, Nutanix CCM will assign the following labels to the Kubernetes node:
+
+|Key |Value |
+|-----------------------------|---------------|
+|topology.kubernetes.io/region|my-pc-instance |
+|topology.kubernetes.io/zone |my-pe-cluster-1|
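+
+Once these labels are set, standard Kubernetes scheduling constructs can consume them. As a sketch, the workload below would be spread evenly across Prism Element clusters (zones); the pod name, labels, and image are illustrative:
+
+```YAML
+apiVersion: v1
+kind: Pod
+metadata:
+  name: web
+  labels:
+    app: web
+spec:
+  topologySpreadConstraints:
+    - maxSkew: 1
+      # Zone label populated by Nutanix CCM topology discovery
+      topologyKey: topology.kubernetes.io/zone
+      whenUnsatisfiable: DoNotSchedule
+      labelSelector:
+        matchLabels:
+          app: web
+  containers:
+    - name: web
+      image: nginx:1.27
+```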
+
+## Categories
+
+The category-based topology discovery mode allows users to assign categories to Prism Element clusters and Kubernetes node VMs to define a custom topology. Nutanix CCM will hierarchically search for the required categories on the VM/PE.
+
+!!! note
+
+    Categories assigned to the VM object will take precedence over the categories assigned to the PE cluster.
+
+The categories must already exist in the PC environment. CCM will not create and assign the categories.
+Visit the [Prism Central documentation](https://portal.nutanix.com/page/documents/details?targetId=Prism-Central-Guide-vpc_2022_6:ssp-ssp-categories-manage-pc-c.html){target=_blank} for more information regarding categories.
+
+To enable the Categories topology discovery mode for Nutanix CCM, provide the following information in the `topologyDiscovery` attribute:
+
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "${NUTANIX_REGION_CATEGORY}",
+      "zoneCategory": "${NUTANIX_ZONE_CATEGORY}"
+    }
+  }
+```
+
+### Example
+
+Define a set of categories in PC that will be used for topology discovery:
+
+|Key |Value |
+|------------------|-----------------------|
+|my-region-category|region-1, region-2 |
+|my-zone-category |zone-1, zone-2, zone-3 |
+
+Assign the categories to the Nutanix entities:
+
+|Nutanix entity |Categories |
+|---------------|------------------------------------------------------|
+|my-pe-cluster-1|my-region-category:region-1<br>my-zone-category:zone-2|
+|my-pe-cluster-2|my-region-category:region-2<br>my-zone-category:zone-3|
+|k8s-node-3 |my-region-category:region-2<br>my-zone-category:zone-2|
+|k8s-node-4 |my-zone-category:zone-1 |
+
+Configure CCM to use categories for topology discovery:
+```JSON
+  "topologyDiscovery": {
+    "type": "Categories",
+    "topologyCategories": {
+      "regionCategory": "my-region-category",
+      "zoneCategory": "my-zone-category"
+    }
+  }
+```
+
+!!! example "Scenario 1: Kubernetes node k8s-node-1 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-1`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 2: Kubernetes node k8s-node-2 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-2`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-3 |
+
+    The categories assigned to the PE will be used.
+
+!!! example "Scenario 3: Kubernetes node k8s-node-3 is running on my-pe-cluster-2"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-3`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-2 |
+    |topology.kubernetes.io/zone |zone-2 |
+
+    The categories assigned to the VM will be used.
+
+!!! example "Scenario 4: Kubernetes node k8s-node-4 is running on my-pe-cluster-1"
+
+    The following topology labels will be assigned to Kubernetes node `k8s-node-4`:
+
+    |Key |Value |
+    |-----------------------------|---------------|
+    |topology.kubernetes.io/region|region-1 |
+    |topology.kubernetes.io/zone |zone-1 |
+
+    In this scenario, Nutanix CCM will use the value of the `my-zone-category` category that is assigned to the VM. Since the `my-region-category` is not assigned to the VM, Nutanix CCM will search for the category on the PE and use the corresponding category value.
\ No newline at end of file
diff --git a/docs/ccm/v0.6.x/validated_integrations.md b/docs/ccm/v0.6.x/validated_integrations.md
new file mode 100644
index 00000000..61ff91c2
--- /dev/null
+++ b/docs/ccm/v0.6.x/validated_integrations.md
@@ -0,0 +1,52 @@
+# Validated Integrations
+
+Validated integrations are a defined set of specifically tested configurations between technologies that represent the most common combinations that Nutanix customers are using or deploying with Nutanix CCM. For these integrations, Nutanix has directly, or through certified partners, exercised a full range of platform tests as part of the product release process.
+
+## Integration Validation Policy
+
+Nutanix follows the version validation policies below for CCM:
+
+- Validate at least one active AOS LTS (long term support) version. The validated AOS LTS version for a specific CCM version is listed in the [AOS](#aos) section.
+
+    !!! note
+
+        Typically the latest LTS release at the time of the CCM release, except when the latest is the initial release in a train (e.g., x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate the latest AOS STS (short term support) release at the time of the CCM release.
+- Validate at least one active Prism Central (PC) version. The validated PC version for a specific CCM version is listed in the [Prism Central](#prism-central) section.
+
+    !!! note
+
+        Typically the latest PC release at the time of the CCM release, except when the latest is the initial release in a train (e.g., x.y.0). The exact version depends on timing and customer adoption.
+
+- Validate at least two active Kubernetes versions. The validated Kubernetes versions for a specific CCM version are listed in the [Kubernetes](#kubernetes) section.
+
+    !!! note
+
+        Typically the current stable Kubernetes release and the previous stable release at the time of the CCM release.
+
+## Validated versions
+
+### AOS
+
+| CCM | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
+|--------|-------------|-----------|------|-----|-----|
+| v0.6.x | No | Yes | Yes | Yes | Yes |
+| v0.5.x | Yes | Yes | Yes | Yes | Yes |
+| v0.4.x | Yes | Yes | Yes | Yes | No |
+
+### Prism Central
+
+| CCM | pc.2022.6 | pc.2023.x | pc.2024.x | pc.7.3 |
+|--------|-----------|-----------|-----------|--------|
+| v0.6.x | No | No | No | Yes |
+| v0.5.x | Yes | Yes | Yes | Yes |
+| v0.4.x | Yes | Yes | No | No |
+
+### CAPX Integration
+
+| CCM | CAPX v1.6.x | CAPX v1.7.x | CAPX v1.8.x |
+|--------|-------------|-------------|-------------|
+| v0.6.x | Yes | Yes | Yes |
+| v0.5.x | Yes | Yes | Yes |
+| v0.4.x | Yes | Yes | No |
diff --git a/mkdocs.yml b/mkdocs.yml
index f7bfe7c9..ad1f4025 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -230,7 +230,16 @@ nav:
           - "Autoscaler": "capx/v0.5.x/experimental/autoscaler.md"
         - "Troubleshooting": "capx/v0.5.x/troubleshooting.md"
     - "Nutanix Cloud Controller Manager (CCM)":
-      - "v0.5.x (Latest)":
+      - "v0.6.x (Latest)":
+        - "Overview": "ccm/v0.6.x/overview.md"
+        - "Requirements": "ccm/v0.6.x/requirements.md"
+        - "Configuration": "ccm/v0.6.x/ccm_configuration.md"
+        - "Certificate Trust": "ccm/v0.6.x/pc_certificates.md"
+        - "Credentials": "ccm/v0.6.x/ccm_credentials.md"
+        - "Topology Discovery": "ccm/v0.6.x/topology_discovery.md"
+        - "Custom Labeling": "ccm/v0.6.x/custom_labeling.md"
+        - "Validated Integrations": "ccm/v0.6.x/validated_integrations.md"
+      - "v0.5.x":
         - "Overview": "ccm/v0.5.x/overview.md"
         - "Requirements": "ccm/v0.5.x/requirements.md"
         - "Configuration": "ccm/v0.5.x/ccm_configuration.md"

From c4b1b6a3dee53f13b2a239721e61caccd0c63498 Mon Sep 17 00:00:00 2001
From: Abhay Aggrawal
Date: Mon, 12 Jan 2026 11:07:03 +0530
Subject: [PATCH 15/15] fix: update validated integration versions for CCM v0.6.x and CAPX v1.8.x (#78)

---
 docs/capx/v1.8.x/validated_integrations.md | 2 +-
 docs/ccm/v0.6.x/validated_integrations.md  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/capx/v1.8.x/validated_integrations.md b/docs/capx/v1.8.x/validated_integrations.md
index de5d4849..8e407150 100644
--- a/docs/capx/v1.8.x/validated_integrations.md
+++ b/docs/capx/v1.8.x/validated_integrations.md
@@ -39,7 +39,7 @@ See the [Validated Kubernetes Versions](https://cluster-api.sigs.k8s.io/referenc
 
 | CAPX | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
 |--------|-------------|-----------|------|-----|-----|
-| v1.8.x | No | Yes | Yes | Yes | Yes |
+| v1.8.x | No | No | No | No | Yes |
 | v1.7.x | No | Yes | Yes | Yes | Yes |
 | v1.6.x | No | Yes | Yes | Yes | Yes |

diff --git a/docs/ccm/v0.6.x/validated_integrations.md b/docs/ccm/v0.6.x/validated_integrations.md
index 61ff91c2..807f6051 100644
--- a/docs/ccm/v0.6.x/validated_integrations.md
+++ b/docs/ccm/v0.6.x/validated_integrations.md
@@ -31,7 +31,7 @@ Nutanix follows the version validation policies below for CCM:
 
 | CCM | 6.5.x (LTS) | 6.8 (STS) | 6.10 | 7.0 | 7.3 |
 |--------|-------------|-----------|------|-----|-----|
-| v0.6.x | No | Yes | Yes | Yes | Yes |
+| v0.6.x | No | No | No | No | Yes |
 | v0.5.x | Yes | Yes | Yes | Yes | Yes |
 | v0.4.x | Yes | Yes | Yes | Yes | No |