diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 85aa65030..270c74f47 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -20,7 +20,7 @@ "settings": { "files.eol": "\n", "editor.tabSize": 2, - "terminal.integrated.scrollback": 32000, + "terminal.integrated.scrollback": 64000, }, // Uncomment the next line if you want start specific services in your Docker Compose config. @@ -30,13 +30,11 @@ // "shutdownAction": "none", // Uncomment the next line to run commands after the container is created. - "postCreateCommand": "sudo cp -R /tmp/.ssh-localhost/* ~/.ssh && sudo chown -R $(whoami):$(whoami) /tf/caf ~/.ssh && sudo chmod 400 ~/.ssh/* && git config --global core.editor vi && pre-commit install && pre-commit autoupdate", - + "postCreateCommand": "sudo cp -R /tmp/.ssh-localhost/* ~/.ssh && sudo chown -R $(whoami):$(whoami) /tf/caf ~/.ssh && sudo chmod 400 ~/.ssh/* && git config --global core.editor vim && pre-commit install && pre-commit autoupdate", + // Add the IDs of extensions you want installed when the container is created in the array below. "extensions": [ "4ops.terraform", - "mutantdino.resourcemonitor", - "ms-azure-devops.azure-pipelines", - "omartawfik.github-actions-vscode" + "mutantdino.resourcemonitor" ] } \ No newline at end of file diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 1334b3cf8..ce4d97d6d 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -6,7 +6,7 @@ version: '3.7' services: rover: - image: aztfmod/rover:1.0.9-2111.0103 + image: aztfmod/rover:1.1.3-2201.2106 user: vscode labels: diff --git a/.github/workflows/landingzones-tf100.yml b/.github/workflows/landingzones-tf100.yml index b50cd4e4c..ed53e56d1 100644 --- a/.github/workflows/landingzones-tf100.yml +++ b/.github/workflows/landingzones-tf100.yml @@ -23,6 +23,8 @@ env: ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} + TF_REGISTRY_DISCOVERY_RETRY: 5 + TF_REGISTRY_CLIENT_TIMEOUT: 15 ROVER_RUNNER: true jobs: @@ -37,7 +39,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:1.0.9-2111.0103 + image: aztfmod/rover:1.1.3-2201.2106 options: --user 0 steps: @@ -64,7 +66,7 @@ jobs: - name: foundations run: | - sleep 120 + sleep 180 /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a apply \ -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/100-passthrough \ -tfstate caf_foundations.tfstate \ @@ -90,7 +92,7 @@ jobs: ] container: - image: aztfmod/rover:1.0.9-2111.0103 + image: aztfmod/rover:1.1.3-2201.2106 options: --user 0 steps: @@ -118,7 +120,7 @@ jobs: -parallelism=30 \ -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ --environment ${{ github.run_id }} \ - -refresh=false + -refresh=false foundations200: name: foundations-200 @@ -133,7 +135,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:1.0.9-2111.0103 + image: aztfmod/rover:1.1.3-2201.2106 options: --user 0 steps: @@ -184,7 +186,7 @@ jobs: ] container: - image: aztfmod/rover:1.0.9-2111.0103 + image: aztfmod/rover:1.1.3-2201.2106 options: --user 0 steps: @@ -212,7 +214,7 @@ jobs: -parallelism=30 \ -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ --environment ${{ github.run_id }} \ - -refresh=false + -refresh=false foundations_destroy: name: foundations_destroy @@ -226,7 +228,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:1.0.9-2111.0103 
+ image: aztfmod/rover:1.1.3-2201.2106 options: --user 0 steps: diff --git a/.github/workflows/landingzones-tf14.yml b/.github/workflows/landingzones-tf14.yml deleted file mode 100644 index 8e84714f7..000000000 --- a/.github/workflows/landingzones-tf14.yml +++ /dev/null @@ -1,268 +0,0 @@ -# -# Copyright (c) Microsoft Corporation -# Licensed under the MIT License. -# - -name: landingzones-tf14 - -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * *' - -env: - TF_CLI_ARGS: '-no-color' - TF_CLI_ARGS_destroy: '-refresh=false' - ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }} - ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} - ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} - ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} - ROVER_RUNNER: true - -jobs: - foundations100: - name: foundations-100 - runs-on: ubuntu-latest - - strategy: - fail-fast: true - max-parallel: 1 - matrix: - random_length: ['5'] - - container: - image: aztfmod/rover:0.14.11-2111.0103 - options: --user 0 - - steps: - - uses: actions/checkout@v2 - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - echo "local user: $(whoami)" - - - name: launchpad - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_launchpad -a apply \ - -var-folder ${GITHUB_WORKSPACE}/caf_launchpad/scenario/100 \ - -level level0 \ - -launchpad \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var random_length=${{ matrix.random_length }}' \ - '-var prefix=g${{ github.run_id }}' \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - - name: foundations - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a apply \ - -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/100-passthrough \ - -tfstate caf_foundations.tfstate \ - -level level1 \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - networking100: - name: networking-100 - runs-on: ubuntu-latest - - needs: foundations100 - - strategy: - fail-fast: false - matrix: - config_files: [ - "caf_solution/scenario/networking/100-single-region-hub", - "caf_solution/scenario/networking/101-multi-region-hub", - "caf_solution/scenario/networking/105-hub-and-spoke", - "caf_solution/scenario/networking/106-hub-virtual-wan-firewall" - ] - - container: - image: aztfmod/rover:0.14.11-2111.0103 - options: --user 0 - - steps: - - uses: actions/checkout@v2 - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: deploy example - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution/ -a apply \ - -tfstate $(basename ${{ matrix.config_files }}).tfstate \ - -level level2 \ - -parallelism=30 \ - -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ - --environment ${{ github.run_id }} - - - name: destroy example - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution/ -a destroy \ - -tfstate $(basename ${{ matrix.config_files }}).tfstate \ - -level level2 \ - -parallelism=30 \ - -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ - --environment ${{ github.run_id }} \ - -refresh=false - - foundations200: - name: foundations-200 - runs-on: ubuntu-latest - needs: networking100 - if: always() - - strategy: - fail-fast: true - 
max-parallel: 1 - matrix: - random_length: ['5'] - - container: - image: aztfmod/rover:0.14.11-2111.0103 - options: --user 0 - - steps: - - uses: actions/checkout@v2 - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - echo "local user: $(whoami)" - - - name: launchpad-200-upgrade - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_launchpad -a apply \ - -var-folder ${GITHUB_WORKSPACE}/caf_launchpad/scenario/200 \ - -level level0 \ - -launchpad \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var random_length=${{ matrix.random_length }}' \ - '-var prefix=g${{ github.run_id }}' \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - - name: foundations-200-upgrade - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a apply \ - -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/gitops \ - -tfstate caf_foundations.tfstate \ - -level level1 \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - networking200: - name: networking-200 - runs-on: ubuntu-latest - - needs: foundations200 - - strategy: - fail-fast: false - matrix: - config_files: [ - "caf_solution/scenario/networking/200-single-region-hub", - "caf_solution/scenario/networking/201-multi-region-hub", - "caf_solution/scenario/networking/210-aks-private" - ] - - container: - image: aztfmod/rover:0.14.11-2111.0103 - options: --user 0 - - steps: - - uses: actions/checkout@v2 - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - - name: deploy example - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution/ -a apply \ - -tfstate $(basename ${{ matrix.config_files }}).tfstate \ - -level level2 \ - -parallelism=30 \ - -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ - --environment ${{ github.run_id }} - - - name: destroy example - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution/ -a destroy \ - -tfstate $(basename ${{ matrix.config_files }}).tfstate \ - -level level2 \ - -parallelism=30 \ - -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ - --environment ${{ github.run_id }} \ - -refresh=false - - foundations_destroy: - name: foundations_destroy - runs-on: ubuntu-latest - if: always() - needs: networking200 - - strategy: - fail-fast: false - matrix: - random_length: ['5'] - - container: - image: aztfmod/rover:0.14.11-2111.0103 - options: --user 0 - - steps: - - uses: actions/checkout@v2 - - - name: Login azure - run: | - az login --service-principal -u '${{ env.ARM_CLIENT_ID }}' -p '${{ env.ARM_CLIENT_SECRET }}' --tenant '${{ env.ARM_TENANT_ID }}' - az account set -s ${{ env.ARM_SUBSCRIPTION_ID }} - - echo "local user: $(whoami)" - - - name: foundations - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a destroy \ - -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/gitops \ - -tfstate caf_foundations.tfstate \ - -level level1 \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - - name: Remove launchpad - run: | - /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_launchpad -a destroy \ - -var-folder ${GITHUB_WORKSPACE}/caf_launchpad/scenario/200 \ - 
-level level0 \ - -launchpad \ - -parallelism=30 \ - --environment ${{ github.run_id }} \ - '-var random_length=${{ matrix.random_length }}' \ - '-var prefix=g${{ github.run_id }}' \ - '-var tags={testing_job_id="${{ github.run_id }}"}' - - - - name: Complete purge - if: ${{ always() }} - run: | - for i in `az monitor diagnostic-settings subscription list -o tsv --query "value[?contains(name, '${{ github.run_id }}' )].name"`; do echo "purging subscription diagnostic-settings: $i" && $(az monitor diagnostic-settings subscription delete --name $i --yes); done - for i in `az monitor log-profiles list -o tsv --query '[].name'`; do az monitor log-profiles delete --name $i; done - for i in `az ad group list --query "[?contains(displayName, '${{ github.run_id }}')].objectId" -o tsv`; do echo "purging Azure AD group: $i" && $(az ad group delete --verbose --group $i || true); done - for i in `az ad app list --query "[?contains(displayName, '${{ github.run_id }}')].appId" -o tsv`; do echo "purging Azure AD app: $i" && $(az ad app delete --verbose --id $i || true); done - for i in `az keyvault list-deleted --query "[?tags.environment=='${{ github.run_id }}'].name" -o tsv`; do az keyvault purge --name $i; done - for i in `az group list --query "[?tags.environment=='${{ github.run_id }}'].name" -o tsv`; do echo "purging resource group: $i" && $(az group delete -n $i -y --no-wait || true); done - for i in `az role assignment list --query "[?contains(roleDefinitionName, '${{ github.run_id }}')].roleDefinitionName" -o tsv`; do echo "purging role assignment: $i" && $(az role assignment delete --role $i || true); done - for i in `az role definition list --query "[?contains(roleName, '${{ github.run_id }}')].roleName" -o tsv`; do echo "purging custom role definition: $i" && $(az role definition delete --name $i || true); done diff --git a/.github/workflows/landingzones-tf15.yml b/.github/workflows/landingzones-tf15.yml index 3d462f2b9..4722c3c46 100644 --- a/.github/workflows/landingzones-tf15.yml +++ b/.github/workflows/landingzones-tf15.yml @@ -17,6 +17,8 @@ env: ARM_CLIENT_ID: ${{ secrets.ARM_CLIENT_ID }} ARM_SUBSCRIPTION_ID: ${{ secrets.ARM_SUBSCRIPTION_ID }} ARM_TENANT_ID: ${{ secrets.ARM_TENANT_ID }} + TF_REGISTRY_DISCOVERY_RETRY: 5 + TF_REGISTRY_CLIENT_TIMEOUT: 15 ROVER_RUNNER: true jobs: @@ -31,7 +33,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:0.15.5-2111.0103 + image: aztfmod/rover:0.15.5-2201.2106 options: --user 0 steps: @@ -58,6 +60,7 @@ jobs: - name: foundations run: | + sleep 180 /tf/rover/rover.sh -lz ${GITHUB_WORKSPACE}/caf_solution -a apply \ -var-folder ${GITHUB_WORKSPACE}/caf_solution/scenario/foundations/100-passthrough \ -tfstate caf_foundations.tfstate \ @@ -83,7 +86,7 @@ jobs: ] container: - image: aztfmod/rover:0.15.5-2111.0103 + image: aztfmod/rover:0.15.5-2201.2106 options: --user 0 steps: @@ -111,7 +114,7 @@ jobs: -parallelism=30 \ -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ --environment ${{ github.run_id }} \ - -refresh=false + -refresh=false foundations200: name: foundations-200 @@ -126,7 +129,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:0.15.5-2111.0103 + image: aztfmod/rover:0.15.5-2201.2106 options: --user 0 steps: @@ -177,7 +180,7 @@ jobs: ] container: - image: aztfmod/rover:0.15.5-2111.0103 + image: aztfmod/rover:0.15.5-2201.2106 options: --user 0 steps: @@ -205,7 +208,7 @@ jobs: -parallelism=30 \ -var-folder ${GITHUB_WORKSPACE}/${{ matrix.config_files }} \ --environment ${{ github.run_id }} \ - -refresh=false + 
-refresh=false foundations_destroy: name: foundations_destroy @@ -219,7 +222,7 @@ jobs: random_length: ['5'] container: - image: aztfmod/rover:0.15.5-2111.0103 + image: aztfmod/rover:0.15.5-2201.2106 options: --user 0 steps: @@ -240,7 +243,7 @@ jobs: -level level1 \ -parallelism=30 \ --environment ${{ github.run_id }} \ - '-var tags={testing_job_id="${{ github.run_id }}"}' + '-var tags={testing_job_id="${{ github.run_id }}"}' - name: Remove launchpad run: | @@ -252,7 +255,7 @@ jobs: --environment ${{ github.run_id }} \ '-var random_length=${{ matrix.random_length }}' \ '-var prefix=g${{ github.run_id }}' \ - '-var tags={testing_job_id="${{ github.run_id }}"}' + '-var tags={testing_job_id="${{ github.run_id }}"}' - name: Complete purge diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 59e70d7db..5b8d832d2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See http://pre-commit.com/hooks.html for more hooks repos: - repo: git://github.com/antonbabenko/pre-commit-terraform - rev: v1.50.0 + rev: v1.62.3 hooks: - id: terraform_fmt - id: terraform_docs @@ -10,7 +10,7 @@ repos: # - id: terraform_validate # - id: terraform_tfsec - repo: git://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v4.1.0 hooks: - id: check-merge-conflict - id: trailing-whitespace diff --git a/.vscode/settings.json b/.vscode/settings.json index 9c1c2fc1d..1d6767cd0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,8 +1,23 @@ { + "files.eol": "\n", + "editor.tabSize": 2, + "terminal.integrated.defaultProfile.linux": "caf (zsh)", + "terminal.integrated.scrollback": 32000, + "terminal.integrated.profiles.linux": { + "caf (rover on docker)": { + "path": "docker-compose", + "args": ["-f", "rover_on_ssh_host.yml", "run", "-e", "ROVER_RUNNER=true", "--rm", "-w", "/tf/caf" ,"rover", "/usr/bin/zsh"], + "overrideName": true + }, + "caf (zsh)": { + "path": "zsh", + "overrideName": true + } + }, "markdownlint.config": { "MD028": false, "MD025": { - "front_matter_title": "" + "front_matter_title": "" } - } +} } \ No newline at end of file diff --git a/README.md b/README.md index 6de6611e9..57663bcd9 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Microsoft [Cloud Adoption Framework for Azure](https://docs.microsoft.com/azure/ A landing zone is a segment of a cloud environment, that has been pre-provisioned through code, and is dedicated to the support of one or more workloads. Landing zones provide access to foundational tools and controls to establish a compliant place to innovate and build new workloads in the cloud, or to migrate existing workloads to the cloud. Landing zones use defined sets of cloud services and best practices to set you up for success. -We leverage Azure enterprise-scale landing zones and propose a Terraform-native structure, set of mechanisms and artifacts to get started to deploy workloads fast. +We leverage Azure enterprise-scale landing zones and propose a Terraform-native structure, set of mechanisms and artifacts to get started to deploy workloads fast. 
You can review the different components parts of the Cloud Adoption Framework for Azure Terraform landing zones and look at the quick intro :vhs: below: @@ -16,14 +16,14 @@ You can review the different components parts of the Cloud Adoption Framework fo ## Goals -Cloud Adoption Framework for Azure Terraform landing zones is an Open Source project equiping the Site Reliability Engineer on Azure with: +Cloud Adoption Framework for Azure Terraform landing zones is an open-source project equipping the Site Reliability Engineers on Azure with: -* Enable the community with a set of reusable landing artifacts. +* Reusable community artifacts. * Standardize deployments using battlefield-proven components. * Accelerate the setup of complex environments on Azure. * Implement Azure enterprise-scale design and approach with native Terraform and DevOps. * Propose a prescriptive guidance on how to enable DevOps for infrastructure as code on Microsoft Azure. -* Foster a community of Azure *Terraformers* using a common set of practices and sharing best practices. +* Develop configuration-based "infrastructure-as-data" as a democratization of "infrastructure-as-code". ## :rocket: Getting started @@ -31,9 +31,9 @@ When starting an enterprise deployment, we recommend you start creating a config The best way to start is to clone the [starter repository](https://github.com/Azure/caf-terraform-landingzones-starter) and getting started with the configuration files, you can find a quick [onboarding video here](https://www.youtube.com/watch?v=M5BXm30IpdY) -## Documentation +## :books: Documentation -The documentation on this repo is on how to develop, deploy and operate with landing zones can be found in the reference section [here](./documentation/README.md) +You can refer to our new integrated documentation: [GitHub Pages documentation](https://aztfmod.github.io/documentation) ## Repositories diff --git a/caf_launchpad/dynamic_secrets.tf b/caf_launchpad/dynamic_secrets.tf index d1f9f05e1..278997136 100644 --- a/caf_launchpad/dynamic_secrets.tf +++ b/caf_launchpad/dynamic_secrets.tf @@ -1,9 +1,9 @@ module "dynamic_keyvault_secrets" { source = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets" - version = "~>5.4.2" + version = "5.5.1" - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master" for_each = try(var.dynamic_keyvault_secrets, {}) diff --git a/caf_launchpad/landingzone.tf b/caf_launchpad/landingzone.tf index bfed9e82d..64ce2a226 100644 --- a/caf_launchpad/landingzone.tf +++ b/caf_launchpad/landingzone.tf @@ -1,10 +1,15 @@ module "launchpad" { source = "aztfmod/caf/azurerm" - version = "~>5.4.2" + version = "5.5.1" - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=patch.5.4.6" + + # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=master" # source = "../../aztfmod" + providers = { + azurerm.vhub = azurerm + } + current_landingzone_key = var.landingzone.key custom_role_definitions = var.custom_role_definitions enable = var.enable @@ -48,8 +53,9 @@ module "launchpad" { } compute = { - virtual_machines = try(var.compute.virtual_machines, var.virtual_machines) bastion_hosts = try(var.compute.bastion_hosts, var.bastion_hosts) + container_groups = var.container_groups + virtual_machines = 
try(var.compute.virtual_machines, var.virtual_machines) } networking = { diff --git a/caf_launchpad/main.tf b/caf_launchpad/main.tf index ca832f9a8..83cbe9208 100644 --- a/caf_launchpad/main.tf +++ b/caf_launchpad/main.tf @@ -1,29 +1,29 @@ terraform { + required_version = ">= 0.15" required_providers { // azurerm version driven by the caf module // azuread version driven by the caf module random = { source = "hashicorp/random" - version = "~> 2.2.1" + version = "~> 3.1.0" } external = { source = "hashicorp/external" - version = "~> 1.2.0" + version = "~> 2.2.0" } null = { source = "hashicorp/null" - version = "~> 2.1.0" + version = "~> 3.1.0" } tls = { source = "hashicorp/tls" - version = "~> 2.2.0" + version = "~> 3.1.0" } azurecaf = { source = "aztfmod/azurecaf" version = "~> 1.2.0" } } - required_version = ">= 0.13" } diff --git a/caf_launchpad/scenario/100/README.md b/caf_launchpad/scenario/100/README.md index dfa8197a7..429acc329 100644 --- a/caf_launchpad/scenario/100/README.md +++ b/caf_launchpad/scenario/100/README.md @@ -19,14 +19,16 @@ This scenario require the following privileges: ## Deployment ```bash -rover -lz /tf/caf/landingzones/caf_launchpad \ - -launchpad -var-folder \ - /tf/caf/landingzones/caf_launchpad/scenario/100 \ - -a apply +rover -lz /tf/caf/caf_launchpad \ + -launchpad \ + -var-folder /tf/caf/caf_launchpad/scenario/100 \ + -env \ + -a plan -rover -lz /tf/caf/landingzones/caf_launchpad \ +rover -lz /tf/caf/caf_launchpad \ -launchpad \ - -var-folder /tf/caf/landingzones/caf_launchpad/scenario/100 \ + -var-folder /tf/caf/caf_launchpad/scenario/100 \ + -env \ -a destroy ``` diff --git a/caf_launchpad/scenario/200/readme.md b/caf_launchpad/scenario/200/readme.md index 82f7b4440..461b1ab60 100644 --- a/caf_launchpad/scenario/200/readme.md +++ b/caf_launchpad/scenario/200/readme.md @@ -19,15 +19,15 @@ This scenario require the following privileges: ## Deployment ```bash -rover -lz /tf/caf/landingzones/caf_launchpad \ +rover -lz /tf/caf/caf_launchpad \ -launchpad \ - -var-folder /tf/caf/landingzones/caf_launchpad/scenario/200 \ + -var-folder /tf/caf/caf_launchpad/scenario/200 \ -level level0 \ -a apply -rover -lz /tf/caf/landingzones/caf_launchpad \ +rover -lz /tf/caf/caf_launchpad \ -launchpad \ - -var-folder /tf/caf/landingzones/caf_launchpad/scenario/200 \ + -var-folder /tf/caf/caf_launchpad/scenario/200 \ -level level0 \ -a destroy ``` diff --git a/caf_launchpad/variables.tf b/caf_launchpad/variables.tf index 3bf25dce1..dfe7937a9 100644 --- a/caf_launchpad/variables.tf +++ b/caf_launchpad/variables.tf @@ -235,4 +235,8 @@ variable "route_tables" { variable "propagate_launchpad_identities" { default = false +} + +variable "container_groups" { + default = {} } \ No newline at end of file diff --git a/caf_solution/add-ons/aad-pod-identity/aad-msi-binding.yaml b/caf_solution/add-ons/aad-pod-identity/aad-msi-binding.yaml index c8e5a083f..61ccbb7b6 100644 --- a/caf_solution/add-ons/aad-pod-identity/aad-msi-binding.yaml +++ b/caf_solution/add-ons/aad-pod-identity/aad-msi-binding.yaml @@ -1,9 +1,12 @@ # https://github.com/Azure/aad-pod-identity/blob/b3ee1d07209f26c47a96abf3ba20749932763de6/website/content/en/docs/Concepts/azureidentity.md +# +# Note, while the ${} values are not required for kustomize to work, they signify which values are +# eligible for configuration. 
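
For readers wiring up this add-on, here is a minimal tfvars-style sketch of the configuration that ultimately drives the `${...}` placeholders above; the placeholders themselves are filled by the kustomize overlay patches in `aad_pod_identity.tf` shown further down in this PR. The map keys and values below are illustrative assumptions — only the attribute names (`lz_key`, `msi_keys`, `aadpodidentity_selector`, `namespace`) come from the overlay and locals in this change.

```hcl
# Illustrative sketch only - values are assumptions, attribute names follow the
# locals/patches added in aad_pod_identity.tf in this PR.
aad_pod_identity = {
  namespace = "default" # namespace the AzureIdentity / AzureIdentityBinding manifests land in
}

managed_identities = {
  rover = {
    lz_key   = "launchpad"        # landing zone tfstate that exposes the MSIs
    msi_keys = ["rover_platform"] # keys of the user-assigned identities to bind
    # Optional: overrides the AzureIdentity selector. When omitted, the MSI name
    # is used, so pods must carry the label aadpodidbinding: <msi name>.
    aadpodidentity_selector = "podmi-caf-rover-platform-level0"
  }
}
```
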
apiVersion: aadpodidentity.k8s.io/v1 kind: AzureIdentity metadata: - name: podmi-caf-rover-platform-level0 + name: ${azureidentity_name} spec: type: 0 resourceID: ${resource_id} @@ -12,8 +15,8 @@ spec: apiVersion: aadpodidentity.k8s.io/v1 kind: AzureIdentityBinding metadata: - name: podmi-gitlab-runner-binding + name: ${azureidentitybinding_name} spec: - azureIdentity: podmi-caf-rover-platform-level0 - selector: podmi-caf-rover-platform-level0 + azureIdentity: ${azureidentity_name} + selector: ${azureidentity_selector} diff --git a/caf_solution/add-ons/aad-pod-identity/aad_pod_identity.tf b/caf_solution/add-ons/aad-pod-identity/aad_pod_identity.tf index 800058f13..03c11bc94 100644 --- a/caf_solution/add-ons/aad-pod-identity/aad_pod_identity.tf +++ b/caf_solution/add-ons/aad-pod-identity/aad_pod_identity.tf @@ -27,6 +27,18 @@ data "kustomization_overlay" "aad_pod_identity" { namespace = var.aad_pod_identity.namespace + patches { + patch = <<-EOF + - op: replace + path: /metadata/name + value: ${each.value.name} + EOF + + target = { + kind = "AzureIdentity" + } + } + patches { patch = <<-EOF - op: replace @@ -67,7 +79,7 @@ data "kustomization_overlay" "aad_pod_identity" { patch = <<-EOF - op: replace path: /metadata/name - value: ${each.value.name}-binding + value: ${each.value.name} EOF target = { @@ -87,11 +99,13 @@ data "kustomization_overlay" "aad_pod_identity" { } } + # You can provide a managed_identities..aadpodidentity_selector to specify the value here, + # alternatively provide none to have the MSI name used as the selector. patches { patch = <<-EOF - op: replace path: /spec/selector - value: ${each.value.name} + value: ${each.value.selector} EOF target = { @@ -100,9 +114,6 @@ data "kustomization_overlay" "aad_pod_identity" { } } -output "manifests" { - value = data.kustomization_overlay.aad_pod_identity -} locals { msi = { @@ -112,6 +123,7 @@ locals { for msi_key in value.msi_keys : { key = key msi_key = msi_key + selector = try(value.aadpodidentity_selector, local.remote.managed_identities[value.lz_key][msi_key].name) client_id = local.remote.managed_identities[value.lz_key][msi_key].client_id id = local.remote.managed_identities[value.lz_key][msi_key].id name = local.remote.managed_identities[value.lz_key][msi_key].name diff --git a/caf_solution/add-ons/aad-pod-identity/aks-pod-identity-assignment.tf b/caf_solution/add-ons/aad-pod-identity/aks-pod-identity-assignment.tf new file mode 100644 index 000000000..dde524d67 --- /dev/null +++ b/caf_solution/add-ons/aad-pod-identity/aks-pod-identity-assignment.tf @@ -0,0 +1,24 @@ +resource "azurerm_role_assignment" "kubelet_user_msi" { + for_each = local.msi_to_grant_permissions + + scope = each.value.id + role_definition_name = "Managed Identity Operator" + principal_id = local.remote.aks_clusters[var.aks_clusters[var.aks_cluster_key].lz_key][var.aks_cluster_key].kubelet_identity[0].object_id +} + +locals { + msi_to_grant_permissions = { + for msi in flatten( + [ + for key, value in var.managed_identities : [ + for msi_key in value.msi_keys : { + key = key + msi_key = msi_key + id = local.remote.managed_identities[value.lz_key][msi_key].id + principal_id = local.remote.managed_identities[value.lz_key][msi_key].principal_id + } + ] + ] + ) : format("%s-%s", msi.key, msi.msi_key) => msi + } +} diff --git a/caf_solution/add-ons/aad-pod-identity/output.tf b/caf_solution/add-ons/aad-pod-identity/output.tf new file mode 100644 index 000000000..07f99928e --- /dev/null +++ b/caf_solution/add-ons/aad-pod-identity/output.tf @@ -0,0 +1,8 @@ 
+output "manifests" { + value = data.kustomization_overlay.aad_pod_identity +} + +output "managed_identities" { + value = local.remote.managed_identities + sensitive = true +} \ No newline at end of file diff --git a/caf_solution/add-ons/aad-pod-identity/variables.tf b/caf_solution/add-ons/aad-pod-identity/variables.tf index be494fbdc..283ad123c 100644 --- a/caf_solution/add-ons/aad-pod-identity/variables.tf +++ b/caf_solution/add-ons/aad-pod-identity/variables.tf @@ -23,9 +23,6 @@ variable "tags" { variable "aks_cluster_key" { description = "AKS cluster key to deploy the Gitlab Helm charts. The key must be defined in the variable aks_clusters" -} -variable "aks_cluster_vnet_key" { - } variable "aks_clusters" {} variable "vnets" { @@ -35,4 +32,4 @@ variable "managed_identities" { description = "Map of the user managed identities." } -variable "aad_pod_identity" {} \ No newline at end of file +variable "aad_pod_identity" {} diff --git a/caf_solution/add-ons/aks_applications/app/module.tf b/caf_solution/add-ons/aks_applications/app/module.tf index 138110a32..0870bfdd4 100644 --- a/caf_solution/add-ons/aks_applications/app/module.tf +++ b/caf_solution/add-ons/aks_applications/app/module.tf @@ -22,6 +22,7 @@ resource "helm_release" "charts" { skip_crds = try(each.value.skip_crds, false) create_namespace = try(each.value.create_namespace, false) values = try(each.value.values, null) + version = try(each.value.version, null) dynamic "set" { for_each = try(each.value.sets, {}) @@ -44,4 +45,4 @@ resource "helm_release" "charts" { # values = [ # "${file("values.yaml")}" # ] -} \ No newline at end of file +} diff --git a/caf_solution/add-ons/databricks/backend.azurerm b/caf_solution/add-ons/aks_gitlab_agents/backend.azurerm similarity index 100% rename from caf_solution/add-ons/databricks/backend.azurerm rename to caf_solution/add-ons/aks_gitlab_agents/backend.azurerm diff --git a/caf_solution/add-ons/aks_gitlab_agents/charts.tf b/caf_solution/add-ons/aks_gitlab_agents/charts.tf new file mode 100644 index 000000000..b03abf1dc --- /dev/null +++ b/caf_solution/add-ons/aks_gitlab_agents/charts.tf @@ -0,0 +1,27 @@ + +resource "kubernetes_namespace" "gitlab_runners" { + for_each = var.aks_namespaces + + metadata { + name = each.value + } +} + +resource "helm_release" "chart" { + depends_on = [kubernetes_namespace.gitlab_runners] + for_each = var.aad-pod-identity.runnerRegistrationTokens_mapping.mapping + + chart = var.helm.chart + # create_namespace = try(each.value.create_namespace, false) + name = each.value.name + namespace = try(each.value.namespace, var.helm.namespace) + repository = var.helm.repository + timeout = try(var.helm.timeout, 4000) + values = [file(try(each.value.value_file, var.helm.value_file))] + wait = try(var.helm.wait, true) + + set { + name = "podLabels.aadpodidbinding" + value = local.remote.managed_identities[var.aad-pod-identity.runnerRegistrationTokens_mapping.lz_key][each.key].name + } +} \ No newline at end of file diff --git a/caf_solution/add-ons/aks_gitlab_agents/local.remote_tfstates.tf b/caf_solution/add-ons/aks_gitlab_agents/local.remote_tfstates.tf new file mode 100644 index 000000000..1a51e908e --- /dev/null +++ b/caf_solution/add-ons/aks_gitlab_agents/local.remote_tfstates.tf @@ -0,0 +1,62 @@ +locals { + landingzone = { + current = { + storage_account_name = var.tfstate_storage_account_name + container_name = var.tfstate_container_name + resource_group_name = var.tfstate_resource_group_name + } + lower = { + storage_account_name = var.lower_storage_account_name + 
container_name = var.lower_container_name + resource_group_name = var.lower_resource_group_name + } + } +} + +data "terraform_remote_state" "remote" { + for_each = try(var.landingzone.tfstates, {}) + + backend = var.landingzone.backend_type + config = { + storage_account_name = local.landingzone[try(each.value.level, "current")].storage_account_name + container_name = local.landingzone[try(each.value.level, "current")].container_name + resource_group_name = local.landingzone[try(each.value.level, "current")].resource_group_name + subscription_id = var.tfstate_subscription_id + key = each.value.tfstate + } +} + +locals { + landingzone_tag = { + "landingzone" = var.landingzone.key + } + + global_settings = merge( + try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings, null), + try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.global_settings, null) + ) + + diagnostics = merge( + try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].diagnostics, null), + try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.diagnostics, null) + ) + + remote = { + tags = merge(local.global_settings.tags, local.landingzone_tag, { "level" = var.landingzone.level }, { "environment" = local.global_settings.environment }, { "rover_version" = var.rover_version }, var.tags) + global_settings = local.global_settings + diagnostics = local.diagnostics + + + aks_clusters = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].aks_clusters, {})) + } + + + managed_identities = data.terraform_remote_state.remote[var.aad-pod-identity.lz_key].outputs[var.aad-pod-identity.output_key] + } + +} + +output "managed_identities" { + value = local.remote.managed_identities +} \ No newline at end of file diff --git a/caf_solution/add-ons/aks_gitlab_agents/main.tf b/caf_solution/add-ons/aks_gitlab_agents/main.tf new file mode 100644 index 000000000..263eade2a --- /dev/null +++ b/caf_solution/add-ons/aks_gitlab_agents/main.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 2.51.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.0.2" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0.3" + } + } + required_version = ">= 0.13" +} diff --git a/caf_solution/add-ons/aks_gitlab_agents/providers.tf b/caf_solution/add-ons/aks_gitlab_agents/providers.tf new file mode 100644 index 000000000..6af45c41c --- /dev/null +++ b/caf_solution/add-ons/aks_gitlab_agents/providers.tf @@ -0,0 +1,46 @@ + +provider "azurerm" { + features { + } +} + +provider "kubernetes" { + host = local.k8sconfigs[var.aks_cluster_key].host + username = local.k8sconfigs[var.aks_cluster_key].username + password = local.k8sconfigs[var.aks_cluster_key].password + client_certificate = local.k8sconfigs[var.aks_cluster_key].client_certificate + client_key = local.k8sconfigs[var.aks_cluster_key].client_key + cluster_ca_certificate = local.k8sconfigs[var.aks_cluster_key].cluster_ca_certificate +} + +provider "helm" { + kubernetes { + host = local.k8sconfigs[var.aks_cluster_key].host + username = local.k8sconfigs[var.aks_cluster_key].username + password = local.k8sconfigs[var.aks_cluster_key].password + client_certificate = local.k8sconfigs[var.aks_cluster_key].client_certificate 
+ client_key = local.k8sconfigs[var.aks_cluster_key].client_key + cluster_ca_certificate = local.k8sconfigs[var.aks_cluster_key].cluster_ca_certificate + } +} + +locals { + k8sconfigs = { + for key, value in var.aks_clusters : key => { + host = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.host : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.host + username = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.username : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.username + password = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.password : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.password + client_certificate = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.client_certificate) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.client_certificate) + client_key = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.client_key) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.client_key) + cluster_ca_certificate = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.cluster_ca_certificate) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.cluster_ca_certificate) + } + } +} + +# Get kubeconfig from AKS clusters +data "azurerm_kubernetes_cluster" "kubeconfig" { + for_each = var.aks_clusters + + name = local.remote.aks_clusters[each.value.lz_key][each.value.key].cluster_name + resource_group_name = local.remote.aks_clusters[each.value.lz_key][each.value.key].resource_group_name +} \ No newline at end of file diff --git a/caf_solution/add-ons/aks_gitlab_agents/variables.tf b/caf_solution/add-ons/aks_gitlab_agents/variables.tf new file mode 100644 index 000000000..3703353f6 --- /dev/null +++ b/caf_solution/add-ons/aks_gitlab_agents/variables.tf @@ -0,0 +1,33 @@ +# Map of the remote data state for lower level +variable "lower_storage_account_name" {} +variable "lower_container_name" {} +variable "lower_resource_group_name" {} + +variable "tfstate_subscription_id" { + description = "This value is propulated by the rover. subscription id hosting the remote tfstates" +} +variable "tfstate_storage_account_name" {} +variable "tfstate_container_name" {} +variable "tfstate_key" {} +variable "tfstate_resource_group_name" {} + +variable "landingzone" {} +variable "rover_version" { + default = null +} +variable "tags" { + default = null +} + +variable "helm" {} +variable "aks_namespaces" { + default = {} +} +variable "aks_cluster_key" { + description = "AKS cluster key to deploy the Gitlab Helm charts. 
The key must be defined in the variable aks_clusters" +} +variable "aks_cluster_vnet_key" { + +} +variable "aks_clusters" {} +variable "aad-pod-identity" {} diff --git a/caf_solution/add-ons/aks_secure_baseline_v2/aks-pod-identity-assignment.tf b/caf_solution/add-ons/aks_secure_baseline_v2/aks-pod-identity-assignment.tf index 45954f1aa..03a004560 100644 --- a/caf_solution/add-ons/aks_secure_baseline_v2/aks-pod-identity-assignment.tf +++ b/caf_solution/add-ons/aks_secure_baseline_v2/aks-pod-identity-assignment.tf @@ -46,27 +46,27 @@ resource "azurerm_role_assignment" "kubelet_subnets_networkcontrib" { # principal_id = local.remote.aks_clusters[var.aks_clusters[var.aks_cluster_key].lz_key][var.aks_cluster_key].identity[0].principal_id # } -resource "azurerm_role_assignment" "kubelet_user_msi" { - for_each = local.msi_to_grant_permissions +# resource "azurerm_role_assignment" "kubelet_user_msi" { +# for_each = local.msi_to_grant_permissions - scope = each.value.id - role_definition_name = "Managed Identity Operator" - principal_id = local.remote.aks_clusters[var.aks_clusters[var.aks_cluster_key].lz_key][var.aks_cluster_key].kubelet_identity[0].object_id -} +# scope = each.value.id +# role_definition_name = "Managed Identity Operator" +# principal_id = local.remote.aks_clusters[var.aks_clusters[var.aks_cluster_key].lz_key][var.aks_cluster_key].kubelet_identity[0].object_id +# } -locals { - msi_to_grant_permissions = { - for msi in flatten( - [ - for key, value in var.managed_identities : [ - for msi_key in value.msi_keys : { - key = key - msi_key = msi_key - id = local.remote.managed_identities[value.lz_key][msi_key].id - principal_id = local.remote.managed_identities[value.lz_key][msi_key].principal_id - } - ] - ] - ) : format("%s-%s", msi.key, msi.msi_key) => msi - } -} +# locals { +# msi_to_grant_permissions = { +# for msi in flatten( +# [ +# for key, value in var.managed_identities : [ +# for msi_key in value.msi_keys : { +# key = key +# msi_key = msi_key +# id = local.remote.managed_identities[value.lz_key][msi_key].id +# principal_id = local.remote.managed_identities[value.lz_key][msi_key].principal_id +# } +# ] +# ] +# ) : format("%s-%s", msi.key, msi.msi_key) => msi +# } +# } diff --git a/caf_solution/add-ons/azure_devops/main.tf b/caf_solution/add-ons/azure_devops/main.tf index 35b1d8ad8..0a11d6d44 100644 --- a/caf_solution/add-ons/azure_devops/main.tf +++ b/caf_solution/add-ons/azure_devops/main.tf @@ -1,9 +1,6 @@ terraform { required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = "~> 2.81.0" - } + // azurerm version driven by the caf module azuread = { source = "hashicorp/azuread" version = "~> 1.4.0" diff --git a/caf_solution/add-ons/azure_devops/readme.md b/caf_solution/add-ons/azure_devops/readme.md index 4604a00a6..101e6b471 100644 --- a/caf_solution/add-ons/azure_devops/readme.md +++ b/caf_solution/add-ons/azure_devops/readme.md @@ -39,9 +39,9 @@ Azure: ## Deployment ```bash -rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops \ +rover -lz /tf/caf/caf_launchpad/add-ons/azure_devops \ -tfstate azure_devops-contoso_demo.tfstate \ - -var-folder /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops/scenario/200-contoso_demo \ + -var-folder /tf/caf/caf_launchpad/add-ons/azure_devops/scenario/200-contoso_demo \ -parallelism 30 \ -level level0 \ -env sandpit \ @@ -49,10 +49,10 @@ rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops \ # If the tfstates are stored in a different subscription you need to execute the following command 
-rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops \ +rover -lz /tf/caf/caf_launchpad/add-ons/azure_devops \ -tfstate_subscription_id \ -tfstate azure_devops-contoso_demo.tfstate \ - -var-folder /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops/scenario/200-contoso_demo \ + -var-folder /tf/caf/caf_launchpad/add-ons/azure_devops/scenario/200-contoso_demo \ -parallelism 30 \ -level level0 \ -env sandpit \ diff --git a/caf_solution/add-ons/azure_devops_agent/local.azuread.tf b/caf_solution/add-ons/azure_devops_agent/local.azuread.tf new file mode 100644 index 000000000..ccb4d7e85 --- /dev/null +++ b/caf_solution/add-ons/azure_devops_agent/local.azuread.tf @@ -0,0 +1,9 @@ +locals { + azuread = merge( + var.azuread, + { + azuread_apps = var.azuread_apps + azuread_groups = var.azuread_groups + } + ) +} diff --git a/caf_solution/add-ons/azure_devops_agent/main.tf b/caf_solution/add-ons/azure_devops_agent/main.tf index e0623cf27..9bb2f0f1f 100644 --- a/caf_solution/add-ons/azure_devops_agent/main.tf +++ b/caf_solution/add-ons/azure_devops_agent/main.tf @@ -1,9 +1,6 @@ terraform { required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = "~> 2.81.0" - } + // azurerm version driven by the caf module azuread = { source = "hashicorp/azuread" version = "~> 1.4.0" diff --git a/caf_solution/add-ons/azure_devops_agent/readme.md b/caf_solution/add-ons/azure_devops_agent/readme.md index dd7b29571..e3503c64e 100644 --- a/caf_solution/add-ons/azure_devops_agent/readme.md +++ b/caf_solution/add-ons/azure_devops_agent/readme.md @@ -21,9 +21,9 @@ Landing zone: ### Deploy the Azure Devops agent for level0 ```bash -rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent \ +rover -lz /tf/caf/caf_launchpad/add-ons/azure_devops_agent \ -tfstate level0_azure_devops_agents.tfstate \ - -var-folder /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level0 \ + -var-folder /tf/caf/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level0 \ -parallelism 30 \ -level level0 \ -env sandpit \ @@ -32,9 +32,9 @@ rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent \ ### Deploy the Azure Devops agent for level1 ```bash -rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent \ +rover -lz /tf/caf/caf_launchpad/add-ons/azure_devops_agent \ -tfstate azdo-agent-level1.tfstate \ - -var-folder /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level1 \ + -var-folder /tf/caf/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level1 \ -parallelism 30 \ -level level1 \ -env sandpit \ @@ -43,10 +43,10 @@ rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent \ # If the tfstates are stored in a different subscription you need to execute the following command -rover -lz /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent \ +rover -lz /tf/caf/caf_launchpad/add-ons/azure_devops_agent \ -tfstate_subscription_id \ -tfstate azdo-agent-level1.tfstate \ - -var-folder /tf/caf/landingzones/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level1 \ + -var-folder /tf/caf/caf_launchpad/add-ons/azure_devops_agent/scenario/200-contoso_demo/level1 \ -parallelism 30 \ -level level1 \ -env sandpit \ diff --git a/caf_solution/add-ons/azure_devops_agent/solution.tf b/caf_solution/add-ons/azure_devops_agent/solution.tf index 6d87d2f5d..05e4e070d 100644 --- a/caf_solution/add-ons/azure_devops_agent/solution.tf +++ 
b/caf_solution/add-ons/azure_devops_agent/solution.tf @@ -1,7 +1,8 @@ module "caf" { source = "aztfmod/caf/azurerm" - version = "~>5.3.0" + version = "~>5.4.2" + azuread = local.azuread current_landingzone_key = var.landingzone.key tenant_id = var.tenant_id tfstates = local.tfstates @@ -13,13 +14,11 @@ module "caf" { logged_aad_app_objectId = var.logged_aad_app_objectId resource_groups = var.resource_groups storage_accounts = var.storage_accounts - azuread_groups = var.azuread_groups keyvaults = var.keyvaults keyvault_access_policies = var.keyvault_access_policies managed_identities = var.managed_identities role_mapping = var.role_mapping custom_role_definitions = var.custom_role_definitions - azuread_apps = var.azuread_apps compute = { virtual_machines = var.virtual_machines } diff --git a/caf_solution/add-ons/azure_devops_agent/variables.tf b/caf_solution/add-ons/azure_devops_agent/variables.tf index 45c344a59..0b5ad965c 100644 --- a/caf_solution/add-ons/azure_devops_agent/variables.tf +++ b/caf_solution/add-ons/azure_devops_agent/variables.tf @@ -64,6 +64,9 @@ variable "storage_accounts" { variable "storage_account_blobs" { default = {} } +variable "azuread" { + default = {} +} variable "azuread_groups" { default = {} } @@ -99,4 +102,4 @@ variable "dynamic_keyvault_secrets" { } variable "managed_identities" { default = {} -} \ No newline at end of file +} diff --git a/caf_solution/add-ons/azure_devops_v1/azdo_pipelines.tf b/caf_solution/add-ons/azure_devops_v1/azdo_pipelines.tf index 37f49712f..e8645c875 100644 --- a/caf_solution/add-ons/azure_devops_v1/azdo_pipelines.tf +++ b/caf_solution/add-ons/azure_devops_v1/azdo_pipelines.tf @@ -16,6 +16,7 @@ resource "azuredevops_build_definition" "build_definition" { azuredevops_variable_group.variable_group[key].id ] + # This block handles repos that are hosted in AZDO dynamic "repository" { for_each = { for key, value in try(data.azuredevops_git_repositories.repos[try(each.value.repo_project_key, each.value.project_key)].repositories, {}) : key => value @@ -27,7 +28,20 @@ resource "azuredevops_build_definition" "build_definition" { repo_type = each.value.repo_type yml_path = each.value.yaml branch_name = lookup(each.value, "branch_name", null) - # service_connection_id = lookup(each.value, "repo_type", null) == "github" ? null : azuredevops_serviceendpoint_azurerm.github[each.value.service_connection_key].id + } + } + + # This block handles repos that are hosted in GitHub and require a service connection + dynamic "repository" { + for_each = each.value.repo_type == "GitHub" ? [1] : [] + + content { + repo_id = each.value.git_repo_name + repo_type = each.value.repo_type + yml_path = each.value.yaml + branch_name = lookup(each.value, "branch_name", null) + service_connection_id = azuredevops_serviceendpoint_github.serviceendpoint_github[ + each.value.service_connection_key].id } } @@ -52,15 +66,5 @@ resource "azuredevops_build_definition" "build_definition" { value = jsonencode(variable.value) } } -} - -# See https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/build_definition_permissions#permissions for a list of available permissions. 
-resource "azuredevops_build_definition_permissions" "permissions" { - for_each = try(var.permissions.build_definitions, {}) - - project_id = data.azuredevops_project.project[each.value.project_key].id - principal = azuredevops_group.group[each.value.group_key].id - build_definition_id = azuredevops_build_definition.build_definition[each.key].id - permissions = each.value.permissions } diff --git a/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.github.tf b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.github.tf new file mode 100644 index 000000000..bfb14dad4 --- /dev/null +++ b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.github.tf @@ -0,0 +1,31 @@ + +# To support cross subscription +data "external" "github_pat" { + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, null) == "Github" + } + + program = [ + "bash", "-c", + format( + "az keyvault secret show --id '%s'secrets/'%s' --query '{value: value}' -o json", + local.remote.keyvaults[each.value.keyvault.lz_key][each.value.keyvault.key].vault_uri, + each.value.keyvault.secret_name + ) + ] +} + +resource "azuredevops_serviceendpoint_github" "serviceendpoint_github" { + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, null) == "Github" + } + + project_id = data.azuredevops_project.project[each.value.project_key].id + service_endpoint_name = each.value.endpoint_name + + auth_personal { + personal_access_token = data.external.github_pat[each.key].result.value + } +} diff --git a/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.tf b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.tf index bd8de7a90..cb99bc64e 100644 --- a/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.tf +++ b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoint.tf @@ -1,7 +1,11 @@ # To support cross subscription data "external" "client_secret" { - for_each = var.service_endpoints + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "TfsGit") == "TfsGit" + } + program = [ "bash", "-c", format( @@ -13,7 +17,10 @@ data "external" "client_secret" { } resource "azuredevops_serviceendpoint_azurerm" "azure" { - for_each = var.service_endpoints + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "TfsGit") == "TfsGit" + } project_id = data.azuredevops_project.project[each.value.project_key].id service_endpoint_name = each.value.endpoint_name @@ -37,7 +44,10 @@ resource "azuredevops_serviceendpoint_azurerm" "azure" { # resource "azuredevops_resource_authorization" "endpoint" { - for_each = var.service_endpoints + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "TfsGit") == "TfsGit" + } project_id = data.azuredevops_project.project[each.value.project_key].id resource_id = azuredevops_serviceendpoint_azurerm.azure[each.key].id diff --git a/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoints.acr.tf b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoints.acr.tf new file mode 100644 index 000000000..0f40de6d8 --- /dev/null +++ b/caf_solution/add-ons/azure_devops_v1/azdo_service_endpoints.acr.tf @@ -0,0 +1,36 @@ +data "azurerm_container_registry" "acr" { + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "") == "AzureContainerRegistry" + } + + name = 
local.remote.azure_container_registries[each.value.azure_container_registry.lz_key][each.value.azure_container_registry.key].name + resource_group_name = local.remote.azure_container_registries[each.value.azure_container_registry.lz_key][each.value.azure_container_registry.key].resource_group_name +} + +resource "azuredevops_serviceendpoint_dockerregistry" "acr" { + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "") == "AzureContainerRegistry" + } + + project_id = data.azuredevops_project.project[each.value.project_key].id + service_endpoint_name = each.value.endpoint_name + + docker_registry = format("https://%s", data.azurerm_container_registry.acr[each.key].login_server) + docker_username = data.azurerm_container_registry.acr[each.key].admin_username + docker_password = data.azurerm_container_registry.acr[each.key].admin_password + registry_type = "Others" +} + +resource "azuredevops_resource_authorization" "registry_endpoint" { + for_each = { + for key, value in var.service_endpoints : key => value + if try(value.type, "") == "AzureContainerRegistry" + } + + project_id = data.azuredevops_project.project[each.value.project_key].id + resource_id = azuredevops_serviceendpoint_dockerregistry.acr[each.key].id + type = "endpoint" + authorized = try(each.value.authorized, false) +} diff --git a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/group_membership.tf b/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/group_membership.tf deleted file mode 100644 index bdee126ca..000000000 --- a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/group_membership.tf +++ /dev/null @@ -1,10 +0,0 @@ -data "azuredevops_users" "user" { - for_each = toset(var.group_settings.members.user_principal_names) - - principal_name = each.value -} - -resource "azuredevops_group_membership" "membership" { - group = var.group_descriptor - members = flatten(values(data.azuredevops_users.user)[*].users[*].descriptor) -} diff --git a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/main.tf b/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/main.tf deleted file mode 100644 index a533fb035..000000000 --- a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - required_providers { - azuredevops = { - source = "microsoft/azuredevops" - } - } -} \ No newline at end of file diff --git a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/variables.tf b/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/variables.tf deleted file mode 100644 index e2d2cb504..000000000 --- a/caf_solution/add-ons/azure_devops_v1/azuredevops_group_membership/variables.tf +++ /dev/null @@ -1,5 +0,0 @@ -variable "group_descriptor" { -} - -variable "group_settings" { -} diff --git a/caf_solution/add-ons/azure_devops_v1/azuredevops_projects.tf b/caf_solution/add-ons/azure_devops_v1/azuredevops_projects.tf index 10cd00a9c..ebe0afc6c 100644 --- a/caf_solution/add-ons/azure_devops_v1/azuredevops_projects.tf +++ b/caf_solution/add-ons/azure_devops_v1/azuredevops_projects.tf @@ -35,32 +35,4 @@ resource "azuredevops_project_features" "project" { "repositories" = try(lower(each.value.features.repositories), "disabled") "testplans" = try(lower(each.value.features.testplans), "disabled") } -} - -resource "azuredevops_group" "group" { - for_each = var.groups - - scope = data.azuredevops_project.project[each.value.project_key].id - display_name = 
each.value.display_name - description = each.value.description -} - -module "azuredevops_group_membership" { - source = "./azuredevops_group_membership" - for_each = { - for key, value in var.groups : key => value - if try(value.members.user_principal_names, null) != null - } - - group_descriptor = azuredevops_group.group[each.key].descriptor - group_settings = each.value -} - -# See https://registry.terraform.io/providers/microsoft/azuredevops/latest/docs/resources/project_permissions#permissions for a list of available permissions. -resource "azuredevops_project_permissions" "project_perm" { - for_each = try(var.permissions.projects, {}) - - project_id = data.azuredevops_project.project[each.key].id - principal = azuredevops_group.group[each.value.group_key].id - permissions = each.value.permissions -} +} \ No newline at end of file diff --git a/caf_solution/add-ons/azure_devops_v1/locals.remote_tfstates.tf b/caf_solution/add-ons/azure_devops_v1/locals.remote_tfstates.tf index 8482df931..3144726b0 100644 --- a/caf_solution/add-ons/azure_devops_v1/locals.remote_tfstates.tf +++ b/caf_solution/add-ons/azure_devops_v1/locals.remote_tfstates.tf @@ -54,6 +54,9 @@ locals { azuread_groups = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].azuread_groups, {})) } + azure_container_registries = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].azure_container_registries, {})) + } keyvaults = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].keyvaults, {})) } diff --git a/caf_solution/add-ons/azure_devops_v1/readme.md b/caf_solution/add-ons/azure_devops_v1/readme.md index 058bb07a6..6b1b3e13e 100644 --- a/caf_solution/add-ons/azure_devops_v1/readme.md +++ b/caf_solution/add-ons/azure_devops_v1/readme.md @@ -26,16 +26,64 @@ Azure Devops (example): - sample yaml attached [here](./scenario/200-contoso_demo/pipeline/rover.yaml). Azure: -* PAT Token : PAT Token should be updated in keyvault secret that deployed by launchpad LZ as below +* AZDO PAT Token : the PAT token should be updated in the keyvault secret deployed by the launchpad LZ, as shown below +* Github PAT Token : If building from repos hosted in Github, a Github PAT Token should be added to a keyvault secret. + + +## Pipelines +AZDO supports creating pipelines from a number of sources, such as AZDO itself, Github, Bitbucket, +etc. For repos hosted in Github, you must configure a [service connection](https://docs.microsoft.com/en-us/azure/devops/pipelines/library/service-endpoints). + +To do this, create a Github PAT token (repo read access is sufficient), and add it to a KeyVault (we +recommend the 'secrets' KeyVault typically provisioned in level0). 
Then provide the following config +directive to configure the connection: + +``` +service_endpoints = { + github_endpoint = { + endpoint_name = "github_endpoint" + type = "Github" + project_key = "my_project" + keyvault = { + lz_key = "launchpad" + key = "secrets" + secret_name = "github-pat" + } + } +} +``` + +When configuring pipelines via the pipelines{} config directive, you can then set the following +parameters: + +``` +pipelines = { + launchpad = { + project_key = "my_project" + repo_project_key = "my_project_repo" + name = "launchpad" + folder = "\\configuration\\level0" + yaml = "configuration/dev/pipelines/test.yml" + repo_type = "GitHub" + git_repo_name = "github_org/repo_name" + branch_name = "main" + service_connection_key = "github_endpoint" + variables = { + ... + } + } +} +``` + ![](./documentation/images/pat_token.png) ## Deployment ```bash -rover -lz /tf/caf/landingzones/caf_solution/add-ons/azure_devops_v1 \ +rover -lz /tf/caf/caf_solution/add-ons/azure_devops_v1 \ -tfstate azure_devops-contoso_demo.tfstate \ - -var-folder /tf/caf/landingzones/caf_solution/add-ons/azure_devops_v1/scenario/200-contoso_demo \ + -var-folder /tf/caf/caf_solution/add-ons/azure_devops_v1/scenario/200-contoso_demo \ -parallelism 30 \ -level level0 \ -env sandpit \ @@ -43,10 +91,10 @@ rover -lz /tf/caf/landingzones/caf_solution/add-ons/azure_devops_v1 \ # If the tfstates are stored in a different subscription you need to execute the following command -rover -lz /tf/caf/landingzones/caf_solution/add-ons/azure_devops_v1 \ +rover -lz /tf/caf/caf_solution/add-ons/azure_devops_v1 \ -tfstate_subscription_id \ -tfstate azure_devops-contoso_demo.tfstate \ - -var-folder /tf/caf/landingzones/caf_solution/add-ons/azure_devops_v1/scenario/200-contoso_demo \ + -var-folder /tf/caf/caf_solution/add-ons/azure_devops_v1/scenario/200-contoso_demo \ -parallelism 30 \ -level level0 \ -env sandpit \ diff --git a/caf_solution/add-ons/azure_devops_v1/variables.tf b/caf_solution/add-ons/azure_devops_v1/variables.tf index 245eadd0c..36a172638 100644 --- a/caf_solution/add-ons/azure_devops_v1/variables.tf +++ b/caf_solution/add-ons/azure_devops_v1/variables.tf @@ -75,9 +75,3 @@ variable "azdo_pat_admin" { default = null description = "(Optional). Azure Devops PAT Token. If not provided with this value must be retrieved from the Keyvault secret." } -variable "groups" { - default = {} -} -variable "permissions" { - default = {} -} diff --git a/caf_solution/add-ons/caf_eslz/custom_landing_zones.tf b/caf_solution/add-ons/caf_eslz/custom_landing_zones.tf index f0963f36c..ea5353e54 100644 --- a/caf_solution/add-ons/caf_eslz/custom_landing_zones.tf +++ b/caf_solution/add-ons/caf_eslz/custom_landing_zones.tf @@ -1,7 +1,13 @@ -data "azurerm_management_group" "id" { +data "external" "reconcile_susbscription_ids_from_management_groups" { for_each = var.reconcile_vending_subscriptions ?
var.custom_landing_zones : {} - name = each.key + program = [ + "bash", "-c", + format( + "az rest --method GET --url https://management.azure.com/providers/Microsoft.Management/managementGroups/%s/subscriptions?api-version=2020-05-01 --query \"[value][].name | sort(@) | {subscription_ids: join(',', @)}\" -o json 2>/dev/null || echo '{\"subscription_ids\":\"\"}' | jq -r", + each.key + ) + ] } locals { @@ -23,7 +29,7 @@ locals { [ for key, value in mg_value.subscriptions : local.caf.subscriptions[value.lz_key][value.key].subscription_id ], - try(tolist(data.azurerm_management_group.id[mg_id].subscription_ids), []), + try(split(",",data.external.reconcile_susbscription_ids_from_management_groups[mg_id].result.subscription_ids), []), try(mg_value.subscription_ids, []) ) ) diff --git a/caf_solution/add-ons/caf_eslz/enterprise_scale.tf b/caf_solution/add-ons/caf_eslz/enterprise_scale.tf index a85f8699c..d5e4237e6 100644 --- a/caf_solution/add-ons/caf_eslz/enterprise_scale.tf +++ b/caf_solution/add-ons/caf_eslz/enterprise_scale.tf @@ -2,10 +2,16 @@ module "enterprise_scale" { source = "Azure/caf-enterprise-scale/azurerm" - version = "~> 0.3.0" + version = "1.1.1" # source = "../../../../eslz" + providers = { + azurerm = azurerm + azurerm.connectivity = azurerm + azurerm.management = azurerm + } + root_parent_id = data.azurerm_client_config.current.tenant_id default_location = local.global_settings.regions[local.global_settings.default_region] diff --git a/caf_solution/add-ons/caf_eslz/main.tf b/caf_solution/add-ons/caf_eslz/main.tf index 47fea53cb..1ee5a91e2 100644 --- a/caf_solution/add-ons/caf_eslz/main.tf +++ b/caf_solution/add-ons/caf_eslz/main.tf @@ -3,7 +3,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "~> 2.65.0" + version = "~> 2.93.1" } } required_version = ">= 0.14" diff --git a/caf_solution/add-ons/caf_eslz/output.tf b/caf_solution/add-ons/caf_eslz/output.tf new file mode 100644 index 000000000..9c05cc8ff --- /dev/null +++ b/caf_solution/add-ons/caf_eslz/output.tf @@ -0,0 +1,4 @@ +output "objects" { + value = module.enterprise_scale + sensitive = true +} \ No newline at end of file diff --git a/caf_solution/add-ons/cross_tenant_hub_connection/hub_connection.tf b/caf_solution/add-ons/cross_tenant_hub_connection/hub_connection.tf old mode 100755 new mode 100644 index 39df5ef59..c97697803 --- a/caf_solution/add-ons/cross_tenant_hub_connection/hub_connection.tf +++ b/caf_solution/add-ons/cross_tenant_hub_connection/hub_connection.tf @@ -9,7 +9,8 @@ resource "null_resource" "wait_for_virtual_hub_state" { command = format("%s/scripts/wait.sh", path.module) environment = { - VIRTUAL_HUB_ID = data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_hubs[each.value.virtual_hub.key].id + VIRTUAL_HUB_ID = try(data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_hubs[each.value.virtual_hub.key].id, + data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_wans[each.value.virtual_hub.vwan_key].virtual_hubs[each.value.virtual_hub.key].id) } } } @@ -19,7 +20,8 @@ resource "azurerm_virtual_hub_connection" "conn" { depends_on = [null_resource.wait_for_virtual_hub_state] name = each.value.name - virtual_hub_id = data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_hubs[each.value.virtual_hub.key].id 
+ virtual_hub_id = try(data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_hubs[each.value.virtual_hub.key].id, + data.terraform_remote_state.remote[each.value.virtual_hub.lz_key].outputs.objects[each.value.virtual_hub.lz_key].virtual_wans[each.value.virtual_hub.vwan_key].virtual_hubs[each.value.virtual_hub.key].id) remote_virtual_network_id = try( each.value.vnet.id, data.terraform_remote_state.remote[each.value.vnet.lz_key].outputs.objects[each.value.vnet.lz_key].vnets[each.value.vnet.vnet_key].id diff --git a/caf_solution/add-ons/cross_tenant_hub_connection/scripts/wait.sh b/caf_solution/add-ons/cross_tenant_hub_connection/scripts/wait.sh old mode 100755 new mode 100644 diff --git a/caf_solution/add-ons/databricks/databricks.tf b/caf_solution/add-ons/databricks/databricks.tf deleted file mode 100644 index 1c81174b7..000000000 --- a/caf_solution/add-ons/databricks/databricks.tf +++ /dev/null @@ -1,21 +0,0 @@ -locals { - azure_workspace_resource_id = local.remote.databricks_workspaces[var.databricks.lz_key][var.databricks.workspace_key].id -} - -provider "databricks" { - azure_workspace_resource_id = local.azure_workspace_resource_id - # azure_client_id = var.client_id - # azure_client_secret = var.client_secret - # azure_tenant_id = var.tenant_id -} - -module "databricks" { - source = "../../modules/databricks" - - settings = var.databricks -} - -output "databricks" { - value = module.databricks - sensitive = false -} diff --git a/caf_solution/add-ons/databricks/main.tf b/caf_solution/add-ons/databricks/main.tf deleted file mode 100644 index 513475308..000000000 --- a/caf_solution/add-ons/databricks/main.tf +++ /dev/null @@ -1,73 +0,0 @@ -terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = "~> 2.43" - } - azurecaf = { - source = "aztfmod/azurecaf" - version = "1.0.0" - } - databricks = { - source = "databrickslabs/databricks" - version = "~> 0.2.5" - } - } - required_version = ">= 0.13" -} - -provider "azurerm" { - features { - key_vault { - purge_soft_delete_on_destroy = true - } - } -} - -data "azurerm_client_config" "current" {} - -data "terraform_remote_state" "landingzone" { - backend = "azurerm" - config = { - storage_account_name = var.tfstate_storage_account_name - container_name = var.tfstate_container_name - key = var.tfstate_key - resource_group_name = var.tfstate_resource_group_name - } -} - -locals { - diagnostics = { - diagnostics_definition = merge(data.terraform_remote_state.landingzone.outputs.diagnostics.diagnostics_definition, var.diagnostics_definition) - diagnostics_destinations = data.terraform_remote_state.landingzone.outputs.diagnostics.diagnostics_destinations - storage_accounts = data.terraform_remote_state.landingzone.outputs.diagnostics.storage_accounts - log_analytics = data.terraform_remote_state.landingzone.outputs.diagnostics.log_analytics - } - - - - # Update the tfstates map - tfstates = merge( - tomap( - { - (var.landingzone.key) = local.backend[var.landingzone.backend_type] - } - ) - , - data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.tfstates - ) - - - backend = { - azurerm = { - storage_account_name = var.tfstate_storage_account_name - container_name = var.tfstate_container_name - resource_group_name = var.tfstate_resource_group_name - key = var.tfstate_key - level = var.landingzone.level - tenant_id = var.tenant_id - subscription_id = data.azurerm_client_config.current.subscription_id - } - } - -} diff --git 
a/caf_solution/add-ons/databricks/variables.tf b/caf_solution/add-ons/databricks/variables.tf deleted file mode 100644 index 54aa3cfd8..000000000 --- a/caf_solution/add-ons/databricks/variables.tf +++ /dev/null @@ -1,157 +0,0 @@ -# Map of the remote data state for lower level -variable "lower_storage_account_name" {} -variable "lower_container_name" {} -variable "lower_resource_group_name" {} - -variable "tfstate_storage_account_name" {} -variable "tfstate_container_name" {} -variable "tfstate_key" {} -variable "tfstate_resource_group_name" {} - -variable "global_settings" { - default = {} -} - -variable "landingzone" { - default = "" -} - -variable "environment" { - default = "sandpit" -} -variable "rover_version" { - default = null -} -variable "max_length" { - default = 40 -} -variable "logged_user_objectId" { - default = null -} -variable "logged_aad_app_objectId" { - default = null -} -variable "tags" { - default = null - type = map(any) -} -variable "diagnostic_log_analytics" { - default = {} -} -variable "app_service_environments" { - default = {} -} -variable "app_service_plans" { - default = {} -} -variable "app_services" { - default = {} -} -variable "diagnostics_definition" { - default = null -} -variable "resource_groups" { - default = null -} -variable "network_security_group_definition" { - default = {} -} -variable "vnets" { - default = {} -} -variable "azurerm_redis_caches" { - default = {} -} -variable "mssql_servers" { - default = {} -} -variable "mssql_databases" { - default = {} -} -variable "mssql_elastic_pools" { - default = {} -} -variable "storage_accounts" { - default = {} -} -variable "azuread_groups" { - default = {} -} -variable "keyvaults" { - default = {} -} -variable "keyvault_access_policies" { - default = {} -} -variable "virtual_machines" { - default = {} -} -variable "azure_container_registries" { - default = {} -} -variable "bastion_hosts" { - default = {} -} -variable "public_ip_addresses" { - default = {} -} -variable "diagnostic_storage_accounts" { - default = {} -} -variable "managed_identities" { - default = {} -} -variable "private_dns" { - default = {} -} -variable "synapse_workspaces" { - default = {} -} -variable "azurerm_application_insights" { - default = {} -} -variable "role_mapping" { - default = {} -} -variable "aks_clusters" { - default = {} -} -variable "databricks_workspaces" { - default = {} -} -variable "machine_learning_workspaces" { - default = {} -} -variable "monitoring" { - default = {} -} -variable "virtual_wans" { - default = {} -} -variable "event_hub_namespaces" { - default = {} -} -variable "application_gateways" { - default = {} -} -variable "application_gateway_applications" { - default = {} -} -variable "application_gateway_waf_policies" { - default = {} -} -variable "dynamic_keyvault_secrets" { - default = {} -} -variable "disk_encryption_sets" { - default = {} -} -variable "keyvault_keys" { - default = {} -} -variable "databricks" { - default = {} -} -variable "var_folder_path" { - default = {} -} \ No newline at end of file diff --git a/caf_solution/add-ons/databricks_v1/backend.azurerm b/caf_solution/add-ons/databricks_v1/backend.azurerm new file mode 100644 index 000000000..5d026b233 --- /dev/null +++ b/caf_solution/add-ons/databricks_v1/backend.azurerm @@ -0,0 +1,4 @@ +terraform { + backend "azurerm" { + } +} \ No newline at end of file diff --git a/caf_solution/add-ons/databricks_v1/cluster.tf b/caf_solution/add-ons/databricks_v1/cluster.tf new file mode 100644 index 000000000..0582a4180 --- /dev/null +++ 
b/caf_solution/add-ons/databricks_v1/cluster.tf @@ -0,0 +1,111 @@ +resource "databricks_cluster" "cluster" { + for_each = var.databricks_clusters + + # + # Required + # + + spark_version = data.databricks_spark_version.runtime[each.key].id + + # + # Required - optional in some cases + # + + # Required - optional if instance_pool_id is given + node_type_id = can(each.value.instance_pool) ? null : data.databricks_node_type.node_type[each.key].id + + # + # Optional + # + + autotermination_minutes = try(each.value.autotermination_minutes, null) + cluster_name = try(each.value.name, null) + custom_tags = try(each.value.custom_tags, null) + driver_node_type_id = can(each.value.driver_node_type) ? data.databricks_node_type.driver_node_type[each.key].id : data.databricks_node_type.node_type[each.key].id + enable_local_disk_encryption = try(each.value.enable_local_disk_encryption, null) + idempotency_token = try(each.value.idempotency_token, null) + is_pinned = try(each.value.is_pinned, false) + single_user_name = try(each.value.single_user_name, null) + spark_conf = try(each.value.spark_conf, null) + spark_env_vars = try(each.value.spark_env_vars, null) + ssh_public_keys = try(each.value.ssh_public_keys, null) + + dynamic "autoscale" { + for_each = try(each.value.autoscale, null) == null ? [] : [1] + + content { + min_workers = try(each.value.autoscale.min_workers, null) + max_workers = try(each.value.autoscale.max_workers, null) + } + } + + # Add library block - Doc not super clear - https://registry.terraform.io/providers/databrickslabs/databricks/latest/docs/resources/cluster#library-configuration-block + + dynamic "cluster_log_conf" { + for_each = try(each.value.cluster_log_conf, {}) + + content { + dynamic "dbfs" { + for_each = each.value.cluster_log_conf.dbfs + + content { + destination = dbfs.value + } + } + } + } + +} + +data "databricks_node_type" "driver_node_type" { + for_each = { + for key, value in var.databricks_clusters : key => value + if can(value.driver_node_type) + } + + category = try(each.value.driver_node_type.category, "General Purpose (HDD)") + gb_per_core = try(each.value.driver_node_type.gb_per_core, 0) + is_io_cache_enabled = try(each.value.driver_node_type.is_io_cache_enabled, false) + local_disk = try(each.value.driver_node_type.local_disk, false) + min_cores = try(each.value.driver_node_type.min_cores, 0) + min_gpus = try(each.value.driver_node_type.min_gpus, 0) + min_memory_gb = try(each.value.driver_node_type.min_memory_gb, 0) + photon_driver_capable = try(each.value.driver_node_type.photon_driver_capable, false) + photon_worker_capable = try(each.value.driver_node_type.photon_worker_capable, false) + support_port_forwarding = try(each.value.driver_node_type.support_port_forwarding, false) + +} + +data "databricks_node_type" "node_type" { + for_each = { + for key, value in var.databricks_clusters : key => value + if can(value.node_type) + } + + category = try(each.value.node_type.category, "General Purpose (HDD)") + gb_per_core = try(each.value.node_type.gb_per_core, 0) + is_io_cache_enabled = try(each.value.node_type.is_io_cache_enabled, false) + local_disk = try(each.value.node_type.local_disk, false) + min_cores = try(each.value.node_type.min_cores, 0) + min_gpus = try(each.value.node_type.min_gpus, 0) + min_memory_gb = try(each.value.node_type.min_memory_gb, 0) + photon_driver_capable = try(each.value.node_type.photon_driver_capable, false) + photon_worker_capable = try(each.value.node_type.photon_worker_capable, false) + support_port_forwarding = 
try(each.value.node_type.support_port_forwarding, false) + +} + +data "databricks_spark_version" "runtime" { + for_each = var.databricks_clusters + + beta = try(each.value.spark_version.beta, false) + genomics = try(each.value.spark_version.genomics, false) + gpu = try(each.value.spark_version.gpu, false) + latest = try(each.value.spark_version.latest, true) + long_term_support = try(each.value.spark_version.long_term_support, false) + ml = try(each.value.spark_version.ml, false) + photon = try(each.value.spark_version.photon, false) + scala = try(each.value.spark_version.scala, "2.12") + spark_version = try(each.value.spark_version.spark_version, "3.0.1") + +} \ No newline at end of file diff --git a/caf_solution/modules/databricks/instance_pool.tf b/caf_solution/add-ons/databricks_v1/instance_pool.tf similarity index 100% rename from caf_solution/modules/databricks/instance_pool.tf rename to caf_solution/add-ons/databricks_v1/instance_pool.tf diff --git a/caf_solution/add-ons/databricks_v1/locals.remote_tfstates.tf b/caf_solution/add-ons/databricks_v1/locals.remote_tfstates.tf new file mode 100644 index 000000000..aff79011b --- /dev/null +++ b/caf_solution/add-ons/databricks_v1/locals.remote_tfstates.tf @@ -0,0 +1,46 @@ +locals { + landingzone = { + current = { + storage_account_name = var.tfstate_storage_account_name + container_name = var.tfstate_container_name + resource_group_name = var.tfstate_resource_group_name + } + lower = { + storage_account_name = var.lower_storage_account_name + container_name = var.lower_container_name + resource_group_name = var.lower_resource_group_name + } + } +} + +data "terraform_remote_state" "remote" { + for_each = try(var.landingzone.tfstates, {}) + + backend = var.landingzone.backend_type + config = local.remote_state[try(each.value.backend_type, var.landingzone.backend_type, "azurerm")][each.key] +} + +locals { + + remote_state = { + azurerm = { + for key, value in try(var.landingzone.tfstates, {}) : key => { + container_name = try(value.workspace, local.landingzone[try(value.level, "current")].container_name) + key = value.tfstate + resource_group_name = try(value.resource_group_name, local.landingzone[try(value.level, "current")].resource_group_name) + storage_account_name = try(value.storage_account_name, local.landingzone[try(value.level, "current")].storage_account_name) + subscription_id = try(value.subscription_id, data.azurerm_client_config.current.subscription_id) + tenant_id = try(value.tenant_id, data.azurerm_client_config.current.tenant_id) + } + } + } + + global_settings = data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings + + remote = { + databricks_workspaces = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].databricks_workspaces, {})) + } + } + +} diff --git a/caf_solution/add-ons/databricks_v1/main.tf b/caf_solution/add-ons/databricks_v1/main.tf new file mode 100644 index 000000000..1f332cb48 --- /dev/null +++ b/caf_solution/add-ons/databricks_v1/main.tf @@ -0,0 +1,31 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 2.82.0" + } + azurecaf = { + source = "aztfmod/azurecaf" + version = "~> 1.2.0" + } + databricks = { + source = "databrickslabs/databricks" + version = "~> 0.3.9" + } + } + required_version = ">= 0.13" +} + +provider "azurerm" { + features {} +} + +data "azurerm_client_config" "current" {} + 
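+# Descriptive note: the databricks provider below is configured from a workspace deployed by
+# another landing zone. locals.remote_tfstates.tf exposes the remote databricks_workspaces
+# objects, and var.databricks_workspace (lz_key / workspace_key) selects the entry whose
+# workspace_url is used as the provider host.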
+locals { + azure_workspace_resource = local.remote.databricks_workspaces[var.databricks_workspace.lz_key][var.databricks_workspace.workspace_key] +} + +provider "databricks" { + host = local.azure_workspace_resource.workspace_url +} diff --git a/caf_solution/add-ons/databricks_v1/output.tf b/caf_solution/add-ons/databricks_v1/output.tf new file mode 100644 index 000000000..f42fe1158 --- /dev/null +++ b/caf_solution/add-ons/databricks_v1/output.tf @@ -0,0 +1,3 @@ +output "cluster" { + value = databricks_cluster.cluster +} \ No newline at end of file diff --git a/caf_solution/add-ons/databricks_v1/variables.tf b/caf_solution/add-ons/databricks_v1/variables.tf new file mode 100644 index 000000000..0842d9ef0 --- /dev/null +++ b/caf_solution/add-ons/databricks_v1/variables.tf @@ -0,0 +1,24 @@ +# Map of the remote data state for lower level +variable "lower_storage_account_name" {} +variable "lower_container_name" {} +variable "lower_resource_group_name" {} + +variable "tfstate_storage_account_name" {} +variable "tfstate_container_name" {} +variable "tfstate_key" {} +variable "tfstate_resource_group_name" {} + +variable "global_settings" { + default = {} +} + +variable "landingzone" { + default = "" +} +variable "databricks_clusters" { + description = "This resource allows you to create, update, and delete clusters." + default = {} +} +variable "databricks_workspace" { + description = "Azure Databricks workspace where the resources will be created" +} \ No newline at end of file diff --git a/caf_solution/add-ons/hashicorp_vault_secrets/README.md b/caf_solution/add-ons/hashicorp_vault_secrets/README.md index d0b2e8ba7..50547d8a4 100644 --- a/caf_solution/add-ons/hashicorp_vault_secrets/README.md +++ b/caf_solution/add-ons/hashicorp_vault_secrets/README.md @@ -30,8 +30,8 @@ export environment=[YOUR_ENVIRONMENT] ```bash rover \ - -lz /tf/caf/landingzones/caf_solution/add-ons/hashicorp_vault_secrets \ - -var-folder /tf/caf/landingzones/caf_solution/add-ons/hashicorp_vault_secrets/scenario/100-simple-hashicorp-vault-secrets \ + -lz /tf/caf/caf_solution/add-ons/hashicorp_vault_secrets \ + -var-folder /tf/caf/caf_solution/add-ons/hashicorp_vault_secrets/scenario/100-simple-hashicorp-vault-secrets \ -tfstate vault.tfstate \ -env ${environment}} \ -level level1 \ diff --git a/caf_solution/add-ons/helm-charts/charts.tf b/caf_solution/add-ons/helm-charts/charts.tf index be33a7906..b592c08bb 100644 --- a/caf_solution/add-ons/helm-charts/charts.tf +++ b/caf_solution/add-ons/helm-charts/charts.tf @@ -19,4 +19,20 @@ resource "helm_release" "chart" { timeout = try(each.value.timeout, 4000) values = [file(each.value.value_file)] wait = try(each.value.wait, true) + + dynamic "set" { + for_each = try(each.value.sets, {}) + content { + name = set.key + value = set.value + } + } + + dynamic "set_sensitive" { + for_each = try(each.value.sets_sensitive, {}) + content { + name = set_sensitive.key + value = set_sensitive.value + } + } } \ No newline at end of file diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/README.md b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/README.md new file mode 100644 index 000000000..b9ecc5706 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/README.md @@ -0,0 +1,36 @@ +# Overview + +This module provides a method to configure and apply a `SecretProviderClass` object, which is +required to use the `secrets-store-csi-driver-provider-azure` AKS addon. 
+ +Note, similar to the `aad-pod-identity` module (`caf_solution/add-ons/aad-pod-identity`), this +module does not install the addon itself. This can be accomplished in a number of ways, including +manually, via Flux, or using helm via `caf_solution/add-ons/aks_applications`. + +# Prerequisites + +* An AKS cluster +* The `aad-pod-identity` addon +* The `secrets-store-csi-driver-provider-azure` addon + +# Usage + +``` +aks_cluster_key = "cluster_re1" + +aks_clusters = { + cluster_re1 = { + lz_key = "aks" + key = "cluster_re1" + } +} + +csi_keyvault_provider = { + namespace = "kube-system" + create = false + secretproviderclass_name = "azure-tls" + secret_name = "azure-tls" + cert_name = "wildcard-ingress" + keyvault_name = "kv-app-gateway-certs" +} +``` diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/backend.azurerm b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/backend.azurerm new file mode 100644 index 000000000..5d026b233 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/backend.azurerm @@ -0,0 +1,4 @@ +terraform { + backend "azurerm" { + } +} \ No newline at end of file diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/kustomization_build.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/kustomization_build.tf new file mode 100644 index 000000000..985bd96f5 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/kustomization_build.tf @@ -0,0 +1,16 @@ +resource "kustomization_resource" "p0" { + for_each = var.settings.ids_prio[0] + manifest = var.settings.manifests[each.value] +} + +resource "kustomization_resource" "p1" { + depends_on = [kustomization_resource.p0] + for_each = var.settings.ids_prio[1] + manifest = var.settings.manifests[each.value] +} + +resource "kustomization_resource" "p2" { + depends_on = [kustomization_resource.p1] + for_each = var.settings.ids_prio[2] + manifest = var.settings.manifests[each.value] +} diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/main.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/main.tf new file mode 100644 index 000000000..e65c6fa22 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/main.tf @@ -0,0 +1,7 @@ +terraform { + required_providers { + kustomization = { + source = "kbst/kustomization" + } + } +} \ No newline at end of file diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/variables.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/variables.tf new file mode 100644 index 000000000..f5c321890 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/build/variables.tf @@ -0,0 +1,2 @@ +variable "settings" { +} diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/csi_keyvault_provider.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/csi_keyvault_provider.tf new file mode 100644 index 000000000..6d7226af8 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/csi_keyvault_provider.tf @@ -0,0 +1,109 @@ +resource "kubernetes_namespace" "ns" { + count = var.csi_keyvault_provider.namespace != {} && try(var.csi_keyvault_provider.create, true) ? 
1 : 0 + + metadata { + name = var.csi_keyvault_provider.namespace + } +} + +module "build" { + depends_on = [kubernetes_namespace.ns] + source = "./build" + settings = data.kustomization_overlay.csi_keyvault_provider +} + +data "kustomization_overlay" "csi_keyvault_provider" { + resources = [ + "secretproviderclass.yaml", + ] + + namespace = var.csi_keyvault_provider.namespace + + patches { + patch = <<-EOF + - op: replace + path: /metadata/name + value: ${var.csi_keyvault_provider.secretproviderclass_name} + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/secretObjects/0/secretName + value: ${var.csi_keyvault_provider.secret_name} + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/secretObjects/0/data/0/objectName + value: ${var.csi_keyvault_provider.cert_name} + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/secretObjects/0/data/1/objectName + value: ${var.csi_keyvault_provider.cert_name} + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/parameters/keyvaultName + value: ${var.csi_keyvault_provider.keyvault_name} + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/parameters/objects + value: | + array: + - | + objectName: ${var.csi_keyvault_provider.cert_name} + objectType: secret + EOF + + target = { + kind = "SecretProviderClass" + } + } + + patches { + patch = <<-EOF + - op: replace + path: /spec/parameters/tenantId + value: ${try(var.csi_keyvault_provider.keyvault_tenant_id, data.azurerm_client_config.current.tenant_id)} + EOF + + target = { + kind = "SecretProviderClass" + } + } +} diff --git a/caf_solution/add-ons/databricks/locals.remote_tfstates.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/local.remote_tfstates.tf similarity index 60% rename from caf_solution/add-ons/databricks/locals.remote_tfstates.tf rename to caf_solution/add-ons/secrets-store-csi-driver-provider-azure/local.remote_tfstates.tf index 2a5367f32..6ac4adb34 100644 --- a/caf_solution/add-ons/databricks/locals.remote_tfstates.tf +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/local.remote_tfstates.tf @@ -19,8 +19,9 @@ data "terraform_remote_state" "remote" { backend = var.landingzone.backend_type config = { storage_account_name = local.landingzone[try(each.value.level, "current")].storage_account_name - container_name = local.landingzone[try(each.value.level, "current")].container_name + container_name = try(each.value.container, local.landingzone[try(each.value.level, "current")].container_name) resource_group_name = local.landingzone[try(each.value.level, "current")].resource_group_name + subscription_id = var.tfstate_subscription_id key = each.value.tfstate } } @@ -30,14 +31,16 @@ locals { "landingzone" = var.landingzone.key } - tags = merge(local.global_settings.tags, local.landingzone_tag, { "level" = var.landingzone.level }, { "environment" = local.global_settings.environment }, { "rover_version" = var.rover_version }, var.tags) - global_settings = data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings + diagnostics = 
data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].diagnostics remote = { - databricks_workspaces = { - for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.databricks_workspaces[key], {})) + tags = merge(local.global_settings.tags, local.landingzone_tag, { "level" = var.landingzone.level }, { "environment" = local.global_settings.environment }, { "rover_version" = var.rover_version }, var.tags) + global_settings = local.global_settings + diagnostics = local.diagnostics + + aks_clusters = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].aks_clusters, {})) } } - } diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/main.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/main.tf new file mode 100644 index 000000000..9c8722371 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/main.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 2.55.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.0.2" + } + kustomization = { + source = "kbst/kustomization" + version = "~> 0.5.0" + } + } + required_version = ">= 0.13" +} + +data "azurerm_client_config" "current" {} diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/output.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/output.tf new file mode 100644 index 000000000..e5d6da98a --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/output.tf @@ -0,0 +1,3 @@ +output "manifests" { + value = data.kustomization_overlay.csi_keyvault_provider +} diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/providers.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/providers.tf new file mode 100644 index 000000000..06e22a736 --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/providers.tf @@ -0,0 +1,40 @@ + +provider "azurerm" { + features { + } +} + +provider "kubernetes" { + host = local.k8sconfigs[var.aks_cluster_key].host + username = local.k8sconfigs[var.aks_cluster_key].username + password = local.k8sconfigs[var.aks_cluster_key].password + client_certificate = local.k8sconfigs[var.aks_cluster_key].client_certificate + client_key = local.k8sconfigs[var.aks_cluster_key].client_key + cluster_ca_certificate = local.k8sconfigs[var.aks_cluster_key].cluster_ca_certificate +} + +provider "kustomization" { + kubeconfig_raw = local.k8sconfigs[var.aks_cluster_key].kube_admin_config_raw +} + +locals { + k8sconfigs = { + for key, value in var.aks_clusters : key => { + kube_admin_config_raw = data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config_raw + host = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.host : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.host + username = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.username : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.username + password = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? 
data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.password : data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.password + client_certificate = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.client_certificate) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.client_certificate) + client_key = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.client_key) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.client_key) + cluster_ca_certificate = local.remote.aks_clusters[value.lz_key][value.key].enable_rbac ? base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_admin_config.0.cluster_ca_certificate) : base64decode(data.azurerm_kubernetes_cluster.kubeconfig[key].kube_config.0.cluster_ca_certificate) + } + } +} + +# Get kubeconfig from AKS clusters +data "azurerm_kubernetes_cluster" "kubeconfig" { + for_each = var.aks_clusters + + name = local.remote.aks_clusters[each.value.lz_key][each.value.key].cluster_name + resource_group_name = local.remote.aks_clusters[each.value.lz_key][each.value.key].resource_group_name +} \ No newline at end of file diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/secretproviderclass.yaml b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/secretproviderclass.yaml new file mode 100644 index 000000000..b9907644b --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/secretproviderclass.yaml @@ -0,0 +1,25 @@ +# https://azure.github.io/secrets-store-csi-driver-provider-azure/getting-started/usage/#create-your-own-secretproviderclass-object + +apiVersion: secrets-store.csi.x-k8s.io/v1 +kind: SecretProviderClass +metadata: + name: ${secretproviderclass_name} +spec: + provider: azure + secretObjects: # secretObjects defines the desired state of synced K8s secret objects + - secretName: ${secret_name} # secretName is what gets provided to the target resource (e.g. ingress controller) + type: kubernetes.io/tls + data: + - objectName: ${cert_name} + key: tls.key + - objectName: ${cert_name} + key: tls.crt + parameters: + usePodIdentity: "true" + keyvaultName: ${keyvault_name} + objects: | + array: + - | + objectName: ${cert_name} + objectType: secret + tenantId: ${keyvault_tenant_id} diff --git a/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/variables.tf b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/variables.tf new file mode 100644 index 000000000..e2164663d --- /dev/null +++ b/caf_solution/add-ons/secrets-store-csi-driver-provider-azure/variables.tf @@ -0,0 +1,26 @@ +# Map of the remote data state for lower level +variable "lower_storage_account_name" {} +variable "lower_container_name" {} +variable "lower_resource_group_name" {} + +variable "tfstate_subscription_id" { + description = "This value is populated by the rover. 
subscription id hosting the remote tfstates" +} +variable "tfstate_storage_account_name" {} +variable "tfstate_container_name" {} +variable "tfstate_key" {} +variable "tfstate_resource_group_name" {} + +variable "landingzone" {} +variable "rover_version" { + default = null +} +variable "tags" { + default = {} +} + +###### + +variable "aks_cluster_key" {} +variable "aks_clusters" {} +variable "csi_keyvault_provider" {} diff --git a/caf_solution/add-ons/terraform_cloud/readme.md b/caf_solution/add-ons/terraform_cloud/readme.md index 737a49a1f..0358a5d48 100644 --- a/caf_solution/add-ons/terraform_cloud/readme.md +++ b/caf_solution/add-ons/terraform_cloud/readme.md @@ -25,20 +25,20 @@ This will setup TFC organization, workspaces and variables to host landing zones ```bash # Deploy -rover -lz /tf/caf/landingzones/caf_solution/add-ons/terraform_cloud/ \ --var-folder /tf/caf/landingzones/caf_solution/add-ons/terraform_cloud/example/ \ +rover -lz /tf/caf/caf_solution/add-ons/terraform_cloud/ \ +-var-folder /tf/caf/caf_solution/add-ons/terraform_cloud/example/ \ -a plan -launchpad or -cd /tf/caf/landingzones/caf_solution/add-ons/terraform_cloud/ +cd /tf/caf/caf_solution/add-ons/terraform_cloud/ terraform init terraform plan \ --var-file /tf/caf/landingzones/caf_solution/add-ons/terraform_cloud/example/tfc.tfvars +-var-file /tf/caf/caf_solution/add-ons/terraform_cloud/example/tfc.tfvars ``` Once ready, you can create your configuration: ```bash terraform apply \ --var-file /tf/caf/landingzones/caf_solution/add-ons/terraform_cloud/example/tfc.tfvars +-var-file /tf/caf/caf_solution/add-ons/terraform_cloud/example/tfc.tfvars ``` \ No newline at end of file diff --git a/caf_solution/dynamic_secrets.tf b/caf_solution/dynamic_secrets.tf index cd1d61f17..e5185523b 100644 --- a/caf_solution/dynamic_secrets.tf +++ b/caf_solution/dynamic_secrets.tf @@ -1,8 +1,8 @@ module "dynamic_keyvault_secrets" { source = "aztfmod/caf/azurerm//modules/security/dynamic_keyvault_secrets" - version = "~>5.4.2" + version = "5.5.1" - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/security/dynamic_keyvault_secrets?ref=master" for_each = { for keyvault_key, secrets in try(var.dynamic_keyvault_secrets, {}) : keyvault_key => { diff --git a/caf_solution/landingzone.tf b/caf_solution/landingzone.tf index 7530cfa5b..9899b41ae 100644 --- a/caf_solution/landingzone.tf +++ b/caf_solution/landingzone.tf @@ -1,13 +1,18 @@ module "solution" { source = "aztfmod/caf/azurerm" - version = "~>5.4.2" + version = "5.5.1" - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=patch.5.4.6" + # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git?ref=master" # source = "../../aztfmod" + providers = { + azurerm.vhub = azurerm.vhub + } + azuread = local.azuread cloud = local.cloud compute = local.compute + apim = local.apim cognitive_services = local.cognitive_services current_landingzone_key = var.landingzone.key custom_role_definitions = var.custom_role_definitions @@ -22,6 +27,7 @@ module "solution" { event_hub_namespaces = var.event_hub_namespaces event_hubs = var.event_hubs global_settings = local.global_settings + identity = local.identity keyvault_access_policies = var.keyvault_access_policies keyvault_access_policies_azuread_apps = var.keyvault_access_policies_azuread_apps 
keyvault_certificate_issuers = var.keyvault_certificate_issuers @@ -31,6 +37,7 @@ module "solution" { logged_user_objectId = var.logged_user_objectId logic_app = var.logic_app managed_identities = var.managed_identities + messaging = local.messaging networking = local.networking random_strings = var.random_strings remote_objects = local.remote diff --git a/caf_solution/local.apim.tf b/caf_solution/local.apim.tf new file mode 100644 index 000000000..07c5ae156 --- /dev/null +++ b/caf_solution/local.apim.tf @@ -0,0 +1,20 @@ +locals { + apim = merge( + var.apim, + { + api_management = var.api_management + api_management_api = var.api_management_api + api_management_api_diagnostic = var.api_management_api_diagnostic + api_management_logger = var.api_management_logger + api_management_api_operation = var.api_management_api_operation + api_management_backend = var.api_management_backend + api_management_api_policy = var.api_management_api_policy + api_management_api_operation_tag = var.api_management_api_operation_tag + api_management_api_operation_policy = var.api_management_api_operation_policy + api_management_user = var.api_management_user + api_management_custom_domain = var.api_management_custom_domain + api_management_diagnostic = var.api_management_diagnostic + api_management_certificate = var.api_management_certificate + } + ) +} diff --git a/caf_solution/local.compute.tf b/caf_solution/local.compute.tf index 15e7034d6..56a4f65fc 100644 --- a/caf_solution/local.compute.tf +++ b/caf_solution/local.compute.tf @@ -9,14 +9,15 @@ locals { container_groups = var.container_groups dedicated_host_groups = var.dedicated_host_groups dedicated_hosts = var.dedicated_hosts + machine_learning_compute_instance = var.machine_learning_compute_instance proximity_placement_groups = var.proximity_placement_groups - virtual_machines = var.virtual_machines virtual_machine_scale_sets = var.virtual_machine_scale_sets - vmware_private_clouds = var.vmware_private_clouds + virtual_machines = var.virtual_machines vmware_clusters = var.vmware_clusters vmware_express_route_authorizations = var.vmware_express_route_authorizations - wvd_applications = var.wvd_applications + vmware_private_clouds = var.vmware_private_clouds wvd_application_groups = var.wvd_application_groups + wvd_applications = var.wvd_applications wvd_host_pools = var.wvd_host_pools wvd_workspaces = var.wvd_workspaces } diff --git a/caf_solution/local.custom_variables.tf b/caf_solution/local.custom_variables.tf new file mode 100644 index 000000000..b7f2932d8 --- /dev/null +++ b/caf_solution/local.custom_variables.tf @@ -0,0 +1,94 @@ +locals { + connectivity_subscription_id = can(local.tfstates[local.custom_variables.virtual_hub_lz_key].subscription_id) ? local.tfstates[local.custom_variables.virtual_hub_lz_key].subscription_id : data.azurerm_client_config.current.subscription_id + connectivity_tenant_id = can(local.tfstates[local.custom_variables.virtual_hub_lz_key].tenant_id) ? local.tfstates[local.custom_variables.virtual_hub_lz_key].tenant_id : data.azurerm_client_config.current.tenant_id + + remote_custom_variables = { + for key, value in try(var.landingzone.tfstates, {}) : "deep_merged_l1" => merge(try(data.terraform_remote_state.remote[key].outputs.custom_variables, {}))... 
+ } + # + # Produces the following structure + # + remote_custom_variables = { + # + deep_merged_l1 = [ + # + {}, + # + { + # + another_var = "other" + # + virtual_hub_lz_key = "vhub_prod" + # }, + # + { + # + ddos_protection_plan = "/subscription/ddos_plan.id" + # + virtual_hub_lz_key = "vhub_prod" + # }, + # ] + # } + + deep_merged_l1 = { + for mapping in + flatten( + [ + for key, value in try(local.remote_custom_variables.deep_merged_l1, {}) : + [ + for lkey in keys(value) : { + value = lookup(value, lkey) + key = lkey + } + ] + ] + ) : mapping.key => mapping.value... + } + # + # Produces the folowing structure + # + deep_merged_l1 = { + # + another_var = [ + # + "other", + # ] + # + ddos_protection_plan = [ + # + "/subscription/ddos_plan.id", + # ] + # + virtual_hub_lz_key = [ + # + "vhub_prod", + # + "vhub_prod", + # ] + # } + + deep_merged_l2 = { + for mapping in + flatten( + [ + for key, value in try(local.deep_merged_l1, {}) : + { + key = key + value = value[0] + } + ] + ) : mapping.key => mapping.value + } + # + # Produces the following structure + # + custom_variables = { + # + another_var = "other" + # + ddos_protection_plan = "/subscription/ddos_plan.id" + # + virtual_hub_lz_key = "vhub_prod" + # } + + custom_variables = merge( + try(local.deep_merged_l2, {}), + var.custom_variables + ) + +} + +output "custom_variables" { + value = local.custom_variables +} + +output "connectivity_subscription_id" { + value = local.connectivity_subscription_id +} + +output "connectivity_tenant_id" { + value = local.connectivity_tenant_id +} + +output "virtual_hub_lz_key" { + value = try(local.custom_variables.virtual_hub_lz_key, null) +} \ No newline at end of file diff --git a/caf_solution/local.data_factory.tf b/caf_solution/local.data_factory.tf index 14861c1c7..bf51069be 100644 --- a/caf_solution/local.data_factory.tf +++ b/caf_solution/local.data_factory.tf @@ -1,22 +1,28 @@ locals { - data_factory = merge( - var.data_factory, - { - data_factory_pipeline = var.data_factory_pipeline - data_factory_trigger_schedule = var.data_factory_trigger_schedule - datasets = { - azure_blob = try(var.datasets.azure_blob, {}) - cosmosdb_sqlapi = try(var.datasets.cosmosdb_sqlapi, {}) - delimited_text = try(var.datasets.delimited_text, {}) - http = try(var.datasets.http, {}) - json = try(var.datasets.json, {}) - mysql = try(var.datasets.mysql, {}) - postgresql = try(var.datasets.postgresql, {}) - sql_server_table = try(var.datasets.sql_server_table, {}) - } - linked_services = { - azure_blob_storage = try(var.linked_services.azure_blob_storage, {}) - } + data_factory = { + data_factory = var.data_factory + data_factory_pipeline = var.data_factory_pipeline + data_factory_trigger_schedule = var.data_factory_trigger_schedule + data_factory_integration_runtime_self_hosted = var.data_factory_integration_runtime_self_hosted + datasets = { + azure_blob = merge(try(var.datasets.azure_blob, {}), try(var.data_factory_datasets.azure_blob, {})) + cosmosdb_sqlapi = merge(try(var.datasets.cosmosdb_sqlapi, {}), try(var.data_factory_datasets.cosmosdb_sqlapi, {})) + delimited_text = merge(try(var.datasets.delimited_text, {}), try(var.data_factory_datasets.delimited_text, {})) + http = merge(try(var.datasets.http, {}), try(var.data_factory_datasets.http, {})) + json = merge(try(var.datasets.json, {}), try(var.data_factory_datasets.json, {})) + mysql = merge(try(var.datasets.mysql, {}), try(var.data_factory_datasets.mysql, {})) + postgresql = merge(try(var.datasets.postgresql, {}), try(var.data_factory_datasets.postgresql, 
{})) + sql_server_table = merge(try(var.datasets.sql_server_table, {}), try(var.data_factory_datasets.sql_server_table, {})) } - ) + linked_services = { + azure_blob_storage = merge(try(var.linked_services.azure_blob_storage, {}), try(var.data_factory_linked_services.azure_blob_storage, {}), var.data_factory_linked_services_azure_blob_storages) + azure_databricks = merge(try(var.data_factory_linked_services.azure_databricks, {}), try(var.data_factory_linked_services.azure_databricks, var.data_factory_linked_service_azure_databricks)) + cosmosdb = merge(try(var.data_factory_linked_services.cosmosdb, {}), try(var.data_factory_linked_services.cosmosdb, {})) + key_vault = var.data_factory_linked_service_key_vaults + mysql = merge(try(var.data_factory_linked_services.mysql, {}), try(var.data_factory_linked_services.mysql, {})) + postgresql = merge(try(var.data_factory_linked_services.postgresql, {}), try(var.data_factory_linked_services.postgresql, {})) + sql_server = merge(try(var.data_factory_linked_services.sql_server, {}), try(var.data_factory_linked_services.sql_server, {})) + web = merge(try(var.data_factory_linked_services.web, {}), try(var.data_factory_linked_services.web, {})) + } + } } diff --git a/caf_solution/local.database.tf b/caf_solution/local.database.tf index 4671bc5ba..0bca77e4b 100644 --- a/caf_solution/local.database.tf +++ b/caf_solution/local.database.tf @@ -5,6 +5,7 @@ locals { app_config = var.app_config azurerm_redis_caches = var.azurerm_redis_caches cosmos_dbs = var.cosmos_dbs + cosmosdb_sql_databases = var.cosmosdb_sql_databases databricks_workspaces = var.databricks_workspaces machine_learning_workspaces = var.machine_learning_workspaces mariadb_databases = var.mariadb_databases diff --git a/caf_solution/local.identity.tf b/caf_solution/local.identity.tf new file mode 100644 index 000000000..2527bf13b --- /dev/null +++ b/caf_solution/local.identity.tf @@ -0,0 +1,6 @@ +locals { + identity = { + active_directory_domain_service = var.active_directory_domain_service + active_directory_domain_service_replica_set = var.active_directory_domain_service_replica_set + } +} diff --git a/caf_solution/local.messaging.tf b/caf_solution/local.messaging.tf new file mode 100644 index 000000000..a83f8a0f0 --- /dev/null +++ b/caf_solution/local.messaging.tf @@ -0,0 +1,11 @@ +locals { + messaging = merge( + var.messaging, + { + signalr_services = var.signalr_services + servicebus_namespaces = var.servicebus_namespaces + servicebus_topics = var.servicebus_topics + servicebus_queues = var.servicebus_queues + } + ) +} diff --git a/caf_solution/local.networking.tf b/caf_solution/local.networking.tf index dfd5ecbf1..c27380a3f 100644 --- a/caf_solution/local.networking.tf +++ b/caf_solution/local.networking.tf @@ -15,12 +15,16 @@ locals { azurerm_firewall_policy_rule_collection_groups = var.azurerm_firewall_policy_rule_collection_groups azurerm_firewalls = var.azurerm_firewalls azurerm_routes = var.azurerm_routes + cdn_profiles = var.cdn_profiles + cdn_endpoints = var.cdn_endpoints ddos_services = var.ddos_services dns_zone_records = var.dns_zone_records dns_zones = var.dns_zones domain_name_registrations = var.domain_name_registrations express_route_circuit_authorizations = var.express_route_circuit_authorizations + express_route_circuit_peerings = var.express_route_circuit_peerings express_route_circuits = var.express_route_circuits + express_route_connections = var.express_route_connections front_door_waf_policies = var.front_door_waf_policies front_doors = var.front_doors ip_groups = 
var.ip_groups @@ -47,7 +51,6 @@ locals { vnets = var.vnets virtual_subnets = var.virtual_subnets vpn_sites = var.vpn_sites - vpn_gateway_connections = var.vpn_gateway_connections } ) } diff --git a/caf_solution/local.remote.tf b/caf_solution/local.remote.tf index 80ce0d607..3817d6fd6 100644 --- a/caf_solution/local.remote.tf +++ b/caf_solution/local.remote.tf @@ -68,6 +68,12 @@ locals { container_registry = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].azure_container_registries, {})) } + databricks_workspaces = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].databricks_workspaces, {})) + } + cosmos_dbs = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].cosmos_dbs, {})) + } disk_encryption_sets = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].disk_encryption_sets, {})) } @@ -83,12 +89,24 @@ locals { event_hub_namespaces = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].event_hub_namespaces, {})) } + express_route_circuit_authorizations = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].express_route_circuit_authorizations, {})) + } + express_route_circuits = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].express_route_circuits, {})) + } + express_route_circuit_peerings = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].express_route_circuit_peerings, {})) + } front_door_waf_policies = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].front_door_waf_policies, {})) } integration_service_environment = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].integration_service_environment, {})) } + keyvault_certificates = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].keyvault_certificates, {})) + } keyvault_certificate_requests = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].keyvault_certificate_requests, {})) } @@ -115,6 +133,9 @@ locals { ) } ) + signalr_services = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].signalr_services, {})) + } mssql_databases = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].mssql_databases, {})) } @@ -160,9 +181,21 @@ locals { resource_groups = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].resource_groups, {})) } + servicebus_namespaces = { + for key, value in try(var.landingzone.tfstates, {}) : key => 
merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].servicebus_namespaces, {})) + } + servicebus_topics = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].servicebus_topics, {})) + } + servicebus_queues = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].servicebus_queues, {})) + } storage_accounts = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].storage_accounts, {})) } + storage_containers = { + for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].storage_containers, {})) + } subscriptions = { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].subscriptions, {})) } @@ -209,4 +242,4 @@ locals { for key, value in try(var.landingzone.tfstates, {}) : key => merge(try(data.terraform_remote_state.remote[key].outputs.objects[key].wvd_workspaces, {})) } } -} \ No newline at end of file +} diff --git a/caf_solution/local.shared_services.tf b/caf_solution/local.shared_services.tf index a1aa3d091..a3fa9af95 100644 --- a/caf_solution/local.shared_services.tf +++ b/caf_solution/local.shared_services.tf @@ -2,13 +2,16 @@ locals { shared_services = merge( var.shared_services, { - automations = var.automations - recovery_vaults = var.recovery_vaults - monitoring = var.monitoring - shared_image_galleries = var.shared_image_galleries - image_definitions = var.image_definitions - packer_service_principal = var.packer_service_principal - packer_managed_identity = var.packer_managed_identity + automations = var.automations + consumption_budgets = var.consumption_budgets + image_definitions = var.image_definitions + monitor_autoscale_settings = var.monitor_autoscale_settings + monitor_action_groups = var.monitor_action_groups + monitoring = var.monitoring + packer_managed_identity = var.packer_managed_identity + packer_service_principal = var.packer_service_principal + recovery_vaults = var.recovery_vaults + shared_image_galleries = var.shared_image_galleries } ) } diff --git a/caf_solution/local.storage.tf b/caf_solution/local.storage.tf index 00b5c9bb1..559eb742e 100644 --- a/caf_solution/local.storage.tf +++ b/caf_solution/local.storage.tf @@ -5,6 +5,7 @@ locals { netapp_accounts = var.netapp_accounts storage_account_blobs = var.storage_account_blobs storage_account_queues = var.storage_account_queues + storage_containers = var.storage_containers } ) } diff --git a/caf_solution/locals.remote_tfstates.tf b/caf_solution/locals.remote_tfstates.tf index e9516eb1d..a8c20d74e 100644 --- a/caf_solution/locals.remote_tfstates.tf +++ b/caf_solution/locals.remote_tfstates.tf @@ -16,7 +16,7 @@ locals { data "terraform_remote_state" "remote" { for_each = try(var.landingzone.tfstates, {}) - backend = var.landingzone.backend_type + backend = try(each.value.backend_type, var.landingzone.backend_type, "azurerm") config = local.remote_state[try(each.value.backend_type, var.landingzone.backend_type, "azurerm")][each.key] } @@ -32,9 +32,9 @@ locals { subscription_id = try(value.subscription_id, var.tfstate_subscription_id) tenant_id = try(value.tenant_id, data.azurerm_client_config.current.tenant_id) sas_token = try(value.sas_token, null) != null ? 
var.sas_token : null + use_azuread_auth = try(value.use_azuread_auth, true) } } - } tags = merge(try(local.global_settings.tags, {}), { "level" = var.landingzone.level }, try({ "environment" = local.global_settings.environment }, {}), { "rover_version" = var.rover_version }, var.tags) @@ -43,7 +43,8 @@ locals { var.global_settings, try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.objects[var.landingzone.global_settings_key].global_settings, null), try(data.terraform_remote_state.remote[var.landingzone.global_settings_key].outputs.global_settings, null), - try(data.terraform_remote_state.remote[keys(var.landingzone.tfstates)[0]].outputs.global_settings, null) + try(data.terraform_remote_state.remote[keys(var.landingzone.tfstates)[0]].outputs.global_settings, null), + local.custom_variables ) diff --git a/caf_solution/main.tf b/caf_solution/main.tf index a03e00bc8..22512b9e3 100644 --- a/caf_solution/main.tf +++ b/caf_solution/main.tf @@ -4,26 +4,26 @@ terraform { // azuread version driven by the caf module random = { source = "hashicorp/random" - version = "~> 2.2.1" + version = "~> 3.1.0" } external = { source = "hashicorp/external" - version = "~> 1.2.0" + version = "~> 2.2.0" } null = { source = "hashicorp/null" - version = "~> 2.1.0" + version = "~> 3.1.0" } tls = { source = "hashicorp/tls" - version = "~> 2.2.0" + version = "~> 3.1.0" } azurecaf = { source = "aztfmod/azurecaf" - version = "~>1.2.0" + version = "~> 1.2.0" } } - required_version = ">= 0.13" + required_version = ">= 0.15" } @@ -37,6 +37,14 @@ provider "azurerm" { } } +provider "azurerm" { + alias = "vhub" + skip_provider_registration = true + features {} + subscription_id = local.connectivity_subscription_id + tenant_id = local.connectivity_tenant_id +} + data "azurerm_client_config" "current" {} @@ -66,4 +74,5 @@ locals { } } -} \ No newline at end of file +} + diff --git a/caf_solution/modules/databricks/cluster.tf b/caf_solution/modules/databricks/cluster.tf deleted file mode 100644 index 25310d783..000000000 --- a/caf_solution/modules/databricks/cluster.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "databricks_cluster" "cluster" { - cluster_name = var.settings.name - spark_version = var.settings.spark_version - node_type_id = var.settings.node_type_id - autotermination_minutes = var.settings.autotermination_minutes - - dynamic "autoscale" { - for_each = try(var.settings.autoscale, null) == null ? 
[] : [1] - - content { - min_workers = try(var.settings.autoscale.min_workers, null) - max_workers = try(var.settings.autoscale.max_workers, null) - } - } - - -} \ No newline at end of file diff --git a/caf_solution/modules/databricks/main.tf b/caf_solution/modules/databricks/main.tf deleted file mode 100644 index 8993136bd..000000000 --- a/caf_solution/modules/databricks/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -terraform { - required_providers { - databricks = { - source = "databrickslabs/databricks" - } - } - required_version = ">= 0.13" -} - diff --git a/caf_solution/modules/databricks/output.tf b/caf_solution/modules/databricks/output.tf deleted file mode 100644 index d6bc7bc48..000000000 --- a/caf_solution/modules/databricks/output.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "cluster" { - value = { - id = databricks_cluster.cluster.id - default_tags = databricks_cluster.cluster.default_tags - state = databricks_cluster.cluster.state - } -} \ No newline at end of file diff --git a/caf_solution/modules/databricks/variables.tf b/caf_solution/modules/databricks/variables.tf deleted file mode 100644 index 719f36726..000000000 --- a/caf_solution/modules/databricks/variables.tf +++ /dev/null @@ -1,4 +0,0 @@ -variable "azure_workspace_resource_id" { - default = {} -} -variable "settings" {} \ No newline at end of file diff --git a/caf_solution/scenario/networking/100-single-region-hub/readme.md b/caf_solution/scenario/networking/100-single-region-hub/readme.md index 624dfff6b..ced6f570f 100644 --- a/caf_solution/scenario/networking/100-single-region-hub/readme.md +++ b/caf_solution/scenario/networking/100-single-region-hub/readme.md @@ -38,8 +38,8 @@ Please review the configuration files and make sure you are deploying in the exp Once you have picked a scenario for test, you can deploy it using: ```bash -rover -lz /tf/caf/landingzones/caf_networking \ +rover -lz /tf/caf/caf_networking \ -level level2 \ --var-folder /tf/caf/landingzones/caf_networking/scenario/100-single-region-hub \ +-var-folder /tf/caf/caf_networking/scenario/100-single-region-hub \ -a apply ``` diff --git a/caf_solution/scenario/networking/101-multi-region-hub/readme.md b/caf_solution/scenario/networking/101-multi-region-hub/readme.md index 8d7ad2ac7..68e3481b6 100644 --- a/caf_solution/scenario/networking/101-multi-region-hub/readme.md +++ b/caf_solution/scenario/networking/101-multi-region-hub/readme.md @@ -43,8 +43,8 @@ Please review the configuration files and make sure you are deploying in the exp Once you have picked a scenario for test, you can deploy it using: ```bash -rover -lz /tf/caf/landingzones/caf_networking \ +rover -lz /tf/caf/caf_networking \ -level level2 \ --var-folder /tf/caf/landingzones/caf_networking/scenario/101-multi-region-hub \ +-var-folder /tf/caf/caf_networking/scenario/101-multi-region-hub \ -a apply ``` diff --git a/caf_solution/scenario/networking/105-hub-and-spoke/readme.md b/caf_solution/scenario/networking/105-hub-and-spoke/readme.md index b2820fb78..5ae6ab38d 100644 --- a/caf_solution/scenario/networking/105-hub-and-spoke/readme.md +++ b/caf_solution/scenario/networking/105-hub-and-spoke/readme.md @@ -45,8 +45,8 @@ Please review the configuration files and make sure you are deploying in the exp Once you have picked a scenario for test, you can deploy it using: ```bash -rover -lz /tf/caf/landingzones/caf_networking \ +rover -lz /tf/caf/caf_networking \ -level level2 \ --var-folder /tf/caf/landingzones/caf_networking/scenario/105-hub-and-spoke \ +-var-folder 
/tf/caf/caf_networking/scenario/105-hub-and-spoke \ -a apply ``` diff --git a/caf_solution/scenario/networking/106-hub-virtual-wan-firewall/readme.md b/caf_solution/scenario/networking/106-hub-virtual-wan-firewall/readme.md index 0fccc4337..c062db1c2 100644 --- a/caf_solution/scenario/networking/106-hub-virtual-wan-firewall/readme.md +++ b/caf_solution/scenario/networking/106-hub-virtual-wan-firewall/readme.md @@ -40,8 +40,8 @@ Please review the configuration files and make sure you are deploying in the exp Once you have picked a scenario for test, you can deploy it using: ```bash -rover -lz /tf/caf/landingzones/caf_solution \ +rover -lz /tf/caf/caf_solution \ -level level2 \ --var-folder /tf/caf/landingzones/caf_solution/scenario/networking/106-hub-virtual-wan-firewall \ +-var-folder /tf/caf/caf_solution/scenario/networking/106-hub-virtual-wan-firewall \ -a apply ``` diff --git a/caf_solution/variables.apim.tf b/caf_solution/variables.apim.tf new file mode 100644 index 000000000..1c2bd542e --- /dev/null +++ b/caf_solution/variables.apim.tf @@ -0,0 +1,42 @@ +variable "apim" { + default = {} +} +variable "api_management" { + default = {} +} +variable "api_management_api" { + default = {} +} +variable "api_management_api_diagnostic" { + default = {} +} +variable "api_management_logger" { + default = {} +} +variable "api_management_api_operation" { + default = {} +} +variable "api_management_backend" { + default = {} +} +variable "api_management_api_policy" { + default = {} +} +variable "api_management_api_operation_tag" { + default = {} +} +variable "api_management_api_operation_policy" { + default = {} +} +variable "api_management_user" { + default = {} +} +variable "api_management_custom_domain" { + default = {} +} +variable "api_management_diagnostic" { + default = {} +} +variable "api_management_certificate" { + default = {} +} \ No newline at end of file diff --git a/caf_solution/variables.compute.tf b/caf_solution/variables.compute.tf index 20b7d9b30..0f43b707f 100644 --- a/caf_solution/variables.compute.tf +++ b/caf_solution/variables.compute.tf @@ -27,6 +27,9 @@ variable "dedicated_host_groups" { variable "dedicated_hosts" { default = {} } +variable "machine_learning_compute_instance" { + default = {} +} variable "proximity_placement_groups" { default = {} } diff --git a/caf_solution/variables.data_factory.tf b/caf_solution/variables.data_factory.tf index df21c82db..b1c8f10cb 100644 --- a/caf_solution/variables.data_factory.tf +++ b/caf_solution/variables.data_factory.tf @@ -23,4 +23,33 @@ variable "linked_services" { default = { # azure_blob_storage } +} +variable "data_factory_datasets" { + default = { + # azure_blob + # cosmosdb_sqlapi + # delimited_text + # http + # json + # mysql + # postgresql + # sql_server_table + } +} +variable "data_factory_linked_services" { + default = { + # azure_blob_storage + } +} +variable "data_factory_linked_service_key_vaults" { + default = {} +} +variable "data_factory_linked_services_azure_blob_storages" { + default = {} +} +variable "data_factory_linked_service_azure_databricks" { + default = {} +} +variable "data_factory_integration_runtime_self_hosted" { + default = {} } \ No newline at end of file diff --git a/caf_solution/variables.database.tf b/caf_solution/variables.database.tf index d6b4a7ab4..cdb357439 100644 --- a/caf_solution/variables.database.tf +++ b/caf_solution/variables.database.tf @@ -7,6 +7,9 @@ variable "azurerm_redis_caches" { variable "cosmos_dbs" { default = {} } +variable "cosmosdb_sql_databases" { + default = {} +} 
variable "database" { description = "Database configuration objects" default = {} diff --git a/caf_solution/variables.identity.tf b/caf_solution/variables.identity.tf new file mode 100644 index 000000000..7f8048b9a --- /dev/null +++ b/caf_solution/variables.identity.tf @@ -0,0 +1,9 @@ +variable "active_directory_domain_service" { + description = "Manages an Active Directory Domain Service." + default = {} +} + +variable "active_directory_domain_service_replica_set" { + description = "Manages a Replica Set for an Active Directory Domain Service." + default = {} +} \ No newline at end of file diff --git a/caf_solution/variables.messaging.tf b/caf_solution/variables.messaging.tf new file mode 100644 index 000000000..fb5ef3ea8 --- /dev/null +++ b/caf_solution/variables.messaging.tf @@ -0,0 +1,15 @@ +variable "messaging" { + default = {} +} +variable "signalr_services" { + default = {} +} +variable "servicebus_namespaces" { + default = {} +} +variable "servicebus_queues" { + default = {} +} +variable "servicebus_topics" { + default = {} +} \ No newline at end of file diff --git a/caf_solution/variables.networking.tf b/caf_solution/variables.networking.tf index f1920fc9e..bcb5b24ba 100644 --- a/caf_solution/variables.networking.tf +++ b/caf_solution/variables.networking.tf @@ -38,6 +38,12 @@ variable "azurerm_firewall_policy_rule_collection_groups" { variable "azurerm_routes" { default = {} } +variable "cdn_profiles" { + default = {} +} +variable "cdn_endpoints" { + default = {} +} variable "ddos_services" { default = {} } @@ -56,6 +62,12 @@ variable "express_route_circuits" { variable "express_route_circuit_authorizations" { default = {} } +variable "express_route_circuit_peerings" { + default = {} +} +variable "express_route_connections" { + default = {} +} variable "load_balancers" { default = {} } diff --git a/caf_solution/variables.shared_services.tf b/caf_solution/variables.shared_services.tf index 90c1d0697..107bf5888 100644 --- a/caf_solution/variables.shared_services.tf +++ b/caf_solution/variables.shared_services.tf @@ -12,10 +12,18 @@ variable "automations" { default = {} } +variable "consumption_budgets" { + default = {} +} + variable "image_definitions" { default = {} } +variable "monitor_action_groups" { + default = {} +} + variable "monitoring" { default = {} } @@ -34,4 +42,9 @@ variable "recovery_vaults" { variable "shared_image_galleries" { default = {} -} \ No newline at end of file +} + +variable "monitor_autoscale_settings" { + default = {} + description = "The map from the monitor_autoscale_settings module configuration" +} diff --git a/caf_solution/variables.storage.tf b/caf_solution/variables.storage.tf index ee0bdd4ed..f81463b28 100644 --- a/caf_solution/variables.storage.tf +++ b/caf_solution/variables.storage.tf @@ -13,4 +13,7 @@ variable "storage_accounts" { } variable "storage_account_queues" { default = {} +} +variable "storage_containers" { + default = {} } \ No newline at end of file diff --git a/caf_solution/variables.tf b/caf_solution/variables.tf index 4bc098866..ff7252cbc 100644 --- a/caf_solution/variables.tf +++ b/caf_solution/variables.tf @@ -8,7 +8,10 @@ variable "lower_container_name" { variable "lower_resource_group_name" { default = null } - +variable "custom_variables" { + description = "Global custom variables to allow sharing variables between tfstates." + default = {} +} variable "tfstate_subscription_id" { description = "This value is propulated by the rover. 
subscription id hosting the remote tfstates" default = null diff --git a/caf_solution/vm_extensions.tf b/caf_solution/vm_extensions.tf index a061fd69c..9bb84b770 100644 --- a/caf_solution/vm_extensions.tf +++ b/caf_solution/vm_extensions.tf @@ -4,9 +4,9 @@ module "vm_extension_monitoring_agent" { source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions" - version = "~>5.4.0" + version = "5.5.1" - # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" depends_on = [module.solution] @@ -26,9 +26,9 @@ module "vm_extension_monitoring_agent" { module "vm_extension_diagnostics" { source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions" - version = "~>5.4.0" + version = "5.5.1" - # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" depends_on = [module.solution] @@ -51,11 +51,9 @@ module "vm_extension_diagnostics" { module "vm_extension_microsoft_azure_domainjoin" { source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions" - version = "~>5.4.0" + version = "5.5.1" - # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions" - - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" depends_on = [module.solution] @@ -73,11 +71,9 @@ module "vm_extension_microsoft_azure_domainjoin" { module "vm_extension_session_host_dscextension" { source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions" - version = "~>5.4.0" - - # source = "/tf/caf/aztfmod/modules/compute/virtual_machine_extensions" + version = "5.5.1" - # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" depends_on = [module.vm_extension_microsoft_azure_domainjoin] @@ -92,4 +88,26 @@ module "vm_extension_session_host_dscextension" { extension_name = "session_host_dscextension" keyvaults = merge(tomap({ (var.landingzone.key) = module.solution.keyvaults }), try(local.remote.keyvaults, {})) wvd_host_pools = merge(tomap({ (var.landingzone.key) = module.solution.wvd_host_pools }), try(local.remote.wvd_host_pools, {})) +} + + +module "vm_extension_custom_scriptextension" { + source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_extensions" + version = "5.5.1" + + #source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_extensions?ref=master" + + depends_on = [module.solution, module.vm_extension_microsoft_azure_domainjoin] + + for_each = { + for key, value in try(var.virtual_machines, {}) : key => value + if try(value.virtual_machine_extensions.custom_script, null) != null + } + + client_config = module.solution.client_config + virtual_machine_id = module.solution.virtual_machines[each.key].id + extension = each.value.virtual_machine_extensions.custom_script + extension_name = "custom_script" + managed_identities = merge(tomap({ 
(var.landingzone.key) = module.solution.managed_identities }), try(local.remote.managed_identities, {})) + storage_accounts = merge(tomap({ (var.landingzone.key) = module.solution.storage_accounts }), try(local.remote.storage_accounts, {})) } \ No newline at end of file diff --git a/caf_solution/vmss_extensions.tf b/caf_solution/vmss_extensions.tf new file mode 100644 index 000000000..e82ecf21d --- /dev/null +++ b/caf_solution/vmss_extensions.tf @@ -0,0 +1,41 @@ +module "vmss_extension_microsoft_azure_domainjoin" { + source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_scale_set_extensions" + version = "5.5.1" + + # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_scale_set_extensions?ref=master" + + depends_on = [module.solution] + + for_each = { + for key, value in try(var.virtual_machine_scale_sets, {}) : key => value + if try(value.virtual_machine_scale_set_extensions.microsoft_azure_domainjoin, null) != null + } + + client_config = module.solution.client_config + virtual_machine_scale_set_id = module.solution.virtual_machine_scale_sets[each.key].id + extension = each.value.virtual_machine_scale_set_extensions.microsoft_azure_domainjoin + extension_name = "microsoft_azure_domainJoin" + keyvaults = merge(tomap({ (var.landingzone.key) = module.solution.keyvaults }), try(local.remote.keyvaults, {})) +} + + +module "vmss_extension_custom_scriptextension" { + source = "aztfmod/caf/azurerm//modules/compute/virtual_machine_scale_set_extensions" + version = "5.5.1" + + # source = "git::https://github.com/aztfmod/terraform-azurerm-caf.git//modules/compute/virtual_machine_scale_set_extensions?ref=master" + + depends_on = [module.solution] + + for_each = { + for key, value in try(var.virtual_machine_scale_sets, {}) : key => value + if try(value.virtual_machine_scale_set_extensions.custom_script, null) != null + } + + client_config = module.solution.client_config + virtual_machine_scale_set_id = module.solution.virtual_machine_scale_sets[each.key].id + extension = each.value.virtual_machine_scale_set_extensions.custom_script + extension_name = "custom_script" + managed_identities = merge(tomap({ (var.landingzone.key) = module.solution.managed_identities }), try(local.remote.managed_identities, {})) + storage_accounts = merge(tomap({ (var.landingzone.key) = module.solution.storage_accounts }), try(local.remote.storage_accounts, {})) +} diff --git a/documentation/code_architecture/hierarchy.md b/documentation/code_architecture/hierarchy.md deleted file mode 100644 index 408e28995..000000000 --- a/documentation/code_architecture/hierarchy.md +++ /dev/null @@ -1,72 +0,0 @@ -# Understanding landing zones hierarchy - -To address enterprise complexity, we recommend isolating different landing zones and therefore the separation of different Terraform state files. - -## Layered approach - -Separating the Terraform states enables us to: - -- **Control blast radius**: if one configuration is deficient, its only impact the landing zone in scope and does not compromise the whole environment. -- **Enforce standard configuration**: by using different Terraform state, you can compose a complex environment very fast. -- **Enable autonomy**: different landing zones can enable innovation and features at different pace from each other. -- **Least privilege**: Each level is protected by its own identity to prevent unwanted modifications. 
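In practice, the separation described above is wired up in each landing zone's configuration: a landing zone declares which lower-level Terraform state files it needs to read. A minimal, hypothetical sketch of such a configuration is shown below; the key names, file name and levels are illustrative, and `backend_type`/`use_azuread_auth` can also be overridden per remote state entry.

```hcl
# Hypothetical tfvars for a level2 landing zone that reads one lower-level
# state file. Key names, file name and levels are illustrative only.
landingzone = {
  backend_type        = "azurerm"
  global_settings_key = "caf_foundations"
  level               = "level2"
  key                 = "networking_hub"
  tfstates = {
    caf_foundations = {
      level   = "lower"
      tfstate = "caf_foundations.tfstate"
      # backend_type     = "azurerm"   # optional per-entry override
      # use_azuread_auth = true        # Azure AD authentication is the default
    }
  }
}
```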
- -Cloud Adoption Framework for Azure proposes a hierarchy of landing zones based on 5 levels as described below: - -![Hierarchy](../../_pictures/code_architecture/hierarchy.png) - -### Level 0: Transition from manual to automation - -Level 0 creates and onboards the desired subscriptions, creates the Terraform state repository for the different environments (sandpit, production, dev), creates the privileged access workstation, and service principals. Level 0 also enables the connection to the Azure DevOps environment as well as the creation of the DevOps private agents as well as the necessary Azure AD groups for collaboration between the developers of the environment. - -### Level 1: Core security, Governance and Accounting - -Level 1 is responsible for Role Based Access Control (RBAC) enforcement of the subscriptions, subscription behavior configuration using Azure Management groups. Azure Policies ensures deployment of preventive and reactive controls. This level is also in charge of deploying the fundamental configuration for Azure Monitor and Log analytics, shared security services, including Azure Event Hub namespace for integration with third parties SIEM solutions. - -### Level 2: Shared services - -Shared services include each environment's core networking components (using hub and spoke or any other network topology). Level 2 also includes services like Azure Site Recovery Vault for Backup and Disaster Recovery, Azure Monitor settings for the monitoring of the environment alongside with Azure Automation for patch management of the resources. Other resources could be image management for virtual machines in the environment. - -### Level 3: Application infrastructure - -This layer is responsible for enforcing the application's environment overall configuration for instance the Azure AppService environment, the Azure Kubernetes Services Cluster, the API Management services and all its dependency to deliver a service: deploying the Azure Application Gateway, Web Application Firewall. - -### Level 4: Application layer - -This level contains the application configuration and links to the source repository and frameworks. It describes which framework is used (for instance Springboot microservices, dotnet core, etc.) and describes the configuration of the application (how many instances, how to link to the database, etc.). - -## Operate with landing zones hierarchy - -A deployment will typically contain: - -* one \"level 0\" landing zones -* a few \"level 1\" and \"2\" -* a couple of \"level 3\" -* many \"level 4\" applications will exist in an environment. - -It is important to keep in mind that each landing zone will be enforced by a pipeline as showing below: - -![Hierarchy3](../../_pictures/code_architecture/hierarchy3.png) - -For a given "level" in the environment, each Agent VM will be assigned a managed identity that will be used to authenticate and authorize operations on: -- The target Azure Subscription -- The Terraform state file: will be Read and Write permissions for the current level, will be Read only permissions for a "lower" level type of landing zone, avoiding alterations on more privileged environments. - -In the example above, each pipeline will have its lifecycle management: - -* Level 0 and 1 will be called at every new subscription creation -* Level 2 will be triggered when you are opening a new regional hub -* Level 3 will be triggered when you deploy a new service (Application Gateway, App Service Environment, Azure Kubernetes Services, etc.) 
-* Level 4 can be initiated as many times a day as you deploy code in your application environment). - -### Service composition across layers - -To deliver a complete environment, just as for any other software project, we want to avoid a monolithic configuration and instead compose an environment calling multiple landing zones. - -With Terraform, you can read a state file's output and use it as input variables for another landing zone. We use this feature to compose complex architectures as illustrated below: - -![Composition](../../_pictures/code_architecture/landingzone_composition.png) - -For more details, you can refer to: [Introduction to service composition inside landing zones](./service_composition.md) - -[Back to summary](../README.md) diff --git a/documentation/code_architecture/intro_architecture.md b/documentation/code_architecture/intro_architecture.md deleted file mode 100644 index 71923b176..000000000 --- a/documentation/code_architecture/intro_architecture.md +++ /dev/null @@ -1,83 +0,0 @@ -# Introduction to Azure landing zones components - -Azure landing zones help you deploy a complete environment leveraging -the following elements: - -![Overview](../../_pictures/code_architecture/components.png) - -## DevOps Toolset - -In order to bootstrap an environment, we provide the following minimal DevOps components tools. - -### rover - -[Source here](https://github.com/aztfmod/rover) - -The \"rover\" is part of the fundamental toolset of the Azure CAF landing zone model. The rover allows you to deploy all landing zones in a consistent and automated way: - -* It is a Docker **container** running on all platforms transparently: Windows, Linux, Mac. -* Allows a validated **versioned** tool set. - -Advantages of using the rover compared to running Terraform directly on your machine: - -* Simplifies setup and configuration across DevOps teams: everyone works with the same versions of the tools. -* Abstracts and helps with the Terraform state management. -* Helps preserve stability across components versions. -* Helps testing different versions of binaries (new version of Terraform, Azure CLI, jq, tflint etc.) -* Facilitates the identity transition to any CI/CD. -* Allows easy transition from one DevOps environment to another (GitHub Actions, Azure DevOps, Jenkins, CircleCI etc.) - -![Rover](../../_pictures/code_architecture/rover.png) - -## Modules - -[Source here](https://github.com/aztfmod/) - -Cloud Adoption Framework maintains a set of curated modules. We mainly use module to enforce a consistent set of configuration settings and requirements. - -Modules must have a strong versioning, in the CAF modules, we use semantic versioning, and all modules are published on the [Hashicorp Terraform registry](https://registry.terraform.io/modules/aztfmod) - -![Modules](../../_pictures/code_architecture/modules.png) - -## Landing zone - -[Source here](https://github.com/aztfmod/landingzones) - -A landing zone is a composition of multiple resources (modules, blueprints/services) that deliver a full application environment. 
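As a rough illustration of what such a composition looks like in Terraform, a landing zone typically pins a published CAF module version and passes it the configuration objects it assembles. The sketch below is hypothetical; the input names and the pinned version are illustrative, not a definitive contract.

```hcl
# Hypothetical sketch of a landing zone composing the registry-published
# CAF module; inputs and the pinned version are illustrative.
module "solution" {
  source  = "aztfmod/caf/azurerm"
  version = "5.5.1"

  global_settings = local.global_settings
  resource_groups = var.resource_groups
  tags            = local.tags
}
```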
- -![Landingzone](../../_pictures/code_architecture/landingzone.png) - -The landing zone is **responsible** for the **Terraform state** and will produce outputs that may be reused by other landing zones as follow: - -![Landingzone](../../_pictures/code_architecture/landingzone_state.png) - -A landing zone can contain subparts called blueprints, also called services, which are reusable sets of infrastructure components that have been assembled to deliver a service, for instance, an egress DMZ or a solution like Network Virtual Appliance from a third party vendor. - -Blueprints/Services can be stored either inside the landing zones (as a subdirectory for instance) or re-used across landing zones while stored in another directory. - -The delivery of a full landing zone might be decomposed in multiples levels in order to manage different personas and contain the blast radius that a mistake might incur in one landing zone. - -### Launchpad -[Source here](https://github.com/Azure/caf-terraform-landingzones/tree/master/caf_launchpad) - -The launchpad is a special landing zone and is the first landing zone you will create for your environment (**level 0**). -This landing zone acts as your toolbox to deploy the resources that in turn helps manage the fundamentals of a full landing zone deployment: - -A level0 landing zone is the foundation of account and subscription management. As such it is responsible for: - -* Defining how to store and retrieve the Terraform state. -* Defining the core of secrets protection for the Terraform state. -* Defining the management of the principals or identities for a complex environment. -* Defining how to access/partition the different subscriptions. -* Defining the DevOps foundations. -* Defining initial blueprints - -Currently we support an open source version of [level0 launchpad](https://github.com/Azure/caf-terraform-landingzones/tree/master/caf_launchpad). - -We are currently working on a [Terraform Cloud](https://www.terraform.io/docs/cloud/index.html) edition of level0 blueprint. - -[Back to summary](../README.md) - -![Launchpad](../../_pictures/code_architecture/launchpad.png) - - diff --git a/documentation/code_architecture/service_composition.md b/documentation/code_architecture/service_composition.md deleted file mode 100644 index c01b7fe99..000000000 --- a/documentation/code_architecture/service_composition.md +++ /dev/null @@ -1,111 +0,0 @@ -# Composing a complete environment with landing zones - -A landing zone describes a complex environment running on Microsoft Azure. - -From the software prospective, the landing zone is the element orchestrating the service composition by calling different blueprints *(sometimes also referred as services)* to deploy the complete environment as in the picture below:
- -![landingzoneoverview](../../_pictures/caf_landing_zone_overview.png) - -In our approach, a landing zone: - -- Manages a Terraform state file, and outputs which might be reused by other landing zones. -- Assembles the blueprints/services. -- Manages a deployment for a set of regions (calling the blueprints which are by convention mono-region). -- Locks a version of azurerm_provider for which is has been tested. -- Is versioned in GitHub repository. - -## Composing within the landing zone - -A landing zones will invoke mainly: - -- Resources (as called directly from the azurerm provider) - for the elements that are not highly reusable, and for the stitching of modules/blueprints when needed. -- Blueprints or services - stereotyped architecture elements (DMZ blueprint, egress blueprint, etc.) - -Depending on the blueprints maturity, you can:
-
-- **store them locally** in the landing zone files hierarchy. This is a good approach when you start writing a landing zone and are not sure of the reusability of the elements you are developing. For example, the vdc_demo landing zone uses a set of local blueprints as below:
-![localblueprint](../../_pictures/caf_local_blueprint.png)
You will call each blueprint as a module:
- -```hcl - -module "blueprint_networking_shared_egress" { - source = "./blueprint_networking_shared_egress" - - ... -} - -``` - -- **reuse them from the official repos** from Azure Cloud Adoption Framework blueprints. We recommend/require that you use strict versionning. Please refer to the [following repository](https://github.com/aztfmod/blueprints) for the standard blueprints.
-You will call each blueprint as a module **please note the call to a specific GitHub release and/or a branch**: - -```hcl -module "blueprint_foundations" { - source = "github.com/aztfmod/blueprints?ref=1911.1/blueprint_foundations" - - ... -} -``` - -In the following code, you can see, within the *caf_foundations* landing zone, we are calling some variables (var.global_settings, etc.), but also the output called *resource_group_hub_names* from *blueprint_foundations_accounting* (module.blueprint_foundations_accounting.resource_group_hub_names) - -```hcl -module "blueprint_foundations_security" { - source = "./blueprint_foundations_security/" - - location = var.global_settings.location_map.region1 - tags_hub = var.global_settings.tags_hub - resource_groups_hub = module.blueprint_foundations_accounting.resource_group_hub_names - log_analytics = module.blueprint_foundations_accounting.log_analytics_workspace - - security_settings = var.security_settings -} -``` - -
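For completeness, the producing side of that exchange is a plain Terraform output declared inside the blueprint. A hypothetical sketch is shown below; the real *blueprint_foundations_accounting* may build these values differently.

```hcl
# Hypothetical outputs inside blueprint_foundations_accounting exposing the
# values consumed above; resource names and shapes are illustrative.
output "resource_group_hub_names" {
  value = { for key, rg in azurerm_resource_group.hub : key => rg.name }
}

output "log_analytics_workspace" {
  value = azurerm_log_analytics_workspace.hub
}
```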
- -## Composing from multiples landing zones - -To deliver a complete environment, just as for any other software project, you **want** to avoid a monolithic configuration and compose an environment calling multiple landing zones. - -With Terraform, you can read a state files' output and use it as input variables for another landing zone. We use this feature as a way to compose complex architectures. - -![landingzonesinputoutput](../../_pictures/code_architecture/lz_exchanges.png) - -From the code prospective, this data exchange between landing zones is done by calling a Terraform data remote state object, as in the following example: - -```hcl -data "terraform_remote_state" "landingzone_caf_foundations" { - backend = "azurerm" - config = { - storage_account_name = var.lowerlevel_storage_account_name - container_name = var.lowerlevel_container_name - key = "landingzone_caf_foundations.tfstate" - resource_group_name = var.lowerlevel_resource_group_name - } -} -``` - -As a convention in our code, and in order to simplify code readability, we create local variables that represent the data (variables) that we read from the output Terraform state, below is an example: - -```hcl -locals { - variable = data.terraform_remote_state.landingzone_caf_foundations.outputs.variable -} -``` - -The decision factors to create/manage layers: - -- Different personas. -- Different levels of privileges. -- Different frequency of changes. -- Blast radius control. - -Downside: - -- Code might be harder to manage. -- Some layers might not be obvious cut in one or another layer. - -Whereas you can read a state file from Terraform and use the information in it, we implement a hierarchy model that landing zones must follow to enforce a good separation of duty, this model is reviewed in more details in the "CAF landing zones hierarchy model" article. 
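On the producing side, a landing zone simply exposes the values it wants to share as Terraform outputs. A minimal sketch follows, assuming the producing landing zone aggregates its objects under the output names read elsewhere in this repository (`global_settings`, `objects`); the exact shape is up to each landing zone.

```hcl
# Producing side (sketch): publish values for downstream landing zones.
# Sensitive outputs stay out of CLI output but remain readable through
# the terraform_remote_state data source shown above.
output "global_settings" {
  value     = local.global_settings
  sensitive = true
}

output "objects" {
  value     = { (var.landingzone.key) = { resource_groups = module.solution.resource_groups } }
  sensitive = true
}
```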
- -[Back to summary](../README.md) diff --git a/documentation/delivery/intro_ci_ado.md b/documentation/delivery/intro_ci_ado.md index d94b85b6a..631d742bb 100644 --- a/documentation/delivery/intro_ci_ado.md +++ b/documentation/delivery/intro_ci_ado.md @@ -124,7 +124,7 @@ jobs: az login --identity - /tf/rover/rover.sh /tf/caf/landingzones/landingzone_caf_foundations \ + /tf/rover/rover.sh /tf/caf/landingzone_caf_foundations \ apply \ -w $(TF_VAR_workspace) \ -tfstate "landingzone_caf_foundations" \ diff --git a/documentation/delivery/intro_ci_gha.md b/documentation/delivery/intro_ci_gha.md index 16a8e8c49..74fd0b4e1 100644 --- a/documentation/delivery/intro_ci_gha.md +++ b/documentation/delivery/intro_ci_gha.md @@ -130,7 +130,7 @@ caf_foundations: - name: deploy caf_foundations run: | - /tf/rover/rover.sh /tf/caf/landingzones/${{ matrix.landingzone }} apply \ + /tf/rover/rover.sh /tf/caf/${{ matrix.landingzone }} apply \ '-var tags={testing-job-id="${{ github.run_id }}"}' \ '-var-file ${{ env.TFVARS_PATH }}/${{ matrix.environment }}/${{ matrix.landingzone }}/${{ matrix.landingzone }}_${{ matrix.region }}_${{ matrix.convention }}.tfvars' ``` @@ -185,7 +185,7 @@ Once the fundamentals are set, we can now proceed to deploy the test landing zon - name: deploy landing_zone run: | - /tf/rover/rover.sh /tf/caf/landingzones/${{ matrix.landingzone }} apply \ + /tf/rover/rover.sh /tf/caf/${{ matrix.landingzone }} apply \ '-var tags={testing-job-id="${{ github.run_id }}"}' \ '-var-file ${{ env.TFVARS_PATH }}/${{ matrix.environment }}/${{ matrix.landingzone }}/${{ matrix.landingzone }}.tfvars' \ '-var workspace=caffoundationsci' @@ -193,7 +193,7 @@ Once the fundamentals are set, we can now proceed to deploy the test landing zon - name: destroy landing_zone if: always() run: | - /tf/rover/rover.sh /tf/caf/landingzones/${{ matrix.landingzone }} destroy \ + /tf/rover/rover.sh /tf/caf/${{ matrix.landingzone }} destroy \ '-var tags={testing-job-id="${{ github.run_id }}"}' \ '-var-file ${{ env.TFVARS_PATH }}/${{ matrix.environment }}/${{ matrix.landingzone }}/${{ matrix.landingzone }}.tfvars' \ '-var workspace=caffoundationsci' diff --git a/documentation/getting_started/getting_started_codespaces.md b/documentation/getting_started/getting_started_codespaces.md deleted file mode 100644 index 2f9acee57..000000000 --- a/documentation/getting_started/getting_started_codespaces.md +++ /dev/null @@ -1,104 +0,0 @@ -# Getting stated with Azure Cloud Adoption Framework landing zones on Visual Studio Codespaces - -## Introduction - -Visual Studio Codespaces is a browser-based editor with support for Git repos, extensions, and a built-in command line interface so you can edit, run, and debug your applications from any device. For more details on Visual Studio Codespace, you can visit the product page [here](https://visualstudio.microsoft.com/services/visual-studio-codespaces/) - -## Prerequisites - -In order to start deploying your with CAF landing zones on VS Codespaces, you need: - -* an Azure subscription (Trial, MSDN, etc.) 
- -## Create your account - -Let's authenticate first: -[https://aka.ms/vso-login](https://aka.ms/vso-login) -![Signin](../../_pictures/getting_started/vs_codespaces_getting_started.png) - -## Create the repository in Visual Studio Code - -Create the landing zones Codespaces clicking here: [![VScodespaces](https://img.shields.io/endpoint?url=https%3A%2F%2Faka.ms%2Fvso-badge)](https://online.visualstudio.com/environments/new?name=caf%20landing%20zones&repo=azure/caf-terraform-landingzones) - -![Create](../../_pictures/getting_started/vs_codespaces_create.png) - -The create process will look something like that: - -![Create](../../_pictures/getting_started/vs_codespaces_create2.png) - -Once ready, you should have your Visual Studio Interface as follow: -![Create](../../_pictures/getting_started/vs_codespaces_create3.png) - -Open a Terminal using ```CTRL``` + ```J``` or ```Command``` + ```J``` - -![Create](../../_pictures/getting_started/vs_codespaces_create4.png) - -You are ready to use landing zones by launching the rover as below: - -```bash -rover -``` - -![Create](../../_pictures/getting_started/vs_codespaces_rover.png) - -## Deploying your first landing zone - -You must be authenticated first: -For that we will rely on Azure authentication as completed by Azure Cli, via browser method: - -```bash -rover login -``` - -We recommend that you verify the output of the login and make sure the subscription selected by default is the one you want to work on. If not, you can use the following switch: - -```bash -az account set --subscription -``` - - -## Deploying the DevOps requirements - -On the first run, you need to use the launchpad to create the foundations for Terraform environment. This will set storage accounts to store the state files, and key vaults to get you started with a secure environment. It can eventually be upgraded to support your DevOps environment (pipelines, etc.) - -You can find more information on the launchpad settings [Here](../../landingzones/caf_launchpad) - -```bash -rover -launchpad -lz /tf/caf/landingzones/caf_launchpad \ --var-folder /tf/caf/landingzones/caf_launchpad/scenario/100 \ --a apply -``` - -## Deploying the foundations - -Next step is to deploy the foundations (this typically could include management groups, policies, etc.). We can start with it empty, and enrich it later. - -You can find more information on the launchpad settings [Here](../../landingzones/caf_launchpad) - - -Get started with the basic foundations: - -```bash -rover -lz /tf/caf/landingzones/caf_foundations \ --level level1 \ --a apply -``` - -## Deploying a networking scenario - -You can deploy one of the typical Azure network topologies (hub, hub and spoke, Virtual WAN). - -Have a look at the example scenario [Here](../../landingzones/caf_networking) and pick one you want to deploy. - -You can deploy a simple hub networking using: - -```bash -rover -lz /tf/caf/landingzones/caf_networking \ --level level2 \ --var-folder /tf/caf/landingzones/caf_networking/scenario/100-single-region-hub \ --a apply -``` - -## Destroying your test - -Once you are done testing, just use the same commands as before, juste replace the last line ```-a apply``` by ```-a destroy```. diff --git a/rover_on_ssh_host.yml b/rover_on_ssh_host.yml new file mode 100644 index 000000000..958a7824b --- /dev/null +++ b/rover_on_ssh_host.yml @@ -0,0 +1,37 @@ +--- +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- + +# +# Docker compose to open the rover in remote ssh shells +# + +version: '3.7' +services: + rover: + image: aztfmod/rover:1.1.3-2201.2106 + + user: vscode + + labels: + - "caf=Azure CAF" + + volumes: + # This is where VS Code should expect to find your project's source code + # and the value of "workspaceFolder" in .devcontainer/devcontainer.json + - .:/tf/caf + - volume-caf-vscode:/home/vscode + - volume-caf-vscode-bashhistory:/commandhistory + - ~/.ssh:/tmp/.ssh-localhost:ro + - /var/run/docker.sock:/var/run/docker.sock + + # Overrides default command so things don't shut down after the process ends. + command: /bin/sh -c "while sleep 1000; do :; done" + +volumes: + volume-caf-vscode: + labels: + - "caf=Azure CAF" + volume-caf-vscode-bashhistory: diff --git a/templates/applications/action_plugins/__pycache__/merge_vars.cpython-39.pyc b/templates/applications/action_plugins/__pycache__/merge_vars.cpython-39.pyc new file mode 100644 index 000000000..fdb1cae7f Binary files /dev/null and b/templates/applications/action_plugins/__pycache__/merge_vars.cpython-39.pyc differ diff --git a/templates/applications/action_plugins/merge_vars.py b/templates/applications/action_plugins/merge_vars.py new file mode 100644 index 000000000..b7edcc27a --- /dev/null +++ b/templates/applications/action_plugins/merge_vars.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python + +""" +An Ansible action plugin to allow explicit merging of dict and list facts. + +https://github.com/leapfrogonline/ansible-merge-vars/blob/master/LICENSE.md + +""" + +from ansible.plugins.action import ActionBase +from ansible.errors import AnsibleError +from ansible.utils.vars import isidentifier + + +# Funky import dance for Ansible backwards compatitility (not sure if we +# actually need to do this or not) +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display # pylint: disable=ungrouped-imports + display = Display() + + +class ActionModule(ActionBase): + """ + Merge all variables in context with a certain suffix (lists or dicts only) + and create a new variable that contains the result of this merge. These + initial suffixed variables can be definied anywhere in the inventory, or by + any other means; as long as they're in the context for the running play, + they'll be merged. + + """ + def run(self, tmp=None, task_vars=None): + suffix_to_merge = self._task.args.get('suffix_to_merge', '') + merged_var_name = self._task.args.get('merged_var_name', '') + dedup = self._task.args.get('dedup', True) + expected_type = self._task.args.get('expected_type') + recursive_dict_merge = bool(self._task.args.get('recursive_dict_merge', False)) + + if 'cacheable' in self._task.args.keys(): + display.deprecated( + "The `cacheable` option does not actually do anything, since Ansible 2.5. " + "No matter what, the variable set by this plugin will be set in the fact " + "cache if you have fact caching enabled. To get rid of this warning, " + "remove the `cacheable` argument from your merge_vars task. This warning " + "will be removed in a future version of this plugin." 
+ ) + + # Validate args + if expected_type not in ['dict', 'list']: + raise AnsibleError("expected_type must be set ('dict' or 'list').") + if not merged_var_name: + raise AnsibleError("merged_var_name must be set") + if not isidentifier(merged_var_name): + raise AnsibleError("merged_var_name '%s' is not a valid identifier" % merged_var_name) + if not suffix_to_merge.endswith('__to_merge'): + raise AnsibleError("Merge suffix must end with '__to_merge', sorry!") + + keys = sorted([key for key in task_vars.keys() + if key.endswith(suffix_to_merge)]) + + display.v("Merging vars in this order: {}".format(keys)) + + # We need to render any jinja in the merged var now, because once it + # leaves this plugin, ansible will cleanse it by turning any jinja tags + # into comments. + # And we need it done before merging the variables, + # in case any structured data is specified with templates. + merge_vals = [self._templar.template(task_vars[key]) for key in keys] + + # Dispatch based on type that we're merging + if merge_vals == []: + if expected_type == 'list': + merged = [] + else: + merged = {} + elif isinstance(merge_vals[0], list): + merged = merge_list(merge_vals, dedup) + elif isinstance(merge_vals[0], dict): + merged = merge_dict(merge_vals, dedup, recursive_dict_merge) + else: + raise AnsibleError( + "Don't know how to merge variables of type: {}".format(type(merge_vals[0])) + ) + + return { + 'ansible_facts': {merged_var_name: merged}, + 'changed': False, + } + + +def merge_dict(merge_vals, dedup, recursive_dict_merge): + """ + To merge dicts, just update one with the values of the next, etc. + """ + check_type(merge_vals, dict) + merged = {} + for val in merge_vals: + if not recursive_dict_merge: + merged.update(val) + else: + # Recursive merging of dictionaries with overlapping keys: + # LISTS: merge with merge_list + # DICTS: recursively merge with merge_dict + # any other types: replace (same as usual behaviour) + for key in val.keys(): + if not key in merged: + # first hit of the value - just assign + merged[key] = val[key] + elif isinstance(merged[key], list): + merged[key] = merge_list([merged[key], val[key]], dedup) + elif isinstance(merged[key], dict): + merged[key] = merge_dict([merged[key], val[key]], dedup, recursive_dict_merge) + else: + merged[key] = val[key] + return merged + + +def merge_list(merge_vals, dedup): + """ To merge lists, just concat them. Dedup if wanted. """ + check_type(merge_vals, list) + merged = flatten(merge_vals) + if dedup: + merged = deduplicate(merged) + return merged + + +def check_type(mylist, _type): + """ Ensure that all members of mylist are of type _type. """ + if not all(isinstance(item, _type) for item in mylist): + raise AnsibleError("All values to merge must be of the same type, either dict or list") + + +def flatten(list_of_lists): + """ + Flattens a list of lists: + >>> flatten([[1, 2] [3, 4]]) + [1, 2, 3, 4] + + I wish Python had this in the standard lib :( + """ + return list((x for y in list_of_lists for x in y)) + + +def deduplicate(mylist): + """ + Just brute force it. This lets us keep order, and lets us dedup unhashable + things, like dicts. Hopefully you won't run into such big lists that + this will ever be a performance issue. 
+ """ + deduped = [] + for item in mylist: + if item not in deduped: + deduped.append(item) + return deduped \ No newline at end of file diff --git a/templates/applications/ansible-get-platform-details.yaml b/templates/applications/ansible-get-platform-details.yaml new file mode 100644 index 000000000..0f24f413d --- /dev/null +++ b/templates/applications/ansible-get-platform-details.yaml @@ -0,0 +1,80 @@ +# Get Platform subscriptions + +- name: "Get platform subscriptions tfstate details" + register: subscription_tfstate_file_name + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates["platform"].platform_subscriptions.level | default('level1') }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{ subscription_tfstate_file_name.stdout }}" + +- name: "Download platform subscriptions tfstate details" + register: platform_subscription_tfstate_exists + shell: | + az storage blob download \ + --name "{{ config.tfstates["platform"].platform_subscriptions.tfstate | default('platform_subscriptions.tfstate') }}" \ + --account-name "{{ subscription_tfstate_file_name.stdout }}" \ + --container-name "tfstate" \ + --auth-mode "login" \ + --file "{{ job_cache_base_path }}/{{ config.tfstates["platform"].platform_subscriptions.tfstate | default('platform_subscriptions.tfstate') }}" + +- name: "Get platform_subscriptions details" + when: platform_subscription_tfstate_exists.rc == 0 + shell: "cat {{ job_cache_base_path }}/{{ config.tfstates[\"platform\"].platform_subscriptions.tfstate | default('platform_subscriptions.tfstate') }}" + register: platform_subscriptions + +- name: "Get platform_subscriptions json data" + when: platform_subscription_tfstate_exists.rc == 0 + set_fact: + platform_sub_jsondata: "{{ platform_subscriptions.stdout | from_json }}" + +- name: "Get subscriptions list" + when: platform_subscription_tfstate_exists.rc == 0 + set_fact: + platform_subscriptions_details: "{{ platform_sub_jsondata | json_query(path) }}" + vars: + path: 'outputs.objects.value.{{ config.tfstates["platform"].platform_subscriptions.lz_key_name }}.subscriptions' + + +# Get Platform keyvaults +- name: "Get tfstate keyvaults account name" + register: launchpad_storage_account + ignore_errors: yes + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates["platform"].launchpad.level | default('level0') }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{launchpad_storage_account}}" + +- name: "Get tfstate keyvaults details" + register: credentials_tfstate_exists + when: launchpad_storage_account.stderr == "" + ignore_errors: yes + shell: | + az storage blob download \ + --name "{{ config.tfstates["platform"].launchpad_credentials.tfstate | default('launchpad_credentials.tfstate') }}" \ + --account-name "{{ launchpad_storage_account.stdout }}" \ + --container-name "{{ config.tfstates["platform"].launchpad.workspace | default('tfstate') }}" \ + --auth-mode "login" \ + --file "~/.terraform.cache/launchpad/{{ config.tfstates["platform"].launchpad_credentials.tfstate | default('launchpad_credentials.tfstate') }}" + +- name: "Get launchpad_credentials details" + when: credentials_tfstate_exists is not skipped + shell: "cat 
~/.terraform.cache/launchpad/{{ config.tfstates[\"platform\"].launchpad_credentials.tfstate | default('launchpad_credentials.tfstate') }}" + register: launchpad_credentials + +- name: "Get launchpad_credentials json data" + when: credentials_tfstate_exists is not skipped + set_fact: + credjsondata: "{{ launchpad_credentials.stdout | from_json }}" + +- name: "Set keyvaults variable" + when: credentials_tfstate_exists is not skipped + set_fact: + keyvaults: "{{ credjsondata | json_query(path) }}" + vars: + path: 'outputs.objects.value.launchpad_credentials_rotation.keyvaults' diff --git a/templates/applications/ansible.yaml b/templates/applications/ansible.yaml new file mode 100644 index 000000000..c25bbbd50 --- /dev/null +++ b/templates/applications/ansible.yaml @@ -0,0 +1,84 @@ +- name: CAF Terraform - Generate configuration files + hosts: localhost + vars: + base_templates_folder: "{{ base_templates_folder }}/asvm" + resource_template_folder: "{{ base_templates_folder }}/resources" + level: level3 + + + tasks: + + - name: "Load variable for landingzones config" + include_vars: + name: asvm_config__to_merge + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "config.asvm.yaml|tfstates.asvm.yaml|deployments.yaml" + + - name: "Set base variables" + set_fact: + job_cache_base_path: "/home/vscode/.terraform.cache" + config: "{{asvm_config__to_merge}}" + + - name: "Content of asvm_config__to_merge" + debug: + msg: "{{asvm_config__to_merge}}" + + - name: "Load variable for platform config" + include_vars: + name: platform_config__to_merge + dir: "{{config_folder_platform | default(config_folder)}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "caf.platform.yaml|tfstates.caf.yaml|tfstates.yaml" + + - name: "Content of platform_config__to_merge" + debug: + msg: "{{platform_config__to_merge}}" + + - name: Merge asvm and platform variables + merge_vars: + suffix_to_merge: config__to_merge + merged_var_name: config + expected_type: 'dict' + recursive_dict_merge: True + + - name: "Set base config variables" + set_fact: + config: "{{ ansible_facts.config }}" + + - name: "Content of config" + debug: + msg: "{{config}}" + + + - name: "Creates cache directory" + file: + path: "{{ job_cache_base_path }}/launchpad" + state: directory + + + - name: "{{ level }} | Get platform details (requires '-e config_folder_platform=path to yamls' path to be set)" + include_tasks: "ansible-get-platform-details.yaml" + when: config_folder_platform is defined + +# +# Level 3 +# + + # landingzones deployments + + - name: "{{ level }} | landingzones" + include_tasks: "{{ level }}/ansible.yaml" + loop: "{{asvm_config__to_merge.deployments.keys()}}" + loop_control: + loop_var: asvm_long_folder + +# +# Linters +# + + - name: Terraform linter + shell: | + terraform fmt -recursive {{ destination_base_path }} diff --git a/templates/applications/level3/ansible-subscription-id.yaml b/templates/applications/level3/ansible-subscription-id.yaml new file mode 100644 index 000000000..571777bb6 --- /dev/null +++ b/templates/applications/level3/ansible-subscription-id.yaml @@ -0,0 +1,66 @@ +- name: "Load variable for subscriptions" + include_vars: + name: subscriptions + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "subscriptions.asvm.yaml|subscription.asvm.yaml" + +- name: "Content of subscriptions" + debug: + msg: "{{subscriptions}}" + +- name: "[{{ level }}-{{ subscription_key }}] Get tfstate details" + register: 
subscription_tfstate_storage_account_name + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates['asvm'][subscription_key].level }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{ subscription_tfstate_storage_account_name.stdout }}" + +- name: "[{{ level }}-{{ subscription_key }}] Download tfstate details" + register: subscription_tfstate_exists + ignore_errors: true + shell: | + az storage blob download \ + --name "{{ config.tfstates['asvm'][subscription_key].subscriptions.tfstate }}" \ + --account-name "{{ subscription_tfstate_storage_account_name.stdout }}" \ + --container-name "{{ config.tfstates['asvm'][subscription_key].workspace }}" \ + --auth-mode "login" \ + --file "{{ job_cache_base_path }}/{{ config.tfstates['asvm'][subscription_key].subscriptions.tfstate }}" + +- debug: + msg: "{{ subscription_tfstate_exists }}" + when: subscriptions.subscriptions[subscription_key] is defined + +- name: "[{{ level }}-{{ subscription_key }}] Get landingzones_subscriptions details" + shell: "cat {{ job_cache_base_path }}/{{ config.tfstates['asvm'][subscription_key].subscriptions.tfstate }}" + register: platform_subscriptions + when: + - subscriptions.subscriptions[subscription_key] is defined + - subscription_tfstate_exists.rc == 0 + +- name: "[{{ level }}-{{ subscription_key }}] Get subscriptions data" + when: + - subscriptions.subscriptions[subscription_key] is defined + - subscription_tfstate_exists.rc == 0 + set_fact: + asvm_subscriptions_details: "{{ platform_subscriptions.stdout | from_json | json_query(path) }}" + vars: + path: 'outputs.objects.value."{{ config.tfstates["asvm"][subscription_key].subscriptions.lz_key_name }}".subscriptions' + +- name: "[{{ level }}-{{ subscription_key }}] cleanup" + when: + - subscriptions.subscriptions[subscription_key] is defined + - subscription_tfstate_exists.rc == 0 + file: + path: "{{ job_cache_base_path }}/{{ config.tfstates['asvm'][subscription_key].subscriptions.tfstate }}" + state: absent + +- debug: + msg: "Platform subscriptions - {{ asvm_subscriptions_details }}" + when: + - subscriptions.subscriptions[subscription_key] is defined + - subscription_tfstate_exists.rc == 0 diff --git a/templates/applications/level3/ansible-subscription.yaml b/templates/applications/level3/ansible-subscription.yaml new file mode 100644 index 000000000..da33410fe --- /dev/null +++ b/templates/applications/level3/ansible-subscription.yaml @@ -0,0 +1,62 @@ + +- name: set destination paths + set_fact: + destination_path: "{{ destination_base_path }}/{{ subscription_key }}/subscription" + deployment: "subscriptions" + +- name: "Clean-up directory - subscription - {{ destination_path }}" + file: + path: "{{ destination_path }}" + state: absent + +- name: "Content of subscriptions' resources" + debug: + msg: "{{resources}}" + +- name: "[{{ level }} {{ subscription_key }}] Creates directory" + file: + path: "{{ destination_path }}" + state: directory + +# +# global_settings +# +- name: "[{{ level }} {{ subscription_key }}] - subscription - global_settings" + when: resources.subscriptions[subscription_key].global_settings is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/global_settings.tfvars.j2" +# +# landingzone +# +- 
name: "[{{ level }} {{ subscription_key }}] - subscription - landingzone" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/landingzone.tfvars.j2" +# +# subscription +# +- name: "[{{ level }} {{ subscription_key }}] - subscription - subscription" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/subscriptions.tfvars.j2" + +# +# Readme +# +- name: "[{{ level }}-{{ subscription_key }}] - subscription - *.md" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_templates_folder }}/{{ level }}/subscription/*.md" diff --git a/templates/applications/level3/ansible.yaml b/templates/applications/level3/ansible.yaml new file mode 100644 index 000000000..ed6688cfb --- /dev/null +++ b/templates/applications/level3/ansible.yaml @@ -0,0 +1,58 @@ +- name: set asvm context + set_fact: + asvm_folder: "{{ asvm_long_folder if 'path' not in asvm_long_folder else asvm_long_folder.path | regex_search('[^\/]+(?=\/$|$)') }}" + +- name: "[{{ level }}-{{ asvm_folder }}] Set cache folder" + set_fact: + # job_cache_base_path: "/home/vscode/.terraform.cache" + subscription_key: "{{ asvm_folder }}" + +- name: "Load variable for deployments" + include_vars: + name: deployments + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "deployments.asvm.yaml|deployments.yaml" + +- debug: + msg: "{{deployments}}" + +### Generate remote state storage containers + +- name: "[{{ level }} {{ subscription_key }}] - remote state container" + include_tasks: "{{ level }}/storage_containers/ansible.yaml" + when: + - deployments.deployments[subscription_key].storage_containers is defined + +#### Get subscription_id + +- name: "[{{ level }} {{ subscription_key }}] - subscription" + include_tasks: "{{ level }}/ansible-subscription-id.yaml" + when: + - config.tfstates['asvm'][subscription_key].subscriptions is defined + - config.tfstates['asvm'][subscription_key].subscriptions.subscription_id is not defined + +### Subscription + +- name: "Load variable for subscriptions" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "subscriptions.asvm.yaml|subscription.asvm.yaml|tfstates.asvm.yaml" + +- name: "[{{ level }} {{ subscription_key }}] - subscription" + include_tasks: "{{ level }}/ansible-subscription.yaml" + when: + - resources.subscriptions[subscription_key] is defined + - config.tfstates['asvm'][subscription_key].subscriptions.subscription_id is not defined + + +#### Privileged resources to deploy in the landingzone + +- name: "[{{ level }} {{ subscription_key }}] - resources" + include_tasks: "{{ level }}/resources/ansible.yaml" + when: + - config.tfstates['asvm'][subscription_key].resources is defined diff --git a/templates/applications/level3/resources/ansible.yaml b/templates/applications/level3/resources/ansible.yaml new file mode 100644 index 000000000..5eb5ae7b2 --- /dev/null +++ b/templates/applications/level3/resources/ansible.yaml @@ -0,0 +1,262 @@ + +- name: set destination paths + set_fact: + destination_path: "{{ destination_base_path }}/{{ subscription_key }}/resources" + deployment: "resources" + +- name: 
"Clean-up directory - subscription - {{ destination_path }}" + file: + path: "{{ destination_path }}" + state: absent + when: config.configuration_folders.asvm.cleanup_destination | default(true) | bool + +- name: "Load variable for resources" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "resources.asvm.yaml" + +- name: "Content of resources" + debug: + msg: "{{resources}}" + +- name: "[{{ level }} {{ asvm_folder }}] - resources - Creates directory" + file: + path: "{{ destination_path }}" + state: directory +# +# azuread_credentials +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_credentials" + when: + - resources.subscriptions[subscription_key].azuread_credentials is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_credentials.tfvars.j2" + +# +# azuread_applications +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_applications" + when: + - resources.subscriptions[subscription_key].azuread_applications is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_applications.tfvars.j2" + +# +# azuread_credential_policies +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_credential_policies" + when: + - resources.subscriptions[subscription_key].azuread_credential_policies is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_credential_policies.tfvars.j2" + +# +# azuread_groups +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_groups" + when: + - resources.subscriptions[subscription_key].azuread_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_groups.tfvars.j2" + +# +# azuread_groups_membership +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_groups_membership" + when: + - resources.subscriptions[subscription_key].azuread_groups_membership is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_groups_membership.tfvars.j2" + +# +# azuread_service_principals +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - azuread_service_principals" + when: + - resources.subscriptions[subscription_key].azuread_service_principals is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_service_principals.tfvars.j2" + +# +# custom_role_definitions +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - custom_role_definitions" + when: + - resources.subscriptions[subscription_key].custom_role_definitions is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') 
}}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/custom_role_definitions.tfvars.j2" + +# +# keyvaults +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - keyvaults" + when: + - resources.subscriptions[subscription_key].keyvaults is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/keyvaults.tfvars.j2" + +# +# keyvault_access_policies +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - keyvault_access_policies" + when: + - resources.subscriptions[subscription_key].keyvault_access_policies is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/keyvault_access_policies.tfvars.j2" + +# +# landingzone +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - landingzone" + when: + - deployments.deployments[subscription_key][deployment].landingzone is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/landingzone.tfvars.j2" + +# +# managed_identities +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - managed_identities" + when: + - resources.subscriptions[subscription_key].managed_identities is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/managed_identities.tfvars.j2" + +# +# network_security_group_definition +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - network_security_group_definition" + when: + - resources.subscriptions[subscription_key].network_security_group_definition is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/network_security_group_definition.tfvars.j2" + +# +# recovery_vaults +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - recovery_vaults" + when: + - resources.subscriptions[subscription_key].recovery_vaults is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/recovery_vaults.tfvars.j2" + +# +# resource_groups +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" + +# +# role_mapping +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - role_mapping" + when: + - resources.subscriptions[subscription_key].role_mapping is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/role_mapping.tfvars.j2" + +# +# virtual_hub_connections +# +- name: "[{{ level }}-{{ subscription_key }}] - 
resources - virtual_hub_connections" + when: + - resources.subscriptions[subscription_key].virtual_hub_connections is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/virtual_hub_connections.tfvars.j2" + + +# +# virtual_networks +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - virtual_networks" + when: + - resources.subscriptions[subscription_key].virtual_networks is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/virtual_networks.tfvars.j2" + + +# +# Readme +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - *.md" + when: subscription_tfstate_exists.rc == 0 + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_templates_folder }}/{{ level }}/resources/*.md" diff --git a/templates/applications/level3/resources/readme.md b/templates/applications/level3/resources/readme.md new file mode 100644 index 000000000..01f97b9cf --- /dev/null +++ b/templates/applications/level3/resources/readme.md @@ -0,0 +1,31 @@ + +### Deploy base resources in {{ asvm_folder }} + +```bash +rover login -t {{ config.platform_identity.tenant_name }} + +unset ARM_SKIP_PROVIDER_REGISTRATION + +cd /tf/caf/landingzones +git pull +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_subscription_creation_landingzones.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ asvm_subscriptions_details[asvm_folder].subscription_id }} \ + -tfstate {{ config.tfstates['asvm'][asvm_folder].resources.tfstate }} \ + --workspace {{ config.tfstates['asvm'][asvm_folder].workspace }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates['asvm'][asvm_folder].resources.tfstate }}.tfplan \ + -a plan + +rover logout + +``` diff --git a/templates/applications/level3/storage_containers/ansible.yaml b/templates/applications/level3/storage_containers/ansible.yaml new file mode 100644 index 000000000..903064ad1 --- /dev/null +++ b/templates/applications/level3/storage_containers/ansible.yaml @@ -0,0 +1,80 @@ +- name: set destination paths + set_fact: + destination_path: "{{ destination_base_path }}/storage_containers" + deployment: "storage_containers" + +- name: "Load variable for resources" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "tfstates.asvm.yaml|subscriptions.asvm.yaml|subscription.asvm.yaml" + +- name: "Content of resources" + debug: + msg: "{{resources}}" + +- name: "[{{ level }} {{ asvm_folder }}] - storage_containers - Creates directory" + file: + path: "{{ destination_path }}" + state: directory + +# +# Get storage account names +# + +- name: "[{{ level }}-{{ subscription_key }}] - storage_containers - launchpad level3" + register: storage_account_level3 + shell: | + az storage 
account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='level3' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name, resource_group:resourceGroup}[0]" -o json | jq -r + +- debug: + msg: "{{storage_account_level3.stdout}}" + +- name: "[{{ level }}-{{ subscription_key }}] - storage_containers - launchpad level4" + register: storage_account_level4 + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='level4' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name, resource_group:resourceGroup}[0]" -o json | jq -r + +- debug: + msg: "{{storage_account_level4.stdout}}" + + +# +# landingzone +# +- name: "[{{ level }}-{{ subscription_key }}] - storage_containers - landingzone" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/landingzone.tfvars.j2" + +# +# storage_containers +# +- name: "[{{ level }}-{{ subscription_key }}] - storage_containers - storage_containers" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_templates_folder }}/{{ level }}/storage_containers/storage_containers.tfvars.j2" + + +# +# Readme +# +- name: "[{{ level }}-{{ subscription_key }}] - storage_containers - *.md" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_templates_folder }}/{{ level }}/storage_containers/*.md" diff --git a/templates/applications/level3/storage_containers/readme.md b/templates/applications/level3/storage_containers/readme.md new file mode 100644 index 000000000..ad1f01be9 --- /dev/null +++ b/templates/applications/level3/storage_containers/readme.md @@ -0,0 +1,28 @@ + +### Create storage containers for the landingzone + +```bash +rover login -t {{ config.platform_identity.tenant_name }} + +cd /tf/caf/landingzones +git pull +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_subscription_creation_landingzones.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.asvm[asvm_folder].subscriptions.tfstate }} \ + --workspace {{ config.tfstates.asvm[asvm_folder].subscriptions.workspace | default('tfstate') }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.asvm[asvm_folder].subscriptions.tfstate }}.tfplan \ + -a plan + +rover logout + +``` diff --git a/templates/applications/level3/storage_containers/storage_containers.tfvars.j2 b/templates/applications/level3/storage_containers/storage_containers.tfvars.j2 new file mode 100644 index 000000000..8784606bf --- /dev/null +++ b/templates/applications/level3/storage_containers/storage_containers.tfvars.j2 @@ -0,0 +1,16 @@ +storage_containers = { +{% for key in resources.subscriptions.keys() %} 
+ {{ key }}_level3 = { + name = "{{ resources.tfstates.asvm[key].workspace }}" + storage_account = { + name = "{{storage_account_level3.stdout|from_json|json_query('name')}}" + } + } + {{ key }}_level4 = { + name = "{{ resources.tfstates.asvm[key].workspace }}" + storage_account = { + name = "{{storage_account_level4.stdout|from_json|json_query('name')}}" + } + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/applications/level3/subscription/readme.md b/templates/applications/level3/subscription/readme.md new file mode 100644 index 000000000..8f1ad09ab --- /dev/null +++ b/templates/applications/level3/subscription/readme.md @@ -0,0 +1,40 @@ + +### Generate asvm for {{ asvm_folder }} + +```bash +rover login -t {{ config.platform_identity.tenant_name }} + +ARM_SKIP_PROVIDER_REGISTRATION=true && rover \ +{% if config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_subscription_creation_landingzones.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates["asvm"][asvm_folder].subscriptions.tfstate }} \ + --workspace {{ config.tfstates["asvm"][asvm_folder].workspace }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates["asvm"][asvm_folder].subscriptions.tfstate }}.tfplan \ + -a plan + +rover logout + +``` +Once you have executed the rover apply to create the subscription, re-run rover ignite to generate the instructions for the next steps. + +Note: you need to log out and log back in as a member of the caf_maintainer group. + +```bash +rover login -t {{ config.platform_identity.tenant_name }} + +rover ignite \ + --playbook /tf/caf/starter/templates/landingzones/ansible.yaml \ + -e base_templates_folder={{ base_templates_folder }} \ + -e resource_template_folder={{ resource_template_folder }} \ + -e config_folder={{ config_folder }} \ + -e destination_base_path={{ destination_base_path }} \ + -e config_folder_platform={{ config_folder_platform }} + +``` + diff --git a/templates/applications/level4/readme.md b/templates/applications/level4/readme.md new file mode 100644 index 000000000..b3905517b --- /dev/null +++ b/templates/applications/level4/readme.md @@ -0,0 +1 @@ +yaml support for level4 coming soon. \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/README.md b/templates/enterprise-scale/contoso/platform/README.md new file mode 100644 index 000000000..a0a3c91ad --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/README.md @@ -0,0 +1,31 @@ +# Cloud Adoption Framework landing zones for Terraform - Starter template for Azure Platform + + +## Commands + +### Clone the landingzone project (Terraform base code) +```bash +cd /tf/caf/landingzones +git pull +git checkout 2112.int + +``` + +### Rover ignite the platform +Rover ignite will now process the yaml files and start building the configuration structure of the tfvars. Note that during the creation of the platform landingzones you will have to run rover ignite several times, as some deployments must complete before you can perform the next steps. +Rover ignite creates the tfvars as well as the documentation. 
+ +```bash +rover login -t tenantname -s + +rover ignite \ + --playbook /tf/caf/landingzones/templates/platform/ansible.yaml \ + -e base_templates_folder=/tf/caf/landingzones/templates/platform \ + -e resource_template_folder=/tf/caf/landingzones/templates/resources \ + -e config_folder=/tf/caf/orgs/contoso/platform + +``` + +### Next step + +Once the rover ignite command has been executed, go to your configuration folder, where the platform launchpad configuration has been created. \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/asvm.yaml b/templates/enterprise-scale/contoso/platform/asvm.yaml new file mode 100644 index 000000000..658826a33 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/asvm.yaml @@ -0,0 +1,35 @@ +subscriptions: + asvm: + resource_groups: + level3: + name: caf-level3 + tags: + level: level3 + level4: + name: caf-level4 + tags: + level: level4 + + storage_accounts: + level3: + name: l3 + resource_group_key: level3 + level4: + name: l4 + resource_group_key: level4 + + keyvaults: + level3: + name: l3 + resource_group_key: level3 + level4: + name: l4 + resource_group_key: level4 + + + azuread_groups: + caf_ac_landingzone_maintainers_non_prod: + name: caf_ac_landingzone_maintainers_non_prod + + caf_ac_landingzone_maintainers_prod: + name: caf_ac_landingzone_maintainers_prod diff --git a/templates/enterprise-scale/contoso/platform/connectivity.yaml b/templates/enterprise-scale/contoso/platform/connectivity.yaml new file mode 100644 index 000000000..77210f19d --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity.yaml @@ -0,0 +1,34 @@ +virtual_networks: + connectivity_hub1_firewall_egress: + name: firewall-egress-re1 + resource_group_key: contoso_global_firewall + region_key: region1 + address_space: + - + specialsubnets: + AzureFirewallSubnet: + name: AzureFirewallSubnet + cidr: + - + + +azurerm_firewalls: + egress_fw_region1: + name: egress-firewall + resource_group_key: contoso_global_firewall + region_key: region1 + # egress_fw_region2: + # name: egress-firewall + # resource_group_key: contoso_global_wan + # region_key: region2 + +resource_groups: + contoso_global_dns: + name: contoso-connectivity-global-dns + region_key: region1 + contoso_global_firewall: + name: contoso-connectivity-global-firewall + region_key: region1 + contoso_global_er_circuits: + name: contoso-connectivity-global-er-circuits + region_key: region1 diff --git a/templates/enterprise-scale/contoso/platform/connectivity_express_route_peerings.yaml b/templates/enterprise-scale/contoso/platform/connectivity_express_route_peerings.yaml new file mode 100644 index 000000000..21b5fead7 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_express_route_peerings.yaml @@ -0,0 +1,22 @@ +# Deployed in a separate tfstate. Circuit must be provisioned first. 
+express_route_circuit_peerings: + prod: + private_peering: + express_route: + lz_key: connectivity_express_route_prod + key: prod + peering_type: AzurePrivatePeering + primary_peer_address_prefix: + secondary_peer_address_prefix: + peer_asn: + vlan_id: + non_prod: + private_peering: + express_route: + lz_key: connectivity_express_route_non_prod + key: non_prod + peering_type: AzurePrivatePeering + primary_peer_address_prefix: + secondary_peer_address_prefix: + peer_asn: + vlan_id: diff --git a/templates/enterprise-scale/contoso/platform/connectivity_express_routes.yaml b/templates/enterprise-scale/contoso/platform/connectivity_express_routes.yaml new file mode 100644 index 000000000..46eb0a79f --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_express_routes.yaml @@ -0,0 +1,39 @@ +gitops: + caf_landingzone_branch: 2112.int + +express_route_circuits: + prod: + name: er-1-prod + resource_group_key: prod + service_provider_name: XL Axiata + peering_location: Jakarta + tier: Standard + family: MeteredData + bandwidth_in_mbps: 50 + non_prod: + name: er-1-nonprod + resource_group_key: non_prod + service_provider_name: XL Axiata + peering_location: Jakarta + tier: Standard + family: MeteredData + bandwidth_in_mbps: 50 + +express_route_circuit_authorizations: + prod: + vhub-prod: + name: er-auth-vhub-prod + resource_group_key: prod + non_prod: + vhub-non-prod: + name: er-auth-vhub-non-prod + resource_group_key: dev + + +resource_groups: + prod: + name: connectivity-express-route-prod + region_key: region1 + non_prod: + name: connectivity-express-route-non-prod + region_key: region1 diff --git a/templates/enterprise-scale/contoso/platform/connectivity_firewall.yaml b/templates/enterprise-scale/contoso/platform/connectivity_firewall.yaml new file mode 100644 index 000000000..c4c3a8cbd --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_firewall.yaml @@ -0,0 +1,5 @@ + +resource_groups: + contoso_global_firewall: + name: contoso-connectivity-global-firewall + region_key: region1 \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/connectivity_virtual_hub.yaml b/templates/enterprise-scale/contoso/platform/connectivity_virtual_hub.yaml new file mode 100644 index 000000000..821036a2f --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_virtual_hub.yaml @@ -0,0 +1,59 @@ +virtual_hubs: + prod: + name: Production + virtual_wan: + lz_key: virtual_wan + key: global_wan + region_key: region1 + hub_address_prefix: + deploy_s2s: false + s2s_config: + name: prod + scale_unit: 1 + deploy_er: false + enable_er_connections: false + er_config: + name: prod + scale_units: 1 + + non_prod: + name: Non Production + virtual_wan: + lz_key: virtual_wan + key: global_wan + region_key: region1 + hub_address_prefix: + deploy_s2s: false + s2s_config: + name: non-prod + scale_unit: 1 + deploy_er: false + enable_er_connections: false + er_config: + name: non-prod + scale_units: 1 + + +express_route_connections: + prod: + name: erc-er-1-prod + virtual_hub: + key: prod + circuit_peering: + tfstate_key: prod + key: private_peering + express_route_circuit_authorization: + tfstate_key: prod + key: vhub-prod + + non_prod: + name: er-1-non-prod + virtual_hub: + key: non_prod + circuit_peering: + tfstate_key: non_prod + key: private_peering + express_route_circuit_authorization: + tfstate_key: non_prod + key: vhub-non-prod + diff --git a/templates/enterprise-scale/contoso/platform/connectivity_virtual_wan.yaml 
b/templates/enterprise-scale/contoso/platform/connectivity_virtual_wan.yaml new file mode 100644 index 000000000..6de263ee3 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_virtual_wan.yaml @@ -0,0 +1,10 @@ +virtual_wans: + global_wan: + name: vwan + resource_group_key: global_wan + region_key: region1 + +resource_groups: + global_wan: + name: connectivity-global-wan + region_key: region1 diff --git a/templates/enterprise-scale/contoso/platform/connectivity_vpn_gateway_connections.yaml b/templates/enterprise-scale/contoso/platform/connectivity_vpn_gateway_connections.yaml new file mode 100644 index 000000000..c8eeeee61 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_vpn_gateway_connections.yaml @@ -0,0 +1,53 @@ +vpn_gateway_connections: + prod: + name: production + internet_security_enabled: true + vpn_site: + key: prod + virtual_hub: + lz_key: connectivity_virtual_hub_prod + key: prod + vpn_links: + prod: + name: prod + shared_key: + bgp_enabled: false + bandwidth_mbps: 100 + link_index: 0 + protocol: IKEv2 + ipsec_policies: + policy1: + dh_group: DHGroup24 + ike_encryption_algorithm: AES256 + ike_integrity_algorithm: SHA256 + encryption_algorithm: AES256 + integrity_algorithm: SHA256 + pfs_group: None + sa_data_size_kb: 102400000 + sa_lifetime_sec: 3600 + non_prod: + name: production + internet_security_enabled: true + vpn_site: + key: non_prod + virtual_hub: + lz_key: connectivity_virtual_hub_non_prod + key: non_prod + vpn_links: + non_prod: + name: non_prod + shared_key: + bgp_enabled: false + bandwidth_mbps: 100 + link_index: 0 + protocol: IKEv2 + ipsec_policies: + policy1: + dh_group: DHGroup24 + ike_encryption_algorithm: AES256 + ike_integrity_algorithm: SHA256 + encryption_algorithm: AES256 + integrity_algorithm: SHA256 + pfs_group: None + sa_data_size_kb: 102400000 + sa_lifetime_sec: 3600 diff --git a/templates/enterprise-scale/contoso/platform/connectivity_vpn_sites.yaml b/templates/enterprise-scale/contoso/platform/connectivity_vpn_sites.yaml new file mode 100644 index 000000000..4eeda4c57 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/connectivity_vpn_sites.yaml @@ -0,0 +1,35 @@ +vpn_sites: + prod: + name: "Production" + resource_group: + lz_key: connectivity_virtual_wan + key: global_wan + virtual_wan: + lz_key: connectivity_virtual_wan + key: global_wan + device_vendor: checkpoint + address_cidrs: + - + links: + primary: + name: primary + ip_address: + provider_name: Microsoft + speed_in_mbps: 100 + non_prod: + name: "Non Production" + resource_group: + lz_key: connectivity_virtual_wan + key: global_wan + virtual_wan: + lz_key: connectivity_virtual_wan + key: global_wan + device_vendor: checkpoint + address_cidrs: + - + links: + primary: + name: primary + ip_address: + provider_name: Microsoft + speed_in_mbps: 100 diff --git a/templates/enterprise-scale/contoso/platform/contoso.caf.platform.yaml b/templates/enterprise-scale/contoso/platform/contoso.caf.platform.yaml new file mode 100644 index 000000000..65f8dd692 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/contoso.caf.platform.yaml @@ -0,0 +1,118 @@ +caf_terraform: + naming_convention: + # When set to false use the CAF provider to generate names aligned to CAF guidance + # true: use the name as defined in the configuration files. You may have to iterate multiple times to prevent conflicts with Azure unique names with servides like storage account, keyvault or log analytics workspace. 
+ passthrough: false + inherit_tags: false + # set: define the prefix to add to all resource names + # unset: if passthrough is set to false, generate a random prefix + prefix: cont + # if passthrough is set to false, add a random suffix to the name, up to the random_length value. + random_length: 5 + launchpad: + caf_environment: contoso + account_replication_type: GRS + regions: + region1: + # set the short form of the Azure region + name: southeastasia # Use the lower-case region's name, short version with no space + slug: sea + region2: + name: eastasia # Use the lower-case region's name, short version with no space + slug: ea + default_region_key: region1 + # Define the number of CAF levels to use. Recommended is 3 for the platform. + number_of_levels: 3 + blob_versioning_enabled: true + container_delete_retention_policy: 7 + delete_retention_policy: 7 + # Subscription_id to deploy the launchpad. Note: one existing, manually created subscription is required to deploy the launchpad. + subscription_id: + subscription_name: contoso-caf-launchpad + tenant_id: + + billing_subscription_role_delegations: + # true: enable this deployment. The remaining attributes are required. + # false: disable this deployment. + # azuread_user_ea_account_owner: set the upn of the user doing the manual deployment of the platform + # azuread_user_ea_account_owner_object_id: if that user is already logged in to an azure cli session you can get the object_id by running: + # az ad signed-in-user show --query objectId -o tsv + # The remaining attributes are ignored: [billing_account_name, enrollment_account_name] + enable: false + # Azure Active Directory User (UPN) that is Account Owner in the EA portal + # if enable=false, set the upn of the user doing the manual deployment + azuread_user_ea_account_owner: + # see comments above to get the object_id + azuread_user_ea_account_owner_object_id: + # Only set the following two attributes when enable=true + billing_account_name: + enrollment_account_name: + +# cleanup_destination - recommended to clean up and recreate a clean state from the template. +configuration_folders: + platform: + # true: force the destination folder to be deleted and re-created before the files are created. + # false: create the target folder structure if it does not exist. On subsequent executions, the folder structure is reused as is. + cleanup_destination: true + # base destination folder where rover ignite will store the tfvars files. No / at the end + destination_base_path: /tf/caf + # destination relative path to destination_base_path folder where rover ignite will store the tfvars files. 
No / at begining and end + destination_relative_path: configuration/contoso/platform + + +platform_core_setup: + sku: + keyvault: standard + enterprise_scale: + enable: true + scenario: contoso + model: demo + management_group_name: "Contoso Industries" + management_group_prefix: contoso + deploy_core_landing_zones: true + enable_azure_subscription_vending_machine: true + clean_up_destination_folder: false + update_lib_folder: true + subscription_deployment_mode: dedicated_new + private_lib: + version_to_deploy: v1.1.1 + v0.1.x: + caf_landingzone_branch: "2107.1" + v0.3.3: + caf_landingzone_branch: "patch.5.4.4" + v1.1.1: + caf_landingzone_branch: "2112.int" + +platform_management: + enable: true + +networking_topology: + deployment_option: virtual_wan + +platform_identity: + # Set the Azure Active Directory tenant name (primary domain name) + # has to be the default domain name (custom dns name or tenantname.onmicrosoft.com) + # check the AAD property + tenant_name: + # only service_principal supported with rover ignite at the moment + azuread_identity_mode: service_principal + # UPNs you want to add in the caf_platform_maintainers Azure AD group + # Can use user or guest accounts + # Those users will have full permissions on platform. + # Once setup, you can remove them from here or add them from + # Check in Azure AD the User Principal Name attribute value. Note there is a special convention for guest accounts. + caf_platform_maintainers: + - + +notifications: + service_health_alerts: + emails: + support1: + name: + email_address: + security_center_email_contact: + +gitops: + caf_landingzone_branch: "2112.int" + deployment_mode: interactive + rover_log_error: ERROR diff --git a/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewall_policies.yaml b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewall_policies.yaml new file mode 100644 index 000000000..e9c4e245d --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewall_policies.yaml @@ -0,0 +1,34 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + private_dns: non_prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + management: + asvm: + +subscriptions: + connectivity: + resource_groups: + firewall_policies: + name: connectivity-non-prod-firewall-policies + region_key: region1 + + azurerm_firewall_policies: + root: + name: "non-prod-root-policy" + region_key: region1 + resource_group: + key: firewall_policies + dns: + proxy_enabled: true + threat_intelligence_mode: "Alert" + diff --git a/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewalls.yaml b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewalls.yaml new file mode 100644 index 000000000..b89bb2a17 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_firewalls.yaml @@ -0,0 +1,77 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + azurerm_firewalls: non_prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + virtual_hubs: non_prod + azurerm_firewall_policies: non_prod + + +subscriptions: + connectivity: + resource_groups: + firewall_policies: + name: connectivity-non-prod-firewall + region_key: region1 + + + virtual_networks: + vnet: + name: vnet-connectivity-non-prod-fw-plinks + 
resource_group_key: firewall_policies + region_key: region1 + address_space: + - 10.51.4.0/26 + specialsubnets: + AzureFirewallSubnet: + name: AzureFirewallSubnet + cidr: + - 10.51.4.0/26 + + public_ip_addresses: + fw_pip1: + name: pip-non-prod-fw-01 + resource_group_key: firewall_policies + sku: Standard + allocation_method: Static + ip_version: IPv4 + idle_timeout_in_minutes: 4 + + azurerm_firewalls: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + resource_group_key: firewall_policies + vnet_key: vnet + sku_tier: Standard + firewall_policy: + key: root + lz_key: connectivity_firewall_policies_non_prod + zones: + - 1 + - 2 + - 3 + public_ips: + ip1: + name: pip1 + public_ip_key: fw_pip1 + vnet_key: vnet + subnet_key: AzureFirewallSubnet + + + virtual_hub_connections: + vnet_to_hub: + name: vnet-connectivity-non-prod-fw-plinks-TO-vhub-non_prod + virtual_hub: + lz_key: connectivity_virtual_hub_non_prod + key: non_prod + vnet: + vnet_key: vnet \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_private_dns.yaml b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_private_dns.yaml new file mode 100644 index 000000000..b1218f556 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/non_prod/connectivity_private_dns.yaml @@ -0,0 +1,189 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + private_dns: non_prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + management: + asvm: + + +subscriptions: + connectivity: + resource_groups: + dns_connectivity_non_prod: + name: dns-connectivity-non-prod + private_dns: + privatelink.adf.azure.com: + name: privatelink.adf.azure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.datafactory.azure.net: + name: privatelink.datafactory.azure.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.blob.core.windows.net: + name: privatelink.blob.core.windows.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.file.core.windows.net: + name: privatelink.file.core.windows.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.notebooks.azure.net: + name: privatelink.notebooks.azure.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.dfs.core.windows.net: + name: privatelink.dfs.core.windows.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.vaultcore.azure.net: + name: privatelink.vaultcore.azure.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.southeastasia.azmk8s.io: + name: 
privatelink.southeastasia.azmk8s.io + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.azurecr.io: + name: privatelink.azurecr.io + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.southeastasia.backup.windowsazure.com: + name: privatelink.southeastasia.backup.windowsazure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.siterecovery.windowsazure.com: + name: privatelink.siterecovery.windowsazure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.servicebus.windows.net: + name: privatelink.servicebus.windows.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.api.azureml.ms: + name: privatelink.api.azureml.ms + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.monitor.azure.com: + name: privatelink.monitor.azure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.oms.opinsights.azure.com: + name: privatelink.oms.opinsights.azure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.ods.opinsights.azure.com: + name: privatelink.ods.opinsights.azure.com + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + privatelink.agentsvc.azure-automation.net: + name: privatelink.agentsvc.azure-automation.net + resource_group_key: dns_connectivity_non_prod + vnet_links: + fw_non_prod_plinks_01: + name: fw-non-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_non_prod + + custom_role_definitions: + landgingzone_extended: + name: landingzone-networking-non-prod-private-dns-extended + useprefix: true + description: "(non-prod) Provides additional permissions for the level4 principal to perform activities on the level2 private dns zones for private links." 
+ permissions: + actions: + - Microsoft.Network/privateDnsZones/join/action + - Microsoft.Network/privateEndpoints/privateDnsZoneGroups/read + - Microsoft.Network/privateEndpoints/privateDnsZoneGroups/write + + role_mapping: + custom_role_mapping: + resource_groups: + dns_connectivity_prod: + landgingzone_extended: + azuread_groups: + lz_key: identity_level2 + keys: + - caf_non_prod_landingzones_dns_contributors + built_in_role_mapping: + resource_groups: + dns_connectivity_non_prod: + Private DNS Zone Contributor: + azuread_groups: + lz_key: identity_level2 + keys: + - caf_non_prod_landingzones_dns_contributors diff --git a/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewall_policies.yaml b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewall_policies.yaml new file mode 100644 index 000000000..720fc7954 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewall_policies.yaml @@ -0,0 +1,33 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + private_dns: prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + management: + asvm: + +subscriptions: + connectivity: + resource_groups: + firewall_policies: + name: connectivity-prod-firewall-policies + region_key: region1 + + azurerm_firewall_policies: + root: + name: "prod-root-policy" + region_key: region1 + resource_group: + key: firewall_policies + dns: + proxy_enabled: true + threat_intelligence_mode: "Alert" diff --git a/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewalls.yaml b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewalls.yaml new file mode 100644 index 000000000..54639c529 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_firewalls.yaml @@ -0,0 +1,77 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + azurerm_firewalls: prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + virtual_hubs: prod + azurerm_firewall_policies: prod + + +subscriptions: + connectivity: + resource_groups: + firewall_policies: + name: connectivity-prod-firewall + region_key: region1 + + + virtual_networks: + vnet: + name: vnet-connectivity-prod-fw-plinks + resource_group_key: firewall_policies + region_key: region1 + address_space: + - 10.51.196.0/26 + specialsubnets: + AzureFirewallSubnet: + name: AzureFirewallSubnet + cidr: + - 10.51.196.0/26 + + public_ip_addresses: + fw_pip1: + name: pip-prod-fw-01 + resource_group_key: firewall_policies + sku: Standard + allocation_method: Static + ip_version: IPv4 + idle_timeout_in_minutes: 4 + + azurerm_firewalls: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + resource_group_key: firewall_policies + vnet_key: vnet + sku_tier: Standard + firewall_policy: + key: root + lz_key: connectivity_firewall_policies_prod + zones: + - 1 + - 2 + - 3 + public_ips: + ip1: + name: pip1 + public_ip_key: fw_pip1 + vnet_key: vnet + subnet_key: AzureFirewallSubnet + + + virtual_hub_connections: + vnet_to_hub: + name: vnet-connectivity-prod-fw-plinks-TO-vhub-prod + virtual_hub: + lz_key: connectivity_virtual_hub_prod + key: prod + vnet: + vnet_key: vnet \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_private_dns.yaml 
b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_private_dns.yaml new file mode 100644 index 000000000..4bfa756d6 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/prod/connectivity_private_dns.yaml @@ -0,0 +1,188 @@ +gitops: + landingzones: 2112.int + +deployments: + connectivity: + landingzone: + key: + platform: + private_dns: prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + management: + +subscriptions: + connectivity: + resource_groups: + dns_connectivity_prod: + name: dns-connectivity-prod + private_dns: + privatelink.adf.azure.com: + name: privatelink.adf.azure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.datafactory.azure.net: + name: privatelink.datafactory.azure.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.blob.core.windows.net: + name: privatelink.blob.core.windows.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.file.core.windows.net: + name: privatelink.file.core.windows.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.notebooks.azure.net: + name: privatelink.notebooks.azure.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.dfs.core.windows.net: + name: privatelink.dfs.core.windows.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.vaultcore.azure.net: + name: privatelink.vaultcore.azure.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.southeastasia.azmk8s.io: + name: privatelink.southeastasia.azmk8s.io + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.azurecr.io: + name: privatelink.azurecr.io + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.southeastasia.backup.windowsazure.com: + name: privatelink.southeastasia.backup.windowsazure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.siterecovery.windowsazure.com: + name: privatelink.siterecovery.windowsazure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.servicebus.windows.net: + name: privatelink.servicebus.windows.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.api.azureml.ms: + name: privatelink.api.azureml.ms + 
resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.monitor.azure.com: + name: privatelink.monitor.azure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.oms.opinsights.azure.com: + name: privatelink.oms.opinsights.azure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.ods.opinsights.azure.com: + name: privatelink.ods.opinsights.azure.com + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + privatelink.agentsvc.azure-automation.net: + name: privatelink.agentsvc.azure-automation.net + resource_group_key: dns_connectivity_prod + vnet_links: + fw_prod_plinks_01: + name: fw-prod-plinks-01 + vnet_key: vnet + lz_key: connectivity_firewalls_prod + + custom_role_definitions: + landgingzone_extended: + name: landingzone-networking-private-dns-extended + useprefix: true + description: "(prod) Provides additional permissions for the level4 principal to perform activies on the level2 private dns zones for private links." + permissions: + actions: + - Microsoft.Network/privateDnsZones/join/action + - Microsoft.Network/privateEndpoints/privateDnsZoneGroups/read + - Microsoft.Network/privateEndpoints/privateDnsZoneGroups/write + + role_mapping: + custom_role_mapping: + resource_groups: + dns_connectivity_prod: + landgingzone_extended: + azuread_groups: + lz_key: identity_level2 + keys: + - caf_prod_landingzones_dns_contributors + built_in_role_mapping: + resource_groups: + dns_connectivity_prod: + Private DNS Zone Contributor: + azuread_groups: + lz_key: identity_level2 + keys: + - caf_prod_landingzones_dns_contributors + diff --git a/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2.yaml b/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2.yaml new file mode 100644 index 000000000..5edab1e12 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2.yaml @@ -0,0 +1,13 @@ +gitops: + landingzones: 2112.int + +deployments: + identity: + +subscriptions: + identity: + azuread_groups: + caf_non_prod_landingzones_dns_contributors: + name: caf ac non_prod landingzones dns contributors + caf_prod_landingzones_dns_contributors: + name: caf ac prod landingzones dns contributors \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2_aadds.yaml b/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2_aadds.yaml new file mode 100644 index 000000000..3e3ab8a7c --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/deployments/prod/identity_level2_aadds.yaml @@ -0,0 +1,170 @@ +gitops: + landingzones: 2112.int + +deployments: + identity: + landingzone: + key: + platform: + identity_aadds: prod + global_settings_key: + platform: + management: + remote_tfstates: + platform: + management: + virtual_hubs: prod + + +subscriptions: + identity: + resource_groups: + rg: + name: identity-prod-aadds + region_key: region1 + + + virtual_networks: + vnet: + name: vnet-identity-prod-aadds + resource_group_key: rg + region_key: region1 + address_space: + - 10.10.100.0/27 + 
dns_servers: + - 10.10.100.4 + - 10.10.100.5 + subnets: + aadds: + name: snet-aadds + cidr: + - 10.10.100.0/28 + nsg_key: aadds_re1 + management: + name: snet-aadds-management + cidr: + - 10.10.100.16/28 + + virtual_hub_connections: + vnet_to_hub: + name: vnet-identity-prod-aadds-TO-vhub-prod + virtual_hub: + lz_key: connectivity_virtual_hub_prod + key: prod + vnet: + vnet_key: vnet + + active_directory_domain_service: + aadds: + name: aadds + region: region1 + resource_group: + key: rg + domain_name: aadds-contoso.net + sku: Standard + filtered_sync_enabled: false + initial_replica_set: + region: region1 + subnet: + vnet_key: vnet + key: aadds + notifications: + additional_recipients: + - notifyA@example.net + - notifyB@example.net + notify_dc_admins: true + notify_global_admins: false + security: + ntlm_v1_enabled: false + sync_kerberos_passwords: true + sync_ntlm_passwords: false + sync_on_prem_passwords: true + tls_v1_enabled: false + + azuread_groups: + aad_dc_administrators: + name: AAD DC Administrators + prevent_duplicate_name: true + + network_security_group_definition: + aadds_re1: + version: 1 + resource_group_key: rg + region: region1 + name: nsg-aadds-re1 + nsg: + Inbound: + 400: + name: Debugging for support. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "3389" + source_address_prefix: CorpNetSaw + destination_address_prefix: "*" + 401: + name: Powershell remoting. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "5986" + source_address_prefix: AzureActiveDirectoryDomainServices + destination_address_prefix: "*" + Outbound: + 400: + name: Communication with the Azure AD Domain Services management service. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "AzureActiveDirectoryDomainServices" + 401: + name: Monitoring of the virtual machines. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "AzureMonitor" + 402: + name: Communication with Azure Storage. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "Storage" + 403: + name: Communication with Azure Active Directory. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "AzureActiveDirectory" + 404: + name: Communication with Windows Update. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "AzureUpdateDelivery" + 405: + name: Download of patches from Windows Update. + access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "AzureFrontDoor.FirstParty" + 406: + name: Automated management of security patches. 
+ access: Allow + protocol: tcp + source_port_range: "*" + destination_port_range: "443" + source_address_prefix: "*" + destination_address_prefix: "GuestAndHybridManagement" + diff --git a/templates/enterprise-scale/contoso/platform/eslz/archetype_config_overrides.caf.platform.yaml b/templates/enterprise-scale/contoso/platform/eslz/archetype_config_overrides.caf.platform.yaml new file mode 100644 index 000000000..c09590f41 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/archetype_config_overrides.caf.platform.yaml @@ -0,0 +1,311 @@ +archetype_definitions: + root: + archetype_id: root + policy_assignments: + Allowed-Locations: + listOfAllowedLocations: + - southeastasia # Use the lower-case region's name, short version with no space + - eastasia + Deny-RSG-Locations: + listOfAllowedLocations: + - southeastasia + - eastasia + # Set to Audit as Terraform cannot combine both in one operation yet. + Deny-Subnet-Without-Nsg: + effect: Audit + # Set to Audit as Terraform cannot combine both in one operation yet. + Deny-Subnet-Without-Udr: + effect: Audit + # More details on in the parameters in the Azure Policy definition (Azure Security Benchmark) 1f3afdf9-d0c9-4c3d-847f-89da613e70a8 + # Change the attributes values as they are not the same. Some are DeployIfNotExist, Some Disabled, Enabled... + Deploy-ASC-Monitoring: + aadAuthenticationInSqlServerMonitoringEffect: Disabled + diskEncryptionMonitoringEffect: Disabled + encryptionOfAutomationAccountMonitoringEffect: Disabled + identityDesignateLessThanOwnersMonitoringEffect: Disabled + identityDesignateMoreThanOneOwnerMonitoringEffect: Disabled + identityEnableMFAForWritePermissionsMonitoringEffect: Disabled + identityRemoveDeprecatedAccountMonitoringEffect: Disabled + identityRemoveDeprecatedAccountWithOwnerPermissionsMonitoringEffect: Disabled + identityRemoveExternalAccountWithOwnerPermissionsMonitoringEffect: Disabled + identityRemoveExternalAccountWithReadPermissionsMonitoringEffect: Disabled + identityRemoveExternalAccountWithWritePermissionsMonitoringEffect: Disabled + jitNetworkAccessMonitoringEffect: Disabled + networkSecurityGroupsOnSubnetsMonitoringEffect: Disabled + sqlDbEncryptionMonitoringEffect: Disabled + sqlManagedInstanceAdvancedDataSecurityEmailAdminsMonitoringEffect: Disabled + sqlManagedInstanceAdvancedDataSecurityEmailsMonitoringEffect: Disabled + sqlServerAdvancedDataSecurityEmailAdminsMonitoringEffect: Disabled + sqlServerAdvancedDataSecurityMonitoringEffect: Disabled + systemUpdatesMonitoringEffect: Disabled + useRbacRulesMonitoringEffect: Disabled + vmssSystemUpdatesMonitoringEffect: Disabled + windowsDefenderExploitGuardMonitoringEffect: Disabled + Deploy-ASCDF-Config: + emailSecurityContact: + logAnalytics: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + enableAscForKubernetes: DeployIfNotExists + enableAscForSql: DeployIfNotExists + enableAscForSqlOnVm: DeployIfNotExists + enableAscForDns: DeployIfNotExists + enableAscForArm: DeployIfNotExists + enableAscForOssDb: DeployIfNotExists + enableAscForAppServices: DeployIfNotExists + enableAscForRegistries: DeployIfNotExists + enableAscForKeyVault: DeployIfNotExists + enableAscForStorage: DeployIfNotExists + enableAscForServers: DeployIfNotExists + Deploy-AzActivity-Log: + logAnalytics: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + Deploy-LX-Arc-Monitoring: + Deploy-Resource-Diag: + 
profileName: eslz-diagnostic-log + logAnalytics: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + Deploy-WS-Arc-Monitoring: + logAnalytics: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + Deploy-VM-Monitoring: + logAnalytics_1: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + Deploy-VMSS-Monitoring: + logAnalytics_1: + lz_key: management + output_key: diagnostics + resource_type: log_analytics + resource_key: central_logs_sea + attribute_key: id + policy_definitions: + Append-AppService-httpsonly: + Append-AppService-latestTLS: + Append-KV-SoftDelete: + Append-Redis-disableNonSslPort: + Append-Redis-sslEnforcement: + Audit-MachineLearning-PrivateEndpointId: + Deny-AA-child-resources: + Deny-AppGW-Without-WAF: + Deny-AppServiceApiApp-http: + Deny-AppServiceFunctionApp-http: + Deny-AppServiceWebApp-http: + Deny-Databricks-NoPublicIp: + Deny-Databricks-Sku: + Deny-Databricks-VirtualNetwork: + Deny-MachineLearning-Aks: + Deny-MachineLearning-Compute-SubnetId: + Deny-MachineLearning-Compute-VmSize: + Deny-MachineLearning-ComputeCluster-RemoteLoginPortPublicAccess: + Deny-MachineLearning-ComputeCluster-Scale: + Deny-MachineLearning-HbiWorkspace: + Deny-MachineLearning-PublicAccessWhenBehindVnet: + Deny-MachineLearning-PublicNetworkAccess: + Deny-MySql-http: + Deny-PostgreSql-http: + Deny-Private-DNS-Zones: + Deny-PublicEndpoint-MariaDB: + Deny-PublicIP: + Deny-RDP-From-Internet: + Deny-Redis-http: + Deny-Sql-minTLS: + Deny-SqlMi-minTLS: + Deny-Storage-minTLS: + Deny-Subnet-Without-Nsg: + Deny-Subnet-Without-Udr: + Deny-VNET-Peer-Cross-Sub: + Deny-VNet-Peering: + Deploy-ASC-SecurityContacts: + Deploy-Budget: + Deploy-Custom-Route-Table: + Deploy-DDoSProtection: + Deploy-Diagnostics-AA: + Deploy-Diagnostics-ACI: + Deploy-Diagnostics-ACR: + Deploy-Diagnostics-AnalysisService: + Deploy-Diagnostics-ApiForFHIR: + Deploy-Diagnostics-APIMgmt: + Deploy-Diagnostics-ApplicationGateway: + Deploy-Diagnostics-CDNEndpoints: + Deploy-Diagnostics-CognitiveServices: + Deploy-Diagnostics-CosmosDB: + Deploy-Diagnostics-Databricks: + Deploy-Diagnostics-DataExplorerCluster: + Deploy-Diagnostics-DataFactory: + Deploy-Diagnostics-DLAnalytics: + Deploy-Diagnostics-EventGridSub: + Deploy-Diagnostics-EventGridSystemTopic: + Deploy-Diagnostics-EventGridTopic: + Deploy-Diagnostics-ExpressRoute: + Deploy-Diagnostics-Firewall: + Deploy-Diagnostics-FrontDoor: + Deploy-Diagnostics-Function: + Deploy-Diagnostics-HDInsight: + Deploy-Diagnostics-iotHub: + Deploy-Diagnostics-LoadBalancer: + Deploy-Diagnostics-LogicAppsISE: + Deploy-Diagnostics-MariaDB: + Deploy-Diagnostics-MediaService: + Deploy-Diagnostics-MlWorkspace: + Deploy-Diagnostics-MySQL: + Deploy-Diagnostics-NetworkSecurityGroups: + Deploy-Diagnostics-NIC: + Deploy-Diagnostics-PostgreSQL: + Deploy-Diagnostics-PowerBIEmbedded: + Deploy-Diagnostics-RedisCache: + Deploy-Diagnostics-Relay: + Deploy-Diagnostics-SignalR: + Deploy-Diagnostics-SQLElasticPools: + Deploy-Diagnostics-SQLMI: + Deploy-Diagnostics-TimeSeriesInsights: + Deploy-Diagnostics-TrafficManager: + Deploy-Diagnostics-VirtualNetwork: + Deploy-Diagnostics-VM: + Deploy-Diagnostics-VMSS: + Deploy-Diagnostics-VNetGW: + Deploy-Diagnostics-WebServerFarm: + Deploy-Diagnostics-Website: + Deploy-Diagnostics-WVDAppGroup: + Deploy-Diagnostics-WVDHostPools: + 
Deploy-Diagnostics-WVDWorkspace: + Deploy-FirewallPolicy: + Deploy-MySQL-sslEnforcement: + Deploy-Nsg-FlowLogs-to-LA: + Deploy-Nsg-FlowLogs: + Deploy-PostgreSQL-sslEnforcement: + Deploy-Sql-AuditingSettings: + Deploy-SQL-minTLS: + Deploy-Sql-SecurityAlertPolicies: + Deploy-Sql-Tde: + Deploy-Sql-vulnerabilityAssessments: + Deploy-SqlMi-minTLS: + Deploy-Storage-sslEnforcement: + Deploy-Windows-DomainJoin: + policy_set_definitions: + Deny-PublicPaaSEndpoints: + Deploy-ASCDF-Config: + Deploy-Diagnostics-LogAnalytics: + Deploy-Sql-Security: + Enforce-Encryption-CMK: + Enforce-EncryptTransit: + remediation: + policy: + policy_set_definitions: + # policyDefinitionReferenceId = https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/blob/c7958266bd227e52dc1a3468e8c881633bc1b373/modules/archetypes/lib/policy_set_definitions/policy_set_definition_es_deploy_diagnostics_loganalytics.tmpl.json#L766 + # /providers/microsoft.management/managementgroups/contlle/providers/microsoft.authorization/policyassignments/deploy-resource-diag + Deploy-Diagnostics-LogAnalytics: + - ExpressRouteDeployDiagnosticLogDeployLogAnalytics + - FirewallDeployDiagnosticLogDeployLogAnalytics + - KeyVaultDeployDiagnosticLogDeployLogAnalytics + - LoadBalancerDeployDiagnosticLogDeployLogAnalytics + - NetworkNICDeployDiagnosticLogDeployLogAnalytics + - NetworkPublicIPNicDeployDiagnosticLogDeployLogAnalytics + - NetworkSecurityGroupsDeployDiagnosticLogDeployLogAnalytics + - RecoveryVaultDeployDiagnosticLogDeployLogAnalytics + - storageaccountdeploydiagnosticlogdeployloganalytics + - VirtualNetworkDeployDiagnosticLogDeployLogAnalytics + - VNetGWDeployDiagnosticLogDeployLogAnalytics + Deploy-ASCDF-Config: + - defenderForOssDb + - defenderForVM + - defenderForSqlServerVirtualMachines + - defenderForAppServices + - defenderForStorageAccounts + - defenderForKubernetesService + - defenderForContainerRegistry + - defenderForKeyVaults + - defenderForDns + - defenderForArm + - defenderForSqlPaas + - securityEmailContact + - ascExport + landing-zones: + archetype_id: landing-zones + policy_assignments: + Deny-IP-Forwarding: + Deny-Priv-Containers-AKS: + Deny-Priv-Escalation-AKS: + Deny-RDP-From-Internet: + Deny-Storage-http: + Deploy-AKS-Policy: + Deploy-SQL-DB-Auditing: + Deploy-SQL-Threat: + Enable-DDoS-VNET: + Enforce-AKS-HTTPS: + Enforce-TLS-SSL: + archetype_config: + access_control: + Owner: + azuread_groups: + lz_key: launchpad + attribute_key: id + resource_keys: + - subscription_creation_landingzones + connectivity: + archetype_id: platform_connectivity + policy_assignments: + Enable-DDoS-VNET: + role_definitions: + CAF-network-vhub-peering: + archetype_config: + access_control: + Owner: + azuread_groups: + lz_key: launchpad + attribute_key: id + resource_keys: + - connectivity + '[-CONNECTIVITY] CAF-network-vhub-peering': + azuread_groups: + lz_key: launchpad + attribute_key: id + resource_keys: + - subscription_creation_landingzones + management: + archetype_id: platform_management + archetype_config: + access_control: + Owner: + azuread_groups: + lz_key: launchpad + attribute_key: id + resource_keys: + - management + identity: + archetype_id: platform_identity + policy_assignments: + Deny-RDP-From-Internet: + Deny-Public-IP: + archetype_config: + access_control: + Owner: + azuread_groups: + lz_key: launchpad + attribute_key: id + resource_keys: + - identity + decommissioned: + archetype_id: platform_decommissioned + platform: + archetype_id: platform + sandboxes: + archetype_id: platform_sandboxes \ No newline 
at end of file diff --git a/templates/enterprise-scale/contoso/platform/eslz/custom_landing_zones.caf.platform.yaml b/templates/enterprise-scale/contoso/platform/eslz/custom_landing_zones.caf.platform.yaml new file mode 100644 index 000000000..bdb6f5356 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/custom_landing_zones.caf.platform.yaml @@ -0,0 +1,21 @@ +archetype_definitions: + corp: + display_name: Corp + archetype_id: landingzone_corp + parent_management_group_id: landing-zones + online: + display_name: Online + archetype_id: landingzone_online + parent_management_group_id: landing-zones + corp-prod: + display_name: Production + archetype_id: landingzone_prod + parent_management_group_id: corp + corp-non-prod: + display_name: Non Production + archetype_id: landingzone_non_prod + parent_management_group_id: corp + online-web: + display_name: Non Production + archetype_id: landingzone_online_web + parent_management_group_id: online \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/README.md b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/README.md new file mode 100644 index 000000000..def2a5a6d --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy assignments + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_assignments diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_caf_aks_capability.json b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_caf_aks_capability.json new file mode 100644 index 000000000..789acde60 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_caf_aks_capability.json @@ -0,0 +1,18 @@ +{ + "name": "aks-capability", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Restrict the capabilities to reduce the attack surface of containers in a Kubernetes cluster. This recommendation is part of CIS 5.2.8 and CIS 5.2.9 which are intended to improve the security of your Kubernetes environments. This policy is generally available for Kubernetes Service (AKS), and preview for AKS Engine and Azure Arc enabled Kubernetes. For more information, see https://aka.ms/kubepolicydoc. 
(labelSelector example - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements)", + "displayName": "Kubernetes cluster containers should only use allowed capabilities.", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/c26596ff-4d70-4e6a-9a30-c2506bd2f80c", + "scope": "${current_scope_resource_id}", + "enforcementMode": true + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_es_allowed_locations.json b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_es_allowed_locations.json new file mode 100644 index 000000000..bce0689ad --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_assignments/policy_assignment_es_allowed_locations.json @@ -0,0 +1,18 @@ +{ + "name": "Allowed-Locations", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Specifies the allowed locations (regions) where Resources can be deployed.", + "displayName": "Limit allowed locations for Resources", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/e56962a6-4747-49cd-b67b-bf8b01975c4c", + "scope": "${current_scope_resource_id}", + "enforcementMode": null + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/policy_definitions/README.md b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_definitions/README.md new file mode 100644 index 000000000..e47f922fd --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_definitions diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/policy_set_definitions/README.md b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_set_definitions/README.md new file mode 100644 index 000000000..c09d2c016 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/policy_set_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy set definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_set_definitions diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/readme.md b/templates/enterprise-scale/contoso/platform/eslz/lib/readme.md new file mode 100644 index 000000000..187ef6e20 --- /dev/null +++ 
b/templates/enterprise-scale/contoso/platform/eslz/lib/readme.md @@ -0,0 +1,2 @@ +# Custom ESLZ library +In this folder you can store the custom definition and assignment objects you need to augment your custom governance. \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/README.md b/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/README.md new file mode 100644 index 000000000..2230928aa --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + + +# List of the default role definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/role_definitions diff --git a/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/role_definition_caf_vhub_peering.json b/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/role_definition_caf_vhub_peering.json new file mode 100644 index 000000000..ef7060687 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/eslz/lib/role_definitions/role_definition_caf_vhub_peering.json @@ -0,0 +1,26 @@ +{ + "name": "48ec94a9-9a14-488d-928d-5e73f96b335c", + "type": "Microsoft.Authorization/roleDefinitions", + "apiVersion": "2018-01-01-preview", + "properties": { + "roleName": "CAF-network-vhub-peering", + "description": "Authorize vnet peerings to the vhub.", + "type": "customRole", + "permissions": [ + { + "actions": [ + "Microsoft.Network/virtualHubs/hubVirtualNetworkConnections/*", + "Microsoft.Network/virtualHubs/read", + "Microsoft.Resources/subscriptions/resourceGroups/read" + ], + "notActions": [ + ], + "dataActions": [], + "notDataActions": [] + } + ], + "assignableScopes": [ + "${current_scope_resource_id}" + ] + } +} \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/identity.yaml b/templates/enterprise-scale/contoso/platform/identity.yaml new file mode 100644 index 000000000..aa201e898 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/identity.yaml @@ -0,0 +1,69 @@ +subscriptions: + identity: + resource_groups: + management: + name: management + alerts: + name: alerts + + service_health_alerts: + enable_service_health_alerts: true + name: alerts + shortname: HealthAlerts + resource_group_key: alerts + action_group_name: actiongrp + email_alert_settings: + support1: + name: email_alert_support1 + email_address: lalesle@microsoft.com + use_common_alert_schema: false + + recovery_vaults: + asr: + name: asr + resource_group_key: management + soft_delete_enabled: true + backup_policies: + vms: + default: + name: vm-default-policy + # Default to UTC + # possible values - https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/ + timezone: "SE Asia Standard Time" + backup: + frequency: Daily + time: "23:00" + retention_daily: + count: 7 + retention_weekly: + count: 2 + weekdays: + - Sunday + retention_monthly: + count: 2 + weeks: + - First + weekdays: + - Sunday + retention_yearly: + count: 1 + weeks: + - First + months: + - January + weekdays: + - Sunday + + +# Bring here your existing Active Directory security groups. 
+# Those are the groups you will inject to RBAC in the Enterprise Scale deployment. +# Note Terraform will create a new Azure AD group and add the existing as a member +# +# level1: +# azuread_groups: +# network_ops_team: +# name: netops +# members: +# # Set the list of the existing groups +# objects_ids: +# - existing_azure_ad_group_object_id diff --git a/templates/enterprise-scale/contoso/platform/launchpad.yaml b/templates/enterprise-scale/contoso/platform/launchpad.yaml new file mode 100644 index 000000000..c9cf11fea --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/launchpad.yaml @@ -0,0 +1,37 @@ +subscriptions: + launchpad: + resource_groups: + level0: + name: caf-level0 + tags: + level: level0 + level1: + name: caf-level1 + tags: + level: level1 + level2: + name: caf-level2 + tags: + level: level2 + + storage_accounts: + level0: + name: l0 + resource_group_key: level0 + level1: + name: l1 + resource_group_key: level1 + level2: + name: l2 + resource_group_key: level2 + + keyvaults: + level0: + name: l0 + resource_group_key: level0 + level1: + name: l1 + resource_group_key: level1 + level2: + name: l2 + resource_group_key: level2 diff --git a/templates/enterprise-scale/contoso/platform/launchpad_credentials.yaml b/templates/enterprise-scale/contoso/platform/launchpad_credentials.yaml new file mode 100644 index 000000000..7d843436b --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/launchpad_credentials.yaml @@ -0,0 +1,415 @@ +subscriptions: + launchpad_credentials: + resource_groups: + sp_credentials: + name: credentials + + keyvaults: + cred_ea_account_owner: + name: eaowner + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + cred_level0: + name: idl0 + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + level0: + lz_key: launchpad + azuread_group_key: level0 + secret_permissions: + - Get + cred_identity: + name: id + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + cred_management: + name: mg + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + management_azuread_group: + lz_key: launchpad + 
azuread_group_key: management + secret_permissions: + - Get + cred_eslz: + name: es + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + eslz_azuread_group: + lz_key: launchpad + azuread_group_key: eslz + secret_permissions: + - Get + cred_connectivity: + name: co + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + connectivity_azuread_group: + lz_key: launchpad + azuread_group_key: connectivity + secret_permissions: + - Get + cred_subscription_creation_platform: + name: scp + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + subscription_creation_platform_azuread_group: + lz_key: launchpad + azuread_group_key: subscription_creation_platform + secret_permissions: + - Get + cred_subscription_creation_landingzones: + name: scl + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + subscription_creation_platform_azuread_group: + lz_key: launchpad + azuread_group_key: subscription_creation_landingzones + secret_permissions: + - Get + cred_gitops: + name: gitops + resource_group_key: sp_credentials + purge_protection_enabled: false + creation_policies: + caf_platform_maintainers: + lz_key: launchpad + azuread_group_key: caf_platform_maintainers + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + identity_azuread_group: + lz_key: launchpad + azuread_group_key: identity + secret_permissions: + - Set + - Get + - List + - Delete + - Purge + - Recover + + keyvault_access_policies: + cred_ea_account_owner: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_level0: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_identity: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_management: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_eslz: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_connectivity: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_subscription_creation_platform: + gitops: + 
azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_subscription_creation_landingzones: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + cred_gitops: + gitops: + azuread_service_principal_key: gitops + secret_permissions: + - Get + + + azuread_applications: + gitops: + application_name: app-azure-platform-credentials-for-gitops + + azuread_service_principals: + gitops: + azuread_application: + key: gitops + + azuread_credentials: + gitops: + type: password + azuread_credential_policy_key: gitops + azuread_application: + key: gitops + keyvaults: + cred_gitops: + secret_prefix: sp + level0: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: level0 + keyvaults: + cred_level0: + secret_prefix: sp + identity: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: identity + keyvaults: + cred_identity: + secret_prefix: sp + management: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: management + keyvaults: + cred_management: + secret_prefix: sp + eslz: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: eslz + keyvaults: + cred_eslz: + secret_prefix: sp + connectivity: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: connectivity + keyvaults: + cred_connectivity: + secret_prefix: sp + subscription_creation_platform: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: subscription_creation_platform + keyvaults: + cred_subscription_creation_platform: + secret_prefix: sp + subscription_creation_landingzones: + type: password + azuread_credential_policy_key: default_policy + azuread_application: + lz_key: launchpad + key: subscription_creation_landingzones + keyvaults: + cred_subscription_creation_landingzones: + secret_prefix: sp + + azuread_credential_policies: + gitops: + length: 250 + special: false + upper: true + number: true + expire_in_days: 360 + rotation_key0: + days: 181 + rotation_key1: + days: 300 + default_policy: + length: 250 + special: false + upper: true + number: true + expire_in_days: 65 + rotation_key0: + days: 33 + rotation_key1: + days: 58 + diff --git a/templates/enterprise-scale/contoso/platform/management.yaml b/templates/enterprise-scale/contoso/platform/management.yaml new file mode 100644 index 000000000..773853f28 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/management.yaml @@ -0,0 +1,83 @@ +subscriptions: + management: + resource_groups: + management: + name: management + alerts: + name: alerts + + diagnostic_log_analytics: + # if you change this key you also need to change it in the ESLZ deployment + # eslz/archetype_config_overrides.caf.platform.yaml + # eslz/custom_landing_zones.caf.platform.yaml + region1: + name: logre1 + resource_group_key: management + + monitor_action_groups: + networking_operations: + action_group_name: Networking Operations + shortname: netops + arm_role_alert: + contributors: + name: contributors + role_name: servicehealth-alerts-contributors + use_common_alert_schema: false + email_receiver: + noc: + name: email_alert_support1 + email_address: + use_common_alert_schema: false + + service_health_alerts: + enable_service_health_alerts: true + name: alerts + shortname: HealthAlerts + 
resource_group_key: alerts + action_group_name: actiongrp + email_alert_settings: + support1: + name: email_alert_support1 + email_address: + use_common_alert_schema: false + + automation_accounts: + account1: + name: automationAccount1 + resource_group_key: management + + recovery_vaults: + asr: + name: asr + resource_group_key: management + soft_delete_enabled: true + backup_policies: + vms: + default: + name: vm-default-policy + # Default to UTC + # possible values - https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/ + timezone: "SE Asia Standard Time" + backup: + frequency: Daily + time: "23:00" + retention_daily: + count: 7 + retention_weekly: + count: 2 + weekdays: + - Sunday + retention_monthly: + count: 2 + weeks: + - First + weekdays: + - Sunday + retention_yearly: + count: 1 + weeks: + - First + months: + - January + weekdays: + - Sunday diff --git a/templates/enterprise-scale/contoso/platform/subscriptions.yaml b/templates/enterprise-scale/contoso/platform/subscriptions.yaml new file mode 100644 index 000000000..66b633780 --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/subscriptions.yaml @@ -0,0 +1,16 @@ +platform_subscriptions: + management: + alias: "management" + name: "-management" + # Do not set the subscription_id when using the automated subscripiton creation + # In that case delete the following attribute. + # When re-using an existing subscripiton, set the GUID of the subscripiton. + subscription_id: + identity: + alias: "identity" + name: "-identity" + subscription_id: + connectivity: + alias: "connectivity" + name: "-connectivity" + subscription_id: \ No newline at end of file diff --git a/templates/enterprise-scale/contoso/platform/tfstates.yaml b/templates/enterprise-scale/contoso/platform/tfstates.yaml new file mode 100644 index 000000000..a85b401da --- /dev/null +++ b/templates/enterprise-scale/contoso/platform/tfstates.yaml @@ -0,0 +1,148 @@ +tfstates: + platform: + ### Level0 ### + launchpad: + lz_key_name: launchpad + tfstate: caf_launchpad.tfstate + workspace: tfstate + base_config_path: launchpad + level: level0 + billing_subscription_role_delegations: + lz_key_name: billing_subscription_role_delegations + tfstate: billing_subscription_role_delegations.tfstate + base_config_path: billing_subscription_role_delegations + level: level0 + launchpad_credentials: + lz_key_name: launchpad_credentials_rotation + tfstate: launchpad_credentials_rotation.tfstate + base_config_path: credentials + level: level0 + + ### Level1 ### + management: + lz_key_name: management + tfstate: management.tfstate + base_config_path: management + level: level1 + identity: + lz_key_name: identity + tfstate: identity.tfstate + base_config_path: identity + level: level1 + eslz: + lz_key_name: eslz + tfstate: eslz.tfstate + base_config_path: eslz + level: level1 + platform_subscriptions: + lz_key_name: platform_subscriptions + tfstate: platform_subscriptions.tfstate + base_config_path: subscriptions + level: level1 + + ## Level2 ## + identity_level2: + prod: + lz_key_name: identity_level2 + tfstate: identity_level2.tfstate + level: level2 + non_prod: + lz_key_name: identity_level2 + tfstate: identity_level2.tfstate + level: level2 + + identity_level2_aadds: + prod: + lz_key_name: identity_level2_aadds + tfstate: identity_level2_aadds.tfstate + level: level2 + + virtual_wan: + lz_key_name: connectivity_virtual_wan + tfstate: connectivity_virtual_wan.tfstate + base_config_path: connectivity/virtual_wan + level: level2 + + virtual_hubs: + prod: + 
lz_key_name: connectivity_virtual_hub_prod + tfstate: connectivity_virtual_hub_prod.tfstate + workspace: tfstate + base_config_path: connectivity/virtual_hub + level: level2 + non_prod: + lz_key_name: connectivity_virtual_hub_non_prod + tfstate: connectivity_virtual_hub_non_prod.tfstate + workspace: tfstate + base_config_path: connectivity/virtual_hub + level: level2 + + vpn_sites: + prod: + lz_key_name: connectivity_vpn_sites_prod + tfstate: connectivity_vpn_sites_prod.tfstate + base_config_path: connectivity/vpn_sites + level: level2 + non_prod: + lz_key_name: connectivity_vpn_sites_non_prod + tfstate: connectivity_vpn_sites_non_prod.tfstate + base_config_path: connectivity/vpn_sites + level: level2 + + express_route_circuits: + prod: + lz_key_name: connectivity_express_route_prod + tfstate: connectivity_express_route_prod.tfstate + base_config_path: connectivity/express_route + level: level2 + non_prod: + lz_key_name: connectivity_express_route_non_prod + tfstate: connectivity_express_route_non_prod.tfstate + base_config_path: connectivity/express_route + level: level2 + + express_route_circuit_peerings: + prod: + lz_key_name: connectivity_express_route_peerings_prod + tfstate: connectivity_express_route_peerings_prod.tfstate + level: level2 + non_prod: + lz_key_name: connectivity_express_route_peerings_non_prod + tfstate: connectivity_express_route_peerings_non_prod.tfstate + level: level2 + + azurerm_firewalls: + prod: + lz_key_name: connectivity_firewalls_prod + tfstate: connectivity_firewalls_prod.tfstate + level: level2 + non_prod: + lz_key_name: connectivity_firewalls_non_prod + tfstate: connectivity_firewalls_non_prod.tfstate + level: level2 + + azurerm_firewall_policies: + prod: + lz_key_name: connectivity_firewall_policies_prod + tfstate: connectivity_firewall_policies_prod.tfstate + level: level2 + non_prod: + lz_key_name: connectivity_firewall_policies_non_prod + tfstate: connectivity_firewall_policies_non_prod.tfstate + level: level2 + + private_dns: + prod: + lz_key_name: connectivity_private_dns_prod + tfstate: connectivity_private_dns_prod.tfstate + level: level2 + non_prod: + lz_key_name: connectivity_private_dns_non_prod + tfstate: connectivity_private_dns_non_prod.tfstate + level: level2 + + + asvm: + lz_key_name: asvm + tfstate: asvm_subscription_vending_machine.tfstate + level: level2 diff --git a/templates/platform/ansible.yaml b/templates/platform/ansible.yaml new file mode 100644 index 000000000..d6d4feb8f --- /dev/null +++ b/templates/platform/ansible.yaml @@ -0,0 +1,287 @@ +- name: CAF Terraform - Generate Azure Subscription Vending Machine (asvm) configuration files + hosts: localhost + vars: + connectivity_virtual_wan: "{{ lookup('file', '{{ config_folder }}/connectivity_virtual_wan.yaml') | from_yaml }}" + connectivity_virtual_hub: "{{ lookup('file', '{{ config_folder }}/connectivity_virtual_hub.yaml') | from_yaml }}" + connectivity_firewall: "{{ lookup('file', '{{ config_folder }}/connectivity_firewall.yaml') | from_yaml }}" + connectivity_firewall_policies: "{{ lookup('file', '{{ config_folder }}/connectivity_firewall_policies.yaml') | from_yaml }}" + connectivity_vpn_sites: "{{ lookup('file', '{{ config_folder }}/connectivity_vpn_sites.yaml') | from_yaml }}" + connectivity_vpn_gateway_connections: "{{ lookup('file', '{{ config_folder }}/connectivity_vpn_gateway_connections.yaml') | from_yaml }}" + connectivity_express_routes: "{{ lookup('file', '{{ config_folder }}/connectivity_express_routes.yaml') | from_yaml }}" + connectivity_express_route_peerings: 
"{{ lookup('file', '{{ config_folder }}/connectivity_express_route_peerings.yaml') | from_yaml }}" + identity: "{{ lookup('file', '{{ config_folder }}/identity.yaml') | from_yaml }}" + management: "{{ lookup('file', '{{ config_folder }}/management.yaml') | from_yaml }}" + subscriptions: "{{ lookup('file', '{{ config_folder }}/subscriptions.yaml') | from_yaml }}" + mg: "{{ lookup('file', '{{ config_folder }}/eslz/archetype_config_overrides.caf.platform.yaml') | from_yaml }}" + mg_custom: "{{ lookup('file', '{{ config_folder }}/eslz/custom_landing_zones.caf.platform.yaml') | from_yaml }}" + mg_struture: "{{ lookup('file', '{{ config_folder }}/eslz/structure.caf.platform.yaml') | from_yaml }}" + tfstates: "{{ lookup('file', '{{ config_folder }}/tfstates.yaml') | from_yaml }}" + base_templates_folder: "{{ base_templates_folder }}" + boostrap_launchpad: boostrap_launchpad | default(false) + deploy_subscriptions: deploy_subscriptions | default(false) + + + tasks: + + - name: "Load variable for platform config" + include_vars: + name: config + dir: "{{config_folder_platform | default(config_folder)}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "caf.platform.yaml|tfstates.caf.yaml|tfstates.yaml" + + - name: "Get latest cache folder" + set_fact: + job_cache_base_path: "/home/vscode/.terraform.cache" + destination_base: "{{ destination_base_path | default(config.configuration_folders.platform.destination_base_path) }}" + + - name: "Creates cache directory" + file: + path: "{{ job_cache_base_path }}/launchpad" + state: directory + + - name: "Destination folder" + debug: + msg: "{{destination_base}}" + + - name: "Content of config" + debug: + msg: "{{config}}" + +# +# Level 0 +# + +## launchpad + + - name: "[{{ level }}-{{ base_folder }}] launchpad" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + vars: + base_folder: "launchpad" + level: "level0" + subscription_key: launchpad + +## credentials + - name: "[{{ level }}-{{ base_folder }}] Setup credentials" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + - config.platform_identity.azuread_identity_mode == "service_principal" + - launchpad_tfstate_exists.rc == 0 + vars: + base_folder: "credentials" + level: "level0" + subscription_key: launchpad_credentials + + - name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: + - config.platform_identity.azuread_identity_mode == "logged_in_user" + - launchpad_tfstate_exists.rc == 0 + vars: + base_folder: "credentials" + level: "level0" + +## billing_subscription_role_delegations + - name: "[{{ level }}-{{ base_folder }}] Configure subscription role delegations" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: ((config.caf_terraform.billing_subscription_role_delegations is defined) and (config.platform_identity.azuread_identity_mode == "service_principal") and (launchpad_tfstate_exists.rc == 0) and (credentials_tfstate_exists is not skipped)) + vars: + base_folder: "billing_subscription_role_delegations" + level: "level0" + + - name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: + - level0_billing_subscription_role_delegations is skipped + vars: + base_folder: "billing_subscription_role_delegations" + 
level: "level0" + + +# +# Level 1 +# + +## subscriptions + - name: "{{ level }}-{{ base_folder }} | Create platform subscriptions" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: (config.platform_core_setup.enterprise_scale.subscription_deployment_mode == "dedicated_new" and config.platform_identity.azuread_identity_mode != "logged_in_user" and launchpad_tfstate_exists is succeeded and credentials_tfstate_exists is succeeded) + vars: + base_folder: "subscriptions" + level: "level1" + + - name: "{{ level }}-{{ base_folder }} | Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: + - level1_subscriptions is skipped + vars: + base_folder: "subscriptions" + level: "level1" + +## management + - name: "{{ level }}-{{ base_folder }} | Management services" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + - (config.platform_management.enable | bool) + - level1_subscriptions is not skipped + - platform_subscriptions_details is defined + + vars: + base_folder: "management" + level: "level1" + subscription_key: management + +## identity + - name: "{{ level }}-{{ base_folder }} | Identity services" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + # - config.platform_core_setup.enterprise_scale.subscription_deployment_mode != "single_reuse" + - launchpad_tfstate_exists is not skipped + - credentials_tfstate_exists is not skipped + - level1_subscriptions is not skipped + - platform_subscriptions_details is defined + - identity.subscriptions is defined + + vars: + base_folder: "identity" + level: "level1" + subscription_key: identity + +## eslz + - name: "{{ level }}-{{ base_folder }} | Enterprise-scale services" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + - (config.platform_core_setup.enterprise_scale.enable | bool) + - ( (config.platform_core_setup.enterprise_scale.enable | bool) and (level1_subscriptions is not skipped) ) or (config.platform_core_setup.enterprise_scale.subscription_deployment_mode == "single_reuse") + - (platform_subscriptions_details is defined) or (config.platform_core_setup.enterprise_scale.subscription_deployment_mode == "single_reuse") + - platform_subscriptions_details.identity is defined + - platform_subscriptions_details.management is defined + + vars: + base_folder: "eslz" + level: "level1" + + +# +# Level 2 +# + +## asvm + - name: "{{ level }}-{{ base_folder }} | Azure Subscription Vending Machine (asvm)" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + - config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine + - launchpad_azuread_groups is defined + - platform_subscriptions_details is defined + vars: + base_folder: "asvm" + level: "level2" + subscription_key: asvm + +## Connectivity + - name: "{{ level }}-{{ base_folder }} | Connectivity services" + import_tasks: "{{ level }}/{{ base_folder }}/ansible.yaml" + when: + - ( (config.networking_topology.deployment_option == "virtual_wan") or (config.platform_identity.azuread_identity_mode == 'logged_in_user') ) + - (platform_subscriptions_details is defined) or (config.platform_core_setup.enterprise_scale.subscription_deployment_mode == "single_reuse") + vars: + base_folder: "connectivity" + level: "level2" + folders: + - virtual_wan + +## identity + - name: "{{ level }}-{{ base_folder }} | Identity services" + import_tasks: "{{ level }}/{{ base_folder 
}}/ansible.yaml" + when: + - config.platform_core_setup.enterprise_scale.subscription_deployment_mode != "single_reuse" + - launchpad_tfstate_exists is not skipped + - credentials_tfstate_exists is not skipped + - level1_subscriptions is not skipped + - (platform_subscriptions_details is defined) or (config.platform_core_setup.enterprise_scale.subscription_deployment_mode == "single_reuse") + + vars: + base_folder: "identity" + level: "level2" + +## Platform readme + + - name: "[{{ base_templates_folder }}] readme" + ansible.builtin.template: + src: "{{ base_templates_folder }}/readme.md" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/readme.md" + force: yes + +# +# Formatting & Linters +# + + - name: Terraform Formatting + shell: | + terraform fmt -recursive {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }} + +# - name: Level 2 - identity +# hosts: localhost +# vars: +# config: "{{ lookup('file', '{{ config_folder }}/platform.yaml') | from_yaml }}" +# identity: "{{ lookup('file', '{{ config_folder }}/identity.yaml') | from_yaml }}" +# connectivity_virtual_wan: "{{ lookup('file', '{{ config_folder }}/connectivity_virtual_wan.yaml') | from_yaml }}" +# connectivity_virtual_hub: "{{ lookup('file', '{{ config_folder }}/connectivity_virtual_hub.yaml') | from_yaml }}" +# connectivity_firewall: "{{ lookup('file', '{{ config_folder }}/connectivity_firewall.yaml') | from_yaml }}" +# connectivity_firewall_policies: "{{ lookup('file', '{{ config_folder }}/connectivity_firewall_policies.yaml') | from_yaml }}" +# cidr: "{{ lookup('file', '{{ config_folder }}/cidr.yaml') | from_yaml }}" +# tfstates: "{{ lookup('file', '{{ config_folder }}/tfstates.yaml') | from_yaml }}" +# base_templates_folder: /tf/caf/templates/platform +# base_folder: identity +# level: level2 +# folders: +# - virtual_wan + + +# tasks: +# - name: Creates {{ level }} directory +# file: +# path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}" +# state: directory + +# - name: Creates {{ base_folder }} directory strcture +# file: +# path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}" +# state: directory + +# - name: "{{ base_folder }} - Readme" +# ansible.builtin.template: +# src: "{{ item }}" +# dest: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" +# force: yes +# with_fileglob: +# - "{{ level }}/{{ base_folder }}/*.md" + +# - name: "{{ base_folder }} - adds" +# include_tasks: "{{ base_templates_folder }}/{{ level }}/{{ base_folder }}/platform.yaml" + + + +# # +# # Pipelines +# # +# - name: Pipelines +# hosts: localhost +# vars: +# config: "{{ lookup('file', '{{ config_folder }}/platform.yaml') | from_yaml }}" +# connectivity: "{{ lookup('file', '{{ config_folder }}/connectivity.yaml') | from_yaml }}" +# cidr: "{{ lookup('file', '{{ config_folder }}/cidr.yaml') | from_yaml }}" +# tfstates: "{{ lookup('file', '{{ config_folder }}/tfstates.yaml') | from_yaml }}" +# base_templates_folder: /tf/caf/templates/platform +# base_folder: pipelines + +# tasks: +# - import_tasks: "{{ base_folder }}/platform.yaml" +# - debug: msg="You can now proceed to the next steps and execute the deployment. 
Refer to the readme in {{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/README.md" diff --git a/templates/platform/level0/README.md b/templates/platform/level0/README.md new file mode 100644 index 000000000..49212d2d2 --- /dev/null +++ b/templates/platform/level0/README.md @@ -0,0 +1,51 @@ +## Introduction +This directory describes the configurations which are deployed to the environment. All the components are deployed in a layered approach. + +### Level 0 +Deployment Elements | Resources Deployed +---------------------| ------------------ +bootstrap | This step bootstraps the environment with the gitops prod subscription, deploys the caf subscription, and creates the service principals. +launchpad | This step deploys the launchpad to store Terraform states and manage deployments. + +### Level 1 + +Deployment Elements | Resources Deployed +----------------------------- | ------------------ +Platform - Subscriptions | Deploys the platform subscriptions such as management, connectivity, and identity +management | Foundation resources for the management subscription, such as service health alerts and log analytics +gitops | This directory hosts the Azure DevOps configurations such as Azure DevOps projects, pipelines, and variable groups +Identity | Hosts the identities for the pipelines; the identities are pushed to the key vault after creation +Enterprise scale - Platform | Deploys eslz resources such as management groups, custom roles, and policies, and maps them to the management groups + + +### Level 2 + +Deployment Elements | Resources Deployed +------------------------------------| ------------------ +Connectivity - Platform | Deploys platform connectivity resources such as resource groups, firewalls, application gateways, virtual networks, and public IPs +Connectivity - hub_connection | Deploys the virtual hub connections for the virtual networks +gitops | Deploys Azure DevOps agents, aks configurations, identities, etc. + + +# Deployment steps +Below are the steps to be followed for deployment. + +## Login to the Azure AD Tenant + +```bash +az account clear +rover login -t + +``` + +## Prerequisites + + + +You need a developer machine configured with the dependencies. 
+ +| Repo | Description | +|---------------------------------------------------------------------------------------------------|------------------------------------------------------------| +| [Azure Windows 10](../../../documentation/maintainer/set_azure_devops_vm.md) | Azure Windows 10 Virtual Desktop with docker engine, wsl2 and vscode | + +``` diff --git a/templates/platform/level0/billing_subscription_role_delegations/ansible.yaml b/templates/platform/level0/billing_subscription_role_delegations/ansible.yaml new file mode 100644 index 000000000..bdcf02efd --- /dev/null +++ b/templates/platform/level0/billing_subscription_role_delegations/ansible.yaml @@ -0,0 +1,22 @@ +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + when: config.caf_terraform.billing_subscription_role_delegations.enable == true + register: level0_billing_subscription_role_delegations + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + +- name: "[{{ level }}-{{ base_folder }}] subscription role delegation" + when: config.caf_terraform.billing_subscription_role_delegations.enable == true + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.tfvars.j2" + - "{{ level }}/{{ base_folder }}/*.md" diff --git a/templates/platform/level0/billing_subscription_role_delegations/landingzone.tfvars.j2 b/templates/platform/level0/billing_subscription_role_delegations/landingzone.tfvars.j2 new file mode 100644 index 000000000..3d6c53658 --- /dev/null +++ b/templates/platform/level0/billing_subscription_role_delegations/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "{{ config.tfstates.platform.launchpad.level }}" + key = "{{ config.tfstates.platform.billing_subscription_role_delegations.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + } +} diff --git a/templates/platform/level0/billing_subscription_role_delegations/readme.md b/templates/platform/level0/billing_subscription_role_delegations/readme.md new file mode 100644 index 000000000..5e25a441a --- /dev/null +++ b/templates/platform/level0/billing_subscription_role_delegations/readme.md @@ -0,0 +1,49 @@ + +### billing_subscription_role_delegations +Set-up the subscription delegations for platform and landingzone subscriptions + +```bash +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with the user {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} +rover login -t {{ config.platform_identity.tenant_name }} + +rover \ + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ 
config.configuration_folders.platform.destination_relative_path }}/level0/billing_subscription_role_delegations \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.billing_subscription_role_delegations.tfstate }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -launchpad \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.billing_subscription_role_delegations.tfstate }}.tfplan \ + -a plan + +rover logout + +``` + +# Run rover ignite to generate the next level configuration files + +To execute this step, you need to log in with one of the CAF maintainers: +{% for maintainer in config.platform_identity.caf_platform_maintainers %} + - {{ maintainer }} +{% endfor %} + +```bash + +rover login -t {{ config.platform_identity.tenant_name }} + +rover ignite \ + --playbook /tf/caf/starter/templates/platform/ansible.yaml \ + -e base_templates_folder={{ base_templates_folder }} \ + -e resource_template_folder={{resource_template_folder}} \ + -e config_folder={{ config_folder }} + +``` + +# Next steps + +When you have successfully deployed the level0 components, you can move to the next step. + +[Deploy the subscriptions](../../level1/subscriptions/readme.md) \ No newline at end of file diff --git a/templates/platform/level0/billing_subscription_role_delegations/subscription_creation_roles.tfvars.j2 b/templates/platform/level0/billing_subscription_role_delegations/subscription_creation_roles.tfvars.j2 new file mode 100644 index 000000000..af20cb1ea --- /dev/null +++ b/templates/platform/level0/billing_subscription_role_delegations/subscription_creation_roles.tfvars.j2 @@ -0,0 +1,24 @@ +subscription_billing_role_assignments = { + # Delegated accounts who can create subscriptions.
+ # Used by Gitops pipelines + subscription_creators = { + billing_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.billing_account_name }}" + enrollment_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.enrollment_account_name }}" + billing_role_definition_name = "Enrollment account subscription creator" + + principals = { + azuread_service_principals = { + subscription_creation_platform = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + key = "subscription_creation_platform" + } + subscription_creation_landingzones = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + key = "subscription_creation_landingzones" + } + } + } + + } + +} \ No newline at end of file diff --git a/templates/platform/level0/credentials/ansible.yaml b/templates/platform/level0/credentials/ansible.yaml new file mode 100644 index 000000000..f5abc0fed --- /dev/null +++ b/templates/platform/level0/credentials/ansible.yaml @@ -0,0 +1,154 @@ +- name: "[{{ level }}-{{ base_folder }}] - Set variables" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: "[{{ level }}-{{ base_folder }}] - Load variable for launchpad" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "launchpad_credentials.yaml" + +- debug: + msg: "{{resources}}" + +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + +# +# resource_groups +# +- name: "[{{ level }}-{{ base_folder }}] - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" +# +# azuread_credentials +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - azuread_credentials" + when: + - resources.subscriptions[subscription_key].azuread_credentials is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_credentials.tfvars.j2" + +# +# azuread_applications +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - azuread_applications" + when: + - resources.subscriptions[subscription_key].azuread_applications is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_applications.tfvars.j2" + +# +# azuread_credential_policies +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - azuread_credential_policies" + when: + - resources.subscriptions[subscription_key].azuread_credential_policies is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ 
destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_credential_policies.tfvars.j2" + +# +# azuread_service_principals +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - azuread_service_principals" + when: + - resources.subscriptions[subscription_key].azuread_service_principals is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_service_principals.tfvars.j2" + + +# +# keyvaults +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - keyvaults" + when: + - resources.subscriptions[subscription_key].keyvaults is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/keyvaults.tfvars.j2" + +# +# keyvault_access_policies +# +- name: "[{{ level }}-{{ subscription_key }}] - credentials - keyvault_access_policies" + when: + - resources.subscriptions[subscription_key].keyvault_access_policies is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/keyvault_access_policies.tfvars.j2" + + +- name: "[{{ level }}-{{ base_folder }}] generate configuration files." + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.tfvars.j2" + +- name: "[{{ level }}-{{ base_folder }}] deploy." + when: boostrap_launchpad | bool + shell: | + /tf/rover/rover.sh \ + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ tfstates.launchpad_credentials.tfstate }} \ + -launchpad \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -a apply + args: + warn: no + +- debug: + msg: "{{ keyvaults.cred_subscription_creation_platform.vault_uri }}" + when: credentials_tfstate_exists.rc == 0 + +- name: "[{{ level }}-{{ base_folder }}] generate configuration files." 
+ ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.md" diff --git a/templates/platform/level0/credentials/dynamic_secrets.tfvars.j2 b/templates/platform/level0/credentials/dynamic_secrets.tfvars.j2 new file mode 100644 index 000000000..3ac296361 --- /dev/null +++ b/templates/platform/level0/credentials/dynamic_secrets.tfvars.j2 @@ -0,0 +1,21 @@ + +# Store output attributes into keyvault secret +# Those values are used by the rover to connect the current remote state and +# identify the lower level +dynamic_keyvault_secrets = { + cred_ea_account_owner = { # ea account owner + account_owner_username = { + secret_name = "account-owner-username" + value = "" + } + account_owner_password = { + secret_name = "account-owner-password" + value = "" + } + tenant_id = { + secret_name = "tenant-id" + value = "{{ config.caf_terraform.launchpad.tenant_id }}" # {{ config.platform_identity.tenant_name }} Tenant + } + } + +} \ No newline at end of file diff --git a/templates/platform/level0/credentials/landingzone.tfvars.j2 b/templates/platform/level0/credentials/landingzone.tfvars.j2 new file mode 100644 index 000000000..30d5e3f8f --- /dev/null +++ b/templates/platform/level0/credentials/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "{{ config.tfstates.platform.launchpad.level }}" + key = "{{ config.tfstates.platform.launchpad_credentials.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + } +} diff --git a/templates/platform/level0/credentials/readme.md b/templates/platform/level0/credentials/readme.md new file mode 100644 index 000000000..23353e464 --- /dev/null +++ b/templates/platform/level0/credentials/readme.md @@ -0,0 +1,74 @@ + +### Generate launchpad credentials + +```bash +# For manual bootstrap: +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with the user {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} +rover login -t {{ config.platform_identity.tenant_name }} + +rover \ +{% if ((config.platform_identity.azuread_identity_mode != "logged_in_user") and (credentials_tfstate_exists.rc == 0)) %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_identity.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.launchpad_credentials.tfstate }} \ + -launchpad \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.launchpad_credentials.tfstate }}.tfplan \ + -a plan + +``` + +If the plan is not successful, go back to the contoso.caf.platform.yaml file, fix the values, and re-execute the 
rover ignite and then run the rover plan again. + + +```bash +# On a successful plan, execute the apply + +rover \ +{% if ((config.platform_identity.azuread_identity_mode != "logged_in_user") and (credentials_tfstate_exists.rc == 0)) %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_identity.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.launchpad_credentials.tfstate }} \ + -launchpad \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.launchpad_credentials.tfstate }}.tfplan \ + -a apply + +``` + +```bash +# On success, re-execute the rover ignite + +rover ignite \ + --playbook /tf/caf/landingzones/templates/platform/ansible.yaml \ + -e base_templates_folder={{ base_templates_folder }} \ + -e resource_template_folder={{resource_template_folder}} \ + -e config_folder={{ config_folder }} + +``` + +If you now refresh the readme of the credentials deployment, you will notice that the rover command has been updated to impersonate the service principal whose credentials have just been created and stored in the keyvault. The goal is to execute the deployment steps with the same privileges that a pipeline will use later. + +Re-execute the plan and apply commands as above and you will see the rover log in as the identity service principal. Once done, run rover logout, as the next step is executed under a different security context. + +# Next steps + +When you have successfully deployed the credentials landing zone, you can move to the next step. 
+ +{% if config.caf_terraform.billing_subscription_role_delegations.enable %} + [Deploy the billing subscription role delegation](../billing_subscription_role_delegations/readme.md) +{% else %} + [Deploy the subscription services](../../level1/subscriptions/readme.md) +{% endif %} diff --git a/templates/platform/level0/credentials/role_mappings.tfvars.j2 b/templates/platform/level0/credentials/role_mappings.tfvars.j2 new file mode 100644 index 000000000..16b3339f7 --- /dev/null +++ b/templates/platform/level0/credentials/role_mappings.tfvars.j2 @@ -0,0 +1,24 @@ + + +# +# Services supported: subscriptions, storage accounts and resource groups +# Can assign roles to: AD groups, AD object ID, AD applications, Managed identities +# + +role_mapping = { + built_in_role_mapping = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + resource_groups = { + sp_credentials = { + "Contributor" = { + azuread_groups = { + keys = [ + "identity" + ] + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + } + } + } + } +{% endif %} + } +} diff --git a/templates/platform/level0/launchpad/ansible.yaml b/templates/platform/level0/launchpad/ansible.yaml new file mode 100644 index 000000000..bd329f53c --- /dev/null +++ b/templates/platform/level0/launchpad/ansible.yaml @@ -0,0 +1,188 @@ +- name: "[{{ level }}-{{ base_folder }}] - Set variables" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: "[{{ level }}-{{ base_folder }}] - Load variable for launchpad" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "launchpad.yaml|level0.yaml|configuration.caf.platform.yaml" + +- debug: + msg: "{{resources}}" + +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + + +# +# resource_groups +# +- name: "[{{ level }}-{{ base_folder }}] - resources - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" + + +- name: "[{{ level }}-{{ base_folder }}] launchpad" + ansible.builtin.template: + src: "{{ level }}/{{ base_folder }}/{{ item }}.tfvars.j2" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item }}.tfvars" + force: yes + loop: + - dynamic_secrets + - global_settings + - keyvaults + - landingzone + - role_mappings + - storage_accounts + +- name: "[{{ level }}-{{ base_folder }}] Clean-up identity files" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item }}.tfvars" + state: absent + when: config.platform_identity.azuread_identity_mode == "logged_in_user" + loop: + - azuread_api_permissions + - 
azuread_applications + - azuread_group_members + - azuread_groups + - azuread_roles + - keyvault_policies + - service_principals + +- name: "[{{ level }}-{{ base_folder }}] lauchpad - identity - service_principal" + ansible.builtin.template: + src: "{{ level }}/{{ base_folder }}/{{ item }}.tfvars.j2" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item }}.tfvars" + force: yes + when: config.platform_identity.azuread_identity_mode != 'logged_in_user' + loop: + - azuread_api_permissions + - azuread_applications + - azuread_group_members + - azuread_groups + - azuread_roles + - keyvault_policies + - service_principals + +- name: "[{{ level }}-{{ base_folder }}] Deploy the launchpad" + when: boostrap_launchpad | bool | default(false) + shell: | + /tf/rover/rover.sh \ + -lz /tf/caf/landingzones/caf_launchpad \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.launchpad.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -launchpad \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -a apply + +- name: "[{{ level }}-{{ base_folder }}] Get tfstate account name" + register: launchpad_storage_account + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates.platform.launchpad.level }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{launchpad_storage_account}}" + +- name: "[{{ level }}-{{ base_folder }}] Get launchpad tfstate details" + register: launchpad_tfstate_exists + ignore_errors: true + shell: | + az storage blob download \ + --name "{{ config.tfstates.platform.launchpad.tfstate }}" \ + --account-name "{{ launchpad_storage_account.stdout | default('') }}" \ + --container-name "{{ config.tfstates.platform.launchpad.workspace | default('tfstate') }}" \ + --auth-mode "login" \ + --file "~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad.tfstate }}" + +- name: "[{{ level }}-{{ base_folder }}] Get subscription_creation_landingzones details" + when: + - launchpad_tfstate_exists.rc == 0 + - config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine + shell: "cat ~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad.tfstate }}" + register: launchpad_tfstate + +- name: "[{{ level }}-{{ base_folder }}] Get launchpad json data" + when: + - launchpad_tfstate_exists.rc == 0 + - config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine + set_fact: + scljsondata: "{{ launchpad_tfstate.stdout | from_json }}" + +- name: "[{{ level }}-{{ base_folder }}] set launchpad_azuread_groups" + when: + - launchpad_tfstate_exists.rc == 0 + - config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine + set_fact: + launchpad_azuread_groups: "{{ scljsondata | json_query(path) }}" + vars: + path: 'outputs.objects.value.launchpad.azuread_groups' + +- name: "[{{ level }}-{{ base_folder }}] Get credentials tfstate details" + register: credentials_tfstate_exists + ignore_errors: true + shell: | + az 
storage blob download \ + --name "{{ config.tfstates.platform.launchpad_credentials.tfstate }}" \ + --account-name "{{ launchpad_storage_account.stdout }}" \ + --container-name "{{ config.tfstates.platform.launchpad.workspace | default('tfstate') }}" \ + --auth-mode "login" \ + --file "~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad_credentials.tfstate }}" + +- name: "[{{ level }}-{{ base_folder }}] Get launchpad_credentials details" + when: credentials_tfstate_exists.rc == 0 + shell: "cat ~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad_credentials.tfstate }}" + register: launchpad_credentials + +- name: "[{{ level }}-{{ base_folder }}] Get launchpad_credentials json data" + when: credentials_tfstate_exists.rc == 0 + set_fact: + credjsondata: "{{ launchpad_credentials.stdout | from_json }}" + +- name: "[{{ level }}-{{ base_folder }}] set keyvaults" + when: credentials_tfstate_exists.rc == 0 + set_fact: + keyvaults: "{{ credjsondata | json_query(path) }}" + vars: + path: 'outputs.objects.value.launchpad_credentials_rotation.keyvaults' + +- name: "[{{ level }}-{{ base_folder }}] cleanup" + when: credentials_tfstate_exists.rc == 0 + file: + path: "~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad_credentials.tfstate }}" + state: absent + +- name: "[{{ level }}-{{ base_folder }}] cleanup" + when: launchpad_tfstate_exists.rc == 0 + file: + path: "~/.terraform.cache/launchpad/{{ config.tfstates.platform.launchpad.tfstate }}" + state: absent + +# Update readme +- name: "[{{ level }}-{{ base_folder }}] launchpad - readme" + ansible.builtin.template: + src: "{{ level }}/{{ base_folder }}/readme.md" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/readme.md" + force: yes \ No newline at end of file diff --git a/templates/platform/level0/launchpad/azuread_api_permissions.tfvars.j2 b/templates/platform/level0/launchpad/azuread_api_permissions.tfvars.j2 new file mode 100644 index 000000000..01e3e4656 --- /dev/null +++ b/templates/platform/level0/launchpad/azuread_api_permissions.tfvars.j2 @@ -0,0 +1,60 @@ + + +azuread_api_permissions = { + level0 = { + microsoft_graph = { + resource_app_id = "00000003-0000-0000-c000-000000000000" + resource_access = { + AppRoleAssignment_ReadWrite_All = { + id = "06b708a9-e830-4db3-a914-8e69da51d44f" + type = "Role" + } + DelegatedPermissionGrant_ReadWrite_All = { + id = "8e8e4742-1d95-4f68-9d56-6ee75648c72a" + type = "Role" + } + DelegatedPermissionGrant_ReadWrite_All = { + id = "18a4783c-866b-4cc7-a460-3d5e5662c884" + type = "Role" + } + } + } + } + identity = { + active_directory_graph = { + resource_app_id = "00000002-0000-0000-c000-000000000000" + resource_access = { + Application_ReadWrite_OwnedBy = { + id = "824c81eb-e3f8-4ee6-8f6d-de7f50d565b7" + type = "Role" + } + Directory_ReadWrite_All = { + id = "78c8a3c8-a07e-4b9e-af1b-b5ccab50a175" + type = "Role" + } + } + } + microsoft_graph = { + resource_app_id = "00000003-0000-0000-c000-000000000000" + resource_access = { + AppRoleAssignment_ReadWrite_All = { + id = "06b708a9-e830-4db3-a914-8e69da51d44f" + type = "Role" + } + DelegatedPermissionGrant_ReadWrite_All = { + id = "8e8e4742-1d95-4f68-9d56-6ee75648c72a" + type = "Role" + } + GroupReadWriteAll = { + id = "62a82d76-70ea-41e2-9197-370581804d09" + type = "Role" + } + RoleManagement_ReadWrite_Directory = { + id = "9e3f62cf-ca93-4989-b6ce-bf83c28f9fe8" + type = "Role" + } + } + } + } + +} diff --git 
a/templates/platform/level0/launchpad/azuread_applications.tfvars.j2 b/templates/platform/level0/launchpad/azuread_applications.tfvars.j2 new file mode 100644 index 000000000..f929fa977 --- /dev/null +++ b/templates/platform/level0/launchpad/azuread_applications.tfvars.j2 @@ -0,0 +1,44 @@ +azuread_applications = { + level0 = { + application_name = "sp-caf-level0" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + identity = { + application_name = "sp-caf-identity" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + management = { + application_name = "sp-caf-management" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + eslz = { + application_name = "sp-caf-eslz" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + connectivity = { + application_name = "sp-caf-connectivity" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + subscription_creation_platform = { + application_name = "sp-caf-subscription-creation-platform" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + subscription_creation_landingzones = { + application_name = "sp-caf-subscription-creation-landingzones" +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/azuread_group_members.tfvars.j2 b/templates/platform/level0/launchpad/azuread_group_members.tfvars.j2 new file mode 100644 index 000000000..ec156a815 --- /dev/null +++ b/templates/platform/level0/launchpad/azuread_group_members.tfvars.j2 @@ -0,0 +1,34 @@ +azuread_groups_membership = { + caf_platform_maintainers = { +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + object_ids = { + logged_in = { + keys = ["user"] + } + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + members = { + user_principal_names = [ + "{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }}", +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user'%} +{% for user in 
config.platform_identity.caf_platform_maintainers %} + "{{ user }}", +{% endfor %} +{% endif %} + ] + } +{% endif %} + } + caf_platform_contributors = { + members = { + user_principal_names = [ +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' and config.platform_identity.caf_platform_contributors is defined %} +{% for user in config.platform_identity.caf_platform_contributors %} + "{{ user }}", +{% endfor %} +{% endif %} + ] + } + } +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/azuread_groups.tfvars.j2 b/templates/platform/level0/launchpad/azuread_groups.tfvars.j2 new file mode 100644 index 000000000..a551a004a --- /dev/null +++ b/templates/platform/level0/launchpad/azuread_groups.tfvars.j2 @@ -0,0 +1,97 @@ +azuread_groups = { + caf_platform_maintainers = { + name = "caf-platform-maintainers" + description = "High privileged group to run all CAF deployments from vscode. Can be used to bootstrap or troubleshoot deployments." + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + + caf_platform_contributors = { + name = "caf-platform-contributors" + description = "Can only execute terraform plans for level1 and level2. They can test platform improvements and propose PR." + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + } + + level0 = { + name = "caf-level0" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["level0"] + } + } + + eslz = { + name = "caf-eslz" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["eslz"] + } + } + + identity = { + name = "caf-identity" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["identity"] + } + } + + management = { + name = "caf-management" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["management"] + } + } + + connectivity = { + name = "caf-connectivity" + prevent_duplicate_name = true +{% if 
config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["connectivity"] + } + } + + subscription_creation_platform = { + name = "caf-subscription_creation_platform" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["subscription_creation_platform"] + } + } + + subscription_creation_landingzones = { + name = "caf-subscription_creation_landingzones" + prevent_duplicate_name = true +{% if config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id is defined %} + owners = ["{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}"] // EA account +{% endif %} + members = { + azuread_service_principal_keys = ["subscription_creation_landingzones"] + } + } + +} diff --git a/templates/platform/level0/launchpad/azuread_roles.tfvars.j2 b/templates/platform/level0/launchpad/azuread_roles.tfvars.j2 new file mode 100644 index 000000000..88162fee6 --- /dev/null +++ b/templates/platform/level0/launchpad/azuread_roles.tfvars.j2 @@ -0,0 +1,28 @@ +# +# Available roles: +# az rest --method Get --uri https://graph.microsoft.com/v1.0/directoryRoleTemplates -o json | jq -r .value[].displayName +# +azuread_roles = { + azuread_service_principals = { + level0 = { + roles = [ + "Privileged Role Administrator", + "Application Administrator", + "Groups Administrator" + ] + } + identity = { + roles = [ + "User Administrator", + "Application Administrator", + "Groups Administrator" + ] + } + subscription_creation_landingzones = { + roles = [ + "Application Administrator", + "Groups Administrator" + ] + } + } +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/dynamic_secrets.tfvars.j2 b/templates/platform/level0/launchpad/dynamic_secrets.tfvars.j2 new file mode 100644 index 000000000..0b967297e --- /dev/null +++ b/templates/platform/level0/launchpad/dynamic_secrets.tfvars.j2 @@ -0,0 +1,67 @@ + +# Store output attributes into keyvault secret +# Those values are used by the rover to connect the current remote state and +# identity the lower level +dynamic_keyvault_secrets = { + level0 = { + subscription_id = { + output_key = "client_config" + attribute_key = "subscription_id" + secret_name = "subscription-id" + } + tenant_id = { + output_key = "client_config" + attribute_key = "tenant_id" + secret_name = "tenant-id" + } + } + level1 = { + lower_stg = { + output_key = "storage_accounts" + resource_key = "level0" + attribute_key = "name" + secret_name = "lower-storage-account-name" + } + lower_rg = { + output_key = "resource_groups" + resource_key = "level0" + attribute_key = "name" + secret_name = "lower-resource-group-name" + } + subscription_id = { + output_key = "client_config" + attribute_key = "subscription_id" + secret_name = "subscription-id" + } + tenant_id = { + output_key = "client_config" + attribute_key = "tenant_id" + secret_name = "tenant-id" + } + } + level2 = { + lower_stg = { + output_key = "storage_accounts" + resource_key = "level1" + attribute_key 
= "name" + secret_name = "lower-storage-account-name" + } + lower_rg = { + output_key = "resource_groups" + resource_key = "level1" + attribute_key = "name" + secret_name = "lower-resource-group-name" + } + subscription_id = { + output_key = "client_config" + attribute_key = "subscription_id" + secret_name = "subscription-id" + } + tenant_id = { + output_key = "client_config" + attribute_key = "tenant_id" + secret_name = "tenant-id" + } + } + +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/global_settings.tfvars.j2 b/templates/platform/level0/launchpad/global_settings.tfvars.j2 new file mode 100644 index 000000000..6bd04bfce --- /dev/null +++ b/templates/platform/level0/launchpad/global_settings.tfvars.j2 @@ -0,0 +1,43 @@ +# naming convention settings +# for more settings on naming convention, please refer to the provider documentation: https://github.com/aztfmod/terraform-provider-azurecaf +# +# passthrough means the default CAF naming convention is not applied and you are responsible +# of the unicity of the names you are giving. the CAF provider will clear out +passthrough = {{ config.caf_terraform.naming_convention.passthrough | string | lower }} +# adds random chars at the end of the names produced by the provider +# Do not change the following values once the launchpad deployed. +# Enable tag inheritance (can be changed) +inherit_tags = {{ config.caf_terraform.naming_convention.inherit_tags | string | lower }} +# When passthrough is set to false, define the number of random characters to add to the names +random_length = {{ config.caf_terraform.naming_convention.random_length }} +# Set the prefix that will be added to all azure resources. +# if not set and passthrough=false, the CAF module generates a random one. +{% if config.caf_terraform.naming_convention.prefix is defined %} +prefix = "{{ config.caf_terraform.naming_convention.prefix }}" +{% endif %} + +# Default region. When not set to a resource it will use that value +default_region = "{{ config.caf_terraform.launchpad.default_region_key }}" + +# You can reference the regions by using region1, region2 or set your own keys +regions = { +{% for key in config.caf_terraform.launchpad.regions.keys() %} + {{ key }} = "{{ config.caf_terraform.launchpad.regions[key].name }}" +{% endfor %} +} + +# Rover will adjust some tags to enable the discovery of the launchpad. 
+launchpad_key_names = { + keyvault = "level0" + tfstates = [ + "level0", + "level1", + "level2" + ] +} + +# Global tags +tags = { + ApplicationOwner = "sre" + BusinessUnit = "sre" +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/keyvault_policies.tfvars.j2 b/templates/platform/level0/launchpad/keyvault_policies.tfvars.j2 new file mode 100644 index 000000000..0835a0d73 --- /dev/null +++ b/templates/platform/level0/launchpad/keyvault_policies.tfvars.j2 @@ -0,0 +1,64 @@ +keyvault_access_policies = { + # A maximum of 16 access policies per keyvault + level0 = { + sp_level0 = { + azuread_group_key = "level0" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } + identity = { + azuread_group_key = "identity" + secret_permissions = ["Get"] + } + } + + # A maximum of 16 access policies per keyvault + level1 = { + sp_level0 = { + # Allow level1 devops agent to be managed from agent pool level0 + azuread_group_key = "level0" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } + identity = { + azuread_group_key = "identity" + secret_permissions = ["Get"] + } + management = { + azuread_group_key = "management" + secret_permissions = ["Get"] + } + eslz = { + azuread_group_key = "eslz" + secret_permissions = ["Get"] + } + subscription_creation_platform = { + azuread_group_key = "subscription_creation_platform" + secret_permissions = ["Get"] + } + } + # A maximum of 16 access policies per keyvault + level2 = { + sp_level0 = { + azuread_group_key = "level0" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } + connectivity = { + azuread_group_key = "connectivity" + secret_permissions = ["Get"] + } + identity = { + azuread_group_key = "identity" + secret_permissions = ["Get"] + } + management = { + azuread_group_key = "management" + secret_permissions = ["Get"] + } +{% if config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine %} + subscription_creation_landingzones = { + azuread_group_key = "subscription_creation_landingzones" + secret_permissions = ["Get"] + } +{% endif %} + } + +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/keyvaults.tfvars.j2 b/templates/platform/level0/launchpad/keyvaults.tfvars.j2 new file mode 100644 index 000000000..7bc86e60e --- /dev/null +++ b/templates/platform/level0/launchpad/keyvaults.tfvars.j2 @@ -0,0 +1,98 @@ + +keyvaults = { + level0 = { + name = "{{ resources.subscriptions[subscription_key].keyvaults.level0.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults.level0.resource_group_key }}" + sku_name = "{{ config.platform_core_setup.sku.keyvault}}" + tags = { + caf_tfstate = "level0" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + } + + creation_policies = { + // {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} + bootstrap_user = { + object_id = "{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + caf_platform_maintainers = { + azuread_group_key = "caf_platform_maintainers" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + # if the key is 
set to "logged_in_user" add the user running terraform in the keyvault policy + # More examples in /examples/keyvault + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} + } + + } + + level1 = { + name = "{{ resources.subscriptions[subscription_key].keyvaults.level1.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults.level1.resource_group_key }}" + sku_name = "{{ config.platform_core_setup.sku.keyvault}}" + tags = { + caf_tfstate = "level1" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + } + + creation_policies = { + // {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} + bootstrap_user = { + object_id = "{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + caf_platform_maintainers = { + azuread_group_key = "caf_platform_maintainers" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + # if the key is set to "logged_in_user" add the user running terraform in the keyvault policy + # More examples in /examples/keyvault + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} + } + + } + + level2 = { + name = "{{ resources.subscriptions[subscription_key].keyvaults.level2.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults.level2.resource_group_key }}" + sku_name = "{{ config.platform_core_setup.sku.keyvault}}" + tags = { + caf_tfstate = "level2" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + } + + creation_policies = { + // {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} + bootstrap_user = { + object_id = "{{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner_object_id }}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + caf_platform_maintainers = { + azuread_group_key = "caf_platform_maintainers" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + # if the key is set to "logged_in_user" add the user running terraform in the keyvault policy + # More examples in /examples/keyvault + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} + } + + } +} diff --git a/templates/platform/level0/launchpad/landingzone.tfvars.j2 b/templates/platform/level0/launchpad/landingzone.tfvars.j2 new file mode 100644 index 000000000..9fe64e7ca --- /dev/null +++ b/templates/platform/level0/launchpad/landingzone.tfvars.j2 @@ -0,0 +1,5 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + level = "{{ config.tfstates.platform.launchpad.level }}" + key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/readme.md b/templates/platform/level0/launchpad/readme.md new file mode 100644 index 000000000..fb887b631 --- /dev/null +++ 
b/templates/platform/level0/launchpad/readme.md @@ -0,0 +1,117 @@ +# Launchpad - {{ config.caf_terraform.launchpad.caf_environment }} + +## Pre-requisites + +This scenario requires the following privileges: + +| Component | Privileges | +|--------------------|--------------------| +| Active Directory | None | +| Azure subscription | Subscription owner | + +## Deployment + +{% if config.caf_terraform.billing_subscription_role_delegations is defined %} +### Pre-requisite + +Elevate your credentials to the tenant root level to have enough privileges to create the management group hierarchy. + +```bash +{% if config.caf_terraform.billing_subscription_role_delegations.enable %} +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with the user {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} +{% else %} +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with an account owner. +{% endif %} +rover login -t {{ config.platform_identity.tenant_name }} +az rest --method post --url "/providers/Microsoft.Authorization/elevateAccess?api-version=2016-07-01" + +``` +{% endif %} + +### Launchpad + +```bash +{% if config.caf_terraform.billing_subscription_role_delegations is defined %} +{% if config.caf_terraform.billing_subscription_role_delegations.enable %} +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with the user {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} +{% else %} +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with an account owner. +{% endif %} +{% endif %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ config.caf_terraform.launchpad.subscription_id }} + +cd /tf/caf/landingzones +git fetch origin +git checkout {{ config.gitops.caf_landingzone_branch }} + +rover \ +{% if ((config.platform_identity.azuread_identity_mode != "logged_in_user") and (credentials_tfstate_exists.rc == 0)) %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_level0.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_launchpad \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.launchpad.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -launchpad \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.launchpad.tfstate }}.tfplan \ + -a plan + +``` + +If the plan is not successfull you need to come back to the yaml contoso.caf.platform.yaml, fix the values, re-execute the rover ignite and then rover plan. 
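+ +Optionally, you can review the saved plan in more detail before applying. The snippet below is only a sketch: it assumes the terraform binary shipped in the rover container is used and that TF_DATA_DIR still points to the cache populated by the plan step. + +```bash +# Optional review step (assumption: run from the initialized landing zone folder) +cd /tf/caf/landingzones/caf_launchpad +terraform show ${TF_DATA_DIR}/{{ config.tfstates.platform.launchpad.tfstate }}.tfplan +```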
+ + +```bash +# On success plan, execute + +rover \ +{% if ((config.platform_identity.azuread_identity_mode != "logged_in_user") and (credentials_tfstate_exists.rc == 0)) %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_level0.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_launchpad \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.launchpad.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -launchpad \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.launchpad.tfstate }}.tfplan \ + -a apply + +``` + +```bash +# On success, re-execute the rover ignite + +rover ignite \ + --playbook /tf/caf/landingzones/templates/platform/ansible.yaml \ + -e base_templates_folder={{ base_templates_folder }} \ + -e resource_template_folder={{resource_template_folder}} \ + -e config_folder={{ config_folder }} + +``` + +Execute a rover logout and rover login in order to make sure your azure sessions has the Azure groups membership updated. + +```bash +rover logout + +rover login -t {{ config.platform_identity.tenant_name }} + +``` + +# Next steps + +When you have successfully deployed the launchpad you can move to the next step. + +{% if config.platform_identity.azuread_identity_mode == 'service_principal' %} + [Deploy the credentials landing zone](../credentials/readme.md) +{% else %} + [Deploy the management services](../../level1/management/readme.md) +{% endif %} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/role_mappings.tfvars.j2 b/templates/platform/level0/launchpad/role_mappings.tfvars.j2 new file mode 100644 index 000000000..0eca43529 --- /dev/null +++ b/templates/platform/level0/launchpad/role_mappings.tfvars.j2 @@ -0,0 +1,198 @@ + +# +# Services supported: subscriptions, storage accounts and resource groups +# Can assign roles to: AD groups, AD object ID, AD applications, Managed identities +# + +role_mapping = { + built_in_role_mapping = { +{% if config.platform_core_setup %} + management_group = { + root = { + "User Access Administrator" = { +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in = { + keys = ["user"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = ["level0"] + } +{% endif %} + } + "Management Group Contributor" = { +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in = { + keys = ["user"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = ["eslz", "caf_platform_maintainers"] + } +{% endif %} + } + "Owner" = { +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in = { + keys = ["user"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = ["eslz", "caf_platform_maintainers"] + } +{% endif %} + } + } + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + subscriptions = { + logged_in_subscription = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Owner" = { 
+ azuread_groups = { + keys = ["level0", "caf_platform_maintainers", "subscription_creation_platform"] + } + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Reader" = { + azuread_groups = { + keys = ["identity"] + } + } +{% endif %} + } + } +{% endif %} + +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + resource_groups = { + level0 = { + "Reader" = { + azuread_groups = { + keys = [ + "identity", + "subscription_creation_platform" + ] + } + } + } + level1 = { + "Reader" = { + azuread_groups = { + keys = [ + "identity", + "management", + "eslz", + "subscription_creation_platform" + ] + } + } + } + level2 = { + "Reader" = { + azuread_groups = { + keys = [ + "identity", + "connectivity", + "management", + "subscription_creation_landingzones" + ] + } + } + } + } +{% endif %} + + storage_accounts = { + level0 = { + "Storage Blob Data Contributor" = { + logged_in = { + keys = ["user"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = ["level0", "caf_platform_maintainers", "identity"] + } +{% endif %} + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Storage Blob Data Reader" = { + azuread_groups = { + keys = [ + "management", + "eslz", + "subscription_creation_platform" + ] + } + } +{% endif %} + } + + level1 = { + "Storage Blob Data Contributor" = { + logged_in = { + keys = ["user"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = [ + "caf_platform_maintainers", + "identity", + "management", + "eslz", + "subscription_creation_platform" + ] + } +{% endif %} + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Storage Blob Data Reader" = { + azuread_groups = { + keys = [ + "connectivity", +{% if config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine %} + "level0" +{% endif %} + ] + } + } +{% endif %} + } + + level2 = { + "Storage Blob Data Contributor" = { + logged_in = { + keys = ["user"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + azuread_groups = { + keys = [ + "identity", + "connectivity", + "management", + "caf_platform_maintainers", +{% if config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine %} + "level0" +{% endif %} + ] + } + } +{% endif %} + "Storage Blob Data Reader" = { + azuread_groups = { + keys = [ +{% if config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine %} + "subscription_creation_landingzones" +{% endif %} + ] + } + } + } + } + } +} diff --git a/templates/platform/level0/launchpad/service_principals.tfvars.j2 b/templates/platform/level0/launchpad/service_principals.tfvars.j2 new file mode 100644 index 000000000..75e0ef5d2 --- /dev/null +++ b/templates/platform/level0/launchpad/service_principals.tfvars.j2 @@ -0,0 +1,44 @@ +azuread_service_principals = { + # Manage the deployment of the level0 + level0 = { + azuread_application = { + key = "level0" + } + } + # Manage the deployment of Enterprise Scale + eslz = { + azuread_application = { + key = "eslz" + } + } + # Manage the deployment of the connectivity services + connectivity = { + azuread_application = { + key = "connectivity" + } + } + # Manage the deployment of the shared services + management = { + azuread_application = { + key = "management" + } + } + # Manage the deployment of the identity services + identity = { + 
azuread_application = { + key = "identity" + } + } + # Has delegation to create platform subscriptions + subscription_creation_platform = { + azuread_application = { + key = "subscription_creation_platform" + } + } + # Has delegation to create landingzone subscriptions + subscription_creation_landingzones = { + azuread_application = { + key = "subscription_creation_landingzones" + } + } +} \ No newline at end of file diff --git a/templates/platform/level0/launchpad/storage_accounts.tfvars.j2 b/templates/platform/level0/launchpad/storage_accounts.tfvars.j2 new file mode 100644 index 000000000..8aa17568c --- /dev/null +++ b/templates/platform/level0/launchpad/storage_accounts.tfvars.j2 @@ -0,0 +1,90 @@ + +storage_accounts = { + level0 = { + name = "{{ resources.subscriptions[subscription_key].storage_accounts.level0.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].storage_accounts.level0.resource_group_key }}" + account_kind = "BlobStorage" + account_tier = "Standard" + shared_access_key_enabled = false + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + + tags = { + ## Those tags must never be changed after being set as they are used by the rover to locate the launchpad and the tfstates. + # Only adjust the environment value at creation time + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_launchpad = "launchpad" + caf_tfstate = "level0" + ## + } + + blob_properties = { + versioning_enabled = {{ config.caf_terraform.launchpad.blob_versioning_enabled | string | lower | default('true') }} + container_delete_retention_policy = {{ config.caf_terraform.launchpad.container_delete_retention_policy | default(7) }} + delete_retention_policy = {{ config.caf_terraform.launchpad.delete_retention_policy | default(7) }} + } + + containers = { + {{ config.tfstates.platform.launchpad.workspace | default('tfstate') }} = { + name = "{{ config.tfstates.platform.launchpad.workspace | default('tfstate') }}" + } + } + } + + level1 = { + name = "{{ resources.subscriptions[subscription_key].storage_accounts.level1.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].storage_accounts.level1.resource_group_key }}" + account_kind = "BlobStorage" + account_tier = "Standard" + shared_access_key_enabled = false + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + + tags = { + # Those tags must never be changed while set as they are used by the rover to locate the launchpad and the tfstates. 
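+      # (The launchpad playbook and the rover locate these tfstate storage accounts with an +      #  "az storage account list" query filtered on the caf_tfstate and caf_environment tags.)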
+ caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_launchpad = "launchpad" + caf_tfstate = "level1" + } + + blob_properties = { + versioning_enabled = {{ config.caf_terraform.launchpad.blob_versioning_enabled | string | lower | default('true') }} + container_delete_retention_policy = {{ config.caf_terraform.launchpad.container_delete_retention_policy | default(7) }} + delete_retention_policy = {{ config.caf_terraform.launchpad.delete_retention_policy | default(7) }} + } + + containers = { + {{ config.tfstates.platform.launchpad.workspace | default('tfstate') }} = { + name = "{{ config.tfstates.platform.launchpad.workspace | default('tfstate') }}" + } + } + } + + level2 = { + name = "{{ resources.subscriptions[subscription_key].storage_accounts.level2.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].storage_accounts.level2.resource_group_key }}" + account_kind = "BlobStorage" + account_tier = "Standard" + shared_access_key_enabled = false + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + + tags = { + # Those tags must never be changed while set as they are used by the rover to locate the launchpad and the tfstates. + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_launchpad = "launchpad" + caf_tfstate = "level2" + } + + blob_properties = { + versioning_enabled = {{ config.caf_terraform.launchpad.blob_versioning_enabled | string | lower | default('true') }} + container_delete_retention_policy = {{ config.caf_terraform.launchpad.container_delete_retention_policy | default(7) }} + delete_retention_policy = {{ config.caf_terraform.launchpad.delete_retention_policy | default(7) }} + } + + containers = { + {{ config.tfstates.platform.launchpad.workspace | default('tfstate') }} = { + name = "{{ config.tfstates.platform.launchpad.workspace | default('tfstate') }}" + } + } + } + + +} \ No newline at end of file diff --git a/templates/platform/level1/README.md b/templates/platform/level1/README.md new file mode 100644 index 000000000..bde936c5d --- /dev/null +++ b/templates/platform/level1/README.md @@ -0,0 +1,3 @@ +# Cloud Adoption Framework landing zones for Terraform - Starter template + +Place here your production environment configuration files. 
\ No newline at end of file diff --git a/templates/platform/level1/eslz/ansible.yaml b/templates/platform/level1/eslz/ansible.yaml new file mode 100644 index 000000000..583a6ed3e --- /dev/null +++ b/templates/platform/level1/eslz/ansible.yaml @@ -0,0 +1,58 @@ +- name: "{{ level }}-{{ base_folder }} | Clean-up base directory" + shell: | + rm -rf "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + when: + - config.platform_core_setup.enterprise_scale.enable + - config.platform_core_setup.enterprise_scale.clean_up_destination_folder + +- name: "{{ level }}-{{ base_folder }} | Creates directory structure" + shell: mkdir -p "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/lib/{{ item.path }}" + with_filetree: "{{ level }}/{{ base_folder }}/lib/{{ config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy }}" + when: item.state == 'directory' + +- name: "{{ level }}-{{ base_folder }} | Tfvars" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.j2" + - "{{ level }}/{{ base_folder }}/*.md" + +- name: "{{ level }}-{{ base_folder }} | Lib - archetypes - built-in" + ansible.builtin.template: + src: "{{ base_templates_folder }}/{{ level }}/eslz/lib/{{ config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy }}/archetype_definitions/archetype_definition_template.json.j2" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/lib/archetype_definitions/archetype_definition_{{ mg.archetype_definitions[item].archetype_id }}.json" + force: yes + loop: "{{ mg.archetype_definitions.keys() }}" + loop_control: + loop_var: item + +- name: "{{ level }}-{{ base_folder }} | Lib - archetypes - custom" + when: + - mg_custom.archetype_definitions is defined + ansible.builtin.template: + src: "{{ base_templates_folder }}/{{ level }}/eslz/lib/{{ config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy }}/archetype_definitions/custom_landing_zone_template.json.j2" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/lib/archetype_definitions/archetype_definition_{{ mg_custom.archetype_definitions[item].archetype_id }}.json" + force: yes + loop: "{{ mg_custom.archetype_definitions.keys() }}" + loop_control: + loop_var: item + +- name: "{{ level }}-{{ base_folder }} | Lib" + ansible.builtin.template: + src: "{{ item.src }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/lib/{{ item.path }}" + force: yes + with_filetree: "{{ config_folder }}/eslz/lib" + when: item.state == 'file' and config.platform_core_setup.enterprise_scale.update_lib_folder + +- name: "{{ level }}-{{ base_folder }} | overrides" + when: + - mg_custom.archetype_definitions is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/lib/{{ 
config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy }}/*.tfvars.j2" diff --git a/templates/platform/level1/eslz/configuration.tfvars.j2 b/templates/platform/level1/eslz/configuration.tfvars.j2 new file mode 100644 index 000000000..87f255e2d --- /dev/null +++ b/templates/platform/level1/eslz/configuration.tfvars.j2 @@ -0,0 +1,28 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "level1" + key = "{{ config.tfstates.platform.eslz.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + {{ config.tfstates.platform.platform_subscriptions.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.platform_subscriptions.tfstate }}" + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + {{ config.tfstates.platform.identity.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.identity.tfstate }}" + } +{% endif %} + } +} diff --git a/templates/platform/level1/eslz/enterprise_scale.tfvars.j2 b/templates/platform/level1/eslz/enterprise_scale.tfvars.j2 new file mode 100644 index 000000000..4c56e4470 --- /dev/null +++ b/templates/platform/level1/eslz/enterprise_scale.tfvars.j2 @@ -0,0 +1,7 @@ +library_path = "../../../../{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/lib" +root_id = "{{ config.platform_core_setup.enterprise_scale.management_group_prefix }}" +root_name = "{{ config.platform_core_setup.enterprise_scale.management_group_name }}" +deploy_core_landing_zones = {{ config.platform_core_setup.enterprise_scale.deploy_core_landing_zones | string | lower }} +{% if (config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine | default(false)) and config.platform_identity.azuread_identity_mode != 'logged_in_user' %} +reconcile_vending_subscriptions = true +{% endif %} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_config_overrides.tfvars.j2 b/templates/platform/level1/eslz/lib/v0.1.x/archetype_config_overrides.tfvars.j2 new file mode 100644 index 000000000..c731806ec --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_config_overrides.tfvars.j2 @@ -0,0 +1,97 @@ +archetype_config_overrides = { + + root = { + archetype_id = "root" + parameters = { + "Deny-Resource-Locations" = { + "listOfAllowedLocations" = { + value = [ + "{{ config.caf_terraform.launchpad.regions.region1.name }}", + "{{ config.caf_terraform.launchpad.regions.region2.name }}" + ] + } + } + "Deny-RSG-Locations" = { + "listOfAllowedLocations" = { + value = [ + "{{ config.caf_terraform.launchpad.regions.region1.name }}", + "{{ config.caf_terraform.launchpad.regions.region2.name }}" + ] + } + } + "Deploy-Resource-Diag" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_region1" + attribute_key = "id" + } + "profileName" = { + value = "eslz-diagnostic-log" + } + } + 
"Deploy-VM-Monitoring" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_region1" + attribute_key = "id" + } + } + "Deploy-VMSS-Monitoring" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_region1" + attribute_key = "id" + } + } + } + access_control = {} + } //root + + landing-zones = { + archetype_id = "landingzone" + parameters = {} + access_control = {} + } + + platform = { + archetype_id = "platform" + parameters = {} + access_control = {} + } + + connectivity = { + archetype_id = "platform_connectivity" + parameters = {} + access_control = {} + } + + identity = { + archetype_id = "platform_identity" + parameters = {} + access_control = {} + } + + management = { + archetype_id = "platform_management" + parameters = {} + access_control = {} + } + + decommissioned = { + archetype_id = "es_decommissioned" + parameters = {} + access_control = {} + } + + sandboxes = { + archetype_id = "es_sandboxes" + parameters = {} + access_control = {} + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/README.md b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/README.md new file mode 100644 index 000000000..cb8923540 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default archetypes + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/archetype_definitions diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone.json new file mode 100644 index 000000000..bb3475aa6 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone.json @@ -0,0 +1,16 @@ +{ + "landingzone": { + "policy_assignments": [ + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_corp.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_corp.json new file mode 100644 index 000000000..f4b455b07 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_corp.json @@ -0,0 +1,16 @@ +{ + "landingzone_corp": { + "policy_assignments": [ + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_online.json 
b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_online.json new file mode 100644 index 000000000..21b00cfae --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_landingzone_online.json @@ -0,0 +1,29 @@ +{ + "landingzone_online": { + "policy_assignments": [ + "Deploy-ASC-Defender" + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": { + "Deploy-ASC-Defender": { + "pricingTierAppServices": "Free", + "pricingTierVMs": "Free", + "pricingTierSqlServers": "Standard", + "pricingTierStorageAccounts": "Standard", + "pricingTierContainerRegistry": "Free", + "pricingTierKeyVaults": "Standard", + "pricingTierKubernetesService": "Free", + "pricingTierDns": "Free", + "pricingTierArm": "Free" + } + }, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform.json new file mode 100644 index 000000000..4bbaffea4 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform.json @@ -0,0 +1,29 @@ +{ + "platform": { + "policy_assignments": [ + "Deploy-ASC-Defender" + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": { + "Deploy-ASC-Defender": { + "pricingTierAppServices": "Free", + "pricingTierVMs": "Free", + "pricingTierSqlServers": "Standard", + "pricingTierStorageAccounts": "Standard", + "pricingTierContainerRegistry": "Free", + "pricingTierKeyVaults": "Standard", + "pricingTierKubernetesService": "Free", + "pricingTierDns": "Free", + "pricingTierArm": "Free" + } + }, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_connectivity.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_connectivity.json new file mode 100644 index 000000000..246dd5c5a --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_connectivity.json @@ -0,0 +1,16 @@ +{ + "platform_connectivity": { + "policy_assignments": [ + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_identity.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_identity.json new file mode 100644 index 000000000..1a068e388 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_identity.json @@ -0,0 +1,16 @@ +{ + "platform_identity": { + "policy_assignments": [ + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_management.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_management.json new file mode 100644 index 000000000..6fa99aa2b --- /dev/null +++ 
b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_platform_management.json @@ -0,0 +1,16 @@ +{ + "platform_management": { + "policy_assignments": [ + ], + "policy_definitions": [ + ], + "policy_set_definitions": [ + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": {} + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_root.json b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_root.json new file mode 100644 index 000000000..9f1f2061d --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/archetype_definitions/archetype_definition_root.json @@ -0,0 +1,121 @@ +{ + "root": { + "policy_assignments": [ + "Deploy-Resource-Diag", + "Deny-Resource-Locations", + "Deny-RSG-Locations" + ], + "policy_definitions": [ + "Append-KV-SoftDelete", + "Deny-AA-child-resources", + "Deny-AppGW-Without-WAF", + "Deny-Private-DNS-Zones", + "Deny-PublicEndpoint-Aks", + "Deny-PublicEndpoint-CosmosDB", + "Deny-PublicEndpoint-KeyVault", + "Deny-PublicEndpoint-MariaDB", + "Deny-PublicEndpoint-MySQL", + "Deny-PublicEndpoint-PostgreSql", + "Deny-PublicEndpoint-Sql", + "Deny-PublicEndpoint-Storage", + "Deny-PublicIP", + "Deny-RDP-From-Internet", + "Deny-Subnet-Without-Nsg", + "Deny-Subnet-Without-Udr", + "Deny-VNET-Peer-Cross-Sub", + "Deny-VNet-Peering", + "Deploy-ASC-Standard", + "Deploy-Budget", + "Deploy-DDoSProtection", + "Deploy-Diagnostics-AA", + "Deploy-Diagnostics-ACI", + "Deploy-Diagnostics-ACR", + "Deploy-Diagnostics-ActivityLog", + "Deploy-Diagnostics-AKS", + "Deploy-Diagnostics-AnalysisService", + "Deploy-Diagnostics-ApiForFHIR", + "Deploy-Diagnostics-APIMgmt", + "Deploy-Diagnostics-ApplicationGateway", + "Deploy-Diagnostics-Batch", + "Deploy-Diagnostics-CDNEndpoints", + "Deploy-Diagnostics-CognitiveServices", + "Deploy-Diagnostics-CosmosDB", + "Deploy-Diagnostics-Databricks", + "Deploy-Diagnostics-DataExplorerCluster", + "Deploy-Diagnostics-DataFactory", + "Deploy-Diagnostics-DataLakeStore", + "Deploy-Diagnostics-DLAnalytics", + "Deploy-Diagnostics-EventGridSub", + "Deploy-Diagnostics-EventGridSystemTopic", + "Deploy-Diagnostics-EventGridTopic", + "Deploy-Diagnostics-EventHub", + "Deploy-Diagnostics-ExpressRoute", + "Deploy-Diagnostics-Firewall", + "Deploy-Diagnostics-FrontDoor", + "Deploy-Diagnostics-Function", + "Deploy-Diagnostics-HDInsight", + "Deploy-Diagnostics-iotHub", + "Deploy-Diagnostics-KeyVault", + "Deploy-Diagnostics-LoadBalancer", + "Deploy-Diagnostics-LogicAppsISE", + "Deploy-Diagnostics-LogicAppsWF", + "Deploy-Diagnostics-MariaDB", + "Deploy-Diagnostics-MediaService", + "Deploy-Diagnostics-MlWorkspace", + "Deploy-Diagnostics-MySQL", + "Deploy-Diagnostics-NetworkSecurityGroups", + "Deploy-Diagnostics-NIC", + "Deploy-Diagnostics-PostgreSQL", + "Deploy-Diagnostics-PowerBIEmbedded", + "Deploy-Diagnostics-PublicIP", + "Deploy-Diagnostics-RecoveryVault", + "Deploy-Diagnostics-RedisCache", + "Deploy-Diagnostics-Relay", + "Deploy-Diagnostics-SearchServices", + "Deploy-Diagnostics-ServiceBus", + "Deploy-Diagnostics-SignalR", + "Deploy-Diagnostics-SQLDBs", + "Deploy-Diagnostics-SQLElasticPools", + "Deploy-Diagnostics-SQLMI", + "Deploy-Diagnostics-StreamAnalytics", + "Deploy-Diagnostics-TimeSeriesInsights", + "Deploy-Diagnostics-TrafficManager", + "Deploy-Diagnostics-VirtualNetwork", + "Deploy-Diagnostics-VM", + "Deploy-Diagnostics-VMSS", + "Deploy-Diagnostics-VNetGW", + "Deploy-Diagnostics-WebServerFarm", + 
"Deploy-Diagnostics-Website", + "Deploy-Diagnostics-WVDAppGroup", + "Deploy-Diagnostics-WVDHostPools", + "Deploy-Diagnostics-WVDWorkspace", + "Deploy-DNSZoneGroup-For-Blob-PrivateEndpoint", + "Deploy-DNSZoneGroup-For-File-PrivateEndpoint", + "Deploy-DNSZoneGroup-For-KeyVault-PrivateEndpoint", + "Deploy-DNSZoneGroup-For-Queue-PrivateEndpoint", + "Deploy-DNSZoneGroup-For-Sql-PrivateEndpoint", + "Deploy-DNSZoneGroup-For-Table-PrivateEndpoint", + "Deploy-FirewallPolicy", + "Deploy-LA-Config", + "Deploy-Log-Analytics", + "Deploy-Nsg-FlowLogs-to-LA", + "Deploy-Sql-AuditingSettings", + "Deploy-Sql-SecurityAlertPolicies", + "Deploy-Sql-Tde", + "Deploy-Sql-vulnerabilityAssessments", + "Deploy-Windows-DomainJoin" + ], + "policy_set_definitions": [ + "Deny-PublicEndpoints", + "Deploy-Diag-LogAnalytics", + "Deploy-Sql-Security" + ], + "role_definitions": [ + ], + "archetype_config": { + "parameters": {}, + "access_control": { + } + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/custom_landing_zones.tfvars.j2 b/templates/platform/level1/eslz/lib/v0.1.x/custom_landing_zones.tfvars.j2 new file mode 100644 index 000000000..75d3f2d03 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/custom_landing_zones.tfvars.j2 @@ -0,0 +1,24 @@ +custom_landing_zones = { + {{ config.eslz.root_id }}-corp = { + display_name = "Corp" + parent_management_group_id = "{{ config.eslz.root_id }}-landing-zones" + archetype_config = { + archetype_id = "landingzone_corp" + parameters = {} + access_control = {} + } + subscriptions = {} + subscription_ids = [] + } + {{ config.eslz.root_id }}-online = { + display_name = "Online" + parent_management_group_id = "{{ config.eslz.root_id }}-landing-zones" + archetype_config = { + archetype_id = "landingzone_online" + parameters = {} + access_control = {} + } + subscriptions = {} + subscription_ids = [] + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/README.md b/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/README.md new file mode 100644 index 000000000..def2a5a6d --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy assignments + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_assignments diff --git a/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/policy_assignment_pru_apply_security_benchmark.tmpl.json b/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/policy_assignment_pru_apply_security_benchmark.tmpl.json new file mode 100644 index 000000000..d7f58f627 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/policy_assignments/policy_assignment_pru_apply_security_benchmark.tmpl.json @@ -0,0 +1,18 @@ +{ + "name": "CAF-Security-Benchmark", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "The Azure Security Benchmark initiative represents the policies and controls implementing security recommendations defined in Azure Security Benchmark v2, see https://aka.ms/azsecbm. 
This also serves as the Azure Security Center default policy initiative. You can directly assign this initiative, or manage its policies and compliance results within Azure Security Center.", + "displayName": "Azure Security Benchmark", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policySetDefinitions/1f3afdf9-d0c9-4c3d-847f-89da613e70a8", + "scope": "${current_scope_resource_id}", + "enforcementMode": true + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} diff --git a/templates/platform/level1/eslz/lib/v0.1.x/policy_definitions/README.md b/templates/platform/level1/eslz/lib/v0.1.x/policy_definitions/README.md new file mode 100644 index 000000000..e47f922fd --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/policy_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_definitions diff --git a/templates/platform/level1/eslz/lib/v0.1.x/policy_set_definitions/README.md b/templates/platform/level1/eslz/lib/v0.1.x/policy_set_definitions/README.md new file mode 100644 index 000000000..c09d2c016 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/policy_set_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy set definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_set_definitions diff --git a/templates/platform/level1/eslz/lib/v0.1.x/role_definitions/README.md b/templates/platform/level1/eslz/lib/v0.1.x/role_definitions/README.md new file mode 100644 index 000000000..2230928aa --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.1.x/role_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + + +# List of the default role definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/role_definitions diff --git a/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.old b/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.old new file mode 100644 index 000000000..1875a1bb2 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.old @@ -0,0 +1,208 @@ +archetype_config_overrides = { + + root = { + archetype_id = "root" + parameters = { + "Allowed-Locations" = { + "listOfAllowedLocations" = { + values = [ +{% for key in config.caf_terraform.launchpad.regions.keys() %} + "{{ 
config.caf_terraform.launchpad.regions[key].name }}", +{% endfor %} + ] + } + } + "Deny-RSG-Locations" = { + "listOfAllowedLocations" = { + values = [ +{% for key in config.caf_terraform.launchpad.regions.keys() %} + "{{ config.caf_terraform.launchpad.regions[key].name }}", +{% endfor %} + ] + } + } + "Deploy-Resource-Diag" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + "profileName" = { + value = "eslz-diagnostic-log" + } + } + "Deploy-AzActivity-Log" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + } +{% if "VM" in config.platform_management.enable_monitoring %} + "Deploy-VM-Monitoring" = { + "logAnalytics_1" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + } +{% endif %} +{% if "VMSS" in config.platform_management.enable_monitoring %} + "Deploy-VMSS-Monitoring" = { + "logAnalytics_1" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + } +{% endif %} +{% if "Arc" in config.platform_management.enable_monitoring %} + "Deploy-WS-Arc-Monitoring" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + } + "Deploy-LX-Arc-Monitoring" = { + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } + } +{% endif %} + "Deploy-ASC-Defender" = { + "emailSecurityContact" = { + value = "{{ config.notifications.security_center_email_contact }}" + } + "logAnalytics" = { + lz_key = "{{ config.tfstates.platform.management.lz_key_name }}" + output_key = "diagnostics" + resource_type = "log_analytics" + resource_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + attribute_key = "id" + } +{% for parameter_key in mg.archetype_definitions.root.policy_assignments["Deploy-ASC-Defender"].keys() %} + "{{ parameter_key }}" = { + value = "{{ mg.archetype_definitions.root.policy_assignments["Deploy-ASC-Defender"][parameter_key] }}" + } +{% endfor %} + } + } + access_control = {} + } //root + + landing-zones = { + archetype_id = "landingzone" + parameters = {} + access_control = { +{% if config.platform_identity.azuread_identity_mode != 
'logged_in_user' %} + "Owner" = { + "azuread_groups" = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + attribute_key = "id" + resource_keys = [ + "subscription_creation_landingzones" + ] + } + } +{% endif %} + } + } + + platform = { + archetype_id = "platform" + parameters = {} + access_control = {} + } + + connectivity = { + archetype_id = "platform_connectivity" + parameters = {} + access_control = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Owner" = { + "azuread_groups" = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + attribute_key = "id" + resource_keys = [ + "connectivity" + ] + } + } +{% if config.platform_core_setup.enterprise_scale.enable_azure_subscription_vending_machine %} + "[{{ config.platform_core_setup.enterprise_scale.management_group_prefix | upper }}-CONNECTIVITY] CAF-network-vhub-peering" = { + "azuread_groups" = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + attribute_key = "id" + resource_keys = [ + "subscription_creation_landingzones" + ] + } + } +{% endif %} +{% endif %} + } + } + + identity = { + archetype_id = "platform_identity" + parameters = {} + access_control = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Owner" = { + "azuread_groups" = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + attribute_key = "id" + resource_keys = [ + "identity" + ] + } + } +{% endif %} + } + } + + management = { + archetype_id = "platform_management" + parameters = {} + access_control = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + "Owner" = { + "azuread_groups" = { + lz_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + attribute_key = "id" + resource_keys = [ + "management" + ] + } + } +{% endif %} + } + } + + decommissioned = { + archetype_id = "es_decommissioned" + parameters = {} + access_control = {} + } + + sandboxes = { + archetype_id = "es_sandboxes" + parameters = {} + access_control = {} + } +} diff --git a/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.tfvars.j2 b/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.tfvars.j2 new file mode 100644 index 000000000..e24a0bef6 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/archetype_config_overrides.tfvars.j2 @@ -0,0 +1,56 @@ +archetype_config_overrides = { +{% for key, level in mg.archetype_definitions.items() %} + {{ key }} = { + archetype_id = "{{mg.archetype_definitions[key].archetype_id }}" +{% if mg.archetype_definitions[key].policy_assignments is defined %} + parameters = { +{% for pa_key, pa_value in mg.archetype_definitions[key].policy_assignments.items() %} +{% if pa_value is mapping %} + "{{ pa_key }}" = { +{% for attribute, attribute_value in pa_value.items() %} + "{{attribute}}" = { +{% if attribute_value is string %} + value = "{{ attribute_value }}" +{% elif attribute_value is boolean %} + value = {{ attribute_value | string | lower }} +{% elif attribute_value is number %} + value = {{ attribute_value }} +{% else %} +{% if attribute_value is mapping %} +{% for caf_key, caf_value in attribute_value.items() %} + {{ caf_key }} = "{{ caf_value }}" +{% endfor %} +{% else %} + values = {{ attribute_value | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% endif %} + } +{% endfor %} + } +{% endif %} +{% endfor %} + } +{% else %} + parameters = {} +{% endif %} +{% if level.archetype_config.access_control is defined %} + access_control = { +{% for 
level_ac_key, level_ac in level.archetype_config.access_control.items() %} + "{{level_ac_key}}" = { +{% for level_role_key, level_role in level_ac.items() %} + "{{ level_role_key }}" = { + lz_key = "{{ level_role.lz_key }}" + attribute_key = "{{ level_role.attribute_key }}" + resource_keys = {{ level_role.resource_keys | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endfor %} + } +{% else %} + access_control = {} +{% endif %} + } + +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/README.md b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/README.md new file mode 100644 index 000000000..519e9f330 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default archetypes + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/archetype_definitions + diff --git a/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/archetype_definition_template.json.j2 b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/archetype_definition_template.json.j2 new file mode 100644 index 000000000..a489b4cdc --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/archetype_definition_template.json.j2 @@ -0,0 +1,90 @@ +{ + "{{ mg.archetype_definitions[item].archetype_id }}": { + "policy_assignments": [ +{% if mg.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_definitions": [ +{% if mg.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_set_definitions": [ +{% if mg.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "role_definitions": [ +{% if mg.archetype_definitions[item].role_definitions is defined %} +{% for key in 
mg.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].role_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "archetype_config": { + "parameters": { + }, + "access_control": { + } + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/custom_landing_zone_template.json.j2 b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/custom_landing_zone_template.json.j2 new file mode 100644 index 000000000..a795469dc --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/archetype_definitions/custom_landing_zone_template.json.j2 @@ -0,0 +1,90 @@ +{ + "{{ mg_custom.archetype_definitions[item].archetype_id }}": { + "policy_assignments": [ +{% if mg.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_definitions": [ +{% if mg.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_set_definitions": [ +{% if mg.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "role_definitions": [ +{% if mg.archetype_definitions[item].role_definitions is defined %} +{% for key in mg.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].role_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "archetype_config": { + "parameters": { + }, + "access_control": { + } + } + } +} diff --git a/templates/platform/level1/eslz/lib/v0.3.3/custom_landing_zones.tfvars.j2 b/templates/platform/level1/eslz/lib/v0.3.3/custom_landing_zones.tfvars.j2 new file mode 100644 index 000000000..4ebb58b6b --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/custom_landing_zones.tfvars.j2 @@ -0,0 +1,62 @@ 
+custom_landing_zones = { +{% for key, level in mg_custom.archetype_definitions.items() %} + {{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ key }} = { + display_name = "{{ mg_custom.archetype_definitions[key].display_name }}" + parent_management_group_id = "{{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ mg_custom.archetype_definitions[key].parent_management_group_id }}" + archetype_config = { + archetype_id = "{{mg_custom.archetype_definitions[key].archetype_id }}" +{% if mg_custom.archetype_definitions[key].policy_assignments is defined %} + parameters = { +{% for pa_key, pa_value in mg_custom.archetype_definitions[key].policy_assignments.items() %} +{% if pa_value is mapping %} + "{{ pa_key }}" = { +{% for attribute, attribute_value in pa_value.items() %} + "{{attribute}}" = { +{% if attribute_value is string %} + value = "{{ attribute_value }}" +{% elif attribute_value is boolean %} + value = {{ attribute_value | string | lower }} +{% elif attribute_value is number %} + value = {{ attribute_value }} +{% else %} +{% if attribute_value is mapping %} +{% for caf_key, caf_value in attribute_value.items() %} + {{ caf_key }} = "{{ caf_value }}" +{% endfor %} +{% else %} + values = {{ attribute_value | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% endif %} + } +{% endfor %} + } +{% endif %} +{% endfor %} + } +{% else %} + parameters = {} +{% endif %} +{% if mg_custom.archetype_definitions[key].archetype_config.access_control is defined %} + access_control = { +{% for level_ac_key, level_ac in mg_custom.archetype_definitions[key].archetype_config.access_control.items() %} + "{{level_ac_key}}" = { +{% for level_role_key, level_role in level_ac.items() %} + "{{ level_role_key }}" = { + lz_key = "{{ level_role.lz_key }}" + attribute_key = "{{ level_role.attribute_key }}" + resource_keys = {{ level_role.resource_keys | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endfor %} + } +{% else %} + access_control = {} +{% endif %} + } + subscriptions = {} + subscription_ids = [] + } + +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/README.md b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/README.md new file mode 100644 index 000000000..def2a5a6d --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy assignments + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_assignments diff --git a/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_caf_aks_capability.json b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_caf_aks_capability.json new file mode 100644 index 000000000..789acde60 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_caf_aks_capability.json @@ -0,0 +1,18 @@ +{ + "name": "aks-capability", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Restrict the capabilities to reduce 
the attack surface of containers in a Kubernetes cluster. This recommendation is part of CIS 5.2.8 and CIS 5.2.9 which are intended to improve the security of your Kubernetes environments. This policy is generally available for Kubernetes Service (AKS), and preview for AKS Engine and Azure Arc enabled Kubernetes. For more information, see https://aka.ms/kubepolicydoc. (labelSelector example - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements)", + "displayName": "Kubernetes cluster containers should only use allowed capabilities.", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/c26596ff-4d70-4e6a-9a30-c2506bd2f80c", + "scope": "${current_scope_resource_id}", + "enforcementMode": true + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_es_allowed_locations.json b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_es_allowed_locations.json new file mode 100644 index 000000000..bce0689ad --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/policy_assignments/policy_assignment_es_allowed_locations.json @@ -0,0 +1,18 @@ +{ + "name": "Allowed-Locations", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Specifies the allowed locations (regions) where Resources can be deployed.", + "displayName": "Limit allowed locations for Resources", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/e56962a6-4747-49cd-b67b-bf8b01975c4c", + "scope": "${current_scope_resource_id}", + "enforcementMode": null + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v0.3.3/policy_definitions/README.md b/templates/platform/level1/eslz/lib/v0.3.3/policy_definitions/README.md new file mode 100644 index 000000000..e47f922fd --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/policy_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_definitions diff --git a/templates/platform/level1/eslz/lib/v0.3.3/policy_set_definitions/README.md b/templates/platform/level1/eslz/lib/v0.3.3/policy_set_definitions/README.md new file mode 100644 index 000000000..c09d2c016 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/policy_set_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy set definitions + 
+https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_set_definitions diff --git a/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/README.md b/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/README.md new file mode 100644 index 000000000..2230928aa --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + + +# List of the default role definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/role_definitions diff --git a/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/role_definition_caf_vhub_peering.json b/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/role_definition_caf_vhub_peering.json new file mode 100644 index 000000000..db51f3c40 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v0.3.3/role_definitions/role_definition_caf_vhub_peering.json @@ -0,0 +1,26 @@ +{ + "name": "48ec94a9-9a14-488d-928d-5e73f96b335c", + "type": "Microsoft.Authorization/roleDefinitions", + "apiVersion": "2018-01-01-preview", + "properties": { + "roleName": "CAF-network-vhub-peering", + "description": "Authorize vnet peerings to the vhub.", + "type": "customRole", + "permissions": [ + { + "actions": [ + "Microsoft.Resources/subscriptions/resourceGroups/read", + "Microsoft.Network/virtualHubs/read", + "Microsoft.Network/virtualHubs/hubVirtualNetworkConnections/*" + ], + "notActions": [ + ], + "dataActions": [], + "notDataActions": [] + } + ], + "assignableScopes": [ + "${current_scope_resource_id}" + ] + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v1.1.1/archetype_config_overrides.tfvars.j2 b/templates/platform/level1/eslz/lib/v1.1.1/archetype_config_overrides.tfvars.j2 new file mode 100644 index 000000000..e24a0bef6 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/archetype_config_overrides.tfvars.j2 @@ -0,0 +1,56 @@ +archetype_config_overrides = { +{% for key, level in mg.archetype_definitions.items() %} + {{ key }} = { + archetype_id = "{{mg.archetype_definitions[key].archetype_id }}" +{% if mg.archetype_definitions[key].policy_assignments is defined %} + parameters = { +{% for pa_key, pa_value in mg.archetype_definitions[key].policy_assignments.items() %} +{% if pa_value is mapping %} + "{{ pa_key }}" = { +{% for attribute, attribute_value in pa_value.items() %} + "{{attribute}}" = { +{% if attribute_value is string %} + value = "{{ attribute_value }}" +{% elif attribute_value is boolean %} + value = {{ attribute_value | string | lower }} +{% elif attribute_value is number %} + value = {{ attribute_value }} +{% else %} +{% if attribute_value is mapping %} +{% for caf_key, caf_value in attribute_value.items() %} + {{ caf_key }} = "{{ caf_value }}" +{% endfor %} +{% else %} + values = {{ attribute_value | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% endif %} + } +{% endfor %} + } +{% endif %} +{% endfor %} + } +{% else %} + parameters = {} +{% endif %} +{% if level.archetype_config.access_control is defined %} + access_control = { +{% for level_ac_key, level_ac in 
level.archetype_config.access_control.items() %} + "{{level_ac_key}}" = { +{% for level_role_key, level_role in level_ac.items() %} + "{{ level_role_key }}" = { + lz_key = "{{ level_role.lz_key }}" + attribute_key = "{{ level_role.attribute_key }}" + resource_keys = {{ level_role.resource_keys | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endfor %} + } +{% else %} + access_control = {} +{% endif %} + } + +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/README.md b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/README.md new file mode 100644 index 000000000..519e9f330 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default archetypes + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/archetype_definitions + diff --git a/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/archetype_definition_template.json.j2 b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/archetype_definition_template.json.j2 new file mode 100644 index 000000000..a489b4cdc --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/archetype_definition_template.json.j2 @@ -0,0 +1,90 @@ +{ + "{{ mg.archetype_definitions[item].archetype_id }}": { + "policy_assignments": [ +{% if mg.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_definitions": [ +{% if mg.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_set_definitions": [ +{% if mg.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "role_definitions": [ +{% if mg.archetype_definitions[item].role_definitions is defined %} +{% for key in 
mg.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].role_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "archetype_config": { + "parameters": { + }, + "access_control": { + } + } + } +} diff --git a/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/custom_landing_zone_template.json.j2 b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/custom_landing_zone_template.json.j2 new file mode 100644 index 000000000..a795469dc --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/archetype_definitions/custom_landing_zone_template.json.j2 @@ -0,0 +1,90 @@ +{ + "{{ mg_custom.archetype_definitions[item].archetype_id }}": { + "policy_assignments": [ +{% if mg.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_assignments is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_assignments.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_definitions": [ +{% if mg.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "policy_set_definitions": [ +{% if mg.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].policy_set_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].policy_set_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "role_definitions": [ +{% if mg.archetype_definitions[item].role_definitions is defined %} +{% for key in mg.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} +{% if mg_custom.archetype_definitions[item].role_definitions is defined %} +{% for key in mg_custom.archetype_definitions[item].role_definitions.keys() %} +{% if loop.last %} + "{{ key }}" +{% else %} + "{{ key }}", +{% endif %} +{% endfor %} +{% endif %} + ], + "archetype_config": { + "parameters": { + }, + "access_control": { + } + } + } +} diff --git a/templates/platform/level1/eslz/lib/v1.1.1/custom_landing_zones.tfvars.j2 b/templates/platform/level1/eslz/lib/v1.1.1/custom_landing_zones.tfvars.j2 new file mode 100644 index 000000000..4ebb58b6b --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/custom_landing_zones.tfvars.j2 @@ -0,0 +1,62 @@ 
+custom_landing_zones = { +{% for key, level in mg_custom.archetype_definitions.items() %} + {{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ key }} = { + display_name = "{{ mg_custom.archetype_definitions[key].display_name }}" + parent_management_group_id = "{{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ mg_custom.archetype_definitions[key].parent_management_group_id }}" + archetype_config = { + archetype_id = "{{mg_custom.archetype_definitions[key].archetype_id }}" +{% if mg_custom.archetype_definitions[key].policy_assignments is defined %} + parameters = { +{% for pa_key, pa_value in mg_custom.archetype_definitions[key].policy_assignments.items() %} +{% if pa_value is mapping %} + "{{ pa_key }}" = { +{% for attribute, attribute_value in pa_value.items() %} + "{{attribute}}" = { +{% if attribute_value is string %} + value = "{{ attribute_value }}" +{% elif attribute_value is boolean %} + value = {{ attribute_value | string | lower }} +{% elif attribute_value is number %} + value = {{ attribute_value }} +{% else %} +{% if attribute_value is mapping %} +{% for caf_key, caf_value in attribute_value.items() %} + {{ caf_key }} = "{{ caf_value }}" +{% endfor %} +{% else %} + values = {{ attribute_value | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% endif %} + } +{% endfor %} + } +{% endif %} +{% endfor %} + } +{% else %} + parameters = {} +{% endif %} +{% if mg_custom.archetype_definitions[key].archetype_config.access_control is defined %} + access_control = { +{% for level_ac_key, level_ac in mg_custom.archetype_definitions[key].archetype_config.access_control.items() %} + "{{level_ac_key}}" = { +{% for level_role_key, level_role in level_ac.items() %} + "{{ level_role_key }}" = { + lz_key = "{{ level_role.lz_key }}" + attribute_key = "{{ level_role.attribute_key }}" + resource_keys = {{ level_role.resource_keys | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endfor %} + } +{% else %} + access_control = {} +{% endif %} + } + subscriptions = {} + subscription_ids = [] + } + +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/README.md b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/README.md new file mode 100644 index 000000000..def2a5a6d --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy assignments + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_assignments diff --git a/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_caf_aks_capability.json b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_caf_aks_capability.json new file mode 100644 index 000000000..789acde60 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_caf_aks_capability.json @@ -0,0 +1,18 @@ +{ + "name": "aks-capability", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Restrict the capabilities to reduce 
the attack surface of containers in a Kubernetes cluster. This recommendation is part of CIS 5.2.8 and CIS 5.2.9 which are intended to improve the security of your Kubernetes environments. This policy is generally available for Kubernetes Service (AKS), and preview for AKS Engine and Azure Arc enabled Kubernetes. For more information, see https://aka.ms/kubepolicydoc. (labelSelector example - https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements)", + "displayName": "Kubernetes cluster containers should only use allowed capabilities.", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/c26596ff-4d70-4e6a-9a30-c2506bd2f80c", + "scope": "${current_scope_resource_id}", + "enforcementMode": true + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_es_allowed_locations.json b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_es_allowed_locations.json new file mode 100644 index 000000000..bce0689ad --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/policy_assignments/policy_assignment_es_allowed_locations.json @@ -0,0 +1,18 @@ +{ + "name": "Allowed-Locations", + "type": "Microsoft.Authorization/policyAssignments", + "apiVersion": "2019-09-01", + "properties": { + "description": "Specifies the allowed locations (regions) where Resources can be deployed.", + "displayName": "Limit allowed locations for Resources", + "notScopes": [], + "parameters": {}, + "policyDefinitionId": "/providers/Microsoft.Authorization/policyDefinitions/e56962a6-4747-49cd-b67b-bf8b01975c4c", + "scope": "${current_scope_resource_id}", + "enforcementMode": null + }, + "location": "${default_location}", + "identity": { + "type": "None" + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/lib/v1.1.1/policy_definitions/README.md b/templates/platform/level1/eslz/lib/v1.1.1/policy_definitions/README.md new file mode 100644 index 000000000..e47f922fd --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/policy_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_definitions diff --git a/templates/platform/level1/eslz/lib/v1.1.1/policy_set_definitions/README.md b/templates/platform/level1/eslz/lib/v1.1.1/policy_set_definitions/README.md new file mode 100644 index 000000000..c09d2c016 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/policy_set_definitions/README.md @@ -0,0 +1,10 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + +# List of the default policy set definitions + 
+https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/policy_set_definitions diff --git a/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/README.md b/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/README.md new file mode 100644 index 000000000..2230928aa --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/README.md @@ -0,0 +1,11 @@ + +# Public documentation of the custom landingzones + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BUser-Guide%5D-Archetype-Definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/wiki/%5BExamples%5D-Deploy-Custom-Landing-Zone-Archetypes + + +# List of the default role definitions + +https://github.com/Azure/terraform-azurerm-caf-enterprise-scale/tree/main/modules/archetypes/lib/role_definitions diff --git a/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/role_definition_caf_vhub_peering.json b/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/role_definition_caf_vhub_peering.json new file mode 100644 index 000000000..db51f3c40 --- /dev/null +++ b/templates/platform/level1/eslz/lib/v1.1.1/role_definitions/role_definition_caf_vhub_peering.json @@ -0,0 +1,26 @@ +{ + "name": "48ec94a9-9a14-488d-928d-5e73f96b335c", + "type": "Microsoft.Authorization/roleDefinitions", + "apiVersion": "2018-01-01-preview", + "properties": { + "roleName": "CAF-network-vhub-peering", + "description": "Authorize vnet peerings to the vhub.", + "type": "customRole", + "permissions": [ + { + "actions": [ + "Microsoft.Resources/subscriptions/resourceGroups/read", + "Microsoft.Network/virtualHubs/read", + "Microsoft.Network/virtualHubs/hubVirtualNetworkConnections/*" + ], + "notActions": [ + ], + "dataActions": [], + "notDataActions": [] + } + ], + "assignableScopes": [ + "${current_scope_resource_id}" + ] + } +} \ No newline at end of file diff --git a/templates/platform/level1/eslz/readme.md b/templates/platform/level1/eslz/readme.md new file mode 100644 index 000000000..96b22db83 --- /dev/null +++ b/templates/platform/level1/eslz/readme.md @@ -0,0 +1,34 @@ +# Enterprise scale + +## Deploy Enterprise Scale + +Note: you need to switch the landingzones repository to the {{ config.platform_core_setup.enterprise_scale.private_lib[config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy].caf_landingzone_branch }} branch to deploy Enterprise Scale + +```bash +az account clear +# login with a user who is a member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ config.platform_core_setup.enterprise_scale.private_lib[config.platform_core_setup.enterprise_scale.private_lib.version_to_deploy].caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_eslz.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution/add-ons/caf_eslz \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.eslz.tfstate }} \ + -log-severity ERROR \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p
${TF_DATA_DIR}/{{ config.tfstates.platform.eslz.tfstate }}.tfplan \ + -a plan + +``` + +# Next steps + + [Deploy Connectivity](../../level2/connectivity/readme.md) diff --git a/templates/platform/level1/eslz/subscription_id_overrides.tfvars.j2 b/templates/platform/level1/eslz/subscription_id_overrides.tfvars.j2 new file mode 100644 index 000000000..7cfcc03e1 --- /dev/null +++ b/templates/platform/level1/eslz/subscription_id_overrides.tfvars.j2 @@ -0,0 +1,45 @@ +subscription_id_overrides = { +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + root = [] +{% else %} + root = [ + "{{ config.caf_terraform.launchpad.subscription_id }}" + ] +{% endif %} + decommissioned = [] + sandboxes = [] + landing-zones = [] + platform = [] + connectivity = [] + management = [] + identity = [] +} + +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} +subscription_id_overrides_by_keys = { + connectivity = { + connectivity = { + lz_key = "{{ config.tfstates.platform.platform_subscriptions.lz_key_name }}" + key = "connectivity" + } + } + management = { + launchpad = { + lz_key = "{{ config.tfstates.platform.platform_subscriptions.lz_key_name }}" + key = "launchpad" + } + management = { + lz_key = "{{ config.tfstates.platform.platform_subscriptions.lz_key_name }}" + key = "management" + } + } + identity = { + identity = { + lz_key = "{{ config.tfstates.platform.platform_subscriptions.lz_key_name }}" + key = "identity" + } + } +} +{% else %} +subscription_id_overrides_by_keys = {} +{% endif %} \ No newline at end of file diff --git a/templates/platform/level1/identity/ansible.yaml b/templates/platform/level1/identity/ansible.yaml new file mode 100644 index 000000000..f0068fb9a --- /dev/null +++ b/templates/platform/level1/identity/ansible.yaml @@ -0,0 +1,73 @@ +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + +- name: "[{{ level }}-{{ base_folder }}] - Set variables" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: "[{{ level }}-{{ base_folder }}] - Load variables" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "identity.yaml|identity.caf.platform.yaml" + + +# +# resource_groups +# +- name: "[{{ level }}-{{ base_folder }}] - resources - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" + +# +# recovery_vaults +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - recovery_vaults" + when: + - resources.subscriptions[subscription_key].recovery_vaults is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ 
resource_template_folder }}/recovery_vaults.tfvars.j2" + +# +# service_health_alerts +# +- name: "[{{ level }}-{{ base_folder }}] - resources - service_health_alerts" + when: + - resources.subscriptions[subscription_key].service_health_alerts is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/servicehealth.tfvars.j2" + + +- name: "[{{ level }}-{{ base_folder }}] generate configuration files." + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.tfvars.j2" + - "{{ level }}/{{ base_folder }}/*.md" + diff --git a/templates/platform/level1/identity/azuread_groups.tfvars.j2 b/templates/platform/level1/identity/azuread_groups.tfvars.j2 new file mode 100644 index 000000000..45c99a42f --- /dev/null +++ b/templates/platform/level1/identity/azuread_groups.tfvars.j2 @@ -0,0 +1,39 @@ +{% if identity.level1.azuread_groups is defined %} +azuread_groups = { +{% for key, ad_group in identity.level1.azuread_groups.items() %} + {{ key }} = { + name = "{{ ad_group.name }}" +{% if ad_group.description is defined %} + description = "{{ ad_group.description }}" +{% endif %} +{% if ad_group.members is defined %} + members = { +{% if ad_group.members.user_principal_names is defined %} + user_principal_names = {{ ad_group.members.user_principal_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.group_names is defined %} + group_names = {{ ad_group.members.group_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.object_ids is defined %} + object_ids = {{ ad_group.members.object_ids | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.group_keys is defined %} + group_keys = {{ ad_group.members.group_keys | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.service_principal_keys is defined %} + service_principal_keys = {{ ad_group.members.service_principal_keys | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} +{% if ad_group.owners is defined %} + owners = { +{% if ad_group.owners.user_principal_names is defined %} + user_principal_names = {{ ad_group.owners.user_principal_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} + prevent_duplicate_name = {{ ad_group.owners.prevent_duplicate_name | default(false) | string | lower }} + } +{% endfor %} +} +{% endif %} \ No newline at end of file diff --git a/templates/platform/level1/identity/landingzone.tfvars.j2 b/templates/platform/level1/identity/landingzone.tfvars.j2 new file mode 100644 index 000000000..48704bdde --- /dev/null +++ b/templates/platform/level1/identity/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "{{ config.tfstates.platform.identity.level }}" + key = "{{ config.tfstates.platform.identity.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + } +} diff --git 
a/templates/platform/level1/identity/readme.md b/templates/platform/level1/identity/readme.md new file mode 100644 index 000000000..d2a8ecee1 --- /dev/null +++ b/templates/platform/level1/identity/readme.md @@ -0,0 +1,46 @@ + +# Identity +Deploy the identity services + +```bash +#Note: close the previous session if you logged in with a different service principal using --impersonate-sp-from-keyvault-url +rover logout + +# login with a user who is a member of the caf-maintainers group +{% if platform_subscriptions_details is defined %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ platform_subscriptions_details.identity.subscription_id }} +{% elif subscriptions.platform_subscriptions.identity.subscription_id is defined %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ subscriptions.platform_subscriptions.identity.subscription_id }} +{% else %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ config.caf_terraform.launchpad.subscription_id }} +{% endif %} + +rover \ +{% if platform_subscriptions_details.eslz is defined %} +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_identity.vault_uri }} \ +{% endif %} +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.identity.subscription_id }} \ +{% elif subscriptions.platform_subscriptions.identity.subscription_id is defined %} + -target_subscription {{ subscriptions.platform_subscriptions.identity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.identity.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.identity.tfstate }}.tfplan \ + -a plan + +``` + + +# Next steps + + [Deploy Enterprise Scale](../../level1/eslz/readme.md) diff --git a/templates/platform/level1/management/ansible.yaml b/templates/platform/level1/management/ansible.yaml new file mode 100644 index 000000000..c61cf23d6 --- /dev/null +++ b/templates/platform/level1/management/ansible.yaml @@ -0,0 +1,162 @@ +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + + +- name: "[{{ level }}-{{ base_folder }}] - Set variables" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: "[{{ level }}-{{ base_folder }}] - Load variables" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching:
"management.yaml|configuration.caf.platform.yaml" + +# +# automation_accounts +# +- name: "[{{ level }}-{{ base_folder }}] - resources - automation_accounts" + when: + - resources.subscriptions[subscription_key].automation_accounts is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/automation_accounts.tfvars.j2" + +# +# diagnostic_log_analytics +# +- name: "[{{ level }}-{{ base_folder }}] - resources - diagnostic_log_analytics" + when: + - resources.subscriptions[subscription_key].diagnostic_log_analytics is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/diagnostic_log_analytics.tfvars.j2" + +# +# diagnostic_storage_accounts +# +- name: "[{{ level }}-{{ base_folder }}] - resources - diagnostic_storage_accounts" + when: + - resources.subscriptions[subscription_key].diagnostic_storage_accounts is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/diagnostic_storage_accounts.tfvars.j2" + +# diagnostics_definition +# +- name: "[{{ level }}-{{ base_folder }}] - resources - diagnostics_definition" + when: + - resources.subscriptions[subscription_key].diagnostics_definition is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/diagnostics_definition.tfvars.j2" + +# diagnostics_destinations +# +- name: "[{{ level }}-{{ base_folder }}] - resources - diagnostics_destinations" + when: + - resources.subscriptions[subscription_key].diagnostics_destinations is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/diagnostics_destinations.tfvars.j2" + +# +# monitor_action_groups +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - monitor_action_groups" + when: + - resources.subscriptions[subscription_key].monitor_action_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/monitor_action_groups.tfvars.j2" + +# +# recovery_vaults +# +- name: "[{{ level }}-{{ subscription_key }}] - resources - recovery_vaults" + when: + - resources.subscriptions[subscription_key].recovery_vaults is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/recovery_vaults.tfvars.j2" + +# +# resource_groups +# +- name: "[{{ level }}-{{ base_folder }}] - resources - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" + +# +# service_health_alerts +# +- name: "[{{ level }}-{{ base_folder 
}}] - resources - service_health_alerts" + when: + - resources.subscriptions[subscription_key].service_health_alerts is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/servicehealth.tfvars.j2" + +# +# Readme +# +- name: "[{{ level }}-{{ base_folder }}] - resources - *.md" + # when: always + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.md" + +# +# Legacy calls +# +- name: "[{{ level }}-{{ base_folder }}] - generate configuration files." + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.tfvars.j2" \ No newline at end of file diff --git a/templates/platform/level1/management/landingzone.tfvars.j2 b/templates/platform/level1/management/landingzone.tfvars.j2 new file mode 100644 index 000000000..367d6aaf8 --- /dev/null +++ b/templates/platform/level1/management/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "{{ caf_terraform.launchpad.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "{{ config.tfstates.platform.management.level }}" + key = "{{ config.tfstates.platform.management.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + } +} diff --git a/templates/platform/level1/management/readme.md b/templates/platform/level1/management/readme.md new file mode 100644 index 000000000..130826048 --- /dev/null +++ b/templates/platform/level1/management/readme.md @@ -0,0 +1,52 @@ + +# Management +Deploy the management services + +```bash +#Note: close previous session if you logged with a different service principal using --impersonate-sp-from-keyvault-url +rover logout + +# login a with a user member of the caf-maintainers group +{% if platform_subscriptions_details is defined %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ platform_subscriptions_details.management.subscription_id }} +{% elif subscriptions.platform_subscriptions.management.subscription_id is defined %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ subscriptions.platform_subscriptions.management.subscription_id }} +{% else %} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ config.caf_terraform.launchpad.subscription_id }} +{% endif %} + +rover \ +{% if platform_subscriptions_details.eslz is defined %} +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_management.vault_uri }} \ +{% endif %} +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.management.subscription_id }} \ +{% elif 
subscriptions.platform_subscriptions.management.subscription_id is defined %} + -target_subscription {{ subscriptions.platform_subscriptions.management.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.management.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.management.tfstate }}.tfplan \ + -a plan + +``` + + +# Next steps + +When you have successfully deployed the management landing zone, you can move to the next step: + +{% if config.platform_core_setup.enterprise_scale.enable %} + [Deploy Enterprise Scale](../../level1/eslz/readme.md) +{% else %} + [Deploy Connectivity](../../level2/connectivity/readme.md) +{% endif %} diff --git a/templates/platform/level1/subscriptions/ansible.yaml b/templates/platform/level1/subscriptions/ansible.yaml new file mode 100644 index 000000000..4ac3f8907 --- /dev/null +++ b/templates/platform/level1/subscriptions/ansible.yaml @@ -0,0 +1,88 @@ +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + register: level1_subscriptions + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + +- name: "[{{ level }}-{{ base_folder }}] generate configuration files." + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.tfvars.j2" + - "{{ level }}/{{ base_folder }}/*.md" + +# Create the subscriptions +- name: "[{{ level }}-{{ base_folder }}] Create subscriptions." 
+ when: deploy_subscriptions | bool + shell: | + /tf/rover/rover.sh \ + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_subscription_creation_platform.vault_uri }} \ + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.platform_subscriptions.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -a apply + +- name: "[{{ level }}-{{ base_folder }}] Get latest cache folder" + set_fact: + job_cache_base_path: "/home/vscode/.terraform.cache" + +- name: "[{{ level }}-{{ base_folder }}] Get tfstate details" + register: subscription_tfstate_file_name + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates.platform.platform_subscriptions.level }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{ subscription_tfstate_file_name.stdout }}" + +- name: "[{{ level }}-{{ base_folder }}] Download tfstate details" + register: platform_subscriptions_tfstate_exists + ignore_errors: true + shell: | + az storage blob download \ + --name "{{ config.tfstates.platform.platform_subscriptions.tfstate }}" \ + --account-name "{{ subscription_tfstate_file_name.stdout }}" \ + --container-name "tfstate" \ + --auth-mode "login" \ + --file "{{ job_cache_base_path }}/{{ config.tfstates.platform.platform_subscriptions.tfstate }}" + +- name: "[{{ level }}-{{ base_folder }}] Get platform_subscriptions details" + shell: "cat {{ job_cache_base_path }}/{{ config.tfstates.platform.platform_subscriptions.tfstate }}" + register: platform_subscriptions + when: platform_subscriptions_tfstate_exists.rc == 0 + +- name: "[{{ level }}-{{ base_folder }}] Get platform_subscriptions json data" + when: platform_subscriptions_tfstate_exists.rc == 0 + set_fact: + platform_sub_jsondata: "{{ platform_subscriptions.stdout | from_json }}" + +- name: "[{{ level }}-{{ base_folder }}] Get subscriptions list" + when: platform_subscriptions_tfstate_exists.rc == 0 + set_fact: + platform_subscriptions_details: "{{ platform_sub_jsondata | json_query(path) }}" + vars: + path: 'outputs.objects.value.{{ config.tfstates.platform.platform_subscriptions.lz_key_name }}.subscriptions' + +- name: "[{{ level }}-{{ base_folder }}] cleanup" + when: platform_subscriptions_tfstate_exists.rc == 0 + file: + path: "{{ job_cache_base_path }}/{{ config.tfstates.platform.platform_subscriptions.tfstate }}" + state: absent + +- debug: + msg: "Platform subscriptions - {{ platform_subscriptions_details }}" + when: platform_subscriptions_tfstate_exists.rc == 0 diff --git a/templates/platform/level1/subscriptions/landingzone.tfvars.j2 b/templates/platform/level1/subscriptions/landingzone.tfvars.j2 new file mode 100644 index 000000000..fae07cba7 --- /dev/null +++ b/templates/platform/level1/subscriptions/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.launchpad.lz_key_name }}" + level = "{{ config.tfstates.platform.platform_subscriptions.level }}" + key = "{{ 
config.tfstates.platform.platform_subscriptions.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.launchpad.tfstate }}" + } + } +} diff --git a/templates/platform/level1/subscriptions/readme.md b/templates/platform/level1/subscriptions/readme.md new file mode 100644 index 000000000..dc36cd7b9 --- /dev/null +++ b/templates/platform/level1/subscriptions/readme.md @@ -0,0 +1,33 @@ + +### Platform subscriptions +Set-up the subscription delegations for platform and landingzone subscriptions + +```bash +# For manual bootstrap: +# Login to the subscription {{ config.caf_terraform.launchpad.subscription_name }} with the user {{ config.caf_terraform.billing_subscription_role_delegations.azuread_user_ea_account_owner }} +rover login -t {{ config.platform_identity.tenant_name }} -s {{ config.caf_terraform.launchpad.subscription_id }} + +rover \ +{% if platform_subscriptions_details.eslz is defined %} +{% if config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_subscription_creation_platform.vault_uri }} \ +{% endif %} +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.platform_subscriptions.tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.platform_subscriptions.tfstate }}.tfplan \ + -a plan + +``` + + +# Next steps + +When you have successfully deployed the subscriptions management landing zone, you can move to the next step: + +[Deploy the management services](../../level1/management/readme.md) \ No newline at end of file diff --git a/templates/platform/level1/subscriptions/subscriptions.tfvars.j2 b/templates/platform/level1/subscriptions/subscriptions.tfvars.j2 new file mode 100644 index 000000000..b851adece --- /dev/null +++ b/templates/platform/level1/subscriptions/subscriptions.tfvars.j2 @@ -0,0 +1,38 @@ +# +# Execute the following command to get the billing_account_name and management_group_id +# +# az rest --method get --uri https://management.azure.com/providers/Microsoft.Billing/billingaccounts/?api-version=2020-05-01 +# +# To retrieve the first billing account +# +# billing_account_name=$(az rest --method get --uri https://management.azure.com/providers/Microsoft.Billing/billingaccounts?api-version=2020-05-01 --query "value[?properties.agreementType=='EnterpriseAgreement'].{name:name}" -o tsv) +# +# enrollment_account_name=$(az rest --method get --uri https://management.azure.com/providers/Microsoft.Billing/billingaccounts?api-version=2020-05-01 --query "value[?properties.agreementType=='EnterpriseAgreement'].{name:properties.enrollmentAccounts[0].name}" -o tsv) +# + +subscriptions = { + + {{ config.tfstates.platform.launchpad.lz_key_name }} = { + name = "{{ config.caf_terraform.launchpad.subscription_name }}" + alias = "{{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-launchpad" + subscription_id = "{{ config.caf_terraform.launchpad.subscription_id }}" + } +{% for key in subscriptions.platform_subscriptions.keys() %} + {{ key }} = { + name = "{{ subscriptions.platform_subscriptions[key].name 
}}" +{% if subscriptions.platform_subscriptions[key].alias is defined %} + alias = "{{ config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ subscriptions.platform_subscriptions[key].alias }}" +{% endif %} +{% if subscriptions.platform_subscriptions[key].billing_account_name is defined %} + billing_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.billing_account_name }}" + enrollment_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.enrollment_account_name }}" + workload = "{{ subscriptions.platform_subscriptions[key].workload | default('DevTest') }}" +{% else %} +{% if subscriptions.platform_subscriptions[key].subscription_id is defined %} + subscription_id = "{{ subscriptions.platform_subscriptions[key].subscription_id }}" +{% endif %} +{% endif %} + } +{% endfor %} + +} \ No newline at end of file diff --git a/templates/platform/level2/ansible_deployment.yaml b/templates/platform/level2/ansible_deployment.yaml new file mode 100644 index 000000000..c9b3967f4 --- /dev/null +++ b/templates/platform/level2/ansible_deployment.yaml @@ -0,0 +1,29 @@ + +- name: "{{display_name}} Load variable for deployments {{deployment}} - {{resource_folder}}" + include_vars: + name: deployments + dir: "{{config_folder}}/deployments/{{deployment}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "{{ files_matching }}" + +- name: "{{display_name}} Load variable for resources {{deployment}} - {{resource_folder}}" + include_vars: + name: resources + dir: "{{config_folder}}/deployments/{{deployment}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "{{ files_matching }}" + +- name: "{{display_name}} - Content of resources - {{deployment}}" + debug: + msg: "{{resources}}" + +- name: "{{display_name}} - {{deployment}}" + include_tasks: "{{ level }}/ansible_resource_deployment.yaml" + when: + - config.tfstates.platform[resource_folder] is defined + - resources.deployments.keys is defined + loop: "{{ resources.deployments.keys() }}" + loop_control: + loop_var: subscription_key \ No newline at end of file diff --git a/templates/platform/level2/ansible_resource_deployment.yaml b/templates/platform/level2/ansible_resource_deployment.yaml new file mode 100644 index 000000000..c12ab5523 --- /dev/null +++ b/templates/platform/level2/ansible_resource_deployment.yaml @@ -0,0 +1,31 @@ + +- name: "{{display_name}} - {{level}} - {{subscription_key}} - set destination paths" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{resource_folder}}/{{ deployment }}" + +- name: "{{display_name}} - {{level}} - {{subscription_key}} - Clean-up directory - {{ destination_path }}" + file: + path: "{{ destination_path }}" + state: absent + when: config.configuration_folders.asvm.cleanup_destination | default(true) | bool + +- name: "{{display_name}} - {{level}} - {{subscription_key}} - Creates directory - {{ destination_path }}" + file: + path: "{{ destination_path }}" + state: directory + +- name: "{{display_name}} - {{level}} - {{subscription_key}} - {{ deployment }} - Tfvars" + include_tasks: "{{ level }}/ansible_resource_type.yaml" + loop: "{{ resources.subscriptions[subscription_key].keys() }}" + loop_control: + loop_var: resource_type + + +- name: "{{display_name}} - {{level}} - {{subscription_key}} - {{ deployment }} - Overrides" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | 
basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/{{resource_folder}}/*.j2" + - "{{ level }}/{{ base_folder }}/{{resource_folder}}/*.md" diff --git a/templates/platform/level2/ansible_resource_type.yaml b/templates/platform/level2/ansible_resource_type.yaml new file mode 100644 index 000000000..8269d046c --- /dev/null +++ b/templates/platform/level2/ansible_resource_type.yaml @@ -0,0 +1,7 @@ +- name: "{{display_name}} - {{ level }} - {{subscription_key}} - {{ deployment }} - {{ resource_type }}" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/{{resource_type}}.tfvars.j2" diff --git a/templates/platform/level2/asvm/ansible.yaml b/templates/platform/level2/asvm/ansible.yaml new file mode 100644 index 000000000..2135fd666 --- /dev/null +++ b/templates/platform/level2/asvm/ansible.yaml @@ -0,0 +1,92 @@ +- name: "[{{ level }}-{{ base_folder }}] - Set variables" + set_fact: + destination_path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: "[{{ level }}-{{ base_folder }}] - Load variable for launchpad" + include_vars: + name: resources + dir: "{{config_folder}}" + depth: 1 + ignore_unknown_extensions: true + files_matching: "asvm.yaml" + +- debug: + msg: "{{resources}}" + +- name: "[{{ level }}-{{ base_folder }}] Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ base_folder }}] Creates directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + state: directory + + +- name: "[{{ level }}-{{ base_folder }}] Get level2 tfstate account name" + register: level2_storage_account + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates.platform.asvm.level }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{name:name}[0]" -o json | jq -r .name + +- debug: + msg: "{{level2_storage_account}}" + + +- name: "[{{ level }}-{{ base_folder }}] Get level2 tfstate account name" + register: level2_storage_rg + shell: | + az storage account list \ + --subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + --query "[?tags.caf_tfstate=='{{ config.tfstates.platform.asvm.level }}' && tags.caf_environment=='{{ config.caf_terraform.launchpad.caf_environment }}'].{resourceGroup:resourceGroup}[0]" -o json | jq -r .resourceGroup + +- debug: + msg: "{{level2_storage_account}}" + + +# +# resource_groups +# +- name: "[{{ level }}-{{ base_folder }}] - resource_groups" + when: + - resources.subscriptions[subscription_key].resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/resource_groups.tfvars.j2" + +# +# azuread_groups +# +- name: "[{{ level }}-{{ base_folder }}] - azuread_groups" + when: + - resources.subscriptions[subscription_key].azuread_groups is defined + ansible.builtin.template: + src: "{{ item 
}}" + dest: "{{ destination_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ resource_template_folder }}/azuread_groups.tfvars.j2" + +- name: "[{{ level }}-{{ base_folder }}] asvm" + ansible.builtin.template: + src: "{{ level }}/{{ base_folder }}/{{ item }}.tfvars.j2" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item }}.tfvars" + force: yes + loop: + - dynamic_secrets + - keyvaults + - landingzone + - role_mappings + - storage_accounts + +- name: "[{{ level }}-{{ base_folder }}] launchpad - readme" + ansible.builtin.template: + src: "{{ level }}/{{ base_folder }}/readme.md" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/readme.md" + force: yes \ No newline at end of file diff --git a/templates/platform/level2/asvm/dynamic_secrets.tfvars.j2 b/templates/platform/level2/asvm/dynamic_secrets.tfvars.j2 new file mode 100644 index 000000000..8ef44fdd7 --- /dev/null +++ b/templates/platform/level2/asvm/dynamic_secrets.tfvars.j2 @@ -0,0 +1,51 @@ + +# Store output attributes into keyvault secret +# Those values are used by the rover to connect the current remote state and +# identity the lower level +dynamic_keyvault_secrets = { + level3 = { + lower_stg = { + secret_name = "lower-storage-account-name" + value = "{{level2_storage_account.stdout}}" + } + lower_rg = { + secret_name = "lower-resource-group-name" + value = "{{level2_storage_rg.stdout}}" + } + subscription_id = { + output_key = "client_config" + attribute_key = "subscription_id" + secret_name = "subscription-id" + } + tenant_id = { + output_key = "client_config" + attribute_key = "tenant_id" + secret_name = "tenant-id" + } + } + level4 = { + lower_stg = { + output_key = "storage_accounts" + resource_key = "level3" + attribute_key = "name" + secret_name = "lower-storage-account-name" + } + lower_rg = { + output_key = "resource_groups" + resource_key = "level3" + attribute_key = "name" + secret_name = "lower-resource-group-name" + } + subscription_id = { + output_key = "client_config" + attribute_key = "subscription_id" + secret_name = "subscription-id" + } + tenant_id = { + output_key = "client_config" + attribute_key = "tenant_id" + secret_name = "tenant-id" + } + } + +} \ No newline at end of file diff --git a/templates/platform/level2/asvm/keyvaults.tfvars.j2 b/templates/platform/level2/asvm/keyvaults.tfvars.j2 new file mode 100644 index 000000000..c73a72a5e --- /dev/null +++ b/templates/platform/level2/asvm/keyvaults.tfvars.j2 @@ -0,0 +1,85 @@ + +keyvaults = { + level3 = { + name = "{{ resources.subscriptions[subscription_key].keyvaults.level3.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults.level3.resource_group_key }}" + sku_name = "{{ config.platform_core_setup.sku.keyvault}}" + tags = { + tfstate = "level3" + environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_tfstate = "level3" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + } + + creation_policies = { + subscription_creation_landingzones = { + object_id = "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" + secret_permissions = ["Get"] + } + level0 = { + object_id = "{{launchpad_azuread_groups.level0.id}}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% if config.platform_identity.azuread_identity_mode != 
'logged_in_user' %} + caf_platform_maintainers = { + object_id = "{{launchpad_azuread_groups.caf_platform_maintainers.id}}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + # if the key is set to "logged_in_user", add the user running terraform to the keyvault policy + # More examples in /examples/keyvault + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} + } + + } + + level4 = { + name = "{{ resources.subscriptions[subscription_key].keyvaults.level4.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults.level4.resource_group_key }}" + sku_name = "{{ config.platform_core_setup.sku.keyvault}}" + tags = { + tfstate = "level4" + environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_tfstate = "level4" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + } + + creation_policies = { + subscription_creation_landingzones = { + object_id = "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" + secret_permissions = ["Get"] + } + caf_ac_landingzone_maintainers_non_prod = { + azuread_group_key = "caf_ac_landingzone_maintainers_non_prod" + secret_permissions = ["Get"] + } + caf_ac_landingzone_maintainers_prod = { + azuread_group_key = "caf_ac_landingzone_maintainers_prod" + secret_permissions = ["Get"] + } + level0 = { + object_id = "{{launchpad_azuread_groups.level0.id}}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% if config.platform_identity.azuread_identity_mode != 'logged_in_user' %} + caf_platform_maintainers = { + object_id = "{{launchpad_azuread_groups.caf_platform_maintainers.id}}" + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + # if the key is set to "logged_in_user", add the user running terraform to the keyvault policy + # More examples in /examples/keyvault + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} + } + + } + +} diff --git a/templates/platform/level2/asvm/landingzone.tfvars.j2 b/templates/platform/level2/asvm/landingzone.tfvars.j2 new file mode 100644 index 000000000..9c5443ef7 --- /dev/null +++ b/templates/platform/level2/asvm/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "{{ caf_terraform.asvm.backend_type | default("azurerm")}}" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "{{ config.tfstates.platform.asvm.level }}" + key = "{{ config.tfstates.platform.asvm.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} \ No newline at end of file diff --git a/templates/platform/level2/asvm/readme.md b/templates/platform/level2/asvm/readme.md new file mode 100644 index 000000000..3e348beca --- /dev/null +++ b/templates/platform/level2/asvm/readme.md @@ -0,0 +1,26 @@ +# Azure Subscription Vending Machine (asvm) + +```bash +# login with a user who is a member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ connectivity_express_routes.gitops.caf_landingzone_branch }} + +rover \
+{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_level0.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ + -tfstate {{ config.tfstates.platform.asvm.tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.asvm.tfstate }}.tfplan \ + -a plan + +``` + diff --git a/templates/platform/level2/asvm/role_mappings.tfvars.j2 b/templates/platform/level2/asvm/role_mappings.tfvars.j2 new file mode 100644 index 000000000..8691e97ca --- /dev/null +++ b/templates/platform/level2/asvm/role_mappings.tfvars.j2 @@ -0,0 +1,83 @@ + +# +# Services supported: subscriptions, storage accounts and resource groups +# Can assign roles to: AD groups, AD object ID, AD applications, Managed identities +# + +role_mapping = { + built_in_role_mapping = { + resource_groups = { + level3 = { + "Reader" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.caf_platform_maintainers.id}}", // caf_platform_maintainers + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + azuread_groups = { + keys = [ + "caf_ac_landingzone_maintainers_non_prod", + "caf_ac_landingzone_maintainers_prod" + ] + } + } + } + level4 = { + "Reader" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.caf_platform_maintainers.id}}", // caf_platform_maintainers + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + azuread_groups = { + keys = [ + "caf_ac_landingzone_maintainers_non_prod", + "caf_ac_landingzone_maintainers_prod" + ] + } + } + } + } + + storage_accounts = { + level3 = { + "Storage Blob Data Contributor" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.caf_platform_maintainers.id}}", // caf_platform_maintainers + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + } + "Owner" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + } + } + + level4 = { + "Storage Blob Data Contributor" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.caf_platform_maintainers.id}}", // caf_platform_maintainers + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + } + "Owner" = { + object_ids = { + keys = [ + "{{launchpad_azuread_groups.subscription_creation_landingzones.id}}" // subscription_creation_landingzones + ] + } + } + } + + } + } +} diff --git a/templates/platform/level2/asvm/storage_accounts.tfvars.j2 b/templates/platform/level2/asvm/storage_accounts.tfvars.j2 new file mode 100644 index 000000000..11b1bc99b --- /dev/null +++ b/templates/platform/level2/asvm/storage_accounts.tfvars.j2 @@ -0,0 +1,54 @@ + +storage_accounts = { + level3 = { + name = "{{ resources.subscriptions[subscription_key].storage_accounts.level3.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].storage_accounts.level3.resource_group_key }}" + 
account_kind = "BlobStorage" + account_tier = "Standard" + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + tags = { + ## Those tags must never be changed after being set as they are used by the rover to locate the launchpad and the tfstates. + # Only adjust the environment value at creation time + tfstate = "level3" + environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + launchpad = "launchpad" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_launchpad = "launchpad" + caf_tfstate = "level3" + ## + } + blob_properties = { + versioning_enabled = {{ config.caf_terraform.launchpad.blob_versioning_enabled | string | lower | default('true') }} + container_delete_retention_policy = {{ config.caf_terraform.launchpad.container_delete_retention_policy | default(7) }} + delete_retention_policy = {{ config.caf_terraform.launchpad.delete_retention_policy | default(7) }} + } + containers = { + {{ config.tfstates.platform.asvm.workspace | default('tfstate') }} = { + name = "{{ config.tfstates.platform.asvm.workspace | default('tfstate') }}" + } + } + } + + level4 = { + name = "{{ resources.subscriptions[subscription_key].storage_accounts.level4.name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].storage_accounts.level4.resource_group_key }}" + account_kind = "BlobStorage" + account_tier = "Standard" + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + tags = { + # Those tags must never be changed while set as they are used by the rover to locate the launchpad and the tfstates. + tfstate = "level4" + environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + launchpad = "launchpad" + caf_environment = "{{ config.caf_terraform.launchpad.caf_environment }}" + caf_launchpad = "launchpad" + caf_tfstate = "level4" + } + blob_properties = { + versioning_enabled = {{ config.caf_terraform.launchpad.blob_versioning_enabled | string | lower | default('true') }} + container_delete_retention_policy = {{ config.caf_terraform.launchpad.container_delete_retention_policy | default(7) }} + delete_retention_policy = {{ config.caf_terraform.launchpad.delete_retention_policy | default(7) }} + } + } + +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/ansible.yaml b/templates/platform/level2/connectivity/ansible.yaml new file mode 100644 index 000000000..119700857 --- /dev/null +++ b/templates/platform/level2/connectivity/ansible.yaml @@ -0,0 +1,86 @@ +- name: Creates {{ base_folder }} directory structure + shell: mkdir -p "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +# - name: "{{ base_folder }} - Readme" +# ansible.builtin.template: +# src: "{{ item }}" +# dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" +# force: yes +# with_fileglob: +# - "{{ level }}/{{ base_folder }}/*.md" + +- name: "{{ base_folder }} - Virtual WAN" + include_tasks: "{{ level }}/{{ base_folder }}/{{ folder_name }}/ansible.yaml" + loop: + - virtual_wan + loop_control: + loop_var: folder_name + +- name: Virtual Hubs + include_tasks: "{{ level }}/{{ base_folder }}/virtual_hub/ansible.yaml" + when: + - connectivity_virtual_hub.virtual_hubs is defined + loop: "{{ config.tfstates.platform.virtual_hubs.keys() }}" + loop_control: + loop_var: 
virtual_hub + +- name: VPN Sites + include_tasks: "{{ level }}/{{ base_folder }}/vpn_site/ansible.yaml" + when: + - connectivity_vpn_sites.vpn_sites is defined + loop: "{{ config.tfstates.platform.vpn_sites.keys() }}" + loop_control: + loop_var: site + +- name: Express Route Circuit + include_tasks: "{{ level }}/{{ base_folder }}/express_route_circuit/ansible.yaml" + when: + - connectivity_express_routes.express_route_circuits is defined + loop: "{{ config.tfstates.platform.express_route_circuits.keys() }}" + loop_control: + loop_var: circuit + +- name: Express Route Circuit Peerings + include_tasks: "{{ level }}/{{ base_folder }}/express_route_circuit_peering/ansible.yaml" + when: + - connectivity_express_routes.express_route_circuits is defined + - connectivity_express_route_peerings.express_route_circuit_peerings is defined + loop: "{{ config.tfstates.platform.express_route_circuit_peerings.keys() }}" + loop_control: + loop_var: circuit + +- name: Private DNS Zones + include_tasks: "{{ level }}/ansible_deployment.yaml" + when: + - config.tfstates.platform.private_dns is defined + loop: "{{ config.tfstates.platform.private_dns.keys() }}" + loop_control: + loop_var: deployment + vars: + files_matching: "connectivity_private_dns.yaml|connectivity_private_dns.caf.yaml" + resource_folder: private_dns + display_name: Private DNS Zones + +- name: Firewall Policies + include_tasks: "{{ level }}/ansible_deployment.yaml" + when: + - config.tfstates.platform.azurerm_firewall_policies is defined + loop: "{{ config.tfstates.platform.azurerm_firewall_policies.keys() }}" + loop_control: + loop_var: deployment + vars: + files_matching: "connectivity_firewall_policies.yaml|connectivity_firewall_policies.caf.yaml" + resource_folder: azurerm_firewall_policies + display_name: Firewall Policies + +- name: Azure Firewalls + include_tasks: "{{ level }}/ansible_deployment.yaml" + when: + - config.tfstates.platform.azurerm_firewalls is defined + loop: "{{ config.tfstates.platform.azurerm_firewalls.keys() }}" + loop_control: + loop_var: deployment + vars: + files_matching: "connectivity_firewalls.yaml|connectivity_firewalls.caf.yaml" + resource_folder: azurerm_firewalls + display_name: Azure Firewalls \ No newline at end of file diff --git a/templates/platform/level2/connectivity/azurerm_firewall_policies/landingzone.tfvars.j2 b/templates/platform/level2/connectivity/azurerm_firewall_policies/landingzone.tfvars.j2 new file mode 100644 index 000000000..6b633635b --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewall_policies/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "level2" + key = "{{ config.tfstates.platform.azurerm_firewall_policies[deployment].lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/azurerm_firewall_policies/readme.md b/templates/platform/level2/connectivity/azurerm_firewall_policies/readme.md new file mode 100644 index 000000000..bf4d265a2 --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewall_policies/readme.md @@ -0,0 +1,36 @@ + +# Firewall Policy + +## Select the correct branch for the landingzones code + +Note you need to adjust the branch {{ resources.gitops.landingzones }} to deploy the connectivity services + 
+## {{ environment }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.azurerm_firewall_policies[deployment].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.azurerm_firewall_policies[deployment].tfstate }}.tfplan \ + -a plan + +``` diff --git a/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_aks.tfvars.j2 b/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_aks.tfvars.j2 new file mode 100644 index 000000000..393cf3264 --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_aks.tfvars.j2 @@ -0,0 +1,226 @@ +# Updated list https://docs.microsoft.com/en-us/azure/aks/limit-egress-traffic#required-outbound-network-rules-and-fqdns-for-aks-clusters + +azurerm_firewall_policy_rule_collection_groups = { + aks_egress = { + firewall_policy = { + lz_key = "caf_networking_firewall" + key = "{{ firewall_policy }}" + } + name = "firewall-policy-aks-egress" + priority = 600 + + application_rule_collections = { + aks = { + name = "aks_application" + priority = 10000 + action = "Allow" + rules = { + aks = { + name = "aks" + source_addresses = ["*"] + destination_fqdn_tags = [ + "AzureKubernetesService", + ] + destination_fqdns = [ + "*.azmk8s.io", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + monitoring = { + name = "azure-monitor" + source_addresses = ["*"] + destination_fqdns = [ + "dc.services.visualstudio.com", + "*.ods.opinsights.azure.com", + "*.oms.opinsights.azure.com", + "*.monitoring.azure.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + policy = { + name = "azure-policy" + source_addresses = ["*"] + destination_fqdns = [ + "data.policy.core.windows.net", + "store.policy.core.windows.net", + "dc.services.visualstudio.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + } + } + packages = { + name = "packages" + priority = 11000 + action = "Allow" + rules = { + nvidia = { + name = "nvidia-gpu" + source_addresses = ["*"] + destination_fqdns = [ + "nvidia.github.io", + "us.download.nvidia.com", + "apt.dockerproject.org", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + ubuntu = { + name = "ubuntu" + source_addresses = ["*"] + destination_fqdns = [ + "security.ubuntu.com", + "azure.archive.ubuntu.com", + "archive.ubuntu.com", + "changelogs.ubuntu.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + http = { + 
port = "80" + type = "Http" + } + } + }, + docker = { + name = "docker" + source_addresses = ["*"] + destination_fqdns = [ + "download.docker.com", # Docker + "*.docker.io", # Docker images + "*.docker.com" # Docker registry + ] + protocols = { + http = { + port = "443" + type = "Https" + } + } + }, + tools = { + name = "tools" + source_addresses = ["*"] + destination_fqdns = [ + "acs-mirror.azureedge.net", + "packages.microsoft.com", + "azurecliprod.blob.core.windows.net", # Azure cli + "packages.cloud.google.com", # kubectl + "apt.kubernetes.io", # Ubuntu packages for kubectl + "*.snapcraft.io", # snap to install kubectl + ] + protocols = { + http = { + port = "443" + type = "Https" + } + } + }, + github = { + name = "github" + source_addresses = ["*"] + destination_fqdns = [ + "api.github.com", + "github.com", + "github-production-release-asset-2e65be.s3.amazonaws.com", + ] + protocols = { + http = { + port = "443" + type = "Https" + } + } + }, + management = { + name = "management" + source_addresses = ["*"] + destination_fqdns = [ + "login.microsoftonline.com", + "management.azure.com", + "*.mcr.microsoft.com", + "*.data.mcr.microsoft.com", + ] + protocols = { + http = { + port = "443" + type = "Https" + } + } + } + } // rules + } // packages + } // application_rule_collections + + network_rule_collections = { + aks = { + name = "aks_network" + action = "Allow" + priority = 1000 + rules = { + ntp = { + name = "ntp" + source_addresses = ["*"] + destination_ports = [ + "123", + ] + destination_addresses = [ + "*" + ] + protocols = [ + "UDP", + ] + }, + DNS = { + name = "DNS" + source_addresses = ["*"] + destination_ports = [ + "53", + ] + destination_addresses = [ + "*" + ] + protocols = [ + "UDP", + ] + }, + monitor = { + name = "monitor" + source_addresses = ["*"] + destination_ports = [ + "443", + ] + destination_addresses = [ + "AzureMonitor" + ] + protocols = [ + "TCP", + ] + } + } + } + } // network_rule_collection + + } + +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_root.tfvars.j2 b/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_root.tfvars.j2 new file mode 100644 index 000000000..26233c1d0 --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewall_policies/rule_collection_groups/rule_collection_groups_root.tfvars.j2 @@ -0,0 +1,128 @@ +# Updated list https://docs.microsoft.com/en-us/azure/aks/limit-egress-traffic#required-outbound-network-rules-and-fqdns-for-aks-clusters + +azurerm_firewall_policy_rule_collection_groups = { + root = { + firewall_policy = { + key = "{{ firewall_policy }}" + } + name = "firewall-policy-root-egress" + priority = 500 + + application_rule_collections = { + egress_443 = { + name = "egress 443" + priority = 10000 + action = "Allow" + rules = { + 443 = { + name = "443" + source_addresses = ["*"] + destination_fqdns = ["*"] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + monitoring = { + name = "azure-monitor" + source_addresses = ["*"] + destination_fqdns = [ + "dc.services.visualstudio.com", + "*.ods.opinsights.azure.com", + "*.oms.opinsights.azure.com", + "*.monitoring.azure.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + policy = { + name = "azure-policy" + source_addresses = ["*"] + destination_fqdns = [ + "data.policy.core.windows.net", + "store.policy.core.windows.net", + 
"dc.services.visualstudio.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + } + } + } + } + security = { + name = "security packages" + priority = 11000 + action = "Allow" + rules = { + ubuntu = { + name = "ubuntu" + source_addresses = ["*"] + destination_fqdns = [ + "security.ubuntu.com", + "azure.archive.ubuntu.com", + "archive.ubuntu.com", + "changelogs.ubuntu.com", + ] + protocols = { + https = { + port = "443" + type = "Https" + } + http = { + port = "80" + type = "Http" + } + } + } + } // rules + } // packages + } // application_rule_collections + + network_rule_collections = { + services = { + name = "services" + action = "Allow" + priority = 1000 + rules = { + ntp = { + name = "ntp" + source_addresses = ["*"] + destination_ports = [ + "123", + ] + destination_addresses = [ + "*" + ] + protocols = [ + "UDP", + ] + }, + DNS = { + name = "DNS" + source_addresses = ["*"] + destination_ports = [ + "53", + ] + destination_addresses = [ + "*" + ] + protocols = [ + "UDP", + ] + } + } + } + } // network_rule_collection + + } + +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/azurerm_firewalls/landingzone.tfvars.j2 b/templates/platform/level2/connectivity/azurerm_firewalls/landingzone.tfvars.j2 new file mode 100644 index 000000000..6568d80d4 --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewalls/landingzone.tfvars.j2 @@ -0,0 +1,18 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.virtual_hubs[deployment].lz_key_name }}" + level = "{{ config.tfstates.platform.azurerm_firewalls[deployment].level }}" + key = "{{ config.tfstates.platform.azurerm_firewalls[deployment].lz_key_name }}" + tfstates = { + # Virtual Hub + {{ config.tfstates.platform.virtual_hubs[deployment].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_hubs[deployment].tfstate }}" + } + # firewall policies + {{ config.tfstates.platform.azurerm_firewall_policies[deployment].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.azurerm_firewall_policies[deployment].tfstate }}" + } + } +} diff --git a/templates/platform/level2/connectivity/azurerm_firewalls/readme.md b/templates/platform/level2/connectivity/azurerm_firewalls/readme.md new file mode 100644 index 000000000..ebf4e3f3e --- /dev/null +++ b/templates/platform/level2/connectivity/azurerm_firewalls/readme.md @@ -0,0 +1,37 @@ + +# Azure Firewalls + +## Select the correct branch for the landingzones code + +Note you need to adjust the branch {{ resources.gitops.landingzones }} to deploy the connectivity services + +## {{ environment }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} 
\ +{% endif %} + -tfstate {{ config.tfstates.platform.azurerm_firewalls[deployment].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.azurerm_firewalls[deployment].tfstate }}.tfplan \ + -a plan + +``` + diff --git a/templates/platform/level2/connectivity/express_route_circuit/ansible.yaml b/templates/platform/level2/connectivity/express_route_circuit/ansible.yaml new file mode 100644 index 000000000..10950aaed --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/ansible.yaml @@ -0,0 +1,75 @@ +- name: Express_routes {{ circuit }} - Clean-up directory + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit/{{ circuit }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: Express_routes {{ circuit }} - Creates directory structure + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit/{{ circuit }}" + state: directory + +- name: Express_routes {{ circuit }} - variables + set_fact: + destination_path_resources: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit/{{ circuit }}" + +- name: Express_routes {{ circuit }} - readme + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit/{{ circuit }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit/*.md" + +# +# landingzone +# +- name: "[{{ level }}-{{ circuit }}] - express route - landingzone" + when: + - connectivity_express_routes.express_route_circuits is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit/landingzone.tfvars.j2" + +# +# resource_groups +# +- name: "[{{ level }}-{{ circuit }}] - express route - resource_groups" + when: + - connectivity_express_routes.resource_groups is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit/resource_groups.tfvars.j2" + +# +# express_route_circuits +# +- name: "[{{ level }}-{{ circuit }}] - express route - express_route_circuits" + when: + - connectivity_express_routes.express_route_circuits is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit/express_route_circuits.tfvars.j2" + +# +# express_route_circuit_authorizations +# +- name: "[{{ level }}-{{ circuit }}] - express route - express_route_circuit_authorizations" + when: + - connectivity_express_routes.express_route_circuit_authorizations is defined + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + 
with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit/express_route_circuit_authorizations.tfvars.j2" diff --git a/templates/platform/level2/connectivity/express_route_circuit/express_route_circuit_authorizations.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit/express_route_circuit_authorizations.tfvars.j2 new file mode 100644 index 000000000..9c696e921 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/express_route_circuit_authorizations.tfvars.j2 @@ -0,0 +1,9 @@ +express_route_circuit_authorizations = { +{% for key, value in connectivity_express_routes.express_route_circuit_authorizations[circuit].items() %} + {{ key }} = { + name = "{{ value.name }}" + resource_group_key = "{{ value.resource_group_key }}" + express_route_key = "{{ circuit }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/express_route_circuit/express_route_circuits.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit/express_route_circuits.tfvars.j2 new file mode 100644 index 000000000..274b28614 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/express_route_circuits.tfvars.j2 @@ -0,0 +1,18 @@ +express_route_circuits = { + {{ circuit }} = { + name = "{{ connectivity_express_routes.express_route_circuits[circuit].name }}" + resource_group_key = "{{ connectivity_express_routes.express_route_circuits[circuit].resource_group_key }}" + service_provider_name = "{{ connectivity_express_routes.express_route_circuits[circuit].service_provider_name }}" + peering_location = "{{ connectivity_express_routes.express_route_circuits[circuit].peering_location }}" + tier = "{{ connectivity_express_routes.express_route_circuits[circuit].tier }}" + family = "{{ connectivity_express_routes.express_route_circuits[circuit].family }}" + bandwidth_in_mbps = {{ connectivity_express_routes.express_route_circuits[circuit].bandwidth_in_mbps }} +{% if connectivity_express_routes.express_route_circuits[circuit].tags is defined %} + tags = { +{% for k_tag, tag in connectivity_express_routes.express_route_circuits[circuit].tags.items() %} + "{{ k_tag }}" = "{{ tag }}" + } +{% endfor %} +{% endif %} + } +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/express_route_circuit/landingzone.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit/landingzone.tfvars.j2 new file mode 100644 index 000000000..b5d785b79 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "{{ config.tfstates.platform.express_route_circuits[circuit].level }}" + key = "{{ config.tfstates.platform.express_route_circuits[circuit].lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} diff --git a/templates/platform/level2/connectivity/express_route_circuit/readme.md b/templates/platform/level2/connectivity/express_route_circuit/readme.md new file mode 100644 index 000000000..a8c1fd5cf --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/readme.md @@ -0,0 +1,37 @@ + +# Express Route + +## Select the correct branch for the landingzones code + +Note you need to adjust the branch {{ 
connectivity_express_routes.gitops.caf_landingzone_branch }} to deploy the connectivity services + +## {{ connectivity_express_routes.express_route_circuits[circuit].name }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ connectivity_express_routes.gitops.caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit/{{ circuit }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.express_route_circuits[circuit].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.express_route_circuits[circuit].tfstate }}.tfplan \ + -a plan + +``` + diff --git a/templates/platform/level2/connectivity/express_route_circuit/resource_groups.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit/resource_groups.tfvars.j2 new file mode 100644 index 000000000..c5ea12808 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit/resource_groups.tfvars.j2 @@ -0,0 +1,6 @@ +resource_groups = { + {{ circuit }} = { + name = "{{ connectivity_express_routes.resource_groups[circuit].name }}" + region = "{{ connectivity_express_routes.resource_groups[circuit].region_key }}" + } +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/express_route_circuit_peering/ansible.yaml b/templates/platform/level2/connectivity/express_route_circuit_peering/ansible.yaml new file mode 100644 index 000000000..faf21e149 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit_peering/ansible.yaml @@ -0,0 +1,45 @@ +- name: "[{{ level }}-{{ circuit }}] - express route peering - Clean-up directory" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit_peering/{{ circuit }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: "[{{ level }}-{{ circuit }}] - express route peering - Creates directory structure" + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit_peering/{{ circuit }}" + state: directory + +- name: "[{{ level }}-{{ circuit }}] - express route peering - variables" + set_fact: + destination_path_resources: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit_peering/{{ circuit }}" + +- name: "[{{ level }}-{{ circuit }}] - express route peering - readme" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ 
config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit_peering/{{ circuit }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit_peering/*.md" + +# +# landingzone +# +- name: "[{{ level }}-{{ circuit }}] - express route peering - landingzone" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit_peering/landingzone.tfvars.j2" + +# +# express_route_circuit_peerings +# +- name: "[{{ level }}-{{ circuit }}] - express route peering - express_route_circuit_peerings" + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_path_resources }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/express_route_circuit_peering/express_route_circuit_peerings.tfvars.j2" diff --git a/templates/platform/level2/connectivity/express_route_circuit_peering/express_route_circuit_peerings.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit_peering/express_route_circuit_peerings.tfvars.j2 new file mode 100644 index 000000000..8b9b36de9 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit_peering/express_route_circuit_peerings.tfvars.j2 @@ -0,0 +1,17 @@ +express_route_circuit_peerings = { +{% for key, value in connectivity_express_route_peerings.express_route_circuit_peerings[circuit].items() %} + {{ key }} = { + express_route = { + lz_key = "{{ value.express_route.lz_key }}" + key = "{{ value.express_route.key }}" + } + peering_type = "{{ value.peering_type }}" + primary_peer_address_prefix = "{{ value.primary_peer_address_prefix }}" + secondary_peer_address_prefix = "{{ value.secondary_peer_address_prefix }}" + vlan_id = "{{ value.vlan_id }}" +{% if value.peer_asn is defined %} + peer_asn = "{{ value.peer_asn }}" +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/express_route_circuit_peering/landingzone.tfvars.j2 b/templates/platform/level2/connectivity/express_route_circuit_peering/landingzone.tfvars.j2 new file mode 100644 index 000000000..a6e040839 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit_peering/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.express_route_circuits[circuit].lz_key_name }}" + level = "{{ config.tfstates.platform.express_route_circuit_peerings[circuit].level }}" + key = "{{ config.tfstates.platform.express_route_circuit_peerings[circuit].lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.express_route_circuits[circuit].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.express_route_circuits[circuit].tfstate }}" + } + } +} diff --git a/templates/platform/level2/connectivity/express_route_circuit_peering/readme.md b/templates/platform/level2/connectivity/express_route_circuit_peering/readme.md new file mode 100644 index 000000000..0d661fa44 --- /dev/null +++ b/templates/platform/level2/connectivity/express_route_circuit_peering/readme.md @@ -0,0 +1,31 @@ + +# Express Route Circuit Peerings for {{ circuit }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ 
config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ connectivity_express_routes.gitops.caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/express_route_circuit_peering/{{ circuit }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.express_route_circuit_peerings[circuit].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.express_route_circuit_peerings[circuit].tfstate }}.tfplan \ + -a plan + +``` + diff --git a/templates/platform/level2/connectivity/private_dns/landingzone.tfvars.j2 b/templates/platform/level2/connectivity/private_dns/landingzone.tfvars.j2 new file mode 100644 index 000000000..cf533e078 --- /dev/null +++ b/templates/platform/level2/connectivity/private_dns/landingzone.tfvars.j2 @@ -0,0 +1,20 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" + level = "{{ config.tfstates.platform.private_dns[deployment].level }}" + key = "{{ config.tfstates.platform.private_dns[deployment].lz_key_name }}" + tfstates = { +{% if config.tfstates.platform.azurerm_firewalls is defined %} + # Firewall + {{ config.tfstates.platform.azurerm_firewalls[deployment].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.azurerm_firewalls[deployment].tfstate }}" + } +{% endif %} + # Identity Level2 + {{ config.tfstates.platform.identity_level2[deployment].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.identity_level2[deployment].tfstate }}" + } + } +} diff --git a/templates/platform/level2/connectivity/private_dns/readme.md b/templates/platform/level2/connectivity/private_dns/readme.md new file mode 100644 index 000000000..4af38984f --- /dev/null +++ b/templates/platform/level2/connectivity/private_dns/readme.md @@ -0,0 +1,37 @@ + +# Private DNS Zones + +## Select the correct branch for the landingzones code + +Note you need to adjust the branch {{ resources.gitops.landingzones }} to deploy the connectivity services + +## {{ environment }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + 
-target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \
+{% else %}
+  -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \
+{% endif %}
+  -tfstate {{ config.tfstates.platform.private_dns[deployment].tfstate }} \
+  -env {{ config.caf_terraform.launchpad.caf_environment }} \
+  -level {{ level }} \
+  -p ${TF_DATA_DIR}/{{ config.tfstates.platform.private_dns[deployment].tfstate }}.tfplan \
+  -a plan
+
+```
+
diff --git a/templates/platform/level2/connectivity/readme.md b/templates/platform/level2/connectivity/readme.md
new file mode 100644
index 000000000..a74d2129b
--- /dev/null
+++ b/templates/platform/level2/connectivity/readme.md
@@ -0,0 +1,151 @@
+
+# Connectivity
+You have selected the vwan networking option to build your Enterprise Scale platform. The following instructions guide you through the steps to follow.
+
+## Select the correct branch for the landingzones code
+
+Note you need to adjust the branch {{ config.gitops.caf_landingzone_branch }} to deploy the connectivity services
+
+{% for folder_name in folders %}
+## Virtual Wan
+
+```bash
+# login with a user member of the caf-platform-maintainers group
+rover login -t {{ config.platform_identity.tenant_name }}
+
+cd {{ destination_base }}/landingzones
+git fetch origin
+git checkout {{ config.gitops.caf_landingzone_branch }}
+
+rover \
+{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %}
+  --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \
+{% endif %}
+  -lz {{ destination_base }}/landingzones/caf_solution \
+  -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ folder_name }} \
+  -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \
+{% if platform_subscriptions_details is defined %}
+  -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \
+{% else %}
+  -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \
+{% endif %}
+  -tfstate {{ config.tfstates.platform[folder_name].tfstate }} \
+  -env {{ config.caf_terraform.launchpad.caf_environment }} \
+  -level {{ level }} \
+  -p ${TF_DATA_DIR}/{{ config.tfstates.platform[folder_name].tfstate }}.tfplan \
+  -a plan
+
+```
+{% endfor %}
+
+## Virtual hubs
+
+{% for virtual_hub in tfstates.virtual_hubs.keys() %}
+### {{ virtual_hub }}
+
+```bash
+# login with a user member of the caf-platform-maintainers group
+rover login -t {{ config.platform_identity.tenant_name }}
+
+cd {{ destination_base }}/landingzones
+git fetch origin
+git checkout {{ config.gitops.caf_landingzone_branch }}
+
+rover \
+{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %}
+  --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \
+{% endif %}
+  -lz {{ destination_base }}/landingzones/caf_solution \
+  -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/virtual_hubs/{{ virtual_hub }} \
+  -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \
+{% if platform_subscriptions_details is defined %}
+  -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \
+{% else %}
+  -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \
+{% endif %}
+  -tfstate {{
config.tfstates.platform.virtual_hubs[virtual_hub].tfstate }} \ + -log-severity ERROR \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.virtual_hubs[virtual_hub].tfstate }}.tfplan \ + -a plan + + +``` +{% endfor %} + +{% if connectivity_vpn_sites.vpn_sites is defined %} +## Virtual Hub VPN Sites + +{% for vpnsite in connectivity_vpn_sites.vpn_sites.keys() %} +### {{ vpnsite }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ config.gitops.caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/vpn_sites/{{ vpnsite }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.vpn_sites[vpnsite].tfstate }} \ + -log-severity ERROR \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.vpn_sites[vpnsite].tfstate }}.tfplan \ + -a plan + + +``` +{% endfor %} +{% endif %} + +{% if tfstates.firewall_policies is defined %} +## Firewall policies + +{% for firewall_policy in tfstates.firewall_policies.keys() %} +### {{ firewall_policy }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ config.gitops.caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/firewall_policies/{{ firewall_policy }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.firewall_policies[firewall_policy].tfstate }} \ + -log-severity ERROR \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.firewall_policies[firewall_policy].tfstate }}.tfplan \ + -a plan + + +``` +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/virtual_hub/ansible.yaml b/templates/platform/level2/connectivity/virtual_hub/ansible.yaml new file mode 100644 index 000000000..1ac4b4ca5 --- 
/dev/null +++ b/templates/platform/level2/connectivity/virtual_hub/ansible.yaml @@ -0,0 +1,20 @@ +- name: Virtual_hubs {{ virtual_hub }} - Clean-up directory + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/virtual_hubs/{{ virtual_hub }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: Virtual_hubs {{ virtual_hub }} - Creates directory structure + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/virtual_hubs/{{ virtual_hub }}" + state: directory + +- name: Virtual_hubs {{ virtual_hub }} - Tfvars + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/virtual_hubs/{{ virtual_hub }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/virtual_hub/*.j2" + - "{{ level }}/{{ base_folder }}/virtual_hub/*.md" diff --git a/templates/platform/level2/connectivity/virtual_hub/configuration.tfvars.j2 b/templates/platform/level2/connectivity/virtual_hub/configuration.tfvars.j2 new file mode 100644 index 000000000..6a7990e31 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_hub/configuration.tfvars.j2 @@ -0,0 +1,31 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" + level = "{{ config.tfstates.platform.virtual_hubs[virtual_hub].level }}" + key = "{{ config.tfstates.platform.virtual_hubs[virtual_hub].lz_key_name }}" + tfstates = { + # Virtual WAN + {{ config.tfstates.platform.virtual_wan.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_wan.tfstate }}" + } +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].enable_er_connections and connectivity_virtual_hub.express_route_connections[virtual_hub] is defined %} + # Express Route Circuit + {{ config.tfstates.platform.express_route_circuits[connectivity_virtual_hub.express_route_connections[virtual_hub].express_route_circuit_authorization.tfstate_key].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.express_route_circuits[connectivity_virtual_hub.express_route_connections[virtual_hub].express_route_circuit_authorization.tfstate_key].tfstate }}" + } +{% endif %} +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].enable_er_connections and connectivity_virtual_hub.express_route_connections[virtual_hub].circuit_peering is defined %} + # Express Route Circuit Peerings + {{ config.tfstates.platform.express_route_circuit_peerings[connectivity_virtual_hub.express_route_connections[virtual_hub].circuit_peering.tfstate_key].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.express_route_circuit_peerings[connectivity_virtual_hub.express_route_connections[virtual_hub].circuit_peering.tfstate_key].tfstate }}" + } +{% endif %} + } +} + +custom_variables = { + virtual_hub_lz_key = "{{ config.tfstates.platform.virtual_hubs[virtual_hub].lz_key_name }}" +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/virtual_hub/virtual_hubs.tfvars.j2 b/templates/platform/level2/connectivity/virtual_hub/virtual_hubs.tfvars.j2 new file mode 100644 index 000000000..42a7db4c7 --- /dev/null +++ 
b/templates/platform/level2/connectivity/virtual_hub/virtual_hubs.tfvars.j2 @@ -0,0 +1,82 @@ +virtual_hubs = { + {{ virtual_hub }} = { + virtual_wan = { +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].virtual_wan.lz_key is defined %} + lz_key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" +{% endif %} + key = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].virtual_wan.key }}" + } + + resource_group = { +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].virtual_wan.lz_key is defined %} + lz_key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" +{% endif %} + key = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].virtual_wan.key }}" + } + + hub_name = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].name }}" + region = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].region_key }}" + hub_address_prefix = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].hub_address_prefix }}" + deploy_firewall = false + deploy_p2s = false + p2s_config = {} +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].deploy_s2s %} + deploy_s2s = {{ connectivity_virtual_hub.virtual_hubs[virtual_hub].deploy_s2s | string | lower }} + s2s_config = { + name = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].s2s_config.name }}" + scale_unit = {{ connectivity_virtual_hub.virtual_hubs[virtual_hub].s2s_config.scale_unit }} + } +{% else %} + deploy_s2s = false +{% endif %} +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].deploy_er %} + deploy_er = {{ connectivity_virtual_hub.virtual_hubs[virtual_hub].deploy_er | string | lower }} + er_config = { + name = "{{ connectivity_virtual_hub.virtual_hubs[virtual_hub].er_config.name }}" + scale_units = {{ connectivity_virtual_hub.virtual_hubs[virtual_hub].er_config.scale_units }} + } +{% else %} + deploy_er = false +{% endif %} + } +} + +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].enable_er_connections and connectivity_virtual_hub.express_route_connections is defined %} +express_route_connections = { + {{ virtual_hub }} = { + name = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].name }}" +{% if connectivity_virtual_hub.express_route_connections[virtual_hub].enable_internet_security is defined %} + enable_internet_security = {{ connectivity_virtual_hub.express_route_connections[virtual_hub].enable_internet_security | string | lower }} +{% endif %} +{% if connectivity_virtual_hub.virtual_hubs[virtual_hub].enable_er_connections %} + enable_er_connections = {{ connectivity_virtual_hub.virtual_hubs[virtual_hub].enable_er_connections | string | lower }} +{% endif %} +{% if connectivity_virtual_hub.express_route_connections[virtual_hub].routing_weight is defined %} + routing_weight = {{ connectivity_virtual_hub.express_route_connections[virtual_hub].routing_weight }} +{% endif %} + virtual_hub = { + key = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].virtual_hub.key }}" + } + circuit_peering = { + lz_key = "{{ config.tfstates.platform.express_route_circuit_peerings[connectivity_virtual_hub.express_route_connections[virtual_hub].circuit_peering.tfstate_key].lz_key_name }}" + key = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].circuit_peering.key }}" + } +{% if connectivity_virtual_hub.express_route_connections[virtual_hub].express_route_circuit_authorization is defined %} + express_route_circuit_authorization = { + lz_key = "{{ 
config.tfstates.platform.express_route_circuits[connectivity_virtual_hub.express_route_connections[virtual_hub].express_route_circuit_authorization.tfstate_key].lz_key_name }}" + key = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].express_route_circuit_authorization.key }}" + } +{% endif %} +{% if connectivity_virtual_hub.express_route_connections[virtual_hub].route_table is defined %} + route_table = { + key = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].route_table.key }}" + } +{% endif %} +{% if connectivity_virtual_hub.express_route_connections[virtual_hub].propagated_route_tables is defined %} + propagated_route_tables = { + key = "{{ connectivity_virtual_hub.express_route_connections[virtual_hub].propagated_route_tables.key }}" + } +{% endif %} + } +} +{% endif %} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/virtual_hub/virtual_hubs_route_tables.tfvars.j2 b/templates/platform/level2/connectivity/virtual_hub/virtual_hubs_route_tables.tfvars.j2 new file mode 100644 index 000000000..c1d31f8c5 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_hub/virtual_hubs_route_tables.tfvars.j2 @@ -0,0 +1,35 @@ +{% if connectivity_virtual_hub[virtual_hub].virtual_hub_route_tables is defined %} +virtual_hub_route_tables = { +{% for key, route_table in connectivity_virtual_hub[virtual_hub].virtual_hub_route_tables.items() %} + {{ key }} = { + name = "{{ route_table.name }}" + + virtual_hub = { + key = "{{ virtual_hub }}" + } + + # labels = ["label1"] + # routes = { + # egress_internet = { + # name = "egress-internet" + # destinations_type = "CIDR" + # destinations = ["0.0.0.0/0"] + + # # Either next_hop or next_hop_id can be used + # # + # # When using next_hop, the virtual_hub_connection must be deployed in a different landingzone. This cannot be tested in the standalone module. + # # Will be covered in the landingzone starter production configuration in future releases. + # # + # next_hop = { + # lz_key = "" # + # resource_type = "virtual_hub_connection" # Only supported value. 
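+      #   # Note: lz_key is the key of the landingzone (tfstate) that deployed the virtual_hub_connection,
+      #   # and resource_key is that connection's key in that remote state; "egress-fw" below is only an example.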
+ # resource_key = "egress-fw" + # } + # #to cather for external object + # #next_hop_id = "Azure_Resource_ID" + # } + # } + } +{% endfor %} +} +{% endif %} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/virtual_wan/ansible.yaml b/templates/platform/level2/connectivity/virtual_wan/ansible.yaml new file mode 100644 index 000000000..9294e0ff0 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_wan/ansible.yaml @@ -0,0 +1,20 @@ +- name: ({{ folder_name }}) - Clean-up directory + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ folder_name }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: ({{ folder_name }}) - Creates directory structure + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ folder_name }}" + state: directory + +- name: ({{ folder_name }}) - Tfvars + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ folder_name }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/{{ folder_name }}/*.j2" + - "{{ level }}/{{ base_folder }}/{{ folder_name }}/*.md" diff --git a/templates/platform/level2/connectivity/virtual_wan/configuration.tfvars.j2 b/templates/platform/level2/connectivity/virtual_wan/configuration.tfvars.j2 new file mode 100644 index 000000000..d9a3f9cd5 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_wan/configuration.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "{{ config.tfstates.platform.virtual_wan.level }}" + key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} diff --git a/templates/platform/level2/connectivity/virtual_wan/readme.md b/templates/platform/level2/connectivity/virtual_wan/readme.md new file mode 100644 index 000000000..c54911c52 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_wan/readme.md @@ -0,0 +1,29 @@ +# Virtual Wan + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ config.gitops.caf_landingzone_branch }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_connectivity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/{{ folder_name }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.connectivity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ 
config.tfstates.platform[folder_name].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform[folder_name].tfstate }}.tfplan \ + -a plan + +``` diff --git a/templates/platform/level2/connectivity/virtual_wan/resource_groups.tfvars.j2 b/templates/platform/level2/connectivity/virtual_wan/resource_groups.tfvars.j2 new file mode 100644 index 000000000..eae3fe158 --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_wan/resource_groups.tfvars.j2 @@ -0,0 +1,8 @@ +resource_groups = { +{% for key, resource_group in connectivity_virtual_wan.resource_groups.items() %} + {{ key }} = { + name = "{{ resource_group.name }}" + region = "{{ resource_group.region_key | default(config.caf_terraform.launchpad.default_region_key) }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/virtual_wan/virtual_wan.tfvars.j2 b/templates/platform/level2/connectivity/virtual_wan/virtual_wan.tfvars.j2 new file mode 100644 index 000000000..128c9e9ad --- /dev/null +++ b/templates/platform/level2/connectivity/virtual_wan/virtual_wan.tfvars.j2 @@ -0,0 +1,9 @@ +virtual_wans = { +{% for key, vwan in connectivity_virtual_wan.virtual_wans.items() %} + "{{key}}" = { + resource_group_key = "{{ vwan.resource_group_key }}" + name = "{{ vwan.name }}" + region = "{{ vwan.region_key }}" + } +{% endfor %} +} diff --git a/templates/platform/level2/connectivity/vpn_site/ansible.yaml b/templates/platform/level2/connectivity/vpn_site/ansible.yaml new file mode 100644 index 000000000..9d620b06b --- /dev/null +++ b/templates/platform/level2/connectivity/vpn_site/ansible.yaml @@ -0,0 +1,20 @@ +- name: VPN Site {{ site }} - Clean-up directory + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/vpn_sites/{{ site }}" + state: absent + when: + - config.configuration_folders.platform.cleanup_destination | bool + +- name: VPN Site {{ site }} - Creates directory structure + file: + path: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/vpn_sites/{{ site }}" + state: directory + +- name: VPN Site {{ site }} - Tfvars + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}/vpn_sites/{{ site }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/vpn_site/*.j2" + - "{{ level }}/{{ base_folder }}/vpn_site/*.md" diff --git a/templates/platform/level2/connectivity/vpn_site/configuration.tfvars.j2 b/templates/platform/level2/connectivity/vpn_site/configuration.tfvars.j2 new file mode 100644 index 000000000..3870a6e90 --- /dev/null +++ b/templates/platform/level2/connectivity/vpn_site/configuration.tfvars.j2 @@ -0,0 +1,16 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.virtual_wan.lz_key_name }}" + level = "{{ config.tfstates.platform.vpn_sites[site].level }}" + key = "{{ config.tfstates.platform.vpn_sites[site].lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.virtual_wan.lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_wan.tfstate }}" + } + {{ 
config.tfstates.platform.virtual_hubs[connectivity_vpn_gateway_connections.vpn_gateway_connections[site].vpn_site.key].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_hubs[connectivity_vpn_gateway_connections.vpn_gateway_connections[site].vpn_site.key].tfstate }}" + } + } +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/vpn_site/vpn_gateways_connections.tfvars.j2 b/templates/platform/level2/connectivity/vpn_site/vpn_gateways_connections.tfvars.j2 new file mode 100644 index 000000000..0720ce648 --- /dev/null +++ b/templates/platform/level2/connectivity/vpn_site/vpn_gateways_connections.tfvars.j2 @@ -0,0 +1,40 @@ +vpn_gateway_connections = { + {{ site }} = { + name = "{{ connectivity_vpn_gateway_connections.vpn_gateway_connections[site].name }}" + internet_security_enabled = {{ connectivity_vpn_gateway_connections.vpn_gateway_connections[site].internet_security_enabled | default(true) | string | lower }} // propagate to default route table + vpn_site = { + key = "{{ connectivity_vpn_gateway_connections.vpn_gateway_connections[site].vpn_site.key }}" + } + virtual_hub = { + lz_key = "{{ connectivity_vpn_gateway_connections.vpn_gateway_connections[site].virtual_hub.lz_key }}" + key = "{{ connectivity_vpn_gateway_connections.vpn_gateway_connections[site].virtual_hub.key }}" + } + + vpn_links = { +{% for link_key, link in connectivity_vpn_gateway_connections.vpn_gateway_connections[site].vpn_links.items() %} + {{ link_key }} = { + name = "{{ link.name }}" + shared_key = "{{ link.shared_key }}" + bgp_enabled = {{ link.bgp_enabled | default(false) | string | lower }} + bandwidth_mbps = {{ link.bandwidth_mbps }} + link_index = {{ link.link_index }} + protocol = "{{ link.protocol }}" + ipsec_policies = { +{% for pol_key, policy in link.ipsec_policies.items() %} + {{ pol_key }} = { + dh_group = "{{policy.dh_group}}" + ike_encryption_algorithm = "{{policy.ike_encryption_algorithm}}" + ike_integrity_algorithm = "{{policy.ike_integrity_algorithm}}" + encryption_algorithm = "{{policy.encryption_algorithm}}" + integrity_algorithm = "{{policy.integrity_algorithm}}" + pfs_group = "{{policy.pfs_group}}" + sa_data_size_kb = "{{policy.sa_data_size_kb}}" + sa_lifetime_sec = "{{policy.sa_lifetime_sec}}" + } +{% endfor %} + } + } +{% endfor %} + } + } +} \ No newline at end of file diff --git a/templates/platform/level2/connectivity/vpn_site/vpn_sites.tfvars.j2 b/templates/platform/level2/connectivity/vpn_site/vpn_sites.tfvars.j2 new file mode 100644 index 000000000..ee6a97350 --- /dev/null +++ b/templates/platform/level2/connectivity/vpn_site/vpn_sites.tfvars.j2 @@ -0,0 +1,27 @@ +vpn_sites = { + {{ site }} = { + name = "{{ connectivity_vpn_sites.vpn_sites[site].name }}" + resource_group = { + lz_key = "{{ connectivity_vpn_sites.vpn_sites[site].resource_group.lz_key }}" + key = "{{ connectivity_vpn_sites.vpn_sites[site].resource_group.key }}" + } + virtual_wan = { + lz_key = "{{ connectivity_vpn_sites.vpn_sites[site].virtual_wan.lz_key }}" + key = "{{ connectivity_vpn_sites.vpn_sites[site].virtual_wan.key }}" + } + device_vendor = "{{ connectivity_vpn_sites.vpn_sites[site].device_vendor }}" +{% if connectivity_vpn_sites.vpn_sites[site].address_cidrs is defined %} + address_cidrs = {{ connectivity_vpn_sites.vpn_sites[site].address_cidrs | replace('None','[]') | replace('\'','\"') }} +{% endif %} + links = { +{% for link_key, link in connectivity_vpn_sites.vpn_sites[site].links.items() %} + {{ link_key }} = { + name = "{{ link.name }}" + 
ip_address = "{{ link.ip_address }}" + provider_name = "{{ link.provider_name }}" + speed_in_mbps = "{{ link.speed_in_mbps }}" + } +{% endfor %} + } + } +} \ No newline at end of file diff --git a/templates/platform/level2/identity/adds/azure_monitor.tfvars b/templates/platform/level2/identity/adds/azure_monitor.tfvars new file mode 100644 index 000000000..e69de29bb diff --git a/templates/platform/level2/identity/adds/configuration.tfvars.j2 b/templates/platform/level2/identity/adds/configuration.tfvars.j2 new file mode 100644 index 000000000..7c369c753 --- /dev/null +++ b/templates/platform/level2/identity/adds/configuration.tfvars.j2 @@ -0,0 +1,18 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "level2" + key = "{{ config.tfstates.platform.identity_adds.lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } +{% for key, virtual_hub in tfstates.virtual_hubs.items() %} + {{ config.tfstates.platform.virtual_hubs[key].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_hubs[key].tfstate }}" + } +{% endfor %} + } +} diff --git a/templates/platform/level2/identity/adds/cost_management.tfvars b/templates/platform/level2/identity/adds/cost_management.tfvars new file mode 100644 index 000000000..e69de29bb diff --git a/templates/platform/level2/identity/adds/demo.yaml b/templates/platform/level2/identity/adds/demo.yaml new file mode 100644 index 000000000..c91a21597 --- /dev/null +++ b/templates/platform/level2/identity/adds/demo.yaml @@ -0,0 +1,34 @@ +- name: Identity - ADDS - Clean-up directory + file: + path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds" + state: absent + when: + - config.configuration_folders.cleanup_destination | bool + +- name: Identity - ADDS - Creates directory structure + file: + path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds" + state: directory + +- name: Identity - ADDS - Creates directory structure for diagnostics + file: + path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds/diagnostics" + state: directory + +- name: Identity - ADDS - tfvars + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/*.j2" + - "{{ level }}/{{ base_folder }}/*.md" + +- name: Identity - ADDS - diagnostics + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds/diagnostics/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ level }}/{{ base_folder }}/diagnostics/*.xml" + diff --git a/templates/platform/level2/identity/adds/diagnostics/wadcfg.xml b/templates/platform/level2/identity/adds/diagnostics/wadcfg.xml new file mode 100644 index 000000000..0222feff3 --- /dev/null +++ b/templates/platform/level2/identity/adds/diagnostics/wadcfg.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of 
file diff --git a/templates/platform/level2/identity/adds/domain_controllers.tfvars.j2 b/templates/platform/level2/identity/adds/domain_controllers.tfvars.j2 new file mode 100644 index 000000000..2f3b079e0 --- /dev/null +++ b/templates/platform/level2/identity/adds/domain_controllers.tfvars.j2 @@ -0,0 +1,102 @@ +# Availability set for domain controllers +availability_sets = { + avset1 = { + name = "avset-dc" + region = "region1" + resource_group_key = "contoso_identity_adds" + # Depends on the region, update and fault domain count availability varies. + platform_update_domain_count = 2 + platform_fault_domain_count = 2 + # By default availability set is configured as managed. Below can be used to change it to unmanged. + # managed = false + } +} + +# Virtual machines +virtual_machines = { +{% for key, vm in identity.virtual_machines.items() %} + # Configuration to deploy a bastion host linux virtual machine + {{ key }} = { + resource_group_key = "{{ vm.resource_group_key}}" + provision_vm_agent = true + boot_diagnostics_storage_account_key = "bootdiag_region1" + + os_type = "windows" + + # the auto-generated ssh key in keyvault secret. Secret name being {VM name}-ssh-public and {VM name}-ssh-private + keyvault_key = "dc01" + + # Define the number of networking cards to attach the virtual machine + networking_interfaces = { + nic0 = { + # Value of the keys from networking.tfvars + vnet_key = "identity_adds" + subnet_key = "ActiveDirectory" + name = "{{ vm.name }}" + enable_ip_forwarding = false + + diagnostic_profiles = { + operations = { + definition_key = "network_interface_card" + destination_type = "storage" + destination_key = "all_regions" + } + } + } + } + + virtual_machine_settings = { + windows = { + name = "{{ vm.name }}" + size = "{{ vm.size }}" + admin_username = "adminuser" + availability_set_key = "avset1" + + # Value of the nic keys to attach the VM. 
The first one in the list is the default nic + network_interface_keys = ["nic0"] + + os_disk = { + name = "{{ vm.name }}-os" + caching = "ReadWrite" + storage_account_type = "Standard_LRS" + } + + source_image_reference = { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2019-Datacenter" + version = "latest" + } + + } + } + + virtual_machine_extensions = { + microsoft_enterprise_cloud_monitoring = { + diagnostic_log_analytics_key = "central_logs_region1" + } + + microsoft_azure_diagnostics = { + # Requires at least one diagnostics storage account + diagnostics_storage_account_keys = ["bootdiag_region1"] + + # Relative path to the configuration folder or full path + xml_diagnostics_file = "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ base_folder }}/adds/diagnostics/wadcfg.xml" + } + } + } +{% endfor %} +} + +diagnostic_storage_accounts = { + # Stores boot diagnostic for region1 + bootdiag_region1 = { + name = "boot-dc-re1" + resource_group_key = "contoso_identity_adds" + account_kind = "StorageV2" + account_tier = "Standard" + account_replication_type = "LRS" + access_tier = "Cool" + } +} + diff --git a/templates/platform/level2/identity/adds/keyvaults.tfvars.j2 b/templates/platform/level2/identity/adds/keyvaults.tfvars.j2 new file mode 100644 index 000000000..6c5053f05 --- /dev/null +++ b/templates/platform/level2/identity/adds/keyvaults.tfvars.j2 @@ -0,0 +1,31 @@ +keyvaults = { + dc01 = { + name = "dc01-secrets" + resource_group_key = "contoso_identity_adds" + sku_name = "{{ config.platform_core_setup.sku.keyvault }}" + + creation_policies = { + logged_in_user = { + certificate_permissions = ["Get", "List", "Update", "Create", "Import", "Delete", "Purge", "Recover"] + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } + logged_in_app = { + certificate_permissions = ["Get", "List", "Update", "Create", "Import", "Delete", "Purge", "Recover"] + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } + } + + diagnostic_profiles = { + operations = { + definition_key = "default_all" + destination_type = "log_analytics" + destination_key = "central_logs" + } + siem = { + definition_key = "siem_all" + destination_type = "storage" + destination_key = "all_regions" + } + } + } +} diff --git a/templates/platform/level2/identity/adds/network_security_groups.tfvars.j2 b/templates/platform/level2/identity/adds/network_security_groups.tfvars.j2 new file mode 100644 index 000000000..72740f61a --- /dev/null +++ b/templates/platform/level2/identity/adds/network_security_groups.tfvars.j2 @@ -0,0 +1,481 @@ +# +# Definition of the networking security groups +# +network_security_group_definition = { + # This entry is applied to all subnets with no NSG defined + empty_nsg = { + flow_logs = { + version = 2 + enabled = true + storage_account = { + storage_account_destination = "all_regions" + retention = { + enabled = true + days = 30 + } + } + traffic_analytics = { + enabled = true + log_analytics_workspace_destination = "central_logs" + interval_in_minutes = "10" + } + } + diagnostic_profiles = { + nsg = { + definition_key = "network_security_group" + destination_type = "storage" + destination_key = "all_regions" + } + operations = { + name = "operations" + definition_key = "network_security_group" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + nsg = [] + } + + azure_bastion_nsg = { + flow_logs = { + version = 2 + enabled = true + storage_account 
= { + storage_account_destination = "all_regions" + retention = { + enabled = true + days = 30 + } + } + traffic_analytics = { + enabled = false + log_analytics_workspace_destination = "central_logs" + interval_in_minutes = "10" + } + } + + diagnostic_profiles = { + nsg = { + definition_key = "network_security_group" + destination_type = "storage" + destination_key = "all_regions" + } + operations = { + name = "operations" + definition_key = "network_security_group" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + + nsg = [ + { + name = "bastion-in-allow", + priority = "100" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + { + name = "bastion-control-in-allow-443", + priority = "120" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "135" + source_address_prefix = "GatewayManager" + destination_address_prefix = "*" + }, + { + name = "Kerberos-password-change", + priority = "121" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "4443" + source_address_prefix = "GatewayManager" + destination_address_prefix = "*" + }, + { + name = "bastion-vnet-out-allow-22", + priority = "103" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "VirtualNetwork" + }, + { + name = "bastion-vnet-out-allow-3389", + priority = "101" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "3389" + source_address_prefix = "*" + destination_address_prefix = "VirtualNetwork" + }, + { + name = "bastion-azure-out-allow", + priority = "120" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "AzureCloud" + } + ] + } + + application_gateway = { + + diagnostic_profiles = { + nsg = { + definition_key = "network_security_group" + destination_type = "storage" + destination_key = "all_regions" + } + operations = { + name = "operations" + definition_key = "network_security_group" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + + flow_logs = { + version = 2 + enabled = true + storage_account = { + storage_account_destination = "all_regions" + retention = { + enabled = true + days = 30 + } + } + traffic_analytics = { + enabled = true + log_analytics_workspace_destination = "central_logs" + interval_in_minutes = "10" + } + } + + nsg = [ + { + name = "Inbound-HTTP", + priority = "120" + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "80-82" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + { + name = "Inbound-HTTPs", + priority = "130" + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + { + name = "Inbound-AGW", + priority = "140" + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "65200-65535" + source_address_prefix = "*" + destination_address_prefix = "*" + }, + ] + } + + api_management = { + + 
diagnostic_profiles = { + nsg = { + definition_key = "network_security_group" + destination_type = "storage" + destination_key = "all_regions" + } + operations = { + name = "operations" + definition_key = "network_security_group" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + flow_logs = { + version = 2 + enabled = true + storage_account = { + storage_account_destination = "all_regions" + retention = { + enabled = true + days = 30 + } + } + traffic_analytics = { + enabled = true + log_analytics_workspace_destination = "central_logs" + interval_in_minutes = "10" + } + } + + nsg = [ + { + name = "Inbound-APIM", + priority = "100" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "3443" + source_address_prefix = "ApiManagement" + destination_address_prefix = "VirtualNetwork" + }, + { + name = "Inbound-Redis", + priority = "110" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "6381-6383" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + }, + { + name = "Inbound-LoadBalancer", + priority = "120" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "AzureLoadBalancer" + destination_address_prefix = "VirtualNetwork" + }, + { + name = "Outbound-StorageHttp", + priority = "100" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "Storage" + }, + { + name = "Outbound-StorageHttps", + priority = "110" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "Storage" + }, + { + name = "Outbound-AADHttp", + priority = "120" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "AzureActiveDirectory" + }, + { + name = "Outbound-AADHttps", + priority = "130" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "AzureActiveDirectory" + }, + { + name = "Outbound-SQL", + priority = "140" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "1433" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "SQL" + }, + { + name = "Outbound-EventHub", + priority = "150" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "5671-5672" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "EventHub" + }, + { + name = "Outbound-EventHubHttps", + priority = "160" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "EventHub" + }, + { + name = "Outbound-FileShareGit", + priority = "170" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "445" + source_address_prefix = "VirtualNetwork" + 
destination_address_prefix = "Storage" + }, + { + name = "Outbound-Health", + priority = "180" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "1886" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "INTERNET" + }, + { + name = "Outbound-Monitor", + priority = "190" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "AzureMonitor" + }, + { + name = "Outbound-MoSMTP1itor", + priority = "200" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "25" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "INTERNET" + }, + { + name = "Outbound-SMTP2", + priority = "210" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "587" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "INTERNET" + }, + { + name = "Outbound-SMTP3", + priority = "220" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "25028" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "INTERNET" + }, + { + name = "Outbound-Redis", + priority = "230" + direction = "Outbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "6381-6383" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + }, + ] + } + + jumpbox = { + flow_logs = { + version = 2 + enabled = true + storage_account = { + storage_account_destination = "all_regions" + retention = { + enabled = true + days = 30 + } + } + traffic_analytics = { + enabled = true + log_analytics_workspace_destination = "central_logs" + interval_in_minutes = "10" + } + } + + diagnostic_profiles = { + nsg = { + definition_key = "network_security_group" + destination_type = "storage" + destination_key = "all_regions" + } + operations = { + name = "operations" + definition_key = "network_security_group" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + + nsg = [ + { + name = "ssh-inbound-22", + priority = "200" + direction = "Inbound" + access = "Allow" + protocol = "tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "VirtualNetwork" + }, + ] + } + +} diff --git a/templates/platform/level2/identity/adds/readme.md b/templates/platform/level2/identity/adds/readme.md new file mode 100644 index 000000000..be20632b9 --- /dev/null +++ b/templates/platform/level2/identity/adds/readme.md @@ -0,0 +1,26 @@ + +### Identity - Active Directory Domain Controllers (ADDS) + +Deploy 2 domain controllers in the primary region + +```bash +# login a with a user member of the caf-maintainers group +rover login -t {{ config.tenant_name }} + +cd {{ destination_base_path }}landingzones +git fetch origin +git checkout {{ config.caf_landingzone_branch }} + +export ARM_USE_AZUREAD=true +caf_env="{{ config.caf_terraform.launchpad.caf_environment }}" + +rover \ + -lz {{ destination_base_path }}landingzones/caf_solution \ + -var-folder {{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ level }}/{{ tfstates["identity_adds"].base_config_path }}/adds \ + -tfstate {{ tfstates["identity_adds"].tfstate }} \ + -log-severity 
ERROR \ + -env ${caf_env} \ + -level {{ level }} \ + -a plan + +``` diff --git a/templates/platform/level2/identity/adds/resource_groups.tfvars.j2 b/templates/platform/level2/identity/adds/resource_groups.tfvars.j2 new file mode 100644 index 000000000..7d66f7c3f --- /dev/null +++ b/templates/platform/level2/identity/adds/resource_groups.tfvars.j2 @@ -0,0 +1,8 @@ +resource_groups = { +{% for key, resource_group in identity.resource_groups.items() %} + {{ key }} = { + name = "{{ resource_group.name }}" + region = "{{ resource_group.region_key }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/identity/adds/site_recovery.tfvars b/templates/platform/level2/identity/adds/site_recovery.tfvars new file mode 100644 index 000000000..e69de29bb diff --git a/templates/platform/level2/identity/adds/virtual_hub_connections.tfvars.j2 b/templates/platform/level2/identity/adds/virtual_hub_connections.tfvars.j2 new file mode 100644 index 000000000..5117df94c --- /dev/null +++ b/templates/platform/level2/identity/adds/virtual_hub_connections.tfvars.j2 @@ -0,0 +1,32 @@ +virtual_hub_connections = { +{% for virtual_network_key, virtual_network in identity.virtual_networks.items() %} +{% for vhub_conn_key, vhub_connection in virtual_network.virtual_hub_connection.items() %} + {{ vhub_conn_key }} = { + name = "{{ vhub_connection.name }}" + virtual_hub = { + lz_key = "{{ config.tfstates.platform.virtual_hubs[vhub_connection.virtual_hub.lz_key_name].lz_key_name }}" + key = "{{ vhub_connection.virtual_hub.key }}" + } + vnet = { + vnet_key = "{{ virtual_network_key }}" + } + routing = { +{% for rt_key, route_table in vhub_connection.routing.items() %} + {{ rt_key }} = { + virtual_hub_route_table_key = "{{ route_table.route_table.key }}" + lz_key = "{{ config.tfstates.platform.virtual_hubs[route_table.route_table.lz_key_name].lz_key_name }}" + + propagated_route_table = { +{% if route_table.propagated_route_table.virtual_hub_route_table_keys is defined %} + lz_key = "{{ config.tfstates.platform.virtual_hubs[route_table.propagated_route_table.lz_key_name].lz_key_name }}" + virtual_hub_route_table_keys = {{ route_table.propagated_route_table.virtual_hub_route_table_keys | replace('None','[]') | replace('\'','\"') }} +{% endif %} + labels = {{ route_table.propagated_route_table.labels | replace('None','[]') | replace('\'','\"') }} + } + } +{% endfor %} + } + } +{% endfor %} +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/identity/adds/virtual_networks.tfvars.j2 b/templates/platform/level2/identity/adds/virtual_networks.tfvars.j2 new file mode 100644 index 000000000..c1080e5a3 --- /dev/null +++ b/templates/platform/level2/identity/adds/virtual_networks.tfvars.j2 @@ -0,0 +1,41 @@ +vnets = { +{% for key, vnet in identity.virtual_networks.items() %} + {{ key }} = { + resource_group_key = "{{vnet.resource_group_key}}" + vnet = { + name = "{{ vnet.name }}" + address_space = {{ vnet.address_space | replace('None','[]') | replace('\'','\"') }} + } + subnets = { +{% if vnet.subnets is defined %} +{% for subnet_key, subnet in vnet.subnets.items() %} + {{ subnet_key }} = { + name = "{{subnet.name}}" + cidr = {{ vnet.subnets[subnet_key].cidr | replace('None','[]') | replace('\'','\"') }} + nsg_key = "empty_nsg" + } +{% endfor %} +{% endif %} + } +{% if vnet.specialsubnets is defined %} + specialsubnets = { +{% for subnet_key, subnet in vnet.specialsubnets.items() %} + {{ subnet_key }} = { + name = "{{subnet.name}}" + cidr = {{ 
vnet.specialsubnets[subnet_key].cidr | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endif %} + + # you can setup up to 5 keys - vnet diganostic + diagnostic_profiles = { + vnet = { + definition_key = "networking_all" + destination_type = "log_analytics" + destination_key = "central_logs" + } + } + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/platform/level2/identity/ansible.yaml b/templates/platform/level2/identity/ansible.yaml new file mode 100644 index 000000000..2fdc64f6e --- /dev/null +++ b/templates/platform/level2/identity/ansible.yaml @@ -0,0 +1,26 @@ +- name: Creates {{ base_folder }} directory structure + shell: mkdir -p "{{ destination_base }}/{{ config.configuration_folders.platform.destination_relative_path }}/{{ level }}/{{ base_folder }}" + +- name: Azure Identity level2 + include_tasks: "{{ level }}/ansible_deployment.yaml" + when: + - config.tfstates.platform.identity_level2 is defined + loop: "{{ config.tfstates.platform.identity_level2.keys() }}" + loop_control: + loop_var: deployment + vars: + files_matching: "identity_level2.yaml|identity_level2.caf.yaml" + resource_folder: identity_level2 + display_name: Azure Identity level2 + +- name: Azure Active Directory Domain Services (AADDS) + include_tasks: "{{ level }}/ansible_deployment.yaml" + when: + - config.tfstates.platform.identity_level2_aadds is defined + loop: "{{ config.tfstates.platform.identity_level2_aadds.keys() }}" + loop_control: + loop_var: deployment + vars: + files_matching: "identity_level2_aadds.yaml|identity_level2_aadds.caf.yaml" + resource_folder: identity_level2_aadds + display_name: Azure Active Directory Domain Services (AADDS) diff --git a/templates/platform/level2/identity/identity_level2/landingzone.tfvars.j2 b/templates/platform/level2/identity/identity_level2/landingzone.tfvars.j2 new file mode 100644 index 000000000..f14f87604 --- /dev/null +++ b/templates/platform/level2/identity/identity_level2/landingzone.tfvars.j2 @@ -0,0 +1,12 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "{{ config.tfstates.platform.identity_level2[deployment].level }}" + key = "{{ config.tfstates.platform.identity_level2[deployment].lz_key_name }}" + tfstates = { + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} diff --git a/templates/platform/level2/identity/identity_level2/readme.md b/templates/platform/level2/identity/identity_level2/readme.md new file mode 100644 index 000000000..5bef01ac9 --- /dev/null +++ b/templates/platform/level2/identity/identity_level2/readme.md @@ -0,0 +1,32 @@ + +# Identity +Deploy the identity services + +```bash +#Note: close previous session if you logged with a different service principal using --impersonate-sp-from-keyvault-url +rover logout + +# login a with a user member of the caf-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +rover \ +{% if config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_identity.vault_uri }} \ +{% endif %} + -lz /tf/caf/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ + -target_subscription {{ platform_subscriptions_details.identity.subscription_id }} \ + -tfstate {{ 
config.tfstates.platform.identity_level2[deployment].tfstate }} \ + -log-severity {{ config.gitops.rover_log_error }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.identity_level2[deployment].tfstate }}.tfplan \ + -a plan + +``` + + +# Next steps + + [Deploy Enterprise Scale](../../level1/eslz/readme.md) diff --git a/templates/platform/level2/identity/identity_level2_aadds/landingzone.tfvars.j2 b/templates/platform/level2/identity/identity_level2_aadds/landingzone.tfvars.j2 new file mode 100644 index 000000000..45333d9e4 --- /dev/null +++ b/templates/platform/level2/identity/identity_level2_aadds/landingzone.tfvars.j2 @@ -0,0 +1,17 @@ +landingzone = { + backend_type = "azurerm" + global_settings_key = "{{ config.tfstates.platform.management.lz_key_name }}" + level = "{{ config.tfstates.platform.identity_level2_aadds[deployment].level }}" + key = "{{ config.tfstates.platform.identity_level2_aadds[deployment].lz_key_name }}" + tfstates = { + # Virtual Hub + {{ config.tfstates.platform.virtual_hubs[deployment].lz_key_name }} = { + level = "current" + tfstate = "{{ config.tfstates.platform.virtual_hubs[deployment].tfstate }}" + } + {{ config.tfstates.platform.management.lz_key_name }} = { + level = "lower" + tfstate = "{{ config.tfstates.platform.management.tfstate }}" + } + } +} diff --git a/templates/platform/level2/identity/identity_level2_aadds/readme.md b/templates/platform/level2/identity/identity_level2_aadds/readme.md new file mode 100644 index 000000000..9106bea3c --- /dev/null +++ b/templates/platform/level2/identity/identity_level2_aadds/readme.md @@ -0,0 +1,37 @@ + +# Azure Active Directory Domain Services (AADDS) + +## Select the correct branch for the landingzones code + +Note you need to adjust the branch {{ resources.gitops.landingzones }} to deploy the AADDS services + +## {{ environment }} + +```bash +# login a with a user member of the caf-platform-maintainers group +rover login -t {{ config.platform_identity.tenant_name }} + +cd {{ destination_base }}/landingzones +git fetch origin +git checkout {{ resources.gitops.landingzones }} + +rover \ +{% if keyvaults is defined and config.platform_identity.azuread_identity_mode != "logged_in_user" %} + --impersonate-sp-from-keyvault-url {{ keyvaults.cred_identity.vault_uri }} \ +{% endif %} + -lz {{ destination_base }}/landingzones/caf_solution \ + -var-folder {{ destination_path }} \ + -tfstate_subscription_id {{ config.caf_terraform.launchpad.subscription_id }} \ +{% if platform_subscriptions_details is defined %} + -target_subscription {{ platform_subscriptions_details.identity.subscription_id }} \ +{% else %} + -target_subscription {{ config.caf_terraform.launchpad.subscription_id }} \ +{% endif %} + -tfstate {{ config.tfstates.platform.identity_level2_aadds[deployment].tfstate }} \ + -env {{ config.caf_terraform.launchpad.caf_environment }} \ + -level {{ level }} \ + -p ${TF_DATA_DIR}/{{ config.tfstates.platform.identity_level2_aadds[deployment].tfstate }}.tfplan \ + -a plan + +``` + diff --git a/templates/platform/pipelines/README.md b/templates/platform/pipelines/README.md new file mode 100644 index 000000000..453154a6a --- /dev/null +++ b/templates/platform/pipelines/README.md @@ -0,0 +1,56 @@ +Alpha version - work in progress + +# Cloud Adoption Framework landing zones for Terraform - Starter template + +## Generate the configuration files + +```bash + +# Generate the contoso demo files (only this scenario is supported at the moment. 
More to come) +cd /tf/caf/templates/platform && \ +ansible-playbook e2e.yaml \ + -e base_templates_folder=/tf/caf/templates/platform \ + -e config_folder=/tf/caf/enterprise_scale/contoso/platform \ + -e model=demo + +``` + +## Deploy the stack using symphony job + +```bash + +## Prerequisites + +```bash +branch={{ config.eslz.private_lib[config.eslz.private_lib.version_to_deploy].caf_landingzone_branch }} +cd {{ destination_base_path }} +git clone --branch ${branch} https://github.com/Azure/caf-terraform-landingzones.git landingzones + +# If you are planning to submit PR you can clone the a forked version instead +git clone --branch ${branch} git@github.com:Azure/caf-terraform-landingzones.git landingzones + +# Or refresh an existing clone +cd {{ destination_base_path }}landingzones +git fetch origin +git checkout ${branch} +git status + +cd {{ destination_base_path }} +git pull + + +``` + + +# Only launchpad + + rover deploy \ + plan \ + -sc /tf/caf/configuration/contoso/platform/demo/pipelines/symphony_e2e.yaml \ + -b /tf/caf \ + -env sandpit \ + -ct launchpad \ + -level level0 + + +``` \ No newline at end of file diff --git a/templates/platform/pipelines/demo.yaml b/templates/platform/pipelines/demo.yaml new file mode 100644 index 000000000..6830023e0 --- /dev/null +++ b/templates/platform/pipelines/demo.yaml @@ -0,0 +1,26 @@ +- name: Clean-up directory + file: + path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ base_folder }}" + state: absent + when: config.configuration_folders.cleanup_destination | bool + +- name: Creates directory + file: + path: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ base_folder }}" + state: directory + +- name: Symphony + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ base_folder }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_folder }}/symphony*.yaml" + +- name: Next steps + ansible.builtin.template: + src: "{{ item }}" + dest: "{{ destination_base_path }}{{ config.configuration_folders.destination_relative_path }}/{{ item | basename | regex_replace('.j2$', '') }}" + force: yes + with_fileglob: + - "{{ base_folder }}/README.md" diff --git a/templates/platform/pipelines/symphony_e2e.yaml b/templates/platform/pipelines/symphony_e2e.yaml new file mode 100644 index 000000000..0d86c0d6c --- /dev/null +++ b/templates/platform/pipelines/symphony_e2e.yaml @@ -0,0 +1,41 @@ +environment: {{ model }} +repositories: + - name: landingzones + uri: https://github.com/Azure/caf-terraform-landingzones.git + branch: {{ config.caf_landingzone_branch }} + + # All paths are relative +levels: +- level: level0 + type: platform + stacks: + - stack: launchpad + landingZonePath: landingzones/caf_launchpad + configurationPath: "{{ config.configuration_folders.destination_relative_path }}/{{ config.tfstates.platform.launchpad.level }}/{{ config.tfstates.platform.launchpad.base_config_path }}" + tfState: {{ config.tfstates.platform.launchpad.tfstate }} + repository: landingzones + launchpad: true + +- level: level1 + type: platform + stacks: + - stack: management + landingZonePath: landingzones/caf_solution + configurationPath: "{{ config.configuration_folders.destination_relative_path }}/{{ config.tfstates.platform.management.level }}/{{ config.tfstates.platform.management.base_config_path }}" + tfState: 
{{ config.tfstates.platform.management.tfstate }} + repository: landingzones + - stack: eslz + landingZonePath: landingzones/caf_solution/add-ons/caf_eslz + configurationPath: "{{ config.configuration_folders.destination_relative_path }}/{{ config.tfstates.platform.eslz.level }}/{{ config.tfstates.platform.eslz.base_config_path }}" + tfState: {{ config.tfstates.platform.eslz.tfstate }} + repository: landingzones + branch: "{{ config.eslz.private_lib[config.eslz.private_lib.version_to_deploy].caf_landingzone_branch }}" + +- level: level2 + type: platform + stacks: + - stack: connectivity_virtual_wan + landingZonePath: landingzones/caf_solution + configurationPath: "{{ config.configuration_folders.destination_relative_path }}/{{ config.tfstates.platform.management.level }}/{{ config.tfstates.platform.management.base_config_path }}" + tfState: {{ config.tfstates.platform.management.tfstate }} + repository: landingzones \ No newline at end of file diff --git a/templates/platform/readme.md b/templates/platform/readme.md new file mode 100644 index 000000000..d3b1ed5d3 --- /dev/null +++ b/templates/platform/readme.md @@ -0,0 +1,19 @@ + +# Generate the terraform configuration files + +To execute this step you need to log in with one of the CAF maintainer accounts: +{% for maintainer in config.platform_identity.caf_platform_maintainers %} + - {{ maintainer }} +{% endfor %} + +```bash +rover login -t {{ config.platform_identity.tenant_name }} + +rover ignite \ + --playbook /tf/caf/landingzones/templates/platform/ansible.yaml \ + -e base_templates_folder={{ base_templates_folder }} \ + -e resource_template_folder={{resource_template_folder}} \ + -e config_folder={{ config_folder }} +``` + +Get started with the [launchpad](./platform/level0/launchpad) \ No newline at end of file diff --git a/templates/readme.md b/templates/readme.md new file mode 100644 index 000000000..aabe8006c --- /dev/null +++ b/templates/readme.md @@ -0,0 +1,6 @@ +# Rover ignite + +Rover Ignite allows you to create a coherent stack of configuration files for CAF Terraform landing zones. +It integrates all levels in a consistent and interactive way. +In some configurations, the output of one execution is needed before the next step can continue, so you may have to run the rover ignite command several times to generate the full set of configuration files.
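As an illustration of how these generators work, the sketch below renders a cut-down copy of the `resource_groups.tfvars.j2` template introduced in this change against a hypothetical YAML model. It is only an approximation of what `rover ignite` does through `ansible.builtin.template`, and the model values (`contoso_identity_adds`, `identity-adds`, `region1`) are made up for the example.

```python
# Minimal sketch, not part of this change: render one .j2 template against a
# YAML configuration model, the way the Ansible template tasks above do.
# Requires the jinja2 and PyYAML packages; all values below are illustrative.
import jinja2
import yaml

config_yaml = """
identity:
  resource_groups:
    contoso_identity_adds:
      name: identity-adds
      region_key: region1
"""

# Cut-down copy of templates/platform/level2/identity/adds/resource_groups.tfvars.j2
template_source = """\
resource_groups = {
{% for key, resource_group in identity.resource_groups.items() %}
  {{ key }} = {
    name   = "{{ resource_group.name }}"
    region = "{{ resource_group.region_key }}"
  }
{% endfor %}
}
"""

model = yaml.safe_load(config_yaml)
template = jinja2.Template(template_source, trim_blocks=True, lstrip_blocks=True)
print(template.render(**model))
# Expected output - a plain tfvars file ready to be passed to rover:
#   resource_groups = {
#     contoso_identity_adds = {
#       name   = "identity-adds"
#       region = "region1"
#     }
#   }
```

The same mechanism explains the recurring `| replace('None','[]') | replace('\'','\"')` filter chain on list-valued fields throughout these templates: Jinja2 stringifies a YAML list as `['10.0.0.0/24']`, so swapping the quotes yields valid HCL (`["10.0.0.0/24"]`), while a key that is present but empty (rendered as `None`) collapses to an empty list.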
+ diff --git a/templates/resources/active_directory_domain_service.tfvars.j2 b/templates/resources/active_directory_domain_service.tfvars.j2 new file mode 100644 index 000000000..44091996f --- /dev/null +++ b/templates/resources/active_directory_domain_service.tfvars.j2 @@ -0,0 +1,70 @@ +active_directory_domain_service = { +{% for key, value in resources.subscriptions[subscription_key].active_directory_domain_service.items() %} + {{ key }} = { + name = "{{ value.name }}" + region = "{{ value.region }}" +{% if value.resource_group_key is defined %} + resource_group_key = "{{ value.resource_group_key }}" +{% else %} + resource_group = { +{% if value.resource_group.lz_key is defined %} + lz_key = "{{ value.resource_group.lz_key }}" +{% endif %} + key = "{{ value.resource_group.key }}" + } +{% endif %} + domain_name = "{{ value.domain_name }}" + sku = "{{ value.sku }}" + filtered_sync_enabled = {{ value.filtered_sync_enabled | string | lower }} + initial_replica_set = { + region = "{{ value.initial_replica_set.region }}" + subnet = { + vnet_key = "{{ value.initial_replica_set.subnet.vnet_key }}" + key = "{{ value.initial_replica_set.subnet.key }}" +{% if value.initial_replica_set.subnet.lz_key is defined %} + lz_key = "{{ value.initial_replica_set.subnet.lz_key }}" +{% endif %} + } + } +{% if value.notifications is defined %} + notifications = { +{% if value.notifications.notify_dc_admins is defined %} + notify_dc_admins = {{ value.notifications.notify_dc_admins | string | lower }} +{% endif %} +{% if value.notifications.notify_global_admins is defined %} + notify_global_admins = {{ value.notifications.notify_global_admins | string | lower }} +{% endif %} +{% if value.notifications.additional_recipients is defined %} + additional_recipients = {{ value.notifications.additional_recipients | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} +{% if value.security is defined %} + security = { +{% if value.security.ntlm_v1_enabled is defined %} + ntlm_v1_enabled = {{ value.security.ntlm_v1_enabled | string | lower }} +{% endif %} +{% if value.security.sync_kerberos_passwords is defined %} + sync_kerberos_passwords = {{ value.security.sync_kerberos_passwords | string | lower }} +{% endif %} +{% if value.security.sync_ntlm_passwords is defined %} + sync_ntlm_passwords = {{ value.security.sync_ntlm_passwords | string | lower }} +{% endif %} +{% if value.security.sync_on_prem_passwords is defined %} + sync_on_prem_passwords = {{ value.security.sync_on_prem_passwords | string | lower }} +{% endif %} +{% if value.security.tls_v1_enabled is defined %} + tls_v1_enabled = {{ value.security.tls_v1_enabled | string | lower }} +{% endif %} + } +{% endif %} +{% if value.tags is defined %} + tags = { +{% for tag_key, tag_value in value.tags.items() %} + {{ tag_key }} = "{{ tag_value }}" +{% endfor %} + } +{% endif %} + } +{% endfor %} +} diff --git a/templates/resources/active_directory_domain_service_replica_set.tfvars.j2 b/templates/resources/active_directory_domain_service_replica_set.tfvars.j2 new file mode 100644 index 000000000..70ea38c19 --- /dev/null +++ b/templates/resources/active_directory_domain_service_replica_set.tfvars.j2 @@ -0,0 +1,13 @@ +active_directory_domain_service_replica_set = { +{% for key, value in resources.subscriptions[subscription_key].active_directory_domain_service_replica_set.items() %} + {{ key }} = { + region = "{{ value.region }}" + active_directory_domain_service = { + key = "{{ value.active_directory_domain_service.key }}" + } + subnet = { + vnet_key = 
"{{ value.subnet.vnet_key }}" + key = "{{ value.subnet.key }}" + } + } +} diff --git a/templates/resources/automation_accounts.tfvars.j2 b/templates/resources/automation_accounts.tfvars.j2 new file mode 100644 index 000000000..b80313ce5 --- /dev/null +++ b/templates/resources/automation_accounts.tfvars.j2 @@ -0,0 +1,9 @@ +automations = { +{% for key, automation in resources.subscriptions[subscription_key].automation_accounts.items() %} + {{ key }} = { + name = "{{ automation.name }}" + sku = "{{ automation.sku | default('Basic') }}" + resource_group_key = "{{ automation.resource_group_key }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/azuread_applications.tfvars.j2 b/templates/resources/azuread_applications.tfvars.j2 new file mode 100644 index 000000000..b3dee9f39 --- /dev/null +++ b/templates/resources/azuread_applications.tfvars.j2 @@ -0,0 +1,7 @@ +azuread_applications = { +{% for key, app in resources.subscriptions[subscription_key].azuread_applications.items() %} + {{ key }} = { + application_name = "{{ app.application_name }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/azuread_credential_policies.tfvars.j2 b/templates/resources/azuread_credential_policies.tfvars.j2 new file mode 100644 index 000000000..d2c3a2874 --- /dev/null +++ b/templates/resources/azuread_credential_policies.tfvars.j2 @@ -0,0 +1,23 @@ +azuread_credential_policies = { +{% for key, value in resources.subscriptions[subscription_key].azuread_credential_policies.items() %} + {{ key }} = { + # Length of the password + length = {{ value.length }} + special = {{ value.special | string | lower | default('false') }} + upper = {{ value.upper | string | lower | default('true') }} + number = {{ value.number | string | lower | default('true') }} + + # Password Expiration date + expire_in_days = {{ value.expire_in_days }} + rotation_key0 = { + # Odd number + days = {{ value.rotation_key0.days }} + } + rotation_key1 = { + # Even number + days = {{ value.rotation_key1.days }} + } + } +{% endfor %} + +} \ No newline at end of file diff --git a/templates/resources/azuread_credentials.tfvars.j2 b/templates/resources/azuread_credentials.tfvars.j2 new file mode 100644 index 000000000..5dcb81e4c --- /dev/null +++ b/templates/resources/azuread_credentials.tfvars.j2 @@ -0,0 +1,22 @@ +azuread_credentials = { +{% for key, cred in resources.subscriptions[subscription_key].azuread_credentials.items() %} + {{ key }} = { + type = "{{ cred.type | default('password') }}" + azuread_credential_policy_key = "{{ cred.azuread_credential_policy_key }}" + + azuread_application = { +{% if cred.azuread_application.lz_key is defined %} + lz_key = "{{ cred.azuread_application.lz_key }}" +{% endif %} + key = "{{ cred.azuread_application.key }}" + } + keyvaults = { +{% for kv_key, kv in cred.keyvaults.items() %} + {{ kv_key }} = { + secret_prefix = "{{ kv.secret_prefix }}" + } +{% endfor %} + } + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/azuread_groups.tfvars.j2 b/templates/resources/azuread_groups.tfvars.j2 new file mode 100644 index 000000000..ee523c96a --- /dev/null +++ b/templates/resources/azuread_groups.tfvars.j2 @@ -0,0 +1,37 @@ +azuread_groups = { +{% for key, ad_group in resources.subscriptions[subscription_key].azuread_groups.items() %} + {{ key }} = { + name = "{{ ad_group.name }}" +{% if ad_group.description is defined %} + description = "{{ ad_group.description }}" +{% endif %} +{% if ad_group.members is defined %} + members = { +{% 
if ad_group.members.user_principal_names is defined %} + user_principal_names = {{ ad_group.members.user_principal_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.group_names is defined %} + group_names = {{ ad_group.members.group_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.object_ids is defined %} + object_ids = {{ ad_group.members.object_ids | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.group_keys is defined %} + group_keys = {{ ad_group.members.group_keys | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if ad_group.members.service_principal_keys is defined %} + service_principal_keys = {{ ad_group.members.service_principal_keys | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} +{% if ad_group.owners is defined %} + owners = { +{% if ad_group.owners.user_principal_names is defined %} + user_principal_names = {{ ad_group.owners.user_principal_names | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} + prevent_duplicate_name = {{ ad_group.owners.prevent_duplicate_name | default(false) | string | lower }} + } +{% endfor %} +} diff --git a/templates/resources/azuread_groups_membership.tfvars.j2 b/templates/resources/azuread_groups_membership.tfvars.j2 new file mode 100644 index 000000000..faf15682b --- /dev/null +++ b/templates/resources/azuread_groups_membership.tfvars.j2 @@ -0,0 +1,21 @@ +azuread_groups_membership = { +{% for key, value in resources.subscriptions[subscription_key].azuread_groups_membership.items() %} + {{ key }} = { +{% for l1_key , l1_value in value.items() %} + {{l1_key}} = { +{% for l2_key, l2_value in l1_value.items() %} + {{l2_key}} = { +{% if l2_value.group_lz_key is defined %} + group_lz_key = "{{ l2_value.group_lz_key }}" +{% endif %} +{% if l2_value.lz_key is defined %} + lz_key = "{{ l2_value.lz_key }}" +{% endif %} + keys = {{ l2_value['keys'] | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endfor %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/azuread_service_principals.tfvars.j2 b/templates/resources/azuread_service_principals.tfvars.j2 new file mode 100644 index 000000000..7503333e3 --- /dev/null +++ b/templates/resources/azuread_service_principals.tfvars.j2 @@ -0,0 +1,10 @@ +azuread_service_principals = { + +{% for key, sp in resources.subscriptions[subscription_key].azuread_service_principals.items() %} + {{ key }} = { + azuread_application = { + key = "{{ sp.azuread_application.key }}" + } + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/azurerm_firewall_policies.tfvars.j2 b/templates/resources/azurerm_firewall_policies.tfvars.j2 new file mode 100644 index 000000000..1b787593a --- /dev/null +++ b/templates/resources/azurerm_firewall_policies.tfvars.j2 @@ -0,0 +1,50 @@ +azurerm_firewall_policies = { +{% for key, value in resources.subscriptions[subscription_key].azurerm_firewall_policies.items() %} + {{ key }} = { + name = "{{ value.name }}" +{% if value.region_key is defined %} + region = "{{ value.region_key }}" +{% endif %} +{% if value.sku is defined %} + sku = "{{ value.sku }}" +{% endif %} + resource_group = { + key = "{{ value.resource_group.key }}" +{% if value.resource_group.lz_key is defined %} + lz_key = "{{ config.tfstates.platform.azurerm_firewall_policies[value.resource_group.lz_key].lz_key_name }}" +{% endif %} + } +{% if value.base_policy is defined %} + 
base_policy = { + key = "{{ value.base_policy.key }}" +{% if value.resource_group.lz_key is defined %} + lz_key = "{{ config.tfstates.platform.azurerm_firewall_policies[value.resource_group.lz_key].lz_key_name }}" +{% endif %} + } +{% endif %} +{% if value.dns is defined %} + dns = { +{% if value.dns.servers is defined %} + servers = "{{ value.dns.servers }}" +{% endif %} +{% if value.dns.proxy_enabled is defined %} + proxy_enabled = {{ value.dns.proxy_enabled | string | lower }} +{% endif %} + } +{% endif %} +{% if value.threat_intelligence_mode is defined %} + threat_intelligence_mode = "{{ value.threat_intelligence_mode }}" +{% endif %} +{% if value.threat_intelligence_allowlist is defined %} + threat_intelligence_allowlist = { +{% if value.threat_intelligence_allowlist.ip_addresses is defined %} + ip_addresses = {{ value.threat_intelligence_allowlist.ip_addresses | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if value.threat_intelligence_allowlist.fqdns is defined %} + fqdns = {{ value.threat_intelligence_allowlist.fqdns | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} + } +{% endfor %} +} diff --git a/templates/resources/azurerm_firewalls.tfvars.j2 b/templates/resources/azurerm_firewalls.tfvars.j2 new file mode 100644 index 000000000..5711738f7 --- /dev/null +++ b/templates/resources/azurerm_firewalls.tfvars.j2 @@ -0,0 +1,106 @@ +azurerm_firewalls = { +{% for key, value in resources.subscriptions[subscription_key].azurerm_firewalls.items() %} + {{ key }} = { + name = "{{ value.name }}" + resource_group_key = "{{ value.resource_group_key }}" + vnet_key = "{{ value.vnet_key }}" +{% if value.sku_tier is defined %} + sku_tier = "{{ value.sku_tier }}" +{% endif %} +{% if value.sku_name is defined %} + sku_name = "{{ value.sku_name }}" +{% endif %} +{% if value.firewall_policy is defined %} + firewall_policy = { +{% if value.firewall_policy.firewall_policy_key is defined %} + firewall_policy_key = "{{ value.firewall_policy_key }}" +{% else %} +{% if value.firewall_policy.key is defined %} + key = "{{ value.firewall_policy.key }}" +{% endif %} +{% if value.firewall_policy.lz_key is defined %} + lz_key = "{{ value.firewall_policy.lz_key }}" +{% endif %} +{% if value.firewall_policy.id is defined %} + id = "{{ value.firewall_policy.id }}" +{% endif %} +{% endif %} + } +{% endif %} +{% if value.zones is defined %} + zones = {{ value.zones | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if value.threat_intel_mode is defined %} + threat_intel_mode = "{{ value.threat_intel_mode }}" +{% endif %} +{% if value.private_ip_ranges is defined %} + private_ip_ranges = {{ value.private_ip_ranges | replace('None','[]') | replace('\'','\"') }} +{% endif %} + +{% if value.tags is defined %} + tags = { +{% for tag_key, tag_value in value.tags.items() %} + {{ tag_key }} = "{{ tag_value }}" +{% endfor %} + } +{% endif %} +{% if value.management_ip_configuration is defined %} + management_ip_configuration = { + lz_key = "{{ value.management_ip_configuration.name }}" +{% if value.management_ip_configuration.public_ip_address_id is defined %} + public_ip_address_id = "{{ value.management_ip_configuration.public_ip_address_id }}" +{% else %} + public_ip_key = "{{ value.management_ip_configuration.public_ip_key }}" +{% endif %} +{% if value.management_ip_configuration.subnet_id is defined %} + subnet_id = "{{ value.management_ip_configuration.subnet_id }}" +{% else %} + vnet_key = "{{ value.management_ip_configuration.vnet_key }}" + subnet_key = "{{ 
value.management_ip_configuration.subnet_key }}" +{% if value.management_ip_configuration.lz_key is defined %} + lz_key = "{{ value.management_ip_configuration.lz_key }}" +{% endif %} +{% endif %} + } +{% endif %} +{% if value.virtual_hub_id is defined %} + virtual_hub = { +{% if value.virtual_hub.virtual_hub_id is defined %} + virtual_hub_id = "{{ value.virtual_hub_id }}" +{% elif value.virtual_hub is defined %} + virtual_wan_key = " {{ value.virtual_hub.virtual_wan_key }}" + virtual_hub_key = " {{ value.virtual_hub.virtual_hub_key }}" +{% if value.virtual_hub.lz_key is defined %} + lz_key = "{{ value.virtual_hub.lz_key }}" +{% endif %} + virtual_wan_key = " {{ value.virtual_hub.virtual_wan_key }}" +{% endif %} + public_ip_count = " {{ value.virtual_hub.public_ip_count }}" + } +{% endif %} +{% if value.public_ips is defined %} + public_ips = { +{% for p_key, p_value in value.public_ips.items() %} + {{ p_key }} = { + name = "{{ p_value.name }}" +{% if p_value.public_ip_id is defined %} + public_ip_id = "{{ p_value.public_ip_id }}" +{% else %} + public_ip_key = "{{ p_value.public_ip_key }}" +{% endif %} +{% if p_value.subnet_id is defined %} + subnet_id = "{{ p_value.subnet_id }}" +{% else %} + vnet_key = "{{ p_value.vnet_key }}" + subnet_key = "{{ p_value.subnet_key }}" +{% if p_value.lz_key is defined %} + lz_key = "{{ p_value.lz_key }}" +{% endif %} +{% endif %} + } +{% endfor %} + } +{% endif %} + } +{% endfor %} +} diff --git a/templates/resources/custom_role_definitions.tfvars.j2 b/templates/resources/custom_role_definitions.tfvars.j2 new file mode 100644 index 000000000..0cbfac619 --- /dev/null +++ b/templates/resources/custom_role_definitions.tfvars.j2 @@ -0,0 +1,20 @@ +custom_role_definitions = { +{% for key, value in resources.subscriptions[subscription_key].custom_role_definitions.items() %} + {{ key }} = { + name = "{{ value.name }}" +{% if value.useprefix is defined %} + useprefix = "{{ value.useprefix | string | lower }}" +{% endif %} +{% if value.description is defined %} + description = "{{ value.description }}" +{% endif %} +{% if value.permissions is defined %} + permissions = { +{% for p_key, permission in value.permissions.items() %} + {{ p_key }} = {{ permission | sort | replace('None','[]') | replace('\'','\"') | replace(',', ',\n') }} +{% endfor %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/diagnostic_log_analytics.tfvars.j2 b/templates/resources/diagnostic_log_analytics.tfvars.j2 new file mode 100644 index 000000000..9fd8fe2a2 --- /dev/null +++ b/templates/resources/diagnostic_log_analytics.tfvars.j2 @@ -0,0 +1,37 @@ +# +# Define the settings for log analytics workspace and solution map +# + +diagnostic_log_analytics = { +{% for key, dla in resources.subscriptions[subscription_key].diagnostic_log_analytics.items() %} + {{ key }} = { + region = "{{ dla.region | default(config.caf_terraform.launchpad.default_region_key)}}" + name = "{{ dla.name }}" + resource_group_key = "{{ dla.resource_group_key }}" + +{% if resources.subscriptions[subscription_key].diagnostic_log_analytics[key].diagnostic_profiles is defined %} + # you can setup up to 5 key + diagnostic_profiles = { +{% for dp_key, dp_value in resources.subscriptions[subscription_key].diagnostic_log_analytics[key].diagnostic_profiles.items() %} + {{ dp_key }} = { + definition_key = "{{ dp_value.definition_key }}" + destination_type = "{{ dp_value.destination_type }}" + destination_key = "{{ dp_value.destination_key }}" + } +{% endfor %} + } +{% endif %} +{% if 
resources.subscriptions[subscription_key].diagnostic_log_analytics[key].solutions is defined %} + solutions = { +{% for sol_key, sol_value in resources.subscriptions[subscription_key].diagnostic_log_analytics[key].solutions.items() %} + {{ sol_key }} = { + "publisher" = "{{ sol_value.publisher }}" + "product" = "{{ sol_value.product }}" + } +{% endfor %} + + } +{% endif %} + } +{% endfor %} +} diff --git a/templates/resources/diagnostic_storage_accounts.tfvars.j2 b/templates/resources/diagnostic_storage_accounts.tfvars.j2 new file mode 100644 index 000000000..d3dda9027 --- /dev/null +++ b/templates/resources/diagnostic_storage_accounts.tfvars.j2 @@ -0,0 +1,41 @@ +# Defines different repositories for the diagnostics logs +# Storage accounts, log analytics, event hubs + +diagnostic_storage_accounts = { +{% for key in config.caf_terraform.launchpad.regions.keys() %} + # Stores diagnostic logging for {{key}} + diaglogs_{{config.caf_terraform.launchpad.regions[key].slug}} = { + name = "diaglogs{{ config.caf_terraform.launchpad.regions[key].slug }}" + region = "{{key}}" + resource_group_key = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.diagnostics.resource_group_key }}" + account_kind = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.diagnostics.account_kind }}" + account_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.diagnostics.account_tier }}" + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + access_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.diagnostics.access_tier }}" + } +{% endfor %} +{% for key in config.caf_terraform.launchpad.regions.keys() %} + # Stores security logs for siem for {{key}} + diagsiem_{{config.caf_terraform.launchpad.regions[key].slug}} = { + name = "siem{{ config.caf_terraform.launchpad.regions[key].slug }}" + region = "{{key}}" + resource_group_key = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.siem.resource_group_key }}" + account_kind = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.siem.account_kind }}" + account_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.siem.account_tier }}" + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + access_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.siem.access_tier }}" + } +{% endfor %} +{% for key in config.caf_terraform.launchpad.regions.keys() %} + # Stores boot diagnostic for {{key}} + bootdiag_{{config.caf_terraform.launchpad.regions[key].slug}} = { + name = "boot{{ config.caf_terraform.launchpad.regions[key].slug }}" + region = "{{key}}" + resource_group_key = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.bootdiagnostics.resource_group_key }}" + account_kind = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.bootdiagnostics.account_kind }}" + account_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.bootdiagnostics.account_tier }}" + account_replication_type = "{{ config.caf_terraform.launchpad.account_replication_type }}" + access_tier = "{{ resources.subscriptions[subscription_key].diagnostic_storage_accounts.bootdiagnostics.access_tier }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/diagnostics_definition.tfvars.j2 b/templates/resources/diagnostics_definition.tfvars.j2 
new file mode 100644 index 000000000..c6a7a3ce2 --- /dev/null +++ b/templates/resources/diagnostics_definition.tfvars.j2 @@ -0,0 +1,274 @@ + +# +# Define a set of settings for the various type of Azure resources +# + +diagnostics_definition = { + log_analytics = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["Audit", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + default_all = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AuditEvent", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + bastion_host = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["BastionAuditLogs", true, false, 7], + ] + } + + } + + networking_all = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["VMProtectionAlerts", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + public_ip_address = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["DDoSProtectionNotifications", true, false, 7], + ["DDoSMitigationFlowLogs", true, false, 7], + ["DDoSMitigationReports", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + network_security_group = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["NetworkSecurityGroupEvent", true, false, 7], + ["NetworkSecurityGroupRuleCounter", true, false, 7], + ] + } + + } + + network_interface_card = { + name = "operational_logs_and_metrics" + categories = { + # log = [ + # # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + # ["AuditEvent", true, false, 7], + # ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + azure_container_registry = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["ContainerRegistryRepositoryEvents", true, false, 7], + ["ContainerRegistryLoginEvents", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + } + + azure_kubernetes_cluster = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # 
["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["kube-apiserver", true, false, 7], + ["kube-audit", true, false, 7], + ["kube-audit-admin", true, false, 7], + ["kube-controller-manager", true, false, 7], + ["kube-scheduler", true, false, 7], + ["cluster-autoscaler", true, false, 7], + ["guard", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + } + + azure_site_recovery = { + name = "operational_logs_and_metrics" + log_analytics_destination_type = "Dedicated" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AzureBackupReport", true, true, 7], + ["CoreAzureBackup", true, true, 7], + ["AddonAzureBackupAlerts", true, true, 7], + ["AddonAzureBackupJobs", true, true, 7], + ["AddonAzureBackupPolicy", true, true, 7], + ["AddonAzureBackupProtectedInstance", true, true, 7], + ["AddonAzureBackupStorage", true, true, 7], + ["AzureSiteRecoveryJobs", true, true, 7], + ["AzureSiteRecoveryEvents", true, true, 7], + ["AzureSiteRecoveryReplicatedItems", true, true, 7], + ["AzureSiteRecoveryReplicationStats", true, true, 7], + ["AzureSiteRecoveryRecoveryPoints", true, true, 7], + ["AzureSiteRecoveryReplicationDataUploadRate", true, true, 7], + ["AzureSiteRecoveryProtectedDiskDataChurn", true, true, 30], + ] + metric = [ + #["AllMetrics", 60, True], + ] + } + + } + + azure_automation = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["JobLogs", true, true, 30], + ["JobStreams", true, true, 30], + ["DscNodeStatus", true, true, 30], + ] + metric = [ + # ["Category name", "Metric Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, true, 30], + ] + } + + } + + event_hub_namespace = { + name = "operational_logs_and_metrics" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["ArchiveLogs", true, false, 7], + ["OperationalLogs", true, false, 7], + ["AutoScaleLogs", true, false, 7], + ["KafkaCoordinatorLogs", true, false, 7], + ["KafkaUserErrorLogs", true, false, 7], + ["EventHubVNetConnectionEvent", true, false, 7], + ["CustomerManagedKeyUserLogs", true, false, 7], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", true, false, 7], + ] + } + + } + + compliance_all = { + name = "compliance_logs" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AuditEvent", true, true, 365], + ] + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", false, false, 7], + ] + } + + } + + siem_all = { + name = "siem" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AuditEvent", true, true, 0], + ] + + metric = [ + #["Category name", "Diagnostics Enabled(true/false)", "Retention Enabled(true/false)", Retention_period] + ["AllMetrics", false, false, 0], + ] + } + + } + + subscription_operations = { + name = "subscription_operations" + categories = { + log = [ 
+ # ["Category name", "Diagnostics Enabled(true/false)"] + ["Administrative", true], + ["Security", true], + ["ServiceHealth", true], + ["Alert", true], + ["Policy", true], + ["Autoscale", true], + ["ResourceHealth", true], + ["Recommendation", true], + ] + } + } + + subscription_siem = { + name = "activity_logs_for_siem" + categories = { + log = [ + # ["Category name", "Diagnostics Enabled(true/false)"] + ["Administrative", false], + ["Security", true], + ["ServiceHealth", false], + ["Alert", false], + ["Policy", true], + ["Autoscale", false], + ["ResourceHealth", false], + ["Recommendation", false], + ] + } + + } + +} diff --git a/templates/resources/diagnostics_destinations.tfvars.j2 b/templates/resources/diagnostics_destinations.tfvars.j2 new file mode 100644 index 000000000..55f8acce2 --- /dev/null +++ b/templates/resources/diagnostics_destinations.tfvars.j2 @@ -0,0 +1,30 @@ +# Defines the different destination for the different log profiles +# Different profiles to target different operational teams + +diagnostics_destinations = { + # Storage keys must reference the azure region name + # For storage, reference "all_regions" and we will send the logs to the storage account + # in the region of the deployment + storage = { + all_regions = { +{% for key in config.caf_terraform.launchpad.regions.keys() %} + "{{ config.caf_terraform.launchpad.regions[key].name }}" = { + storage_account_key = "diagsiem_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + } +{% endfor %} + } + } + + log_analytics = { + central_logs = { + log_analytics_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + log_analytics_destination_type = "Dedicated" + } + } + + event_hub_namespaces = { + central_logs = { + event_hub_namespace_key = "central_logs_{{config.caf_terraform.launchpad.regions[config.caf_terraform.launchpad.default_region_key].slug}}" + } + } +} diff --git a/templates/resources/global_settings.tfvars.j2 b/templates/resources/global_settings.tfvars.j2 new file mode 100644 index 000000000..03b0894c4 --- /dev/null +++ b/templates/resources/global_settings.tfvars.j2 @@ -0,0 +1,22 @@ +{% if resources.subscriptions[subscription_key].global_settings is defined %} + global_settings = { + passthrough = {{ resources.subscriptions[subscription_key].global_settings.passthrough | string | lower }} + prefix = "{{ resources.subscriptions[subscription_key].global_settings.prefix }}" + use_slug = {{ resources.subscriptions[subscription_key].global_settings.use_slug | string | lower }} + inherit_tags = {{ resources.subscriptions[subscription_key].global_settings.inherit_tags | string | lower }} + random_length = {{ resources.subscriptions[subscription_key].global_settings.random_length }} +{% if resources.subscriptions[subscription_key].global_settings.tags is defined %} + tags = { +{% for tag, value in resources.subscriptions[subscription_key].global_settings.tags.items() %} + "{{ tag }}" = "{{value}}" +{% endfor %} + } +{% endif %} + default_region = "{{ resources.subscriptions[subscription_key].global_settings.default_region_key }}" + regions = { +{% for key in resources.subscriptions[subscription_key].global_settings.regions.keys() %} + {{ key }} = "{{ resources.subscriptions[subscription_key].global_settings.regions[key].name }}" +{% endfor %} + } + } +{% endif %} \ No newline at end of file diff --git a/templates/resources/keyvault_access_policies.tfvars.j2 
b/templates/resources/keyvault_access_policies.tfvars.j2 new file mode 100644 index 000000000..c1626040e --- /dev/null +++ b/templates/resources/keyvault_access_policies.tfvars.j2 @@ -0,0 +1,25 @@ +keyvault_access_policies = { + {% for key, policy in resources.subscriptions[subscription_key].keyvault_access_policies.items() %} + {{ key }} = { +{% for s_key, s_policy in policy.items() %} + {{ s_key }} = { +{% if s_policy.lz_key is defined %} + lz_key = "{{ s_policy.lz_key }}" +{% endif %} +{% if s_policy.azuread_group_key is defined %} + azuread_group_key = "{{ s_policy.azuread_group_key }}" +{% elif s_policy.azuread_service_principal_key is defined %} + azuread_service_principal_key = "{{ s_policy.azuread_service_principal_key }}" +{% elif s_policy.managed_identity_key is defined %} + managed_identity_key = "{{ s_policy.managed_identity_key }}" +{% endif %} +{% if s_policy.secret_permissions is defined %} + secret_permissions = {{ s_policy.secret_permissions | replace('None','[]') | replace('\'','\"') }} +{% elif s_policy.certificate_permissions is defined %} + certificate_permissions = {{ s_policy.certificate_permissions | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endfor %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/keyvaults.tfvars.j2 b/templates/resources/keyvaults.tfvars.j2 new file mode 100644 index 000000000..7eb353ee2 --- /dev/null +++ b/templates/resources/keyvaults.tfvars.j2 @@ -0,0 +1,54 @@ +keyvaults = { +{% for key, keyvault in resources.subscriptions[subscription_key].keyvaults.items() %} + {{ key }} = { + name = "{{ resources.subscriptions[subscription_key].keyvaults[key].name }}" + resource_group_key = "{{ resources.subscriptions[subscription_key].keyvaults[key].resource_group_key }}" + sku_name = "{{ resources.subscriptions[subscription_key].keyvaults[key].sku_name | default('standard')}}" +{% if keyvault.enabled_for_deployment is defined %} + enabled_for_deployment = "{{ keyvault.enabled_for_deployment | string | lower }}" +{% endif %} +{% if keyvault.enabled_for_disk_encryption is defined %} + enabled_for_disk_encryption = "{{ keyvault.enabled_for_disk_encryption | string | lower }}" +{% endif %} +{% if keyvault.enabled_for_template_deployment is defined %} + enabled_for_template_deployment = "{{ keyvault.enabled_for_template_deployment | string | lower }}" +{% endif %} +{% if keyvault.purge_protection_enabled is defined %} + purge_protection_enabled = "{{ keyvault.purge_protection_enabled | string | lower }}" +{% endif %} +{% if keyvault.enable_rbac_authorization is defined %} + enable_rbac_authorization = "{{ keyvault.enable_rbac_authorization | string | lower }}" +{% endif %} +{% if keyvault.soft_delete_retention_days is defined %} + soft_delete_retention_days = {{ keyvault.soft_delete_retention_days }} +{% endif %} + + creation_policies = { +{% if config.platform_identity is defined %} +{% if config.platform_identity.azuread_identity_mode == 'logged_in_user' %} + logged_in_user = { + secret_permissions = ["Set", "Get", "List", "Delete", "Purge", "Recover"] + } +{% endif %} +{% endif %} +{% for p_key, policy in keyvault.creation_policies.items() %} + {{ p_key }} = { +{% if policy.lz_key is defined %} + lz_key = "{{ policy.lz_key }}" +{% endif %} +{% if policy.azuread_group_key is defined %} + azuread_group_key = "{{ policy.azuread_group_key }}" +{% elif policy.azuread_service_principal_key is defined %} + azuread_service_principal_key = "{{ policy.azuread_service_principal_key }}" +{% elif 
policy.managed_identity_key is defined %} + managed_identity_key = "{{ policy.managed_identity_key }}" +{% endif %} +{% if policy.secret_permissions is defined %} + secret_permissions = {{ policy.secret_permissions | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endfor %} + } + } +{% endfor %} +} diff --git a/templates/resources/landingzone.tfvars.j2 b/templates/resources/landingzone.tfvars.j2 new file mode 100644 index 000000000..67c882584 --- /dev/null +++ b/templates/resources/landingzone.tfvars.j2 @@ -0,0 +1,50 @@ +landingzone = { + backend_type = "{{ config.caf_terraform.launchpad.backend_type | default("azurerm") }}" + level = "{{ config.tfstates['asvm'][subscription_key].level }}" +{% if deployments.deployments[subscription_key][deployment].landingzone.key.asvm is defined %} +{% for l_key, l_value in deployments.deployments[subscription_key][deployment].landingzone.key.asvm.items() %} + key = "{{ config.tfstates['asvm'][l_key][l_value].lz_key_name}}" +{% endfor %} +{% endif %} +{% if deployments.deployments[subscription_key][deployment].landingzone.global_settings_key.platform is defined %} +{% if deployments.deployments[subscription_key][deployment].landingzone.global_settings_key.platform.virtual_hubs is defined %} + global_settings_key = "{{ config.tfstates['platform'].virtual_hubs[deployments.deployments[subscription_key][deployment].landingzone.global_settings_key.platform.virtual_hubs].lz_key_name }}" +{% elif deployments.deployments[subscription_key][deployment].landingzone.global_settings_key.platform.asvm is defined %} + global_settings_key = "{{ config.tfstates['platform'].asvm.lz_key_name }}" +{% endif %} +{% else %} +{% for m_key, m_value in deployments.deployments[subscription_key][deployment].landingzone.global_settings_key.asvm.items() %} + global_settings_key = "{{ config.tfstates['asvm'][m_key][m_value].lz_key_name }}" +{% endfor %} +{% endif %} + +{% if deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates is defined %} + tfstates = { +{% if deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.asvm is defined %} +{% for a_key, a_value in deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.asvm.items() %} + {{ config.tfstates['asvm'][a_key][a_value].lz_key_name }} = { + tfstate = "{{ config.tfstates['asvm'][a_key][a_value].tfstate }}" + workspace = "{{ config.tfstates['asvm'][a_key].workspace }}" + } +{% endfor %} +{% endif %} +{% if deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform is defined %} +{% for p_key in deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform.keys() %} +{% if config.tfstates['platform'][p_key][deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform[p_key]] is defined %} + {{ config.tfstates['platform'][p_key][deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform[p_key]].lz_key_name }} = { + tfstate = "{{ config.tfstates['platform'][p_key][deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform[p_key]].tfstate }}" + level = "lower" + workspace = "{{ config.tfstates['platform'][p_key][deployments.deployments[subscription_key][deployment].landingzone.remote_tfstates.platform[p_key]].workspace | default('tfstate') }}" + } +{% else %} + {{ config.tfstates['platform'][p_key].lz_key_name }} = { + tfstate = "{{ config.tfstates['platform'][p_key].tfstate }}" + 
level = "lower" + workspace = "{{ config.tfstates['platform'][p_key].workspace | default('tfstate') }}" + } +{% endif %} +{% endfor %} +{% endif %} + } +{% endif %} +} diff --git a/templates/resources/managed_identities.tfvars.j2 b/templates/resources/managed_identities.tfvars.j2 new file mode 100644 index 000000000..d7c483b16 --- /dev/null +++ b/templates/resources/managed_identities.tfvars.j2 @@ -0,0 +1,15 @@ +managed_identities = { +{% for key, value in resources.subscriptions[subscription_key].managed_identities.items() %} + {{ key }} = { + name = "{{ value.name }}" + resource_group_key = "{{ value.resource_group_key }}" +{% if resource_group.tags is defined %} + tags = { +{% for tag_key, tag_value in resource_group.tags.items() %} + {{ tag_key }} = "{{ tag_value }}" +{% endfor %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/monitor_action_groups.tfvars.j2 b/templates/resources/monitor_action_groups.tfvars.j2 new file mode 100644 index 000000000..83601f60a --- /dev/null +++ b/templates/resources/monitor_action_groups.tfvars.j2 @@ -0,0 +1,69 @@ +monitor_action_groups = { +{% for key, mag in resources.subscriptions[subscription_key].monitor_action_groups.items() %} + {{ key }} = { + action_group_name = "{{ mag.action_group_name }}" + shortname = "{{ mag.shortname }}" + resource_group_key = "{{ mag.resource_group_key }}" + +{% if mag.arm_role_alert is defined %} + arm_role_alert = { +{% for arm_key, arm_value in mag.arm_role_alert.items() %} + {{ arm_key }} = { + name = "{{arm_value.name}}" + role_name = "{{arm_value.role_name}}" + use_common_alert_schema = {{arm_value.use_common_alert_schema | lower | default(false)}} + } +{% endfor %} + } +{%endif%} + +{% if mag.automation_runbook_receiver is defined %} + automation_runbook_receiver = { +{% for arr_key, arr_value in mag.automation_runbook_receiver.items() %} + {{ arr_key }} = { + name = "{{arr_value.name}}" + automation_account_id = "{{arr_value.automation_account_id}}" + runbook_name = "{{arr_value.runbook_name}}" + webhook_resource_id = "{{arr_value.webhook_resource_id}}" + is_global_runbook = {{arr_value.is_global_runbook}} + service_uri = "{{arr_value.service_uri}}" + use_common_alert_schema = {{arr_value.use_common_alert_schema | lower | default(false)}} + } +{% endfor %} + } +{%endif%} + +{% if mag.email_receiver is defined %} + email_receiver = { +{% for email_key, email_value in mag.email_receiver.items() %} + {{ email_key }} = { + name = "{{email_value.name}}" + email_address = "{{email_value.email_address}}" + use_common_alert_schema = {{email_value.use_common_alert_schema | lower | default(false)}} + } +{% endfor %} + } +{%endif%} + +{% if mag.sms_receiver is defined %} + sms_receiver = { +{% for sms_key, sms_value in mag.sms_receiver.items() %} + {{ sms_key }} = { + name = "{{sms_value.name}}" + country_code = "{{sms_value.country_code}}" + phone_number = "{{sms_value.phone_number}}" + } +{% endfor %} + } +{%endif%} + +{% if mag.tags is defined %} + tags = { +{% for tag_key, tag_value in mag.tags.items() %} + {{ tag_key }} = "{{ tag_value }}" +{% endfor %} + } +{%endif%} + } +{%endfor%} +} \ No newline at end of file diff --git a/templates/resources/network_security_group_definition.tfvars.j2 b/templates/resources/network_security_group_definition.tfvars.j2 new file mode 100644 index 000000000..90cf1d185 --- /dev/null +++ b/templates/resources/network_security_group_definition.tfvars.j2 @@ -0,0 +1,54 @@ +network_security_group_definition = { +{% for key, value in 
resources.subscriptions[subscription_key].network_security_group_definition.items() %} + {{ key }} = { + version = {{ value.version }} + resource_group_key = "{{ value.resource_group_key }}" + name = "{{ value.name }}" +{% if value.nsg is defined %} + nsg = [ +{% for direction, l1_value in value.nsg.items() %} +{% for priority, l2_value in l1_value.items() %} + { + name = "{{ l2_value.name }}" + priority = "{{ priority }}" + direction = "{{ direction }}" + access = "{{ l2_value.access }}" + protocol = "{{ l2_value.protocol }}" +{% if l2_value.source_port_range is defined %} + source_port_range = "{{ l2_value.source_port_range }}" +{% endif %} +{% if l2_value.source_port_ranges is defined %} + source_port_ranges = {{ l2_value.source_port_ranges | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if l2_value.destination_port_range is defined %} + destination_port_range = "{{ l2_value.destination_port_range }}" +{% endif %} +{% if l2_value.destination_port_ranges is defined %} + destination_port_ranges = {{ l2_value.destination_port_ranges | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if l2_value.source_address_prefix is defined %} + source_address_prefix = "{{ l2_value.source_address_prefix }}" +{% endif %} +{% if l2_value.source_address_prefixes is defined %} + source_address_prefixes = {{ l2_value.source_address_prefixes | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if l2_value.destination_address_prefix is defined %} + destination_address_prefix = "{{ l2_value.destination_address_prefix }}" +{% endif %} +{% if l2_value.destination_address_prefixes is defined %} + destination_address_prefixes = {{ l2_value.destination_address_prefixes | replace('None','[]') | replace('\'','\"') }} +{% endif %} +{% if l2_value.source_application_security_group_ids is defined %} + source_application_security_group_ids = "{{ l2_value.source_application_security_group_ids }}" +{% endif %} +{% if l2_value.destination_application_security_group_ids is defined %} + destination_application_security_group_ids = {{ l2_value.destination_application_security_group_ids | replace('None','[]') | replace('\'','\"') }} +{% endif %} + }, +{% endfor %} +{% endfor %} + ] +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/private_dns.tfvars.j2 b/templates/resources/private_dns.tfvars.j2 new file mode 100644 index 000000000..aeba296a4 --- /dev/null +++ b/templates/resources/private_dns.tfvars.j2 @@ -0,0 +1,38 @@ +private_dns = { +{% for key, value in resources.subscriptions[subscription_key].private_dns.items() %} + "{{ key }}" = { + name = "{{ value.name }}" + resource_group_key = "{{ value.resource_group_key }}" +{% if value.tags is defined %} + tags = { +{% for k_tag, tag in value.tags.items() %} + "{{ k_tag }}" = "{{ tag }}" + } +{% endfor %} +{% endif %} +{% if value.vnet_links is defined %} + vnet_links = { +{% for v_key, v_value in value.vnet_links.items() %} + {{ v_key }} = { + name = "{{ v_value.name }}" + vnet_key = "{{ v_value.vnet_key }}" +{% if v_value.registration_enabled is defined %} + registration_enabled = {{ v_value.registration_enabled | string | lower }} +{% endif %} +{% if v_value.lz_key is defined %} + lz_key = "{{ v_value.lz_key }}" +{% endif %} +{% if v_value.tags is defined %} + tags = { +{% for v_tag, v_tag in v_value.tags.items() %} + "{{ k_tag }}" = "{{ v_tag }}" + } +{% endfor %} +{% endif %} + } +{% endfor %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git 
a/templates/resources/public_ip_addresses.tfvars.j2 b/templates/resources/public_ip_addresses.tfvars.j2 new file mode 100644 index 000000000..03e218019 --- /dev/null +++ b/templates/resources/public_ip_addresses.tfvars.j2 @@ -0,0 +1,12 @@ +public_ip_addresses = { +{% for key, value in resources.subscriptions[subscription_key].public_ip_addresses.items() %} + {{ key }} = { + name = "{{ value.name }}" + resource_group_key = "{{ value.resource_group_key }}" + sku = "{{ value.sku }}" + allocation_method = "{{ value.allocation_method }}" + ip_version = "{{ value.ip_version }}" + idle_timeout_in_minutes = "{{ value.idle_timeout_in_minutes }}" + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/recovery_vaults.tfvars.j2 b/templates/resources/recovery_vaults.tfvars.j2 new file mode 100644 index 000000000..6da1e957b --- /dev/null +++ b/templates/resources/recovery_vaults.tfvars.j2 @@ -0,0 +1,64 @@ +recovery_vaults = { +{% for key, asr in resources.subscriptions[subscription_key].recovery_vaults.items() %} + {{ key }} = { + name = "{{ asr.name }}" + resource_group_key = "{{ asr.resource_group_key }}" + + region = "{{ asr.region | default(config.caf_terraform.launchpad.default_region_key) }}" + soft_delete_enabled = {{ asr.soft_delete_enabled | default(true) | string | lower }} +{% if asr.backup_policies is defined %} + backup_policies = { +{% if asr.backup_policies.vms is defined %} + vms = { +{% for bkp_key, bkp in asr.backup_policies.vms.items() %} + {{ bkp_key }} = { + name = "{{ bkp.name }}" + vault_key = "{{ key }}" + rg_key = "{{ asr.resource_group_key }}" + timezone = "{{ bkp.timezone | default('UTC') }}" +{% if bkp.backup is defined %} + backup = { +{% if bkp.backup.frequency is defined %} + frequency = "{{ bkp.backup.frequency }}" +{% endif %} + time = "{{ bkp.backup.time | string }}" +{% if bkp.backup.weekdays is defined %} + weekdays = {{ bkp.backup.weekdays | replace('None','[]') | replace('\'','\"') }} +{% endif %} + } +{% endif %} +{% if bkp.retention_daily is defined %} + retention_daily = { + count = {{ bkp.retention_daily.count }} + } +{% endif %} +{% if bkp.retention_weekly is defined %} + retention_weekly = { + count = {{ bkp.retention_weekly.count }} + weekdays = {{ bkp.retention_weekly.weekdays | replace('None','[]') | replace('\'','\"') }} + } +{% endif %} +{% if bkp.retention_monthly is defined %} + retention_monthly = { + count = {{ bkp.retention_monthly.count }} + weekdays = {{ bkp.retention_monthly.weekdays | replace('None','[]') | replace('\'','\"') }} + weeks = {{ bkp.retention_monthly.weeks | replace('None','[]') | replace('\'','\"') }} + } +{% endif %} +{% if bkp.retention_yearly is defined %} + retention_yearly = { + count = {{ bkp.retention_yearly.count }} + weekdays = {{ bkp.retention_yearly.weekdays | replace('None','[]') | replace('\'','\"') }} + weeks = {{ bkp.retention_yearly.weeks | replace('None','[]') | replace('\'','\"') }} + months = {{ bkp.retention_yearly.months | replace('None','[]') | replace('\'','\"') }} + } +{% endif %} + } +{% endfor %} + } +{% endif %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/resource_groups.tfvars.j2 b/templates/resources/resource_groups.tfvars.j2 new file mode 100644 index 000000000..939de697a --- /dev/null +++ b/templates/resources/resource_groups.tfvars.j2 @@ -0,0 +1,15 @@ +resource_groups = { +{% for key, resource_group in resources.subscriptions[subscription_key].resource_groups.items() %} + {{ key }} = { + name = "{{ resource_group.name 
}}" + region = "{{ resource_group.region_key | default(config.caf_terraform.launchpad.default_region_key) }}" +{% if resource_group.tags is defined %} + tags = { +{% for tag_key, tag_value in resource_group.tags.items() %} + {{ tag_key }} = "{{ tag_value }}" +{% endfor %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file diff --git a/templates/resources/role_mapping.tfvars.j2 b/templates/resources/role_mapping.tfvars.j2 new file mode 100644 index 000000000..79827a28b --- /dev/null +++ b/templates/resources/role_mapping.tfvars.j2 @@ -0,0 +1,31 @@ +role_mapping = { +{% for top_key, mappings in resources.subscriptions[subscription_key].role_mapping.items() %} + {{ top_key }} = { +{% for key, role_mappings in mappings.items() %} + {{ key }} = { +{% for resource_key, roles_objects in role_mappings.items() %} + "{{ resource_key }}" = { +{% if roles_objects.lz_key is defined %} + lz_key = "{{ roles_objects.lz_key }}" +{% endif %} +{% for role_name, principals_objects in roles_objects.items() %} +{% if role_name != 'lz_key' %} + "{{ role_name }}" = { +{% for key_principal_type, principal_mapping in principals_objects.items() %} + {{ key_principal_type }} = { +{% if principal_mapping['lz_key'] is defined %} + lz_key = "{{ principal_mapping['lz_key'] }}" +{% endif %} + keys = {{ principal_mapping['keys'] | replace('\'','\"')}} + } +{% endfor %} + } +{% endif %} +{% endfor %} + } +{% endfor %} + } +{% endfor %} + } +{% endfor %} +} diff --git a/templates/resources/servicehealth.tfvars.j2 b/templates/resources/servicehealth.tfvars.j2 new file mode 100644 index 000000000..f2ae44920 --- /dev/null +++ b/templates/resources/servicehealth.tfvars.j2 @@ -0,0 +1,22 @@ +monitoring = { +{% if resources.subscriptions[subscription_key].service_health_alerts is defined %} + service_health_alerts = { + enable_service_health_alerts = {{resources.subscriptions[subscription_key].service_health_alerts.enable_service_health_alerts | lower | default(true)}} + name = "{{resources.subscriptions[subscription_key].service_health_alerts.name}}" + action_group_name = "{{resources.subscriptions[subscription_key].service_health_alerts.action_group_name}}" + shortname = "{{resources.subscriptions[subscription_key].service_health_alerts.shortname}}" + resource_group_key = "{{resources.subscriptions[subscription_key].service_health_alerts.resource_group_key}}" +{% if resources.subscriptions[subscription_key].service_health_alerts.email_alert_settings is defined %} + email_alert_settings = { +{% for key, sha in resources.subscriptions[subscription_key].service_health_alerts.email_alert_settings.items() %} + {{ key }} = { + name = "{{ sha.name }}" + email_address = "{{ sha.email_address }}" + use_common_alert_schema = {{ sha.use_common_alert_schema | lower | default(false) }} + } +{% endfor %} + } +{% endif %} + } +{% endif %} +} diff --git a/templates/resources/subscriptions.tfvars.j2 b/templates/resources/subscriptions.tfvars.j2 new file mode 100644 index 000000000..f11c58513 --- /dev/null +++ b/templates/resources/subscriptions.tfvars.j2 @@ -0,0 +1,22 @@ +subscriptions = { +{% for key, value in resources.subscriptions[subscription_key].items() %} + {{ key }} = { + name = "{{ value.name }}" + billing_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.billing_account_name }}" + enrollment_account_name = "{{ config.caf_terraform.billing_subscription_role_delegations.enrollment_account_name }}" +{% if value.management_group_suffix is defined %} + management_group_id = "{{ 
config.platform_core_setup.enterprise_scale.management_group_prefix }}-{{ value.management_group_suffix }}"
+{% else %}
+    management_group_id = "{{ value.management_group_id }}"
+{% endif %}
+    workload = "{{ value.workload | default('Production') }}"
+{% if value.tags is defined %}
+    tags = {
+{% for tag_key in value.tags %}
+      {{ tag_key }} = "{{ value.tags[tag_key] }}"
+{% endfor %}
+    }
+{% endif %}
+  }
+{% endfor %}
+} \ No newline at end of file
diff --git a/templates/resources/virtual_hub_connections.tfvars.j2 b/templates/resources/virtual_hub_connections.tfvars.j2 new file mode 100644 index 000000000..b279dc554 --- /dev/null +++ b/templates/resources/virtual_hub_connections.tfvars.j2 @@ -0,0 +1,17 @@
+virtual_hub_connections = {
+{% for key, vhc in resources.subscriptions[subscription_key].virtual_hub_connections.items() %}
+  {{ key }} = {
+    name = "{{ vhc.name }}"
+    virtual_hub = {
+      lz_key = "{{ vhc.virtual_hub.lz_key }}"
+      key = "{{ vhc.virtual_hub.key }}"
+    }
+    vnet = {
+{% if vhc.vnet.lz_key is defined %}
+      lz_key = "{{ vhc.vnet.lz_key }}"
+{% endif %}
+      vnet_key = "{{ vhc.vnet.vnet_key }}"
+    }
+  }
+{% endfor %}
+} \ No newline at end of file
diff --git a/templates/resources/virtual_networks.tfvars.j2 b/templates/resources/virtual_networks.tfvars.j2 new file mode 100644 index 000000000..7a12ad5f0 --- /dev/null +++ b/templates/resources/virtual_networks.tfvars.j2 @@ -0,0 +1,73 @@
+vnets = {
+{% for key, vnet in resources.subscriptions[subscription_key].virtual_networks.items() %}
+  {{ key }} = {
+    resource_group_key = "{{vnet.resource_group_key}}"
+    vnet = {
+      name = "{{ vnet.name }}"
+      address_space = {{ vnet.address_space | replace('None','[]') | replace('\'','\"') }}
+{% if vnet.dns_servers is defined %}
+      dns_servers = {{ vnet.dns_servers | replace('None','[]') | replace('\'','\"') }}
+{% endif %}
+{% if vnet.dns_servers_keys is defined %}
+      dns_servers_keys = [
+{% for dns_key, dns_value in vnet.dns_servers_keys.items() %}
+        {
+          resource_type = "{{ dns_value.resource_type }}"
+          key = "{{ dns_value.key }}"
+          lz_key = "{{ dns_value.lz_key }}"
+{% if dns_value.interface_index is defined %}
+          interface_index = "{{ dns_value.interface_index }}"
+{% endif %}
+        }
+{% endfor %}
+      ]
+{% endif %}
+    }
+{% if vnet.subnets is defined %}
+    subnets = {
+{% for subnet_key, subnet in vnet.subnets.items() %}
+      {{ subnet_key }} = {
+        name = "{{subnet.name}}"
+        cidr = {{ vnet.subnets[subnet_key].cidr | replace('None','[]') | replace('\'','\"') }}
+{% if subnet.nsg_key is defined %}
+        nsg_key = "{{ subnet.nsg_key }}"
+{% endif %}
+{% if subnet.service_endpoints is defined %}
+        service_endpoints = {{ subnet.service_endpoints | replace('None','[]') | replace('\'','\"') }}
+{% endif %}
+{% if subnet.enforce_private_link_service_network_policies is defined %}
+        enforce_private_link_service_network_policies = true
+{% endif %}
+{% if subnet.enforce_private_link_endpoint_network_policies is defined %}
+        enforce_private_link_endpoint_network_policies = true
+{% else %}
+{% if subnet.nsg_key is not defined %}
+        nsg_key = "empty_nsg"
+{% endif %}
+{% endif %}
+{% if subnet.delegation is defined %}
+        delegation = {
+          name = "{{ subnet.delegation.name }}"
+          service_delegation = "{{ subnet.delegation.service_delegation }}"
+{% if subnet.delegation.actions is defined %}
+          actions = {{ subnet.delegation.actions | replace('None','[]') | replace('\'','\"') }}
+{% endif %}
+        }
+{% endif %}
+      }
+{% endfor %}
+    }
+{% endif %}
+{% if vnet.specialsubnets is defined %}
+    specialsubnets = {
+{% for subnet_key, subnet
in vnet.specialsubnets.items() %} + {{ subnet_key }} = { + name = "{{subnet.name}}" + cidr = {{ vnet.specialsubnets[subnet_key].cidr | replace('None','[]') | replace('\'','\"') }} + } +{% endfor %} + } +{% endif %} + } +{% endfor %} +} \ No newline at end of file