+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
+| [random](#requirement\_random) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 6.0 |
+| [random](#provider\_random) | >= 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [lambda\_function\_with\_efs](#module\_lambda\_function\_with\_efs) | ../../ | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_efs_access_point.lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_access_point) | resource |
+| [aws_efs_file_system.shared](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_file_system) | resource |
+| [aws_efs_mount_target.alpha](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_mount_target) | resource |
+| [random_pet.this](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [lambda\_cloudwatch\_log\_group\_arn](#output\_lambda\_cloudwatch\_log\_group\_arn) | The ARN of the CloudWatch Log Group |
+| [lambda\_function\_arn](#output\_lambda\_function\_arn) | The ARN of the Lambda Function |
+| [lambda\_function\_arn\_static](#output\_lambda\_function\_arn\_static) | The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions) |
+| [lambda\_function\_invoke\_arn](#output\_lambda\_function\_invoke\_arn) | The Invoke ARN of the Lambda Function |
+| [lambda\_function\_kms\_key\_arn](#output\_lambda\_function\_kms\_key\_arn) | The ARN for the KMS encryption key of Lambda Function |
+| [lambda\_function\_last\_modified](#output\_lambda\_function\_last\_modified) | The date Lambda Function resource was last modified |
+| [lambda\_function\_name](#output\_lambda\_function\_name) | The name of the Lambda Function |
+| [lambda\_function\_qualified\_arn](#output\_lambda\_function\_qualified\_arn) | The ARN identifying your Lambda Function Version |
+| [lambda\_function\_source\_code\_hash](#output\_lambda\_function\_source\_code\_hash) | Base64-encoded representation of raw SHA-256 sum of the zip file |
+| [lambda\_function\_source\_code\_size](#output\_lambda\_function\_source\_code\_size) | The size in bytes of the function .zip file |
+| [lambda\_function\_version](#output\_lambda\_function\_version) | Latest published version of Lambda Function |
+| [lambda\_layer\_arn](#output\_lambda\_layer\_arn) | The ARN of the Lambda Layer with version |
+| [lambda\_layer\_created\_date](#output\_lambda\_layer\_created\_date) | The date Lambda Layer resource was created |
+| [lambda\_layer\_layer\_arn](#output\_lambda\_layer\_layer\_arn) | The ARN of the Lambda Layer without version |
+| [lambda\_layer\_source\_code\_size](#output\_lambda\_layer\_source\_code\_size) | The size in bytes of the Lambda Layer .zip file |
+| [lambda\_layer\_version](#output\_lambda\_layer\_version) | The Lambda Layer version |
+| [lambda\_role\_arn](#output\_lambda\_role\_arn) | The ARN of the IAM role created for the Lambda Function |
+| [lambda\_role\_name](#output\_lambda\_role\_name) | The name of the IAM role created for the Lambda Function |
+| [local\_filename](#output\_local\_filename) | The filename of zip archive deployed (if deployment was from local) |
+| [s3\_object](#output\_s3\_object) | The map with S3 object data of zip archive deployed (if deployment was from S3) |
+
diff --git a/examples/with-efs/main.tf b/examples/with-efs/main.tf
new file mode 100644
index 00000000..90a0abed
--- /dev/null
+++ b/examples/with-efs/main.tf
@@ -0,0 +1,84 @@
+provider "aws" {
+ region = "eu-west-1"
+
+  # Make it faster by skipping unneeded validation checks
+ skip_metadata_api_check = true
+ skip_region_validation = true
+ skip_credentials_validation = true
+}
+
+resource "random_pet" "this" {
+ length = 2
+}
+
+module "lambda_function_with_efs" {
+ source = "../../"
+
+ function_name = "${random_pet.this.id}-lambda-in-vpc"
+ description = "My awesome lambda function"
+ handler = "index.lambda_handler"
+ runtime = "python3.12"
+
+ source_path = "${path.module}/../fixtures/python-app1"
+
+ vpc_subnet_ids = module.vpc.intra_subnets
+ vpc_security_group_ids = [module.vpc.default_security_group_id]
+ attach_network_policy = true
+
+ ######################
+ # Elastic File System
+ ######################
+
+ file_system_arn = aws_efs_access_point.lambda.arn
+ file_system_local_mount_path = "/mnt/shared-storage"
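+  # Note: Lambda requires the local mount path to begin with "/mnt/".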
+
+  # Explicitly declare the dependency on the EFS mount target.
+  # When creating or updating a Lambda function, the mount target must be in the 'available' lifecycle state.
+ depends_on = [aws_efs_mount_target.alpha]
+}
+
+######
+# VPC
+######
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
+
+ name = random_pet.this.id
+ cidr = "10.10.0.0/16"
+
+ azs = ["eu-west-1a"]
+ intra_subnets = ["10.10.101.0/24"]
+}
+
+######
+# EFS
+######
+
+resource "aws_efs_file_system" "shared" {}
+
+resource "aws_efs_mount_target" "alpha" {
+ file_system_id = aws_efs_file_system.shared.id
+ subnet_id = module.vpc.intra_subnets[0]
+ security_groups = [module.vpc.default_security_group_id]
+}
+
+resource "aws_efs_access_point" "lambda" {
+ file_system_id = aws_efs_file_system.shared.id
+
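+  # All access through this access point is mapped to UID/GID 1000; EFS creates
+  # /lambda with the creation_info below on first use if it does not already exist.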
+ posix_user {
+ gid = 1000
+ uid = 1000
+ }
+
+ root_directory {
+ path = "/lambda"
+ creation_info {
+ owner_gid = 1000
+ owner_uid = 1000
+ permissions = "0777"
+ }
+ }
+}
diff --git a/examples/with-efs/outputs.tf b/examples/with-efs/outputs.tf
new file mode 100644
index 00000000..9b554a5a
--- /dev/null
+++ b/examples/with-efs/outputs.tf
@@ -0,0 +1,104 @@
+# Lambda Function
+output "lambda_function_arn" {
+ description = "The ARN of the Lambda Function"
+ value = module.lambda_function_with_efs.lambda_function_arn
+}
+
+output "lambda_function_arn_static" {
+ description = "The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions)"
+ value = module.lambda_function_with_efs.lambda_function_arn_static
+}
+
+output "lambda_function_invoke_arn" {
+ description = "The Invoke ARN of the Lambda Function"
+ value = module.lambda_function_with_efs.lambda_function_invoke_arn
+}
+
+output "lambda_function_name" {
+ description = "The name of the Lambda Function"
+ value = module.lambda_function_with_efs.lambda_function_name
+}
+
+output "lambda_function_qualified_arn" {
+ description = "The ARN identifying your Lambda Function Version"
+ value = module.lambda_function_with_efs.lambda_function_qualified_arn
+}
+
+output "lambda_function_version" {
+ description = "Latest published version of Lambda Function"
+ value = module.lambda_function_with_efs.lambda_function_version
+}
+
+output "lambda_function_last_modified" {
+ description = "The date Lambda Function resource was last modified"
+ value = module.lambda_function_with_efs.lambda_function_last_modified
+}
+
+output "lambda_function_kms_key_arn" {
+ description = "The ARN for the KMS encryption key of Lambda Function"
+ value = module.lambda_function_with_efs.lambda_function_kms_key_arn
+}
+
+output "lambda_function_source_code_hash" {
+ description = "Base64-encoded representation of raw SHA-256 sum of the zip file"
+ value = module.lambda_function_with_efs.lambda_function_source_code_hash
+}
+
+output "lambda_function_source_code_size" {
+ description = "The size in bytes of the function .zip file"
+ value = module.lambda_function_with_efs.lambda_function_source_code_size
+}
+
+# Lambda Layer
+output "lambda_layer_arn" {
+ description = "The ARN of the Lambda Layer with version"
+ value = module.lambda_function_with_efs.lambda_layer_arn
+}
+
+output "lambda_layer_layer_arn" {
+ description = "The ARN of the Lambda Layer without version"
+ value = module.lambda_function_with_efs.lambda_layer_layer_arn
+}
+
+output "lambda_layer_created_date" {
+ description = "The date Lambda Layer resource was created"
+ value = module.lambda_function_with_efs.lambda_layer_created_date
+}
+
+output "lambda_layer_source_code_size" {
+ description = "The size in bytes of the Lambda Layer .zip file"
+ value = module.lambda_function_with_efs.lambda_layer_source_code_size
+}
+
+output "lambda_layer_version" {
+ description = "The Lambda Layer version"
+ value = module.lambda_function_with_efs.lambda_layer_version
+}
+
+# IAM Role
+output "lambda_role_arn" {
+ description = "The ARN of the IAM role created for the Lambda Function"
+ value = module.lambda_function_with_efs.lambda_role_arn
+}
+
+output "lambda_role_name" {
+ description = "The name of the IAM role created for the Lambda Function"
+ value = module.lambda_function_with_efs.lambda_role_name
+}
+
+# CloudWatch Log Group
+output "lambda_cloudwatch_log_group_arn" {
+ description = "The ARN of the Cloudwatch Log Group"
+ value = module.lambda_function_with_efs.lambda_cloudwatch_log_group_arn
+}
+
+# Deployment package
+output "local_filename" {
+ description = "The filename of zip archive deployed (if deployment was from local)"
+ value = module.lambda_function_with_efs.local_filename
+}
+
+output "s3_object" {
+ description = "The map with S3 object data of zip archive deployed (if deployment was from S3)"
+ value = module.lambda_function_with_efs.s3_object
+}
diff --git a/examples/with-efs/variables.tf b/examples/with-efs/variables.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/with-efs/versions.tf b/examples/with-efs/versions.tf
new file mode 100644
index 00000000..d2f4f3e8
--- /dev/null
+++ b/examples/with-efs/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/examples/with-vpc-s3-endpoint/README.md b/examples/with-vpc-s3-endpoint/README.md
new file mode 100644
index 00000000..f84ba32c
--- /dev/null
+++ b/examples/with-vpc-s3-endpoint/README.md
@@ -0,0 +1,84 @@
+# AWS Lambda with VPC and VPC Endpoint for S3 example
+
+The configuration in this directory creates an AWS Lambda Function deployed within a VPC with a VPC Endpoint for S3 and no Internet access. The Function writes a single object to an S3 bucket that is created as part of the supporting resources.
+
+Be aware that deleting an AWS Lambda function attached to a VPC can take a long time (e.g., 10 minutes).
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
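+
+Once applied, you can verify the setup end-to-end by invoking the function manually. A minimal sketch, assuming the AWS CLI is configured for the same account and region (`lambda_function_name` is one of this example's outputs):
+
+```bash
+$ aws lambda invoke --function-name "$(terraform output -raw lambda_function_name)" response.json
+```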
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
+| [random](#requirement\_random) | >= 3.4 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 6.0 |
+| [random](#provider\_random) | >= 3.4 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 1.0 |
+| [lambda\_s3\_write](#module\_lambda\_s3\_write) | ../../ | n/a |
+| [s3\_bucket](#module\_s3\_bucket) | terraform-aws-modules/s3-bucket/aws | ~> 5.0 |
+| [security\_group\_lambda](#module\_security\_group\_lambda) | terraform-aws-modules/security-group/aws | ~> 4.0 |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 |
+| [vpc\_endpoints](#module\_vpc\_endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | ~> 5.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [random_pet.this](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
+| [aws_ec2_managed_prefix_list.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_managed_prefix_list) | data source |
+| [aws_iam_policy_document.bucket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [lambda\_cloudwatch\_log\_group\_arn](#output\_lambda\_cloudwatch\_log\_group\_arn) | The ARN of the CloudWatch Log Group |
+| [lambda\_function\_arn](#output\_lambda\_function\_arn) | The ARN of the Lambda Function |
+| [lambda\_function\_arn\_static](#output\_lambda\_function\_arn\_static) | The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions) |
+| [lambda\_function\_invoke\_arn](#output\_lambda\_function\_invoke\_arn) | The Invoke ARN of the Lambda Function |
+| [lambda\_function\_kms\_key\_arn](#output\_lambda\_function\_kms\_key\_arn) | The ARN for the KMS encryption key of Lambda Function |
+| [lambda\_function\_last\_modified](#output\_lambda\_function\_last\_modified) | The date Lambda Function resource was last modified |
+| [lambda\_function\_name](#output\_lambda\_function\_name) | The name of the Lambda Function |
+| [lambda\_function\_qualified\_arn](#output\_lambda\_function\_qualified\_arn) | The ARN identifying your Lambda Function Version |
+| [lambda\_function\_source\_code\_hash](#output\_lambda\_function\_source\_code\_hash) | Base64-encoded representation of raw SHA-256 sum of the zip file |
+| [lambda\_function\_source\_code\_size](#output\_lambda\_function\_source\_code\_size) | The size in bytes of the function .zip file |
+| [lambda\_function\_version](#output\_lambda\_function\_version) | Latest published version of Lambda Function |
+| [lambda\_layer\_arn](#output\_lambda\_layer\_arn) | The ARN of the Lambda Layer with version |
+| [lambda\_layer\_created\_date](#output\_lambda\_layer\_created\_date) | The date Lambda Layer resource was created |
+| [lambda\_layer\_layer\_arn](#output\_lambda\_layer\_layer\_arn) | The ARN of the Lambda Layer without version |
+| [lambda\_layer\_source\_code\_size](#output\_lambda\_layer\_source\_code\_size) | The size in bytes of the Lambda Layer .zip file |
+| [lambda\_layer\_version](#output\_lambda\_layer\_version) | The Lambda Layer version |
+| [lambda\_role\_arn](#output\_lambda\_role\_arn) | The ARN of the IAM role created for the Lambda Function |
+| [lambda\_role\_name](#output\_lambda\_role\_name) | The name of the IAM role created for the Lambda Function |
+| [local\_filename](#output\_local\_filename) | The filename of zip archive deployed (if deployment was from local) |
+| [s3\_object](#output\_s3\_object) | The map with S3 object data of zip archive deployed (if deployment was from S3) |
+
diff --git a/examples/with-vpc-s3-endpoint/main.tf b/examples/with-vpc-s3-endpoint/main.tf
new file mode 100644
index 00000000..29de6eba
--- /dev/null
+++ b/examples/with-vpc-s3-endpoint/main.tf
@@ -0,0 +1,227 @@
+provider "aws" {
+ region = "eu-west-1"
+
+  # Make it faster by skipping unneeded validation checks
+ skip_metadata_api_check = true
+ skip_region_validation = true
+ skip_credentials_validation = true
+}
+
+data "aws_region" "current" {}
+
+################################################################################
+# Lambda Module
+################################################################################
+
+module "lambda_s3_write" {
+ source = "../../"
+
+ description = "Lambda demonstrating writes to an S3 bucket from within a VPC without Internet access"
+
+ function_name = random_pet.this.id
+ handler = "index.lambda_handler"
+ runtime = "python3.12"
+
+ source_path = "${path.module}/../fixtures/python-app2"
+
+ environment_variables = {
+ BUCKET_NAME = module.s3_bucket.s3_bucket_id
+ REGION_NAME = data.aws_region.current.region
+ }
+
+ # Let the module create a role for us
+ create_role = true
+ attach_cloudwatch_logs_policy = true
+ attach_network_policy = true
+
+  # There's no need to attach extra S3 write permissions to the role:
+  # the bucket policy defined below grants the role s3:PutObject access directly.
+  # See https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+
+ vpc_security_group_ids = [module.security_group_lambda.security_group_id]
+ vpc_subnet_ids = module.vpc.intra_subnets
+
+ tags = {
+ Module = "lambda_s3_write"
+ }
+}
+
+################################################################################
+# Extra Resources
+################################################################################
+
+resource "random_pet" "this" {
+ length = 2
+}
+
+data "aws_ec2_managed_prefix_list" "this" {
+ name = "com.amazonaws.${data.aws_region.current.region}.s3"
+}
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
+
+ name = random_pet.this.id
+ cidr = "10.0.0.0/16"
+
+ azs = ["${data.aws_region.current.region}a", "${data.aws_region.current.region}b", "${data.aws_region.current.region}c"]
+
+ # Intra subnets are designed to have no Internet access via NAT Gateway.
+ intra_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+
+ intra_dedicated_network_acl = true
+ intra_inbound_acl_rules = concat(
+ # NACL rule for local traffic
+ [
+ {
+ rule_number = 100
+ rule_action = "allow"
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_block = "10.0.0.0/16"
+ },
+ ],
+ # NACL rules for the response traffic from addresses in the AWS S3 prefix list
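+  # e.g., the first CIDR in the prefix list becomes rule 200, the second rule 201, and so on,
+  # each allowing return traffic on the ephemeral TCP port range 1024-65535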
+ [for k, v in zipmap(
+ range(length(data.aws_ec2_managed_prefix_list.this.entries[*].cidr)),
+ data.aws_ec2_managed_prefix_list.this.entries[*].cidr
+ ) :
+ {
+ rule_number = 200 + k
+ rule_action = "allow"
+ from_port = 1024
+ to_port = 65535
+ protocol = "tcp"
+ cidr_block = v
+ }
+ ]
+ )
+}
+
+module "vpc_endpoints" {
+ source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints"
+ version = "~> 5.0"
+
+ vpc_id = module.vpc.vpc_id
+
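+  # A Gateway endpoint installs routes for the S3 prefix list into the given route
+  # tables, so S3 traffic stays on the AWS network without NAT or Internet access.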
+ endpoints = {
+ s3 = {
+ service = "s3"
+ service_type = "Gateway"
+ route_table_ids = module.vpc.intra_route_table_ids
+ policy = data.aws_iam_policy_document.endpoint.json
+ }
+ }
+}
+
+data "aws_iam_policy_document" "endpoint" {
+ statement {
+ sid = "RestrictBucketAccessToIAMRole"
+
+ principals {
+ type = "AWS"
+ identifiers = ["*"]
+ }
+
+ actions = [
+ "s3:PutObject",
+ ]
+
+ resources = [
+ "${module.s3_bucket.s3_bucket_arn}/*",
+ ]
+
+ # See https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html#edit-vpc-endpoint-policy-s3
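+    # The wildcard principal above, combined with this condition, limits use of
+    # this endpoint for PutObject to the Lambda role only.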
+ condition {
+ test = "ArnEquals"
+ variable = "aws:PrincipalArn"
+ values = [module.lambda_s3_write.lambda_role_arn]
+ }
+ }
+}
+
+module "kms" {
+ source = "terraform-aws-modules/kms/aws"
+ version = "~> 1.0"
+
+ description = "S3 encryption key"
+
+ # Grants
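+  # kms:GenerateDataKey is what the Lambda role needs to upload SSE-KMS encrypted objects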
+ grants = {
+ lambda = {
+ grantee_principal = module.lambda_s3_write.lambda_role_arn
+ operations = [
+ "GenerateDataKey",
+ ]
+ }
+ }
+}
+
+module "s3_bucket" {
+ source = "terraform-aws-modules/s3-bucket/aws"
+ version = "~> 5.0"
+
+ bucket_prefix = "${random_pet.this.id}-"
+ force_destroy = true
+
+ # S3 bucket-level Public Access Block configuration
+ block_public_acls = true
+ block_public_policy = true
+ ignore_public_acls = true
+ restrict_public_buckets = true
+
+ versioning = {
+ enabled = true
+ }
+
+ # Bucket policy
+ attach_policy = true
+ policy = data.aws_iam_policy_document.bucket.json
+
+ server_side_encryption_configuration = {
+ rule = {
+ apply_server_side_encryption_by_default = {
+ kms_master_key_id = module.kms.key_id
+ sse_algorithm = "aws:kms"
+ }
+ }
+ }
+}
+
+data "aws_iam_policy_document" "bucket" {
+ statement {
+ sid = "RestrictBucketAccessToIAMRole"
+
+ principals {
+ type = "AWS"
+ identifiers = [module.lambda_s3_write.lambda_role_arn]
+ }
+
+ actions = [
+ "s3:PutObject",
+ ]
+
+ resources = [
+ "${module.s3_bucket.s3_bucket_arn}/*",
+ ]
+ }
+}
+
+module "security_group_lambda" {
+ source = "terraform-aws-modules/security-group/aws"
+ version = "~> 4.0"
+
+ name = random_pet.this.id
+ description = "Security Group for Lambda Egress"
+
+ vpc_id = module.vpc.vpc_id
+
+ egress_cidr_blocks = []
+ egress_ipv6_cidr_blocks = []
+
+ # Prefix list ids to use in all egress rules in this module
+ egress_prefix_list_ids = [module.vpc_endpoints.endpoints["s3"]["prefix_list_id"]]
+
+ egress_rules = ["https-443-tcp"]
+}
diff --git a/examples/with-vpc-s3-endpoint/outputs.tf b/examples/with-vpc-s3-endpoint/outputs.tf
new file mode 100644
index 00000000..7218c63c
--- /dev/null
+++ b/examples/with-vpc-s3-endpoint/outputs.tf
@@ -0,0 +1,104 @@
+# Lambda Function
+output "lambda_function_arn" {
+ description = "The ARN of the Lambda Function"
+ value = module.lambda_s3_write.lambda_function_arn
+}
+
+output "lambda_function_arn_static" {
+ description = "The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions)"
+ value = module.lambda_s3_write.lambda_function_arn_static
+}
+
+output "lambda_function_invoke_arn" {
+ description = "The Invoke ARN of the Lambda Function"
+ value = module.lambda_s3_write.lambda_function_invoke_arn
+}
+
+output "lambda_function_name" {
+ description = "The name of the Lambda Function"
+ value = module.lambda_s3_write.lambda_function_name
+}
+
+output "lambda_function_qualified_arn" {
+ description = "The ARN identifying your Lambda Function Version"
+ value = module.lambda_s3_write.lambda_function_qualified_arn
+}
+
+output "lambda_function_version" {
+ description = "Latest published version of Lambda Function"
+ value = module.lambda_s3_write.lambda_function_version
+}
+
+output "lambda_function_last_modified" {
+ description = "The date Lambda Function resource was last modified"
+ value = module.lambda_s3_write.lambda_function_last_modified
+}
+
+output "lambda_function_kms_key_arn" {
+ description = "The ARN for the KMS encryption key of Lambda Function"
+ value = module.lambda_s3_write.lambda_function_kms_key_arn
+}
+
+output "lambda_function_source_code_hash" {
+ description = "Base64-encoded representation of raw SHA-256 sum of the zip file"
+ value = module.lambda_s3_write.lambda_function_source_code_hash
+}
+
+output "lambda_function_source_code_size" {
+ description = "The size in bytes of the function .zip file"
+ value = module.lambda_s3_write.lambda_function_source_code_size
+}
+
+# Lambda Layer
+output "lambda_layer_arn" {
+ description = "The ARN of the Lambda Layer with version"
+ value = module.lambda_s3_write.lambda_layer_arn
+}
+
+output "lambda_layer_layer_arn" {
+ description = "The ARN of the Lambda Layer without version"
+ value = module.lambda_s3_write.lambda_layer_layer_arn
+}
+
+output "lambda_layer_created_date" {
+ description = "The date Lambda Layer resource was created"
+ value = module.lambda_s3_write.lambda_layer_created_date
+}
+
+output "lambda_layer_source_code_size" {
+ description = "The size in bytes of the Lambda Layer .zip file"
+ value = module.lambda_s3_write.lambda_layer_source_code_size
+}
+
+output "lambda_layer_version" {
+ description = "The Lambda Layer version"
+ value = module.lambda_s3_write.lambda_layer_version
+}
+
+# IAM Role
+output "lambda_role_arn" {
+ description = "The ARN of the IAM role created for the Lambda Function"
+ value = module.lambda_s3_write.lambda_role_arn
+}
+
+output "lambda_role_name" {
+ description = "The name of the IAM role created for the Lambda Function"
+ value = module.lambda_s3_write.lambda_role_name
+}
+
+# CloudWatch Log Group
+output "lambda_cloudwatch_log_group_arn" {
+ description = "The ARN of the Cloudwatch Log Group"
+ value = module.lambda_s3_write.lambda_cloudwatch_log_group_arn
+}
+
+# Deployment package
+output "local_filename" {
+ description = "The filename of zip archive deployed (if deployment was from local)"
+ value = module.lambda_s3_write.local_filename
+}
+
+output "s3_object" {
+ description = "The map with S3 object data of zip archive deployed (if deployment was from S3)"
+ value = module.lambda_s3_write.s3_object
+}
diff --git a/examples/with-vpc-s3-endpoint/variables.tf b/examples/with-vpc-s3-endpoint/variables.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/with-vpc-s3-endpoint/versions.tf b/examples/with-vpc-s3-endpoint/versions.tf
new file mode 100644
index 00000000..7f27783c
--- /dev/null
+++ b/examples/with-vpc-s3-endpoint/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 3.4"
+ }
+ }
+}
diff --git a/examples/with-vpc/README.md b/examples/with-vpc/README.md
index a580d31b..e1808811 100644
--- a/examples/with-vpc/README.md
+++ b/examples/with-vpc/README.md
@@ -16,43 +16,60 @@ $ terraform apply
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
-
+
## Requirements
-No requirements.
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
+| [random](#requirement\_random) | >= 2.0 |
## Providers
| Name | Version |
|------|---------|
-| random | n/a |
+| [random](#provider\_random) | >= 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [lambda\_function\_in\_vpc](#module\_lambda\_function\_in\_vpc) | ../../ | n/a |
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [random_pet.this](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
## Inputs
-No input.
+No inputs.
## Outputs
| Name | Description |
|------|-------------|
-| lambda\_cloudwatch\_log\_group\_arn | The ARN of the Cloudwatch Log Group |
-| lambda\_role\_arn | The ARN of the IAM role created for the Lambda Function |
-| lambda\_role\_name | The name of the IAM role created for the Lambda Function |
-| local\_filename | The filename of zip archive deployed (if deployment was from local) |
-| s3\_object | The map with S3 object data of zip archive deployed (if deployment was from S3) |
-| this\_lambda\_function\_arn | The ARN of the Lambda Function |
-| this\_lambda\_function\_invoke\_arn | The Invoke ARN of the Lambda Function |
-| this\_lambda\_function\_kms\_key\_arn | The ARN for the KMS encryption key of Lambda Function |
-| this\_lambda\_function\_last\_modified | The date Lambda Function resource was last modified |
-| this\_lambda\_function\_name | The name of the Lambda Function |
-| this\_lambda\_function\_qualified\_arn | The ARN identifying your Lambda Function Version |
-| this\_lambda\_function\_source\_code\_hash | Base64-encoded representation of raw SHA-256 sum of the zip file |
-| this\_lambda\_function\_source\_code\_size | The size in bytes of the function .zip file |
-| this\_lambda\_function\_version | Latest published version of Lambda Function |
-| this\_lambda\_layer\_arn | The ARN of the Lambda Layer with version |
-| this\_lambda\_layer\_created\_date | The date Lambda Layer resource was created |
-| this\_lambda\_layer\_layer\_arn | The ARN of the Lambda Layer without version |
-| this\_lambda\_layer\_source\_code\_size | The size in bytes of the Lambda Layer .zip file |
-| this\_lambda\_layer\_version | The Lambda Layer version |
-
-
+| [lambda\_cloudwatch\_log\_group\_arn](#output\_lambda\_cloudwatch\_log\_group\_arn) | The ARN of the CloudWatch Log Group |
+| [lambda\_function\_arn](#output\_lambda\_function\_arn) | The ARN of the Lambda Function |
+| [lambda\_function\_arn\_static](#output\_lambda\_function\_arn\_static) | The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions) |
+| [lambda\_function\_invoke\_arn](#output\_lambda\_function\_invoke\_arn) | The Invoke ARN of the Lambda Function |
+| [lambda\_function\_kms\_key\_arn](#output\_lambda\_function\_kms\_key\_arn) | The ARN for the KMS encryption key of Lambda Function |
+| [lambda\_function\_last\_modified](#output\_lambda\_function\_last\_modified) | The date Lambda Function resource was last modified |
+| [lambda\_function\_name](#output\_lambda\_function\_name) | The name of the Lambda Function |
+| [lambda\_function\_qualified\_arn](#output\_lambda\_function\_qualified\_arn) | The ARN identifying your Lambda Function Version |
+| [lambda\_function\_source\_code\_hash](#output\_lambda\_function\_source\_code\_hash) | Base64-encoded representation of raw SHA-256 sum of the zip file |
+| [lambda\_function\_source\_code\_size](#output\_lambda\_function\_source\_code\_size) | The size in bytes of the function .zip file |
+| [lambda\_function\_version](#output\_lambda\_function\_version) | Latest published version of Lambda Function |
+| [lambda\_layer\_arn](#output\_lambda\_layer\_arn) | The ARN of the Lambda Layer with version |
+| [lambda\_layer\_created\_date](#output\_lambda\_layer\_created\_date) | The date Lambda Layer resource was created |
+| [lambda\_layer\_layer\_arn](#output\_lambda\_layer\_layer\_arn) | The ARN of the Lambda Layer without version |
+| [lambda\_layer\_source\_code\_size](#output\_lambda\_layer\_source\_code\_size) | The size in bytes of the Lambda Layer .zip file |
+| [lambda\_layer\_version](#output\_lambda\_layer\_version) | The Lambda Layer version |
+| [lambda\_role\_arn](#output\_lambda\_role\_arn) | The ARN of the IAM role created for the Lambda Function |
+| [lambda\_role\_name](#output\_lambda\_role\_name) | The name of the IAM role created for the Lambda Function |
+| [local\_filename](#output\_local\_filename) | The filename of zip archive deployed (if deployment was from local) |
+| [s3\_object](#output\_s3\_object) | The map with S3 object data of zip archive deployed (if deployment was from S3) |
+
diff --git a/examples/with-vpc/main.tf b/examples/with-vpc/main.tf
index b791fc5e..d373d724 100644
--- a/examples/with-vpc/main.tf
+++ b/examples/with-vpc/main.tf
@@ -2,11 +2,9 @@ provider "aws" {
region = "eu-west-1"
# Make it faster by skipping something
- skip_get_ec2_platforms = true
skip_metadata_api_check = true
skip_region_validation = true
skip_credentials_validation = true
- skip_requesting_account_id = true
}
resource "random_pet" "this" {
@@ -19,17 +17,20 @@ module "lambda_function_in_vpc" {
function_name = "${random_pet.this.id}-lambda-in-vpc"
description = "My awesome lambda function"
handler = "index.lambda_handler"
- runtime = "python3.8"
+ runtime = "python3.12"
- source_path = "${path.module}/../fixtures/python3.8-app1"
+ source_path = "${path.module}/../fixtures/python-app1"
- vpc_subnet_ids = module.vpc.intra_subnets
- vpc_security_group_ids = [module.vpc.default_security_group_id]
- attach_network_policy = true
+ vpc_subnet_ids = module.vpc.intra_subnets
+ vpc_security_group_ids = [module.vpc.default_security_group_id]
+ attach_network_policy = true
+ replace_security_groups_on_destroy = true
+ replacement_security_group_ids = [module.vpc.default_security_group_id]
}
module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
name = random_pet.this.id
cidr = "10.10.0.0/16"
diff --git a/examples/with-vpc/outputs.tf b/examples/with-vpc/outputs.tf
index a505c0de..546b0192 100644
--- a/examples/with-vpc/outputs.tf
+++ b/examples/with-vpc/outputs.tf
@@ -1,73 +1,78 @@
# Lambda Function
-output "this_lambda_function_arn" {
+output "lambda_function_arn" {
description = "The ARN of the Lambda Function"
- value = module.lambda_function_in_vpc.this_lambda_function_arn
+ value = module.lambda_function_in_vpc.lambda_function_arn
}
-output "this_lambda_function_invoke_arn" {
+output "lambda_function_arn_static" {
+ description = "The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions)"
+ value = module.lambda_function_in_vpc.lambda_function_arn_static
+}
+
+output "lambda_function_invoke_arn" {
description = "The Invoke ARN of the Lambda Function"
- value = module.lambda_function_in_vpc.this_lambda_function_invoke_arn
+ value = module.lambda_function_in_vpc.lambda_function_invoke_arn
}
-output "this_lambda_function_name" {
+output "lambda_function_name" {
description = "The name of the Lambda Function"
- value = module.lambda_function_in_vpc.this_lambda_function_name
+ value = module.lambda_function_in_vpc.lambda_function_name
}
-output "this_lambda_function_qualified_arn" {
+output "lambda_function_qualified_arn" {
description = "The ARN identifying your Lambda Function Version"
- value = module.lambda_function_in_vpc.this_lambda_function_qualified_arn
+ value = module.lambda_function_in_vpc.lambda_function_qualified_arn
}
-output "this_lambda_function_version" {
+output "lambda_function_version" {
description = "Latest published version of Lambda Function"
- value = module.lambda_function_in_vpc.this_lambda_function_version
+ value = module.lambda_function_in_vpc.lambda_function_version
}
-output "this_lambda_function_last_modified" {
+output "lambda_function_last_modified" {
description = "The date Lambda Function resource was last modified"
- value = module.lambda_function_in_vpc.this_lambda_function_last_modified
+ value = module.lambda_function_in_vpc.lambda_function_last_modified
}
-output "this_lambda_function_kms_key_arn" {
+output "lambda_function_kms_key_arn" {
description = "The ARN for the KMS encryption key of Lambda Function"
- value = module.lambda_function_in_vpc.this_lambda_function_kms_key_arn
+ value = module.lambda_function_in_vpc.lambda_function_kms_key_arn
}
-output "this_lambda_function_source_code_hash" {
+output "lambda_function_source_code_hash" {
description = "Base64-encoded representation of raw SHA-256 sum of the zip file"
- value = module.lambda_function_in_vpc.this_lambda_function_source_code_hash
+ value = module.lambda_function_in_vpc.lambda_function_source_code_hash
}
-output "this_lambda_function_source_code_size" {
+output "lambda_function_source_code_size" {
description = "The size in bytes of the function .zip file"
- value = module.lambda_function_in_vpc.this_lambda_function_source_code_size
+ value = module.lambda_function_in_vpc.lambda_function_source_code_size
}
# Lambda Layer
-output "this_lambda_layer_arn" {
+output "lambda_layer_arn" {
description = "The ARN of the Lambda Layer with version"
- value = module.lambda_function_in_vpc.this_lambda_layer_arn
+ value = module.lambda_function_in_vpc.lambda_layer_arn
}
-output "this_lambda_layer_layer_arn" {
+output "lambda_layer_layer_arn" {
description = "The ARN of the Lambda Layer without version"
- value = module.lambda_function_in_vpc.this_lambda_layer_layer_arn
+ value = module.lambda_function_in_vpc.lambda_layer_layer_arn
}
-output "this_lambda_layer_created_date" {
+output "lambda_layer_created_date" {
description = "The date Lambda Layer resource was created"
- value = module.lambda_function_in_vpc.this_lambda_layer_created_date
+ value = module.lambda_function_in_vpc.lambda_layer_created_date
}
-output "this_lambda_layer_source_code_size" {
+output "lambda_layer_source_code_size" {
description = "The size in bytes of the Lambda Layer .zip file"
- value = module.lambda_function_in_vpc.this_lambda_layer_source_code_size
+ value = module.lambda_function_in_vpc.lambda_layer_source_code_size
}
-output "this_lambda_layer_version" {
+output "lambda_layer_version" {
description = "The Lambda Layer version"
- value = module.lambda_function_in_vpc.this_lambda_layer_version
+ value = module.lambda_function_in_vpc.lambda_layer_version
}
# IAM Role
diff --git a/examples/with-vpc/variables.tf b/examples/with-vpc/variables.tf
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/with-vpc/versions.tf b/examples/with-vpc/versions.tf
new file mode 100644
index 00000000..d2f4f3e8
--- /dev/null
+++ b/examples/with-vpc/versions.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/iam.tf b/iam.tf
index 539bc9c2..8b0440e1 100644
--- a/iam.tf
+++ b/iam.tf
@@ -1,10 +1,32 @@
locals {
- create_role = var.create && var.create_function && ! var.create_layer && var.create_role
+ create_role = local.create && var.create_function && !var.create_layer && var.create_role
# Lambda@Edge uses the Cloudwatch region closest to the location where the function is executed
# The region part of the LogGroup ARN is then replaced with a wildcard (*) so Lambda@Edge is able to log in every region
- log_group_arn_regional = element(concat(data.aws_cloudwatch_log_group.lambda.*.arn, aws_cloudwatch_log_group.lambda.*.arn, [""]), 0)
- log_group_arn = local.create_role && var.lambda_at_edge ? format("arn:%s:%s:%s:%s:%s", data.aws_arn.log_group_arn[0].partition, data.aws_arn.log_group_arn[0].service, "*", data.aws_arn.log_group_arn[0].account, data.aws_arn.log_group_arn[0].resource) : local.log_group_arn_regional
+ log_group_arn_regional = try(data.aws_cloudwatch_log_group.lambda[0].arn, aws_cloudwatch_log_group.lambda[0].arn, "")
+ log_group_name = try(data.aws_cloudwatch_log_group.lambda[0].name, aws_cloudwatch_log_group.lambda[0].name, "")
+ log_group_arn = local.create_role && var.lambda_at_edge ? format("arn:%s:%s:%s:%s:%s", data.aws_arn.log_group_arn[0].partition, data.aws_arn.log_group_arn[0].service, var.lambda_at_edge_logs_all_regions ? "*" : "us-east-1", data.aws_arn.log_group_arn[0].account, data.aws_arn.log_group_arn[0].resource) : local.log_group_arn_regional
+
+ # Defaulting to "*" (an invalid character for an IAM Role name) will cause an error when
+ # attempting to plan if the role_name and function_name are not set. This is a workaround
+ # for #83 that will allow one to import resources without receiving an error from coalesce.
+ # @see https://github.com/terraform-aws-modules/terraform-aws-lambda/issues/83
+ role_name = local.create_role ? coalesce(var.role_name, var.function_name, "*") : null
+ policy_name = coalesce(var.policy_name, local.role_name, "*")
+
+  # IAM Role trusted entities is a mixed list: plain strings are treated as services, maps as principals (type + identifiers)
+ trusted_entities_services = distinct(compact(concat(
+ slice(["lambda.amazonaws.com", "edgelambda.amazonaws.com"], 0, var.lambda_at_edge ? 2 : 1),
+ [for service in var.trusted_entities : try(tostring(service), "")]
+ )))
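+  # e.g., lambda_at_edge = true yields both "lambda.amazonaws.com" and "edgelambda.amazonaws.com";
+  # plain string entries from var.trusted_entities are appended as extra services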
+
+ trusted_entities_principals = [
+ for principal in var.trusted_entities : {
+ type = principal.type
+ identifiers = tolist(principal.identifiers)
+ }
+ if !can(tostring(principal))
+ ]
}
###########
@@ -20,7 +42,51 @@ data "aws_iam_policy_document" "assume_role" {
principals {
type = "Service"
- identifiers = distinct(concat(slice(list("lambda.amazonaws.com", "edgelambda.amazonaws.com"), 0, var.lambda_at_edge ? 2 : 1), var.trusted_entities))
+ identifiers = local.trusted_entities_services
+ }
+
+ dynamic "principals" {
+ for_each = local.trusted_entities_principals
+ content {
+ type = principals.value.type
+ identifiers = principals.value.identifiers
+ }
+ }
+ }
+
+ dynamic "statement" {
+ for_each = var.assume_role_policy_statements
+
+ content {
+ sid = try(statement.value.sid, replace(statement.key, "/[^0-9A-Za-z]*/", ""))
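+      # e.g., a statement keyed "allow-xray" without an explicit sid gets sid "allowxray"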
+ effect = try(statement.value.effect, null)
+ actions = try(statement.value.actions, null)
+ not_actions = try(statement.value.not_actions, null)
+
+ dynamic "principals" {
+ for_each = try(statement.value.principals, [])
+ content {
+ type = principals.value.type
+ identifiers = principals.value.identifiers
+ }
+ }
+
+ dynamic "not_principals" {
+ for_each = try(statement.value.not_principals, [])
+ content {
+ type = not_principals.value.type
+ identifiers = not_principals.value.identifiers
+ }
+ }
+
+ dynamic "condition" {
+ for_each = try(statement.value.condition, [])
+ content {
+ test = condition.value.test
+ variable = condition.value.variable
+ values = condition.value.values
+ }
+ }
}
}
}
@@ -28,12 +94,13 @@ data "aws_iam_policy_document" "assume_role" {
resource "aws_iam_role" "lambda" {
count = local.create_role ? 1 : 0
- name = coalesce(var.role_name, var.function_name)
+ name = local.role_name
description = var.role_description
path = var.role_path
force_detach_policies = var.role_force_detach_policies
permissions_boundary = var.role_permissions_boundary
assume_role_policy = data.aws_iam_policy_document.assume_role[0].json
+ max_session_duration = var.role_maximum_session_duration
tags = merge(var.tags, var.role_tags)
}
@@ -54,30 +121,24 @@ data "aws_iam_policy_document" "logs" {
statement {
effect = "Allow"
- actions = [
+ actions = compact([
+ !var.use_existing_cloudwatch_log_group && var.attach_create_log_group_permission ? "logs:CreateLogGroup" : "",
"logs:CreateLogStream",
- "logs:PutLogEvents",
- ]
+ "logs:PutLogEvents"
+ ])
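+    # compact() drops the empty "" entry when logs:CreateLogGroup is not needed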
resources = flatten([for _, v in ["%v:*", "%v:*:*"] : format(v, local.log_group_arn)])
}
}
-resource "aws_iam_policy" "logs" {
+resource "aws_iam_role_policy" "logs" {
count = local.create_role && var.attach_cloudwatch_logs_policy ? 1 : 0
- name = "${var.function_name}-logs"
+ name = "${local.policy_name}-logs"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy_document.logs[0].json
}
-resource "aws_iam_policy_attachment" "logs" {
- count = local.create_role && var.attach_cloudwatch_logs_policy ? 1 : 0
-
- name = "${var.function_name}-logs"
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.logs[0].arn
-}
-
#####################
# Dead Letter Config
#####################
@@ -99,73 +160,52 @@ data "aws_iam_policy_document" "dead_letter" {
}
}
-resource "aws_iam_policy" "dead_letter" {
+resource "aws_iam_role_policy" "dead_letter" {
count = local.create_role && var.attach_dead_letter_policy ? 1 : 0
- name = "${var.function_name}-dl"
+ name = "${local.policy_name}-dl"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy_document.dead_letter[0].json
}
-resource "aws_iam_policy_attachment" "dead_letter" {
- count = local.create_role && var.attach_dead_letter_policy ? 1 : 0
-
- name = "${var.function_name}-dl"
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.dead_letter[0].arn
-}
-
######
# VPC
######
-// Copying AWS managed policy to be able to attach the same policy with multiple roles without overwrites by another function
+# Copying AWS managed policy to be able to attach the same policy with multiple roles without overwrites by another function
data "aws_iam_policy" "vpc" {
count = local.create_role && var.attach_network_policy ? 1 : 0
- arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaENIManagementAccess"
+ arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaENIManagementAccess"
}
-resource "aws_iam_policy" "vpc" {
+resource "aws_iam_role_policy" "vpc" {
count = local.create_role && var.attach_network_policy ? 1 : 0
- name = "${var.function_name}-vpc"
+ name = "${local.policy_name}-vpc"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy.vpc[0].policy
}
-resource "aws_iam_policy_attachment" "vpc" {
- count = local.create_role && var.attach_network_policy ? 1 : 0
-
- name = "${var.function_name}-vpc"
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.vpc[0].arn
-}
-
#####################
# Tracing with X-Ray
#####################
-// Copying AWS managed policy to be able to attach the same policy with multiple roles without overwrites by another function
+# Copying AWS managed policy to be able to attach the same policy with multiple roles without overwrites by another function
data "aws_iam_policy" "tracing" {
count = local.create_role && var.attach_tracing_policy ? 1 : 0
- arn = "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess"
+ arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AWSXRayDaemonWriteAccess"
}
-resource "aws_iam_policy" "tracing" {
+resource "aws_iam_role_policy" "tracing" {
count = local.create_role && var.attach_tracing_policy ? 1 : 0
- name = "${var.function_name}-tracing"
+ name = "${local.policy_name}-tracing"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy.tracing[0].policy
}
-resource "aws_iam_policy_attachment" "tracing" {
- count = local.create_role && var.attach_tracing_policy ? 1 : 0
-
- name = "${var.function_name}-tracing"
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.tracing[0].arn
-}
-
###############################
# Failure/Success Async Events
###############################
@@ -179,55 +219,54 @@ data "aws_iam_policy_document" "async" {
actions = [
"sns:Publish",
"sqs:SendMessage",
+ "events:PutEvents",
+ "lambda:InvokeFunction",
]
resources = compact(distinct([var.destination_on_failure, var.destination_on_success]))
}
}
-resource "aws_iam_policy" "async" {
+resource "aws_iam_role_policy" "async" {
count = local.create_role && var.attach_async_event_policy ? 1 : 0
- name = "${var.function_name}-async"
+ name = "${local.policy_name}-async"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy_document.async[0].json
}
-resource "aws_iam_policy_attachment" "async" {
- count = local.create_role && var.attach_async_event_policy ? 1 : 0
-
- name = "${var.function_name}-async"
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.async[0].arn
-}
-
###########################
# Additional policy (JSON)
###########################
-resource "aws_iam_policy" "additional_json" {
+resource "aws_iam_role_policy" "additional_json" {
count = local.create_role && var.attach_policy_json ? 1 : 0
- name = var.function_name
+ name = local.policy_name
+ role = aws_iam_role.lambda[0].name
policy = var.policy_json
}
-resource "aws_iam_policy_attachment" "additional_json" {
- count = local.create_role && var.attach_policy_json ? 1 : 0
+#####################################
+# Additional policies (list of JSON)
+#####################################
- name = var.function_name
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.additional_json[0].arn
+resource "aws_iam_role_policy" "additional_jsons" {
+ count = local.create_role && var.attach_policy_jsons ? var.number_of_policy_jsons : 0
+
+ name = "${local.policy_name}-${count.index}"
+ role = aws_iam_role.lambda[0].name
+ policy = var.policy_jsons[count.index]
}
###########################
# ARN of additional policy
###########################
-resource "aws_iam_policy_attachment" "additional_one" {
+resource "aws_iam_role_policy_attachment" "additional_one" {
count = local.create_role && var.attach_policy ? 1 : 0
- name = var.function_name
- roles = [aws_iam_role.lambda[0].name]
+ role = aws_iam_role.lambda[0].name
policy_arn = var.policy
}
@@ -235,11 +274,10 @@ resource "aws_iam_policy_attachment" "additional_one" {
# List of ARNs of additional policies
######################################
-resource "aws_iam_policy_attachment" "additional_many" {
+resource "aws_iam_role_policy_attachment" "additional_many" {
count = local.create_role && var.attach_policies ? var.number_of_policies : 0
- name = var.function_name
- roles = [aws_iam_role.lambda[0].name]
+ role = aws_iam_role.lambda[0].name
policy_arn = var.policies[count.index]
}
@@ -254,15 +292,15 @@ data "aws_iam_policy_document" "additional_inline" {
for_each = var.policy_statements
content {
- sid = lookup(statement.value, "sid", replace(statement.key, "/[^0-9A-Za-z]*/", ""))
- effect = lookup(statement.value, "effect", null)
- actions = lookup(statement.value, "actions", null)
- not_actions = lookup(statement.value, "not_actions", null)
- resources = lookup(statement.value, "resources", null)
- not_resources = lookup(statement.value, "not_resources", null)
+ sid = try(statement.value.sid, replace(statement.key, "/[^0-9A-Za-z]*/", ""))
+ effect = try(statement.value.effect, null)
+ actions = try(statement.value.actions, null)
+ not_actions = try(statement.value.not_actions, null)
+ resources = try(statement.value.resources, null)
+ not_resources = try(statement.value.not_resources, null)
dynamic "principals" {
- for_each = lookup(statement.value, "principals", [])
+ for_each = try(statement.value.principals, [])
content {
type = principals.value.type
identifiers = principals.value.identifiers
@@ -270,7 +308,7 @@ data "aws_iam_policy_document" "additional_inline" {
}
dynamic "not_principals" {
- for_each = lookup(statement.value, "not_principals", [])
+ for_each = try(statement.value.not_principals, [])
content {
type = not_principals.value.type
identifiers = not_principals.value.identifiers
@@ -278,7 +316,7 @@ data "aws_iam_policy_document" "additional_inline" {
}
dynamic "condition" {
- for_each = lookup(statement.value, "condition", [])
+ for_each = try(statement.value.condition, [])
content {
test = condition.value.test
variable = condition.value.variable
@@ -289,17 +327,10 @@ data "aws_iam_policy_document" "additional_inline" {
}
}
-resource "aws_iam_policy" "additional_inline" {
+resource "aws_iam_role_policy" "additional_inline" {
count = local.create_role && var.attach_policy_statements ? 1 : 0
- name = "${var.function_name}-inline"
+ name = "${local.policy_name}-inline"
+ role = aws_iam_role.lambda[0].name
policy = data.aws_iam_policy_document.additional_inline[0].json
}
-
-resource "aws_iam_policy_attachment" "additional_inline" {
- count = local.create_role && var.attach_policy_statements ? 1 : 0
-
- name = var.function_name
- roles = [aws_iam_role.lambda[0].name]
- policy_arn = aws_iam_policy.additional_inline[0].arn
-}
diff --git a/main.tf b/main.tf
index dc7429e1..cc7d011a 100644
--- a/main.tf
+++ b/main.tf
@@ -1,38 +1,75 @@
+data "aws_partition" "current" {}
+data "aws_region" "current" {}
+data "aws_caller_identity" "current" {}
+
locals {
+ create = var.create && var.putin_khuylo
+
+ archive_filename = try(data.external.archive_prepare[0].result.filename, null)
+ archive_filename_string = local.archive_filename != null ? local.archive_filename : ""
+ archive_was_missing = try(data.external.archive_prepare[0].result.was_missing, false)
+
# Use a generated filename to determine when the source code has changed.
# filename - to get package from local
- filename = var.local_existing_package != null ? var.local_existing_package : (var.store_on_s3 ? null : element(concat(data.external.archive_prepare.*.result.filename, [null]), 0))
- was_missing = var.local_existing_package != null ? ! fileexists(var.local_existing_package) : element(concat(data.external.archive_prepare.*.result.was_missing, [false]), 0)
+ filename = var.local_existing_package != null ? var.local_existing_package : (var.store_on_s3 ? null : local.archive_filename)
+ was_missing = var.local_existing_package != null ? !fileexists(var.local_existing_package) : local.archive_was_missing
# s3_* - to get package from S3
- s3_bucket = var.s3_existing_package != null ? lookup(var.s3_existing_package, "bucket", null) : (var.store_on_s3 ? var.s3_bucket : null)
- s3_key = var.s3_existing_package != null ? lookup(var.s3_existing_package, "key", null) : (var.store_on_s3 ? element(concat(data.external.archive_prepare.*.result.filename, [null]), 0) : null)
- s3_object_version = var.s3_existing_package != null ? lookup(var.s3_existing_package, "version_id", null) : (var.store_on_s3 ? element(concat(aws_s3_bucket_object.lambda_package.*.version_id, [null]), 0) : null)
+ s3_bucket = var.s3_existing_package != null ? try(var.s3_existing_package.bucket, null) : (var.store_on_s3 ? var.s3_bucket : null)
+ s3_key = var.s3_existing_package != null ? try(var.s3_existing_package.key, null) : (var.store_on_s3 ? var.s3_prefix != null ? format("%s%s", var.s3_prefix, replace(local.archive_filename_string, "/^.*//", "")) : replace(local.archive_filename_string, "/^\\.//", "") : null)
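+  # e.g., with s3_prefix = "builds/" a generated archive "./dist/code.zip" is stored at key "builds/code.zip";
+  # without s3_prefix only the leading "./" is stripped, giving key "dist/code.zip"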
+ s3_object_version = var.s3_existing_package != null ? try(var.s3_existing_package.version_id, null) : (var.store_on_s3 ? try(aws_s3_object.lambda_package[0].version_id, null) : null)
}
resource "aws_lambda_function" "this" {
- count = var.create && var.create_function && ! var.create_layer ? 1 : 0
-
- function_name = var.function_name
- description = var.description
- role = var.create_role ? aws_iam_role.lambda[0].arn : var.lambda_role
- handler = var.handler
- memory_size = var.memory_size
- reserved_concurrent_executions = var.reserved_concurrent_executions
- runtime = var.runtime
- layers = var.layers
- timeout = var.lambda_at_edge ? min(var.timeout, 5) : var.timeout
- publish = var.lambda_at_edge ? true : var.publish
- kms_key_arn = var.kms_key_arn
+ count = local.create && var.create_function && !var.create_layer ? 1 : 0
+
+ region = var.region
+
+ function_name = var.function_name
+ description = var.description
+ role = var.create_role ? aws_iam_role.lambda[0].arn : var.lambda_role
+ handler = var.package_type != "Zip" ? null : var.handler
+ memory_size = var.memory_size
+ reserved_concurrent_executions = var.reserved_concurrent_executions
+ runtime = var.package_type != "Zip" ? null : var.runtime
+ layers = var.layers
+ timeout = var.lambda_at_edge ? min(var.timeout, 30) : var.timeout
+ publish = (var.lambda_at_edge || var.snap_start) ? true : var.publish
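+  # Lambda@Edge and SnapStart both operate on published versions, so publishing is forced for them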
+ kms_key_arn = var.kms_key_arn
+ image_uri = var.image_uri
+ package_type = var.package_type
+ architectures = var.architectures
+ code_signing_config_arn = var.code_signing_config_arn
+ replace_security_groups_on_destroy = var.replace_security_groups_on_destroy
+ replacement_security_group_ids = var.replacement_security_group_ids
+ skip_destroy = var.skip_destroy
+
+  # ephemeral_storage is not supported in GovCloud regions, so it should be set to `null` there
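+  # When set, valid sizes range from 512 MB (the default) to 10240 MB.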
+ dynamic "ephemeral_storage" {
+ for_each = var.ephemeral_storage_size == null ? [] : [true]
+
+ content {
+ size = var.ephemeral_storage_size
+ }
+ }
filename = local.filename
- source_code_hash = (local.filename == null ? false : fileexists(local.filename)) && ! local.was_missing ? filebase64sha256(local.filename) : null
+ source_code_hash = var.ignore_source_code_hash ? null : (local.filename == null ? false : fileexists(local.filename)) && !local.was_missing ? filebase64sha256(local.filename) : null
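+  # Setting var.ignore_source_code_hash allows deploying code outside Terraform without causing plan drift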
s3_bucket = local.s3_bucket
s3_key = local.s3_key
s3_object_version = local.s3_object_version
+ dynamic "image_config" {
+ for_each = length(var.image_config_entry_point) > 0 || length(var.image_config_command) > 0 || var.image_config_working_directory != null ? [true] : []
+ content {
+ entry_point = var.image_config_entry_point
+ command = var.image_config_command
+ working_directory = var.image_config_working_directory
+ }
+ }
+
dynamic "environment" {
for_each = length(keys(var.environment_variables)) == 0 ? [] : [true]
content {
@@ -57,67 +94,159 @@ resource "aws_lambda_function" "this" {
dynamic "vpc_config" {
for_each = var.vpc_subnet_ids != null && var.vpc_security_group_ids != null ? [true] : []
content {
- security_group_ids = var.vpc_security_group_ids
- subnet_ids = var.vpc_subnet_ids
+ security_group_ids = var.vpc_security_group_ids
+ subnet_ids = var.vpc_subnet_ids
+ ipv6_allowed_for_dual_stack = var.ipv6_allowed_for_dual_stack
+ }
+ }
+
+ dynamic "file_system_config" {
+ for_each = var.file_system_arn != null && var.file_system_local_mount_path != null ? [true] : []
+ content {
+ local_mount_path = var.file_system_local_mount_path
+ arn = var.file_system_arn
+ }
+ }
+
+ dynamic "snap_start" {
+ for_each = var.snap_start ? [true] : []
+
+ content {
+ apply_on = "PublishedVersions"
}
}
- tags = var.tags
+ dynamic "logging_config" {
+    # Don't create logging config on GovCloud as it is not available.
+ # See https://github.com/hashicorp/terraform-provider-aws/issues/34810
+ for_each = data.aws_partition.current.partition == "aws" ? [true] : []
- depends_on = [null_resource.archive, aws_s3_bucket_object.lambda_package]
+ content {
+ log_group = var.logging_log_group
+ log_format = var.logging_log_format
+ application_log_level = var.logging_log_format == "Text" ? null : var.logging_application_log_level
+ system_log_level = var.logging_log_format == "Text" ? null : var.logging_system_log_level
+ }
+ }
+
+ dynamic "timeouts" {
+ for_each = length(var.timeouts) > 0 ? [true] : []
+
+ content {
+ create = try(var.timeouts.create, null)
+ update = try(var.timeouts.update, null)
+ delete = try(var.timeouts.delete, null)
+ }
+ }
+
+ tags = merge(
+ var.include_default_tag ? { terraform-aws-modules = "lambda" } : {},
+ var.tags,
+ var.function_tags
+ )
+
+ depends_on = [
+ null_resource.archive,
+ aws_s3_object.lambda_package,
+
+ # Depending on the log group is necessary to allow Terraform to create the log group before AWS can.
+ # When a lambda function is invoked, AWS creates the log group automatically if it doesn't exist yet.
+ # Without the dependency, this can result in a race condition if the lambda function is invoked before
+ # Terraform can create the log group.
+ aws_cloudwatch_log_group.lambda,
+
+ # Before the lambda is created the execution role with all its policies should be ready
+ aws_iam_role_policy.additional_inline,
+ aws_iam_role_policy.additional_json,
+ aws_iam_role_policy.additional_jsons,
+ aws_iam_role_policy.async,
+ aws_iam_role_policy.dead_letter,
+ aws_iam_role_policy.logs,
+ aws_iam_role_policy.tracing,
+ aws_iam_role_policy.vpc,
+ aws_iam_role_policy_attachment.additional_many,
+ aws_iam_role_policy_attachment.additional_one,
+ ]
}
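The `tags` expression above merges three maps in order, so later sources win on key collisions: the optional module marker, then `var.tags`, then `var.function_tags`. A minimal sketch of the effect, assuming the registry source `terraform-aws-modules/lambda/aws` and that `include_default_tag` keeps its default of `true` (tag values illustrative):

```hcl
module "lambda_function" {
  source = "terraform-aws-modules/lambda/aws"

  function_name = "my-lambda1"
  handler       = "index.lambda_handler"
  runtime       = "python3.12"
  source_path   = "../src/lambda-function1"

  # Applied to every taggable resource the module creates
  tags = {
    Environment = "dev"
    Owner       = "platform"
  }

  # Applied only to the function itself; overrides `tags` on collisions
  function_tags = {
    Owner = "payments"
  }
}

# Resulting function tags:
#   terraform-aws-modules = "lambda"
#   Environment           = "dev"
#   Owner                 = "payments"
```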
resource "aws_lambda_layer_version" "this" {
- count = var.create && var.create_layer ? 1 : 0
+ count = local.create && var.create_layer ? 1 : 0
+
+ region = var.region
layer_name = var.layer_name
description = var.description
license_info = var.license_info
- compatible_runtimes = length(var.compatible_runtimes) > 0 ? var.compatible_runtimes : [var.runtime]
+ compatible_runtimes = length(var.compatible_runtimes) > 0 ? var.compatible_runtimes : (var.runtime == "" ? null : [var.runtime])
+ compatible_architectures = var.compatible_architectures
+ skip_destroy = var.layer_skip_destroy
filename = local.filename
- source_code_hash = (local.filename == null ? false : fileexists(local.filename)) && ! local.was_missing ? filebase64sha256(local.filename) : null
+ source_code_hash = var.ignore_source_code_hash ? null : (local.filename == null ? false : fileexists(local.filename)) && !local.was_missing ? filebase64sha256(local.filename) : null
s3_bucket = local.s3_bucket
s3_key = local.s3_key
s3_object_version = local.s3_object_version
- depends_on = [null_resource.archive, aws_s3_bucket_object.lambda_package]
+ depends_on = [null_resource.archive, aws_s3_object.lambda_package]
}
-resource "aws_s3_bucket_object" "lambda_package" {
- count = var.create && var.store_on_s3 && var.create_package ? 1 : 0
+resource "aws_s3_object" "lambda_package" {
+ count = local.create && var.store_on_s3 && var.create_package ? 1 : 0
+
+ region = var.region
bucket = var.s3_bucket
- key = data.external.archive_prepare[0].result.filename
+ acl = var.s3_acl
+ key = local.s3_key
source = data.external.archive_prepare[0].result.filename
- etag = fileexists(data.external.archive_prepare[0].result.filename) ? filemd5(data.external.archive_prepare[0].result.filename) : null
storage_class = var.s3_object_storage_class
- tags = merge(var.tags, var.s3_object_tags)
+ server_side_encryption = var.s3_server_side_encryption
+ kms_key_id = var.s3_kms_key_id
+
+ tags = var.s3_object_tags_only ? var.s3_object_tags : merge(var.tags, var.s3_object_tags)
+
+ dynamic "override_provider" {
+ for_each = var.s3_object_override_default_tags ? [true] : []
+
+ content {
+ default_tags {
+ tags = {}
+ }
+ }
+ }
depends_on = [null_resource.archive]
}
data "aws_cloudwatch_log_group" "lambda" {
- count = var.create && var.create_function && ! var.create_layer && var.use_existing_cloudwatch_log_group ? 1 : 0
+ count = local.create && var.create_function && !var.create_layer && var.use_existing_cloudwatch_log_group ? 1 : 0
+
+ region = var.region
- name = "/aws/lambda/${var.lambda_at_edge ? "us-east-1." : ""}${var.function_name}"
+ name = coalesce(var.logging_log_group, "/aws/lambda/${var.lambda_at_edge ? "us-east-1." : ""}${var.function_name}")
}
resource "aws_cloudwatch_log_group" "lambda" {
- count = var.create && var.create_function && ! var.create_layer && ! var.use_existing_cloudwatch_log_group ? 1 : 0
+ count = local.create && var.create_function && !var.create_layer && !var.use_existing_cloudwatch_log_group ? 1 : 0
- name = "/aws/lambda/${var.lambda_at_edge ? "us-east-1." : ""}${var.function_name}"
+ region = var.region
+
+ name = coalesce(var.logging_log_group, "/aws/lambda/${var.lambda_at_edge ? "us-east-1." : ""}${var.function_name}")
retention_in_days = var.cloudwatch_logs_retention_in_days
kms_key_id = var.cloudwatch_logs_kms_key_id
+ skip_destroy = var.cloudwatch_logs_skip_destroy
+ log_group_class = var.cloudwatch_logs_log_group_class
tags = merge(var.tags, var.cloudwatch_logs_tags)
}
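Since both the log group resources and the function's `logging_config` coalesce on `var.logging_log_group`, setting that one input renames the created log group and points the function's logging at it in a single step. A hedged sketch using the `logging_*` inputs introduced above (names and levels illustrative):

```hcl
module "lambda_function" {
  source = "terraform-aws-modules/lambda/aws"

  function_name = "my-lambda1"
  handler       = "index.lambda_handler"
  runtime       = "python3.12"
  source_path   = "../src/lambda-function1"

  # Used by both aws_cloudwatch_log_group.lambda and logging_config
  logging_log_group  = "/custom/lambda/my-lambda1"
  logging_log_format = "JSON"

  # Log levels only apply to the JSON format; the module nulls them
  # out when logging_log_format is "Text"
  logging_application_log_level = "INFO"
  logging_system_log_level      = "WARN"
}
```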
resource "aws_lambda_provisioned_concurrency_config" "current_version" {
- count = var.create && var.create_function && ! var.create_layer && var.provisioned_concurrent_executions > -1 ? 1 : 0
+ count = local.create && var.create_function && !var.create_layer && var.provisioned_concurrent_executions > -1 ? 1 : 0
+
+ region = var.region
function_name = aws_lambda_function.this[0].function_name
qualifier = aws_lambda_function.this[0].version
@@ -130,7 +259,9 @@ locals {
}
resource "aws_lambda_function_event_invoke_config" "this" {
- for_each = var.create && var.create_function && ! var.create_layer && var.create_async_event_config ? local.qualifiers : {}
+ for_each = { for k, v in local.qualifiers : k => v if v != null && local.create && var.create_function && !var.create_layer && var.create_async_event_config }
+
+ region = var.region
function_name = aws_lambda_function.this[0].function_name
qualifier = each.key == "current_version" ? aws_lambda_function.this[0].version : null
@@ -159,29 +290,247 @@ resource "aws_lambda_function_event_invoke_config" "this" {
}
resource "aws_lambda_permission" "current_version_triggers" {
- for_each = var.create && var.create_function && ! var.create_layer && var.create_current_version_allowed_triggers ? var.allowed_triggers : {}
+ for_each = { for k, v in var.allowed_triggers : k => v if local.create && var.create_function && !var.create_layer && var.create_current_version_allowed_triggers }
+
+ region = var.region
function_name = aws_lambda_function.this[0].function_name
qualifier = aws_lambda_function.this[0].version
- statement_id = lookup(each.value, "statement_id", each.key)
- action = lookup(each.value, "action", "lambda:InvokeFunction")
- principal = lookup(each.value, "principal", format("%s.amazonaws.com", lookup(each.value, "service", "")))
- source_arn = lookup(each.value, "source_arn", lookup(each.value, "service", null) == "apigateway" ? "${lookup(each.value, "arn", "")}/*/*/*" : null)
- source_account = lookup(each.value, "source_account", null)
- event_source_token = lookup(each.value, "event_source_token", null)
+ statement_id_prefix = try(each.value.statement_id, each.key)
+ action = try(each.value.action, "lambda:InvokeFunction")
+ principal = try(each.value.principal, format("%s.amazonaws.com", try(each.value.service, "")))
+ principal_org_id = try(each.value.principal_org_id, null)
+ source_arn = try(each.value.source_arn, null)
+ source_account = try(each.value.source_account, null)
+ event_source_token = try(each.value.event_source_token, null)
+ function_url_auth_type = try(each.value.function_url_auth_type, null)
+
+ lifecycle {
+ create_before_destroy = true
+ }
}
-// Error: Error adding new Lambda Permission for destined-tetra-lambda: InvalidParameterValueException: We currently do not support adding policies for $LATEST.
+# Error: Error adding new Lambda Permission for lambda: InvalidParameterValueException: We currently do not support adding policies for $LATEST.
resource "aws_lambda_permission" "unqualified_alias_triggers" {
- for_each = var.create && var.create_function && ! var.create_layer && var.create_unqualified_alias_allowed_triggers ? var.allowed_triggers : {}
+ for_each = { for k, v in var.allowed_triggers : k => v if local.create && var.create_function && !var.create_layer && var.create_unqualified_alias_allowed_triggers }
+
+ region = var.region
function_name = aws_lambda_function.this[0].function_name
- statement_id = lookup(each.value, "statement_id", each.key)
- action = lookup(each.value, "action", "lambda:InvokeFunction")
- principal = lookup(each.value, "principal", format("%s.amazonaws.com", lookup(each.value, "service", "")))
- source_arn = lookup(each.value, "source_arn", lookup(each.value, "service", null) == "apigateway" ? "${lookup(each.value, "arn", "")}/*/*/*" : null)
- source_account = lookup(each.value, "source_account", null)
- event_source_token = lookup(each.value, "event_source_token", null)
+ statement_id_prefix = try(each.value.statement_id, each.key)
+ action = try(each.value.action, "lambda:InvokeFunction")
+ principal = try(each.value.principal, format("%s.amazonaws.com", try(each.value.service, "")))
+ principal_org_id = try(each.value.principal_org_id, null)
+ source_arn = try(each.value.source_arn, null)
+ source_account = try(each.value.source_account, null)
+ event_source_token = try(each.value.event_source_token, null)
+ function_url_auth_type = try(each.value.function_url_auth_type, null)
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
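Note the behavioural change in both permission resources: `source_arn` is no longer derived from a `service`/`arn` pair, so it must now be passed explicitly, and `statement_id` feeds `statement_id_prefix` so replaced permissions avoid name collisions. A minimal `allowed_triggers` sketch (account and API IDs illustrative):

```hcl
module "lambda_function" {
  source = "terraform-aws-modules/lambda/aws"

  # ... function configuration ...

  allowed_triggers = {
    APIGatewayAny = {
      service    = "apigateway"
      source_arn = "arn:aws:execute-api:eu-west-1:123456789012:abcdefghij/*/*/*"
    }
    OneRule = {
      principal  = "events.amazonaws.com"
      source_arn = "arn:aws:events:eu-west-1:123456789012:rule/RunDaily"
    }
  }
}
```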
+resource "aws_lambda_event_source_mapping" "this" {
+ for_each = { for k, v in var.event_source_mapping : k => v if local.create && var.create_function && !var.create_layer }
+
+ region = var.region
+
+ function_name = aws_lambda_function.this[0].arn
+
+ event_source_arn = try(each.value.event_source_arn, null)
+
+ batch_size = try(each.value.batch_size, null)
+ maximum_batching_window_in_seconds = try(each.value.maximum_batching_window_in_seconds, null)
+ enabled = try(each.value.enabled, true)
+ starting_position = try(each.value.starting_position, null)
+ starting_position_timestamp = try(each.value.starting_position_timestamp, null)
+ parallelization_factor = try(each.value.parallelization_factor, null)
+ maximum_retry_attempts = try(each.value.maximum_retry_attempts, null)
+ maximum_record_age_in_seconds = try(each.value.maximum_record_age_in_seconds, null)
+ bisect_batch_on_function_error = try(each.value.bisect_batch_on_function_error, null)
+ topics = try(each.value.topics, null)
+ queues = try(each.value.queues, null)
+ function_response_types = try(each.value.function_response_types, null)
+ tumbling_window_in_seconds = try(each.value.tumbling_window_in_seconds, null)
+
+ dynamic "destination_config" {
+ for_each = try(each.value.destination_arn_on_failure, null) != null ? [true] : []
+ content {
+ on_failure {
+ destination_arn = each.value["destination_arn_on_failure"]
+ }
+ }
+ }
+
+ dynamic "scaling_config" {
+ for_each = try([each.value.scaling_config], [])
+ content {
+ maximum_concurrency = try(scaling_config.value.maximum_concurrency, null)
+ }
+ }
+
+ dynamic "self_managed_event_source" {
+ for_each = try(each.value.self_managed_event_source, [])
+ content {
+ endpoints = self_managed_event_source.value.endpoints
+ }
+ }
+
+ dynamic "self_managed_kafka_event_source_config" {
+ for_each = try(each.value.self_managed_kafka_event_source_config, [])
+ content {
+ consumer_group_id = self_managed_kafka_event_source_config.value.consumer_group_id
+ }
+ }
+ dynamic "amazon_managed_kafka_event_source_config" {
+ for_each = try(each.value.amazon_managed_kafka_event_source_config, [])
+ content {
+ consumer_group_id = amazon_managed_kafka_event_source_config.value.consumer_group_id
+ }
+ }
+
+ dynamic "source_access_configuration" {
+ for_each = try(each.value.source_access_configuration, [])
+ content {
+ type = source_access_configuration.value["type"]
+ uri = source_access_configuration.value["uri"]
+ }
+ }
+
+ dynamic "filter_criteria" {
+ for_each = try(each.value.filter_criteria, null) != null ? [true] : []
+
+ content {
+ dynamic "filter" {
+ for_each = try(flatten([each.value.filter_criteria]), [])
+
+ content {
+ pattern = try(filter.value.pattern, null)
+ }
+ }
+ }
+ }
+
+ dynamic "document_db_event_source_config" {
+ for_each = try(each.value.document_db_event_source_config, [])
+
+ content {
+ database_name = document_db_event_source_config.value.database_name
+ collection_name = try(document_db_event_source_config.value.collection_name, null)
+ full_document = try(document_db_event_source_config.value.full_document, null)
+ }
+ }
+
+ dynamic "metrics_config" {
+ for_each = try([each.value.metrics_config], [])
+
+ content {
+ metrics = metrics_config.value.metrics
+ }
+ }
+
+ dynamic "provisioned_poller_config" {
+ for_each = try([each.value.provisioned_poller_config], [])
+ content {
+ maximum_pollers = try(provisioned_poller_config.value.maximum_pollers, null)
+ minimum_pollers = try(provisioned_poller_config.value.minimum_pollers, null)
+ }
+ }
+
+ tags = merge(var.tags, try(each.value.tags, {}))
+}
+
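The `event_source_mapping` input driving the resource above is a map of maps read with `try()`, so only the keys you set are rendered. A hedged sketch of an SQS mapping exercising the `scaling_config` and `filter_criteria` blocks (queue ARN and filter pattern illustrative):

```hcl
module "lambda_function" {
  source = "terraform-aws-modules/lambda/aws"

  # ... function configuration ...

  event_source_mapping = {
    sqs = {
      event_source_arn = "arn:aws:sqs:eu-west-1:123456789012:my-queue"
      batch_size       = 10

      # Rendered by the scaling_config dynamic block
      scaling_config = {
        maximum_concurrency = 20
      }

      # Rendered by the filter_criteria dynamic block
      filter_criteria = {
        pattern = jsonencode({ body = { state = ["processed"] } })
      }
    }
  }
}
```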
+resource "aws_lambda_function_url" "this" {
+ count = local.create && var.create_function && !var.create_layer && var.create_lambda_function_url ? 1 : 0
+
+ region = var.region
+
+ function_name = aws_lambda_function.this[0].function_name
+
+ # Error: error creating Lambda Function URL: ValidationException
+ qualifier = var.create_unqualified_alias_lambda_function_url ? null : aws_lambda_function.this[0].version
+ authorization_type = var.authorization_type
+ invoke_mode = var.invoke_mode
+
+ dynamic "cors" {
+ for_each = length(keys(var.cors)) == 0 ? [] : [var.cors]
+
+ content {
+ allow_credentials = try(cors.value.allow_credentials, null)
+ allow_headers = try(cors.value.allow_headers, null)
+ allow_methods = try(cors.value.allow_methods, null)
+ allow_origins = try(cors.value.allow_origins, null)
+ expose_headers = try(cors.value.expose_headers, null)
+ max_age = try(cors.value.max_age, null)
+ }
+ }
+}
+
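A matching sketch for the function URL inputs read above; the `cors` keys mirror those pulled out with `try()` (all values illustrative):

```hcl
module "lambda_function" {
  source = "terraform-aws-modules/lambda/aws"

  # ... function configuration ...

  create_lambda_function_url = true
  authorization_type         = "AWS_IAM"
  invoke_mode                = "BUFFERED"

  cors = {
    allow_credentials = true
    allow_origins     = ["https://example.com"]
    allow_methods     = ["*"]
    allow_headers     = ["date", "keep-alive"]
    expose_headers    = ["date", "keep-alive"]
    max_age           = 86400
  }
}
```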
+resource "aws_lambda_function_recursion_config" "this" {
+ count = local.create && var.create_function && !var.create_layer && var.recursive_loop == "Allow" ? 1 : 0
+
+ region = var.region
+
+ function_name = aws_lambda_function.this[0].function_name
+ recursive_loop = var.recursive_loop
+}
+
+# This resource contains the extra information required by SAM CLI to provide the testing capabilities
+# to the TF application. The required data is where SAM CLI can find the Lambda function source code
+# and which resources contain the build logic.
+resource "null_resource" "sam_metadata_aws_lambda_function" {
+ count = local.create && var.create_sam_metadata && var.create_package && var.create_function && !var.create_layer ? 1 : 0
+
+ triggers = {
+ # This is a way to let SAM CLI correlate the Lambda function resource with this metadata
+ # resource
+ resource_name = "aws_lambda_function.this[0]"
+ resource_type = "ZIP_LAMBDA_FUNCTION"
+
+ # The Lambda function source code.
+ original_source_code = jsonencode(var.source_path)
+
+ # A property to let SAM CLI know where to find the Lambda function source code if the provided
+ # value for the original_source_code attribute is a map.
+ source_code_property = "path"
+
+ # A property to let SAM CLI know where to find the Lambda function built output
+ built_output_path = data.external.archive_prepare[0].result.filename
+ }
+
+ # SAM CLI can run `terraform apply -target` against this metadata resource, which will apply the
+ # building resources as well
+ depends_on = [data.external.archive_prepare, null_resource.archive]
+}
+
+# This resource contains the extra information required by SAM CLI to provide the testing capabilities
+# to the TF application. The required data is where SAM CLI can find the Lambda layer source code
+# and which resources contain the build logic.
+resource "null_resource" "sam_metadata_aws_lambda_layer_version" {
+ count = local.create && var.create_sam_metadata && var.create_package && var.create_layer ? 1 : 0
+
+ triggers = {
+ # This is a way to let SAM CLI correlate the Lambda layer resource with this metadata
+ # resource
+ resource_name = "aws_lambda_layer_version.this[0]"
+ resource_type = "LAMBDA_LAYER"
+
+ # The Lambda layer source code.
+ original_source_code = jsonencode(var.source_path)
+
+ # A property to let SAM CLI know where to find the Lambda layer source code if the provided
+ # value for the original_source_code attribute is a map.
+ source_code_property = "path"
+
+ # A property to let SAM CLI know where to find the Lambda layer built output
+ built_output_path = data.external.archive_prepare[0].result.filename
+ }
+
+ # SAM CLI can run `terraform apply -target` against this metadata resource, which will apply the
+ # building resources as well
+ depends_on = [data.external.archive_prepare, null_resource.archive]
}
diff --git a/modules/alias/README.md b/modules/alias/README.md
index 11be29a2..5ee31171 100644
--- a/modules/alias/README.md
+++ b/modules/alias/README.md
@@ -17,7 +17,7 @@ module "lambda_function" {
function_name = "my-lambda1"
handler = "index.lambda_handler"
- runtime = "python3.8"
+ runtime = "python3.12"
source_path = "../src/lambda-function1"
}
@@ -29,13 +29,13 @@ module "alias_no_refresh" {
name = "current-no-refresh"
- function_name = module.lambda_function.this_lambda_function_name
- function_version = module.lambda_function.this_lambda_function_version
+ function_name = module.lambda_function.lambda_function_name
+ function_version = module.lambda_function.lambda_function_version
allowed_triggers = {
AnotherAPIGatewayAny = {
- service = "apigateway"
- arn = "arn:aws:execute-api:eu-west-1:135367859851:abcdedfgse"
+ service = "apigateway"
+ source_arn = "arn:aws:execute-api:eu-west-1:135367859851:abcdedfgse/*/*/*"
}
}
}
@@ -50,7 +50,7 @@ module "alias_refresh" {
source = "terraform-aws-modules/lambda/aws//modules/alias"
name = "current-with-refresh"
- function_name = module.lambda_function.this_lambda_function_name
+ function_name = module.lambda_function.lambda_function_name
}
```
@@ -63,7 +63,7 @@ module "alias_refresh" {
source = "terraform-aws-modules/lambda/aws//modules/alias"
name = "current-with-refresh"
- function_name = module.lambda_function.this_lambda_function_name
+ function_name = module.lambda_function.lambda_function_name
}
module "alias_existing" {
@@ -71,13 +71,13 @@ module "alias_existing" {
use_existing_alias = true
- name = module.alias_refresh.this_lambda_alias_name
- function_name = module.lambda_function.this_lambda_function_name
+ name = module.alias_refresh.lambda_alias_name
+ function_name = module.lambda_function.lambda_function_name
allowed_triggers = {
AnotherAwesomeAPIGateway = {
- service = "apigateway"
- arn = "arn:aws:execute-api:eu-west-1:999967859851:aqnku8akd0"
+ service = "apigateway"
+ source_arn = "arn:aws:execute-api:eu-west-1:999967859851:aqnku8akd0/*/*/*"
}
}
}
@@ -110,54 +110,74 @@ module "lambda" {
* [Alias](https://github.com/terraform-aws-modules/terraform-aws-lambda/tree/master/examples/alias) - Create Lambda function and aliases in various combinations with all supported features.
-
+
## Requirements
| Name | Version |
|------|---------|
-| terraform | ~> 0.12.6 |
-| aws | ~> 2.46 |
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
## Providers
| Name | Version |
|------|---------|
-| aws | ~> 2.46 |
+| [aws](#provider\_aws) | >= 6.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_lambda_alias.no_refresh](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_alias) | resource |
+| [aws_lambda_alias.with_refresh](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_alias) | resource |
+| [aws_lambda_event_source_mapping.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_event_source_mapping) | resource |
+| [aws_lambda_function_event_invoke_config.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function_event_invoke_config) | resource |
+| [aws_lambda_permission.qualified_alias_triggers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource |
+| [aws_lambda_permission.version_triggers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource |
+| [aws_lambda_alias.existing](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_alias) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| allowed\_triggers | Map of allowed triggers to create Lambda permissions | `map(any)` | `{}` | no |
-| create | Controls whether resources should be created | `bool` | `true` | no |
-| create\_async\_event\_config | Controls whether async event configuration for Lambda Function/Alias should be created | `bool` | `false` | no |
-| create\_qualified\_alias\_allowed\_triggers | Whether to allow triggers on qualified alias | `bool` | `true` | no |
-| create\_qualified\_alias\_async\_event\_config | Whether to allow async event configuration on qualified alias | `bool` | `true` | no |
-| create\_version\_allowed\_triggers | Whether to allow triggers on version of Lambda Function used by alias (this will revoke permissions from previous version because Terraform manages only current resources) | `bool` | `true` | no |
-| create\_version\_async\_event\_config | Whether to allow async event configuration on version of Lambda Function used by alias (this will revoke permissions from previous version because Terraform manages only current resources) | `bool` | `true` | no |
-| description | Description of the alias. | `string` | `""` | no |
-| destination\_on\_failure | Amazon Resource Name (ARN) of the destination resource for failed asynchronous invocations | `string` | `null` | no |
-| destination\_on\_success | Amazon Resource Name (ARN) of the destination resource for successful asynchronous invocations | `string` | `null` | no |
-| function\_name | The function ARN of the Lambda function for which you want to create an alias. | `string` | `""` | no |
-| function\_version | Lambda function version for which you are creating the alias. Pattern: ($LATEST\|[0-9]+). | `string` | `""` | no |
-| maximum\_event\_age\_in\_seconds | Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. | `number` | `null` | no |
-| maximum\_retry\_attempts | Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. | `number` | `null` | no |
-| name | Name for the alias you are creating. | `string` | `""` | no |
-| refresh\_alias | Whether to refresh function version used in the alias. Useful when using this module together with external tool do deployments (eg, AWS CodeDeploy). | `bool` | `true` | no |
-| routing\_additional\_version\_weights | A map that defines the proportion of events that should be sent to different versions of a lambda function. | `map(number)` | `{}` | no |
-| use\_existing\_alias | Whether to manage existing alias instead of creating a new one. Useful when using this module together with external tool do deployments (eg, AWS CodeDeploy). | `bool` | `false` | no |
+| [allowed\_triggers](#input\_allowed\_triggers) | Map of allowed triggers to create Lambda permissions | `map(any)` | `{}` | no |
+| [create](#input\_create) | Controls whether resources should be created | `bool` | `true` | no |
+| [create\_async\_event\_config](#input\_create\_async\_event\_config) | Controls whether async event configuration for Lambda Function/Alias should be created | `bool` | `false` | no |
+| [create\_qualified\_alias\_allowed\_triggers](#input\_create\_qualified\_alias\_allowed\_triggers) | Whether to allow triggers on qualified alias | `bool` | `true` | no |
+| [create\_qualified\_alias\_async\_event\_config](#input\_create\_qualified\_alias\_async\_event\_config) | Whether to allow async event configuration on qualified alias | `bool` | `true` | no |
+| [create\_version\_allowed\_triggers](#input\_create\_version\_allowed\_triggers) | Whether to allow triggers on version of Lambda Function used by alias (this will revoke permissions from previous version because Terraform manages only current resources) | `bool` | `true` | no |
+| [create\_version\_async\_event\_config](#input\_create\_version\_async\_event\_config) | Whether to allow async event configuration on version of Lambda Function used by alias (this will revoke permissions from previous version because Terraform manages only current resources) | `bool` | `true` | no |
+| [description](#input\_description) | Description of the alias. | `string` | `""` | no |
+| [destination\_on\_failure](#input\_destination\_on\_failure) | Amazon Resource Name (ARN) of the destination resource for failed asynchronous invocations | `string` | `null` | no |
+| [destination\_on\_success](#input\_destination\_on\_success) | Amazon Resource Name (ARN) of the destination resource for successful asynchronous invocations | `string` | `null` | no |
+| [event\_source\_mapping](#input\_event\_source\_mapping) | Map of event source mapping | `any` | `{}` | no |
+| [function\_name](#input\_function\_name) | The function ARN of the Lambda function for which you want to create an alias. | `string` | `""` | no |
+| [function\_version](#input\_function\_version) | Lambda function version for which you are creating the alias. Pattern: ($LATEST\|[0-9]+). | `string` | `""` | no |
+| [maximum\_event\_age\_in\_seconds](#input\_maximum\_event\_age\_in\_seconds) | Maximum age of a request that Lambda sends to a function for processing in seconds. Valid values between 60 and 21600. | `number` | `null` | no |
+| [maximum\_retry\_attempts](#input\_maximum\_retry\_attempts) | Maximum number of times to retry when the function returns an error. Valid values between 0 and 2. Defaults to 2. | `number` | `null` | no |
+| [name](#input\_name) | Name for the alias you are creating. | `string` | `""` | no |
+| [refresh\_alias](#input\_refresh\_alias) | Whether to refresh function version used in the alias. Useful when using this module together with an external tool to do deployments (e.g., AWS CodeDeploy). | `bool` | `true` | no |
+| [routing\_additional\_version\_weights](#input\_routing\_additional\_version\_weights) | A map that defines the proportion of events that should be sent to different versions of a lambda function. | `map(number)` | `{}` | no |
+| [use\_existing\_alias](#input\_use\_existing\_alias) | Whether to manage existing alias instead of creating a new one. Useful when using this module together with an external tool to do deployments (e.g., AWS CodeDeploy). | `bool` | `false` | no |
## Outputs
| Name | Description |
|------|-------------|
-| this\_lambda\_alias\_arn | The ARN of the Lambda Function Alias |
-| this\_lambda\_alias\_description | Description of alias |
-| this\_lambda\_alias\_function\_version | Lambda function version which the alias uses |
-| this\_lambda\_alias\_invoke\_arn | The ARN to be used for invoking Lambda Function from API Gateway |
-| this\_lambda\_alias\_name | The name of the Lambda Function Alias |
-
-
+| [lambda\_alias\_arn](#output\_lambda\_alias\_arn) | The ARN of the Lambda Function Alias |
+| [lambda\_alias\_description](#output\_lambda\_alias\_description) | Description of alias |
+| [lambda\_alias\_event\_source\_mapping\_function\_arn](#output\_lambda\_alias\_event\_source\_mapping\_function\_arn) | The ARN of the Lambda function the event source mapping is sending events to |
+| [lambda\_alias\_event\_source\_mapping\_state](#output\_lambda\_alias\_event\_source\_mapping\_state) | The state of the event source mapping |
+| [lambda\_alias\_event\_source\_mapping\_state\_transition\_reason](#output\_lambda\_alias\_event\_source\_mapping\_state\_transition\_reason) | The reason the event source mapping is in its current state |
+| [lambda\_alias\_event\_source\_mapping\_uuid](#output\_lambda\_alias\_event\_source\_mapping\_uuid) | The UUID of the created event source mapping |
+| [lambda\_alias\_function\_version](#output\_lambda\_alias\_function\_version) | Lambda function version which the alias uses |
+| [lambda\_alias\_invoke\_arn](#output\_lambda\_alias\_invoke\_arn) | The ARN to be used for invoking Lambda Function from API Gateway |
+| [lambda\_alias\_name](#output\_lambda\_alias\_name) | The name of the Lambda Function Alias |
+
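The alias submodule gains the same `event_source_mapping` input; mappings created here target the alias ARN rather than the bare function, so events flow through the alias's version routing. A hedged usage sketch (queue ARN illustrative):

```hcl
module "alias_refresh" {
  source = "terraform-aws-modules/lambda/aws//modules/alias"

  name          = "current-with-refresh"
  function_name = module.lambda_function.lambda_function_name

  event_source_mapping = {
    sqs = {
      event_source_arn = "arn:aws:sqs:eu-west-1:123456789012:my-queue"
      batch_size       = 5
    }
  }
}
```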
## Authors
diff --git a/modules/alias/main.tf b/modules/alias/main.tf
index 9eaefb22..e57079a2 100644
--- a/modules/alias/main.tf
+++ b/modules/alias/main.tf
@@ -1,5 +1,6 @@
locals {
- version = element(concat(data.aws_lambda_alias.existing.*.function_version, aws_lambda_alias.with_refresh.*.function_version, aws_lambda_alias.no_refresh.*.function_version, [""]), 0)
+ alias_arn = try(data.aws_lambda_alias.existing[0].arn, aws_lambda_alias.no_refresh[0].arn, aws_lambda_alias.with_refresh[0].arn, "")
+ version = try(data.aws_lambda_alias.existing[0].function_version, aws_lambda_alias.with_refresh[0].function_version, aws_lambda_alias.no_refresh[0].function_version, "")
qualifiers = zipmap(["version", "qualified_alias"], [var.create_version_async_event_config ? true : null, var.create_qualified_alias_async_event_config ? true : null])
}
@@ -11,7 +12,7 @@ data "aws_lambda_alias" "existing" {
}
resource "aws_lambda_alias" "no_refresh" {
- count = var.create && ! var.use_existing_alias && ! var.refresh_alias ? 1 : 0
+ count = var.create && !var.use_existing_alias && !var.refresh_alias ? 1 : 0
name = var.name
description = var.description
@@ -19,17 +20,21 @@ resource "aws_lambda_alias" "no_refresh" {
function_name = var.function_name
function_version = var.function_version != "" ? var.function_version : "$LATEST"
- // $LATEST is not supported for an alias pointing to more than 1 version
+ # $LATEST is not supported for an alias pointing to more than 1 version
dynamic "routing_config" {
for_each = length(keys(var.routing_additional_version_weights)) == 0 ? [] : [true]
content {
additional_version_weights = var.routing_additional_version_weights
}
}
+
+ lifecycle {
+ ignore_changes = [function_version]
+ }
}
resource "aws_lambda_alias" "with_refresh" {
- count = var.create && ! var.use_existing_alias && var.refresh_alias ? 1 : 0
+ count = var.create && !var.use_existing_alias && var.refresh_alias ? 1 : 0
name = var.name
description = var.description
@@ -37,17 +42,13 @@ resource "aws_lambda_alias" "with_refresh" {
function_name = var.function_name
function_version = var.function_version != "" ? var.function_version : "$LATEST"
- // $LATEST is not supported for an alias pointing to more than 1 version
+ # $LATEST is not supported for an alias pointing to more than 1 version
dynamic "routing_config" {
for_each = length(keys(var.routing_additional_version_weights)) == 0 ? [] : [true]
content {
additional_version_weights = var.routing_additional_version_weights
}
}
-
- lifecycle {
- ignore_changes = [function_version]
- }
}
resource "aws_lambda_function_event_invoke_config" "this" {
@@ -84,15 +85,16 @@ resource "aws_lambda_permission" "version_triggers" {
function_name = var.function_name
- // Error: Error adding new Lambda Permission for ... InvalidParameterValueException: We currently do not support adding policies for $LATEST.
+ # Error: Error adding new Lambda Permission for ... InvalidParameterValueException: We currently do not support adding policies for $LATEST.
qualifier = local.version != "$LATEST" ? local.version : null
- statement_id = lookup(each.value, "statement_id", each.key)
- action = lookup(each.value, "action", "lambda:InvokeFunction")
- principal = lookup(each.value, "principal", format("%s.amazonaws.com", lookup(each.value, "service", "")))
- source_arn = lookup(each.value, "source_arn", lookup(each.value, "service", null) == "apigateway" ? "${lookup(each.value, "arn", "")}/*/*/*" : null)
- source_account = lookup(each.value, "source_account", null)
- event_source_token = lookup(each.value, "event_source_token", null)
+ statement_id = try(each.value.statement_id, each.key)
+ action = try(each.value.action, "lambda:InvokeFunction")
+ principal = try(each.value.principal, format("%s.amazonaws.com", try(each.value.service, "")))
+ principal_org_id = try(each.value.principal_org_id, null)
+ source_arn = try(each.value.source_arn, null)
+ source_account = try(each.value.source_account, null)
+ event_source_token = try(each.value.event_source_token, null)
}
resource "aws_lambda_permission" "qualified_alias_triggers" {
@@ -101,10 +103,91 @@ resource "aws_lambda_permission" "qualified_alias_triggers" {
function_name = var.function_name
qualifier = var.name
- statement_id = lookup(each.value, "statement_id", each.key)
- action = lookup(each.value, "action", "lambda:InvokeFunction")
- principal = lookup(each.value, "principal", format("%s.amazonaws.com", lookup(each.value, "service", "")))
- source_arn = lookup(each.value, "source_arn", lookup(each.value, "service", null) == "apigateway" ? "${lookup(each.value, "arn", "")}/*/*/*" : null)
- source_account = lookup(each.value, "source_account", null)
- event_source_token = lookup(each.value, "event_source_token", null)
+ statement_id = try(each.value.statement_id, each.key)
+ action = try(each.value.action, "lambda:InvokeFunction")
+ principal = try(each.value.principal, format("%s.amazonaws.com", try(each.value.service, "")))
+ principal_org_id = try(each.value.principal_org_id, null)
+ source_arn = try(each.value.source_arn, null)
+ source_account = try(each.value.source_account, null)
+ event_source_token = try(each.value.event_source_token, null)
+}
+
+resource "aws_lambda_event_source_mapping" "this" {
+ for_each = { for k, v in var.event_source_mapping : k => v if var.create }
+
+ function_name = local.alias_arn
+
+ event_source_arn = try(each.value.event_source_arn, null)
+
+ batch_size = try(each.value.batch_size, null)
+ maximum_batching_window_in_seconds = try(each.value.maximum_batching_window_in_seconds, null)
+ enabled = try(each.value.enabled, null)
+ starting_position = try(each.value.starting_position, null)
+ starting_position_timestamp = try(each.value.starting_position_timestamp, null)
+ parallelization_factor = try(each.value.parallelization_factor, null)
+ maximum_retry_attempts = try(each.value.maximum_retry_attempts, null)
+ maximum_record_age_in_seconds = try(each.value.maximum_record_age_in_seconds, null)
+ bisect_batch_on_function_error = try(each.value.bisect_batch_on_function_error, null)
+ topics = try(each.value.topics, null)
+ queues = try(each.value.queues, null)
+ function_response_types = try(each.value.function_response_types, null)
+
+ dynamic "destination_config" {
+ for_each = try(each.value.destination_arn_on_failure, null) != null ? [true] : []
+ content {
+ on_failure {
+ destination_arn = each.value["destination_arn_on_failure"]
+ }
+ }
+ }
+
+ dynamic "scaling_config" {
+ for_each = try([each.value.scaling_config], [])
+ content {
+ maximum_concurrency = try(scaling_config.value.maximum_concurrency, null)
+ }
+ }
+
+ dynamic "self_managed_event_source" {
+ for_each = try(each.value.self_managed_event_source, [])
+ content {
+ endpoints = self_managed_event_source.value.endpoints
+ }
+ }
+
+ dynamic "self_managed_kafka_event_source_config" {
+ for_each = try(each.value.self_managed_kafka_event_source_config, [])
+ content {
+ consumer_group_id = try(self_managed_kafka_event_source_config.value.consumer_group_id, null)
+ }
+ }
+
+ dynamic "amazon_managed_kafka_event_source_config" {
+ for_each = try(each.value.amazon_managed_kafka_event_source_config, [])
+ content {
+ consumer_group_id = try(amazon_managed_kafka_event_source_config.value.consumer_group_id, null)
+ }
+ }
+
+ dynamic "source_access_configuration" {
+ for_each = try(each.value.source_access_configuration, [])
+ content {
+ type = source_access_configuration.value["type"]
+ uri = source_access_configuration.value["uri"]
+ }
+ }
+
+ dynamic "filter_criteria" {
+ for_each = try(each.value.filter_criteria, null) != null ? [true] : []
+
+ content {
+ dynamic "filter" {
+ for_each = try(flatten([each.value.filter_criteria]), [])
+
+ content {
+ pattern = try(filter.value.pattern, null)
+ }
+ }
+ }
+ }
}
diff --git a/modules/alias/outputs.tf b/modules/alias/outputs.tf
index 3e284331..b1a29153 100644
--- a/modules/alias/outputs.tf
+++ b/modules/alias/outputs.tf
@@ -1,25 +1,45 @@
# Lambda Alias
-output "this_lambda_alias_name" {
+output "lambda_alias_name" {
description = "The name of the Lambda Function Alias"
- value = element(concat(data.aws_lambda_alias.existing.*.name, aws_lambda_alias.with_refresh.*.name, aws_lambda_alias.no_refresh.*.name, [""]), 0)
+ value = try(data.aws_lambda_alias.existing[0].name, aws_lambda_alias.with_refresh[0].name, aws_lambda_alias.no_refresh[0].name, "")
}
-output "this_lambda_alias_arn" {
+output "lambda_alias_arn" {
description = "The ARN of the Lambda Function Alias"
- value = element(concat(data.aws_lambda_alias.existing.*.arn, aws_lambda_alias.with_refresh.*.arn, aws_lambda_alias.no_refresh.*.arn, [""]), 0)
+ value = try(data.aws_lambda_alias.existing[0].arn, aws_lambda_alias.with_refresh[0].arn, aws_lambda_alias.no_refresh[0].arn, "")
}
-output "this_lambda_alias_invoke_arn" {
+output "lambda_alias_invoke_arn" {
description = "The ARN to be used for invoking Lambda Function from API Gateway"
- value = element(concat(data.aws_lambda_alias.existing.*.invoke_arn, aws_lambda_alias.with_refresh.*.invoke_arn, aws_lambda_alias.no_refresh.*.invoke_arn, [""]), 0)
+ value = try(data.aws_lambda_alias.existing[0].invoke_arn, aws_lambda_alias.with_refresh[0].invoke_arn, aws_lambda_alias.no_refresh[0].invoke_arn, "")
}
-output "this_lambda_alias_description" {
+output "lambda_alias_description" {
description = "Description of alias"
- value = element(concat(data.aws_lambda_alias.existing.*.description, aws_lambda_alias.with_refresh.*.description, aws_lambda_alias.no_refresh.*.description, [""]), 0)
+ value = try(data.aws_lambda_alias.existing[0].description, aws_lambda_alias.with_refresh[0].description, aws_lambda_alias.no_refresh[0].description, "")
}
-output "this_lambda_alias_function_version" {
+output "lambda_alias_function_version" {
description = "Lambda function version which the alias uses"
- value = element(concat(data.aws_lambda_alias.existing.*.function_version, aws_lambda_alias.with_refresh.*.function_version, aws_lambda_alias.no_refresh.*.function_version, [""]), 0)
+ value = try(data.aws_lambda_alias.existing[0].function_version, aws_lambda_alias.with_refresh[0].function_version, aws_lambda_alias.no_refresh[0].function_version, "")
+}
+
+output "lambda_alias_event_source_mapping_function_arn" {
+ description = "The the ARN of the Lambda function the event source mapping is sending events to"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.function_arn }
+}
+
+output "lambda_alias_event_source_mapping_state" {
+ description = "The state of the event source mapping"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.state }
+}
+
+output "lambda_alias_event_source_mapping_state_transition_reason" {
+ description = "The reason the event source mapping is in its current state"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.state_transition_reason }
+}
+
+output "lambda_alias_event_source_mapping_uuid" {
+ description = "The UUID of the created event source mapping"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.uuid }
}
diff --git a/modules/alias/variables.tf b/modules/alias/variables.tf
index d5601998..732067f6 100644
--- a/modules/alias/variables.tf
+++ b/modules/alias/variables.tf
@@ -117,3 +117,13 @@ variable "allowed_triggers" {
type = map(any)
default = {}
}
+
+############################################
+# Lambda Event Source Mapping
+############################################
+
+variable "event_source_mapping" {
+ description = "Map of event source mapping"
+ type = any
+ default = {}
+}
diff --git a/modules/alias/versions.tf b/modules/alias/versions.tf
index c1b26983..db13b0a8 100644
--- a/modules/alias/versions.tf
+++ b/modules/alias/versions.tf
@@ -1,7 +1,10 @@
terraform {
- required_version = "~> 0.12.6"
+ required_version = ">= 1.5.7"
required_providers {
- aws = "~> 2.46"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
}
}
diff --git a/modules/deploy/README.md b/modules/deploy/README.md
index d05af2f1..b5d535c9 100644
--- a/modules/deploy/README.md
+++ b/modules/deploy/README.md
@@ -8,7 +8,7 @@ This module can create AWS CodeDeploy application and deployment group, if neces
During deployment this module does the following:
1. Create JSON object with required AppSpec configuration. Optionally, you can store deploy script for debug purposes by setting `save_deploy_script = true`.
-1. Run [`aws deploy create-deployment` command](https://docs.aws.amazon.com/cli/latest/reference/deploy/create-deployment.html) if `create_deployment = true` was set
+1. Run [`aws deploy create-deployment` command](https://docs.aws.amazon.com/cli/latest/reference/deploy/create-deployment.html) if `create_deployment = true` and `run_deployment = true` are set.
1. After deployment is created, it can wait for the completion if `wait_deployment_completion = true`. Be aware, that Terraform will lock the execution and it can fail if it runs for a long period of time. Set this flag for fast deployments (eg, `deployment_config_name = "CodeDeployDefault.LambdaAllAtOnce"`).
@@ -22,7 +22,7 @@ module "lambda_function" {
function_name = "my-lambda1"
handler = "index.lambda_handler"
- runtime = "python3.8"
+ runtime = "python3.12"
source_path = "../src/lambda-function1"
}
@@ -31,20 +31,20 @@ module "alias_refresh" {
source = "terraform-aws-modules/lambda/aws//modules/alias"
name = "current-with-refresh"
- function_name = module.lambda_function.this_lambda_function_name
+ function_name = module.lambda_function.lambda_function_name
# Set function_version when creating alias to be able to deploy using it,
# because AWS CodeDeploy doesn't understand $LATEST as CurrentVersion.
- function_version = module.lambda_function.this_lambda_function_version
+ function_version = module.lambda_function.lambda_function_version
}
module "deploy" {
source = "terraform-aws-modules/lambda/aws//modules/deploy"
- alias_name = module.alias_refresh.this_lambda_alias_name
- function_name = module.lambda_function.this_lambda_function_name
+ alias_name = module.alias_refresh.lambda_alias_name
+ function_name = module.lambda_function.lambda_function_name
- target_version = module.lambda_function.this_lambda_function_version
+ target_version = module.lambda_function.lambda_function_version
create_app = true
app_name = "my-awesome-app"
@@ -53,6 +53,7 @@ module "deploy" {
deployment_group_name = "something"
create_deployment = true
+ run_deployment = true
wait_deployment_completion = true
triggers = {
@@ -94,72 +95,104 @@ module "lambda" {
* [Deploy](https://github.com/terraform-aws-modules/terraform-aws-lambda/tree/master/examples/deploy) - Creates Lambda Function, Alias, and all resources required to create deployments using AWS CodeDeploy.
-
+
## Requirements
| Name | Version |
|------|---------|
-| terraform | ~> 0.12.6 |
-| aws | ~> 2.46 |
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
+| [local](#requirement\_local) | >= 1.0 |
+| [null](#requirement\_null) | >= 2.0 |
## Providers
| Name | Version |
|------|---------|
-| aws | ~> 2.46 |
-| local | n/a |
-| null | n/a |
+| [aws](#provider\_aws) | >= 6.0 |
+| [local](#provider\_local) | >= 1.0 |
+| [null](#provider\_null) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_codedeploy_app.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codedeploy_app) | resource |
+| [aws_codedeploy_deployment_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codedeploy_deployment_group) | resource |
+| [aws_iam_policy.hooks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_policy.triggers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
+| [aws_iam_role.codedeploy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
+| [aws_iam_role_policy_attachment.codedeploy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.hooks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_iam_role_policy_attachment.triggers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [local_file.deploy_script](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [null_resource.deploy](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.hooks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_policy_document.triggers](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
+| [aws_iam_role.codedeploy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source |
+| [aws_lambda_alias.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_alias) | data source |
+| [aws_lambda_function.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/lambda_function) | data source |
+| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| after\_allow\_traffic\_hook\_arn | ARN of Lambda function to execute after allow traffic during deployment | `string` | `""` | no |
-| alarm\_enabled | Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. | `bool` | `false` | no |
-| alarm\_ignore\_poll\_alarm\_failure | Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. | `bool` | `false` | no |
-| alarms | A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group. | `list(string)` | `[]` | no |
-| alias\_name | Name for the alias | `string` | `""` | no |
-| app\_name | Name of AWS CodeDeploy application | `string` | `""` | no |
-| attach\_triggers\_policy | Whether to attach SNS policy to CodeDeploy role when triggers are defined | `bool` | `false` | no |
-| auto\_rollback\_enabled | Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. | `bool` | `true` | no |
-| auto\_rollback\_events | List of event types that trigger a rollback. Supported types are DEPLOYMENT\_FAILURE and DEPLOYMENT\_STOP\_ON\_ALARM. | `list(string)` | `["DEPLOYMENT_STOP_ON_ALARM"]` | no |
-| aws\_cli\_command | Command to run as AWS CLI. May include extra arguments like region and profile. | `string` | `"aws"` | no |
-| before\_allow\_traffic\_hook\_arn | ARN of Lambda function to execute before allow traffic during deployment | `string` | `""` | no |
-| codedeploy\_principals | List of CodeDeploy service principals to allow. The list can include global or regional endpoints. | `list(string)` | `["codedeploy.amazonaws.com"]` | no |
-| codedeploy\_role\_name | IAM role name to create or use by CodeDeploy | `string` | `""` | no |
-| create | Controls whether resources should be created | `bool` | `true` | no |
-| create\_app | Whether to create new AWS CodeDeploy app | `bool` | `false` | no |
-| create\_codedeploy\_role | Whether to create new AWS CodeDeploy IAM role | `bool` | `true` | no |
-| create\_deployment | Run AWS CLI command to create deployment | `bool` | `false` | no |
-| create\_deployment\_group | Whether to create new AWS CodeDeploy Deployment Group | `bool` | `false` | no |
-| current\_version | Current version of Lambda function version to deploy (can't be $LATEST) | `string` | `""` | no |
-| deployment\_config\_name | Name of deployment config to use | `string` | `"CodeDeployDefault.LambdaAllAtOnce"` | no |
-| deployment\_group\_name | Name of deployment group to use | `string` | `""` | no |
-| description | Description to use for the deployment | `string` | `""` | no |
-| force\_deploy | Force deployment every time (even when nothing changes) | `bool` | `false` | no |
-| function\_name | The name of the Lambda function to deploy | `string` | `""` | no |
-| save\_deploy\_script | Save deploy script locally | `bool` | `false` | no |
-| target\_version | Target version of Lambda function version to deploy | `string` | `""` | no |
-| triggers | Map of triggers which will be notified when event happens. Valid options for event types are DeploymentStart, DeploymentSuccess, DeploymentFailure, DeploymentStop, DeploymentRollback, DeploymentReady (Applies only to replacement instances in a blue/green deployment), InstanceStart, InstanceSuccess, InstanceFailure, InstanceReady. Note that not all are applicable for Lambda deployments. | `map(any)` | `{}` | no |
-| use\_existing\_app | Whether to use existing AWS CodeDeploy app | `bool` | `false` | no |
-| use\_existing\_deployment\_group | Whether to use existing AWS CodeDeploy Deployment Group | `bool` | `false` | no |
-| wait\_deployment\_completion | Wait until deployment completes. It can take a lot of time and your terraform process may lock execution for long time. | `bool` | `false` | no |
+| [after\_allow\_traffic\_hook\_arn](#input\_after\_allow\_traffic\_hook\_arn) | ARN of Lambda function to execute after allow traffic during deployment. This function should be named CodeDeployHook\_, to match the managed AWSCodeDeployForLambda policy, unless you're using a custom role | `string` | `""` | no |
+| [alarm\_enabled](#input\_alarm\_enabled) | Indicates whether the alarm configuration is enabled. This option is useful when you want to temporarily deactivate alarm monitoring for a deployment group without having to add the same alarms again later. | `bool` | `false` | no |
+| [alarm\_ignore\_poll\_alarm\_failure](#input\_alarm\_ignore\_poll\_alarm\_failure) | Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from CloudWatch. | `bool` | `false` | no |
+| [alarms](#input\_alarms) | A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group. | `list(string)` | `[]` | no |
+| [alias\_name](#input\_alias\_name) | Name for the alias | `string` | `""` | no |
+| [app\_name](#input\_app\_name) | Name of AWS CodeDeploy application | `string` | `""` | no |
+| [attach\_hooks\_policy](#input\_attach\_hooks\_policy) | Whether to attach Invoke policy to CodeDeploy role when before allow traffic or after allow traffic hooks are defined. | `bool` | `true` | no |
+| [attach\_triggers\_policy](#input\_attach\_triggers\_policy) | Whether to attach SNS policy to CodeDeploy role when triggers are defined | `bool` | `false` | no |
+| [auto\_rollback\_enabled](#input\_auto\_rollback\_enabled) | Indicates whether a defined automatic rollback configuration is currently enabled for this Deployment Group. | `bool` | `true` | no |
+| [auto\_rollback\_events](#input\_auto\_rollback\_events) | List of event types that trigger a rollback. Supported types are DEPLOYMENT\_FAILURE and DEPLOYMENT\_STOP\_ON\_ALARM. | `list(string)` | `["DEPLOYMENT_STOP_ON_ALARM"]` | no |
+| [aws\_cli\_command](#input\_aws\_cli\_command) | Command to run as AWS CLI. May include extra arguments like region and profile. | `string` | `"aws"` | no |
+| [before\_allow\_traffic\_hook\_arn](#input\_before\_allow\_traffic\_hook\_arn) | ARN of Lambda function to execute before allow traffic during deployment. This function should be named CodeDeployHook\_, to match the managed AWSCodeDeployForLambda policy, unless you're using a custom role | `string` | `""` | no |
+| [codedeploy\_principals](#input\_codedeploy\_principals) | List of CodeDeploy service principals to allow. The list can include global or regional endpoints. | `list(string)` | `["codedeploy.amazonaws.com"]` | no |
+| [codedeploy\_role\_name](#input\_codedeploy\_role\_name) | IAM role name to create or use by CodeDeploy | `string` | `""` | no |
+| [create](#input\_create) | Controls whether resources should be created | `bool` | `true` | no |
+| [create\_app](#input\_create\_app) | Whether to create new AWS CodeDeploy app | `bool` | `false` | no |
+| [create\_codedeploy\_role](#input\_create\_codedeploy\_role) | Whether to create new AWS CodeDeploy IAM role | `bool` | `true` | no |
+| [create\_deployment](#input\_create\_deployment) | Create the AWS resources and script for CodeDeploy | `bool` | `false` | no |
+| [create\_deployment\_group](#input\_create\_deployment\_group) | Whether to create new AWS CodeDeploy Deployment Group | `bool` | `false` | no |
+| [current\_version](#input\_current\_version) | Current version of the Lambda function to deploy (can't be $LATEST) | `string` | `""` | no |
+| [deployment\_config\_name](#input\_deployment\_config\_name) | Name of deployment config to use | `string` | `"CodeDeployDefault.LambdaAllAtOnce"` | no |
+| [deployment\_group\_name](#input\_deployment\_group\_name) | Name of deployment group to use | `string` | `""` | no |
+| [description](#input\_description) | Description to use for the deployment | `string` | `""` | no |
+| [force\_deploy](#input\_force\_deploy) | Force deployment every time (even when nothing changes) | `bool` | `false` | no |
+| [function\_name](#input\_function\_name) | The name of the Lambda function to deploy | `string` | `""` | no |
+| [get\_deployment\_sleep\_timer](#input\_get\_deployment\_sleep\_timer) | Adds additional sleep time to get-deployment command to avoid the service throttling | `number` | `5` | no |
+| [interpreter](#input\_interpreter) | List of interpreter arguments used to execute deploy script, first arg is path | `list(string)` | `["/bin/bash", "-c"]` | no |
+| [run\_deployment](#input\_run\_deployment) | Run AWS CLI command to start the deployment | `bool` | `false` | no |
+| [save\_deploy\_script](#input\_save\_deploy\_script) | Save deploy script locally | `bool` | `false` | no |
+| [tags](#input\_tags) | A map of tags to assign to resources. | `map(string)` | `{}` | no |
+| [target\_version](#input\_target\_version) | Target version of the Lambda function to deploy | `string` | `""` | no |
+| [triggers](#input\_triggers) | Map of triggers which will be notified when event happens. Valid options for event types are DeploymentStart, DeploymentSuccess, DeploymentFailure, DeploymentStop, DeploymentRollback, DeploymentReady (Applies only to replacement instances in a blue/green deployment), InstanceStart, InstanceSuccess, InstanceFailure, InstanceReady. Note that not all are applicable for Lambda deployments. | `map(any)` | `{}` | no |
+| [use\_existing\_app](#input\_use\_existing\_app) | Whether to use existing AWS CodeDeploy app | `bool` | `false` | no |
+| [use\_existing\_deployment\_group](#input\_use\_existing\_deployment\_group) | Whether to use existing AWS CodeDeploy Deployment Group | `bool` | `false` | no |
+| [wait\_deployment\_completion](#input\_wait\_deployment\_completion) | Wait until deployment completes. It can take a lot of time and your terraform process may lock execution for long time. | `bool` | `false` | no |
## Outputs
| Name | Description |
|------|-------------|
-| appspec | n/a |
-| appspec\_content | n/a |
-| appspec\_sha256 | n/a |
-| codedeploy\_app\_name | Name of CodeDeploy application |
-| codedeploy\_deployment\_group\_id | CodeDeploy deployment group id |
-| codedeploy\_deployment\_group\_name | CodeDeploy deployment group name |
-| codedeploy\_iam\_role\_name | Name of IAM role used by CodeDeploy |
-| deploy\_script | n/a |
-| script | n/a |
-
-
+| [appspec](#output\_appspec) | Appspec data as HCL |
+| [appspec\_content](#output\_appspec\_content) | Appspec data as valid JSON |
+| [appspec\_sha256](#output\_appspec\_sha256) | SHA256 of Appspec JSON |
+| [codedeploy\_app\_name](#output\_codedeploy\_app\_name) | Name of CodeDeploy application |
+| [codedeploy\_deployment\_group\_id](#output\_codedeploy\_deployment\_group\_id) | CodeDeploy deployment group id |
+| [codedeploy\_deployment\_group\_name](#output\_codedeploy\_deployment\_group\_name) | CodeDeploy deployment group name |
+| [codedeploy\_iam\_role\_name](#output\_codedeploy\_iam\_role\_name) | Name of IAM role used by CodeDeploy |
+| [deploy\_script](#output\_deploy\_script) | Path to a deployment script |
+| [script](#output\_script) | Deployment script |
+
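A minimal usage sketch for this submodule, assuming it is consumed from the Terraform registry as `terraform-aws-modules/lambda/aws`; all values below are hypothetical:

```hcl
module "lambda_deploy" {
  source = "terraform-aws-modules/lambda/aws//modules/deploy"

  function_name = "my-lambda"   # hypothetical function and alias
  alias_name    = "prod"

  app_name              = "my-lambda-codedeploy"
  deployment_group_name = "my-lambda-deployment-group"

  target_version = "2"          # version to shift the alias traffic to

  run_deployment             = true
  wait_deployment_completion = true
}
```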
## Authors
diff --git a/modules/deploy/main.tf b/modules/deploy/main.tf
index f14c7d12..d88c0894 100644
--- a/modules/deploy/main.tf
+++ b/modules/deploy/main.tf
@@ -1,10 +1,10 @@
locals {
# AWS CodeDeploy can't deploy when CurrentVersion is "$LATEST"
- qualifier = element(concat(data.aws_lambda_function.this.*.qualifier, [""]), 0)
+ qualifier = try(data.aws_lambda_function.this[0].qualifier, "")
current_version = local.qualifier == "$LATEST" ? 1 : local.qualifier
- app_name = element(concat(aws_codedeploy_app.this.*.name, [var.app_name]), 0)
- deployment_group_name = element(concat(aws_codedeploy_deployment_group.this.*.deployment_group_name, [var.deployment_group_name]), 0)
+ app_name = try(aws_codedeploy_app.this[0].name, var.app_name)
+ deployment_group_name = try(aws_codedeploy_deployment_group.this[0].deployment_group_name, var.deployment_group_name)
appspec = merge({
version = "0.0"
@@ -16,7 +16,7 @@ locals {
Name = var.function_name
Alias = var.alias_name
CurrentVersion = var.current_version != "" ? var.current_version : local.current_version
- TargetVersion : var.target_version
+ TargetVersion = var.target_version
}
}
}
@@ -25,7 +25,7 @@ locals {
Hooks = [for k, v in zipmap(["BeforeAllowTraffic", "AfterAllowTraffic"], [
var.before_allow_traffic_hook_arn != "" ? var.before_allow_traffic_hook_arn : null,
var.after_allow_traffic_hook_arn != "" ? var.after_allow_traffic_hook_arn : null
- ]) : map(k, v)]
+ ]) : tomap({ (k) = v }) if v != null]
} : {})
appspec_content = replace(jsonencode(local.appspec), "\"", "\\\"")
@@ -33,6 +33,12 @@ locals {
script = <
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.5.7 |
+| [aws](#requirement\_aws) | >= 6.0 |
+| [docker](#requirement\_docker) | >= 3.5.0 |
+| [null](#requirement\_null) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 6.0 |
+| [docker](#provider\_docker) | >= 3.5.0 |
+| [null](#provider\_null) | >= 2.0 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_ecr_lifecycle_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_lifecycle_policy) | resource |
+| [aws_ecr_repository.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository) | resource |
+| [docker_image.this](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/image) | resource |
+| [docker_registry_image.this](https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/registry_image) | resource |
+| [null_resource.sam_metadata_docker_registry_image](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_caller_identity.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
+| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [build\_args](#input\_build\_args) | A map of Docker build arguments. | `map(string)` | `{}` | no |
+| [build\_target](#input\_build\_target) | Set the target build stage to build | `string` | `null` | no |
+| [builder](#input\_builder) | The buildx builder to use for the Docker build. | `string` | `null` | no |
+| [cache\_from](#input\_cache\_from) | List of images to consider as cache sources when building the image. | `list(string)` | `[]` | no |
+| [create\_ecr\_repo](#input\_create\_ecr\_repo) | Controls whether ECR repository for Lambda image should be created | `bool` | `false` | no |
+| [create\_sam\_metadata](#input\_create\_sam\_metadata) | Controls whether the SAM metadata null resource should be created | `bool` | `false` | no |
+| [docker\_file\_path](#input\_docker\_file\_path) | Path to Dockerfile in source package | `string` | `"Dockerfile"` | no |
+| [ecr\_address](#input\_ecr\_address) | Address of ECR repository for cross-account container image pulling (optional). Option `create_ecr_repo` must be `false` | `string` | `null` | no |
+| [ecr\_force\_delete](#input\_ecr\_force\_delete) | If true, will delete the repository even if it contains images. | `bool` | `true` | no |
+| [ecr\_repo](#input\_ecr\_repo) | Name of ECR repository to use or to create | `string` | `null` | no |
+| [ecr\_repo\_lifecycle\_policy](#input\_ecr\_repo\_lifecycle\_policy) | A JSON formatted ECR lifecycle policy to automate the cleaning up of unused images. | `string` | `null` | no |
+| [ecr\_repo\_tags](#input\_ecr\_repo\_tags) | A map of tags to assign to ECR repository | `map(string)` | `{}` | no |
+| [force\_remove](#input\_force\_remove) | Whether to remove image forcibly when the resource is destroyed. | `bool` | `false` | no |
+| [image\_tag](#input\_image\_tag) | Image tag to use. If not specified, the current timestamp in the format 'YYYYMMDDhhmmss' will be used, which can lead to unnecessary rebuilds. | `string` | `null` | no |
+| [image\_tag\_mutability](#input\_image\_tag\_mutability) | The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE` | `string` | `"MUTABLE"` | no |
+| [keep\_locally](#input\_keep\_locally) | Whether to delete the Docker image locally on destroy operation. | `bool` | `false` | no |
+| [keep\_remotely](#input\_keep\_remotely) | Whether to keep Docker image in the remote registry on destroy operation. | `bool` | `false` | no |
+| [platform](#input\_platform) | The target architecture platform to build the image for. | `string` | `null` | no |
+| [scan\_on\_push](#input\_scan\_on\_push) | Indicates whether images are scanned after being pushed to the repository | `bool` | `false` | no |
+| [source\_path](#input\_source\_path) | Path to folder containing application code | `string` | `null` | no |
+| [triggers](#input\_triggers) | A map of arbitrary strings that, when changed, will force the docker\_image resource to be replaced. This can be used to rebuild an image when the contents of the source code folders change | `map(string)` | `{}` | no |
+| [use\_image\_tag](#input\_use\_image\_tag) | Controls whether to use the image tag in the ECR repository URI. Disable this to deploy the latest image using its ID (sha256:...) | `bool` | `true` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [image\_id](#output\_image\_id) | The ID of the Docker image |
+| [image\_uri](#output\_image\_uri) | The ECR image URI for deploying lambda |
+
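A minimal usage sketch, assuming the submodule is consumed from the same registry namespace; repository name, tag, and paths are hypothetical:

```hcl
module "docker_image" {
  source = "terraform-aws-modules/lambda/aws//modules/docker-build"

  create_ecr_repo = true
  ecr_repo        = "my-lambda-image"
  image_tag       = "1.0"        # omit to fall back to a timestamp tag

  source_path      = "context"   # folder containing the application code
  docker_file_path = "Dockerfile"
  platform         = "linux/amd64"

  build_args = {
    FOO = "bar"                  # hypothetical build argument
  }
}
```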
+
+## Authors
+
+Module managed by [Anton Babenko](https://github.com/antonbabenko). Check out [serverless.tf](https://serverless.tf) to learn more about doing serverless with Terraform.
+
+Please reach out to [Betajob](https://www.betajob.com/) if you are looking for commercial support for your Terraform, AWS, or serverless project.
+
+
+## License
+
+Apache 2 Licensed. See LICENSE for full details.
diff --git a/modules/docker-build/main.tf b/modules/docker-build/main.tf
new file mode 100644
index 00000000..559060cb
--- /dev/null
+++ b/modules/docker-build/main.tf
@@ -0,0 +1,75 @@
+data "aws_region" "current" {}
+
+data "aws_caller_identity" "this" {}
+
+locals {
+ ecr_address = coalesce(var.ecr_address, format("%v.dkr.ecr.%v.amazonaws.com", data.aws_caller_identity.this.account_id, data.aws_region.current.region))
+ ecr_repo = var.create_ecr_repo ? aws_ecr_repository.this[0].id : var.ecr_repo
+ image_tag = var.use_image_tag ? coalesce(var.image_tag, formatdate("YYYYMMDDhhmmss", timestamp())) : null
+ ecr_image_name = var.use_image_tag ? format("%v/%v:%v", local.ecr_address, local.ecr_repo, local.image_tag) : format("%v/%v", local.ecr_address, local.ecr_repo)
+}
+
+resource "docker_image" "this" {
+ name = local.ecr_image_name
+
+ build {
+ context = var.source_path
+ dockerfile = var.docker_file_path
+ build_args = var.build_args
+ builder = var.builder
+ target = var.build_target
+ platform = var.platform
+ cache_from = var.cache_from
+ }
+
+ force_remove = var.force_remove
+ keep_locally = var.keep_locally
+ triggers = var.triggers
+}
+
+resource "docker_registry_image" "this" {
+ name = docker_image.this.name
+
+ keep_remotely = var.keep_remotely
+
+ triggers = length(var.triggers) == 0 ? { image_id = docker_image.this.image_id } : var.triggers
+}
+
+resource "aws_ecr_repository" "this" {
+ count = var.create_ecr_repo ? 1 : 0
+
+ force_delete = var.ecr_force_delete
+ name = var.ecr_repo
+ image_tag_mutability = var.image_tag_mutability
+
+ image_scanning_configuration {
+ scan_on_push = var.scan_on_push
+ }
+
+ tags = var.ecr_repo_tags
+}
+
+resource "aws_ecr_lifecycle_policy" "this" {
+ count = var.ecr_repo_lifecycle_policy != null ? 1 : 0
+
+ policy = var.ecr_repo_lifecycle_policy
+ repository = local.ecr_repo
+}
+
+# This resource holds the extra metadata required by SAM CLI to provide local testing capabilities
+# for the Terraform application. It records the information needed to build the Docker image of an
+# image-type Lambda function locally.
+resource "null_resource" "sam_metadata_docker_registry_image" {
+ count = var.create_sam_metadata ? 1 : 0
+
+ triggers = {
+ resource_type = "IMAGE_LAMBDA_FUNCTION"
+ docker_context = var.source_path
+ docker_file = var.docker_file_path
+ docker_build_args = jsonencode(var.build_args)
+ docker_tag = var.image_tag
+ built_image_uri = docker_registry_image.this.name
+ }
+
+ depends_on = [docker_registry_image.this]
+}
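For reference, a hedged illustration of the image name computed by `local.ecr_image_name` above; account ID, region, repository, and tag are all made up:

```hcl
locals {
  # use_image_tag = true: <account>.dkr.ecr.<region>.amazonaws.com/<repo>:<tag>
  example_tagged_name = "123456789012.dkr.ecr.eu-west-1.amazonaws.com/my-lambda-image:20240101120000"

  # use_image_tag = false: the :<tag> suffix is dropped and the pushed image is
  # addressed by digest via the image_uri output instead
  example_untagged_name = "123456789012.dkr.ecr.eu-west-1.amazonaws.com/my-lambda-image"
}
```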
diff --git a/modules/docker-build/outputs.tf b/modules/docker-build/outputs.tf
new file mode 100644
index 00000000..5b268b54
--- /dev/null
+++ b/modules/docker-build/outputs.tf
@@ -0,0 +1,9 @@
+output "image_uri" {
+ description = "The ECR image URI for deploying lambda"
+ value = var.use_image_tag ? docker_registry_image.this.name : format("%v@%v", docker_registry_image.this.name, docker_registry_image.this.id)
+}
+
+output "image_id" {
+ description = "The ID of the Docker image"
+ value = docker_registry_image.this.id
+}
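A hedged sketch of feeding these outputs into the parent module, assuming its container-image inputs (`create_package`, `package_type`, `image_uri`); the function name is hypothetical:

```hcl
module "lambda_function_from_image" {
  source = "terraform-aws-modules/lambda/aws"

  function_name  = "my-lambda-from-image"
  create_package = false
  package_type   = "Image"
  image_uri      = module.docker_image.image_uri
}
```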
diff --git a/modules/docker-build/variables.tf b/modules/docker-build/variables.tf
new file mode 100644
index 00000000..110ce554
--- /dev/null
+++ b/modules/docker-build/variables.tf
@@ -0,0 +1,132 @@
+variable "create_ecr_repo" {
+ description = "Controls whether ECR repository for Lambda image should be created"
+ type = bool
+ default = false
+}
+
+variable "create_sam_metadata" {
+ description = "Controls whether the SAM metadata null resource should be created"
+ type = bool
+ default = false
+}
+
+variable "use_image_tag" {
+ description = "Controls whether to use image tag in ECR repository URI or not. Disable this to deploy latest image using ID (sha256:...)"
+ type = bool
+ default = true
+}
+
+variable "ecr_address" {
+ description = "Address of ECR repository for cross-account container image pulling (optional). Option `create_ecr_repo` must be `false`"
+ type = string
+ default = null
+}
+
+variable "ecr_repo" {
+ description = "Name of ECR repository to use or to create"
+ type = string
+ default = null
+}
+
+variable "image_tag" {
+ description = "Image tag to use. If not specified current timestamp in format 'YYYYMMDDhhmmss' will be used. This can lead to unnecessary rebuilds."
+ type = string
+ default = null
+}
+
+variable "source_path" {
+ description = "Path to folder containing application code"
+ type = string
+ default = null
+}
+
+variable "docker_file_path" {
+ description = "Path to Dockerfile in source package"
+ type = string
+ default = "Dockerfile"
+}
+
+
+variable "image_tag_mutability" {
+ description = "The tag mutability setting for the repository. Must be one of: `MUTABLE` or `IMMUTABLE`"
+ type = string
+ default = "MUTABLE"
+}
+
+variable "scan_on_push" {
+ description = "Indicates whether images are scanned after being pushed to the repository"
+ type = bool
+ default = false
+}
+
+variable "ecr_force_delete" {
+ description = "If true, will delete the repository even if it contains images."
+ default = true
+ type = bool
+}
+
+variable "ecr_repo_tags" {
+ description = "A map of tags to assign to ECR repository"
+ type = map(string)
+ default = {}
+}
+
+variable "builder" {
+ description = "The buildx builder to use for the Docker build."
+ type = string
+ default = null
+}
+
+variable "build_args" {
+ description = "A map of Docker build arguments."
+ type = map(string)
+ default = {}
+}
+
+variable "build_target" {
+ description = "Set the target build stage to build"
+ type = string
+ default = null
+}
+
+variable "ecr_repo_lifecycle_policy" {
+ description = "A JSON formatted ECR lifecycle policy to automate the cleaning up of unused images."
+ type = string
+ default = null
+}
+
+variable "keep_remotely" {
+ description = "Whether to keep Docker image in the remote registry on destroy operation."
+ type = bool
+ default = false
+}
+
+variable "platform" {
+ description = "The target architecture platform to build the image for."
+ type = string
+ default = null
+}
+
+variable "force_remove" {
+ description = "Whether to remove image forcibly when the resource is destroyed."
+ type = bool
+ default = false
+}
+
+variable "keep_locally" {
+ description = "Whether to delete the Docker image locally on destroy operation."
+ type = bool
+ default = false
+}
+
+variable "triggers" {
+ description = "A map of arbitrary strings that, when changed, will force the docker_image resource to be replaced. This can be used to rebuild an image when contents of source code folders change"
+ type = map(string)
+ default = {}
+}
+
+variable "cache_from" {
+ description = "List of images to consider as cache sources when building the image."
+ type = list(string)
+ default = []
+}
diff --git a/modules/docker-build/versions.tf b/modules/docker-build/versions.tf
new file mode 100644
index 00000000..b203b635
--- /dev/null
+++ b/modules/docker-build/versions.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ docker = {
+ source = "kreuzwerker/docker"
+ version = ">= 3.5.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/outputs.tf b/outputs.tf
index 73d769b4..93624833 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,84 +1,146 @@
# Lambda Function
-output "this_lambda_function_arn" {
+output "lambda_function_arn" {
description = "The ARN of the Lambda Function"
- value = element(concat(aws_lambda_function.this.*.arn, [""]), 0)
+ value = try(aws_lambda_function.this[0].arn, "")
}
-output "this_lambda_function_invoke_arn" {
+output "lambda_function_arn_static" {
+ description = "The static ARN of the Lambda Function. Use this to avoid cycle errors between resources (e.g., Step Functions)"
+ value = local.create && var.create_function && !var.create_layer ? "arn:${data.aws_partition.current.partition}:lambda:${data.aws_region.current.region}:${data.aws_caller_identity.current.account_id}:function:${var.function_name}" : ""
+}
+
+output "lambda_function_invoke_arn" {
description = "The Invoke ARN of the Lambda Function"
- value = element(concat(aws_lambda_function.this.*.invoke_arn, [""]), 0)
+ value = try(aws_lambda_function.this[0].invoke_arn, "")
}
-output "this_lambda_function_name" {
+output "lambda_function_name" {
description = "The name of the Lambda Function"
- value = element(concat(aws_lambda_function.this.*.function_name, [""]), 0)
+ value = try(aws_lambda_function.this[0].function_name, "")
}
-output "this_lambda_function_qualified_arn" {
+output "lambda_function_qualified_arn" {
description = "The ARN identifying your Lambda Function Version"
- value = element(concat(aws_lambda_function.this.*.qualified_arn, [""]), 0)
+ value = try(aws_lambda_function.this[0].qualified_arn, "")
+}
+
+output "lambda_function_qualified_invoke_arn" {
+ description = "The Invoke ARN identifying your Lambda Function Version"
+ value = try(aws_lambda_function.this[0].qualified_invoke_arn, "")
}
-output "this_lambda_function_version" {
+output "lambda_function_version" {
description = "Latest published version of Lambda Function"
- value = element(concat(aws_lambda_function.this.*.version, [""]), 0)
+ value = try(aws_lambda_function.this[0].version, "")
}
-output "this_lambda_function_last_modified" {
+output "lambda_function_last_modified" {
description = "The date Lambda Function resource was last modified"
- value = element(concat(aws_lambda_function.this.*.last_modified, [""]), 0)
+ value = try(aws_lambda_function.this[0].last_modified, "")
}
-output "this_lambda_function_kms_key_arn" {
+output "lambda_function_kms_key_arn" {
description = "The ARN for the KMS encryption key of Lambda Function"
- value = element(concat(aws_lambda_function.this.*.kms_key_arn, [""]), 0)
+ value = try(aws_lambda_function.this[0].kms_key_arn, "")
}
-output "this_lambda_function_source_code_hash" {
+output "lambda_function_source_code_hash" {
description = "Base64-encoded representation of raw SHA-256 sum of the zip file"
- value = element(concat(aws_lambda_function.this.*.source_code_hash, [""]), 0)
+ value = try(aws_lambda_function.this[0].source_code_hash, "")
}
-output "this_lambda_function_source_code_size" {
+output "lambda_function_source_code_size" {
description = "The size in bytes of the function .zip file"
- value = element(concat(aws_lambda_function.this.*.source_code_size, [""]), 0)
+ value = try(aws_lambda_function.this[0].source_code_size, "")
+}
+
+output "lambda_function_signing_job_arn" {
+ description = "ARN of the signing job"
+ value = try(aws_lambda_function.this[0].signing_job_arn, "")
+}
+
+output "lambda_function_signing_profile_version_arn" {
+ description = "ARN of the signing profile version"
+ value = try(aws_lambda_function.this[0].signing_profile_version_arn, "")
+}
+
+# Lambda Function URL
+output "lambda_function_url" {
+ description = "The URL of the Lambda Function URL"
+ value = try(aws_lambda_function_url.this[0].function_url, "")
+}
+
+output "lambda_function_url_id" {
+ description = "The Lambda Function URL generated id"
+ value = try(aws_lambda_function_url.this[0].url_id, "")
}
# Lambda Layer
-output "this_lambda_layer_arn" {
+output "lambda_layer_arn" {
description = "The ARN of the Lambda Layer with version"
- value = element(concat(aws_lambda_layer_version.this.*.arn, [""]), 0)
+ value = try(aws_lambda_layer_version.this[0].arn, "")
}
-output "this_lambda_layer_layer_arn" {
+output "lambda_layer_layer_arn" {
description = "The ARN of the Lambda Layer without version"
- value = element(concat(aws_lambda_layer_version.this.*.layer_arn, [""]), 0)
+ value = try(aws_lambda_layer_version.this[0].layer_arn, "")
}
-output "this_lambda_layer_created_date" {
+output "lambda_layer_created_date" {
description = "The date Lambda Layer resource was created"
- value = element(concat(aws_lambda_layer_version.this.*.created_date, [""]), 0)
+ value = try(aws_lambda_layer_version.this[0].created_date, "")
}
-output "this_lambda_layer_source_code_size" {
+output "lambda_layer_source_code_size" {
description = "The size in bytes of the Lambda Layer .zip file"
- value = element(concat(aws_lambda_layer_version.this.*.source_code_size, [""]), 0)
+ value = try(aws_lambda_layer_version.this[0].source_code_size, "")
}
-output "this_lambda_layer_version" {
+output "lambda_layer_version" {
description = "The Lambda Layer version"
- value = element(concat(aws_lambda_layer_version.this.*.version, [""]), 0)
+ value = try(aws_lambda_layer_version.this[0].version, "")
+}
+
+# Lambda Event Source Mapping
+output "lambda_event_source_mapping_arn" {
+ description = "The event source mapping ARN"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.arn }
+}
+
+output "lambda_event_source_mapping_function_arn" {
+ description = "The the ARN of the Lambda function the event source mapping is sending events to"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.function_arn }
+}
+
+output "lambda_event_source_mapping_state" {
+ description = "The state of the event source mapping"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.state }
+}
+
+output "lambda_event_source_mapping_state_transition_reason" {
+ description = "The reason the event source mapping is in its current state"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.state_transition_reason }
+}
+
+output "lambda_event_source_mapping_uuid" {
+ description = "The UUID of the created event source mapping"
+ value = { for k, v in aws_lambda_event_source_mapping.this : k => v.uuid }
}
# IAM Role
output "lambda_role_arn" {
description = "The ARN of the IAM role created for the Lambda Function"
- value = element(concat(aws_iam_role.lambda.*.arn, [""]), 0)
+ value = try(aws_iam_role.lambda[0].arn, "")
}
output "lambda_role_name" {
description = "The name of the IAM role created for the Lambda Function"
- value = element(concat(aws_iam_role.lambda.*.name, [""]), 0)
+ value = try(aws_iam_role.lambda[0].name, "")
+}
+
+output "lambda_role_unique_id" {
+ description = "The unique id of the IAM role created for the Lambda Function"
+ value = try(aws_iam_role.lambda[0].unique_id, "")
}
# CloudWatch Log Group
@@ -87,13 +149,26 @@ output "lambda_cloudwatch_log_group_arn" {
value = local.log_group_arn
}
+output "lambda_cloudwatch_log_group_name" {
+ description = "The name of the Cloudwatch Log Group"
+ value = local.log_group_name
+}
+
# Deployment package
output "local_filename" {
description = "The filename of zip archive deployed (if deployment was from local)"
value = local.filename
+
+ depends_on = [
+ null_resource.archive,
+ ]
}
output "s3_object" {
description = "The map with S3 object data of zip archive deployed (if deployment was from S3)"
- value = map("bucket", local.s3_bucket, "key", local.s3_key, "version_id", local.s3_object_version)
+ value = {
+ bucket = local.s3_bucket
+ key = local.s3_key
+ version_id = local.s3_object_version
+ }
}
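The recurring refactor in outputs.tf above swaps the `element(concat(...))` idiom for `try()`; both fall back to `""` when the resource was not created (`count = 0`), but the intent is clearer. The deprecated `map()` function is likewise replaced with an object literal:

```hcl
# before
value = element(concat(aws_lambda_function.this.*.arn, [""]), 0)

# after: same fallback behavior, clearer intent
value = try(aws_lambda_function.this[0].arn, "")
```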
diff --git a/package.py b/package.py
index 0e6f603f..3261a282 100644
--- a/package.py
+++ b/package.py
@@ -29,8 +29,8 @@
PY37 = sys.version_info >= (3, 7)
PY36 = sys.version_info >= (3, 6)
-WINDOWS = platform.system() == 'Windows'
-OSX = platform.system() == 'Darwin'
+WINDOWS = platform.system() == "Windows"
+OSX = platform.system() == "Darwin"
################################################################################
# Logging
@@ -41,29 +41,29 @@
log_handler = None
log = logging.getLogger()
-cmd_log = logging.getLogger('cmd')
+cmd_log = logging.getLogger("cmd")
def configure_logging(use_tf_stderr=False):
global log_handler
- logging.addLevelName(DEBUG2, 'DEBUG2')
- logging.addLevelName(DEBUG3, 'DEBUG3')
- logging.addLevelName(DUMP_ENV, 'DUMP_ENV')
+ logging.addLevelName(DEBUG2, "DEBUG2")
+ logging.addLevelName(DEBUG3, "DEBUG3")
+ logging.addLevelName(DUMP_ENV, "DUMP_ENV")
class LogFormatter(logging.Formatter):
- default_format = '%(message)s'
+ default_format = "%(message)s"
formats = {
- 'root': default_format,
- 'build': default_format,
- 'prepare': '[{}] %(name)s: %(message)s'.format(os.getpid()),
- 'cmd': '> %(message)s',
- '': '%(name)s: %(message)s'
+ "root": default_format,
+ "build": default_format,
+ "prepare": "[{}] %(name)s: %(message)s".format(os.getpid()),
+ "cmd": "> %(message)s",
+ "": "%(name)s: %(message)s",
}
def formatMessage(self, record):
- prefix = record.name.rsplit('.')
- self._style._fmt = self.formats.get(prefix[0], self.formats[''])
+ prefix = record.name.rsplit(".")
+ self._style._fmt = self.formats.get(prefix[0], self.formats[""])
return super().formatMessage(record)
tf_stderr_fd = 5
@@ -71,7 +71,7 @@ def formatMessage(self, record):
if use_tf_stderr:
try:
if os.isatty(tf_stderr_fd):
- log_stream = os.fdopen(tf_stderr_fd, mode='w')
+ log_stream = os.fdopen(tf_stderr_fd, mode="w")
except OSError:
pass
@@ -84,20 +84,22 @@ def formatMessage(self, record):
def dump_env():
if log.isEnabledFor(DUMP_ENV):
- log.debug('ENV: %s', json.dumps(dict(os.environ), indent=2))
+ log.debug("ENV: %s", json.dumps(dict(os.environ), indent=2))
################################################################################
# Backports
+
def shlex_join(split_command):
"""Return a shell-escaped string from *split_command*."""
- return ' '.join(shlex.quote(arg) for arg in split_command)
+ return " ".join(shlex.quote(arg) for arg in split_command)
################################################################################
# Common functions
+
def abort(message):
"""Exits with an error message."""
log.error(message)
@@ -109,7 +111,7 @@ def cd(path, silent=False):
"""Changes the working directory."""
cwd = os.getcwd()
if not silent:
- cmd_log.info('cd %s', shlex.quote(path))
+ cmd_log.info("cd %s", shlex.quote(path))
try:
os.chdir(path)
yield
@@ -118,15 +120,16 @@ def cd(path, silent=False):
@contextmanager
-def tempdir():
+def tempdir(dir=None):
"""Creates a temporary directory and then deletes it afterwards."""
- prefix = 'terraform-aws-lambda-'
- path = tempfile.mkdtemp(prefix=prefix)
- cmd_log.info('mktemp -d %sXXXXXXXX # %s', prefix, shlex.quote(path))
+ prefix = "terraform-aws-lambda-"
+ path = tempfile.mkdtemp(prefix=prefix, dir=dir)
+ abs_path = os.path.abspath(path)
+ cmd_log.info("mktemp -d %sXXXXXXXX # %s", prefix, shlex.quote(abs_path))
try:
- yield path
+ yield abs_path
finally:
- shutil.rmtree(path)
+ shutil.rmtree(abs_path)
def list_files(top_path, log=None):
@@ -135,11 +138,14 @@ def list_files(top_path, log=None):
"""
if log:
- log = log.getChild('ls')
+ log = log.getChild("ls")
results = []
- for root, dirs, files in os.walk(top_path):
+ for root, dirs, files in os.walk(top_path, followlinks=True):
+ # Sort directories and files to ensure they are always processed in the same order
+ dirs.sort()
+ files.sort()
for file_name in files:
file_path = os.path.join(root, file_name)
relative_path = os.path.relpath(file_path, top_path)
@@ -152,10 +158,14 @@ def list_files(top_path, log=None):
def dataclass(name):
- typ = type(name, (dict,), {
- '__getattr__': lambda self, x: self.get(x),
- '__init__': lambda self, **k: self.update(k),
- })
+ typ = type(
+ name,
+ (dict,),
+ {
+ "__getattr__": lambda self, x: self.get(x),
+ "__init__": lambda self, **k: self.update(k),
+ },
+ )
return typ
@@ -171,14 +181,19 @@ def decode_json(k, v):
pass
return v
- return dataclass(name)(**dict(((
- k, datatree(k, **v) if isinstance(v, dict) else decode_json(k, v))
- for k, v in fields.items())))
+ return dataclass(name)(
+ **dict(
+ (
+ (k, datatree(k, **v) if isinstance(v, dict) else decode_json(k, v))
+ for k, v in fields.items()
+ )
+ )
+ )
def timestamp_now_ns():
timestamp = datetime.datetime.now().timestamp()
- timestamp = int(timestamp * 10 ** 7) * 10 ** 2
+ timestamp = int(timestamp * 10**7) * 10**2
return timestamp
@@ -197,9 +212,9 @@ def yesno_bool(val):
if val.isnumeric():
return bool(int(val))
val = val.lower()
- if val in ('true', 'yes', 'y'):
+ if val in ("true", "yes", "y"):
return True
- elif val in ('false', 'no', 'n'):
+ elif val in ("false", "no", "n"):
return False
else:
raise ValueError("Unsupported value: %s" % val)
@@ -209,22 +224,25 @@ def yesno_bool(val):
################################################################################
# Packaging functions
+
def emit_dir_content(base_dir):
- for root, dirs, files in os.walk(base_dir):
+ for root, dirs, files in os.walk(base_dir, followlinks=True):
+ # Sort directories and files to ensure they are always processed in the same order
+ dirs.sort()
+ files.sort()
if root != base_dir:
yield os.path.normpath(root)
for name in files:
yield os.path.normpath(os.path.join(root, name))
-def generate_content_hash(source_paths,
- hash_func=hashlib.sha256, log=None):
+def generate_content_hash(source_paths, hash_func=hashlib.sha256, log=None):
"""
Generate a content hash of the source paths.
"""
if log:
- log = log.getChild('hash')
+ log = log.getChild("hash")
hash_obj = hash_func()
@@ -254,48 +272,58 @@ def update_hash(hash_obj, file_root, file_path):
relative_path = os.path.join(file_root, file_path)
hash_obj.update(relative_path.encode())
- with open(relative_path, 'rb') as open_file:
- while True:
- data = open_file.read(1024 * 8)
- if not data:
- break
- hash_obj.update(data)
+ try:
+ with open(relative_path, "rb") as open_file:
+ while True:
+ data = open_file.read(1024 * 8)
+ if not data:
+ break
+ hash_obj.update(data)
+    # ignore the content of broken symlinks so we don't fail on the `terraform destroy` command
+ except FileNotFoundError:
+ pass
class ZipWriteStream:
""""""
- def __init__(self, zip_filename,
- compress_type=zipfile.ZIP_DEFLATED,
- compresslevel=None,
- timestamp=None):
-
+ def __init__(
+ self,
+ zip_filename,
+ compress_type=zipfile.ZIP_DEFLATED,
+ compresslevel=None,
+ timestamp=None,
+ quiet=False,
+ ):
self.timestamp = timestamp
self.filename = zip_filename
+ self.quiet = quiet
if not (self.filename and isinstance(self.filename, str)):
- raise ValueError('Zip file path must be provided')
+ raise ValueError("Zip file path must be provided")
self._tmp_filename = None
self._compress_type = compress_type
self._compresslevel = compresslevel
self._zip = None
- self._log = logging.getLogger('zip')
+ self._log = logging.getLogger("zip")
def open(self):
if self._tmp_filename:
raise zipfile.BadZipFile("ZipStream object can't be reused")
self._ensure_base_path(self.filename)
- self._tmp_filename = '{}.tmp'.format(self.filename)
- self._log.info("creating '%s' archive", self.filename)
- self._zip = zipfile.ZipFile(self._tmp_filename, "w",
- self._compress_type)
+ self._tmp_filename = "{}.tmp".format(self.filename)
+ if not self.quiet:
+ self._log.info("creating '%s' archive", self.filename)
+ self._zip = zipfile.ZipFile(self._tmp_filename, "w", self._compress_type)
return self
def close(self, failed=False):
self._zip.close()
self._zip = None
+ if not os.path.exists(self._tmp_filename):
+ return
if failed:
os.unlink(self._tmp_filename)
else:
@@ -316,14 +344,14 @@ def _ensure_open(self):
return True
if self._tmp_filename:
raise zipfile.BadZipFile("ZipWriteStream object can't be reused")
- raise zipfile.BadZipFile('ZipWriteStream should be opened first')
+ raise zipfile.BadZipFile("ZipWriteStream should be opened first")
def _ensure_base_path(self, zip_filename):
archive_dir = os.path.dirname(zip_filename)
if archive_dir and not os.path.exists(archive_dir):
self._log.info("creating %s", archive_dir)
- os.makedirs(archive_dir)
+ os.makedirs(archive_dir, exist_ok=True)
def write_dirs(self, *base_dirs, prefix=None, timestamp=None):
"""
@@ -331,7 +359,8 @@ def write_dirs(self, *base_dirs, prefix=None, timestamp=None):
"""
self._ensure_open()
for base_dir in base_dirs:
- self._log.info("adding content of directory: %s", base_dir)
+ if not self.quiet:
+ self._log.info("adding content of directory: %s", base_dir)
for path in emit_dir_content(base_dir):
arcname = os.path.relpath(path, base_dir)
self._write_file(path, prefix, arcname, timestamp)
@@ -357,10 +386,11 @@ def _write_file(self, file_path, prefix=None, name=None, timestamp=None):
if prefix:
arcname = os.path.join(prefix, arcname)
zinfo = self._make_zinfo_from_file(file_path, arcname)
- if zinfo.is_dir():
- self._log.info("adding: %s/", arcname)
- else:
- self._log.info("adding: %s", arcname)
+ if not self.quiet:
+ if zinfo.is_dir():
+ self._log.info("adding: %s/", arcname)
+ else:
+ self._log.info("adding: %s", arcname)
if timestamp is None:
timestamp = self.timestamp
date_time = self._timestamp_to_date_time(timestamp)
@@ -375,15 +405,13 @@ def write_file_obj(self, file_path, data, prefix=None, timestamp=None):
self._ensure_open()
raise NotImplementedError
- def _write_zinfo(self, zinfo, filename,
- compress_type=None, compresslevel=None):
+ def _write_zinfo(self, zinfo, filename, compress_type=None, compresslevel=None):
self._ensure_open()
zip = self._zip
if not zip.fp:
- raise ValueError(
- "Attempt to write to ZIP archive that was already closed")
+ raise ValueError("Attempt to write to ZIP archive that was already closed")
if zip._writing:
raise ValueError(
"Can't write to ZIP archive while an open writing handle exists"
@@ -421,7 +449,7 @@ def _write_zinfo(self, zinfo, filename,
zip.fp.write(zinfo.FileHeader(False))
zip.start_dir = zip.fp.tell()
else:
- with open(filename, "rb") as src, zip.open(zinfo, 'w') as dest:
+ with open(filename, "rb") as src, zip.open(zinfo, "w") as dest:
shutil.copyfileobj(src, dest, 1024 * 8)
def _make_zinfo_from_file(self, filename, arcname=None):
@@ -432,8 +460,7 @@ def _make_zinfo_from_file(self, filename, arcname=None):
zinfo_func = self._zinfo_from_file
strict_timestamps = True
- return zinfo_func(filename, arcname,
- strict_timestamps=strict_timestamps)
+ return zinfo_func(filename, arcname, strict_timestamps=strict_timestamps)
@staticmethod
def _update_zinfo(zinfo, date_time):
@@ -468,7 +495,7 @@ def _zinfo_from_file(filename, arcname=None, *, strict_timestamps=True):
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
- arcname += '/'
+ arcname += "/"
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
if isdir:
@@ -488,7 +515,7 @@ def str_int_to_timestamp(s):
return min_zip_ts
deg = len(str(int(s))) - 9
if deg < 0:
- ts = ts * 10 ** deg
+ ts = ts * 10**deg
return ts
date_time = None
@@ -504,23 +531,26 @@ def str_int_to_timestamp(s):
date_time = datetime.datetime.fromtimestamp(timestamp).timetuple()
date_time = date_time[:6]
if date_time[0] < 1980:
- raise ValueError('ZIP does not support timestamps before 1980')
+ raise ValueError("ZIP does not support timestamps before 1980")
return date_time
################################################################################
# Building
+
def patterns_list(args, patterns):
_filter = str.strip
if args.pattern_comments:
+
def _filter(x):
x = x.strip()
p = re.search("^(.*?)[ \t]*(?:[ \t]{2}#.*)?$", x).group(1).rstrip()
- if p.startswith('#'):
+ if p.startswith("#"):
return
if p:
return p
+
if isinstance(patterns, str):
return list(filter(None, map(_filter, patterns.splitlines())))
return patterns
@@ -533,13 +563,13 @@ def __init__(self, args):
self._args = args
self._rules = None
self._excludes = set()
- self._log = logging.getLogger('zip')
+ self._log = logging.getLogger("zip")
def compile(self, patterns):
rules = []
for p in patterns_list(self._args, patterns):
self._log.debug("filter pattern: %s", p)
- if p.startswith('!'):
+ if p.startswith("!"):
r = re.compile(p[1:])
rules.append((operator.not_, r))
else:
@@ -547,6 +577,10 @@ def compile(self, patterns):
rules.append((None, r))
self._rules = rules
+ def reset(self):
+ self._log.debug("reset filter patterns")
+ self._rules = None
+
def filter(self, path, prefix=None):
path = os.path.normpath(path)
if prefix:
@@ -580,13 +614,13 @@ def emit_dir(dpath, opath):
if apply(dpath):
yield opath
else:
- self._log.debug('skip: %s', dpath)
+ self._log.debug("skip: %s", dpath)
def emit_file(fpath, opath):
if apply(fpath):
yield opath
else:
- self._log.debug('skip: %s', fpath)
+ self._log.debug("skip: %s", fpath)
if os.path.isfile(path):
name = os.path.basename(path)
@@ -595,7 +629,10 @@ def emit_file(fpath, opath):
if apply(name):
yield path
else:
- for root, dirs, files in os.walk(path):
+ for root, dirs, files in os.walk(path, followlinks=True):
+ # Sort directories and files to ensure they are always processed in the same order
+ dirs.sort()
+ files.sort()
o, d = norm_path(path, root)
# log.info('od: %s %s', o, d)
if root != path:
@@ -606,6 +643,19 @@ def emit_file(fpath, opath):
yield from emit_file(f, o)
+def get_build_system_from_pyproject_toml(pyproject_file):
+    # Implement a basic TOML parser because the Python stdlib does not provide TOML support and we probably do not want to add external dependencies
+ if os.path.isfile(pyproject_file):
+ with open(pyproject_file) as f:
+ bs = False
+ for line in f.readlines():
+ if line.startswith("[build-system]"):
+ bs = True
+ continue
+ if bs and line.startswith("build-backend") and "poetry" in line:
+ return "poetry"
+
+
class BuildPlanManager:
""""""
@@ -616,7 +666,7 @@ def __init__(self, args, log=None):
def hash(self, extra_paths):
if not self._source_paths:
- raise ValueError('BuildPlanManager.plan() should be called first')
+ raise ValueError("BuildPlanManager.plan() should be called first")
content_hash_paths = self._source_paths + extra_paths
@@ -624,8 +674,7 @@ def hash(self, extra_paths):
# runtime value, build command, and content of the build paths
# because they can have an effect on the resulting archive.
self._log.debug("Computing content hash on files...")
- content_hash = generate_content_hash(content_hash_paths,
- log=self._log)
+ content_hash = generate_content_hash(content_hash_paths, log=self._log)
return content_hash
def plan(self, source_path, query):
@@ -635,20 +684,79 @@ def plan(self, source_path, query):
source_paths = []
build_plan = []
+ build_step = []
- step = lambda *x: build_plan.append(x)
- hash = source_paths.append
+ def step(*x):
+ build_step.append(x)
- def pip_requirements_step(path, prefix=None, required=False):
+ def hash(path):
+ source_paths.append(path)
+
+ def pip_requirements_step(path, prefix=None, required=False, tmp_dir=None):
+ command = runtime
requirements = path
if os.path.isdir(path):
- requirements = os.path.join(path, 'requirements.txt')
+ requirements = os.path.join(path, "requirements.txt")
if not os.path.isfile(requirements):
+ if required:
+ raise RuntimeError("File not found: {}".format(requirements))
+ else:
+ if not query.docker and not shutil.which(command):
+ raise RuntimeError(
+ "Python interpreter version equal "
+ "to defined lambda runtime ({}) should be "
+ "available in system PATH".format(command)
+ )
+
+ step("pip", runtime, requirements, prefix, tmp_dir)
+ hash(requirements)
+
+ def poetry_install_step(
+ path, poetry_export_extra_args=[], prefix=None, required=False, tmp_dir=None
+ ):
+ pyproject_file = path
+ if os.path.isdir(path):
+ pyproject_file = os.path.join(path, "pyproject.toml")
+ if get_build_system_from_pyproject_toml(pyproject_file) != "poetry":
if required:
raise RuntimeError(
- 'File not found: {}'.format(requirements))
+ "poetry configuration not found: {}".format(pyproject_file)
+ )
+ else:
+ step("poetry", runtime, path, poetry_export_extra_args, prefix, tmp_dir)
+ hash(pyproject_file)
+ pyproject_path = os.path.dirname(pyproject_file)
+ poetry_lock_file = os.path.join(pyproject_path, "poetry.lock")
+ if os.path.isfile(poetry_lock_file):
+ hash(poetry_lock_file)
+ poetry_toml_file = os.path.join(pyproject_path, "poetry.toml")
+ if os.path.isfile(poetry_toml_file):
+ hash(poetry_toml_file)
+
+ def npm_requirements_step(path, prefix=None, required=False, tmp_dir=None):
+ command = "npm"
+ requirements = path
+ if os.path.isdir(path):
+ requirements = os.path.join(path, "package.json")
+ npm_lock_file = os.path.join(path, "package-lock.json")
+ else:
+ npm_lock_file = os.path.join(os.path.dirname(path), "package-lock.json")
+
+ if os.path.isfile(npm_lock_file):
+ hash(npm_lock_file)
+ log.info("Added npm lock file: %s", npm_lock_file)
+
+ if not os.path.isfile(requirements):
+ if required:
+ raise RuntimeError("File not found: {}".format(requirements))
else:
- step('pip', runtime, requirements, prefix)
+ if not query.docker and not shutil.which(command):
+ raise RuntimeError(
+ "Nodejs package manager ({}) should be "
+ "available in system PATH".format(command)
+ )
+
+ step("npm", runtime, requirements, prefix, tmp_dir)
hash(requirements)
def commands_step(path, commands):
@@ -660,138 +768,290 @@ def commands_step(path, commands):
if path:
path = os.path.normpath(path)
+ step("set:workdir", path)
+
batch = []
for c in commands:
if isinstance(c, str):
- if c.startswith(':zip'):
+ if c.startswith(":zip"):
if path:
hash(path)
- else:
- # If path doesn't defined for a block with
- # commands it will be set to Terraform's
- # current working directory
- path = query.paths.cwd
if batch:
- step('sh', path, '\n'.join(batch))
+ step("sh", "\n".join(batch))
batch.clear()
c = shlex.split(c)
- if len(c) == 3:
+ n = len(c)
+ if n == 3:
_, _path, prefix = c
prefix = prefix.strip()
- _path = os.path.normpath(os.path.join(path, _path))
- step('zip:embedded', _path, prefix)
- elif len(c) == 2:
- prefix = None
+ _path = os.path.normpath(_path)
+ step("zip:embedded", _path, prefix)
+ elif n == 2:
_, _path = c
- step('zip:embedded', _path, prefix)
- elif len(c) == 1:
- prefix = None
- step('zip:embedded', path, prefix)
+ _path = os.path.normpath(_path)
+ step("zip:embedded", _path)
+ elif n == 1:
+ step("zip:embedded")
else:
raise ValueError(
":zip invalid call signature, use: "
- "':zip [path [prefix_in_zip]]'")
+ "':zip [path [prefix_in_zip]]'"
+ )
else:
batch.append(c)
+ if batch:
+ step("sh", "\n".join(batch))
+ batch.clear()
for claim in claims:
if isinstance(claim, str):
path = claim
if not os.path.exists(path):
- abort('source_path must be set.')
+ abort(
+ 'Could not locate source_path "{path}". Paths are relative to directory where `terraform plan` is being run ("{pwd}")'.format(
+ path=path, pwd=os.getcwd()
+ )
+ )
runtime = query.runtime
- if runtime.startswith('python'):
- pip_requirements_step(
- os.path.join(path, 'requirements.txt'))
- step('zip', path, None)
+ if runtime.startswith("python"):
+ pip_requirements_step(os.path.join(path, "requirements.txt"))
+ poetry_install_step(path)
+ elif runtime.startswith("nodejs"):
+ npm_requirements_step(os.path.join(path, "package.json"))
+ step("zip", path, None)
hash(path)
elif isinstance(claim, dict):
- path = claim.get('path')
- patterns = claim.get('patterns')
- commands = claim.get('commands')
+ path = claim.get("path")
+ patterns = claim.get("patterns")
+ commands = claim.get("commands")
if patterns:
- step('set:filter', patterns_list(self._args, patterns))
+ step("set:filter", patterns_list(self._args, patterns))
if commands:
commands_step(path, commands)
else:
- prefix = claim.get('prefix_in_zip')
- pip_requirements = claim.get('pip_requirements')
- runtime = claim.get('runtime', query.runtime)
-
- if pip_requirements and runtime.startswith('python'):
+ prefix = claim.get("prefix_in_zip")
+ pip_requirements = claim.get("pip_requirements")
+ poetry_install = claim.get("poetry_install")
+ poetry_export_extra_args = claim.get("poetry_export_extra_args", [])
+ npm_requirements = claim.get(
+ "npm_requirements", claim.get("npm_package_json")
+ )
+ runtime = claim.get("runtime", query.runtime)
+
+ if pip_requirements and runtime.startswith("python"):
if isinstance(pip_requirements, bool) and path:
- pip_requirements_step(path, prefix, required=True)
+ pip_requirements_step(
+ path,
+ prefix,
+ required=True,
+ tmp_dir=claim.get("pip_tmp_dir"),
+ )
else:
- pip_requirements_step(pip_requirements, prefix,
- required=True)
+ pip_requirements_step(
+ pip_requirements,
+ prefix,
+ required=True,
+ tmp_dir=claim.get("pip_tmp_dir"),
+ )
+
+ if poetry_install and runtime.startswith("python"):
+ if path:
+ poetry_install_step(
+ path,
+ prefix=prefix,
+ poetry_export_extra_args=poetry_export_extra_args,
+ required=True,
+ tmp_dir=claim.get("poetry_tmp_dir"),
+ )
+
+ if npm_requirements and runtime.startswith("nodejs"):
+ if isinstance(npm_requirements, bool) and path:
+ npm_requirements_step(
+ path,
+ prefix,
+ required=True,
+ tmp_dir=claim.get("npm_tmp_dir"),
+ )
+ else:
+ npm_requirements_step(
+ npm_requirements,
+ prefix,
+ required=True,
+ tmp_dir=claim.get("npm_tmp_dir"),
+ )
if path:
- step('zip', path, prefix)
- hash(path)
- if patterns:
- step('clear:filter')
+ path = os.path.normpath(path)
+ step("zip", path, prefix)
+ if patterns:
+ # Take patterns into account when computing hash
+ pf = ZipContentFilter(args=self._args)
+ pf.compile(patterns)
+
+ for path_from_pattern in pf.filter(path, prefix):
+ hash(path_from_pattern)
+ else:
+ hash(path)
else:
- raise ValueError(
- 'Unsupported source_path item: {}'.format(claim))
+ raise ValueError("Unsupported source_path item: {}".format(claim))
+
+ if build_step:
+ build_plan.append(build_step)
+ build_step = []
self._source_paths = source_paths
return build_plan
def execute(self, build_plan, zip_stream, query):
+ sh_log = logging.getLogger("sh")
+
+ tf_work_dir = os.getcwd()
+
zs = zip_stream
sh_work_dir = None
pf = None
- for action in build_plan:
- cmd = action[0]
- if cmd.startswith('zip'):
- ts = 0 if cmd == 'zip:embedded' else None
- source_path, prefix = action[1:]
- if sh_work_dir:
- if source_path != sh_work_dir:
- if not os.path.isfile(source_path):
- source_path = sh_work_dir
- if os.path.isdir(source_path):
- if pf:
- self._zip_write_with_filter(zs, pf, source_path, prefix,
- timestamp=ts)
+ for step in build_plan:
+ # init step
+ sh_work_dir = tf_work_dir
+ if pf:
+ pf.reset()
+ pf = None
+
+ log.debug("STEPDIR: %s", sh_work_dir)
+
+ # execute step actions
+ for action in step:
+ cmd = action[0]
+ if cmd.startswith("zip"):
+ ts = 0 if cmd == "zip:embedded" else None
+
+ source_path, prefix = None, None
+ n = len(action)
+ if n == 2:
+ source_path = action[1]
+ elif n == 3:
+ source_path, prefix = action[1:]
+
+ if source_path:
+ if not os.path.isabs(source_path):
+ source_path = os.path.normpath(
+ os.path.join(sh_work_dir, source_path)
+ )
else:
- zs.write_dirs(source_path, prefix=prefix, timestamp=ts)
- else:
- zs.write_file(source_path, prefix=prefix, timestamp=ts)
- elif cmd == 'pip':
- runtime, pip_requirements, prefix = action[1:]
- with install_pip_requirements(query, pip_requirements) as rd:
- if rd:
+ source_path = sh_work_dir
+ if os.path.isdir(source_path):
if pf:
- self._zip_write_with_filter(zs, pf, rd, prefix,
- timestamp=0)
+ self._zip_write_with_filter(
+ zs, pf, source_path, prefix, timestamp=ts
+ )
else:
- # XXX: timestamp=0 - what actually do with it?
- zs.write_dirs(rd, prefix=prefix, timestamp=0)
- elif cmd == 'sh':
- r, w = os.pipe()
- side_ch = os.fdopen(r)
- path, script = action[1:]
- script = "{}\npwd >&{}".format(script, w)
-
- p = subprocess.Popen(script, shell=True, cwd=path,
- pass_fds=(w,))
- os.close(w)
- sh_work_dir = side_ch.read().strip()
- p.wait()
- log.info('WD: %s', sh_work_dir)
- side_ch.close()
- elif cmd == 'set:filter':
- patterns = action[1]
- pf = ZipContentFilter(args=self._args)
- pf.compile(patterns)
- elif cmd == 'clear:filter':
- pf = None
+ zs.write_dirs(source_path, prefix=prefix, timestamp=ts)
+ else:
+ zs.write_file(source_path, prefix=prefix, timestamp=ts)
+ elif cmd == "pip":
+ runtime, pip_requirements, prefix, tmp_dir = action[1:]
+ with install_pip_requirements(
+ query, pip_requirements, tmp_dir
+ ) as rd:
+ if rd:
+ if pf:
+ self._zip_write_with_filter(
+ zs, pf, rd, prefix, timestamp=0
+ )
+ else:
+ # XXX: timestamp=0 - what actually do with it?
+ zs.write_dirs(rd, prefix=prefix, timestamp=0)
+ elif cmd == "poetry":
+ (runtime, path, poetry_export_extra_args, prefix, tmp_dir) = action[
+ 1:
+ ]
+ log.info("poetry_export_extra_args: %s", poetry_export_extra_args)
+ with install_poetry_dependencies(
+ query, path, poetry_export_extra_args, tmp_dir
+ ) as rd:
+ if rd:
+ if pf:
+ self._zip_write_with_filter(
+ zs, pf, rd, prefix, timestamp=0
+ )
+ else:
+ # XXX: timestamp=0 - what actually do with it?
+ zs.write_dirs(rd, prefix=prefix, timestamp=0)
+ elif cmd == "npm":
+ runtime, npm_requirements, prefix, tmp_dir = action[1:]
+ with install_npm_requirements(
+ query, npm_requirements, tmp_dir
+ ) as rd:
+ if rd:
+ if pf:
+ self._zip_write_with_filter(
+ zs, pf, rd, prefix, timestamp=0
+ )
+ else:
+ # XXX: timestamp=0 - what actually do with it?
+ zs.write_dirs(rd, prefix=prefix, timestamp=0)
+ elif cmd == "sh":
+ with tempfile.NamedTemporaryFile(
+ mode="w+t", delete=True
+ ) as temp_file:
+ script = action[1]
+
+ if log.isEnabledFor(DEBUG2):
+ log.debug("exec shell script ...")
+ for line in script.splitlines():
+ sh_log.debug(line)
+
+ script = "\n".join(
+ (
+ script,
+ # NOTE: Execute `pwd` to determine the subprocess shell's
+ # working directory after having executed all other commands.
+ "retcode=$?",
+ f"pwd >{temp_file.name}",
+ "exit $retcode",
+ )
+ )
+
+ p = subprocess.Popen(
+ script,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ cwd=sh_work_dir,
+ )
+
+ call_stdout, call_stderr = p.communicate()
+ exit_code = p.returncode
+ log.debug("exit_code: %s", exit_code)
+ if exit_code != 0:
+ raise RuntimeError(
+ "Script did not run successfully, exit code {}: {} - {}".format(
+ exit_code,
+ call_stdout.decode("utf-8").strip(),
+ call_stderr.decode("utf-8").strip(),
+ )
+ )
+
+ temp_file.seek(0)
+ # NOTE: This var `sh_work_dir` is consumed in cmd == "zip" loop
+ sh_work_dir = temp_file.read().strip()
+ log.debug("WORKDIR: %s", sh_work_dir)
+
+ elif cmd == "set:workdir":
+ path = action[1]
+ sh_work_dir = os.path.normpath(os.path.join(tf_work_dir, path))
+ log.debug("WORKDIR: %s", sh_work_dir)
+
+ elif cmd == "set:filter":
+ patterns = action[1]
+ pf = ZipContentFilter(args=self._args)
+ pf.compile(patterns)
@staticmethod
- def _zip_write_with_filter(zip_stream, path_filter, source_path, prefix,
- timestamp=None):
+ def _zip_write_with_filter(
+ zip_stream, path_filter, source_path, prefix, timestamp=None
+ ):
for path in path_filter.filter(source_path, prefix):
if os.path.isdir(source_path):
arcname = os.path.relpath(path, source_path)
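For the `':zip [path [prefix_in_zip]]'` signature handled above, a hedged example of a `commands` claim; the build command and paths are hypothetical:

```hcl
source_path = [
  {
    path     = "src/function"
    commands = [
      "npm ci",       # runs in the claim's workdir (see set:workdir)
      ":zip",         # zips the shell's resulting working directory
    ]
  }
]
```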
@@ -801,7 +1061,7 @@ def _zip_write_with_filter(zip_stream, path_filter, source_path, prefix,
@contextmanager
-def install_pip_requirements(query, requirements_file):
+def install_pip_requirements(query, requirements_file, tmp_dir):
# TODO:
# 1. Emit files instead of temp_dir
@@ -812,6 +1072,7 @@ def install_pip_requirements(query, requirements_file):
runtime = query.runtime
artifacts_dir = query.artifacts_dir
docker = query.docker
+ temp_dir = query.temp_dir
docker_image_tag_id = None
if docker:
@@ -825,8 +1086,9 @@ def install_pip_requirements(query, requirements_file):
output = check_output(docker_image_id_command(docker_image))
if output:
docker_image_tag_id = output.decode().strip()
- log.debug("DOCKER TAG ID: %s -> %s",
- docker_image, docker_image_tag_id)
+ log.debug(
+ "DOCKER TAG ID: %s -> %s", docker_image, docker_image_tag_id
+ )
ok = True
if ok:
break
@@ -838,28 +1100,44 @@ def install_pip_requirements(query, requirements_file):
check_call(docker_cmd)
ok = True
elif docker_file or docker_build_root:
- raise ValueError('docker_image must be specified '
- 'for a custom image future references')
+ raise ValueError(
+ "docker_image must be specified for a custom image future references"
+ )
working_dir = os.getcwd()
- log.info('Installing python requirements: %s', requirements_file)
- with tempdir() as temp_dir:
+ log.info("Installing python requirements: %s", requirements_file)
+ with tempdir(tmp_dir) as temp_dir:
requirements_filename = os.path.basename(requirements_file)
target_file = os.path.join(temp_dir, requirements_filename)
shutil.copyfile(requirements_file, target_file)
python_exec = runtime
- if WINDOWS and not docker:
- python_exec = 'python.exe'
+ subproc_env = None
+
+ if not docker:
+ if WINDOWS:
+ python_exec = "python.exe"
+ elif OSX:
+ # Workaround for OSX when XCode command line tools'
+ # python becomes the main system python interpreter
+ os_path = "{}:/Library/Developer/CommandLineTools/usr/bin".format(
+ os.environ["PATH"]
+ )
+ subproc_env = os.environ.copy()
+ subproc_env["PATH"] = os_path
# Install dependencies into the temporary directory.
with cd(temp_dir):
pip_command = [
- python_exec, '-m', 'pip',
- 'install', '--no-compile',
- '--prefix=', '--target=.',
- '--requirement={}'.format(requirements_filename),
+ python_exec,
+ "-m",
+ "pip",
+ "install",
+ "--no-compile",
+ "--prefix=",
+ "--target=.",
+ "--requirement={}".format(requirements_filename),
]
if docker:
with_ssh_agent = docker.with_ssh_agent
@@ -867,34 +1145,401 @@ def install_pip_requirements(query, requirements_file):
if pip_cache_dir:
if isinstance(pip_cache_dir, str):
pip_cache_dir = os.path.abspath(
- os.path.join(working_dir, pip_cache_dir))
+ os.path.join(working_dir, pip_cache_dir)
+ )
else:
- pip_cache_dir = os.path.abspath(os.path.join(
- working_dir, artifacts_dir, 'cache/pip'))
-
- chown_mask = '{}:{}'.format(os.getuid(), os.getgid())
- shell_command = [shlex_join(pip_command), '&&',
- shlex_join(['chown', '-R',
- chown_mask, '.'])]
- shell_command = [' '.join(shell_command)]
- check_call(docker_run_command(
- '.', shell_command, runtime,
- image=docker_image_tag_id,
- shell=True, ssh_agent=with_ssh_agent,
- pip_cache_dir=pip_cache_dir,
- ))
+ pip_cache_dir = os.path.abspath(
+ os.path.join(working_dir, artifacts_dir, "cache/pip")
+ )
+
+ chown_mask = "{}:{}".format(os.getuid(), os.getgid())
+ shell_command = [
+ shlex_join(pip_command),
+ "&&",
+ shlex_join(["chown", "-R", chown_mask, "."]),
+ ]
+ shell_command = [" ".join(shell_command)]
+ check_call(
+ docker_run_command(
+ ".",
+ shell_command,
+ runtime,
+ image=docker_image_tag_id,
+ shell=True,
+ ssh_agent=with_ssh_agent,
+ pip_cache_dir=pip_cache_dir,
+ docker=docker,
+ )
+ )
else:
cmd_log.info(shlex_join(pip_command))
log_handler and log_handler.flush()
- check_call(pip_command)
+ try:
+ if query.quiet:
+ check_call(
+ pip_command,
+ env=subproc_env,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ else:
+ check_call(pip_command, env=subproc_env)
+ except FileNotFoundError as e:
+ raise RuntimeError(
+ "Python interpreter version equal "
+ "to defined lambda runtime ({}) should be "
+ "available in system PATH".format(runtime)
+ ) from e
os.remove(target_file)
yield temp_dir
+@contextmanager
+def install_poetry_dependencies(query, path, poetry_export_extra_args, tmp_dir):
+ # TODO:
+ # 1. Emit files instead of temp_dir
+
+ # pyproject.toml is always required by poetry
+ pyproject_file = path
+ if os.path.isdir(path):
+ pyproject_file = os.path.join(path, "pyproject.toml")
+ if not os.path.exists(pyproject_file):
+ yield
+ return
+
+ # poetry.lock & poetry.toml are optional
+ pyproject_path = os.path.dirname(pyproject_file)
+ poetry_lock_file = os.path.join(pyproject_path, "poetry.lock")
+ poetry_toml_file = os.path.join(pyproject_path, "poetry.toml")
+
+ runtime = query.runtime
+ artifacts_dir = query.artifacts_dir
+ docker = query.docker
+ docker_image_tag_id = None
+
+ if docker:
+ docker_file = docker.docker_file
+ docker_image = docker.docker_image
+ docker_build_root = docker.docker_build_root
+
+ if docker_image:
+ ok = False
+ while True:
+ output = check_output(docker_image_id_command(docker_image))
+ if output:
+ docker_image_tag_id = output.decode().strip()
+ log.debug(
+ "DOCKER TAG ID: %s -> %s", docker_image, docker_image_tag_id
+ )
+ ok = True
+ if ok:
+ break
+ docker_cmd = docker_build_command(
+ build_root=docker_build_root,
+ docker_file=docker_file,
+ tag=docker_image,
+ )
+ check_call(docker_cmd)
+ ok = True
+ elif docker_file or docker_build_root:
+ raise ValueError(
+ "docker_image must be specified for a custom image future references"
+ )
+
+ working_dir = os.getcwd()
+
+ log.info("Installing python dependencies with poetry & pip: %s", poetry_lock_file)
+ with tempdir(tmp_dir) as temp_dir:
+
+ def copy_file_to_target(file, temp_dir):
+ filename = os.path.basename(file)
+ target_file = os.path.join(temp_dir, filename)
+ shutil.copyfile(file, target_file)
+ return target_file
+
+ pyproject_target_file = copy_file_to_target(pyproject_file, temp_dir)
+
+ if os.path.isfile(poetry_lock_file):
+ log.info("Using poetry.lock file: %s", poetry_lock_file)
+ poetry_lock_target_file = copy_file_to_target(poetry_lock_file, temp_dir)
+ else:
+ poetry_lock_target_file = None
+
+ if os.path.isfile(poetry_toml_file):
+ log.info("Using poetry.toml configuration file: %s", poetry_toml_file)
+ poetry_toml_target_file = copy_file_to_target(poetry_toml_file, temp_dir)
+ else:
+ poetry_toml_target_file = None
+
+ poetry_exec = "poetry"
+ python_exec = runtime
+ subproc_env = None
+
+ if not docker:
+ if WINDOWS:
+ poetry_exec = "poetry.bat"
+
+ # Install dependencies into the temporary directory.
+ with cd(temp_dir):
+ # NOTE: poetry must be available in the build environment, which is the case with lambci/lambda:build-python* docker images but not public.ecr.aws/sam/build-python* docker images
+ # FIXME: poetry install does not currently allow to specify the target directory so we export the
+ # requirements then install them with "pip --no-deps" to avoid using pip dependency resolver
+
+ poetry_export = [
+ poetry_exec,
+ "export",
+ "--format",
+ "requirements.txt",
+ "--output",
+ "requirements.txt",
+ "--with-credentials",
+ ] + poetry_export_extra_args
+
+ poetry_commands = [
+ [
+ poetry_exec,
+ "config",
+ "--no-interaction",
+ "virtualenvs.create",
+ "true",
+ ],
+ [
+ poetry_exec,
+ "config",
+ "--no-interaction",
+ "virtualenvs.in-project",
+ "true",
+ ],
+ poetry_export,
+ [
+ python_exec,
+ "-m",
+ "pip",
+ "install",
+ "--no-compile",
+ "--no-deps",
+ "--prefix=",
+ "--target=.",
+ "--requirement=requirements.txt",
+ ],
+ ]
+ if docker:
+ with_ssh_agent = docker.with_ssh_agent
+ poetry_cache_dir = docker.docker_poetry_cache
+ if poetry_cache_dir:
+ if isinstance(poetry_cache_dir, str):
+ poetry_cache_dir = os.path.abspath(
+ os.path.join(working_dir, poetry_cache_dir)
+ )
+ else:
+ poetry_cache_dir = os.path.abspath(
+ os.path.join(working_dir, artifacts_dir, "cache/poetry")
+ )
+
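+ # Build steps run as root inside the container, so chown the artifacts
+ # back to the host user afterwards.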
+ chown_mask = "{}:{}".format(os.getuid(), os.getgid())
+ poetry_commands += [["chown", "-R", chown_mask, "."]]
+ shell_commands = [
+ shlex_join(poetry_command) for poetry_command in poetry_commands
+ ]
+ shell_command = [" && ".join(shell_commands)]
+ check_call(
+ docker_run_command(
+ ".",
+ shell_command,
+ runtime,
+ image=docker_image_tag_id,
+ shell=True,
+ ssh_agent=with_ssh_agent,
+ poetry_cache_dir=poetry_cache_dir,
+ docker=docker,
+ )
+ )
+ else:
+ cmd_log.info(poetry_commands)
+ log_handler and log_handler.flush()
+ for poetry_command in poetry_commands:
+ if query.quiet:
+ check_call(
+ poetry_command,
+ env=subproc_env,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ else:
+ check_call(poetry_command, env=subproc_env)
+
+ os.remove(pyproject_target_file)
+ if poetry_lock_target_file:
+ os.remove(poetry_lock_target_file)
+ if poetry_toml_target_file:
+ os.remove(poetry_toml_target_file)
+
+ yield temp_dir
+
+
+@contextmanager
+def install_npm_requirements(query, requirements_file, tmp_dir):
+ # TODO:
+ # 1. Emit files instead of temp_dir
+
+ if not os.path.exists(requirements_file):
+ yield
+ return
+
+ runtime = query.runtime
+ artifacts_dir = query.artifacts_dir
+ temp_dir = query.temp_dir
+ docker = query.docker
+ docker_image_tag_id = None
+
+ if docker:
+ docker_file = docker.docker_file
+ docker_image = docker.docker_image
+ docker_build_root = docker.docker_build_root
+
+ if docker_image:
+ ok = False
+ while True:
+ output = check_output(docker_image_id_command(docker_image))
+ if output:
+ docker_image_tag_id = output.decode().strip()
+ log.debug(
+ "DOCKER TAG ID: %s -> %s", docker_image, docker_image_tag_id
+ )
+ ok = True
+ if ok:
+ break
+ docker_cmd = docker_build_command(
+ build_root=docker_build_root,
+ docker_file=docker_file,
+ tag=docker_image,
+ )
+ check_call(docker_cmd)
+ ok = True
+ elif docker_file or docker_build_root:
+ raise ValueError(
+ "docker_image must be specified for a custom image future references"
+ )
+
+ log.info("Installing npm requirements: %s", requirements_file)
+ with tempdir(tmp_dir) as temp_dir:
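+ # Copy package.json (and package-lock.json, if present) into the
+ # temporary directory so npm installs there without touching the
+ # source tree.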
+ temp_copy = TemporaryCopy(os.path.dirname(requirements_file), temp_dir, log)
+ temp_copy.add(os.path.basename(requirements_file))
+ temp_copy.add("package-lock.json", required=False)
+ temp_copy.copy_to_target_dir()
+
+ subproc_env = None
+ npm_exec = "npm"
+ if not docker:
+ if WINDOWS:
+ npm_exec = "npm.cmd"
+ elif OSX:
+ subproc_env = os.environ.copy()
+
+ # Install dependencies into the temporary directory.
+ with cd(temp_dir):
+ npm_command = [npm_exec, "install"]
+ if docker:
+ with_ssh_agent = docker.with_ssh_agent
+ chown_mask = "{}:{}".format(os.getuid(), os.getgid())
+ shell_command = [
+ shlex_join(npm_command),
+ "&&",
+ shlex_join(["chown", "-R", chown_mask, "."]),
+ ]
+ shell_command = [" ".join(shell_command)]
+ check_call(
+ docker_run_command(
+ ".",
+ shell_command,
+ runtime,
+ image=docker_image_tag_id,
+ shell=True,
+ ssh_agent=with_ssh_agent,
+ docker=docker,
+ )
+ )
+ else:
+ cmd_log.info(shlex_join(npm_command))
+ log_handler and log_handler.flush()
+ try:
+ if query.quiet:
+ check_call(
+ npm_command,
+ env=subproc_env,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ else:
+ check_call(npm_command, env=subproc_env)
+ except FileNotFoundError as e:
+ raise RuntimeError(
+ "Nodejs interpreter version equal "
+ "to defined lambda runtime ({}) should be "
+ "available in system PATH".format(runtime)
+ ) from e
+
+ temp_copy.remove_from_target_dir()
+ yield temp_dir
+
+
+class TemporaryCopy:
+ """Temporarily copy files to a specified location and remove them when
+ not needed.
+ """
+
+ def __init__(self, source_dir_path, target_dir_path, logger=None):
+ """Initialise with a target and a source directories."""
+ self.source_dir_path = source_dir_path
+ self.target_dir_path = target_dir_path
+ self._filenames = []
+ self._logger = logger
+
+ def _make_source_path(self, filename):
+ return os.path.join(self.source_dir_path, filename)
+
+ def _make_target_path(self, filename):
+ return os.path.join(self.target_dir_path, filename)
+
+ def add(self, filename, *, required=True):
+ """Add a file to be copied from from source to target directory
+ when `TemporaryCopy.copy_to_target_dir()` is called.
+
+ By default, the file must exist in the source directory. Set `required`
+ to `False` if the file is optional.
+ """
+ if os.path.exists(self._make_source_path(filename)):
+ self._filenames.append(filename)
+ elif required:
+ raise RuntimeError("File not found: {}".format(filename))
+
+ def copy_to_target_dir(self):
+ """Copy files (added so far) to the target directory."""
+ for filename in self._filenames:
+ if self._logger:
+ self._logger.info("Copying temporarily '%s'", filename)
+
+ shutil.copyfile(
+ self._make_source_path(filename),
+ self._make_target_path(filename),
+ )
+
+ def remove_from_target_dir(self):
+ """Remove files (added so far) from the target directory."""
+ for filename in self._filenames:
+ if self._logger:
+ self._logger.info("Removing temporarily copied '%s'", filename)
+
+ try:
+ os.remove(self._make_target_path(filename))
+ except FileNotFoundError:
+ pass
+
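+# A minimal usage sketch of TemporaryCopy, mirroring install_npm_requirements:
+#
+#   temp_copy = TemporaryCopy(source_dir, target_dir, log)
+#   temp_copy.add("package.json")
+#   temp_copy.add("package-lock.json", required=False)
+#   temp_copy.copy_to_target_dir()
+#   ...  # run the build inside target_dir
+#   temp_copy.remove_from_target_dir()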
+
def docker_image_id_command(tag):
""""""
- docker_cmd = ['docker', 'images', '--format={{.ID}}', tag]
+ docker_cmd = ["docker", "images", "--format={{.ID}}", tag]
cmd_log.info(shlex_join(docker_cmd))
log_handler and log_handler.flush()
return docker_cmd
@@ -903,18 +1548,18 @@ def docker_image_id_command(tag):
def docker_build_command(tag=None, docker_file=None, build_root=False):
""""""
if not (build_root or docker_file):
- raise ValueError('docker_build_root or docker_file must be provided')
+ raise ValueError("docker_build_root or docker_file must be provided")
- docker_cmd = ['docker', 'build']
+ docker_cmd = ["docker", "build"]
if tag:
- docker_cmd.extend(['--tag', tag])
+ docker_cmd.extend(["--tag", tag])
else:
- raise ValueError('docker_image must be specified')
+ raise ValueError("docker_image must be specified")
if not build_root:
build_root = os.path.dirname(docker_file)
if docker_file:
- docker_cmd.extend(['--file', docker_file])
+ docker_cmd.extend(["--file", docker_file])
docker_cmd.append(build_root)
cmd_log.info(shlex_join(docker_cmd))
@@ -922,60 +1567,101 @@ def docker_build_command(tag=None, docker_file=None, build_root=False):
return docker_cmd
-def docker_run_command(build_root, command, runtime,
- image=None, shell=None, ssh_agent=False,
- interactive=False, pip_cache_dir=None):
+def docker_run_command(
+ build_root,
+ command,
+ runtime,
+ image=None,
+ shell=None,
+ ssh_agent=False,
+ interactive=False,
+ pip_cache_dir=None,
+ poetry_cache_dir=None,
+ docker=None,
+):
""""""
- if platform.system() not in ('Linux', 'Darwin'):
+ if platform.system() not in ("Linux", "Darwin"):
raise RuntimeError("Unsupported platform for docker building")
- docker_cmd = ['docker', 'run', '--rm']
+ workdir = "/var/task"
+
+ docker_cmd = ["docker", "run", "--rm", "-w", workdir]
if interactive:
- docker_cmd.append('-it')
+ docker_cmd.append("-it")
bind_path = os.path.abspath(build_root)
- docker_cmd.extend(['-v', "{}:/var/task:z".format(bind_path)])
+ docker_cmd.extend(["-v", "{}:{}:z".format(bind_path, workdir)])
+
+ home = os.environ["HOME"]
+ docker_cmd.extend(
+ [
+ # '-v', '{}/.ssh/id_rsa:/root/.ssh/id_rsa:z'.format(home),
+ "-v",
+ "{}/.ssh/known_hosts:/root/.ssh/known_hosts:z".format(home),
+ ]
+ )
- home = os.environ['HOME']
- docker_cmd.extend([
- # '-v', '{}/.ssh/id_rsa:/root/.ssh/id_rsa:z'.format(home),
- '-v', '{}/.ssh/known_hosts:/root/.ssh/known_hosts:z'.format(home),
- ])
+ if docker and docker.docker_additional_options:
+ docker_cmd.extend(docker.docker_additional_options)
if ssh_agent:
- if platform.system() == 'Darwin':
+ if platform.system() == "Darwin":
# https://docs.docker.com/docker-for-mac/osxfs/#ssh-agent-forwarding
- docker_cmd.extend([
- '--mount', 'type=bind,'
- 'src=/run/host-services/ssh-auth.sock,'
- 'target=/run/host-services/ssh-auth.sock',
- '-e', 'SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock',
- ])
- elif platform.system() == 'Linux':
- sock = os.environ['SSH_AUTH_SOCK'] # TODO: Handle missing env var
- docker_cmd.extend([
- '-v', '{}:/tmp/ssh_sock:z'.format(sock),
- '-e', 'SSH_AUTH_SOCK=/tmp/ssh_sock',
- ])
-
- if platform.system() == 'Linux':
+ docker_cmd.extend(
+ [
+ "--mount",
+ "type=bind,"
+ "src=/run/host-services/ssh-auth.sock,"
+ "target=/run/host-services/ssh-auth.sock",
+ "-e",
+ "SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock",
+ ]
+ )
+ elif platform.system() == "Linux":
+ sock = os.environ["SSH_AUTH_SOCK"] # TODO: Handle missing env var
+ docker_cmd.extend(
+ [
+ "-v",
+ "{}:/tmp/ssh_sock:z".format(sock),
+ "-e",
+ "SSH_AUTH_SOCK=/tmp/ssh_sock",
+ ]
+ )
+
+ if platform.system() in ("Linux", "Darwin"):
if pip_cache_dir:
pip_cache_dir = os.path.abspath(pip_cache_dir)
- docker_cmd.extend([
- '-v', '{}:/root/.cache/pip:z'.format(pip_cache_dir),
- ])
+ docker_cmd.extend(
+ [
+ "-v",
+ "{}:/root/.cache/pip:z".format(pip_cache_dir),
+ ]
+ )
+ if poetry_cache_dir:
+ poetry_cache_dir = os.path.abspath(poetry_cache_dir)
+ docker_cmd.extend(
+ [
+ "-v",
+ "{}:/root/.cache/pypoetry:z".format(poetry_cache_dir),
+ ]
+ )
if not image:
- image = 'lambci/lambda:build-{}'.format(runtime)
+ image = "public.ecr.aws/sam/build-{}".format(runtime)
+
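+ # An explicit empty --entrypoint overrides any ENTRYPOINT baked into the
+ # image, so the command below is executed directly.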
+ if docker and docker.docker_entrypoint:
+ docker_cmd.extend(["--entrypoint", docker.docker_entrypoint])
+ else:
+ docker_cmd.extend(["--entrypoint", ""])
docker_cmd.append(image)
assert isinstance(command, list)
if shell:
if not isinstance(shell, str):
- shell = '/bin/sh'
- docker_cmd.extend([shell, '-c'])
+ shell = "/bin/sh"
+ docker_cmd.extend([shell, "-c"])
docker_cmd.extend(command)
cmd_log.info(shlex_join(docker_cmd))
@@ -986,6 +1672,7 @@ def docker_run_command(build_root, command, runtime,
################################################################################
# Commands
+
def prepare_command(args):
"""
Generates a content hash of the source_path, which is used to determine if
@@ -994,7 +1681,7 @@ def prepare_command(args):
Outputs a filename and a command to run if the archive needs to be built.
"""
- log = logging.getLogger('prepare')
+ log = logging.getLogger("prepare")
# Load the query.
query_data = json.load(sys.stdin)
@@ -1002,13 +1689,13 @@ def prepare_command(args):
dump_env()
if log.isEnabledFor(DEBUG2):
if log.isEnabledFor(DEBUG3):
- log.debug('QUERY: %s', json.dumps(query_data, indent=2))
+ log.debug("QUERY: %s", json.dumps(query_data, indent=2))
else:
- log_excludes = ('source_path', 'hash_extra_paths', 'paths')
+ log_excludes = ("source_path", "hash_extra_paths", "paths")
qd = {k: v for k, v in query_data.items() if k not in log_excludes}
- log.debug('QUERY (excerpt): %s', json.dumps(qd, indent=2))
+ log.debug("QUERY (excerpt): %s", json.dumps(qd, indent=2))
- query = datatree('prepare_query', **query_data)
+ query = datatree("prepare_query", **query_data)
tf_paths = query.paths
runtime = query.runtime
@@ -1017,29 +1704,34 @@ def prepare_command(args):
hash_extra_paths = query.hash_extra_paths
source_path = query.source_path
hash_extra = query.hash_extra
- recreate_missing_package = yesno_bool(args.recreate_missing_package)
+ recreate_missing_package = yesno_bool(
+ args.recreate_missing_package
+ if args.recreate_missing_package is not None
+ else query.recreate_missing_package
+ )
docker = query.docker
bpm = BuildPlanManager(args, log=log)
build_plan = bpm.plan(source_path, query)
if log.isEnabledFor(DEBUG2):
- log.debug('BUILD_PLAN: %s', json.dumps(build_plan, indent=2))
+ log.debug("BUILD_PLAN: %s", json.dumps(build_plan, indent=2))
# Expand a Terraform path.<cwd|root|module> references
hash_extra_paths = [p.format(path=tf_paths) for p in hash_extra_paths]
content_hash = bpm.hash(hash_extra_paths)
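+ # Hashing the build plan itself means any change to the build steps,
+ # not just to the sources, yields a new artifact filename.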
+ content_hash.update(json.dumps(build_plan, sort_keys=True).encode())
content_hash.update(runtime.encode())
content_hash.update(hash_extra.encode())
content_hash = content_hash.hexdigest()
# Generate a unique filename based on the hash.
- filename = os.path.join(artifacts_dir, '{}.zip'.format(content_hash))
+ zip_filename = os.path.join(artifacts_dir, "{}.zip".format(content_hash))
# Compute timestamp trigger
was_missing = False
- filename_path = os.path.join(os.getcwd(), filename)
+ filename_path = os.path.join(os.getcwd(), zip_filename)
if recreate_missing_package:
if os.path.exists(filename_path):
st = os.stat(filename_path)
@@ -1048,53 +1740,58 @@ def prepare_command(args):
timestamp = timestamp_now_ns()
was_missing = True
else:
- timestamp = ""
+ timestamp = ""
# Replace variables in the build command with calculated values.
build_data = {
- 'filename': filename,
- 'runtime': runtime,
- 'artifacts_dir': artifacts_dir,
- 'build_plan': build_plan,
+ "filename": zip_filename,
+ "runtime": runtime,
+ "artifacts_dir": artifacts_dir,
+ "build_plan": build_plan,
+ "quiet": query.quiet,
}
if docker:
- build_data['docker'] = docker
+ build_data["docker"] = docker
build_plan = json.dumps(build_data)
- build_plan_filename = os.path.join(artifacts_dir,
- '{}.plan.json'.format(content_hash))
+ build_plan_filename = os.path.join(
+ artifacts_dir, "{}.plan.json".format(content_hash)
+ )
if not os.path.exists(artifacts_dir):
- os.makedirs(artifacts_dir)
- with open(build_plan_filename, 'w') as f:
+ os.makedirs(artifacts_dir, exist_ok=True)
+ with open(build_plan_filename, "w") as f:
f.write(build_plan)
# Output the result to Terraform.
- json.dump({
- 'filename': filename,
- 'build_plan': build_plan,
- 'build_plan_filename': build_plan_filename,
- 'timestamp': str(timestamp),
- 'was_missing': 'true' if was_missing else 'false',
- }, sys.stdout, indent=2)
- sys.stdout.write('\n')
+ json.dump(
+ {
+ "filename": zip_filename,
+ "build_plan": build_plan,
+ "build_plan_filename": build_plan_filename,
+ "timestamp": str(timestamp),
+ "was_missing": "true" if was_missing else "false",
+ },
+ sys.stdout,
+ indent=2,
+ )
+ sys.stdout.write("\n")
def build_command(args):
"""
Builds a zip file from the source_dir or source_file.
- Installs dependencies with pip automatically.
+ Installs dependencies with pip or npm automatically.
"""
- log = logging.getLogger('build')
+ log = logging.getLogger("build")
dump_env()
if log.isEnabledFor(DEBUG2):
- log.debug('CMD: python3 %s', shlex_join(sys.argv))
+ log.debug("CMD: python3 %s", shlex_join(sys.argv))
with open(args.build_plan_file) as f:
query_data = json.load(f)
- query = datatree('build_query', **query_data)
+ query = datatree("build_query", **query_data)
runtime = query.runtime
filename = query.filename
@@ -1106,20 +1803,21 @@ def build_command(args):
timestamp = int(_timestamp)
if os.path.exists(filename) and not args.force:
- log.info('Reused: %s', shlex.quote(filename))
+ log.info("Reused: %s", shlex.quote(filename))
return
# Zip up the build plan and write it to the target filename.
# This will be used by the Lambda function as the source code package.
- with ZipWriteStream(filename) as zs:
+ with ZipWriteStream(filename, quiet=getattr(query, "quiet", False)) as zs:
bpm = BuildPlanManager(args, log=log)
bpm.execute(build_plan, zs, query)
os.utime(filename, ns=(timestamp, timestamp))
- log.info('Created: %s', shlex.quote(filename))
+ if not getattr(query, "quiet", False):
+ log.info("Created: %s", shlex.quote(filename))
if log.isEnabledFor(logging.DEBUG):
- with open(filename, 'rb') as f:
- log.info('Base64sha256: %s', source_code_hash(f.read()))
+ with open(filename, "rb") as f:
+ log.info("Base64sha256: %s", source_code_hash(f.read()))
def add_hidden_commands(sub_parsers):
@@ -1130,22 +1828,34 @@ def hidden_parser(name, **kwargs):
sp._choices_actions.pop() # XXX: help=argparse.SUPPRESS - doesn't work
return p
- p = hidden_parser('docker', help='Run docker build')
- p.set_defaults(command=lambda args: subprocess.call(docker_run_command(
- args.build_root, args.docker_command, args.runtime, interactive=True)))
- p.add_argument('build_root', help='A docker build root folder')
- p.add_argument('docker_command', help='A docker container command',
- metavar='command', nargs=argparse.REMAINDER)
- p.add_argument('-r', '--runtime', help='A docker image runtime',
- default='python3.8')
-
- p = hidden_parser('docker-image', help='Run docker build')
- p.set_defaults(command=lambda args: subprocess.call(docker_build_command(
- args.build_root, args.docker_file, args.tag)))
- p.add_argument('-t', '--tag', help='A docker image tag')
- p.add_argument('build_root', help='A docker build root folder')
- p.add_argument('docker_file', help='A docker file path',
- nargs=argparse.OPTIONAL)
+ p = hidden_parser("docker", help="Run docker build")
+ p.set_defaults(
+ command=lambda args: subprocess.call(
+ docker_run_command(
+ args.build_root, args.docker_command, args.runtime, interactive=True
+ )
+ )
+ )
+ p.add_argument("build_root", help="A docker build root folder")
+ p.add_argument(
+ "docker_command",
+ help="A docker container command",
+ metavar="command",
+ nargs=argparse.REMAINDER,
+ )
+ p.add_argument(
+ "-r", "--runtime", help="A docker image runtime", default="python3.12"
+ )
+
+ p = hidden_parser("docker-image", help="Run docker build")
+ p.set_defaults(
+ command=lambda args: subprocess.call(
+ docker_build_command(args.build_root, args.docker_file, args.tag)
+ )
+ )
+ p.add_argument("-t", "--tag", help="A docker image tag")
+ p.add_argument("build_root", help="A docker build root folder")
+ p.add_argument("docker_file", help="A docker file path", nargs=argparse.OPTIONAL)
def zip_cmd(args):
if args.verbose:
@@ -1153,27 +1863,33 @@ def zip_cmd(args):
with ZipWriteStream(args.zipfile) as zs:
zs.write_dirs(*args.dir, timestamp=args.timestamp)
if log.isEnabledFor(logging.DEBUG):
- zipinfo = shutil.which('zipinfo')
+ zipinfo = shutil.which("zipinfo")
if zipinfo:
- log.debug('-' * 80)
+ log.debug("-" * 80)
subprocess.call([zipinfo, args.zipfile])
- log.debug('-' * 80)
- log.debug('Source code hash: %s',
- source_code_hash(open(args.zipfile, 'rb').read()))
+ log.debug("-" * 80)
+ log.debug(
+ "Source code hash: %s",
+ source_code_hash(open(args.zipfile, "rb").read()),
+ )
- p = hidden_parser('zip', help='Zip folder with provided files timestamp')
+ p = hidden_parser("zip", help="Zip folder with provided files timestamp")
p.set_defaults(command=zip_cmd)
- p.add_argument('zipfile', help='Path to a zip file')
- p.add_argument('dir', nargs=argparse.ONE_OR_MORE,
- help='Path to a directory for packaging')
- p.add_argument('-t', '--timestamp', type=int,
- help='A timestamp to override for all zip members')
- p.add_argument('-v', '--verbose', action='store_true')
-
- p = hidden_parser('hash', help='Generate content hash for a file')
- p.set_defaults(
- command=lambda args: print(source_code_hash(args.file.read())))
- p.add_argument('file', help='Path to a file', type=argparse.FileType('rb'))
+ p.add_argument("zipfile", help="Path to a zip file")
+ p.add_argument(
+ "dir", nargs=argparse.ONE_OR_MORE, help="Path to a directory for packaging"
+ )
+ p.add_argument(
+ "-t",
+ "--timestamp",
+ type=int,
+ help="A timestamp to override for all zip members",
+ )
+ p.add_argument("-v", "--verbose", action="store_true")
+
+ p = hidden_parser("hash", help="Generate content hash for a file")
+ p.set_defaults(command=lambda args: print(source_code_hash(args.file.read())))
+ p.add_argument("file", help="Path to a file", type=argparse.FileType("rb"))
def args_parser():
@@ -1181,31 +1897,41 @@ def args_parser():
ap.set_defaults(command=lambda _: ap.print_usage())
sp = ap.add_subparsers(metavar="COMMAND")
- p = sp.add_parser('prepare',
- help='compute a filename hash for a zip archive')
+ p = sp.add_parser("prepare", help="compute a filename hash for a zip archive")
p.set_defaults(command=prepare_command)
- p = sp.add_parser('build',
- help='build and pack to a zip archive')
+ p = sp.add_parser("build", help="build and pack to a zip archive")
p.set_defaults(command=build_command)
- p.add_argument('--force', action='store_true',
- help='Force rebuilding even if a zip artifact exists')
- p.add_argument('-t', '--timestamp',
- dest='zip_file_timestamp', required=True,
- help='A zip file timestamp generated by the prepare command')
- p.add_argument('build_plan_file', metavar='PLAN_FILE',
- help='A build plan file provided by the prepare command')
+ p.add_argument(
+ "--force",
+ action="store_true",
+ help="Force rebuilding even if a zip artifact exists",
+ )
+ p.add_argument(
+ "-t",
+ "--timestamp",
+ dest="zip_file_timestamp",
+ required=True,
+ help="A zip file timestamp generated by the prepare command",
+ )
+ p.add_argument(
+ "build_plan_file",
+ metavar="PLAN_FILE",
+ help="A build plan file provided by the prepare command",
+ )
add_hidden_commands(sp)
return ap
def main():
ns = argparse.Namespace(
- pattern_comments=yesno_bool(os.environ.get(
- 'TF_LAMBDA_PACKAGE_PATTERN_COMMENTS', False)),
+ pattern_comments=yesno_bool(
+ os.environ.get("TF_LAMBDA_PACKAGE_PATTERN_COMMENTS", False)
+ ),
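+ # A None default lets recreate_missing_package from the prepare query
+ # take effect; the environment variable still wins when it is set.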
recreate_missing_package=os.environ.get(
- 'TF_RECREATE_MISSING_LAMBDA_PACKAGE', True),
- log_level=os.environ.get('TF_LAMBDA_PACKAGE_LOG_LEVEL', 'INFO'),
+ "TF_RECREATE_MISSING_LAMBDA_PACKAGE", None
+ ),
+ log_level=os.environ.get("TF_LAMBDA_PACKAGE_LOG_LEVEL", "INFO"),
)
p = args_parser()
@@ -1224,5 +1950,5 @@ def main():
exit(args.command(args))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/package.tf b/package.tf
index 07de800c..99078600 100644
--- a/package.tf
+++ b/package.tf
@@ -7,8 +7,7 @@ locals {
data "external" "archive_prepare" {
count = var.create && var.create_package ? 1 : 0
- program = [local.python, "${path.module}/package.py", "prepare"]
- working_dir = path.cwd
+ program = [local.python, "${path.module}/package.py", "prepare"]
query = {
paths = jsonencode({
@@ -18,18 +17,30 @@ data "external" "archive_prepare" {
})
docker = var.build_in_docker ? jsonencode({
- docker_pip_cache = var.docker_pip_cache
- docker_build_root = var.docker_build_root
- docker_file = var.docker_file
- docker_image = var.docker_image
- with_ssh_agent = var.docker_with_ssh_agent
+ docker_pip_cache = var.docker_pip_cache
+ docker_build_root = var.docker_build_root
+ docker_file = var.docker_file
+ docker_image = var.docker_image
+ with_ssh_agent = var.docker_with_ssh_agent
+ docker_additional_options = var.docker_additional_options
+ docker_entrypoint = var.docker_entrypoint
}) : null
- artifacts_dir = var.artifacts_dir
- runtime = var.runtime
- source_path = jsonencode(var.source_path)
- hash_extra = var.hash_extra
- hash_extra_paths = jsonencode(["${path.module}/package.py"])
+ artifacts_dir = var.artifacts_dir
+ runtime = var.runtime
+ source_path = jsonencode(var.source_path)
+ hash_extra = var.hash_extra
+ hash_extra_paths = jsonencode(
+ [
+ # Temporary fix when building from multiple locations
+ # We should take into account content of package.py when counting hash
+ # Related issue: https://github.com/terraform-aws-modules/terraform-aws-lambda/issues/63
+ # "${path.module}/package.py"
+ ]
+ )
+
+ recreate_missing_package = var.recreate_missing_package
+ quiet = var.quiet_archive_local_exec
}
}
@@ -39,7 +50,7 @@ data "external" "archive_prepare" {
resource "local_file" "archive_plan" {
count = var.create && var.create_package ? 1 : 0
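+ # The build plan can embed Docker settings, so when building in Docker
+ # it is marked sensitive to keep its contents out of plan output.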
- content = data.external.archive_prepare[0].result.build_plan
+ content = var.build_in_docker ? sensitive(data.external.archive_prepare[0].result.build_plan) : data.external.archive_prepare[0].result.build_plan
filename = data.external.archive_prepare[0].result.build_plan_filename
directory_permission = "0755"
file_permission = "0644"
@@ -51,7 +62,7 @@ resource "null_resource" "archive" {
triggers = {
filename = data.external.archive_prepare[0].result.filename
- timestamp = data.external.archive_prepare[0].result.timestamp
+ timestamp = var.trigger_on_package_timestamp ? data.external.archive_prepare[0].result.timestamp : null
}
provisioner "local-exec" {
@@ -59,8 +70,8 @@ resource "null_resource" "archive" {
local.python, "${path.module}/package.py", "build",
"--timestamp", data.external.archive_prepare[0].result.timestamp
]
- command = data.external.archive_prepare[0].result.build_plan_filename
- working_dir = path.cwd
+ command = data.external.archive_prepare[0].result.build_plan_filename
+ quiet = var.quiet_archive_local_exec
}
depends_on = [local_file.archive_plan]
diff --git a/tests/fixtures/node-app/index.js b/tests/fixtures/node-app/index.js
new file mode 100644
index 00000000..09d4352e
--- /dev/null
+++ b/tests/fixtures/node-app/index.js
@@ -0,0 +1 @@
+// test
diff --git a/tests/fixtures/node-app/package.json b/tests/fixtures/node-app/package.json
new file mode 100644
index 00000000..1bd4d69d
--- /dev/null
+++ b/tests/fixtures/node-app/package.json
@@ -0,0 +1,16 @@
+{
+ "name": "app",
+ "version": "1.0.0",
+ "description": "",
+ "main": "index.js",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1"
+ },
+ "author": "",
+ "license": "ISC",
+ "dependencies": {
+ },
+ "devDependencies": {
+ "axios": "^1.7.3"
+ }
+}
diff --git a/tests/fixtures/pyproject-unknown.toml b/tests/fixtures/pyproject-unknown.toml
new file mode 100644
index 00000000..4f0e31e0
--- /dev/null
+++ b/tests/fixtures/pyproject-unknown.toml
@@ -0,0 +1,2 @@
+[build-system]
+build-backend = "dummy"
diff --git a/tests/test_package_toml.py b/tests/test_package_toml.py
new file mode 100644
index 00000000..9eba3f4a
--- /dev/null
+++ b/tests/test_package_toml.py
@@ -0,0 +1,41 @@
+from package import get_build_system_from_pyproject_toml, BuildPlanManager
+from pytest import raises
+from unittest.mock import Mock
+
+
+def test_get_build_system_from_pyproject_toml_inexistent():
+ assert (
+ get_build_system_from_pyproject_toml("fixtures/inexistent/pyproject.toml")
+ is None
+ )
+
+
+def test_get_build_system_from_pyproject_toml_unknown():
+ assert (
+ get_build_system_from_pyproject_toml("fixtures/pyproject-unknown.toml") is None
+ )
+
+
+def test_build_manager_success_command():
+ bpm = BuildPlanManager(args=Mock())
+ # Should not have exception raised
+ bpm.execute(build_plan=[["sh", "/tmp", "pwd"]], zip_stream=None, query=None)
+
+
+def test_build_manager_failing_command():
+ bpm = BuildPlanManager(args=Mock())
+ with raises(Exception):
+ bpm.execute(
+ build_plan=[[["sh", "/tmp", "NOTACOMMAND"]]],
+ zip_stream=None,
+ query=None,
+ )
+
+
+def test_get_build_system_from_pyproject_toml_poetry():
+ assert (
+ get_build_system_from_pyproject_toml(
+ "examples/fixtures/python-app-poetry/pyproject.toml"
+ )
+ == "poetry"
+ )
diff --git a/tests/test_zip_source.py b/tests/test_zip_source.py
new file mode 100644
index 00000000..dd6750ca
--- /dev/null
+++ b/tests/test_zip_source.py
@@ -0,0 +1,50 @@
+import os
+from unittest.mock import MagicMock, Mock
+
+from package import BuildPlanManager
+
+
+def test_zip_source_path_sh_work_dir():
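+ # An "sh" step that cd's into a temp dir should change the working
+ # directory used by the subsequent zip step, so the zip source path
+ # must differ from the current directory.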
+ zs = Mock()
+ zs.write_dirs = MagicMock()
+
+ bpm = BuildPlanManager(args=Mock())
+
+ bpm.execute(
+ build_plan=[
+ [
+ ["sh", "cd $(mktemp -d)\n echo pip install"],
+ ["zip:embedded", ".", "./python"],
+ ]
+ ],
+ zip_stream=zs,
+ query=None,
+ )
+
+ zs.write_dirs.assert_called_once()
+
+ zip_source_path = zs.write_dirs.call_args_list[0][0][0]
+ assert zip_source_path != os.getcwd()
+
+
+def test_zip_source_path():
+ zs = Mock()
+ zs.write_dirs = MagicMock()
+
+ bpm = BuildPlanManager(args=Mock())
+
+ bpm.execute(
+ build_plan=[
+ [
+ ["sh", "echo pip install"],
+ ["zip:embedded", ".", "./python"],
+ ]
+ ],
+ zip_stream=zs,
+ query=None,
+ )
+
+ zs.write_dirs.assert_called_once()
+
+ zip_source_path = zs.write_dirs.call_args_list[0][0][0]
+ assert zip_source_path == os.getcwd()
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..f0297d3f
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+skipsdist=True
+
+[testenv]
+deps =
+ pytest==7.1.3
+commands =
+ python -m pytest {posargs} tests/
diff --git a/variables.tf b/variables.tf
index 852b89ea..6ea454f6 100644
--- a/variables.tf
+++ b/variables.tf
@@ -28,6 +28,30 @@ variable "create_role" {
default = true
}
+variable "create_lambda_function_url" {
+ description = "Controls whether the Lambda Function URL resource should be created"
+ type = bool
+ default = false
+}
+
+variable "create_sam_metadata" {
+ description = "Controls whether the SAM metadata null resource should be created"
+ type = bool
+ default = false
+}
+
+variable "putin_khuylo" {
+ description = "Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo!"
+ type = bool
+ default = true
+}
+
+variable "region" {
+ description = "Region where the resource(s) will be managed. Defaults to the region set in the provider configuration"
+ type = string
+ default = null
+}
+
###########
# Function
###########
@@ -38,6 +62,12 @@ variable "lambda_at_edge" {
default = false
}
+variable "lambda_at_edge_logs_all_regions" {
+ description = "Whether to specify a wildcard in IAM policy used by Lambda@Edge to allow logging in all regions"
+ type = bool
+ default = true
+}
+
variable "function_name" {
description = "A unique name for your Lambda Function"
type = string
@@ -54,15 +84,10 @@ variable "runtime" {
description = "Lambda Function runtime"
type = string
default = ""
-
- // validation {
- // condition = can(var.create && contains(["nodejs10.x", "nodejs12.x", "java8", "java11", "python2.7", " python3.6", "python3.7", "python3.8", "dotnetcore2.1", "dotnetcore3.1", "go1.x", "ruby2.5", "ruby2.7", "provided"], var.runtime))
- // error_message = "The runtime value must be one of supported by AWS Lambda."
- // }
}
variable "lambda_role" {
- description = " IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See Lambda Permission Model for more details."
+ description = " IAM role ARN attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See Lambda Permission Model for more details."
type = string
default = ""
}
@@ -73,12 +98,24 @@ variable "description" {
default = ""
}
+variable "code_signing_config_arn" {
+ description = "Amazon Resource Name (ARN) for a Code Signing Configuration"
+ type = string
+ default = null
+}
+
variable "layers" {
description = "List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function."
type = list(string)
default = null
}
+variable "architectures" {
+ description = "Instruction set architecture for your Lambda function. Valid values are [\"x86_64\"] and [\"arm64\"]."
+ type = list(string)
+ default = null
+}
+
variable "kms_key_arn" {
description = "The ARN of KMS key to use by your Lambda Function"
type = string
@@ -86,11 +123,17 @@ variable "kms_key_arn" {
}
variable "memory_size" {
- description = "Amount of memory in MB your Lambda Function can use at runtime. Valid value between 128 MB to 3008 MB, in 64 MB increments."
+ description = "Amount of memory in MB your Lambda Function can use at runtime. Valid value between 128 MB to 10,240 MB (10 GB), in 64 MB increments."
type = number
default = 128
}
+variable "ephemeral_storage_size" {
+ description = "Amount of ephemeral storage (/tmp) in MB your Lambda Function can use at runtime. Valid value between 512 MB to 10,240 MB (10 GB)."
+ type = number
+ default = 512
+}
+
variable "publish" {
description = "Whether to publish creation/change as new Lambda Function Version."
type = bool
@@ -139,18 +182,136 @@ variable "vpc_security_group_ids" {
default = null
}
+variable "ipv6_allowed_for_dual_stack" {
+ description = "Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets"
+ type = bool
+ default = null
+}
+
variable "tags" {
description = "A map of tags to assign to resources."
type = map(string)
default = {}
}
+variable "include_default_tag" {
+ description = "Set to false to not include the default tag in the tags map."
+ type = bool
+ default = true
+}
+
+variable "function_tags" {
+ description = "A map of tags to assign only to the lambda function"
+ type = map(string)
+ default = {}
+}
+
variable "s3_object_tags" {
description = "A map of tags to assign to S3 bucket object."
type = map(string)
default = {}
}
+variable "s3_object_tags_only" {
+ description = "Set to true to not merge tags with s3_object_tags. Useful to avoid breaching S3 Object 10 tag limit."
+ type = bool
+ default = false
+}
+
+variable "package_type" {
+ description = "The Lambda deployment package type. Valid options: Zip or Image"
+ type = string
+ default = "Zip"
+}
+
+variable "image_uri" {
+ description = "The ECR image URI containing the function's deployment package."
+ type = string
+ default = null
+}
+
+variable "image_config_entry_point" {
+ description = "The ENTRYPOINT for the docker image"
+ type = list(string)
+ default = []
+}
+
+variable "image_config_command" {
+ description = "The CMD for the docker image"
+ type = list(string)
+ default = []
+}
+
+variable "image_config_working_directory" {
+ description = "The working directory for the docker image"
+ type = string
+ default = null
+}
+
+variable "snap_start" {
+ description = "(Optional) Snap start settings for low-latency startups"
+ type = bool
+ default = false
+}
+
+variable "replace_security_groups_on_destroy" {
+ description = "(Optional) When true, all security groups defined in vpc_security_group_ids will be replaced with the default security group after the function is destroyed. Set the replacement_security_group_ids variable to use a custom list of security groups for replacement instead."
+ type = bool
+ default = null
+}
+
+variable "replacement_security_group_ids" {
+ description = "(Optional) List of security group IDs to assign to orphaned Lambda function network interfaces upon destruction. replace_security_groups_on_destroy must be set to true to use this attribute."
+ type = list(string)
+ default = null
+}
+
+variable "timeouts" {
+ description = "Define maximum timeout for creating, updating, and deleting Lambda Function resources"
+ type = map(string)
+ default = {}
+}
+
+variable "skip_destroy" {
+ description = "Set to true if you do not wish the function to be deleted at destroy time, and instead just remove the function from the Terraform state. Useful for Lambda@Edge functions attached to CloudFront distributions."
+ type = bool
+ default = null
+}
+
+###############
+# Function URL
+###############
+
+variable "create_unqualified_alias_lambda_function_url" {
+ description = "Whether to use unqualified alias pointing to $LATEST version in Lambda Function URL"
+ type = bool
+ default = true
+}
+
+variable "authorization_type" {
+ description = "The type of authentication that the Lambda Function URL uses. Set to 'AWS_IAM' to restrict access to authenticated IAM users only. Set to 'NONE' to bypass IAM authentication and create a public endpoint."
+ type = string
+ default = "NONE"
+}
+
+variable "cors" {
+ description = "CORS settings to be used by the Lambda Function URL"
+ type = any
+ default = {}
+}
+
+variable "invoke_mode" {
+ description = "Invoke mode of the Lambda Function URL. Valid values are BUFFERED (default) and RESPONSE_STREAM."
+ type = string
+ default = null
+}
+
+variable "s3_object_override_default_tags" {
+ description = "Whether to override the default_tags from provider? NB: S3 objects support a maximum of 10 tags."
+ type = bool
+ default = false
+}
+
########
# Layer
########
@@ -161,6 +322,12 @@ variable "layer_name" {
default = ""
}
+variable "layer_skip_destroy" {
+ description = "Whether to retain the old version of a previously deployed Lambda Layer."
+ type = bool
+ default = false
+}
+
variable "license_info" {
description = "License info for your Lambda Layer. Eg, MIT or full url of a license."
type = string
@@ -173,6 +340,12 @@ variable "compatible_runtimes" {
default = []
}
+variable "compatible_architectures" {
+ description = "A list of Architectures Lambda layer is compatible with. Currently x86_64 and arm64 can be specified."
+ type = list(string)
+ default = null
+}
+
############################
# Lambda Async Event Config
############################
@@ -224,7 +397,7 @@ variable "destination_on_success" {
##########################
variable "provisioned_concurrent_executions" {
- description = "Amount of capacity to allocate. Must be greater than or equal to 1."
+ description = "Amount of capacity to allocate. Set to 1 or greater to enable, or set to 0 to disable provisioned concurrency."
type = number
default = -1
}
@@ -251,6 +424,16 @@ variable "allowed_triggers" {
default = {}
}
+############################################
+# Lambda Event Source Mapping
+############################################
+
+variable "event_source_mapping" {
+ description = "Map of event source mapping"
+ type = any
+ default = {}
+}
+
#################
# CloudWatch Logs
#################
@@ -273,6 +456,18 @@ variable "cloudwatch_logs_kms_key_id" {
default = null
}
+variable "cloudwatch_logs_skip_destroy" {
+ description = "Whether to keep the log group (and any logs it may contain) at destroy time."
+ type = bool
+ default = false
+}
+
+variable "cloudwatch_logs_log_group_class" {
+ description = "Specified the log class of the log group. Possible values are: `STANDARD` or `INFREQUENT_ACCESS`"
+ type = string
+ default = null
+}
+
variable "cloudwatch_logs_tags" {
description = "A map of tags to assign to the resource."
type = map(string)
@@ -319,16 +514,34 @@ variable "role_tags" {
default = {}
}
+variable "role_maximum_session_duration" {
+ description = "Maximum session duration, in seconds, for the IAM role"
+ type = number
+ default = 3600
+}
+
###########
# Policies
###########
+variable "policy_name" {
+ description = "IAM policy name. It override the default value, which is the same as role_name"
+ type = string
+ default = null
+}
+
variable "attach_cloudwatch_logs_policy" {
description = "Controls whether CloudWatch Logs policy should be added to IAM role for Lambda Function"
type = bool
default = true
}
+variable "attach_create_log_group_permission" {
+ description = "Controls whether to add the create log group permission to the CloudWatch logs policy"
+ type = bool
+ default = true
+}
+
variable "attach_dead_letter_policy" {
description = "Controls whether SNS/SQS dead letter notification policy should be added to IAM role for Lambda Function"
type = bool
@@ -359,6 +572,12 @@ variable "attach_policy_json" {
default = false
}
+variable "attach_policy_jsons" {
+ description = "Controls whether policy_jsons should be added to IAM role for Lambda Function"
+ type = bool
+ default = false
+}
+
variable "attach_policy" {
description = "Controls whether policy should be added to IAM role for Lambda Function"
type = bool
@@ -371,6 +590,12 @@ variable "attach_policies" {
default = false
}
+variable "number_of_policy_jsons" {
+ description = "Number of policies JSON to attach to IAM role for Lambda Function"
+ type = number
+ default = 0
+}
+
variable "number_of_policies" {
description = "Number of policies to attach to IAM role for Lambda Function"
type = number
@@ -384,17 +609,29 @@ variable "attach_policy_statements" {
}
variable "trusted_entities" {
- description = "Lambda Function additional trusted entities for assuming roles (trust relationship)"
- type = list(string)
+ description = "List of additional trusted entities for assuming Lambda Function role (trust relationship)"
+ type = any
default = []
}
+variable "assume_role_policy_statements" {
+ description = "Map of dynamic policy statements for assuming Lambda Function role (trust relationship)"
+ type = any
+ default = {}
+}
+
variable "policy_json" {
description = "An additional policy document as JSON to attach to the Lambda Function role"
type = string
default = null
}
+variable "policy_jsons" {
+ description = "List of additional policy documents as JSON to attach to Lambda Function role"
+ type = list(string)
+ default = []
+}
+
variable "policy" {
description = "An additional policy document ARN to attach to the Lambda Function role"
type = string
@@ -413,6 +650,18 @@ variable "policy_statements" {
default = {}
}
+variable "file_system_arn" {
+ description = "The Amazon Resource Name (ARN) of the Amazon EFS Access Point that provides access to the file system."
+ type = string
+ default = null
+}
+
+variable "file_system_local_mount_path" {
+ description = "The path where the function can access the file system, starting with /mnt/."
+ type = string
+ default = null
+}
+
##########################
# Build artifact settings
##########################
@@ -423,6 +672,18 @@ variable "artifacts_dir" {
default = "builds"
}
+variable "s3_prefix" {
+ description = "Directory name where artifacts should be stored in the S3 bucket. If unset, the path from `artifacts_dir` is used"
+ type = string
+ default = null
+}
+
+variable "ignore_source_code_hash" {
+ description = "Whether to ignore changes to the function's source code hash. Set to true if you manage infrastructure and code deployments separately."
+ type = bool
+ default = false
+}
+
variable "local_existing_package" {
description = "The absolute path to an existing zip-file to use"
type = string
@@ -453,6 +714,24 @@ variable "s3_bucket" {
default = null
}
+variable "s3_acl" {
+ description = "The canned ACL to apply. Valid values are private, public-read, public-read-write, aws-exec-read, authenticated-read, bucket-owner-read, and bucket-owner-full-control. Defaults to private."
+ type = string
+ default = "private"
+}
+
+variable "s3_server_side_encryption" {
+ description = "Specifies server-side encryption of the object in S3. Valid values are \"AES256\" and \"aws:kms\"."
+ type = string
+ default = null
+}
+
+variable "s3_kms_key_id" {
+ description = "Specifies a custom KMS key to use for S3 object encryption."
+ type = string
+ default = null
+}
+
variable "source_path" {
description = "The absolute path to a local file or directory containing your Lambda source code"
type = any # string | list(string | map(any))
@@ -500,3 +779,71 @@ variable "docker_pip_cache" {
type = any
default = null
}
+
+variable "docker_additional_options" {
+ description = "Additional options to pass to the docker run command (e.g. to set environment variables, volumes, etc.)"
+ type = list(string)
+ default = []
+}
+
+variable "docker_entrypoint" {
+ description = "Path to the Docker entrypoint to use"
+ type = string
+ default = null
+}
+
+variable "recreate_missing_package" {
+ description = "Whether to recreate missing Lambda package if it is missing locally or not"
+ type = bool
+ default = true
+}
+
+variable "trigger_on_package_timestamp" {
+ description = "Whether to recreate the Lambda package if the timestamp changes"
+ type = bool
+ default = true
+}
+
+variable "quiet_archive_local_exec" {
+ description = "Whether to disable archive local execution output"
+ type = bool
+ default = true
+}
+
+############################################
+# Lambda Advanced Logging Settings
+############################################
+
+variable "logging_log_format" {
+ description = "The log format of the Lambda Function. Valid values are \"JSON\" or \"Text\"."
+ type = string
+ default = "Text"
+}
+
+variable "logging_application_log_level" {
+ description = "The application log level of the Lambda Function. Valid values are \"TRACE\", \"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", or \"FATAL\"."
+ type = string
+ default = "INFO"
+}
+
+variable "logging_system_log_level" {
+ description = "The system log level of the Lambda Function. Valid values are \"DEBUG\", \"INFO\", or \"WARN\"."
+ type = string
+ default = "INFO"
+}
+
+variable "logging_log_group" {
+ description = "The CloudWatch log group to send logs to."
+ type = string
+ default = null
+}
+
+############################################
+# Lambda Recursive Loop Settings
+############################################
+
+variable "recursive_loop" {
+ description = "Lambda function recursion configuration. Valid values are Allow or Terminate."
+ type = string
+ default = null
+}
diff --git a/versions.tf b/versions.tf
index c1b26983..8dea461c 100644
--- a/versions.tf
+++ b/versions.tf
@@ -1,7 +1,22 @@
terraform {
- required_version = "~> 0.12.6"
+ required_version = ">= 1.5.7"
required_providers {
- aws = "~> 2.46"
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ external = {
+ source = "hashicorp/external"
+ version = ">= 1.0"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
}
}
diff --git a/wrappers/README.md b/wrappers/README.md
new file mode 100644
index 00000000..954ea7d1
--- /dev/null
+++ b/wrappers/README.md
@@ -0,0 +1,100 @@
+# Wrapper for the root module
+
+The configuration in this directory contains an implementation of a single module wrapper pattern, which allows managing several copies of a module in places where using the native Terraform 0.13+ `for_each` feature is not feasible (e.g., with Terragrunt).
+
+You may want to use a single Terragrunt configuration file to manage multiple resources without duplicating `terragrunt.hcl` files for each copy of the same module.
+
+This wrapper does not implement any extra functionality.
+
+## Usage with Terragrunt
+
+`terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/lambda/aws//wrappers"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-lambda.git//wrappers?ref=master"
+}
+
+inputs = {
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Usage with Terraform
+
+```hcl
+module "wrapper" {
+ source = "terraform-aws-modules/lambda/aws//wrappers"
+
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Example: Manage multiple S3 buckets in one Terragrunt layer
+
+`eu-west-1/s3-buckets/terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/s3-bucket/aws//wrappers"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-s3-bucket.git//wrappers?ref=master"
+}
+
+inputs = {
+ defaults = {
+ force_destroy = true
+
+ attach_elb_log_delivery_policy = true
+ attach_lb_log_delivery_policy = true
+ attach_deny_insecure_transport_policy = true
+ attach_require_latest_tls_policy = true
+ }
+
+ items = {
+ bucket1 = {
+ bucket = "my-random-bucket-1"
+ }
+ bucket2 = {
+ bucket = "my-random-bucket-2"
+ tags = {
+ Secure = "probably"
+ }
+ }
+ }
+}
+```
diff --git a/wrappers/alias/README.md b/wrappers/alias/README.md
new file mode 100644
index 00000000..a296ced7
--- /dev/null
+++ b/wrappers/alias/README.md
@@ -0,0 +1,100 @@
+# Wrapper for module: `modules/alias`
+
+The configuration in this directory contains an implementation of a single module wrapper pattern, which allows managing several copies of a module in places where using the native Terraform 0.13+ `for_each` feature is not feasible (e.g., with Terragrunt).
+
+You may want to use a single Terragrunt configuration file to manage multiple resources without duplicating `terragrunt.hcl` files for each copy of the same module.
+
+This wrapper does not implement any extra functionality.
+
+## Usage with Terragrunt
+
+`terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/lambda/aws//wrappers/alias"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-lambda.git//wrappers/alias?ref=master"
+}
+
+inputs = {
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Usage with Terraform
+
+```hcl
+module "wrapper" {
+ source = "terraform-aws-modules/lambda/aws//wrappers/alias"
+
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Example: Manage multiple S3 buckets in one Terragrunt layer
+
+`eu-west-1/s3-buckets/terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/s3-bucket/aws//wrappers"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-s3-bucket.git//wrappers?ref=master"
+}
+
+inputs = {
+ defaults = {
+ force_destroy = true
+
+ attach_elb_log_delivery_policy = true
+ attach_lb_log_delivery_policy = true
+ attach_deny_insecure_transport_policy = true
+ attach_require_latest_tls_policy = true
+ }
+
+ items = {
+ bucket1 = {
+ bucket = "my-random-bucket-1"
+ }
+ bucket2 = {
+ bucket = "my-random-bucket-2"
+ tags = {
+ Secure = "probably"
+ }
+ }
+ }
+}
+```
diff --git a/wrappers/alias/main.tf b/wrappers/alias/main.tf
new file mode 100644
index 00000000..7729dd06
--- /dev/null
+++ b/wrappers/alias/main.tf
@@ -0,0 +1,25 @@
+module "wrapper" {
+ source = "../../modules/alias"
+
+ for_each = var.items
+
+ allowed_triggers = try(each.value.allowed_triggers, var.defaults.allowed_triggers, {})
+ create = try(each.value.create, var.defaults.create, true)
+ create_async_event_config = try(each.value.create_async_event_config, var.defaults.create_async_event_config, false)
+ create_qualified_alias_allowed_triggers = try(each.value.create_qualified_alias_allowed_triggers, var.defaults.create_qualified_alias_allowed_triggers, true)
+ create_qualified_alias_async_event_config = try(each.value.create_qualified_alias_async_event_config, var.defaults.create_qualified_alias_async_event_config, true)
+ create_version_allowed_triggers = try(each.value.create_version_allowed_triggers, var.defaults.create_version_allowed_triggers, true)
+ create_version_async_event_config = try(each.value.create_version_async_event_config, var.defaults.create_version_async_event_config, true)
+ description = try(each.value.description, var.defaults.description, "")
+ destination_on_failure = try(each.value.destination_on_failure, var.defaults.destination_on_failure, null)
+ destination_on_success = try(each.value.destination_on_success, var.defaults.destination_on_success, null)
+ event_source_mapping = try(each.value.event_source_mapping, var.defaults.event_source_mapping, {})
+ function_name = try(each.value.function_name, var.defaults.function_name, "")
+ function_version = try(each.value.function_version, var.defaults.function_version, "")
+ maximum_event_age_in_seconds = try(each.value.maximum_event_age_in_seconds, var.defaults.maximum_event_age_in_seconds, null)
+ maximum_retry_attempts = try(each.value.maximum_retry_attempts, var.defaults.maximum_retry_attempts, null)
+ name = try(each.value.name, var.defaults.name, "")
+ refresh_alias = try(each.value.refresh_alias, var.defaults.refresh_alias, true)
+ routing_additional_version_weights = try(each.value.routing_additional_version_weights, var.defaults.routing_additional_version_weights, {})
+ use_existing_alias = try(each.value.use_existing_alias, var.defaults.use_existing_alias, false)
+}
diff --git a/wrappers/alias/outputs.tf b/wrappers/alias/outputs.tf
new file mode 100644
index 00000000..ec6da5f4
--- /dev/null
+++ b/wrappers/alias/outputs.tf
@@ -0,0 +1,5 @@
+output "wrapper" {
+ description = "Map of outputs of a wrapper."
+ value = module.wrapper
+ # sensitive = false # No sensitive module output found
+}
diff --git a/wrappers/alias/variables.tf b/wrappers/alias/variables.tf
new file mode 100644
index 00000000..a6ea0962
--- /dev/null
+++ b/wrappers/alias/variables.tf
@@ -0,0 +1,11 @@
+variable "defaults" {
+ description = "Map of default values which will be used for each item."
+ type = any
+ default = {}
+}
+
+variable "items" {
+ description = "Maps of items to create a wrapper from. Values are passed through to the module."
+ type = any
+ default = {}
+}
diff --git a/wrappers/alias/versions.tf b/wrappers/alias/versions.tf
new file mode 100644
index 00000000..db13b0a8
--- /dev/null
+++ b/wrappers/alias/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ }
+}
diff --git a/wrappers/deploy/README.md b/wrappers/deploy/README.md
new file mode 100644
index 00000000..5d24d8b2
--- /dev/null
+++ b/wrappers/deploy/README.md
@@ -0,0 +1,100 @@
+# Wrapper for module: `modules/deploy`
+
+The configuration in this directory contains an implementation of a single module wrapper pattern, which allows managing several copies of a module in places where using the native Terraform 0.13+ `for_each` feature is not feasible (e.g., with Terragrunt).
+
+You may want to use a single Terragrunt configuration file to manage multiple resources without duplicating `terragrunt.hcl` files for each copy of the same module.
+
+This wrapper does not implement any extra functionality.
+
+## Usage with Terragrunt
+
+`terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/lambda/aws//wrappers/deploy"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-lambda.git//wrappers/deploy?ref=master"
+}
+
+inputs = {
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Usage with Terraform
+
+```hcl
+module "wrapper" {
+ source = "terraform-aws-modules/lambda/aws//wrappers/deploy"
+
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Example: Manage multiple S3 buckets in one Terragrunt layer
+
+`eu-west-1/s3-buckets/terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/s3-bucket/aws//wrappers"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-s3-bucket.git//wrappers?ref=master"
+}
+
+inputs = {
+ defaults = {
+ force_destroy = true
+
+ attach_elb_log_delivery_policy = true
+ attach_lb_log_delivery_policy = true
+ attach_deny_insecure_transport_policy = true
+ attach_require_latest_tls_policy = true
+ }
+
+ items = {
+ bucket1 = {
+ bucket = "my-random-bucket-1"
+ }
+ bucket2 = {
+ bucket = "my-random-bucket-2"
+ tags = {
+ Secure = "probably"
+ }
+ }
+ }
+}
+```
diff --git a/wrappers/deploy/main.tf b/wrappers/deploy/main.tf
new file mode 100644
index 00000000..47cc3d8b
--- /dev/null
+++ b/wrappers/deploy/main.tf
@@ -0,0 +1,41 @@
+module "wrapper" {
+ source = "../../modules/deploy"
+
+ for_each = var.items
+
+ after_allow_traffic_hook_arn = try(each.value.after_allow_traffic_hook_arn, var.defaults.after_allow_traffic_hook_arn, "")
+ alarm_enabled = try(each.value.alarm_enabled, var.defaults.alarm_enabled, false)
+ alarm_ignore_poll_alarm_failure = try(each.value.alarm_ignore_poll_alarm_failure, var.defaults.alarm_ignore_poll_alarm_failure, false)
+ alarms = try(each.value.alarms, var.defaults.alarms, [])
+ alias_name = try(each.value.alias_name, var.defaults.alias_name, "")
+ app_name = try(each.value.app_name, var.defaults.app_name, "")
+ attach_hooks_policy = try(each.value.attach_hooks_policy, var.defaults.attach_hooks_policy, true)
+ attach_triggers_policy = try(each.value.attach_triggers_policy, var.defaults.attach_triggers_policy, false)
+ auto_rollback_enabled = try(each.value.auto_rollback_enabled, var.defaults.auto_rollback_enabled, true)
+ auto_rollback_events = try(each.value.auto_rollback_events, var.defaults.auto_rollback_events, ["DEPLOYMENT_STOP_ON_ALARM"])
+ aws_cli_command = try(each.value.aws_cli_command, var.defaults.aws_cli_command, "aws")
+ before_allow_traffic_hook_arn = try(each.value.before_allow_traffic_hook_arn, var.defaults.before_allow_traffic_hook_arn, "")
+ codedeploy_principals = try(each.value.codedeploy_principals, var.defaults.codedeploy_principals, ["codedeploy.amazonaws.com"])
+ codedeploy_role_name = try(each.value.codedeploy_role_name, var.defaults.codedeploy_role_name, "")
+ create = try(each.value.create, var.defaults.create, true)
+ create_app = try(each.value.create_app, var.defaults.create_app, false)
+ create_codedeploy_role = try(each.value.create_codedeploy_role, var.defaults.create_codedeploy_role, true)
+ create_deployment = try(each.value.create_deployment, var.defaults.create_deployment, false)
+ create_deployment_group = try(each.value.create_deployment_group, var.defaults.create_deployment_group, false)
+ current_version = try(each.value.current_version, var.defaults.current_version, "")
+ deployment_config_name = try(each.value.deployment_config_name, var.defaults.deployment_config_name, "CodeDeployDefault.LambdaAllAtOnce")
+ deployment_group_name = try(each.value.deployment_group_name, var.defaults.deployment_group_name, "")
+ description = try(each.value.description, var.defaults.description, "")
+ force_deploy = try(each.value.force_deploy, var.defaults.force_deploy, false)
+ function_name = try(each.value.function_name, var.defaults.function_name, "")
+ get_deployment_sleep_timer = try(each.value.get_deployment_sleep_timer, var.defaults.get_deployment_sleep_timer, 5)
+ interpreter = try(each.value.interpreter, var.defaults.interpreter, ["/bin/bash", "-c"])
+ run_deployment = try(each.value.run_deployment, var.defaults.run_deployment, false)
+ save_deploy_script = try(each.value.save_deploy_script, var.defaults.save_deploy_script, false)
+ tags = try(each.value.tags, var.defaults.tags, {})
+ target_version = try(each.value.target_version, var.defaults.target_version, "")
+ triggers = try(each.value.triggers, var.defaults.triggers, {})
+ use_existing_app = try(each.value.use_existing_app, var.defaults.use_existing_app, false)
+ use_existing_deployment_group = try(each.value.use_existing_deployment_group, var.defaults.use_existing_deployment_group, false)
+ wait_deployment_completion = try(each.value.wait_deployment_completion, var.defaults.wait_deployment_completion, false)
+}
diff --git a/wrappers/deploy/outputs.tf b/wrappers/deploy/outputs.tf
new file mode 100644
index 00000000..ec6da5f4
--- /dev/null
+++ b/wrappers/deploy/outputs.tf
@@ -0,0 +1,5 @@
+output "wrapper" {
+ description = "Map of outputs of a wrapper."
+ value = module.wrapper
+ # sensitive = false # No sensitive module output found
+}
diff --git a/wrappers/deploy/variables.tf b/wrappers/deploy/variables.tf
new file mode 100644
index 00000000..a6ea0962
--- /dev/null
+++ b/wrappers/deploy/variables.tf
@@ -0,0 +1,11 @@
+variable "defaults" {
+ description = "Map of default values which will be used for each item."
+ type = any
+ default = {}
+}
+
+variable "items" {
+ description = "Maps of items to create a wrapper from. Values are passed through to the module."
+ type = any
+ default = {}
+}
diff --git a/wrappers/deploy/versions.tf b/wrappers/deploy/versions.tf
new file mode 100644
index 00000000..ddb64c76
--- /dev/null
+++ b/wrappers/deploy/versions.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/wrappers/docker-build/README.md b/wrappers/docker-build/README.md
new file mode 100644
index 00000000..093f989b
--- /dev/null
+++ b/wrappers/docker-build/README.md
@@ -0,0 +1,100 @@
+# Wrapper for module: `modules/docker-build`
+
+The configuration in this directory contains an implementation of a single module wrapper pattern, which allows managing several copies of a module in places where using the native Terraform 0.13+ `for_each` feature is not feasible (e.g., with Terragrunt).
+
+You may want to use a single Terragrunt configuration file to manage multiple resources without duplicating `terragrunt.hcl` files for each copy of the same module.
+
+This wrapper does not implement any extra functionality.
+
+## Usage with Terragrunt
+
+`terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/lambda/aws//wrappers/docker-build"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-lambda.git//wrappers/docker-build?ref=master"
+}
+
+inputs = {
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
+
+## Usage with Terraform
+
+```hcl
+module "wrapper" {
+ source = "terraform-aws-modules/lambda/aws//wrappers/docker-build"
+
+ defaults = { # Default values
+ create = true
+ tags = {
+ Terraform = "true"
+ Environment = "dev"
+ }
+ }
+
+ items = {
+ my-item = {
+ # omitted... can be any argument supported by the module
+ }
+ my-second-item = {
+ # omitted... can be any argument supported by the module
+ }
+ # omitted...
+ }
+}
+```
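+
+For `docker-build` specifically, each item typically points at its own build context while sharing ECR settings through `defaults`; a minimal sketch (names and paths are illustrative, the arguments mirror this wrapper's `main.tf`):
+
+```hcl
+module "wrapper" {
+  source = "terraform-aws-modules/lambda/aws//wrappers/docker-build"
+
+  defaults = {
+    create_ecr_repo = true
+    scan_on_push    = true
+  }
+
+  items = {
+    api = {
+      ecr_repo    = "my-api"
+      source_path = "apps/api"
+    }
+    worker = {
+      ecr_repo    = "my-worker"
+      source_path = "apps/worker"
+    }
+  }
+}
+```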
+
+## Example: Manage multiple S3 buckets in one Terragrunt layer
+
+`eu-west-1/s3-buckets/terragrunt.hcl`:
+
+```hcl
+terraform {
+ source = "tfr:///terraform-aws-modules/s3-bucket/aws//wrappers"
+ # Alternative source:
+ # source = "git::git@github.com:terraform-aws-modules/terraform-aws-s3-bucket.git//wrappers?ref=master"
+}
+
+inputs = {
+ defaults = {
+ force_destroy = true
+
+ attach_elb_log_delivery_policy = true
+ attach_lb_log_delivery_policy = true
+ attach_deny_insecure_transport_policy = true
+ attach_require_latest_tls_policy = true
+ }
+
+ items = {
+ bucket1 = {
+ bucket = "my-random-bucket-1"
+ }
+ bucket2 = {
+ bucket = "my-random-bucket-2"
+ tags = {
+ Secure = "probably"
+ }
+ }
+ }
+}
+```
diff --git a/wrappers/docker-build/main.tf b/wrappers/docker-build/main.tf
new file mode 100644
index 00000000..61a99a93
--- /dev/null
+++ b/wrappers/docker-build/main.tf
@@ -0,0 +1,28 @@
+module "wrapper" {
+ source = "../../modules/docker-build"
+
+ for_each = var.items
+
+ build_args = try(each.value.build_args, var.defaults.build_args, {})
+ build_target = try(each.value.build_target, var.defaults.build_target, null)
+ builder = try(each.value.builder, var.defaults.builder, null)
+ cache_from = try(each.value.cache_from, var.defaults.cache_from, [])
+ create_ecr_repo = try(each.value.create_ecr_repo, var.defaults.create_ecr_repo, false)
+ create_sam_metadata = try(each.value.create_sam_metadata, var.defaults.create_sam_metadata, false)
+ docker_file_path = try(each.value.docker_file_path, var.defaults.docker_file_path, "Dockerfile")
+ ecr_address = try(each.value.ecr_address, var.defaults.ecr_address, null)
+ ecr_force_delete = try(each.value.ecr_force_delete, var.defaults.ecr_force_delete, true)
+ ecr_repo = try(each.value.ecr_repo, var.defaults.ecr_repo, null)
+ ecr_repo_lifecycle_policy = try(each.value.ecr_repo_lifecycle_policy, var.defaults.ecr_repo_lifecycle_policy, null)
+ ecr_repo_tags = try(each.value.ecr_repo_tags, var.defaults.ecr_repo_tags, {})
+ force_remove = try(each.value.force_remove, var.defaults.force_remove, false)
+ image_tag = try(each.value.image_tag, var.defaults.image_tag, null)
+ image_tag_mutability = try(each.value.image_tag_mutability, var.defaults.image_tag_mutability, "MUTABLE")
+ keep_locally = try(each.value.keep_locally, var.defaults.keep_locally, false)
+ keep_remotely = try(each.value.keep_remotely, var.defaults.keep_remotely, false)
+ platform = try(each.value.platform, var.defaults.platform, null)
+ scan_on_push = try(each.value.scan_on_push, var.defaults.scan_on_push, false)
+ source_path = try(each.value.source_path, var.defaults.source_path, null)
+ triggers = try(each.value.triggers, var.defaults.triggers, {})
+ use_image_tag = try(each.value.use_image_tag, var.defaults.use_image_tag, true)
+}
diff --git a/wrappers/docker-build/outputs.tf b/wrappers/docker-build/outputs.tf
new file mode 100644
index 00000000..ec6da5f4
--- /dev/null
+++ b/wrappers/docker-build/outputs.tf
@@ -0,0 +1,5 @@
+output "wrapper" {
+ description = "Map of outputs of a wrapper."
+ value = module.wrapper
+ # sensitive = false # No sensitive module output found
+}
diff --git a/wrappers/docker-build/variables.tf b/wrappers/docker-build/variables.tf
new file mode 100644
index 00000000..a6ea0962
--- /dev/null
+++ b/wrappers/docker-build/variables.tf
@@ -0,0 +1,11 @@
+variable "defaults" {
+ description = "Map of default values which will be used for each item."
+ type = any
+ default = {}
+}
+
+variable "items" {
+ description = "Maps of items to create a wrapper from. Values are passed through to the module."
+ type = any
+ default = {}
+}
diff --git a/wrappers/docker-build/versions.tf b/wrappers/docker-build/versions.tf
new file mode 100644
index 00000000..b203b635
--- /dev/null
+++ b/wrappers/docker-build/versions.tf
@@ -0,0 +1,18 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ docker = {
+ source = "kreuzwerker/docker"
+ version = ">= 3.5.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
+ }
+}
diff --git a/wrappers/main.tf b/wrappers/main.tf
new file mode 100644
index 00000000..eb78bb41
--- /dev/null
+++ b/wrappers/main.tf
@@ -0,0 +1,139 @@
+module "wrapper" {
+ source = "../"
+
+ for_each = var.items
+
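+  # Each argument resolves in order of precedence: the per-item value,
+  # the shared var.defaults entry, then the fallback built into this wrapper.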
+ allowed_triggers = try(each.value.allowed_triggers, var.defaults.allowed_triggers, {})
+ architectures = try(each.value.architectures, var.defaults.architectures, null)
+ artifacts_dir = try(each.value.artifacts_dir, var.defaults.artifacts_dir, "builds")
+ assume_role_policy_statements = try(each.value.assume_role_policy_statements, var.defaults.assume_role_policy_statements, {})
+ attach_async_event_policy = try(each.value.attach_async_event_policy, var.defaults.attach_async_event_policy, false)
+ attach_cloudwatch_logs_policy = try(each.value.attach_cloudwatch_logs_policy, var.defaults.attach_cloudwatch_logs_policy, true)
+ attach_create_log_group_permission = try(each.value.attach_create_log_group_permission, var.defaults.attach_create_log_group_permission, true)
+ attach_dead_letter_policy = try(each.value.attach_dead_letter_policy, var.defaults.attach_dead_letter_policy, false)
+ attach_network_policy = try(each.value.attach_network_policy, var.defaults.attach_network_policy, false)
+ attach_policies = try(each.value.attach_policies, var.defaults.attach_policies, false)
+ attach_policy = try(each.value.attach_policy, var.defaults.attach_policy, false)
+ attach_policy_json = try(each.value.attach_policy_json, var.defaults.attach_policy_json, false)
+ attach_policy_jsons = try(each.value.attach_policy_jsons, var.defaults.attach_policy_jsons, false)
+ attach_policy_statements = try(each.value.attach_policy_statements, var.defaults.attach_policy_statements, false)
+ attach_tracing_policy = try(each.value.attach_tracing_policy, var.defaults.attach_tracing_policy, false)
+ authorization_type = try(each.value.authorization_type, var.defaults.authorization_type, "NONE")
+ build_in_docker = try(each.value.build_in_docker, var.defaults.build_in_docker, false)
+ cloudwatch_logs_kms_key_id = try(each.value.cloudwatch_logs_kms_key_id, var.defaults.cloudwatch_logs_kms_key_id, null)
+ cloudwatch_logs_log_group_class = try(each.value.cloudwatch_logs_log_group_class, var.defaults.cloudwatch_logs_log_group_class, null)
+ cloudwatch_logs_retention_in_days = try(each.value.cloudwatch_logs_retention_in_days, var.defaults.cloudwatch_logs_retention_in_days, null)
+ cloudwatch_logs_skip_destroy = try(each.value.cloudwatch_logs_skip_destroy, var.defaults.cloudwatch_logs_skip_destroy, false)
+ cloudwatch_logs_tags = try(each.value.cloudwatch_logs_tags, var.defaults.cloudwatch_logs_tags, {})
+ code_signing_config_arn = try(each.value.code_signing_config_arn, var.defaults.code_signing_config_arn, null)
+ compatible_architectures = try(each.value.compatible_architectures, var.defaults.compatible_architectures, null)
+ compatible_runtimes = try(each.value.compatible_runtimes, var.defaults.compatible_runtimes, [])
+ cors = try(each.value.cors, var.defaults.cors, {})
+ create = try(each.value.create, var.defaults.create, true)
+ create_async_event_config = try(each.value.create_async_event_config, var.defaults.create_async_event_config, false)
+ create_current_version_allowed_triggers = try(each.value.create_current_version_allowed_triggers, var.defaults.create_current_version_allowed_triggers, true)
+ create_current_version_async_event_config = try(each.value.create_current_version_async_event_config, var.defaults.create_current_version_async_event_config, true)
+ create_function = try(each.value.create_function, var.defaults.create_function, true)
+ create_lambda_function_url = try(each.value.create_lambda_function_url, var.defaults.create_lambda_function_url, false)
+ create_layer = try(each.value.create_layer, var.defaults.create_layer, false)
+ create_package = try(each.value.create_package, var.defaults.create_package, true)
+ create_role = try(each.value.create_role, var.defaults.create_role, true)
+ create_sam_metadata = try(each.value.create_sam_metadata, var.defaults.create_sam_metadata, false)
+ create_unqualified_alias_allowed_triggers = try(each.value.create_unqualified_alias_allowed_triggers, var.defaults.create_unqualified_alias_allowed_triggers, true)
+ create_unqualified_alias_async_event_config = try(each.value.create_unqualified_alias_async_event_config, var.defaults.create_unqualified_alias_async_event_config, true)
+ create_unqualified_alias_lambda_function_url = try(each.value.create_unqualified_alias_lambda_function_url, var.defaults.create_unqualified_alias_lambda_function_url, true)
+ dead_letter_target_arn = try(each.value.dead_letter_target_arn, var.defaults.dead_letter_target_arn, null)
+ description = try(each.value.description, var.defaults.description, "")
+ destination_on_failure = try(each.value.destination_on_failure, var.defaults.destination_on_failure, null)
+ destination_on_success = try(each.value.destination_on_success, var.defaults.destination_on_success, null)
+ docker_additional_options = try(each.value.docker_additional_options, var.defaults.docker_additional_options, [])
+ docker_build_root = try(each.value.docker_build_root, var.defaults.docker_build_root, "")
+ docker_entrypoint = try(each.value.docker_entrypoint, var.defaults.docker_entrypoint, null)
+ docker_file = try(each.value.docker_file, var.defaults.docker_file, "")
+ docker_image = try(each.value.docker_image, var.defaults.docker_image, "")
+ docker_pip_cache = try(each.value.docker_pip_cache, var.defaults.docker_pip_cache, null)
+ docker_with_ssh_agent = try(each.value.docker_with_ssh_agent, var.defaults.docker_with_ssh_agent, false)
+ environment_variables = try(each.value.environment_variables, var.defaults.environment_variables, {})
+ ephemeral_storage_size = try(each.value.ephemeral_storage_size, var.defaults.ephemeral_storage_size, 512)
+ event_source_mapping = try(each.value.event_source_mapping, var.defaults.event_source_mapping, {})
+ file_system_arn = try(each.value.file_system_arn, var.defaults.file_system_arn, null)
+ file_system_local_mount_path = try(each.value.file_system_local_mount_path, var.defaults.file_system_local_mount_path, null)
+ function_name = try(each.value.function_name, var.defaults.function_name, "")
+ function_tags = try(each.value.function_tags, var.defaults.function_tags, {})
+ handler = try(each.value.handler, var.defaults.handler, "")
+ hash_extra = try(each.value.hash_extra, var.defaults.hash_extra, "")
+ ignore_source_code_hash = try(each.value.ignore_source_code_hash, var.defaults.ignore_source_code_hash, false)
+ image_config_command = try(each.value.image_config_command, var.defaults.image_config_command, [])
+ image_config_entry_point = try(each.value.image_config_entry_point, var.defaults.image_config_entry_point, [])
+ image_config_working_directory = try(each.value.image_config_working_directory, var.defaults.image_config_working_directory, null)
+ image_uri = try(each.value.image_uri, var.defaults.image_uri, null)
+ include_default_tag = try(each.value.include_default_tag, var.defaults.include_default_tag, true)
+ invoke_mode = try(each.value.invoke_mode, var.defaults.invoke_mode, null)
+ ipv6_allowed_for_dual_stack = try(each.value.ipv6_allowed_for_dual_stack, var.defaults.ipv6_allowed_for_dual_stack, null)
+ kms_key_arn = try(each.value.kms_key_arn, var.defaults.kms_key_arn, null)
+ lambda_at_edge = try(each.value.lambda_at_edge, var.defaults.lambda_at_edge, false)
+ lambda_at_edge_logs_all_regions = try(each.value.lambda_at_edge_logs_all_regions, var.defaults.lambda_at_edge_logs_all_regions, true)
+ lambda_role = try(each.value.lambda_role, var.defaults.lambda_role, "")
+ layer_name = try(each.value.layer_name, var.defaults.layer_name, "")
+ layer_skip_destroy = try(each.value.layer_skip_destroy, var.defaults.layer_skip_destroy, false)
+ layers = try(each.value.layers, var.defaults.layers, null)
+ license_info = try(each.value.license_info, var.defaults.license_info, "")
+ local_existing_package = try(each.value.local_existing_package, var.defaults.local_existing_package, null)
+ logging_application_log_level = try(each.value.logging_application_log_level, var.defaults.logging_application_log_level, "INFO")
+ logging_log_format = try(each.value.logging_log_format, var.defaults.logging_log_format, "Text")
+ logging_log_group = try(each.value.logging_log_group, var.defaults.logging_log_group, null)
+ logging_system_log_level = try(each.value.logging_system_log_level, var.defaults.logging_system_log_level, "INFO")
+ maximum_event_age_in_seconds = try(each.value.maximum_event_age_in_seconds, var.defaults.maximum_event_age_in_seconds, null)
+ maximum_retry_attempts = try(each.value.maximum_retry_attempts, var.defaults.maximum_retry_attempts, null)
+ memory_size = try(each.value.memory_size, var.defaults.memory_size, 128)
+ number_of_policies = try(each.value.number_of_policies, var.defaults.number_of_policies, 0)
+ number_of_policy_jsons = try(each.value.number_of_policy_jsons, var.defaults.number_of_policy_jsons, 0)
+ package_type = try(each.value.package_type, var.defaults.package_type, "Zip")
+ policies = try(each.value.policies, var.defaults.policies, [])
+ policy = try(each.value.policy, var.defaults.policy, null)
+ policy_json = try(each.value.policy_json, var.defaults.policy_json, null)
+ policy_jsons = try(each.value.policy_jsons, var.defaults.policy_jsons, [])
+ policy_name = try(each.value.policy_name, var.defaults.policy_name, null)
+ policy_statements = try(each.value.policy_statements, var.defaults.policy_statements, {})
+ provisioned_concurrent_executions = try(each.value.provisioned_concurrent_executions, var.defaults.provisioned_concurrent_executions, -1)
+ publish = try(each.value.publish, var.defaults.publish, false)
+ putin_khuylo = try(each.value.putin_khuylo, var.defaults.putin_khuylo, true)
+ quiet_archive_local_exec = try(each.value.quiet_archive_local_exec, var.defaults.quiet_archive_local_exec, true)
+ recreate_missing_package = try(each.value.recreate_missing_package, var.defaults.recreate_missing_package, true)
+ recursive_loop = try(each.value.recursive_loop, var.defaults.recursive_loop, null)
+ region = try(each.value.region, var.defaults.region, null)
+ replace_security_groups_on_destroy = try(each.value.replace_security_groups_on_destroy, var.defaults.replace_security_groups_on_destroy, null)
+ replacement_security_group_ids = try(each.value.replacement_security_group_ids, var.defaults.replacement_security_group_ids, null)
+ reserved_concurrent_executions = try(each.value.reserved_concurrent_executions, var.defaults.reserved_concurrent_executions, -1)
+ role_description = try(each.value.role_description, var.defaults.role_description, null)
+ role_force_detach_policies = try(each.value.role_force_detach_policies, var.defaults.role_force_detach_policies, true)
+ role_maximum_session_duration = try(each.value.role_maximum_session_duration, var.defaults.role_maximum_session_duration, 3600)
+ role_name = try(each.value.role_name, var.defaults.role_name, null)
+ role_path = try(each.value.role_path, var.defaults.role_path, null)
+ role_permissions_boundary = try(each.value.role_permissions_boundary, var.defaults.role_permissions_boundary, null)
+ role_tags = try(each.value.role_tags, var.defaults.role_tags, {})
+ runtime = try(each.value.runtime, var.defaults.runtime, "")
+ s3_acl = try(each.value.s3_acl, var.defaults.s3_acl, "private")
+ s3_bucket = try(each.value.s3_bucket, var.defaults.s3_bucket, null)
+ s3_existing_package = try(each.value.s3_existing_package, var.defaults.s3_existing_package, null)
+ s3_kms_key_id = try(each.value.s3_kms_key_id, var.defaults.s3_kms_key_id, null)
+ s3_object_override_default_tags = try(each.value.s3_object_override_default_tags, var.defaults.s3_object_override_default_tags, false)
+ s3_object_storage_class = try(each.value.s3_object_storage_class, var.defaults.s3_object_storage_class, "ONEZONE_IA")
+ s3_object_tags = try(each.value.s3_object_tags, var.defaults.s3_object_tags, {})
+ s3_object_tags_only = try(each.value.s3_object_tags_only, var.defaults.s3_object_tags_only, false)
+ s3_prefix = try(each.value.s3_prefix, var.defaults.s3_prefix, null)
+ s3_server_side_encryption = try(each.value.s3_server_side_encryption, var.defaults.s3_server_side_encryption, null)
+ skip_destroy = try(each.value.skip_destroy, var.defaults.skip_destroy, null)
+ snap_start = try(each.value.snap_start, var.defaults.snap_start, false)
+ source_path = try(each.value.source_path, var.defaults.source_path, null)
+ store_on_s3 = try(each.value.store_on_s3, var.defaults.store_on_s3, false)
+ tags = try(each.value.tags, var.defaults.tags, {})
+ timeout = try(each.value.timeout, var.defaults.timeout, 3)
+ timeouts = try(each.value.timeouts, var.defaults.timeouts, {})
+ tracing_mode = try(each.value.tracing_mode, var.defaults.tracing_mode, null)
+ trigger_on_package_timestamp = try(each.value.trigger_on_package_timestamp, var.defaults.trigger_on_package_timestamp, true)
+ trusted_entities = try(each.value.trusted_entities, var.defaults.trusted_entities, [])
+ use_existing_cloudwatch_log_group = try(each.value.use_existing_cloudwatch_log_group, var.defaults.use_existing_cloudwatch_log_group, false)
+ vpc_security_group_ids = try(each.value.vpc_security_group_ids, var.defaults.vpc_security_group_ids, null)
+ vpc_subnet_ids = try(each.value.vpc_subnet_ids, var.defaults.vpc_subnet_ids, null)
+}
diff --git a/wrappers/outputs.tf b/wrappers/outputs.tf
new file mode 100644
index 00000000..ec6da5f4
--- /dev/null
+++ b/wrappers/outputs.tf
@@ -0,0 +1,5 @@
+output "wrapper" {
+ description = "Map of outputs of a wrapper."
+ value = module.wrapper
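+  # Keyed by the keys of var.items, e.g. module.wrapper["my-item"].lambda_function_arn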
+ # sensitive = false # No sensitive module output found
+}
diff --git a/wrappers/variables.tf b/wrappers/variables.tf
new file mode 100644
index 00000000..a6ea0962
--- /dev/null
+++ b/wrappers/variables.tf
@@ -0,0 +1,11 @@
+variable "defaults" {
+ description = "Map of default values which will be used for each item."
+ type = any
+ default = {}
+}
+
+variable "items" {
+ description = "Maps of items to create a wrapper from. Values are passed through to the module."
+ type = any
+ default = {}
+}
diff --git a/wrappers/versions.tf b/wrappers/versions.tf
new file mode 100644
index 00000000..8dea461c
--- /dev/null
+++ b/wrappers/versions.tf
@@ -0,0 +1,22 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 6.0"
+ }
+ external = {
+ source = "hashicorp/external"
+ version = ">= 1.0"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = ">= 1.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 2.0"
+ }
+ }
+}