I am new to Terraform and I am having difficulty configuring vpc_config in a Lambda function.
main.tf
resource "aws_lambda_function" "lambda" {
function_name = var.function_name
s3_bucket = var.s3_bucket_name
s3_key = var.s3_key
vpc_config {
security_group_ids = ["${var.lambda_security_group_id}"]
subnet_ids = ["${split(",", var.lambda_subnet_id)}"]
}
#source_code_hash = data.archive_file.zip.output_base64sha256
role = aws_iam_role.iam_for_lambda.arn
handler = "welcome.lambda_handler"
runtime = "python3.9"
timeout = var.timeout
memory_size = var.lambda_mem_size
}
variables.tf
variable "lambda_security_group_id" {
type = list(string)
}
variable "lambda_subnet_id" {
type = list(string)
}
terraform.tfvars
lambda_security_group_id = ["sg-0aabcef7795c7e092", "sg-0f218ddc9fb47341d"]
lambda_subnet_id = ["subnet-0d786711ca50ab0f7", "subnet-06341798f99bc9849"]
Please guide me from here.
I think you need something like this, since the variables are already lists of strings. It would also be a good idea to rename the variables to the plural form, lambda_security_group_ids and lambda_subnet_ids, since they are lists.
vpc_config {
security_group_ids = var.lambda_security_group_id
subnet_ids = var.lambda_subnet_id
}
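For reference, a minimal sketch with the variables renamed to their plural form (the other aws_lambda_function arguments and the tfvars values stay as they are):
variable "lambda_security_group_ids" {
  type = list(string)
}

variable "lambda_subnet_ids" {
  type = list(string)
}

resource "aws_lambda_function" "lambda" {
  # ... function_name, s3_bucket, s3_key, role, handler, etc. unchanged ...

  vpc_config {
    security_group_ids = var.lambda_security_group_ids
    subnet_ids         = var.lambda_subnet_ids
  }
}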
More info can be found in the Terraform AWS provider documentation for aws_lambda_function.
I am new to Terraform and am trying to use two for_each arguments in a single resource block:
resource "aws_cloudwatch_event_rule" "event_rule" {
name = "${local.service_name}-trigger"
description = "Schedule for synthetic tests lambda function"
for_each = var.cron_schedule
schedule_expression = each.value.cron_schedule
for_each = var.enable_event_schedule_trigger
is_enabled = each.value.enable_event_schedule_trigger
}
resource "aws_cloudwatch_event_target" "target_lambda" {
target_id = "${local.service_name}"
arn = aws_lambda_function.synthetictests.arn
rule = aws_cloudwatch_event_rule.event_rule.name
}
but I am getting the following error:
The argument "for_each" was already set at lambda_trigers.tf:4,3-11. Each
argument may be set only once.
Variables
variable "cron_schedule" {
type = list(string)
description = "Cron schedule of lambda trigger via event bridge"
default = []
}
variable "enable_event_schedule_trigger" {
type = list(bool)
default = ["false"]
description = "flag to enable or disable event trigger. Disabled by
default"
}
Please suggest.
You can use zipmap to combine the two lists into a single map and iterate over it with a single for_each:
resource "aws_cloudwatch_event_rule" "schedule_rule" {
name = "${local.service_name}-trigger"
description = "Schedule for synthetic tests lambda function"
for_each = zipmap(var.cron_schedule, var.enable_event_schedule_trigger)
schedule_expression = each.key
is_enabled = each.value
}
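To illustrate, assuming hypothetical values for the two lists, zipmap pairs them element by element, so each.key is the schedule expression and each.value is the enable flag:
locals {
  # hypothetical example values for the two input lists
  schedules = zipmap(
    ["cron(0 12 * * ? *)", "cron(0 0 * * ? *)"],
    [true, false]
  )
  # result: { "cron(0 12 * * ? *)" = true, "cron(0 0 * * ? *)" = false }
}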
Update
Since aws_cloudwatch_event_rule uses for_each, aws_cloudwatch_event_target should probably also use it:
resource "aws_cloudwatch_event_target" "target_lambda" {
for_each = aws_cloudwatch_event_rule.schedule_rule
target_id = "${local.service_name}"
arn = aws_lambda_function.synthetictests.arn
rule = each.value.name
}
Terraform CLI and Terraform AWS Provider Version
Installed from https://releases.hashicorp.com/terraform/0.13.5/terraform_0.13.5_linux_amd64.zip
hashicorp/aws v3.15.0
Affected Resource(s)
aws_rds_cluster
aws_rds_cluster_instance
Terraform Configuration Files
# inside ./modules/rds/main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
}
required_version = "~> 0.13"
}
provider "aws" {
alias = "primary"
}
provider "aws" {
alias = "dr"
}
locals {
region_tags = ["primary", "dr"]
db_name = "${var.project_name}-${var.stage}-db"
db_cluster_0 = "${local.db_name}-cluster-${local.region_tags[0]}"
db_cluster_1 = "${local.db_name}-cluster-${local.region_tags[1]}"
db_instance_name = "${local.db_name}-instance"
}
resource "aws_rds_global_cluster" "global_db" {
global_cluster_identifier = "${var.project_name}-${var.stage}"
database_name = "${var.project_name}${var.stage}db"
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
// force_destroy = true
}
resource "aws_rds_cluster" "primary_cluster" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[0]}"
# the database name does not allow dashes:
database_name = "${var.project_name}${var.stage}db"
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
master_username = var.username
master_password = var.password
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.primary.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_0
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "primary" {
depends_on = [aws_rds_global_cluster.global_db]
provider = aws.primary
cluster_identifier = aws_rds_cluster.primary_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.primary.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster" "dr_cluster" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = "${local.db_name}-cluster-${local.region_tags[1]}"
# db name not allowed to be specified on secondary regions
# The engine and engine_version must be repeated in aws_rds_global_cluster,
# aws_rds_cluster, and aws_rds_cluster_instance to
# avoid "Value for engine should match" error
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
engine_mode = "global"
global_cluster_identifier = aws_rds_global_cluster.global_db.id
# backtrack and multi-master not supported by Aurora Global.
# cannot specify username/password in cross-region replication cluster:
backup_retention_period = 5
preferred_backup_window = "07:00-09:00"
db_subnet_group_name = aws_db_subnet_group.dr.id
# We must have these values, because destroying or rolling back requires them
skip_final_snapshot = true
final_snapshot_identifier = "ci-aurora-cluster-backup"
tags = {
Name = local.db_cluster_1
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_instance" "dr_instance" {
depends_on = [aws_rds_cluster_instance.primary, aws_rds_global_cluster.global_db]
provider = aws.dr
cluster_identifier = aws_rds_cluster.dr_cluster.id
engine = "aurora-mysql"
engine_version = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
instance_class = "db.${var.instance_class}.${var.instance_size}"
db_subnet_group_name = aws_db_subnet_group.dr.id
tags = {
Name = local.db_instance_name
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "primary" {
name = "${local.db_name}-subnetgroup"
subnet_ids = var.subnet_ids
provider = aws.primary
tags = {
Name = "primary_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_db_subnet_group" "dr" {
provider = aws.dr
name = "${local.db_name}-subnetgroup"
subnet_ids = var.dr_subnet_ids
tags = {
Name = "dr_subnet_group"
Stage = var.stage
CreatedBy = var.created_by
}
}
resource "aws_rds_cluster_parameter_group" "default" {
name = "rds-cluster-pg"
family = "aurora-mysql${var.mysql_version}"
description = "RDS default cluster parameter group"
parameter {
name = "character_set_server"
value = "utf8"
}
parameter {
name = "character_set_client"
value = "utf8"
}
parameter {
name = "aurora_parallel_query"
value = "ON"
apply_method = "pending-reboot"
}
}
Inside ./modules/sns/main.tf, this is the resource I'm adding when calling terraform apply from within the ./modules directory:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/main.tf:
terraform {
backend "s3" {
bucket = "terraform-remote-state-s3-bucket-unique-name"
key = "terraform.tfstate"
region = "us-east-2"
dynamodb_table = "TerraformLockTable"
}
}
provider "aws" {
alias = "primary"
region = var.region
}
provider "aws" {
alias = "dr"
region = var.dr_region
}
module "vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.primary
}
}
module "dr_vpc" {
stage = var.stage
source = "./vpc"
providers = {
aws = aws.dr
}
}
module "vpc_security_group" {
source = "./vpc_security_group"
vpc_id = module.vpc.vpc_id
providers = {
aws = aws.primary
}
}
module "rds" {
source = "./rds"
stage = var.stage
created_by = var.created_by
vpc_id = module.vpc.vpc_id
subnet_ids = [module.vpc.subnet_a_id, module.vpc.subnet_b_id, module.vpc.subnet_c_id]
dr_subnet_ids = [module.dr_vpc.subnet_a_id, module.dr_vpc.subnet_b_id, module.dr_vpc.subnet_c_id]
region = var.region
username = var.rds_username
password = var.rds_password
providers = {
aws.primary = aws.primary
aws.dr = aws.dr
}
}
module "sns_start" {
stage = var.stage
source = "./sns"
topic_name = "start"
created_by = var.created_by
}
./modules/variables.tf:
variable "region" {
default = "us-east-2"
}
variable "dr_region" {
default = "us-west-2"
}
variable "service" {
type = string
default = "foo-back"
description = "service to match what serverless framework deploys"
}
variable "stage" {
type = string
default = "sandbox"
description = "The stage to deploy: sandbox, dev, qa, uat, or prod"
validation {
condition = can(regex("sandbox|dev|qa|uat|prod", var.stage))
error_message = "The stage value must be a valid stage: sandbox, dev, qa, uat, or prod."
}
}
variable "created_by" {
description = "Company or vendor name followed by the username part of the email address"
}
variable "rds_username" {
description = "Username for rds"
}
variable "rds_password" {
description = "Password for rds"
}
./modules/sns/main.tf:
resource "aws_sns_topic" "foo_topic" {
name = "foo-${var.stage}-${var.topic_name}"
tags = {
Name = "foo-${var.stage}-${var.topic_name}"
Stage = var.stage
CreatedBy = var.created_by
CreatedOn = timestamp()
}
}
./modules/sns/output.tf:
output "sns_topic_arn" {
value = aws_sns_topic.foo_topic.arn
}
Debug Output
Both outputs have modified keys, names, account IDs, etc:
The plan output from running terraform apply:
https://gist.github.com/ystoneman/95df711ee0a11d44e035b9f8f39b75f3
The state before applying: https://gist.github.com/ystoneman/5c842769c28e1ae5969f9aaff1556b37
Expected Behavior
The entire ./modules/main.tf had already been created, and the only thing that was added was the SNS module, so only the SNS module should be created.
Actual Behavior
But instead, the RDS resources are affected too, and Terraform "claims" that engine_mode has changed from provisioned to global, even though it already was global according to the console.
The plan output also says that cluster_identifier is only known after apply and therefore forces replacement; however, I think the cluster_identifier is necessary to let the aws_rds_cluster know it belongs to the aws_rds_global_cluster, and the aws_rds_cluster_instance know it belongs to the aws_rds_cluster, respectively.
Steps to Reproduce
comment out the module "sns_start"
cd ./modules
terraform apply (the state file linked above reflects the state after this step)
uncomment the module "sns_start"
terraform apply (this is the point at which the debug output above was captured)
Important Factoids
This problem happens whether I run it from my Mac or within AWS CodeBuild.
References
It seems like "AWS Terraform tried to destroy and rebuild RDS cluster" references this too, but it is not specific to a Global Cluster, where you do need identifiers so that instances and clusters know what they belong to.
It seems like you are using an outdated version of the aws provider and are specifying the engine_mode incorrectly. There was a bug ticket relating to this: https://github.com/hashicorp/terraform-provider-aws/issues/16088
It is fixed in version 3.15.0, which you can require via:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.15.0"
}
}
required_version = "~> 0.13"
}
Additionally, you should drop the engine_mode property from your Terraform configuration completely.
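For example, the primary cluster would then look roughly like this (a sketch; all other arguments stay as in your configuration):
resource "aws_rds_cluster" "primary_cluster" {
  # ... other arguments unchanged from your module ...
  engine                    = "aurora-mysql"
  engine_version            = "${var.mysql_version}.mysql_aurora.${var.aurora_version}"
  # engine_mode removed entirely, per the issue linked above
  global_cluster_identifier = aws_rds_global_cluster.global_db.id
}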
I'm currently having an issue with my aws_s3_bucket_notification resource creation. Whenever I attempt to deploy this resource, I receive this error:
Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
I've tried setting depends_on parameters and adjusting permissions. One interesting thing: in my main.tf file I'm creating two Lambda functions. Both are extremely similar (they vary only by code). My "controller" configuration deploys with no issue, but my "chunker" function seems to have an issue creating the S3 notification. I have included both configs for comparison.
#S3
resource "aws_s3_bucket" "ancb" {
for_each = toset(var.ancb_bucket)
bucket = format("ancb-%s-%s-%s",var.env,var.product_name,each.value)
acl = "private"
versioning {
enabled = true
}
tags = {
Environment = var.env
Terraform = true
}
}
#Chunker
resource "aws_lambda_function" "ancb_chunker" {
function_name = format("ancb-chunker-%s-%s",var.env,var.product_name)
s3_bucket = aws_s3_bucket.ancb["config"].id
s3_key = var.lambda_zip_chunker
handler = "handler.chunk"
runtime = "nodejs8.10"
role = aws_iam_role.lambda_exec.arn
environment {
variables = {
ORIGINAL_BUCKET = aws_s3_bucket.ancb["original"].id
TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
ENVIRONMENT = var.env
CHUNK_SIZE = 5000
}
}
tags = {
Environment = var.env
Terraform = true
}
depends_on = [
aws_s3_bucket_object.ancb["chunker.zip"],
aws_s3_bucket.ancb["chunker"]
]
}
resource "aws_lambda_permission" "ancb_chunker_s3" {
statement_id = "AllowExecutionFromS3Bucket-Chunker"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.ancb_controller.arn
principal = "s3.amazonaws.com"
source_arn = aws_s3_bucket.ancb["original"].arn
}
resource "aws_s3_bucket_notification" "chunker" {
bucket = aws_s3_bucket.ancb["original"].id
lambda_function {
lambda_function_arn = aws_lambda_function.ancb_chunker.arn
events = ["s3:ObjectCreated:*"]
}
depends_on = [
aws_lambda_permission.ancb_chunker_s3,
aws_lambda_function.ancb_chunker,
aws_s3_bucket.ancb["original"]
]
}
#Controller
resource "aws_lambda_function" "ancb_controller" {
function_name = format("ancb-controller-%s-%s",var.env,var.product_name)
s3_bucket = aws_s3_bucket.ancb["config"].id
s3_key = var.lambda_zip_controller
handler = "handler.controller"
runtime = "nodejs8.10"
role = aws_iam_role.lambda_exec.arn
environment {
variables = {
DESTINATION_BUCKET = aws_s3_bucket.ancb["destination"].id
ENVIRONMENT = var.env
ERROR_BUCKET = aws_s3_bucket.ancb["error"].id
GEOCODIO_APIKEY = <insert>
GEOCODIO_ENDPOINT = <insert>
GEOCODIO_VERSION = <insert>
ORIGINAL_BUCKET = aws_s3_bucket.ancb["original"].id
SOURCE_BUCKET = aws_s3_bucket.ancb["source"].id
TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
WORKING_BUCKET = aws_s3_bucket.ancb["working"].id
}
}
tags = {
Environment = var.env
Terraform = true
}
depends_on = [
aws_s3_bucket_object.ancb["controller.zip"]
]
}
resource "aws_lambda_permission" "ancb_controller_s3" {
statement_id = "AllowExecutionFromS3Bucket-Controller"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.ancb_controller.arn
principal = "s3.amazonaws.com"
source_arn = aws_s3_bucket.ancb["source"].arn
}
resource "aws_s3_bucket_notification" "controller" {
bucket = aws_s3_bucket.ancb["source"].id
lambda_function {
lambda_function_arn = aws_lambda_function.ancb_controller.arn
events = ["s3:ObjectCreated:*"]
}
depends_on = [
aws_lambda_permission.ancb_controller_s3,
aws_s3_bucket.ancb["source"]
]
}
UPDATE: If I manually create the trigger and run terraform apply again, Terraform is able to move forward with no problem.
I have the following code in my main.tf file:
provider "aws" {
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
region = "us-east-1"
alias = "us-east-1"
}
provider "aws" {
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
region = "us-west-1"
alias = "us-west-1"
}
module "us-east_vpc" {
source = "./setup-networking"
providers = {
"aws.region" = "aws.us-east-1"
}
}
module "us-west_vpc" {
source = "./setup-networking"
providers = {
"aws.region" = "aws.us-west-1"
}
}
And then in my modules file I have:
provider "aws" {
alias = "region"
}
resource "aws_vpc" "default" {
provider = "aws.region"
cidr_block = "${lookup(var.vpc_cidr, ${aws.region.region})}"
enable_dns_hostnames = true
tags {
Name = "AWS VPC"
}
}
resource "aws_internet_gateway" "default" {
provider = "aws.region"
vpc_id = "${aws_vpc.default.id}"
}
resource "aws_subnet" "default" {
provider = "aws.region"
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${lookup(var.subnet_cidr, ${aws.region.region})}"
availability_zone = "aws.region"
tags {
Name = "AWS Subnet"
}
}
resource "aws_route_table" "default" {
provider = "aws.region"
vpc_id = "${aws_vpc.default.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.default.id}"
}
tags {
Name = "Main Gateway"
}
}
As you can see in the module code, I am trying to do a lookup to find the VPC CIDR block and the subnet CIDR block from map variables.
The trouble is that I can't seem to sort out how to get the region to be used as a lookup value.
If I hard code these values:
cidr_block = "10.10.0.0/16"
cidr_block = "10.10.10.0/24"
The script works as expected but I don't want to hard code the values.
Can someone with more Terraform experience help me understand how I can properly reference the region to lookup the correct value?
I was looking for the same answer for a different problem: I wanted to include the region in the name of a role, and I was able to get the info by doing this:
1.- Create a file like data.tf and add this info:
data "aws_region" "current" {}
2.- Get the info from the data source by referencing it in any .tf file:
name = "${var.vpc-name}-${data.aws_region.current.name}-Bastion-Role"
This way it will get the region where you are executing the code, and you don't have to mess with the provider.tf file.
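Putting those two pieces together, a minimal sketch (the bastion role and its inline trust policy are hypothetical, shown only to make the example complete):
data "aws_region" "current" {}

resource "aws_iam_role" "bastion" {
  # e.g. "myvpc-us-east-1-Bastion-Role"
  name = "${var.vpc-name}-${data.aws_region.current.name}-Bastion-Role"

  # standard EC2 trust policy so the resource is valid on its own
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": { "Service": "ec2.amazonaws.com" },
    "Action": "sts:AssumeRole"
  }]
}
EOF
}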
You can get the region that's currently in use by the provider by using the aws_region data source.
So in your case you could do something like this:
provider "aws" {
alias = "region"
}
data "aws_region" "current" {
provider = "aws.region"
}
resource "aws_vpc" "default" {
provider = "aws.region"
cidr_block = "${lookup(var.vpc_cidr, ${data.aws_region.current.name})}"
enable_dns_hostnames = true
tags {
Name = "AWS VPC"
}
}
...
provider "aws" {
alias = "region"
}
data "aws_region" "current" {
provider = "aws.region"
}
data "aws_availability_zone" "current" {
provider = "aws.region"
name = "${data.aws_region.current.name}a"
}
resource "aws_vpc" "default" {
provider = "aws.region"
cidr_block = "${lookup(var.vpc_cidr, data.aws_availability_zone.current.name)}"
enable_dns_hostnames = true
tags {
Name = "${data.aws_region.current.name} Security VPC1"
Region = "${data.aws_region.current.name}"
Account = "Security"
}
}
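For the lookup itself, var.vpc_cidr (and similarly var.subnet_cidr) needs to be a map keyed by whatever the data source returns, e.g. region names for the first variant. A sketch with illustrative values (the first CIDR is the one you hard-coded):
variable "vpc_cidr" {
  default = {
    us-east-1 = "10.10.0.0/16"
    us-west-1 = "10.20.0.0/16"
  }
}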
I am new to Terraform and ran into an issue when trying to pass variables to a .tf file; I tried to use terraform.tfvars / variables.tf.
./terraform apply -var-file="terraform.tfvars"
Failed to load root config module: Error parsing variables.tf: At 54:17: illegal char
What am I missing here?
Terraform Version: Terraform v0.9.2
main.tf:
provider "aws" {
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
region = "${var.aws_region}"
allowed_account_ids = ["${var.aws_account_id}"]
}
resource "aws_instance" "db" {
ami = "ami-49c9295"
instance_type = "t2.micro"
tags {
Name = "test"
}
connection {
user = "ubuntu"
}
security_groups = ["sg-ccc943b0"]
availability_zone = "${var.availability_zone}"
subnet_id = "${var.subnet_id}"
}
terraform.tfvars:
aws_profile = "default"
aws_access_key = "xxxxxx"
aws_secret_key = "xxxxxx"
aws_account_id = "xxxxxx"
key_name = "keyname"
key_path = "/home/user/.ssh/user.pem"
aws_region = "us-east-1"
subnet_id = "subnet-51997e7a"
vpc_security_group_ids = "mysql"
instance_type = "t2.xlarge"
availability_zone = "us-east-1a"
variables.tf:
variable "key_name" {
description = "Name of the SSH keypair to use in AWS."
default = "keypairname"
}
variable "key_path" {
description = "Path to the private portion of the SSH key specified."
default = "/home/user/.ssh/mypem.pem"
}
variable "aws_region" {
description = "AWS region to launch servers."
default = "us-east-1"
}
variable "aws_access_key" {
decscription = "AWS Access Key"
default = "xxxxxx"
}
variable "aws_secret_key" {
description = "AWS Secret Key"
default = "xxxxxx"
}
variable "aws_account_id" {
description = "AWS Account ID"
default = "xxxxxx"
}
variable "subnet_id" {
description = "Subnet ID to use in VPC"
default = "subnet-51997e7a"
}
variable "vpc_security_group_ids" {
description = "vpc_security_group_ids"
default = "sec"
}
variable "instance_type" {
description = "Instance type"
default = "t2.xlarge"
}
variable "instance_name" {
description = "Instance Name"
default = "test"
}
variable "availability_zone" {
description = "availability_zone"
default = "us-east-1a"
}
variable "aws_amis" {
default = {
"us-east-1": "ami-49c9295f",
"eu-west-1": "ami-49c9295f",
"us-west-1": "ami-49c9295f",
"us-west-2": "ami-49c9295f"
}
}
Update
After removing the variable "aws_amis" block from variables.tf, I ran into another issue:
Failed to load root config module: Error loading variables.tf: 1 error(s) occurred:
* variable[aws_access_key]: invalid key: decscription
The aws_amis variable being used as a lookup map looks incorrectly formatted to me. Instead it should probably be of the format:
variable "aws_amis" {
default = {
us-east-1 = "ami-49c9295f"
eu-west-1 = "ami-49c9295f"
us-west-1 = "ami-49c9295f"
us-west-2 = "ami-49c9295f"
}
}
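The map can then be used in place of the hard-coded AMI, for example (a sketch in the question's interpolation style; other arguments stay as in your main.tf):
resource "aws_instance" "db" {
  ami           = "${lookup(var.aws_amis, var.aws_region)}"
  instance_type = "${var.instance_type}"
  # ... other arguments as in your main.tf ...
}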
As an aside, Terraform looks for a terraform.tfvars file by default, so you can drop -var-file="terraform.tfvars"; you only need to pass the -var-file option if you want to use a differently named file (such as prod.tfvars). Also, the error from your update is caused by the misspelled decscription key in the aws_access_key variable; it should be description.