Relative paths in Terraform

I am trying to create an AWS lambda Function using terraform.
My terraform directory looks like this:
terraform
├── iam-policies
│   └── main.tf
├── lambda
│   ├── files/
│   └── main.tf
└── main.tf
I have my lambda function stored inside /terraform/lambda/files/lambda_function.py.
Whenever I run terraform apply, a "null_resource" executes some commands on the local machine that zip the Python file:
variable "pythonfile" {
description = "lambda function python filename"
type = "string"
}
resource "null_resource" "lambda_preconditions" {
triggers {
always_run = "${uuid()}"
}
provisioner "local-exec" {
command = "rm -rf ${path.module}/files/zips"
}
provisioner "local-exec" {
command = "mkdir -p ${path.module}/files/zips"
}
provisioner "local-exec" {
command = "cp -R ${path.module}/files/${var.pythonfile} ${path.module}/files/zips/lambda_function.py"
}
provisioner "local-exec" {
command = "cd ${path.module}/files/zips && zip -r lambda.zip ."
}
}
My "aws_lambda_function" resource looks like this.
resource "aws_lambda_function" "lambda_function" {
filename = "${path.module}/files/zips/lambda.zip"
function_name = "${format("%s-%s-%s-lambda-function", var.name, var.environment, var.function_name)}"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "lambda_function.lambda_handler"
source_code_hash = "${base64sha256(format("%s/files/zips/lambda.zip", path.module))}", length(path.cwd) + 1, -1)}")}"
runtime = "${var.function_runtime}"
timeout = "${var.function_timeout}"
memory_size = "${var.function_memory}"
environment {
variables = {
region = "${var.region}"
name = "${var.name}"
environment = "${var.environment}"
}
}
vpc_config {
subnet_ids = ["${var.subnet_ids}"]
security_group_ids = ["${aws_security_group.lambda_sg.id}"]
}
depends_on = [
"null_resource.lambda_preconditions"
]
}
Problem:
Whenever I change the lambda_function.py file and run terraform apply again, everything works fine, but the actual code in the Lambda function does not change. Also, if I delete all the Terraform state files and apply again, the new change is propagated without any problem.
What could be the possible reason for this?

Instead of using null_resource, I used the archive_file data source, which recreates the zip file automatically when changes are detected, and then referenced it from the lambda resource's source_code_hash attribute. (Most likely the original approach failed because file() and base64sha256() are evaluated while the plan is being built, before the local-exec provisioners rebuild the zip, so the hash never reflected the new code.)
archive_file data source
data "archive_file" "lambda_zip" {
type = "zip"
output_path = "${path.module}/files/zips/lambda.zip"
source {
content = "${file("${path.module}/files/ebs_cleanup_lambda.py")}"
filename = "lambda_function.py"
}
}
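If the function ever grows beyond a single file, the same data source can zip an entire directory via source_dir instead of an inline source block. A sketch, assuming the sources are moved into a hypothetical files/src directory (output_path must live outside the zipped directory, or the archive would include its own previous output):
data "archive_file" "lambda_zip" {
  type        = "zip"
  source_dir  = "${path.module}/files/src"          # hypothetical directory holding all .py files
  output_path = "${path.module}/files/zips/lambda.zip"
}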
The lambda resource
resource "aws_lambda_function" "lambda_function" {
filename = "${path.module}/files/zips/lambda.zip"
function_name = "${format("%s-%s-%s-lambda-function", var.name, var.environment, var.function_name)}"
role = "${aws_iam_role.iam_for_lambda.arn}"
handler = "lambda_function.lambda_handler"
source_code_hash = "${data.archive_file.lambda_zip.output_base64sha256}"
runtime = "${var.function_runtime}"
timeout = "${var.function_timeout}"
memory_size = "${var.function_memory}"
environment {
variables = {
region = "${var.region}"
name = "${var.name}"
environment = "${var.environment}"
}
}
vpc_config {
subnet_ids = ["${var.subnet_ids}"]
security_group_ids = ["${aws_security_group.lambda_sg.id}"]
}
}
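On Terraform 0.12 and later the "${...}" quoting around whole expressions is unnecessary; a sketch of the same wiring in modern syntax, with the unchanged attributes elided:
resource "aws_lambda_function" "lambda_function" {
  filename         = data.archive_file.lambda_zip.output_path          # tie the filename to the archive as well
  source_code_hash = data.archive_file.lambda_zip.output_base64sha256
  # ... remaining attributes as above ...
}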

Related

Error: unexpected format for ID (), expected EVENTBUSNAME/RULENAME or RULENAME

I'm trying to deploy some event rules using Terraform. From what I've seen in the docs, my (JSON) format is fine. I can't figure out why it's throwing that error.
resource "aws_kinesis_firehose_delivery_stream" "kinesis_stream" {
name = var.delivery_stream_name
destination = "s3"
s3_configuration {
role_arn = aws_iam_role.kinesis_data_firehose_role.arn
bucket_arn = aws_s3_bucket.s3_bucket.arn
}
}
resource "aws_cloudwatch_event_rule" "successful_sign_in_rule" {
description = "Auth0 User Successfully signed in"
event_bus_name = aws_cloudwatch_event_bus.event_bridge_event_bus.arn
event_pattern = <<EOF
{
"detail-type": [
"s"
]
}
EOF
}
resource "aws_cloudwatch_event_target" "successful_sign_in_rule_target" {
rule = aws_cloudwatch_event_rule.successful_sign_in_rule.name
arn = aws_kinesis_firehose_delivery_stream.kinesis_stream.arn
}

Error creating aws_s3_notification with Terraform

I'm currently having an issue with my aws_s3_notification resource creation. Whenever I attempt to deploy this resource, I receive this error:
Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
I've tried setting depends_on parameters and adjusting permissions. One interesting thing: in my main.tf file I'm creating two lambda functions that are extremely similar (they vary only by code). My "controller" configuration deploys with no issue, but my "chunker" function seems to have an issue creating the s3_notification. I have included both configs for comparison.
#S3
resource "aws_s3_bucket" "ancb" {
  for_each = toset(var.ancb_bucket)
  bucket   = format("ancb-%s-%s-%s", var.env, var.product_name, each.value)
  acl      = "private"

  versioning {
    enabled = true
  }

  tags = {
    Environment = var.env
    Terraform   = true
  }
}

#Chunker
resource "aws_lambda_function" "ancb_chunker" {
  function_name = format("ancb-chunker-%s-%s", var.env, var.product_name)
  s3_bucket     = aws_s3_bucket.ancb["config"].id
  s3_key        = var.lambda_zip_chunker
  handler       = "handler.chunk"
  runtime       = "nodejs8.10"
  role          = aws_iam_role.lambda_exec.arn

  environment {
    variables = {
      ORIGINAL_BUCKET   = aws_s3_bucket.ancb["original"].id
      TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
      ENVIRONMENT       = var.env
      CHUNK_SIZE        = 5000
    }
  }

  tags = {
    Environment = var.env
    Terraform   = true
  }

  depends_on = [
    aws_s3_bucket_object.ancb["chunker.zip"],
    aws_s3_bucket.ancb["chunker"]
  ]
}

resource "aws_lambda_permission" "ancb_chunker_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Chunker"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_controller.arn
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["original"].arn
}

resource "aws_s3_bucket_notification" "chunker" {
  bucket = aws_s3_bucket.ancb["original"].id

  lambda_function {
    lambda_function_arn = aws_lambda_function.ancb_chunker.arn
    events              = ["s3:ObjectCreated:*"]
  }

  depends_on = [
    aws_lambda_permission.ancb_chunker_s3,
    aws_lambda_function.ancb_chunker,
    aws_s3_bucket.ancb["original"]
  ]
}

#Controller
resource "aws_lambda_function" "ancb_controller" {
  function_name = format("ancb-controller-%s-%s", var.env, var.product_name)
  s3_bucket     = aws_s3_bucket.ancb["config"].id
  s3_key        = var.lambda_zip_controller
  handler       = "handler.controller"
  runtime       = "nodejs8.10"
  role          = aws_iam_role.lambda_exec.arn

  environment {
    variables = {
      DESTINATION_BUCKET = aws_s3_bucket.ancb["destination"].id
      ENVIRONMENT        = var.env
      ERROR_BUCKET       = aws_s3_bucket.ancb["error"].id
      GEOCODIO_APIKEY    = <insert>
      GEOCODIO_ENDPOINT  = <insert>
      GEOCODIO_VERSION   = <insert>
      ORIGINAL_BUCKET    = aws_s3_bucket.ancb["original"].id
      SOURCE_BUCKET      = aws_s3_bucket.ancb["source"].id
      TO_PROCESS_BUCKET  = aws_s3_bucket.ancb["to-process"].id
      WORKING_BUCKET     = aws_s3_bucket.ancb["working"].id
    }
  }

  tags = {
    Environment = var.env
    Terraform   = true
  }

  depends_on = [
    aws_s3_bucket_object.ancb["controller.zip"]
  ]
}

resource "aws_lambda_permission" "ancb_controller_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Controller"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_controller.arn
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["source"].arn
}

resource "aws_s3_bucket_notification" "controller" {
  bucket = aws_s3_bucket.ancb["source"].id

  lambda_function {
    lambda_function_arn = aws_lambda_function.ancb_controller.arn
    events              = ["s3:ObjectCreated:*"]
  }

  depends_on = [
    aws_lambda_permission.ancb_controller_s3,
    aws_s3_bucket.ancb["source"]
  ]
}
UPDATE: If I manually create the trigger and run terraform apply again, terraform is able to move forward with no problem....
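One likely culprit is visible in the config itself: aws_lambda_permission.ancb_chunker_s3 grants invoke on aws_lambda_function.ancb_controller.arn rather than on the chunker, so S3 cannot validate the chunker as a notification destination. A sketch of the corrected permission:
resource "aws_lambda_permission" "ancb_chunker_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Chunker"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_chunker.arn   # was ancb_controller.arn
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["original"].arn
}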

How to trigger terraform to upload new lambda code

I deploy a lambda using Terraform as follows, but I have two questions:
1) I want null_resource.lambda to run always, or whenever stop_ec2.py is changed, so that stop_ec2_upload.zip is never out of date. What should I write in triggers {}?
2) How do I make aws_lambda_function.stop_ec2 upload the new stop_ec2_upload.zip to the cloud when it changes?
Right now I have to destroy aws_lambda_function.stop_ec2 and then create it again. Is there anything I can write in the code so that when I run terraform apply, 1) and 2) happen automatically?
resource "null_resource" "lambda" {
triggers {
#what should I write here?
}
provisioner "local-exec" {
command = "mkdir -p lambda_func && cd lambda_py && zip
../lambda_func/stop_ec2_upload.zip stop_ec2.py && cd .."
}
}
resource "aws_lambda_function" "stop_ec2" {
depends_on = ["null_resource.lambda"]
function_name = "stopEC2"
handler = "stop_ec2.handler"
runtime = "python3.6"
filename = "lambda_func/stop_ec2_upload.zip"
source_code_hash =
"${base64sha256(file("lambda_func/stop_ec2_upload.zip"))}"
role = "..."
}
I read the link provided by Chandan and figured it out.
Here is my code, and it works perfectly.
In fact, with archive_file and source_code_hash I do not need a trigger: whenever I create a new stop_ec2.py file or modify it and run terraform, the file is re-zipped and uploaded to the cloud.
data "archive_file" "stop_ec2" {
type = "zip"
source_file = "src_dir/stop_ec2.py"
output_path = "dest_dir/stop_ec2_upload.zip"
}
resource "aws_lambda_function" "stop_ec2" {
function_name = "stopEC2"
handler = "stop_ec2.handler"
runtime = "python3.6"
filename = "dest_dir/stop_ec2_upload.zip"
source_code_hash = data.archive_file.stop_ec2.output_base64sha256
role = "..."
}
These might help:
triggers {
  main         = "${base64sha256(file("source/main.py"))}"
  requirements = "${base64sha256(file("source/requirements.txt"))}"
}

triggers = {
  source_file = "${sha1Folder("${path.module}/source")}"
}
REF: https://github.com/hashicorp/terraform/issues/8344
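Note that sha1Folder is not a built-in Terraform function; it appears to be the proposal discussed in the linked issue. On Terraform 0.12.8 or later, a hedged equivalent can be built from fileset and filesha1:
triggers = {
  # hash every file under the source directory, so any change re-triggers the resource
  source_hash = sha1(join("", [for f in fileset("${path.module}/source", "**") : filesha1("${path.module}/source/${f}")]))
}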

terraform depends_on for provisioner file

I want the data "template_file" in the Terraform code below to execute after the provisioner "file" (basically an Ansible playbook) is copied to the EC2 instance. I am not able to successfully use "depends_on" in this scenario. Can someone please help me figure out how to achieve this? Below is the sample code snippet.
resource "aws_eip" "opendj-source-ami-eip" {
instance = "${aws_instance.opendj-source-ami-server.id}"
vpc = true
connection {
host = "${aws_eip.opendj-source-ami-eip.public_ip}"
user = "ubuntu"
timeout = "3m"
agent = false
private_key = "${file(var.private_key)}"
}
provisioner "file" {
source = "./${var.copy_password_file}"
destination = "/home/ubuntu/${var.copy_password_file}"
}
provisioner "file" {
source = "./${var.ansible_playbook}"
destination = "/home/ubuntu/${var.ansible_playbook}"
}
}
data "template_file" "run-ansible-playbooks" {
template = <<-EOF
#!/bin/bash
ansible-playbook /home/ubuntu/${var.copy_password_file} && ansible-playbook /home/ubuntu/${var.ansible_playbook}
EOF
#depends_on = ["<< not sure what to put here>>"]
}
depends_on is attached to the resource (or data source) as a whole rather than to an individual provisioner, so in your case it would look like:
data "template_file" "run-ansible-playbooks" {
template = <<-EOF
#!/bin/bash
ansible-playbook /home/ubuntu/${var.copy_password_file} && ansible-playbook /home/ubuntu/${var.ansible_playbook}
EOF
depends_on = ["aws_eip.opendj-source-ami-eip"]
}
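An alternative worth noting (a sketch, not part of the original answer): interpolating any attribute of the EIP into the data source gives Terraform the same ordering implicitly, without depends_on. The eip template var below exists only to create that dependency edge:
data "template_file" "run-ansible-playbooks" {
  template = <<-EOF
#!/bin/bash
# host ${eip} is interpolated purely to force the dependency on the EIP (and its provisioners)
ansible-playbook /home/ubuntu/${var.copy_password_file} && ansible-playbook /home/ubuntu/${var.ansible_playbook}
EOF

  vars {
    eip = "${aws_eip.opendj-source-ami-eip.public_ip}"
  }
}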

How to use terraform with environment variables in .tf file

I am new to Terraform, and I ran into an issue when trying to use environment variables with a .tf file. I tried to use terraform.tfvars / variables.tf.
./terraform apply -var-file="terraform.tfvars"
Failed to load root config module: Error parsing variables.tf: At 54:17: illegal char
What am I missing here?
Terraform Version: Terraform v0.9.2
main.tf:
provider "aws" {
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
region = "${var.aws_region}"
allowed_account_ids = ["${var.aws_account_id}"]
}
resource "aws_instance" "db" {
ami = "ami-49c9295"
instance_type = "t2.micro"
tags {
Name = "test"
}
connection {
user = "ubuntu"
}
security_groups = ["sg-ccc943b0"]
availability_zone = "${var.availability_zone}"
subnet_id = "${var.subnet_id}"
}
terraform.tfvars:
aws_profile = "default"
aws_access_key = "xxxxxx"
aws_secret_key = "xxxxxx"
aws_account_id = "xxxxxx"
key_name = "keyname"
key_path = "/home/user/.ssh/user.pem"
aws_region = "us-east-1"
subnet_id = "subnet-51997e7a"
vpc_security_group_ids = "mysql"
instance_type = "t2.xlarge"
availability_zone = "us-east-1a"
variables.tf:
variable "key_name" {
description = "Name of the SSH keypair to use in AWS."
default = "keypairname"
}
variable "key_path" {
description = "Path to the private portion of the SSH key specified."
default = "/home/user/.ssh/mypem.pem"
}
variable "aws_region" {
description = "AWS region to launch servers."
default = "us-east-1"
}
variable "aws_access_key" {
decscription = "AWS Access Key"
default = "xxxxxx"
}
variable "aws_secret_key" {
description = "AWS Secret Key"
default = "xxxxxx"
}
variable "aws_account_id" {
description = "AWS Account ID"
default = "xxxxxx"
}
variable "subnet_id" {
description = "Subnet ID to use in VPC"
default = "subnet-51997e7a"
}
variable "vpc_security_group_ids" {
description = "vpc_security_group_ids"
default = "sec"
}
variable "instance_type" {
description = "Instance type"
default = "t2.xlarge"
}
variable "instance_name" {
description = "Instance Name"
default = "test"
}
variable "availability_zone" {
description = "availability_zone"
default = "us-east-1a"
}
variable "aws_amis" {
default = {
"us-east-1": "ami-49c9295f",
"eu-west-1": "ami-49c9295f",
"us-west-1": "ami-49c9295f",
"us-west-2": "ami-49c9295f"
}
}
Update
After removing the variable "aws_amis" section from variables.tf, I ran into another issue:
Failed to load root config module: Error loading variables.tf: 1 error(s) occurred:
* variable[aws_access_key]: invalid key: decscription
The aws_amis variable being used as a lookup map looks incorrectly formatted to me. Instead it should probably be of the format:
variable "aws_amis" {
default = {
us-east-1 = "ami-49c9295f"
eu-west-1 = "ami-49c9295f"
us-west-1 = "ami-49c9295f"
us-west-2 = "ami-49c9295f"
}
}
As an aside, Terraform looks for a terraform.tfvars file by default, so you can drop the -var-file="terraform.tfvars" argument. You only need to pass -var-file when using a differently named file (such as prod.tfvars).
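Since the title asks about environment variables: Terraform also reads any variable from an environment variable named TF_VAR_<name>, which keeps credentials out of terraform.tfvars entirely. For example:
export TF_VAR_aws_access_key="xxxxxx"
export TF_VAR_aws_secret_key="xxxxxx"
./terraform apply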