I'm currently having an issue with my aws_s3_bucket_notification resource. Whenever I attempt to deploy it, I receive this error:
Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
I've tried setting depends_on and adjusting permissions. One interesting thing: in my main.tf I'm creating two Lambda functions that are extremely similar (they vary only by code). My "controller" configuration deploys with no issue, but my "chunker" function fails when creating its s3_bucket_notification. I have included both configs for comparison.
#S3
resource "aws_s3_bucket" "ancb" {
for_each = toset(var.ancb_bucket)
bucket = format("ancb-%s-%s-%s",var.env,var.product_name,each.value)
acl = "private"
versioning {
enabled = true
}
tags = {
Environment = var.env
Terraform = true
}
}
#Chunker
resource "aws_lambda_function" "ancb_chunker" {
function_name = format("ancb-chunker-%s-%s",var.env,var.product_name)
s3_bucket = aws_s3_bucket.ancb["config"].id
s3_key = var.lambda_zip_chunker
handler = "handler.chunk"
runtime = "nodejs8.10"
role = aws_iam_role.lambda_exec.arn
environment {
variables = {
ORIGINAL_BUCKET = aws_s3_bucket.ancb["original"].id
TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
ENVIRONMENT = var.env
CHUNK_SIZE = 5000
}
}
tags = {
Environment = var.env
Terraform = true
}
depends_on = [
aws_s3_bucket_object.ancb["chunker.zip"],
aws_s3_bucket.ancb["chunker"]
]
}
resource "aws_lambda_permission" "ancb_chunker_s3" {
statement_id = "AllowExecutionFromS3Bucket-Chunker"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.ancb_controller.arn
principal = "s3.amazonaws.com"
source_arn = aws_s3_bucket.ancb["original"].arn
}
resource "aws_s3_bucket_notification" "chunker" {
bucket = aws_s3_bucket.ancb["original"].id
lambda_function {
lambda_function_arn = aws_lambda_function.ancb_chunker.arn
events = ["s3:ObjectCreated:*"]
}
depends_on = [
aws_lambda_permission.ancb_chunker_s3,
aws_lambda_function.ancb_chunker,
aws_s3_bucket.ancb["original"]
]
}
#Controller
resource "aws_lambda_function" "ancb_controller" {
function_name = format("ancb-controller-%s-%s",var.env,var.product_name)
s3_bucket = aws_s3_bucket.ancb["config"].id
s3_key = var.lambda_zip_controller
handler = "handler.controller"
runtime = "nodejs8.10"
role = aws_iam_role.lambda_exec.arn
environment {
variables = {
DESTINATION_BUCKET = aws_s3_bucket.ancb["destination"].id
ENVIRONMENT = var.env
ERROR_BUCKET = aws_s3_bucket.ancb["error"].id
GEOCODIO_APIKEY = <insert>
GEOCODIO_ENDPOINT = <insert>
GEOCODIO_VERSION = <insert>
ORIGINAL_BUCKET = aws_s3_bucket.ancb["original"].id
SOURCE_BUCKET = aws_s3_bucket.ancb["source"].id
TO_PROCESS_BUCKET = aws_s3_bucket.ancb["to-process"].id
WORKING_BUCKET = aws_s3_bucket.ancb["working"].id
}
}
tags = {
Environment = var.env
Terraform = true
}
depends_on = [
aws_s3_bucket_object.ancb["controller.zip"]
]
}
resource "aws_lambda_permission" "ancb_controller_s3" {
statement_id = "AllowExecutionFromS3Bucket-Controller"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.ancb_controller.arn
principal = "s3.amazonaws.com"
source_arn = aws_s3_bucket.ancb["source"].arn
}
resource "aws_s3_bucket_notification" "controller" {
bucket = aws_s3_bucket.ancb["source"].id
lambda_function {
lambda_function_arn = aws_lambda_function.ancb_controller.arn
events = ["s3:ObjectCreated:*"]
}
depends_on = [
aws_lambda_permission.ancb_controller_s3,
aws_s3_bucket.ancb["source"]
]
}
UPDATE: If I manually create the trigger and then run terraform apply again, Terraform is able to move forward with no problem.
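Worth noting when comparing the two blocks: in aws_lambda_permission.ancb_chunker_s3 above, function_name points at aws_lambda_function.ancb_controller.arn, while the notification on the "original" bucket invokes the chunker. S3 validates that it is allowed to invoke the notification target, and a permission attached to a different function is the kind of mismatch that produces this validation error. A minimal sketch of the pair wired to the same function (same resources as above, only the function reference changed):

resource "aws_lambda_permission" "ancb_chunker_s3" {
  statement_id  = "AllowExecutionFromS3Bucket-Chunker"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.ancb_chunker.arn # chunker, not controller
  principal     = "s3.amazonaws.com"
  source_arn    = aws_s3_bucket.ancb["original"].arn
}

resource "aws_s3_bucket_notification" "chunker" {
  bucket = aws_s3_bucket.ancb["original"].id

  lambda_function {
    lambda_function_arn = aws_lambda_function.ancb_chunker.arn
    events              = ["s3:ObjectCreated:*"]
  }

  depends_on = [aws_lambda_permission.ancb_chunker_s3]
}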
Related
This Terraform code creates an S3 bucket that will host a website; the index file contains an old API endpoint.
It creates the S3 bucket, configures it for static hosting, and uploads the site's S3 objects.
resource "aws_s3_bucket" "frontend_vote" {
bucket = "frontend-bucket-${var.vote}-${var.def_region}"
}
resource "aws_s3_bucket_policy" "frontend_vote_s3_bucket_policy" {
bucket = aws_s3_bucket.frontend_vote.id
policy = data.aws_iam_policy_document.frontend_vote_s3_bucket_policy.json
depends_on = [aws_s3_bucket.frontend_vote]
}
data "aws_iam_policy_document" "frontend_vote_s3_bucket_policy" {
statement {
sid = "PublicReadGetObject"
principals {
type = "*"
identifiers = ["*"]
}
actions = [
"s3:GetObject"
]
resources = ["${aws_s3_bucket.frontend_vote.arn}/*"]
}
statement {
actions = ["s3:GetObject"]
resources = ["${aws_s3_bucket.frontend_vote.arn}/*"]
principals {
type = "AWS"
identifiers = [aws_cloudfront_origin_access_identity.frontend_vote_cloudfront_oai.iam_arn]
}
}
}
resource "aws_cloudfront_origin_access_identity" "frontend_vote_cloudfront_oai" {
comment = "frontend_vote origin"
depends_on = [aws_s3_bucket.frontend_vote]
}
resource "aws_s3_bucket_ownership_controls" "frontend_vote-bucket-ownership" {
bucket = aws_s3_bucket.frontend_vote.id
rule {
object_ownership = "BucketOwnerEnforced"
}
depends_on = [aws_s3_bucket.frontend_vote]
}
resource "aws_s3_bucket_website_configuration" "frontend_vote-static" {
bucket = aws_s3_bucket.frontend_vote.bucket
index_document {
suffix = "index.html"
}
depends_on = [aws_s3_bucket.frontend_vote, aws_s3_object.index_file_vote]
}
resource "aws_s3_object" "index_file_vote" {
bucket = aws_s3_bucket.frontend_vote.id
key = "index.html"
source = "./vote/index.html"
depends_on = [aws_s3_bucket.frontend_vote]
}
resource "aws_s3_object" "myicon_vote" {
bucket = aws_s3_bucket.frontend_vote.id
key = "myicon.png"
source = "./vote/myicon.png"
depends_on = [aws_s3_bucket.frontend_vote]
}
resource "aws_s3_object" "stylecss_vote" {
bucket = aws_s3_bucket.frontend_vote.id
key = "style.css"
source = "./vote/style.css"
depends_on = [aws_s3_bucket.frontend_vote]
}
Then the API Gateway is created via Terraform, with 2 routes and 2 integrations:
# ###########################################
# # api gateway
# ###########################################
resource "aws_apigatewayv2_api" "main_apigateway" {
name = var.apigateway_name
protocol_type = "HTTP"
cors_configuration {
allow_credentials = false
allow_headers = ["accept", "content-type"]
allow_methods = [
"GET",
"OPTIONS",
"POST",
]
allow_origins = [
# "*",
"https://${aws_cloudfront_distribution.cloudfront_result.domain_name}",
"https://${aws_cloudfront_distribution.cloudfront_vote.domain_name}"
]
expose_headers = []
max_age = 0
}
}
resource "aws_apigatewayv2_stage" "default" {
api_id = aws_apigatewayv2_api.main_apigateway.id
name = "$default"
auto_deploy = true
}
# ###########################################
# # VOTE lambda backend integration
# ###########################################
resource "aws_apigatewayv2_integration" "vote_integration" {
api_id = aws_apigatewayv2_api.main_apigateway.id
# integration_uri = aws_lambda_function.vote_lambda_backend.invoke_arn
integration_uri = aws_lambda_function.vote_lambda_backend.arn
integration_type = "AWS_PROXY"
payload_format_version = "2.0"
}
resource "aws_apigatewayv2_route" "vote_route" {
api_id = aws_apigatewayv2_api.main_apigateway.id
route_key = "POST /voting"
target = "integrations/${aws_apigatewayv2_integration.vote_integration.id}"
}
# resource "aws_iam_role_policy_attachment" "vote_policy_basic_execution_attachment" {
# role = aws_iam_role.vote_lambda_iam_role.name
# policy_arn = "arn:aws:iam:aws:policy/service-role/AWSLambdaBasicExecutionRole"
# }
resource "aws_lambda_permission" "vote_permission" {
statement_id = "AllowExecutionFromAPIGateway"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.vote_lambda_backend.function_name
principal = "apigateway.amazonaws.com"
source_arn = "${aws_apigatewayv2_api.main_apigateway.execution_arn}/*/*/voting"
}
# ###########################################
# # RESULT lambda backend integration
# ###########################################
resource "aws_apigatewayv2_integration" "result_integration" {
api_id = aws_apigatewayv2_api.main_apigateway.id
# integration_uri = aws_lambda_function.result_lambda_backend.invoke_arn
integration_uri = aws_lambda_function.result_lambda_backend.arn
integration_type = "AWS_PROXY"
payload_format_version = "2.0"
}
resource "aws_apigatewayv2_route" "result_route" {
api_id = aws_apigatewayv2_api.main_apigateway.id
route_key = "GET /results"
target = "integrations/${aws_apigatewayv2_integration.result_integration.id}"
}
resource "aws_lambda_permission" "result_permission" {
statement_id = "AllowExecutionFromAPIGateway"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.result_lambda_backend.function_name
principal = "apigateway.amazonaws.com"
source_arn = "${aws_apigatewayv2_api.main_apigateway.execution_arn}/*/*/results"
}
I want to put that new API endpoint into my index.html. How do I replace the old API URL with the newly created one? Via bash?
Index file:
<!DOCTYPE html>
<html>
<SOME CODE>
<script>
var backend_url = "https://5y7dfynd34.execute-api.us-east-1.amazonaws.com/voting" // this backend is old
SOME OTHER CODE
</script>
</body>
</html>
I don't think using environment variables, the way aws_lambda_function does in Terraform, will work here.
You can pass a variable to a template file.
Let's convert your index.html to index.html.tpl in the vote directory.
index.html.tpl
<!DOCTYPE html>
<html>
<SOME CODE>
<script>
var backend_url = "${backend_api_url}"
SOME OTHER CODE
</script>
</body>
</html>
Then you can render it and pass the variable with the templatefile function.
You just need to change source to content in your resource:
resource "aws_s3_object" "index_file_vote" {
bucket = aws_s3_bucket.frontend_vote.id
key = "index.html"
content = templatefile("./vote/index.html.tpl", {
backend_api_url = var.your_backend_api_url
})
depends_on = [aws_s3_bucket.frontend_vote]
}
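If you want to avoid a separate variable, the endpoint of the HTTP API defined earlier can be passed in directly. A sketch of the same resource using the API's api_endpoint attribute (the /voting suffix matches the POST route above; content_type is added here as an extra so S3 serves the object as HTML):

resource "aws_s3_object" "index_file_vote" {
  bucket       = aws_s3_bucket.frontend_vote.id
  key          = "index.html"
  content_type = "text/html"

  # api_endpoint resolves to https://<api-id>.execute-api.<region>.amazonaws.com,
  # which is the invoke URL for the $default stage with auto_deploy enabled.
  content = templatefile("./vote/index.html.tpl", {
    backend_api_url = "${aws_apigatewayv2_api.main_apigateway.api_endpoint}/voting"
  })
}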
I have created two Lambda functions. Now I want to pass all the CloudWatch logs from the first Lambda to the second Lambda. I have created a new log group and a subscription filter to forward the CloudWatch logs to the second Lambda.
I am not sure whether any additional resource needs to be added to this configuration.
resource "aws_lambda_function" "audit-logs" {
filename = var.audit_filename
function_name = var.audit_function
source_code_hash = filebase64sha256(var.audit_filename)
role = module.lambda_role.arn
handler = "cloudwatch.lambda_handler"
runtime = "python3.9"
timeout = 200
description = "audit logs"
depends_on = [module.lambda_role, module.security_group]
}
resource "aws_cloudwatch_log_group" "splunk_cloudwatch_loggroup" {
name = "/aws/lambda/audit_logs"
}
resource "aws_lambda_permission" "allow_cloudwatch_for_splunk" {
statement_id = "AllowExecutionFromCloudWatch"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.splunk-logs.arn
principal = "logs.amazonaws.com" #"logs.region.amazonaws.com"
source_arn = "${aws_cloudwatch_log_group.splunk_cloudwatch_loggroup.arn}:*"
}
resource "aws_cloudwatch_log_subscription_filter" "splunk_cloudwatch_trigger" {
depends_on = [aws_lambda_permission.allow_cloudwatch_for_splunk]
destination_arn = aws_lambda_function.splunk-logs.arn
filter_pattern = ""
log_group_name = aws_cloudwatch_log_group.splunk_cloudwatch_loggroup.name
name = "splunk_filter"
}
# splunk logs lambda function
resource "aws_lambda_function" "splunk-logs" {
filename = var.splunk_filename
function_name = var.splunk_function
source_code_hash = filebase64sha256(var.splunk_filename)
role = module.lambda_role.arn
handler = "${var.splunk_handler}.handler"
runtime = "python3.9"
timeout = 200
description = "audit logs"
depends_on = [module.lambda_role, module.security_group]
}
How can I pass all the logs from the first Lambda into the newly created log group? Any help?
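One thing to check, as a sketch rather than a definitive fix: Lambda writes its logs to a log group named /aws/lambda/<function_name>, so the managed group above only receives the first function's output if its name matches that pattern (here the group is named /aws/lambda/audit_logs while the function is named var.audit_function). Assuming the intent is for the audit Lambda's own logs to land in this group:

resource "aws_cloudwatch_log_group" "splunk_cloudwatch_loggroup" {
  # must match the group Lambda actually writes to: /aws/lambda/<function_name>
  name              = "/aws/lambda/${aws_lambda_function.audit-logs.function_name}"
  retention_in_days = 14 # optional; any retention setting works
}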
I want to create two Amazon SNS topics with the same aws_iam_policy_document, aws_sns_topic_policy & time_sleep configs.
This is my terraform, my_sns_topic.tf:
resource "aws_sns_topic" "topic_a" {
name = "topic-a"
}
resource "aws_sns_topic" "topic_b" {
name = "topic-b"
}
data "aws_iam_policy_document" "topic_notification" {
version = "2008-10-17"
statement {
sid = "__default_statement_ID"
actions = [
"SNS:Publish"
]
# Cut off some lines for simplification.
## NEW LINE ADDED
statement {
sid = "allow_snowflake_subscription"
principals {
type = "AWS"
identifiers = [var.storage_aws_iam_user_arn]
}
actions = ["SNS:Subscribe"]
resources = [aws_sns_topic.topic_a.arn] # Troubles with this line
}
}
resource "aws_sns_topic_policy" "topic_policy_notification" {
arn = aws_sns_topic.topic_a.arn
policy = data.aws_iam_policy_document.topic_policy_notification.json
}
resource "time_sleep" "topic_wait_10s" {
depends_on = [aws_sns_topic.topic_a]
create_duration = "10s"
}
As you can see here, I set up the configuration only for topic-a. I want to loop over this so it applies to topic-b as well.
It would be better to use a collection of topic names with for_each, instead of separately creating the "a" and "b" topics:
variable "topics" {
default = ["a", "b"]
}
resource "aws_sns_topic" "topic" {
for_each = toset(var.topics)
name = "topic-${each.key}"
}
data "aws_iam_policy_document" "topic_notification" {
version = "2008-10-17"
statement {
sid = "__default_statement_ID"
actions = [
"SNS:Publish"
]
# Cut off some lines for simplification.
}
resource "aws_sns_topic_policy" "topic_policy_notification" {
for_each = toset(var.topics)
arn = aws_sns_topic.topic[each.key].arn
policy = data.aws_iam_policy_document.topic_policy_notification.json
}
resource "time_sleep" "topic_wait_10s" {
for_each = toset(var.topics)
depends_on = [aws_sns_topic.topic[each.key]]
create_duration = "10s"
}
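If the per-topic statement from the question is needed (the one whose resources line points at a single topic's ARN), the policy document itself can also be generated per topic, since data sources support for_each as well. A sketch reusing var.topics and var.storage_aws_iam_user_arn from above; the data source name here is hypothetical, and it would replace the single shared document referenced by the aws_sns_topic_policy block:

data "aws_iam_policy_document" "topic_notification_per_topic" {
  for_each = toset(var.topics)

  statement {
    sid       = "allow_snowflake_subscription"
    actions   = ["SNS:Subscribe"]
    # each document points at its own topic's ARN
    resources = [aws_sns_topic.topic[each.key].arn]
    principals {
      type        = "AWS"
      identifiers = [var.storage_aws_iam_user_arn]
    }
  }
}

resource "aws_sns_topic_policy" "topic_policy_notification" {
  for_each = toset(var.topics)
  arn      = aws_sns_topic.topic[each.key].arn
  policy   = data.aws_iam_policy_document.topic_notification_per_topic[each.key].json
}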
I'm using Terraform to create a Cognito User Pool. I'd like to use a Lambda function for sending a custom message when a user signs up. When I attempt to sign up on the client, I get an error saying "CustomMessage invocation failed due to error AccessDeniedException." I've used Lambda permissions before, but I can't find any examples of this configuration. How do I give the Lambda function permission? The following is my current configuration.
resource "aws_cognito_user_pool" "main" {
name = "${var.user_pool_name}_${var.stage}"
username_attributes = [ "email" ]
schema {
attribute_data_type = "String"
mutable = true
name = "name"
required = true
}
schema {
attribute_data_type = "String"
mutable = true
name = "email"
required = true
}
password_policy {
minimum_length = "8"
require_lowercase = true
require_numbers = true
require_symbols = true
require_uppercase = true
}
mfa_configuration = "OFF"
lambda_config {
custom_message = aws_lambda_function.custom_message.arn
post_confirmation = aws_lambda_function.post_confirmation.arn
}
}
...
resource "aws_lambda_permission" "get_blog" {
statement_id = "AllowExecutionFromCognito"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.custom_message.function_name
principal = "cognito-idp.amazonaws.com"
source_arn = "${aws_cognito_user_pool.main.arn}/*/*"
depends_on = [ aws_lambda_function.custom_message ]
}
...
resource "aws_lambda_function" "custom_message" {
filename = "${var.custom_message_path}/${var.custom_message_file_name}.zip"
function_name = var.custom_message_file_name
role = aws_iam_role.custom_message.arn
handler = "${var.custom_message_file_name}.handler"
source_code_hash = filebase64sha256("${var.custom_message_path}/${var.custom_message_file_name}.zip")
runtime = "nodejs12.x"
timeout = 10
layers = [ var.node_layer_arn ]
environment {
variables = {
TABLE_NAME = var.table_name
RESOURCENAME = "blogAuthCustomMessage"
REGION = "us-west-2"
}
}
tags = {
Name = var.developer
}
depends_on = [
data.archive_file.custom_message,
]
}
Based on the OP's feedback in the comment section, changing the source_arn property in aws_lambda_permission.get_blog to aws_cognito_user_pool.main.arn works.
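For reference, a sketch of that corrected permission (the same resource as in the question, with the wildcard suffix dropped from source_arn):

resource "aws_lambda_permission" "get_blog" {
  statement_id  = "AllowExecutionFromCognito"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.custom_message.function_name
  principal     = "cognito-idp.amazonaws.com"
  # the user pool ARN itself, not "${...}/*/*"
  source_arn    = aws_cognito_user_pool.main.arn
}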
I am using Terraform to create AWS infrastructure with 4 regional API Gateways, each with a corresponding DynamoDB table in that region.
I want to create one module consisting of (API + DynamoDB) with configurable region-specific values. Is this possible with Terraform? Or would I have to create 4 separate API resources and 4 separate DynamoDB resources?
Any links or documentation would be helpful as well.
This is what I currently have working for one regional API Gateway and its corresponding DynamoDB table:
variable "access_key" {}
variable "secret_key" {}
provider "aws" {
access_key = "${var.access_key}"
secret_key = "${var.secret_key}"
alias = "us-east-1"
region = "us-east-1"
}
provider "aws" {
access_key = "${var.access_key}"
secret_key = "${var.secret_key}"
alias = "us-west-2"
region = "us-west-2"
}
resource "aws_dynamodb_table" "us-east-1" {
provider = "aws.us-east-1"
hash_key = "test_tf"
name = "test_tf"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
read_capacity = 1
write_capacity = 1
attribute {
name = "test_tf"
type = "S"
}
}
resource "aws_dynamodb_table" "us-west-2" {
provider = "aws.us-west-2"
hash_key = "test_tf"
name = "test_tf"
stream_enabled = true
stream_view_type = "NEW_AND_OLD_IMAGES"
read_capacity = 1
write_capacity = 1
attribute {
name = "test_tf"
type = "S"
}
}
resource "aws_dynamodb_global_table" "test_tf" {
depends_on = ["aws_dynamodb_table.us-east-1", "aws_dynamodb_table.us-west-2"]
provider = "aws.us-east-1"
name = "test_tf"
replica {
region_name = "us-east-1"
}
replica {
region_name = "us-west-2"
}
}
resource "aws_api_gateway_rest_api" "test-us-east-1" {
name = "test-us-east-1"
endpoint_configuration {
types = ["REGIONAL"]
}
}
resource "aws_api_gateway_resource" "sample_test" {
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
parent_id = "${aws_api_gateway_rest_api.test-us-east-1.root_resource_id}"
path_part = "{testid}"
}
resource "aws_api_gateway_method" "sample_get" {
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
resource_id = "${aws_api_gateway_resource.sample_test.id}"
http_method = "GET"
authorization = "NONE"
}
resource "aws_api_gateway_deployment" "Deployment" {
depends_on = ["aws_api_gateway_method.sample_get"]
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
stage_name = "test"
}
resource "aws_api_gateway_integration" "test" {
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
resource_id = "${aws_api_gateway_resource.sample_test.id}"
http_method = "${aws_api_gateway_method.sample_get.http_method}"
integration_http_method = "POST"
type = "AWS"
uri = "arn:aws:apigateway:us-east-1:dynamodb:action/GetItem"
credentials = "${aws_iam_role.apiGatewayDynamoDbAccessRole.arn}"
passthrough_behavior = "WHEN_NO_TEMPLATES"
request_templates = {
"application/json" = <<EOF
{
"TableName": "test_tf",
"Key":
{
"test_tf":
{
"S": "$input.params('testid')"
}
}
}
EOF
}
}
resource "aws_iam_policy" "api_dbaccess_policy" {
name = "api_dbaccess_policy"
policy = "${file("api-dynamodb-policy.json")}"
depends_on = [
"aws_dynamodb_table.us-east-1"
]
}
resource "aws_iam_role" "apiGatewayDynamoDbAccessRole" {
name = "apiGatewayDynamoDbAccessRole"
assume_role_policy = "${file("assume-role-policy.json")}"
depends_on = [
"aws_dynamodb_table.us-east-1"
]
}
resource "aws_iam_policy_attachment" "api-dbaccess-policy-attach" {
name = "api-dbaccess-policy-attachment"
roles = ["${aws_iam_role.apiGatewayDynamoDbAccessRole.name}"]
policy_arn = "${aws_iam_policy.api_dbaccess_policy.arn}"
}
resource "aws_api_gateway_method_response" "200" {
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
resource_id = "${aws_api_gateway_resource.sample_test.id}"
http_method = "${aws_api_gateway_method.sample_get.http_method}"
status_code = "200"
}
resource "aws_api_gateway_integration_response" "us-east-1-response" {
rest_api_id = "${aws_api_gateway_rest_api.test-us-east-1.id}"
resource_id = "${aws_api_gateway_resource.sample_test.id}"
http_method = "${aws_api_gateway_method.sample_get.http_method}"
status_code = "${aws_api_gateway_method_response.200.status_code}"
response_templates = {
"application/json" = <<EOF
{
#set($sampletest = $input.path('Item.test_tf.S'))
"test": #if ($sampletest && $sampletest != '')
true
#else
false
#end
}
EOF
}
}
Yes, this is possible with Terraform.
In the root module you define 4 AWS providers, giving an alias to each one:
provider "aws" {
alias = "oregon"
region = "us-west-2"
}
provider "aws" {
alias = "virginia"
region = "us-east-1"
}
Then, when you instantiate your modules, instead of relying on provider inheritance you pass the provider explicitly by alias:
module "api_gateway" {
source = "./api_gateway"
providers = {
aws = "aws.oregon"
}
}
Rinse and repeat for each of the 4 regions.
You can find the docs here: https://www.terraform.io/docs/modules/usage.html
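To make the module itself region-agnostic, the region-specific values can come in as module inputs alongside the provider. A sketch, where the input names are hypothetical and the provider reference is written unquoted as recent Terraform versions expect:

module "api_gateway_us_east_1" {
  source = "./api_gateway"

  providers = {
    aws = aws.virginia # unquoted provider reference in recent Terraform versions
  }

  # hypothetical module inputs for region-specific values
  table_name = "test_tf"
  stage_name = "test"
}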