The code below works when I create the resources, but I would like to tie each SQS queue to a different S3 bucket. For example, I want CloudTrail_SQS_Management_Event/CloudTrail_DLQ_Management_Event to use a bucket called "management_sqs_bucket", and CloudTrail_SQS_Data_Event/CloudTrail_DLQ_Data_Event to use a bucket called "data_sqs_bucket", with the bucket names reflected accordingly in the queue policies.
SQS/variables.tf
# Inputs for the SQS module (one main queue + one DLQ per instantiation).
variable "sqs_queue_name" {
  description = "The name of different SQS to be created"
  type        = string
}
variable "dead_queue_name" {
  description = "The name of different Dead Queues to be created"
  type        = string
}
variable "max_receive_count" {
  # Number of receives before a message is moved to the DLQ.
  type = number
}
# Used by the queue policies in iam.tf (aws:SourceArn condition) but was
# never declared, so `terraform validate` fails on
# var.cloudtrail_event_log_bucket_name.
variable "cloudtrail_event_log_bucket_name" {
  description = "Name of the S3 bucket allowed to publish to this queue pair"
  type        = string
}
SQS/iam.tf
# Consumer permissions for both queues created in main.tf.
# NOTE(review): the original was missing the closing brace of the data block
# (a parse error), and called values() on a single resource, which only
# works on a for_each map.
data "aws_iam_policy_document" "policy_document" {
  statement {
    actions = [
      "sqs:DeleteMessage",
      "sqs:GetQueueUrl",
      "sqs:ReceiveMessage",
      "sqs:SendMessage",
      "sqs:SetQueueAttributes"
    ]
    effect = "Allow"
    # aws_sqs_queue.sqs / .dlq are single resources, so list their ARNs
    # directly instead of values(...)[*].arn.
    resources = [
      aws_sqs_queue.sqs.arn,
      aws_sqs_queue.dlq.arn,
    ]
  }
}
# Allows the CloudTrail log bucket to deliver S3 notifications to the queue.
resource "aws_sqs_queue_policy" "Cloudtrail_SQS_Policy" {
  # Must reference the queue resource this module declares ("sqs");
  # aws_sqs_queue.CloudTrail_SQS does not exist here.
  queue_url = aws_sqs_queue.sqs.id
  policy    = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "sqspolicy",
  "Statement": [
    {
      "Sid": "AllowSQSInvocation",
      "Effect": "Allow",
      "Principal": {"AWS":"*"},
      "Action": "sqs:*",
      "Resource": "${aws_sqs_queue.sqs.arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
        }
      }
    }
  ]
}
POLICY
}
# Mirror policy for the dead-letter queue.
resource "aws_sqs_queue_policy" "CloudTrail_SQS_DLQ" {
  # Must reference the DLQ resource this module declares ("dlq");
  # aws_sqs_queue.CloudTrail_SQS_DLQ does not exist here.
  queue_url = aws_sqs_queue.dlq.id
  policy    = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "sqspolicy",
  "Statement": [
    {
      "Sid": "DLQPolicy",
      "Effect": "Allow",
      "Principal": {"AWS":"*"},
      "Action": "sqs:*",
      "Resource": "${aws_sqs_queue.dlq.arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
        }
      }
    }
  ]
}
POLICY
}
SQS/main.tf
# Main queue; messages that fail processing max_receive_count times are
# moved to the DLQ declared below via the redrive policy.
resource "aws_sqs_queue" "sqs" {
  name = var.sqs_queue_name
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.dlq.arn
    maxReceiveCount     = var.max_receive_count
  })
}
# Dead-letter queue paired with aws_sqs_queue.sqs.
resource "aws_sqs_queue" "dlq" {
  name = var.dead_queue_name
}
SQS/output.tf
output "sqs_queue_id" {
  # aws_sqs_queue.sqs is a single resource (no for_each), so reference it
  # directly; values(...)[*] only works on a map of resource instances.
  value       = aws_sqs_queue.sqs.id
  description = "The URL for the created Amazon SQS queue."
}
output "sqs_queue_arn" {
  # Same fix as sqs_queue_id: single resource, direct reference.
  value       = aws_sqs_queue.sqs.arn
  description = "The ARN of the SQS queue."
}
variable.tf
# One SQS/DLQ pair is created per entry by the module block below.
variable "queue_names" {
  description = "SQS/DLQ name pairs to instantiate the SQS module with"
  # Explicit type constraint catches malformed entries at plan time.
  type = list(object({
    sqs_name = string
    dlq_name = string
  }))
  default = [
    {
      sqs_name = "CloudTrail_SQS_Management_Event"
      dlq_name = "CloudTrail_DLQ_Management_Event"
    },
    {
      sqs_name = "CloudTrail_SQS_Data_Event"
      dlq_name = "CloudTrail_DLQ_Data_Event"
    }
  ]
}
module "sqs_queue" {
  source = "../SQS"
  # Key module instances by queue name. The original
  # `for sqs, dlq in var.queue_names` iterated (index, value) over the
  # list, so instances were keyed "0", "1" and `sqs`/`dlq` were misnamed.
  for_each = {
    for q in var.queue_names : q.sqs_name => q
  }
  sqs_queue_name    = each.value.sqs_name
  dead_queue_name   = each.value.dlq_name
  max_receive_count = var.max_receive_count
}
From what I understand I believe this is what you would want to do:
variables.tf:
# Each entry pairs a queue/DLQ with the S3 bucket allowed to publish to it.
variable "queue_names" {
  description = "SQS/DLQ/bucket triples; one module instance per entry"
  # Explicit type constraint catches missing fields at plan time.
  type = list(object({
    sqs_name    = string
    dlq_name    = string
    bucket_name = string
  }))
  default = [
    {
      sqs_name    = "CloudTrail_SQS_Management_Event"
      dlq_name    = "CloudTrail_DLQ_Management_Event"
      bucket_name = "management_sqs_bucket"
    },
    {
      sqs_name    = "CloudTrail_SQS_Data_Event"
      dlq_name    = "CloudTrail_DLQ_Data_Event"
      bucket_name = "data_sqs_bucket"
    }
  ]
}
main.tf:
# Instantiates the SQS module once per queue pair, keyed by queue name so
# plan/apply output is readable and stable across list reordering.
module "my_sqs" {
  source = "./my_sqs"
  for_each = {
    for q in var.queue_names : q.sqs_name => q
  }
  sqs_queue_name    = each.value.sqs_name
  dead_queue_name   = each.value.dlq_name
  max_receive_count = 4
  # Per-pair bucket name flows into the module's queue policies.
  cloudtrail_event_log_bucket_name = each.value.bucket_name
}
Also, I see that some code duplication for aws_sqs_queue_policy because you have both the SQS queue and the DLQ. This can be refactored to something like this:
iam.tf:
# Consumer-side permissions for the queues created in main.tf.
# NOTE(review): this document is not attached to anything in the shown code —
# confirm it is referenced elsewhere or drop it.
data "aws_iam_policy_document" "policy_document" {
  statement {
    actions = [
      "sqs:DeleteMessage",
      "sqs:GetQueueUrl",
      "sqs:ReceiveMessage",
      "sqs:SendMessage",
      "sqs:SetQueueAttributes"
    ]
    effect = "Allow"
    # [*] on a single (non-count) resource yields a one-element list, so
    # this covers the main queue only — the DLQ ARN is not included.
    resources = aws_sqs_queue.sqs[*].arn
  }
}
# URL/ARN pairs for the main queue and its DLQ, consumed positionally by
# the count-indexed aws_sqs_queue_policy.sqs_policy resource.
locals {
  queue_data = [
    {
      id  = aws_sqs_queue.sqs.id
      arn = aws_sqs_queue.sqs.arn
    },
    {
      id  = aws_sqs_queue.dlq.id
      arn = aws_sqs_queue.dlq.arn
    }
  ]
}
# One policy per entry in local.queue_data (main queue + DLQ).
resource "aws_sqs_queue_policy" "sqs_policy" {
  # The original used length(aws_sqs_queue.sqs[*]), which is always 1 for a
  # single resource — so the DLQ policy was silently never created.
  count     = length(local.queue_data)
  queue_url = local.queue_data[count.index].id
  policy    = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "sqspolicy",
  "Statement": [
    {
      "Sid": "AllowSQSInvocation",
      "Effect": "Allow",
      "Principal": {"AWS":"*"},
      "Action": "sqs:*",
      "Resource": "${local.queue_data[count.index].arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
        }
      }
    }
  ]
}
POLICY
}
Related
I have created lambda function with s3 bucket required access and i am trying to create s3 events trigger but i am getting access denied error.
lambda.tf
# Lambda that starts a Glue job in response to S3 events.
resource "aws_lambda_function" "s3-lambdas" {
  filename      = "./s3-lambdas.zip"
  function_name = "s3-lambdas"
  # filebase64sha256 takes the file path as a string; the original passed
  # the bare token s3-lambdas, which is not valid HCL.
  source_code_hash = filebase64sha256("./s3-lambdas.zip")
  role             = module.lambda_role.arn
  handler          = "s3-lambdas.lambda_handler"
  runtime          = "python3.9"
  timeout          = 200
  description      = "invoke glue job"
  depends_on       = [module.lambda_role]
}
# Grants S3 (restricted to the named bucket via source_arn) permission to
# invoke the Lambda; S3 validates this when the bucket notification is put.
resource "aws_lambda_permission" "s3_lambdas_s3_events" {
  depends_on    = [aws_lambda_function.s3-lambdas]
  statement_id  = "AllowS3Invoke"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.s3-lambdas.function_name
  principal     = "s3.amazonaws.com"
  source_arn    = "arn:aws:s3:::${module.bucket-name.name}"
}
# Fires the Lambda for every object created under the abc/def/ prefix.
# depends_on ensures the invoke permission exists before S3 validates the
# notification destination.
resource "aws_s3_bucket_notification" "bucket_notifications" {
  bucket = module.bucket-name.name
  lambda_function {
    lambda_function_arn = aws_lambda_function.s3-lambdas.arn
    events              = ["s3:ObjectCreated:*"]
    filter_prefix       = "abc/def/"
  }
  depends_on = [aws_lambda_permission.s3_lambdas_s3_events]
}
and my lambda role which contains s3 full access.
s3.tf
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::dev-s3-bucket",
"arn:aws:s3:::dev-s3-bucket/*"
]
}
]
}
However, I am getting "An error occurred while listing S3 relations: Access Denied" when I try to create the S3 triggers.
Edited
output "bucket_name" {
value = module.bucket-name.name
}
bucket_name = dev-s3-bucket
output "iam_dev_arn" {
value = module.lambda_role.arn
}
iam_dev_arn = arn:aws:iam::0123456789:role/s3-lambda-role
output "div_arn" {
value = aws_lambda_function.s3-lambdas.arn
}
div_arn = arn:aws:lambda:us-east-1:0123456789:function:s3-lambdas
assume_role_policy
{
"Version": "2008-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
i have added the outputs related to module
s3-lambdas
from __future__ import print_function
import boto3
import urllib
import os
print ('Loading function')
glue = boto3.client('glue')
def lambda_handler(event, context):
    """Start the Glue job named by the ``glue_job`` env var and report its state.

    Args:
        event: Lambda invocation payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        dict with ``statusCode`` 200 and a confirmation ``body``.

    Raises:
        KeyError: if the ``glue_job`` environment variable is not set.
        Exception: any boto3/Glue error is re-raised unchanged.
    """
    gluejobname = os.environ['glue_job']
    try:
        runId = glue.start_job_run(JobName=gluejobname)
        status = glue.get_job_run(JobName=gluejobname, RunId=runId['JobRunId'])
        print("Job Status : ", status['JobRun']['JobRunState'], "runId", runId)
    except Exception:
        # Bare `raise` preserves the original traceback; the original
        # `raise e` re-raised from this frame and discarded it.
        raise
    return {
        "statusCode": 200,
        "body": os.environ['glue_job'] + " Job started"
    }
s3 bucket policy:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "denyInsecureTransport",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::dev-s3-bucket/*",
"arn:aws:s3:::dev-s3-bucket"
],
"Condition": {
"Bool": {
"aws:SecureTransport": "false"
}
}
}
]
}
In your S3 bucket policy can you make the following change and retry:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::dev-s3-bucket/abc/def/*" <-- change this
]
}
]
}
I have Terraform code that almost successfully builds an AWS Batch Compute Environment with an Fsx file share mount to it.
However, despite passing the aws_fsx_lustre_file_system module a deployment type of PERSISTENT_2:
resource "aws_fsx_lustre_file_system" "storage" {
storage_capacity = 1200
subnet_ids = [var.subnet_id]
deployment_type = "PERSISTENT_2"
per_unit_storage_throughput = 250
}
the FSx filesystem is only spun up as a SCRATCH deployment (viewable via the AWS Management Console).
What additional information can I post here to help debug why this Terraform code is not respecting the deployment_type parameter?
Full code:
// ==========================================================
// Module input variables
// ----------------------------------------------------------
variable "region" {
type = string
}
variable "compute_environment_name" {
type = string
}
variable "job_queue_name" {
type = string
}
variable "max_vcpus" {
type = number
}
variable "vpc_id" {
type = string
}
variable "subnet_id" {
type = string
}
variable "security_group_id" {
type = string
}
variable "mounted_storage_bucket" {
type = string
}
// ==========================================================
// Components for batch processing for AWS Batch
// ----------------------------------------------------------
resource "aws_iam_role" "batch_role" {
name = "batch_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Attach the Batch policy to the Batch role
resource "aws_iam_role_policy_attachment" "batch_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_iam_role_policy_attachment" "elastic_container_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# Security Group for batch processing
resource "aws_security_group" "batch_security_group" {
name = "batch_security_group"
description = "AWS Batch Security Group for batch jobs"
vpc_id = var.vpc_id
egress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
created-by = "Terraform"
}
}
# IAM Role for underlying EC2 instances
resource "aws_iam_role" "ec2_role" {
name = "ec2_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Assign the EC2 role to the EC2 profile
resource "aws_iam_instance_profile" "ec2_profile" {
name = "ec2_profile"
role = aws_iam_role.ec2_role.name
}
# Attach the EC2 container service policy to the EC2 role
resource "aws_iam_role_policy_attachment" "ec2_policy_attachment" {
role = aws_iam_role.ec2_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# IAM Role for jobs
resource "aws_iam_role" "job_role" {
name = "job_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# S3 read/write policy
resource "aws_iam_policy" "s3_policy" {
name = "s3_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*",
"s3:Put*"
],
"Resource": [
"arn:aws:s3:::${var.mounted_storage_bucket}",
"arn:aws:s3:::${var.mounted_storage_bucket}/*"
]
}
]
}
EOF
}
resource "aws_iam_policy" "ecs_policy" {
name = "ecs_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:*"
],
"Resource": [
"*"
]
}
]
}
EOF
}
# Attach the policy to the job role
resource "aws_iam_role_policy_attachment" "job_policy_attachment_s3" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.s3_policy.arn
}
resource "aws_iam_role_policy_attachment" "job_policy_attachment_ecs" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.ecs_policy.arn
}
# Persistent FSx for Lustre filesystem backing /data/fsx on Batch instances.
# NOTE(review): if the filesystem still comes up as SCRATCH despite this,
# check the pinned AWS provider version — PERSISTENT_2 is only recognized by
# newer provider releases. TODO confirm provider version in the lockfile.
resource "aws_fsx_lustre_file_system" "storage" {
  storage_capacity            = 1200
  subnet_ids                  = [var.subnet_id]
  deployment_type             = "PERSISTENT_2"
  per_unit_storage_throughput = 250
}
resource "aws_fsx_data_repository_association" "storage_association" {
file_system_id = aws_fsx_lustre_file_system.storage.id
data_repository_path = "s3://${var.mounted_storage_bucket}"
file_system_path = "/data/fsx"
s3 {
auto_export_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
auto_import_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
}
}
# Boot-time setup for Batch instances: install the Lustre client and mount
# the FSx filesystem. Fixes from review: the Lustre mount target must be
# <dns_name>@tcp:/<mount_name> (the original had "#tcp:fsx" plus a stray
# quote), and MIME parts need a blank line after their headers.
resource "aws_launch_template" "launch_template" {
  name                   = "launch_template"
  update_default_version = true
  user_data = base64encode(<<EOF
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="

--==MYBOUNDARY==
Content-Type: text/cloud-config; charset="us-ascii"

runcmd:
  - region=${var.region}
  - amazon-linux-extras install -y lustre2.10
  - mkdir -p /data/fsx
  - mount -t lustre ${aws_fsx_lustre_file_system.storage.dns_name}@tcp:/${aws_fsx_lustre_file_system.storage.mount_name} /data/fsx

--==MYBOUNDARY==--
EOF
  )
}
// ==========================================================
// Batch setup
// - compute environment
// - job queue
// ----------------------------------------------------------
resource "aws_iam_role" "ecs_instance_role" {
name = "ecs_instance_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
role = "${aws_iam_role.ecs_instance_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_instance_role" {
name = "ecs_instance_role"
role = "${aws_iam_role.ecs_instance_role.name}"
}
resource "aws_iam_role" "aws_batch_service_role" {
name = "aws_batch_service_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "aws_batch_service_role" {
role = "${aws_iam_role.aws_batch_service_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
# Managed EC2 compute environment mounting FSx via the launch template.
resource "aws_batch_compute_environment" "batch_environment" {
  compute_environment_name = var.compute_environment_name
  compute_resources {
    instance_role = aws_iam_instance_profile.ecs_instance_role.arn
    launch_template {
      launch_template_name = aws_launch_template.launch_template.name
      version              = "$Latest"
    }
    # NOTE(review): c6g.* are Graviton (ARM) instances — confirm the Batch
    # AMI and the lustre2.10 extra support aarch64.
    instance_type = [
      "c6g.large",
      "c6g.xlarge",
      "c6g.2xlarge",
      "c6g.4xlarge",
      "c6g.8xlarge",
      "c6g.12xlarge"
    ]
    # Was hard-coded to 16; var.max_vcpus was declared but never used.
    max_vcpus = var.max_vcpus
    min_vcpus = 0
    security_group_ids = [
      aws_security_group.batch_security_group.id,
    ]
    subnets = [
      var.subnet_id
    ]
    type = "EC2"
  }
  service_role = aws_iam_role.aws_batch_service_role.arn
  type         = "MANAGED"
  # Keep the service-role policy attached for the environment's lifetime.
  depends_on = [aws_iam_role_policy_attachment.aws_batch_service_role]
  tags = {
    created-by = "Terraform"
  }
}
resource "aws_batch_job_queue" "job_queue" {
  # Use the module input; var.job_queue_name was declared but never used
  # and the name was hard-coded to "job_queue".
  name     = var.job_queue_name
  state    = "ENABLED"
  priority = 1
  compute_environments = [
    aws_batch_compute_environment.batch_environment.arn
  ]
  depends_on = [aws_batch_compute_environment.batch_environment]
  tags = {
    created-by = "Terraform"
  }
}
output "batch_compute_environment_id" {
value = aws_batch_compute_environment.batch_environment.id
}
output "batch_job_queue_id" {
value = aws_batch_job_queue.job_queue.id
}
output "batch_storage_mount_target" {
value = aws_fsx_lustre_file_system.storage.arn
}
output "batch_storage_mount_target_mount" {
value = aws_fsx_lustre_file_system.storage.mount_name
}
When trying to use the eventbridge input_transformer i do not receive the transformed object, but instead i get the original object sent directly to SQS.
I currently have the following setup running
locals {
  # NOTE(review): appears unused in the shown code. Also, rules on a custom
  # bus have ARNs of the form .../rule/<bus-name>/<rule-name>, so this
  # hand-built ARN would be wrong for the custom-bus rule below — prefer
  # aws_cloudwatch_event_rule.notification.arn.
  rule-arn = "arn:aws:events:${data.aws_arn.event-rule.region}:${data.aws_arn.event-rule.account}:rule/${aws_cloudwatch_event_rule.notification.name}"
}
resource "aws_sqs_queue" "test-queue" {
name = "test-queue"
}
# Matches every event on the custom bus whose source starts with
# "TEST.Notification".
resource "aws_cloudwatch_event_rule" "notification" {
  name           = "test-notification"
  event_bus_name = aws_cloudwatch_event_bus.events.name
  description    = "Listens to all events in the TEST.Notification namespace"
  event_pattern = jsonencode({
    source = [{ "prefix" : "TEST.Notification" }],
  })
}
resource "aws_cloudwatch_event_target" "developer-notification" {
  rule = aws_cloudwatch_event_rule.notification.name
  # The rule lives on a custom bus; without event_bus_name the target is
  # resolved against the default bus, so this input transformer never
  # applies to events arriving on the custom bus — the likely reason the
  # untransformed event reached SQS.
  event_bus_name = aws_cloudwatch_event_bus.events.name
  target_id      = "SendToSQS"
  arn            = aws_sqs_queue.test-queue.arn
  input_transformer {
    input_paths = {
      "detailType" = "$.detail-type",
    }
    input_template = jsonencode(
      {
        "detailType" : "<detailType>"
      }
    )
  }
}
# Allows EventBridge (this rule only, via aws:SourceArn) to send to the queue.
resource "aws_sqs_queue_policy" "test-queue" {
  queue_url = aws_sqs_queue.test-queue.id
  # Sid must be alphanumeric; the original "Allow EventBridge to SQS"
  # contained spaces, which SQS rejects as a malformed policy.
  policy = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "",
  "Statement": [
    {
      "Sid": "AllowEventBridgeToSQS",
      "Effect": "Allow",
      "Principal": {
        "Service": "events.amazonaws.com"
      },
      "Action": "*",
      "Resource": "${aws_sqs_queue.test-queue.arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "${aws_cloudwatch_event_rule.notification.arn}"
        }
      }
    }
  ]
}
POLICY
}
I am running Terraform version:
Terraform v1.2.3
on darwin_arm64
I have seen some talk about having to do stuff like
"\"<detailType>\""
in order to have it work, but i've had no luck with that either, so for brevity/readability i've removed all the weird tricks i've seen people use. My thinking is there's something more basic i am missing here.
Does someone know what i am doing wrong?
I am creating a policy based on tags. The environment value differs for each resource, so I have to add them as multiple values. I am unable to pass them as a list; I tried join, split, and a for loop, and none of them works. Please help. The code below just adds the value as the single string "beta,test", which will not work as expected.
main.tf
locals{
workspaceValues = terraform.workspace == "dev" ? ["alpha", "dev"] : terraform.workspace == "test" ? ["beta", "test"] : ["prod", "staging"]
}
resource "aws_iam_group_policy" "inline_policy" {
name = "${terraform.workspace}_policy"
group = aws_iam_group.backend_admin.name
policy = templatefile("policy.tpl", { env = join(",", local.workspaceValues), region = "${data.aws_region.current.name}", account_id = "${data.aws_caller_identity.current.account_id}" })
}
policy.tpl:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:*",
"Resource": "*",
"Condition": {
"ForAllValues:StringLikeIfExists": {
"aws:ResourceTag/Environment": "${env}"
}
}
}
]
}
You can use jsonencode to properly format TF list as a list in json in your template:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "ec2:*",
"Resource": "*",
"Condition": {
"ForAllValues:StringLikeIfExists": {
"aws:ResourceTag/Environment": ${jsonencode(env)}
}
}
}
]
}
For that you would call the template as follows:
resource "aws_iam_group_policy" "inline_policy" {
name = "${terraform.workspace}_policy"
group = aws_iam_group.backend_admin.name
policy = templatefile("policy.tpl", {
env = local.workspaceValues
})
}
You are not using region nor account_id in your template, so there is no reason to pass them in.
An alternative solution to avoid this all together is to recreate the policy using the data component "aws_iam_policy_document" and then pass it to the aws_iam_policy resource like the following,
data "aws_iam_policy_document" "example" {
statement {
effect = "Allow"
actions = [
"ec2:*"
]
resources = [
"*"
]
condition {
test = "ForAllValues:StringLikeIfExists"
variable = "aws:ResourceTag/Environment"
values = local.workspaceValues
}
}
}
resource "aws_iam_policy" "example" {
name = "example_policy"
path = "/"
policy = data.aws_iam_policy_document.example.json
}
When I try to create a aws_s3_bucket_notification I get this terrerform exception: aws_s3_bucket_notification.input_notification: Error putting S3 notification configuration: InvalidArgument: Unable to validate the following destination configurations
status code: 400, request id: 4E17F794B9BC67C9, host id: QmeEFS+T1cvr1xFEMmAlqBKxzX1Fg+qOpwJFXDl4sR1hVcHa4swLN87BiPI8BToGuNQ3oYD0pYk= As for as I can tell I have followed the specs outlined in the terraform docs here: https://www.terraform.io/docs/providers/aws/r/s3_bucket_notification.html
Has anyone else had this problem before?
resource "aws_sqs_queue" "sqs_queue" {
name = "${var.env}-${var.subenv}-${var.appname}"
delay_seconds = 5
max_message_size = 262144
message_retention_seconds = 86400
receive_wait_time_seconds = 10
visibility_timeout_seconds = 90
redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.sqs_dlq.arn}\",\"maxReceiveCount\":${var.sqs_max_receive_count}}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "sqs:SendMessage",
"Resource": "arn:aws:sqs:*:*:s3-event-notification-queue",
"Condition": {
"ArnEquals": { "aws:SourceArn": "${aws_s3_bucket.input.arn}" }
}
}
]
}
POLICY
}
resource "aws_s3_bucket" "input" {
bucket = "${var.env}-${var.subenv}-${var.appname}-input"
}
resource "aws_s3_bucket_notification" "input_notification" {
depends_on = [
"aws_s3_bucket.input",
"aws_sqs_queue.sqs_queue"
]
bucket = "${aws_s3_bucket.input.id}"
queue {
queue_arn = "${aws_sqs_queue.sqs_queue.arn}"
events = ["s3:ObjectCreated:*"]
filter_suffix = ".gz"
}
}
The SQS policy was wrong, it should look like this:
resource "aws_sqs_queue" "sqs_queue" {
name = "${var.env}-${var.subenv}-${var.appname}"
delay_seconds = 5
max_message_size = 262144
message_retention_seconds = 86400
receive_wait_time_seconds = 10
visibility_timeout_seconds = 90
redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.sqs_dlq.arn}\",\"maxReceiveCount\":${var.sqs_max_receive_count}}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": "*",
"Action": "sqs:SendMessage",
"Resource": "arn:aws:sqs:*:*:${var.env}-${var.subenv}-${var.appname}",
"Condition": {
"ArnEquals": { "aws:SourceArn": "${aws_s3_bucket.input.arn}" }
}
}
]
}
POLICY
}