I want to replicate S3 buckets across AWS regions with Terraform.
This is the code in my s3.tf:
# Bucket
module "s3_replica" {
source = "git#github.com:xxx"
providers = {
aws = "aws.us-west-2"
}
name = "s3_replica"
logging_bucket_prefix = "s3_replica"
versioning = var.versioning
bucket_logging = var.bucket_logging
logging_bucket_name = var.logging_bucket_name
kms_key_id = aws_kms_key.s3-replica-key.key_id
sse_algorithm = var.sse_algorithm
tags = var.bucket_tags
environment = var.environment
}
module "s3" {
source = "git#github.com:xxxx"
name = "s3"
logging_bucket_prefix = "s3"
versioning = var.versioning
bucket_logging = var.bucket_logging
logging_bucket_name = var.logging_bucket_name
kms_key_id = aws_kms_key.s3.key_id
sse_algorithm = var.sse_algorithm
tags = var.bucket_tags
environment = var.environment
replication_configuration {
role = aws_iam_role.replication.arn
rules {
status = "Enabled"
destination {
bucket = module.s3_replica.bucket_arn
storage_class = "STANDARD_IA"
replica_kms_key_id = aws_kms_key.s3-replica-key.key_id
}
source_selection_criteria {
sse_kms_encrypted_objects {
enabled = true
}
}
}
}
}
and this is the relevant part of my module:
dynamic "replication_configuration" {
for_each = length(keys(var.replication_configuration)) == 0 ? [] : [var.replication_configuration]
content {
role = replication_configuration.value.role
dynamic "rules" {
for_each = replication_configuration.value.rules
content {
id = lookup(rules.value, "id", null)
priority = lookup(rules.value, "priority", null)
prefix = lookup(rules.value, "prefix", null)
status = rules.value.status
dynamic "destination" {
for_each = length(keys(lookup(rules.value, "destination", {}))) == 0 ? [] : [lookup(rules.value, "destination", {})]
content {
bucket = destination.value.bucket
storage_class = lookup(destination.value, "storage_class", null)
replica_kms_key_id = lookup(destination.value, "replica_kms_key_id", null)
account_id = lookup(destination.value, "account_id", null)
}
}
dynamic "source_selection_criteria" {
for_each = length(keys(lookup(rules.value, "source_selection_criteria", {}))) == 0 ? [] : [lookup(rules.value, "source_selection_criteria", {})]
content {
dynamic "sse_kms_encrypted_objects" {
for_each = length(keys(lookup(source_selection_criteria.value, "sse_kms_encrypted_objects", {}))) == 0 ? [] : [lookup(source_selection_criteria.value, "sse_kms_encrypted_objects", {})]
content {
enabled = sse_kms_encrypted_objects.value.enabled
}
}
}
}
dynamic "filter" {
for_each = length(keys(lookup(rules.value, "filter", {}))) == 0 ? [] : [lookup(rules.value, "filter", {})]
content {
prefix = lookup(filter.value, "prefix", null)
tags = lookup(filter.value, "tags", null)
}
}
}
}
}
}
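For reference, the dynamic block above reads var.replication_configuration, so the module also needs a matching input declared. A minimal sketch of that declaration (the loose any type and the empty default are assumptions):
variable "replication_configuration" {
  description = "Bucket replication configuration; an empty map disables replication"
  type        = any
  default     = {}
}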
resource "aws_iam_role" "replication" {
name = "s3-bucket-replication"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
POLICY
}
resource "aws_iam_policy" "replication" {
name = "s3-bucket-replication"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"module.s3.bucket_arn"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"module.s3.bucket_arn"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateDelete"
],
"Effect": "Allow",
"Resource": "module.s3.bucket_arn"
}
]
}
POLICY
}
resource "aws_iam_policy_attachment" "replication" {
name = "s3-bucket-replication"
roles = [aws_iam_role.replication.name]
policy_arn = aws_iam_policy.replication.arn
}
When I run terraform init, it succeeds.
However, when I run terraform plan, it gives me this error:
Error: Unsupported block type
on s3.tf line 102, in module "s3":
102: replication_configuration {
Blocks of type "replication_configuration" are not expected here.
Why am I getting this error? I do have the replication_configuration defined, right?
Is this a configuration problem or a syntax problem?
I tried adding "=" after replication_configuration, and now I get this error:
Error: Unsupported argument
on s3.tf line 102, in module "s3":
102: replication_configuration = {
An argument named "replication_configuration" is not expected here.
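Both errors come from the same place: a module block only accepts arguments that the called module declares as input variables, so replication_configuration cannot be written as a nested block, and with "=" it is only accepted if the module version being pulled actually declares that variable. Assuming it does (as sketched above), a hedged sketch of passing the whole configuration as a value; the module output name bucket_arn is an assumption:
module "s3" {
  # ... same arguments as above ...
  replication_configuration = {
    role = aws_iam_role.replication.arn
    rules = [
      {
        status = "Enabled"
        destination = {
          bucket             = module.s3_replica.bucket_arn
          storage_class      = "STANDARD_IA"
          replica_kms_key_id = aws_kms_key.s3-replica-key.key_id
        }
        source_selection_criteria = {
          sse_kms_encrypted_objects = {
            enabled = true
          }
        }
      }
    ]
  }
}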
Related
I have Terraform code that almost successfully builds an AWS Batch compute environment with an FSx file share mounted to it.
However, despite passing the aws_fsx_lustre_file_system resource a deployment_type of PERSISTENT_2:
resource "aws_fsx_lustre_file_system" "storage" {
storage_capacity = 1200
subnet_ids = [var.subnet_id]
deployment_type = "PERSISTENT_2"
per_unit_storage_throughput = 250
}
the FSx file system is only spun up as a scratch drive (viewable via the AWS Management Console).
What additional information can I post here to help debug why this Terraform code is not respecting the deployment_type parameter?
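One concrete thing worth including is the AWS provider version in use, since support for newer FSx deployment types depends on the provider release. A hedged sketch of pinning it explicitly (the minimum version shown is an assumption to verify against the provider changelog):
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.0" # assumed minimum for PERSISTENT_2; verify in the changelog
    }
  }
}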
Full code:
// ==========================================================
// Module input variables
// ----------------------------------------------------------
variable "region" {
type = string
}
variable "compute_environment_name" {
type = string
}
variable "job_queue_name" {
type = string
}
variable "max_vcpus" {
type = number
}
variable "vpc_id" {
type = string
}
variable "subnet_id" {
type = string
}
variable "security_group_id" {
type = string
}
variable "mounted_storage_bucket" {
type = string
}
// ==========================================================
// Components for batch processing for AWS Batch
// ----------------------------------------------------------
resource "aws_iam_role" "batch_role" {
name = "batch_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs.amazonaws.com"
}
},
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Attach the Batch policy to the Batch role
resource "aws_iam_role_policy_attachment" "batch_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_iam_role_policy_attachment" "elastic_container_service_role" {
role = aws_iam_role.batch_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# Security Group for batch processing
resource "aws_security_group" "batch_security_group" {
name = "batch_security_group"
description = "AWS Batch Security Group for batch jobs"
vpc_id = var.vpc_id
egress {
from_port = 0
to_port = 65535
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
created-by = "Terraform"
}
}
# IAM Role for underlying EC2 instances
resource "aws_iam_role" "ec2_role" {
name = "ec2_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# Assign the EC2 role to the EC2 profile
resource "aws_iam_instance_profile" "ec2_profile" {
name = "ec2_profile"
role = aws_iam_role.ec2_role.name
}
# Attach the EC2 container service policy to the EC2 role
resource "aws_iam_role_policy_attachment" "ec2_policy_attachment" {
role = aws_iam_role.ec2_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
# IAM Role for jobs
resource "aws_iam_role" "job_role" {
name = "job_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement":
[
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
}
}
]
}
EOF
tags = {
created-by = "Terraform"
}
}
# S3 read/write policy
resource "aws_iam_policy" "s3_policy" {
name = "s3_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:Get*",
"s3:List*",
"s3:Put*"
],
"Resource": [
"arn:aws:s3:::${var.mounted_storage_bucket}",
"arn:aws:s3:::${var.mounted_storage_bucket}/*"
]
}
]
}
EOF
}
resource "aws_iam_policy" "ecs_policy" {
name = "ecs_policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:*"
],
"Resource": [
"*"
]
}
]
}
EOF
}
# Attach the policy to the job role
resource "aws_iam_role_policy_attachment" "job_policy_attachment_s3" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.s3_policy.arn
}
resource "aws_iam_role_policy_attachment" "job_policy_attachment_ecs" {
role = aws_iam_role.job_role.name
policy_arn = aws_iam_policy.ecs_policy.arn
}
resource "aws_fsx_lustre_file_system" "storage" {
storage_capacity = 1200
subnet_ids = [var.subnet_id]
deployment_type = "PERSISTENT_2"
per_unit_storage_throughput = 250
}
resource "aws_fsx_data_repository_association" "storage_association" {
file_system_id = aws_fsx_lustre_file_system.storage.id
data_repository_path = "s3://${var.mounted_storage_bucket}"
file_system_path = "/data/fsx"
s3 {
auto_export_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
auto_import_policy {
events = ["NEW", "CHANGED", "DELETED"]
}
}
}
resource "aws_launch_template" "launch_template" {
name = "launch_template"
update_default_version = true
user_data = base64encode(<<EOF
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
--==MYBOUNDARY==
Content-Type: text/cloud-config; charset="us-ascii"
runcmd:
- region=${var.region}
- amazon-linux-extras install -y lustre2.10
- mkdir -p /data/fsx
- mount -t lustre ${aws_fsx_lustre_file_system.storage.dns_name}@tcp:/fsx /data/fsx
--==MYBOUNDARY==--
EOF
)
}
// ==========================================================
// Batch setup
// - compute environment
// - job queue
// ----------------------------------------------------------
resource "aws_iam_role" "ecs_instance_role" {
name = "ecs_instance_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "ecs_instance_role" {
role = "${aws_iam_role.ecs_instance_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_instance_role" {
name = "ecs_instance_role"
role = "${aws_iam_role.ecs_instance_role.name}"
}
resource "aws_iam_role" "aws_batch_service_role" {
name = "aws_batch_service_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "batch.amazonaws.com"
}
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "aws_batch_service_role" {
role = "${aws_iam_role.aws_batch_service_role.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
}
resource "aws_batch_compute_environment" "batch_environment" {
compute_environment_name = var.compute_environment_name
compute_resources {
instance_role = "${aws_iam_instance_profile.ecs_instance_role.arn}"
launch_template {
launch_template_name = aws_launch_template.launch_template.name
version = "$Latest"
}
instance_type = [
"c6g.large",
"c6g.xlarge",
"c6g.2xlarge",
"c6g.4xlarge",
"c6g.8xlarge",
"c6g.12xlarge"
]
max_vcpus = 16
min_vcpus = 0
security_group_ids = [
aws_security_group.batch_security_group.id,
]
subnets = [
var.subnet_id
]
type = "EC2"
}
service_role = "${aws_iam_role.aws_batch_service_role.arn}"
type = "MANAGED"
depends_on = [aws_iam_role_policy_attachment.aws_batch_service_role]
tags = {
created-by = "Terraform"
}
}
resource "aws_batch_job_queue" "job_queue" {
name = "job_queue"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.batch_environment.arn
]
depends_on = [aws_batch_compute_environment.batch_environment]
tags = {
created-by = "Terraform"
}
}
output "batch_compute_environment_id" {
value = aws_batch_compute_environment.batch_environment.id
}
output "batch_job_queue_id" {
value = aws_batch_job_queue.job_queue.id
}
output "batch_storage_mount_target" {
value = aws_fsx_lustre_file_system.storage.arn
}
output "batch_storage_mount_target_mount" {
value = aws_fsx_lustre_file_system.storage.mount_name
}
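Another useful piece of information to post is what Terraform itself recorded for the file system after apply; a small sketch of an output exposing that attribute:
output "batch_storage_deployment_type" {
  # deployment type as stored in state after apply
  value = aws_fsx_lustre_file_system.storage.deployment_type
}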
I am trying to create multiple mount targets for a single file system across 3 subnet IDs with Terraform. While running the code, it creates 2 file systems (the second one without a mount target) and then fails with the error below.
Error: putting EFS file system (fs-0c479ab4b699829d1) lifecycle configuration: InvalidParameter: 1 validation error(s) found.
│ - missing required field, PutLifecycleConfigurationInput.LifecyclePolicies.
Here's my EFS code.
resource "aws_efs_file_system" "efs" {
creation_token = var.efs_token
performance_mode = var.efs_performance_mode
encrypted = true
lifecycle_policy {
transition_to_ia = "AFTER_30_DAYS"
}
tags = local.mandatory_tags
}
resource "aws_efs_mount_target" "efs_mount" {
count = length(data.aws_subnets.private.*.id)
depends_on = [aws_efs_file_system.efs]
file_system_id = "${aws_efs_file_system.efs.id}"
subnet_id = "${element(data.aws_subnets.private.*.id, count.index)}"
}
resource "aws_efs_backup_policy" "efs_backup_policy" {
file_system_id = "${aws_efs_file_system.efs.id}"
backup_policy {
status = "ENABLED"
}
}
This is the EFS file system policy:
resource "aws_efs_file_system_policy" "policy" {
file_system_id = aws_efs_file_system.efs.id
bypass_policy_lockout_safety_check = true
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "ExamplePolicy01",
"Statement": [
{
"Sid": "ExampleStatement01",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Resource": "${aws_efs_file_system.efs.arn}",
"Action": [
"elasticfilesystem:ClientMount",
"elasticfilesystem:ClientWrite"
],
"Condition": {
"Bool": {
"aws:SecureTransport": "true"
}
}
}
]
}
POLICY
}
data.tf
data "aws_vpc" "this" {
filter {
name = "tag:Name"
values = ["${local.name_prefix}-vpc"]
}
}
data "aws_subnets" "private" {
count = length(data.aws_availability_zones.azs.names)
filter {
name = "tag:Name"
values = ["${local.name_prefix}-vpc-private*"]
}
filter {
name = "vpc-id"
values = [data.aws_vpc.this.id]
}
}
data "aws_availability_zones" "azs" {}
tfvars
efs_token = "dev-load-test"
efs_performance_mode = "maxIO"
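Separately from the lifecycle error, count here is driven by data.aws_subnets.private.*.id, which counts the data source instances (one per availability zone) rather than the subnets they return, and element(...) then yields a data source ID instead of a subnet ID. A hedged sketch of referencing the returned subnet IDs directly, assuming a single aws_subnets data source without count:
data "aws_subnets" "private" {
  filter {
    name   = "tag:Name"
    values = ["${local.name_prefix}-vpc-private*"]
  }
  filter {
    name   = "vpc-id"
    values = [data.aws_vpc.this.id]
  }
}

resource "aws_efs_mount_target" "efs_mount" {
  # one mount target per private subnet returned by the data source
  count          = length(data.aws_subnets.private.ids)
  file_system_id = aws_efs_file_system.efs.id
  subnet_id      = data.aws_subnets.private.ids[count.index]
}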
I am attempting to create a CI/CD pipeline using AWS CodePipeline to deploy Magento to an EC2 instance, with Terraform provisioning the infrastructure on AWS. After running the pipeline, it fails at the DOWNLOAD_SOURCE phase of the build stage. After much inquiry I realised it has to do with the VPC settings. The AWS documentation says to use private subnet IDs and to present the subnet ARN (not just the subnet ID) in the IAM policy condition. I have done all that, but it is still failing. What do I do?
Here is the codebuild code
resource "aws_codebuild_project" "o4bproject_codebuild" {
name = "${local.name}-codebuild-project"
description = "${local.name}_codebuild_project"
build_timeout = 60
queued_timeout = 480
depends_on = [aws_iam_role.o4bproject_codebuild]
service_role = aws_iam_role.o4bproject_codebuild.arn
artifacts {
type = "CODEPIPELINE"
encryption_disabled = false
name = "${local.name}-codebuild-project"
override_artifact_name = false
packaging = "NONE"
}
cache {
type = "S3"
location = aws_s3_bucket.o4bproject-codebuild.bucket
}
environment {
compute_type = "BUILD_GENERAL1_SMALL"
image = "aws/codebuild/standard:5.0"
type = "LINUX_CONTAINER"
image_pull_credentials_type = "CODEBUILD"
environment_variable {
name = "PARAMETERSTORE"
value = aws_ssm_parameter.env.name
type = "PARAMETER_STORE"
}
}
logs_config {
cloudwatch_logs {
group_name = "log-group"
stream_name = "log-stream"
}
s3_logs {
status = "ENABLED"
location = "${aws_s3_bucket.o4bproject-codebuild.bucket}/build-log"
}
}
source {
type = "CODEPIPELINE"
buildspec = file("${abspath(path.root)}/buildspec.yml")
location = REDACTED
git_clone_depth = 1
git_submodules_config {
fetch_submodules = true
}
}
source_version = "master"
vpc_config {
vpc_id = aws_vpc.o4bproject.id
subnets = [
aws_subnet.o4bproject-private[0].id,
aws_subnet.o4bproject-private[1].id,
]
security_group_ids = [
aws_security_group.o4bproject_dev_ec2_private_sg.id,
]
}
tags = local.common_tags
}
Here is the codebuild role and policy
resource "aws_iam_role" "o4bproject_codebuild" {
name = "${local.name}-codebuild-role"
description = "Allows CodeBuild to call AWS services on your behalf."
path = "/service-role/"
assume_role_policy = jsonencode(
{
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "codebuild.amazonaws.com"
}
Sid = ""
},
]
Version = "2012-10-17"
}
)
tags = local.common_tags
}
resource "aws_iam_role_policy" "o4bproject_codebuild" {
role = aws_iam_role.o4bproject_codebuild.name
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowCodebuildCreateLogs",
"Effect": "Allow",
"Resource": [
"${aws_cloudwatch_log_group.o4bproject-codebuild.name}:*"
],
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
]
},
{
"Sid": "AllowCodeBuildGitActions",
"Effect": "Allow",
"Action": [
"github:GitPull"
],
"Resource": "*"
},
{
"Sid": "AllowCodeBuildGitActions",
"Effect": "Allow",
"Action": [
"github:GitPush"
],
"Resource": "*",
"Condition": {
"StringEqualsIfExists": {
"codecommit:References": [
"refs/heads/build"
]
}
}
},
{
"Sid": "SNSTopicListAccess",
"Effect": "Allow",
"Action": [
"sns:ListTopics",
"sns:GetTopicAttributes"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeDhcpOptions",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeVpcs"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterfacePermission"
],
"Resource": [
"arn:aws:ec2:${var.aws_region}:REDACTED:network-interface/*"
],
"Condition": {
"StringEquals": {
"ec2:Subnet": [
"${aws_subnet.o4bproject-private[0].arn}",
"${aws_subnet.o4bproject-private[1].arn}"
],
"ec2:AuthorizedService": "codebuild.amazonaws.com"
}
}
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"${aws_s3_bucket.o4bproject-codebuild.arn}",
"${aws_s3_bucket.o4bproject-codebuild.arn}/*"
]
}
]
}
POLICY
}
Here is my code for vpc-network
resource "aws_vpc" "o4bproject" {
cidr_block = var.vpc_cidr_block
instance_tenancy = "default"
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = "${local.name}-vpc"
Environment = "dev"
}
}
> resource "aws_subnet" "o4bproject-private" {
count = var.item_count
vpc_id = aws_vpc.o4bproject.id
availability_zone = var.private_subnet_availability_zone[count.index]
cidr_block = var.private_subnet_cidr_block[count.index]
tags = {
Name = "${local.name}-private-subnet-${count.index}"
Environment = "dev"
}
}
> resource "aws_subnet" "o4bproject-public" {
count = var.item_count
vpc_id = aws_vpc.o4bproject.id
availability_zone = var.public_subnet_availability_zone[count.index]
cidr_block = var.public_subnet_cidr_block[count.index]
tags = {
Name = "${local.name}-public-subnet-${count.index}"
Environment = "dev"
}
}
> resource "aws_route_table" "public-route-table" {
vpc_id = aws_vpc.o4bproject.id
tags = {
Name = "${local.name}-public-route-table"
Environment = "dev"
}
}
> resource "aws_route_table" "private-route-table" {
vpc_id = aws_vpc.o4bproject.id
tags = {
Name = "${local.name}-private-route-table"
Environment = "dev"
}
}
> resource "aws_route_table_association" "public-route-association" {
count = var.item_count
route_table_id = aws_route_table.public-route-table.id
subnet_id = aws_subnet.o4bproject-public[count.index].id
}
> resource "aws_route_table_association" "private-route-association" {
count = var.item_count
route_table_id = aws_route_table.private-route-table.id
subnet_id = aws_subnet.o4bproject-private[count.index].id
}
> resource "aws_internet_gateway" "o4bproject-igw" {
vpc_id = aws_vpc.o4bproject.id
tags = {
Name = "${local.name}-igw"
Environment = "dev"
}
}
> resource "aws_route" "public-internet-gateway-route" {
route_table_id = aws_route_table.public-route-table.id
gateway_id = aws_internet_gateway.o4bproject-igw.id
destination_cidr_block = "0.0.0.0/0"
}
> resource "aws_eip" "o4bproject-eip-nat-gateway" {
vpc = true
count = var.item_count
associate_with_private_ip = REDACTED
depends_on = [aws_internet_gateway.o4bproject-igw]
tags = {
Name = "${local.name}-eip-${count.index}"
Environment = "dev"
}
}
> resource "aws_nat_gateway" "o4bproject-nat-gateway" {
allocation_id = aws_eip.o4bproject-eip-nat-gateway[count.index].id
count = var.item_count
subnet_id = aws_subnet.o4bproject-public[count.index].id
depends_on = [aws_eip.o4bproject-eip-nat-gateway]
tags = {
Name = "${local.name}-nat-gw-${count.index}"
Environment = "dev"
}
}
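One thing that stands out in the VPC code: the private route table never gets a default route, so the private subnets that CodeBuild is attached to have no path out through the NAT gateways, which commonly shows up exactly as a DOWNLOAD_SOURCE timeout. A hedged sketch of such a route, reusing the resources above (NAT gateway index 0 is an arbitrary choice here):
resource "aws_route" "private-nat-gateway-route" {
  route_table_id         = aws_route_table.private-route-table.id
  destination_cidr_block = "0.0.0.0/0"
  # send private-subnet traffic out through one of the NAT gateways above
  nat_gateway_id         = aws_nat_gateway.o4bproject-nat-gateway[0].id
}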
The code below works when I create the resources, but I would like to tie each SQS queue to a different S3 bucket. For example, I want CloudTrail_SQS_Management_Event/CloudTrail_DLQ_Management_Event to use a bucket called "management_sqs_bucket", CloudTrail_SQS_Data_Event/CloudTrail_DLQ_Data_Event to use a bucket called "data_sqs_bucket", and the bucket names to be reflected accordingly in the queue policies.
SQS/variables.tf
variable "sqs_queue_name"{
description = "The name of different SQS to be created"
type = string
}
variable "dead_queue_name"{
description = "The name of different Dead Queues to be created"
type = string
}
variable "max_receive_count" {
type = number
}
SQS/iam.tf
data "aws_iam_policy_document" "policy_document"{
statement{
actions = [
"sqs:DeleteMessage",
"sqs:GetQueueUrl",
"sqs:ReceiveMessage",
"sqs:SendMessage",
"sqs:SetQueueAttributes"
]
effect = "Allow"
resources = values(aws_sqs_queue.sqs)[*].arn
}
}
resource "aws_sqs_queue_policy" "Cloudtrail_SQS_Policy" {
queue_url = aws_sqs_queue.CloudTrail_SQS.id
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": [
{
"Sid": "AllowSQSInvocation",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": "sqs:*",
"Resource": "${aws_sqs_queue.CloudTrail_SQS.arn}",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
}
}
}
]
}
POLICY
}
resource "aws_sqs_queue_policy" "CloudTrail_SQS_DLQ"{
queue_url = aws_sqs_queue.CloudTrail_SQS_DLQ.id
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": [
{
"Sid": "DLQ Policy",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": "sqs:*",
"Resource": "${aws_sqs_queue.CloudTrail_SQS_DLQ.arn}",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
}
}
}
]
}
POLICY
}
SQS/main.tf
resource "aws_sqs_queue" "sqs" {
name = var.sqs_queue_name
redrive_policy = jsonencode({
deadLetterTargetArn = aws_sqs_queue.dlq.arn
maxReceiveCount = var.max_receive_count
})
}
resource "aws_sqs_queue" "dlq" {
name = var.dead_queue_name
}
SQS/output.tf
output "sqs_queue_id"{
value = values(aws_sqs_queue.sqs)[*].id
description = "The URL for the created Amazon SQS queue."
}
output "sqs_queue_arn" {
value = values(aws_sqs_queue.sqs)[*].arn
description = "The ARN of the SQS queue."
}
variable.tf
variable "queue_names" {
default = [
{
sqs_name = "CloudTrail_SQS_Management_Event"
dlq_name = "CloudTrail_DLQ_Management_Event"
},
{
sqs_name = "CloudTrail_SQS_Data_Event"
dlq_name = "CloudTrail_DLQ_Data_Event"
}
]
}
module "sqs_queue" {
source = "../SQS"
for_each = {
for sqs, dlq in var.queue_names : sqs => dlq
}
sqs_queue_name = each.value.sqs_name
dead_queue_name = each.value.dlq_name
max_receive_count = var.max_receive_count
}
From what I understand, I believe this is what you want to do:
variables.tf:
variable "queue_names" {
default = [
{
sqs_name = "CloudTrail_SQS_Management_Event"
dlq_name = "CloudTrail_DLQ_Management_Event"
bucket_name = "management_sqs_bucket"
},
{
sqs_name = "CloudTrail_SQS_Data_Event"
dlq_name = "CloudTrail_DLQ_Data_Event"
bucket_name = "data_sqs_bucket"
}
]
}
main.tf:
module "my_sqs" {
source = "./my_sqs"
for_each = {
for q in var.queue_names : q.sqs_name => q
}
sqs_queue_name = each.value.sqs_name
dead_queue_name = each.value.dlq_name
max_receive_count = 4
cloudtrail_event_log_bucket_name = each.value.bucket_name
}
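For the new argument to be accepted, the SQS module itself also needs to declare the input; a small sketch, using the variable name already referenced in the queue policies:
variable "cloudtrail_event_log_bucket_name" {
  description = "Name of the S3 bucket allowed to publish to this queue"
  type        = string
}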
Also, I see some code duplication in aws_sqs_queue_policy, because you have both the SQS queue and the DLQ. This can be refactored to something like this:
iam.tf:
data "aws_iam_policy_document" "policy_document" {
statement {
actions = [
"sqs:DeleteMessage",
"sqs:GetQueueUrl",
"sqs:ReceiveMessage",
"sqs:SendMessage",
"sqs:SetQueueAttributes"
]
effect = "Allow"
resources = aws_sqs_queue.sqs[*].arn
}
}
locals {
queue_data = [
{
id = aws_sqs_queue.sqs.id
arn = aws_sqs_queue.sqs.arn
},
{
id = aws_sqs_queue.dlq.id
arn = aws_sqs_queue.dlq.arn
}
]
}
resource "aws_sqs_queue_policy" "sqs_policy" {
# This can be achieved with for_each similar to what we have in main.tf, but I did not want to complicate it
count = length(local.queue_data)
queue_url = local.queue_data[count.index].id
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": [
{
"Sid": "AllowSQSInvocation",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": "sqs:*",
"Resource": "${local.queue_data[count.index].arn}",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
}
}
}
]
}
POLICY
}
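As the comment above notes, the same pair of policies can also be expressed with for_each instead of count; a sketch of that variant, using static keys so the plan does not depend on computed ARNs:
resource "aws_sqs_queue_policy" "sqs_policy" {
  for_each = {
    sqs = aws_sqs_queue.sqs
    dlq = aws_sqs_queue.dlq
  }
  queue_url = each.value.id
  policy    = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "sqspolicy",
  "Statement": [
    {
      "Sid": "AllowSQSInvocation",
      "Effect": "Allow",
      "Principal": {"AWS":"*"},
      "Action": "sqs:*",
      "Resource": "${each.value.arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
        }
      }
    }
  ]
}
POLICY
}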
I am struggling to get an AWS Lambda function to connect to an AWS Elasticsearch cluster.
I have the AWS Lambda function defined as follows:
resource "aws_lambda_function" "fun1" {
function_name = "fun1"
role = aws_iam_role.ia0.arn
vpc_config {
security_group_ids = local.security_group_ids
subnet_ids = local.subnet_ids
}
environment {
variables = {
ELASTICSEARCH_ENDPOINT = "https://${aws_elasticsearch_domain.es.endpoint}"
}
}
}
resource "aws_iam_role" "ia0" {
name = "lambda-exec-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "lambda_logs" {
role = aws_iam_role.ia0.id
policy_arn = aws_iam_policy.lambda_logging.arn
}
data "aws_iam_policy" "AWSLambdaBasicExecutionRole" {
arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
}
resource "aws_iam_role_policy_attachment" "AWSLambdaBasicExecutionRole" {
role = aws_iam_role.ia0.id
policy_arn = data.aws_iam_policy.AWSLambdaBasicExecutionRole.arn
}
data "aws_iam_policy" "AWSLambdaVPCAccessExecutionRole" {
arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
}
resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" {
role = aws_iam_role.ia0.id
policy_arn = data.aws_iam_policy.AWSLambdaVPCAccessExecutionRole.arn
}
My VPC is defined like this:
locals {
security_group_ids = [aws_security_group.sg0.id]
subnet_ids = [aws_subnet.private_a.id, aws_subnet.private_b.id]
}
resource "aws_vpc" "vpc0" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
}
resource "aws_subnet" "private_a" {
vpc_id = aws_vpc.vpc0.id
cidr_block = cidrsubnet(aws_vpc.vpc0.cidr_block, 2, 1)
availability_zone = "eu-west-3a"
}
resource "aws_subnet" "private_b" {
vpc_id = aws_vpc.vpc0.id
cidr_block = cidrsubnet(aws_vpc.vpc0.cidr_block, 2, 2)
availability_zone = "eu-west-3b"
}
resource "aws_security_group" "sg0" {
vpc_id = aws_vpc.vpc0.id
}
Finally, my cluster looks like this:
resource "aws_elasticsearch_domain" "es" {
domain_name = "es"
elasticsearch_version = "7.9"
cluster_config {
instance_count = 2
zone_awareness_enabled = true
instance_type = "t2.small.elasticsearch"
}
domain_endpoint_options {
enforce_https = true
tls_security_policy = "Policy-Min-TLS-1-2-2019-07"
}
ebs_options {
ebs_enabled = true
volume_size = 10
}
vpc_options {
security_group_ids = local.security_group_ids
subnet_ids = local.subnet_ids
}
}
resource "aws_iam_role_policy" "rp0" {
name = "rp0"
role = aws_iam_role.ia0.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"es:*"
],
"Resource": [
"${aws_elasticsearch_domain.es.arn}",
"${aws_elasticsearch_domain.es.arn}/*"
],
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"${aws_subnet.private_a.cidr_block}",
"${aws_subnet.private_b.cidr_block}"
]
}
}
},
{
"Effect": "Allow",
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcAttribute",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:DeleteNetworkInterface"
],
"Resource": [
"*"
]
}
]
}
EOF
}
Despite that, I still get this response:
Response
{ responseStatus = Status {statusCode = 403, statusMessage = "Forbidden"}
, responseVersion = HTTP/1.1
, responseHeaders =
[("Date","xxx")
,("Content-Type","application/json")
,("Content-Length","72")
,("Connection","keep-alive")
,("x-amzn-RequestId","xxx")
,("Access-Control-Allow-Origin","*")
]
, responseBody = "{\"Message\":\"User: anonymous is not authorized to perform: es:ESHttpPut\"}"
, responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose
}
According to the AWS documentation, using CIDR blocks should be sufficient, but in practice something is missing.
Thanks in advance for your help.
You need to sign the request before making the HTTP call, so Elasticsearch can tell who is initiating the request. I don't know which programming language you are using; here is what we can do in Node.js.
For a simple HTTP call:
let request = new (AWS as any).HttpRequest(endpoint, 'us-east-1');
let credentials = new AWS.EnvironmentCredentials('AWS');
let signers = new (AWS as any).Signers.V4(request, 'es');
signers.addAuthorization(credentials, new Date());
If you are using a package like @elastic/elasticsearch, you can combine it with http-aws-es to create a client that signs the requests; it might look something like this:
let options = {
hosts: [ yourHost ],
connectionClass: require('http-aws-es'),
awsConfig: new AWS.Config({ region: 'us-east-1', credentials: new AWS.EnvironmentCredentials('AWS') })
};
client = require('elasticsearch').Client(options);
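On the Terraform side, the domain also has to allow the signing principal: the 403 for "anonymous" means the request arrived unsigned, and once requests are signed, the domain's access policy decides whether the signing role is allowed. A hedged sketch of a domain access policy granting the Lambda role defined earlier:
resource "aws_elasticsearch_domain_policy" "es_access" {
  domain_name     = aws_elasticsearch_domain.es.domain_name
  access_policies = <<POLICIES
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "AWS": "${aws_iam_role.ia0.arn}"
      },
      "Action": "es:*",
      "Resource": "${aws_elasticsearch_domain.es.arn}/*"
    }
  ]
}
POLICIES
}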