Creating multiple SQS queues from a main.tf configuration

I have just updated my question to include the terragrunt.hcl that calls my main.tf to create the resources in different environments. I don't know how to replace the resources section of the policy that contains ${aws_sqs_queue.Trail_SQS.arn}, because I need different names in there depending on the environment I am working in, and I also don't know how to represent the redrive_policy in my terragrunt.hcl. Please, I need some help. Thanks in advance.
Main.tf
resource "aws_sqs_queue" "Trail_SQS"{
name = var.aws_sqs
visibility_timeout_seconds = var.visibility_timeout_seconds
max_message_size = var.max_message_size
message_retention_seconds = var.message_retention_seconds
delay_seconds = var.delay_seconds
receive_wait_time_seconds = var.receive_wait_time_seconds
redrive_policy = jsonencode({
deadLetterTargetArn = aws_sqs_queue.Trail_SQS_DLQ.arn
maxReceiveCount = var.max_receive_count
})
}
resource "aws_sqs_queue" "Trail_SQS_DLQ"{
name = var.dead_letter_queue
visibility_timeout_seconds = var.visibility_timeout_seconds
max_message_size = var.max_message_size
message_retention_seconds = var.message_retention_seconds
delay_seconds = var.delay_seconds
receive_wait_time_seconds = var.receive_wait_time_seconds
}
resource "aws_iam_role" "ronix_access_role" {
name = var.role_name
description = var.description
assume_role_policy = data.aws_iam_policy_document.trust_relationship.json
}
data "aws_iam_policy_document" "ronix_policy_document"{
statement{
actions = [
"sqs:DeleteMessage",
"sqs:GetQueueUrl",
"sqs:ReceiveMessage",
"sqs:SendMessage",
"sqs:SetQueueAttributes"
]
effect = "Allow"
resources =[
"${aws_sqs_queue.Trail_SQS.arn}"
] }
resource "aws_iam_policy" "ronix_policy" {
name = "ronix_access_policy"
description = "ronix policy to access SQS"
policy = data.aws_iam_policy_document.securonix_policy_document.json
resource "aws_iam_role_policy_attachment" "ronix_policy_attachment" {
policy_arn = aws_iam_policy.ronix_policy.arn
role = aws_iam_role.ronix_access_role.id
}
resource "aws_sqs_queue_policy" "trail_SQS_Policy" {
queue_url = aws_sqs_queue.Trail_SQS.id
policy = <<POLICY
{ "Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": [
{
"Sid": "AllowSQSInvocation",
"Effect": "Allow",
"Principal": {"AWS":"*"},
"Action": "sqs:*",
"Resource": "${aws_sqs_queue.Trail_SQS.arn}"
Terragrunt.hcl to call main.tf
terraform {
  source = "../../../../..//module"
}

include {
  path = find_in_parent_folders()
}

inputs = {
  event_log_bucket_name  = "trailbucket-sqs-logs"
  aws_sqs_queue_name     = "Trail_SQS"
  dead_letter_queue_name = "Trail_SQS_DLQ"
  role_name              = "ronix_access_role"
  description            = "Role for ronix access"
  kms_key_arn            = "ARN of the key"
}

I don't know your exact setup, but there are a few ways to do it.
1 - Using workspaces.
If you are using Terraform workspaces and have, say, dev and prod workspaces, you can simply do:
locals.tf:
locals {
  env = terraform.workspace
}
sqs.tf:
resource "aws_sqs_queue" "my_sqs" {
  name = "${local.env}-sqs"
  ...
}
This creates one queue per workspace, dev-sqs and prod-sqs, depending on which workspace you are in.
2 - If you are using environment variables in your setup, you need to pass them to Terraform like:
export TF_VAR_ENV=prod
Then your setup will be something like:
variables.tf:
variable "ENV" {
type = string
}
sqs.tf
resource "aws_sqs_queue" "my_sqs" {
name = "${var.ENV}-sqs"
...
}
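Either way, note that the resources section of your policy and the redrive_policy do not have to change per environment: they reference aws_sqs_queue.Trail_SQS.arn and aws_sqs_queue.Trail_SQS_DLQ.arn rather than hard-coded names, so they resolve to whatever name the queue ends up with in that environment. A minimal sketch of the idea, assuming an env variable that your terragrunt.hcl inputs (or TF_VAR_env) would set; the names here are illustrative, not your exact module:
variable "env" {
  type = string
}

resource "aws_sqs_queue" "Trail_SQS_DLQ" {
  name = "${var.env}-Trail_SQS_DLQ"
}

resource "aws_sqs_queue" "Trail_SQS" {
  name = "${var.env}-Trail_SQS"
  # The ARN reference follows the environment-specific DLQ automatically,
  # so nothing environment-specific is needed for the redrive_policy in terragrunt.hcl.
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.Trail_SQS_DLQ.arn
    maxReceiveCount     = 4
  })
}

data "aws_iam_policy_document" "ronix_policy_document" {
  statement {
    actions = ["sqs:SendMessage", "sqs:ReceiveMessage"]
    effect  = "Allow"
    # Again, no per-environment ARN is needed here; Terraform fills it in at plan time.
    resources = [aws_sqs_queue.Trail_SQS.arn]
  }
}
With Terragrunt you would then just add something like env = "dev" or env = "prod" to the inputs block of each environment's terragrunt.hcl.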


How to configure an S3 bucket to allow an AWS application load balancer (not classic) to use it? Currently throws 'access denied'

I have an application load balancer and I'm trying to enable logging, terraform code below:
resource "aws_s3_bucket" "lb-logs" {
bucket = "yeo-messaging-${var.environment}-lb-logs"
}
resource "aws_s3_bucket_acl" "lb-logs-acl" {
bucket = aws_s3_bucket.lb-logs.id
acl = "private"
}
resource "aws_lb" "main" {
name = "main"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.public.id]
enable_deletion_protection = false
subnets = [aws_subnet.public.id, aws_subnet.public-backup.id]
access_logs {
bucket = aws_s3_bucket.lb-logs.bucket
prefix = "main-lb"
enabled = true
}
}
Unfortunately I can't apply this due to:
Error: failure configuring LB attributes: InvalidConfigurationRequest: Access Denied for bucket: xxx-lb-logs. Please check S3bucket permission
│ status code: 400, request id: xx
I've seen a few SO threads and documentation, but unfortunately it all applies to the classic load balancer, particularly the 'data' source that lets you look up the service account of the load balancer.
I have found some policy examples for granting the right permissions to a service account, but I can't seem to find how to apply the service account to the LB itself.
Example:
data "aws_iam_policy_document" "allow-lb" {
statement {
principals {
type = "AWS"
identifiers = [data.aws_elb_service_account.main.arn]
}
actions = [
"s3:GetObject",
"s3:ListBucket",
"s3:PutObject"
]
resources = [
aws_s3_bucket.lb-logs.arn,
"${aws_s3_bucket.lb-logs.arn}/*",
]
}
}
resource "aws_s3_bucket_policy" "allow-lb" {
bucket = aws_s3_bucket.lb-logs.id
policy = data.aws_iam_policy_document.allow-lb.json
}
But this is all moot because data.aws_elb_service_account.main.arn is only for classic LB.
EDIT:
Full code with attempt from answer below:
resource "aws_s3_bucket" "lb-logs" {
bucket = "yeo-messaging-${var.environment}-lb-logs"
}
resource "aws_s3_bucket_acl" "lb-logs-acl" {
bucket = aws_s3_bucket.lb-logs.id
acl = "private"
}
data "aws_iam_policy_document" "allow-lb" {
statement {
principals {
type = "Service"
identifiers = ["logdelivery.elb.amazonaws.com"]
}
actions = [
"s3:PutObject"
]
resources = [
"${aws_s3_bucket.lb-logs.arn}/*"
]
condition {
test = "StringEquals"
variable = "s3:x-amz-acl"
values = [
"bucket-owner-full-control"
]
}
}
}
resource "aws_s3_bucket_policy" "allow-lb" {
bucket = aws_s3_bucket.lb-logs.id
policy = data.aws_iam_policy_document.allow-lb.json
}
resource "aws_lb" "main" {
name = "main"
internal = false
load_balancer_type = "application"
security_groups = [aws_security_group.public.id]
enable_deletion_protection = false
subnets = [aws_subnet.public.id, aws_subnet.public-backup.id]
access_logs {
bucket = aws_s3_bucket.lb-logs.bucket
prefix = "main-lb"
enabled = true
}
}
The bucket policy you need to use is provided in the official documentation for access logs on Application Load Balancers.
{
  "Effect": "Allow",
  "Principal": {
    "Service": "logdelivery.elb.amazonaws.com"
  },
  "Action": "s3:PutObject",
  "Resource": "arn:aws:s3:::bucket-name/prefix/AWSLogs/your-aws-account-id/*",
  "Condition": {
    "StringEquals": {
      "s3:x-amz-acl": "bucket-owner-full-control"
    }
  }
}
Notice that bucket-name, prefix, and your-aws-account-id need to be replaced in that policy with your actual values.
In Terraform:
data "aws_iam_policy_document" "allow-lb" {
statement {
principals {
type = "Service"
identifiers = ["logdelivery.elb.amazonaws.com"]
}
actions = [
"s3:PutObject"
]
resources = [
"${aws_s3_bucket.lb-logs.arn}/*"
]
condition {
test = "StringEquals"
variable = "s3:x-amz-acl"
values = [
"bucket-owner-full-control"
]
}
}
}
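The data source above grants PutObject on the whole bucket. If you want the Terraform version to mirror the documented policy exactly, scoped to the prefix and your account ID, a sketch along these lines should work; it assumes the main-lb prefix used in the aws_lb block above and uses the aws_caller_identity data source to look up the account ID:
data "aws_caller_identity" "current" {}

data "aws_iam_policy_document" "allow-lb" {
  statement {
    principals {
      type        = "Service"
      identifiers = ["logdelivery.elb.amazonaws.com"]
    }
    actions = ["s3:PutObject"]
    # Restrict the grant to bucket/prefix/AWSLogs/<account-id>/*, as in the documented policy.
    resources = [
      "${aws_s3_bucket.lb-logs.arn}/main-lb/AWSLogs/${data.aws_caller_identity.current.account_id}/*"
    ]
    condition {
      test     = "StringEquals"
      variable = "s3:x-amz-acl"
      values   = ["bucket-owner-full-control"]
    }
  }
}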

Error: error putting S3 bucket (s3-bucket-master-xxxxxx) logging: CrossLocationLoggingProhibitted: Cross S3 location logging not allowed

I am creating two S3 buckets to keep logs, and I want SRR (Same-Region Replication). While I am not very familiar with the S3 service, my code has worked except for the last step: adding logging to make it tfsec and checkov compliant.
s3.tf
resource "aws_iam_role" "iam_role_replication" {
name = "tf-iam-role-replication-12345"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
POLICY
}
resource "aws_iam_policy" "iam_policy_replication" {
name = "tf-iam-role-policy-replication-12345"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.s3_bucket_master.arn}"
]
},
{
"Action": [
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersionAcl",
"s3:GetObjectVersionTagging"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.s3_bucket_master.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateDelete",
"s3:ReplicateTags"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.s3_bucket_slave.arn}/*"
}
]
}
POLICY
}
resource "aws_iam_role_policy_attachment" "replication" {
role = aws_iam_role.iam_role_replication.name
policy_arn = aws_iam_policy.iam_policy_replication.arn
}
resource "aws_s3_bucket" "s3_bucket_slave" {
bucket_prefix = "s3-bucket-slave-"
}
resource "aws_s3_bucket_server_side_encryption_configuration" "s3_bucket_slave_sse_config" {
bucket = aws_s3_bucket.s3_bucket_slave.bucket
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.kms_key.arn
sse_algorithm = "aws:kms"
}
}
}
resource "aws_s3_bucket_versioning" "s3_bucket_slave_versioning" {
bucket = aws_s3_bucket.s3_bucket_slave.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket" "s3_bucket_master" {
provider = aws.apac
bucket_prefix = "s3-bucket-master-"
}
resource "aws_s3_bucket_server_side_encryption_configuration" "s3_bucket_master_sse_config" {
bucket = aws_s3_bucket.s3_bucket_master.bucket
provider = aws.apac
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = aws_kms_key.kms_key.arn
sse_algorithm = "aws:kms"
}
}
}
resource "aws_s3_bucket_versioning" "s3_bucket_master_versioning" {
provider = aws.apac
bucket = aws_s3_bucket.s3_bucket_master.id
versioning_configuration {
status = "Enabled"
}
}
resource "aws_s3_bucket_replication_configuration" "s3_bucket_master_replication" {
provider = aws.apac
# Must have bucket versioning enabled first
depends_on = [aws_s3_bucket_versioning.s3_bucket_master_versioning]
role = aws_iam_role.iam_role_replication.arn
bucket = aws_s3_bucket.s3_bucket_master.id
rule {
id = "foobar"
delete_marker_replication {
status = "Disabled"
}
filter {
prefix = "foo"
}
status = "Enabled"
destination {
bucket = aws_s3_bucket.s3_bucket_slave.arn
storage_class = "STANDARD"
}
}
}
resource "aws_s3_bucket_acl" "s3_bucket_master_acl" {
bucket = aws_s3_bucket.s3_bucket_master.id
acl = "private"
provider = aws.apac
}
resource "aws_s3_bucket_acl" "s3_bucket_slave_acl" {
bucket = aws_s3_bucket.s3_bucket_slave.id
acl = "log-delivery-write"
}
resource "aws_s3_bucket_public_access_block" "s3_bucket_master_public_access" {
provider = alias.apac
bucket = aws_s3_bucket.s3_bucket_master.id
restrict_public_buckets = true
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
}
resource "aws_s3_bucket_public_access_block" "s3_bucket_slave_public_access" {
bucket = aws_s3_bucket.s3_bucket_slave.id
restrict_public_buckets = true
block_public_acls = true
block_public_policy = true
ignore_public_acls = true
}
resource "aws_s3_bucket_logging" "example" {
provider = alias.apac
bucket = aws_s3_bucket.s3_bucket_master.id
target_bucket = aws_s3_bucket.s3_bucket_slave.id
target_prefix = "log/"
}
provider.tf
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "4.22.0"
}
null = {
source = "hashicorp/null"
version = "3.1.1"
}
alias = {
source = "hashicorp/aws"
version = "4.22.0"
}
}
}
provider "null" {
# Configuration options
}
# Configure AWS provider:
provider "aws" {
region = "ap-southeast-2"
}
provider "aws" {
alias = "apac"
region = "ap-southeast-1"
}
kms.tf
resource "aws_kms_key" "kms_key" {
description = "This key is used to encrypt bucket objects"
deletion_window_in_days = 10
enable_key_rotation = true
}
Unfortunately, I am getting errors like the below:
╷
│ Warning: Duplicate required provider
│
│ on provider.tf line 4, in terraform:
│ 4: aws = {
│ 5: source = "hashicorp/aws"
│ 6: version = "4.22.0"
│ 7: }
│
│ Provider hashicorp/aws with the local name "aws" was previously required as "alias". A provider can only be required once within required_providers.
│
│ (and one more similar warning elsewhere)
╵
╷
│ Error: error putting S3 bucket (s3-bucket-master-20220712130829925900000001) logging: CrossLocationLoggingProhibitted: Cross S3 location logging not allowed.
│ status code: 403, request id: 85528RE6KMQJDMJM, host id: 3cpcDdHT3Wl442f7L/x3VLCp26wCghaIPTwKKhnWLOmsTW4cSI9f5pFROHr7q4fDLQJMyfNBZIA=
│
│ with aws_s3_bucket_logging.example,
│ on s3.tf line 166, in resource "aws_s3_bucket_logging" "example":
│ 166: resource "aws_s3_bucket_logging" "example" {
│
╵
What I am seeking help with from more experienced people is:
Which part of the code do I need to delete to get SRR working, and thereby resolve the error?
Any idea how to suppress the warning for alias?
Do note that if I remove the section
alias = {
source = "hashicorp/aws"
version = "4.22.0"
}
I get errors like the below from terraform init:
│ Error: Failed to query available provider packages
│
│ Could not retrieve the list of available versions for provider hashicorp/alias: provider registry registry.terraform.io does not have a provider named
│ registry.terraform.io/hashicorp/alias
│
│ All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which
│ modules are currently depending on hashicorp/alias, run the following command:
│ terraform providers
╵
terraform providers
Providers required by configuration:
.
├── provider[registry.terraform.io/hashicorp/aws] 4.22.0
├── provider[registry.terraform.io/hashicorp/null] 3.1.1
└── provider[registry.terraform.io/hashicorp/alias]
Providers required by state:
provider[registry.terraform.io/hashicorp/aws]
You don't need to define the aws provider twice in the required_providers block; it's enough to define it only once:
terraform {
  required_version = ">= 1.0"
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "4.22.0"
    }
    null = {
      source  = "hashicorp/null"
      version = "3.1.1"
    }
  }
}
Then you need to remove the .terraform directory prior to re-running terraform init, to make sure you have a clean slate. Lastly, if you want SRR and not CRR, make sure both buckets use the same provider, either the aliased or the non-aliased one, but not a mix of the two.
So you are currently defining one of the buckets as:
resource "aws_s3_bucket" "s3_bucket_slave" {
bucket_prefix = "s3-bucket-slave-"
}
And the second one as:
resource "aws_s3_bucket" "s3_bucket_master" {
provider = aws.apac # <---- note the aliased provider, hence a different region
bucket_prefix = "s3-bucket-master-"
}
In order to fix that, either remove the aliased provider from the second bucket or add it to the first one. As your current configuration is using aws.apac in more places than not, I would suggest adding the aliased provider to the first bucket if the region is not important:
resource "aws_s3_bucket" "s3_bucket_slave" {
provider = aws.apac
bucket_prefix = "s3-bucket-slave-"
}
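Once both buckets live in the same region, the aws_s3_bucket_logging resource that raised CrossLocationLoggingProhibitted should go through. A sketch of how it would then look, assuming you keep both buckets on the aliased aws.apac provider; note the provider reference should be aws.apac rather than alias.apac once the extra required_providers entry is removed:
resource "aws_s3_bucket_logging" "example" {
  provider      = aws.apac
  bucket        = aws_s3_bucket.s3_bucket_master.id
  # Both buckets are now in the same region, so same-region logging is allowed.
  target_bucket = aws_s3_bucket.s3_bucket_slave.id
  target_prefix = "log/"
}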

AWS S3 replication fails without explicit error

I'm trying to set up one-way replication between two accounts using this guide. Here's the relevant code for the source account:
data "aws_iam_policy_document" "s3-replication-trust" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["s3.amazonaws.com"]
}
effect = "Allow"
}
}
resource "aws_iam_role" "s3-replication-prod" {
count = var.env == "prod" ? 1 : 0 # only apply in prod account
name = "s3-replication-role-prod"
path = "/"
assume_role_policy = data.aws_iam_policy_document.s3-replication-trust.json
}
data "aws_iam_policy_document" "s3-replication-prod" {
count = var.env == "prod" ? 1 : 0 # only apply in prod account
statement {
actions = [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
]
effect = "Allow"
resources = [ aws_s3_bucket.source.arn ]
}
statement {
actions = [
"s3:GetObjectVersionForReplication",
"s3:GetObjectVersion",
"s3:GetObjectVersionAcl",
"s3:GetObjectVersionTagging"
]
effect = "Allow"
resources = [ "${aws_s3_bucket.source.arn}/*" ]
}
statement {
actions = [
"s3:ReplicateObject",
"s3:ReplicateDelete",
"s3:ReplicateTags"
]
effect = "Allow"
resources = [ "${aws_s3_bucket.destination.arn}/*" ]
}
}
resource "aws_iam_policy" "s3-replication-prod" {
count = var.env == "prod" ? 1 : 0 # only apply in prod account
name = "s3-replication"
path = "/"
policy = data.aws_iam_policy_document.s3-replication-prod[0].json
}
resource "aws_iam_role_policy_attachment" "s3-replication-prod" {
count = var.env == "prod" ? 1 : 0 # only apply in prod account
role = aws_iam_role.s3-replication-prod[0].name
policy_arn = aws_iam_policy.s3-replication-prod[0].arn
}
resource "aws_s3_bucket_replication_configuration" "replication" {
count = var.env == "prod" ? 1 : 0 # only apply in prod account
# Must have bucket versioning enabled first
depends_on = [aws_s3_bucket_versioning.source]
role = aws_iam_role.s3-replication-prod[0].arn
bucket = aws_s3_bucket.source.id
rule {
id = "ReplicateToDev"
status = "Enabled"
destination {
bucket = aws_s3_bucket.destination.arn
storage_class = "ONEZONE_IA"
access_control_translation {
owner = "Destination"
}
account = var.destination_account_id
}
}
}
And here's the code relevant to the destination account:
data "aws_iam_policy_document" "destination_bucket_policy" {
statement {
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${var.prod_account_id}:role/s3-replication-role-prod"
]
}
actions = [
"s3:ReplicateDelete",
"s3:ReplicateObject"
]
resources = ["${aws_s3_bucket.destination.arn}/*"]
}
statement {
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${var.prod_account_id}:role/s3-replication-role-prod"
]
}
actions = [
"s3:List*",
"s3:GetBucketVersioning",
"s3:PutBucketVersioning"
]
resources = [aws_s3_bucket.destination.arn]
}
}
resource "aws_s3_bucket_policy" "s3-replication-dev" {
count = var.env == "dev" ? 1 : 0 # only apply in dev account
bucket = "${var.app}-dev"
policy = data.aws_iam_policy_document.destination_bucket_policy.json
}
When I try to add any new object to the source bucket, it is unable to replicate. When I navigate to the object's listing in the console, it shows replication status "FAILED".
There are no obvious errors showing up in CloudTrail.
What am I doing wrong here?

How do I capture AWS Backup failures in terraform when Windows VSS fails?

I'm using AWS Backup to back up several EC2 instances. I have Terraform that seems to report correctly when there is a backup failure, but I am also interested in the case where the disks back up correctly but Windows VSS fails. Ultimately, the failed events are going to be published to Opsgenie. Is there a way to accomplish this? I have tried capturing all events with the 'aws_backup_vault_notifications' resource, and I have tried a filter as described in this AWS article: https://aws.amazon.com/premiumsupport/knowledge-center/aws-backup-failed-job-notification/
I have included most of my terraform below, minus the opsgenie module; I can get successful or fully failing events published to Opsgenie just fine if I include those events:
locals {
backup_vault_events = toset(["BACKUP_JOB_FAILED", "COPY_JOB_FAILED"])
}
resource "aws_backup_region_settings" "legacy" {
resource_type_opt_in_preference = {
"Aurora" = false
"DynamoDB" = false
"EFS" = false
"FSx" = false
"RDS" = false
"Storage Gateway" = false
"EBS" = true
"EC2" = true
"DocumentDB" = false
"Neptune" = false
"VirtualMachine" = false
}
}
resource "aws_backup_vault" "legacy" {
name = "Legacy${var.environment_tag}"
kms_key_arn = aws_kms_key.key.arn
}
resource "aws_iam_role" "legacy_backup" {
name = "AWSBackupService"
permissions_boundary = data.aws_iam_policy.role_permissions_boundary.arn
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "allow",
"Principal": {
"Service": ["backup.amazonaws.com"]
}
}
]
}
POLICY
}
resource "aws_iam_role_policy_attachment" "legacy_backup" {
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup"
role = aws_iam_role.legacy_backup.name
}
###############################################################################
## Second Region Backup
###############################################################################
resource "aws_backup_vault" "secondary" {
provider = aws.secondary
name = "Legacy${var.environment_tag}SecondaryRegion"
kms_key_arn = aws_kms_replica_key.secondary_region.arn
tags = merge(
local.tags, {
name = "Legacy${var.environment_tag}SecondaryRegion"
}
)
}
data "aws_iam_policy_document" "backups" {
policy_id = "__default_policy_ID"
statement {
actions = [
"SNS:Publish",
]
effect = "Allow"
principals {
type = "Service"
identifiers = ["backup.amazonaws.com"]
}
resources = [
aws_sns_topic.backup_alerts.arn
]
sid = "__default_statement_ID"
}
}
###############################################################################
# SNS
###############################################################################
resource "aws_sns_topic_policy" "backup_alerts" {
arn = aws_sns_topic.backup_alerts.arn
policy = data.aws_iam_policy_document.backups.json
}
resource "aws_backup_vault_notifications" "backup_alerts" {
backup_vault_name = aws_backup_vault.legacy.id
sns_topic_arn = aws_sns_topic.backup_alerts.arn
backup_vault_events = local.backup_vault_events
}
resource "aws_sns_topic_subscription" "backup_alerts_opsgenie_target" {
topic_arn = aws_sns_topic.backup_alerts.arn
protocol = "https"
endpoint = module.opsgenie_team.sns_integration_sns_endpoint
confirmation_timeout_in_minutes = 1
endpoint_auto_confirms = true
}

How to create an IAM role with a policy that grants access to the SQS queues created

I created 2 SQS queues and their dead-letter queues with the code in my main.tf, which calls the SQS/main.tf module. I would like to destroy and create them again, but this time I also want to call IAM/iam_role.tf to create one IAM role together with the policy documents. I don't know how to specify in my main.tf that the resources section of the policy document should include both CloudTrail SQS queues that are created ("CloudTrail_SQS_Data_Event" and "cloudTrail_SQS_Management_Event"), and that the S3 resource ARNs should give the role access to the two different buckets used for the queues ("cloudtrail-management-event-logs" and "aws-cloudtrail143-sqs-logs").
SQS/main.tf
resource "aws_sqs_queue" "CloudTrail_SQS"{
name = var.sqs_queue_name
redrive_policy = jsonencode({
deadLetterTargetArn = aws_sqs_queue.CloudTrail_SQS_DLQ.arn
maxReceiveCount = 4
})
}
resource "aws_sqs_queue" "CloudTrail_SQS_DLQ"{
name = var.dead_queue_name
IAM/iam_role.tf
resource "aws_iam_role" "access_role" {
name = var.role_name
description = var.description
assume_role_policy = data.aws_iam_policy_document.trust_relationship.json
}
trust policy
data "aws_iam_policy_document" "trust_relationship" {
statement {
sid = "AllowAssumeRole"
actions = ["sts:AssumeRole"]
principals {
type = "AWS"
identifiers = [var.account_id]
}
condition {
test = "StringEquals"
variable = "sts:ExternalId"
values = [var.external_id]
}
}
}
data "aws_iam_policy_document" "policy_document"{
statement{
actions = [
"sqs:GetQueueUrl",
"sqs:ReceiveMessage",
"sqs:SendMessage"
]
effect = "Allow"
resources = aws_sqs_queue.CloudTrail_SQS.arn
}
statement {
actions = ["sqs:ListQueues"]
effect = "Allow"
resources = ["*"]
}
statement {
actions = ["s3:GetObject", "s3:GetBucketLocation"]
resources = [
"arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}/*"
]
effect = "Allow"
}
statement {
actions = ["s3:ListBucket"]
resources = [
"arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
]
effect = "Allow"
}
statement {
actions = ["kms:Decrypt", "kms:GenerateDataKey","kms:DescribeKey" ]
effect = "Allow"
resources = [var.kms_key_arn]
}
}
main.tf
module "data_events"{
source = "../SQS"
cloudtrail_event_log_bucket_name = "aws-cloudtrail143-sqs-logs"
sqs_queue_name = "CloudTrail_SQS_Data_Event"
dead_queue_name = "CloudTrail_DLQ_Data_Event"
}
module "management_events"{
source = "../SQS"
cloudtrail_event_log_bucket_name = "cloudtrail-management-event-logs"
sqs_queue_name = "cloudTrail_SQS_Management_Event"
dead_queue_name = "cloudTrail_DLQ_Management_Event"
}
The role would be created as shown below. However, your question has so many mistakes and so much missing information that it is impossible to provide full, working code, so the code below should be treated as a template which you need to adjust for your use.
resource "aws_iam_role" "access_role" {
name = var.role_name
description = var.description
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
inline_policy {
name = "allow-access-to-s3-sqs"
policy = data.aws_iam_policy_document.policy_document.json
}
}
data "aws_iam_policy_document" "policy_document"{
statement{
actions = [
"sqs:GetQueueUrl",
"sqs:ReceiveMessage",
"sqs:SendMessage"
]
effect = "Allow"
resources = [
module.data_events.sqs.arn,
module.management_events.sqs.arn,
]
}
statement {
actions = ["sqs:ListQueues"]
effect = "Allow"
resources = ["*"]
}
statement {
actions = ["s3:GetObject", "s3:GetBucketLocation"]
resources = [
"arn:aws:s3:::aws-cloudtrail143-sqs-logs/*",
"arn:aws:s3:::cloudtrail-management-event-logs/*"
]
effect = "Allow"
}
statement {
actions = ["s3:ListBucket"]
resources = [
"arn:aws:s3:::aws-cloudtrail143-sqs-logs",
"arn:aws:s3:::cloudtrail-management-event-logs"
]
effect = "Allow"
}
statement {
actions = ["kms:Decrypt", "kms:GenerateDataKey","kms:DescribeKey" ]
effect = "Allow"
resources = [var.kms_key_arn]
}
}
You can also use Terraform data between modules for this: in the SQS folder, write outputs for the queues you create, then read those outputs in the IAM folder and use them there.
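A minimal sketch of that approach, assuming an output named sqs is added to SQS/main.tf (the output name is illustrative; it just has to match whatever the root module references, e.g. module.data_events.sqs.arn in the template above):
# SQS/outputs.tf (or at the bottom of SQS/main.tf)
output "sqs" {
  description = "The main queue created by this module"
  value       = aws_sqs_queue.CloudTrail_SQS
}

output "dlq" {
  description = "The dead-letter queue created by this module"
  value       = aws_sqs_queue.CloudTrail_SQS_DLQ
}
With those outputs in place, the root main.tf can pass module.data_events.sqs.arn and module.management_events.sqs.arn into the IAM policy document's resources list, which is exactly what the template above does.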