AWS S3 replication fails without explicit error - amazon-web-services

I'm trying to setup one-way replication between two accounts using this guide. Here's the relevant code for the source account:
# Trust policy allowing the S3 service to assume the replication role.
data "aws_iam_policy_document" "s3-replication-trust" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["s3.amazonaws.com"]
    }
  }
}

# Replication role; created in the prod (source) account only.
resource "aws_iam_role" "s3-replication-prod" {
  count = var.env == "prod" ? 1 : 0 # only apply in prod account

  name               = "s3-replication-role-prod"
  path               = "/"
  assume_role_policy = data.aws_iam_policy_document.s3-replication-trust.json
}
# Permissions the replication role needs on the source and destination buckets.
data "aws_iam_policy_document" "s3-replication-prod" {
  count = var.env == "prod" ? 1 : 0 # only apply in prod account

  statement {
    effect = "Allow"
    actions = [
      "s3:GetReplicationConfiguration",
      "s3:ListBucket"
    ]
    resources = [aws_s3_bucket.source.arn]
  }

  statement {
    effect = "Allow"
    actions = [
      "s3:GetObjectVersionForReplication",
      "s3:GetObjectVersion",
      "s3:GetObjectVersionAcl",
      "s3:GetObjectVersionTagging"
    ]
    resources = ["${aws_s3_bucket.source.arn}/*"]
  }

  statement {
    effect = "Allow"
    actions = [
      "s3:ReplicateObject",
      "s3:ReplicateDelete",
      "s3:ReplicateTags",
      # Required because the replication rule uses
      # access_control_translation { owner = "Destination" }. Without this
      # action, cross-account replication fails silently with status FAILED
      # and nothing useful in CloudTrail.
      "s3:ObjectOwnerOverrideToBucketOwner"
    ]
    resources = ["${aws_s3_bucket.destination.arn}/*"]
  }
}
# Managed policy wrapping the replication permissions document.
resource "aws_iam_policy" "s3-replication-prod" {
  count = var.env == "prod" ? 1 : 0 # only apply in prod account

  name   = "s3-replication"
  path   = "/"
  policy = data.aws_iam_policy_document.s3-replication-prod[0].json
}

# Bind the replication permissions to the replication role.
resource "aws_iam_role_policy_attachment" "s3-replication-prod" {
  count = var.env == "prod" ? 1 : 0 # only apply in prod account

  role       = aws_iam_role.s3-replication-prod[0].name
  policy_arn = aws_iam_policy.s3-replication-prod[0].arn
}
resource "aws_s3_bucket_replication_configuration" "replication" {
  count = var.env == "prod" ? 1 : 0 # only apply in prod account

  # Versioning must be enabled on the source bucket before replication works.
  depends_on = [aws_s3_bucket_versioning.source]

  role   = aws_iam_role.s3-replication-prod[0].arn
  bucket = aws_s3_bucket.source.id

  rule {
    id     = "ReplicateToDev"
    status = "Enabled"

    destination {
      bucket        = aws_s3_bucket.destination.arn
      storage_class = "ONEZONE_IA"
      account       = var.destination_account_id

      # Replicated objects become owned by the destination account.
      access_control_translation {
        owner = "Destination"
      }
    }
  }
}
And here's the code relevant to the destination account:
# Bucket policy on the destination (dev) bucket allowing the prod-account
# replication role to deliver replicated objects.
data "aws_iam_policy_document" "destination_bucket_policy" {
  statement {
    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${var.prod_account_id}:role/s3-replication-role-prod"
      ]
    }
    actions = [
      "s3:ReplicateDelete",
      "s3:ReplicateObject",
      # Required because the source replication rule uses
      # access_control_translation { owner = "Destination" }. Without this
      # action replication fails with status FAILED and no CloudTrail error.
      "s3:ObjectOwnerOverrideToBucketOwner"
    ]
    resources = ["${aws_s3_bucket.destination.arn}/*"]
  }

  statement {
    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${var.prod_account_id}:role/s3-replication-role-prod"
      ]
    }
    actions = [
      "s3:List*",
      "s3:GetBucketVersioning",
      "s3:PutBucketVersioning"
    ]
    resources = [aws_s3_bucket.destination.arn]
  }
}

resource "aws_s3_bucket_policy" "s3-replication-dev" {
  count = var.env == "dev" ? 1 : 0 # only apply in dev account

  bucket = "${var.app}-dev"
  policy = data.aws_iam_policy_document.destination_bucket_policy.json
}
When I try to add any new object to the source bucket, it is unable to replicate. When I navigate to the object's listing in the console, it shows replication status "FAILED".
There are no obvious errors showing up in CloudTrail.
What am I doing wrong here?

Related

Terraform give IAM access to specific folder only in S3

How can I create an IAM user using terraform with access to one folder in s3
I found a lot of answers that give access to the entire bucket but I want access to one folder only
# Managed policy, machine user, and the attachment binding them together.
resource "aws_iam_policy" "username-s3-access" {
  name   = "username-s3-access"
  path   = "/"
  policy = data.aws_iam_policy_document.username-s3-access.json
}

resource "aws_iam_user" "username-s3-access" {
  name = "username-s3-access"
  path = "/machine/"
}

resource "aws_iam_user_policy_attachment" "username-s3-access" {
  user       = aws_iam_user.username-s3-access.name
  policy_arn = aws_iam_policy.username-s3-access.arn
}
I found the solution based on this official documentation in AWS site https://aws.amazon.com/premiumsupport/knowledge-center/s3-folder-user-access/
This code will give full access to one folder in the bucket.
scroll down to find the code for readonly access to one folder
Note: change the bucket_arn and the folder_name
variable "bucket_arn" {
  type    = string
  default = "bucket_arn"
}

variable "folder_name" {
  type    = string
  default = "s3_folder_name"
}

# Grants a machine user full access to a single folder of the bucket, plus
# just enough listing permission to navigate to that folder in the console.
data "aws_iam_policy_document" "username-s3-access" {
  statement {
    sid       = "AllowUserToSeeBuckets"
    effect    = "Allow"
    resources = ["arn:aws:s3:::*"]
    actions = [
      "s3:ListAllMyBuckets",
      "s3:GetBucketLocation",
    ]
  }

  statement {
    sid       = "AllowBucketListing"
    effect    = "Allow"
    resources = [var.bucket_arn]
    actions   = ["s3:ListBucket"]
  }

  # Listing is limited to the bucket root ("") and the target folder.
  statement {
    sid       = "AllowFolderListing"
    effect    = "Allow"
    resources = [var.bucket_arn]
    actions   = ["s3:ListBucket"]

    condition {
      test     = "StringEquals"
      variable = "s3:prefix"
      values = [
        "",
        "${var.folder_name}"
      ]
    }

    condition {
      test     = "StringEquals"
      variable = "s3:delimiter"
      values   = ["/"]
    }
  }

  statement {
    sid       = "AllowAllS3ActionsInUserFolder"
    effect    = "Allow"
    resources = ["${var.bucket_arn}/${var.folder_name}/*"]
    actions   = ["s3:*"]
  }
}

resource "aws_iam_policy" "username-s3-access" {
  name   = "username-s3-access"
  path   = "/"
  policy = data.aws_iam_policy_document.username-s3-access.json
}

resource "aws_iam_user" "username-s3-access" {
  name = "username-s3-access"
  path = "/machine/"
}

resource "aws_iam_user_policy_attachment" "username-s3-access" {
  user       = aws_iam_user.username-s3-access.name
  policy_arn = aws_iam_policy.username-s3-access.arn
}
if you want to give read only access to the folder then replace the last statement
statement {
  sid    = "AllowReadOnlyInUserFolder"
  effect = "Allow"
  # Fixed: the variable declared above is bucket_arn; var.bucket_name is
  # never declared and would fail terraform validation.
  resources = ["${var.bucket_arn}/${var.folder_name}/*"]
  actions = [
    "s3:GetObject",
    "s3:ListBucket",
  ]
}

how to configure an S3 bucket to allow an AWS application load balancer (not classic) to use it? It currently throws 'access denied'

I have an application load balancer and I'm trying to enable logging, terraform code below:
# Dedicated bucket for ALB access logs.
resource "aws_s3_bucket" "lb-logs" {
  bucket = "yeo-messaging-${var.environment}-lb-logs"
}

resource "aws_s3_bucket_acl" "lb-logs-acl" {
  bucket = aws_s3_bucket.lb-logs.id
  acl    = "private"
}

resource "aws_lb" "main" {
  name                       = "main"
  internal                   = false
  load_balancer_type         = "application"
  security_groups            = [aws_security_group.public.id]
  enable_deletion_protection = false
  subnets                    = [aws_subnet.public.id, aws_subnet.public-backup.id]

  access_logs {
    bucket  = aws_s3_bucket.lb-logs.bucket
    prefix  = "main-lb"
    enabled = true
  }
}
unfortunately I can't apply this due to:
Error: failure configuring LB attributes: InvalidConfigurationRequest: Access Denied for bucket: xxx-lb-logs. Please check S3bucket permission
│ status code: 400, request id: xx
I've seen a few SO threads and documentation but unfortunately it all applies to the classic load balancer, particularly the 'data' source that allows you to get the service account of the load balancer.
I have found some policy info on how to apply the right permissions to a SA but I can't seem to find how to apply the service account to the LB itself.
Example:
# Classic-LB-style bucket policy keyed off the ELB service account.
data "aws_iam_policy_document" "allow-lb" {
  statement {
    principals {
      type        = "AWS"
      identifiers = [data.aws_elb_service_account.main.arn]
    }
    actions = [
      "s3:GetObject",
      "s3:ListBucket",
      "s3:PutObject"
    ]
    resources = [
      aws_s3_bucket.lb-logs.arn,
      "${aws_s3_bucket.lb-logs.arn}/*",
    ]
  }
}

resource "aws_s3_bucket_policy" "allow-lb" {
  bucket = aws_s3_bucket.lb-logs.id
  policy = data.aws_iam_policy_document.allow-lb.json
}
But this is all moot because data.aws_elb_service_account.main.arn is only for classic LB.
EDIT:
Full code with attempt from answer below:
resource "aws_s3_bucket" "lb-logs" {
  bucket = "yeo-messaging-${var.environment}-lb-logs"
}

resource "aws_s3_bucket_acl" "lb-logs-acl" {
  bucket = aws_s3_bucket.lb-logs.id
  acl    = "private"
}

# Bucket policy allowing the ELB log-delivery service to write objects.
data "aws_iam_policy_document" "allow-lb" {
  statement {
    principals {
      type        = "Service"
      identifiers = ["logdelivery.elb.amazonaws.com"]
    }
    actions   = ["s3:PutObject"]
    resources = ["${aws_s3_bucket.lb-logs.arn}/*"]

    # Log delivery must hand object ownership to the bucket owner.
    condition {
      test     = "StringEquals"
      variable = "s3:x-amz-acl"
      values   = ["bucket-owner-full-control"]
    }
  }
}

resource "aws_s3_bucket_policy" "allow-lb" {
  bucket = aws_s3_bucket.lb-logs.id
  policy = data.aws_iam_policy_document.allow-lb.json
}

resource "aws_lb" "main" {
  name                       = "main"
  internal                   = false
  load_balancer_type         = "application"
  security_groups            = [aws_security_group.public.id]
  enable_deletion_protection = false
  subnets                    = [aws_subnet.public.id, aws_subnet.public-backup.id]

  access_logs {
    bucket  = aws_s3_bucket.lb-logs.bucket
    prefix  = "main-lb"
    enabled = true
  }
}
The bucket policy you need to use is provided in the official documentation for access logs on Application Load Balancers.
{
    "Effect": "Allow",
    "Principal": {
        "Service": "logdelivery.elb.amazonaws.com"
    },
    "Action": "s3:PutObject",
    "Resource": "arn:aws:s3:::bucket-name/prefix/AWSLogs/your-aws-account-id/*",
    "Condition": {
        "StringEquals": {
            "s3:x-amz-acl": "bucket-owner-full-control"
        }
    }
}
Notice bucket-name prefix and your-aws-account-id need to be replaced in that policy with your actual values.
In Terraform:
# Terraform equivalent of the documented ALB access-log bucket policy.
data "aws_iam_policy_document" "allow-lb" {
  statement {
    principals {
      type        = "Service"
      identifiers = ["logdelivery.elb.amazonaws.com"]
    }
    actions   = ["s3:PutObject"]
    resources = ["${aws_s3_bucket.lb-logs.arn}/*"]

    # Delivered log objects must be owned by the bucket owner.
    condition {
      test     = "StringEquals"
      variable = "s3:x-amz-acl"
      values   = ["bucket-owner-full-control"]
    }
  }
}

How do I capture AWS Backup failures in terraform when Windows VSS fails?

I'm using AWS Backups to back up several EC2 instances. I have terraform that seems to report correctly when there is a backup failure, but I am also interested in when the disks have backed up correctly, but when Windows VSS fails. Ultimately, the failed events are going to be published to Opsgenie. Is there a way to accomplish this? I have tried capturing all events with the 'aws_backup_vault_notifications' resource, and I have tried a filter as described in this AWS blog: https://aws.amazon.com/premiumsupport/knowledge-center/aws-backup-failed-job-notification/
I have included most of my terraform below, minus the opsgenie module; I can get successful or fully failing events published to Opsgenie just fine if I include those events:
locals {
  # Only failure events are forwarded to the SNS topic (and on to Opsgenie).
  backup_vault_events = toset(["BACKUP_JOB_FAILED", "COPY_JOB_FAILED"])
}

resource "aws_backup_region_settings" "legacy" {
  resource_type_opt_in_preference = {
    "Aurora"          = false
    "DynamoDB"        = false
    "EFS"             = false
    "FSx"             = false
    "RDS"             = false
    "Storage Gateway" = false
    "EBS"             = true
    "EC2"             = true
    "DocumentDB"      = false
    "Neptune"         = false
    "VirtualMachine"  = false
  }
}

resource "aws_backup_vault" "legacy" {
  name        = "Legacy${var.environment_tag}"
  kms_key_arn = aws_kms_key.key.arn
}
# Service role assumed by AWS Backup.
resource "aws_iam_role" "legacy_backup" {
  name                 = "AWSBackupService"
  permissions_boundary = data.aws_iam_policy.role_permissions_boundary.arn
  # Fixed: IAM policy Effect is case-sensitive — "allow" is rejected by AWS;
  # it must be "Allow".
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": ["sts:AssumeRole"],
      "Effect": "Allow",
      "Principal": {
        "Service": ["backup.amazonaws.com"]
      }
    }
  ]
}
POLICY
}

resource "aws_iam_role_policy_attachment" "legacy_backup" {
  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup"
  role       = aws_iam_role.legacy_backup.name
}
###############################################################################
## Second Region Backup
###############################################################################
resource "aws_backup_vault" "secondary" {
  provider    = aws.secondary
  name        = "Legacy${var.environment_tag}SecondaryRegion"
  kms_key_arn = aws_kms_replica_key.secondary_region.arn

  tags = merge(
    local.tags, {
      name = "Legacy${var.environment_tag}SecondaryRegion"
    }
  )
}

# Allow the AWS Backup service to publish vault events to the alerts topic.
data "aws_iam_policy_document" "backups" {
  policy_id = "__default_policy_ID"

  statement {
    sid    = "__default_statement_ID"
    effect = "Allow"
    actions = [
      "SNS:Publish",
    ]

    principals {
      type        = "Service"
      identifiers = ["backup.amazonaws.com"]
    }

    resources = [
      aws_sns_topic.backup_alerts.arn
    ]
  }
}

###############################################################################
# SNS
###############################################################################
resource "aws_sns_topic_policy" "backup_alerts" {
  arn    = aws_sns_topic.backup_alerts.arn
  policy = data.aws_iam_policy_document.backups.json
}

resource "aws_backup_vault_notifications" "backup_alerts" {
  backup_vault_name   = aws_backup_vault.legacy.id
  sns_topic_arn       = aws_sns_topic.backup_alerts.arn
  backup_vault_events = local.backup_vault_events
}

resource "aws_sns_topic_subscription" "backup_alerts_opsgenie_target" {
  topic_arn                       = aws_sns_topic.backup_alerts.arn
  protocol                        = "https"
  endpoint                        = module.opsgenie_team.sns_integration_sns_endpoint
  confirmation_timeout_in_minutes = 1
  endpoint_auto_confirms          = true
}

how to create an iam role with policy that grants access to the SQS created

I created 2 SQS queues and their dead-letter queues with the code in my main.tf, calling the SQS/main.tf module. I would like to destroy and create them again, but this time I also want to call IAM/iam_role.tf to create one IAM role together with the policy documents. I don't know how to specify that in my main.tf so that the resources section of the data policy document includes both of the created SQS queues, meaning "CloudTrail_SQS_Data_Event" and "cloudTrail_SQS_Management_Event", and the S3 resource ARNs give the role access to the two different buckets used for the queues, meaning "cloudtrail-management-event-logs" and "aws-cloudtrail143-sqs-logs".
SQS/main.tf
# Main queue with a redrive policy pointing at the dead-letter queue.
resource "aws_sqs_queue" "CloudTrail_SQS" {
  name = var.sqs_queue_name
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.CloudTrail_SQS_DLQ.arn
    maxReceiveCount     = 4
  })
}

# Fixed: this resource block was missing its closing brace.
resource "aws_sqs_queue" "CloudTrail_SQS_DLQ" {
  name = var.dead_queue_name
}
IAM/iam_role.tf
# Cross-account role; the trust policy is defined separately below.
resource "aws_iam_role" "access_role" {
  name               = var.role_name
  description        = var.description
  assume_role_policy = data.aws_iam_policy_document.trust_relationship.json
}
trust policy
data "aws_iam_policy_document" "trust_relationship" {
  statement {
    sid     = "AllowAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "AWS"
      identifiers = [var.account_id]
    }

    # Require the agreed external ID (confused-deputy protection).
    condition {
      test     = "StringEquals"
      variable = "sts:ExternalId"
      values   = [var.external_id]
    }
  }
}
data "aws_iam_policy_document" "policy_document" {
  statement {
    actions = [
      "sqs:GetQueueUrl",
      "sqs:ReceiveMessage",
      "sqs:SendMessage"
    ]
    effect = "Allow"
    # Fixed: "resources" expects a list of strings; a bare string is a
    # type error in terraform.
    resources = [aws_sqs_queue.CloudTrail_SQS.arn]
  }

  statement {
    actions   = ["sqs:ListQueues"]
    effect    = "Allow"
    resources = ["*"]
  }

  statement {
    actions = ["s3:GetObject", "s3:GetBucketLocation"]
    resources = [
      "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}/*"
    ]
    effect = "Allow"
  }

  statement {
    actions = ["s3:ListBucket"]
    resources = [
      "arn:aws:s3:::${var.cloudtrail_event_log_bucket_name}"
    ]
    effect = "Allow"
  }

  statement {
    actions   = ["kms:Decrypt", "kms:GenerateDataKey", "kms:DescribeKey"]
    effect    = "Allow"
    resources = [var.kms_key_arn]
  }
}
main.tf
# Two instances of the SQS module: one for data events, one for management events.
module "data_events" {
  source                           = "../SQS"
  cloudtrail_event_log_bucket_name = "aws-cloudtrail143-sqs-logs"
  sqs_queue_name                   = "CloudTrail_SQS_Data_Event"
  dead_queue_name                  = "CloudTrail_DLQ_Data_Event"
}

module "management_events" {
  source                           = "../SQS"
  cloudtrail_event_log_bucket_name = "cloudtrail-management-event-logs"
  sqs_queue_name                   = "cloudTrail_SQS_Management_Event"
  dead_queue_name                  = "cloudTrail_DLQ_Management_Event"
}
The role would be created as shown below. But your question has so many mistakes and so much missing information that it's impossible to provide full, working code. So the below code should be treated as a template which you need to adjust for your use.
resource "aws_iam_role" "access_role" {
  name        = var.role_name
  description = var.description

  # Template trust policy; adjust the principal for your actual use case.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Sid    = ""
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      },
    ]
  })

  inline_policy {
    name   = "allow-access-to-s3-sqs"
    policy = data.aws_iam_policy_document.policy_document.json
  }
}
data "aws_iam_policy_document" "policy_document" {
  statement {
    actions = [
      "sqs:GetQueueUrl",
      "sqs:ReceiveMessage",
      "sqs:SendMessage"
    ]
    effect = "Allow"
    resources = [
      module.data_events.sqs.arn,
      module.management_events.sqs.arn,
    ]
  }

  statement {
    actions   = ["sqs:ListQueues"]
    effect    = "Allow"
    resources = ["*"]
  }

  statement {
    actions = ["s3:GetObject", "s3:GetBucketLocation"]
    # Fixed: a comma was missing between the two ARNs, which is an HCL
    # syntax error.
    resources = [
      "arn:aws:s3:::aws-cloudtrail143-sqs-logs/*",
      "arn:aws:s3:::cloudtrail-management-event-logs/*"
    ]
    effect = "Allow"
  }

  statement {
    actions = ["s3:ListBucket"]
    resources = [
      "arn:aws:s3:::aws-cloudtrail143-sqs-logs",
      "arn:aws:s3:::cloudtrail-management-event-logs"
    ]
    effect = "Allow"
  }

  statement {
    actions   = ["kms:Decrypt", "kms:GenerateDataKey", "kms:DescribeKey"]
    effect    = "Allow"
    resources = [var.kms_key_arn]
  }
}
You can use Terraform data sources for this.
In that case, add outputs for the queues in the SQS module, reference them as data in the IAM module, and use those values there.

Build a S3 bucket policy with terraform dynamic blocks

I'm trying to create a S3 Bucket Policy to provide access to a number of other accounts. I can't figure out how to do it with Terraform either with a for loop or with dynamic blocks.
locals {
account_ids = [
987654321098,
765432109876,
432109876543
]
}
resource "aws_s3_bucket_policy" "bucket" {
bucket = aws_s3_bucket.bucket.id
policy = jsonencode({
Statement = [
for account in local.account_ids : {
Effect = "Allow"
Action = [ ... ]
Principal = { AWS = [ "arn:aws:iam::${account}:root" ] }
Resource = "${aws_s3_bucket.bucket.arn}/states/${account}/*"
}
]
# NOTE(review): this "}" is extraneous — the "({" opened by jsonencode is
# already closed by the "})" below. This extra brace is what triggers the
# "Missing argument separator" error; see the corrected version further down.
}
})
}
This fails with: Error: Missing argument separator / A comma is required to separate each function argument from the next.
If I try a dynamic block it's a similar issue.
Ultimately I want the Statement block to contain a list of 3 blocks, one for each account.
Any ideas?
You have too many closing brackets. It should be:
resource "aws_s3_bucket_policy" "bucket" {
  bucket = aws_s3_bucket.bucket.id

  # One Allow statement per account, each scoped to that account's prefix.
  policy = jsonencode({
    Statement = [
      for account in local.account_ids : {
        Effect = "Allow"
        Action = [ ... ]
        Principal = { AWS = [ "arn:aws:iam::${account}:root" ] }
        Resource = "${aws_s3_bucket.bucket.arn}/states/${account}/*"
      }
    ]
  })
}