My Lambda tries to copy an S3 object, but it does not work.
Code:
import { escapeUriPath } from '@aws-sdk/util-uri-escape';
import { S3Client, CopyObjectCommandInput, CopyObjectCommand } from '@aws-sdk/client-s3';

const s3Client = new S3Client({});

export const handler = async () => {
  // Both the destination key and the source (bucket/key) contain spaces, hence the escaping.
  const path = escapeUriPath('All Files/documents/folder with space/test');
  const CopySource = escapeUriPath('my_bucket/All Files/documents/folder with space/test_rename');
  await copyS3Object({
    Bucket: 'my_bucket',
    Key: path,
    CopySource
  });
};

export const copyS3Object = async (input: CopyObjectCommandInput) => {
  const command = new CopyObjectCommand(input);
  return await s3Client.send(command);
};
The error I see in CloudWatch is not very helpful:
"Code": "AccessDenied",
"name": "AccessDenied",
"$fault": "client",
"$metadata": {
"httpStatusCode": 400,
"attempts": 1,
"totalRetryDelay": 0
}
The interesting part is that:
I have already set the S3 policy to allow the s3:* action on the bucket.
I am able to use the same Lambda to upload/delete an object, but not to copy one.
What could cause the AccessDenied error?
I figured it out. It was totally my fault.
The policy resource should be "Resource": ["arn:aws:s3:::my_bucket", "arn:aws:s3:::my_bucket/*"] instead of "Resource": ["arn:aws:s3:::my_bucket"]. CopyObject needs object-level permissions (s3:GetObject on the source object and s3:PutObject on the destination), and those only match the bucket/* resource, not the bare bucket ARN.
The full policy:
resource "aws_iam_policy" "create-lambda-policy" {
name = local.lambda_policy_name
path = "/"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:*"
],
"Resource": ["my_bucket", "my_bucket/*"],
"Effect": "Allow"
}
]
}
EOF
}
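For comparison, s3:* is much broader than a copy needs. A least-privilege sketch of what CopyObject actually exercises here (the statement is my own, reusing the question's bucket name; within one bucket, s3:GetObject covers reading the CopySource object and s3:PutObject covers writing the destination key):

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:GetObject", "s3:PutObject"],
      "Resource": "arn:aws:s3:::my_bucket/*"
    }
  ]
}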
I have created a Lambda function with the required S3 bucket access, and I am trying to create an S3 event trigger, but I am getting an access denied error.
lambda.tf
resource "aws_lambda_function" "s3-lambdas" {
filename = "./s3-lambdas.zip"
function_name = "s3-lambdas"
source_code_hash = filebase64sha256(s3-lambdas)
role = module.lambda_role.arn
handler = "s3-lambdas.lambda_handler"
runtime = "python3.9"
timeout = 200
description = "invoke glue job"
depends_on = [module.lambda_role]
}
resource "aws_lambda_permission" "s3_lambdas_s3_events" {
depends_on = [aws_lambda_function.s3-lambdas]
statement_id = "AllowS3Invoke"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.s3-lambdas.function_name
principal = "s3.amazonaws.com"
source_arn = "arn:aws:s3:::${module.bucket-name.name}"
}
resource "aws_s3_bucket_notification" "bucket_notifications" {
bucket = module.bucket-name.name
lambda_function {
lambda_function_arn = aws_lambda_function.s3-lambdas.arn
events = ["s3:ObjectCreated:*"]
filter_prefix = "abc/def/"
}
depends_on = [aws_lambda_permission.s3_lambdas_s3_events]
}
And here is my Lambda role, which has S3 full access.
s3.tf
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:*"
      ],
      "Resource": [
        "arn:aws:s3:::dev-s3-bucket",
        "arn:aws:s3:::dev-s3-bucket/*"
      ]
    }
  ]
}
Even so, I am getting "An error occurred while listing S3 relations: Access Denied" when I try to create the S3 trigger.
Edited
output "bucket_name" {
value = module.bucket-name.name
}
bucket_name = dev-s3-bucket
output "iam_dev_arn" {
value = module.lambda_role.arn
}
iam_dev_arn = arn:aws:iam::0123456789:role/s3-lambda-role
output "div_arn" {
value = aws_lambda_function.s3-lambdas.arn
}
div_arn = arn:aws:lambda:us-east-1:0123456789:function:s3-lambdas
assume_role_policy
{
  "Version": "2008-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "lambda.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
I have added the outputs related to the module.
s3-lambdas
from __future__ import print_function
import os

import boto3

print('Loading function')
glue = boto3.client('glue')

def lambda_handler(event, context):
    gluejobname = os.environ['glue_job']
    try:
        # Start the Glue job and log its initial run state.
        runId = glue.start_job_run(JobName=gluejobname)
        status = glue.get_job_run(JobName=gluejobname, RunId=runId['JobRunId'])
        print("Job Status : ", status['JobRun']['JobRunState'], "runId", runId)
    except Exception as e:
        raise e
    return {
        "statusCode": 200,
        "body": os.environ['glue_job'] + " Job started"
    }
S3 bucket policy:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "denyInsecureTransport",
      "Effect": "Deny",
      "Principal": "*",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::dev-s3-bucket/*",
        "arn:aws:s3:::dev-s3-bucket"
      ],
      "Condition": {
        "Bool": {
          "aws:SecureTransport": "false"
        }
      }
    }
  ]
}
In your Lambda role's S3 policy, can you make the following change and retry:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:*"
      ],
      "Resource": [
        "arn:aws:s3:::dev-s3-bucket/abc/def/*" <-- change this
      ]
    }
  ]
}
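One caveat with scoping the resource down to a prefix: bucket-level actions such as s3:ListBucket, s3:GetBucketNotification, and s3:PutBucketNotification match the bucket ARN itself, not an object path, so a statement like the following (my sketch, not from the original answer) usually needs to stay alongside the prefix-scoped one:

{
  "Effect": "Allow",
  "Action": [
    "s3:ListBucket",
    "s3:GetBucketNotification",
    "s3:PutBucketNotification"
  ],
  "Resource": "arn:aws:s3:::dev-s3-bucket"
}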
When trying to use the EventBridge input_transformer, I do not receive the transformed object; instead, the original object is sent directly to SQS.
I currently have the following setup running:
locals {
  rule-arn = "arn:aws:events:${data.aws_arn.event-rule.region}:${data.aws_arn.event-rule.account}:rule/${aws_cloudwatch_event_rule.notification.name}"
}

resource "aws_sqs_queue" "test-queue" {
  name = "test-queue"
}

resource "aws_cloudwatch_event_rule" "notification" {
  name           = "test-notification"
  event_bus_name = aws_cloudwatch_event_bus.events.name
  description    = "Listens to all events in the TEST.Notification namespace"
  event_pattern = jsonencode({
    source = [{ "prefix" : "TEST.Notification" }],
  })
}

resource "aws_cloudwatch_event_target" "developer-notification" {
  rule      = aws_cloudwatch_event_rule.notification.name
  target_id = "SendToSQS"
  arn       = aws_sqs_queue.test-queue.arn
  input_transformer {
    input_paths = {
      "detailType" = "$.detail-type",
    }
    input_template = jsonencode(
      {
        "detailType" : "<detailType>"
      }
    )
  }
}

resource "aws_sqs_queue_policy" "test-queue" {
  queue_url = aws_sqs_queue.test-queue.id
  policy    = <<POLICY
{
  "Version": "2012-10-17",
  "Id": "",
  "Statement": [
    {
      "Sid": "Allow EventBridge to SQS",
      "Effect": "Allow",
      "Principal": {
        "Service": "events.amazonaws.com"
      },
      "Action": "*",
      "Resource": "${aws_sqs_queue.test-queue.arn}",
      "Condition": {
        "ArnEquals": {
          "aws:SourceArn": "${aws_cloudwatch_event_rule.notification.arn}"
        }
      }
    }
  ]
}
POLICY
}
I am running Terraform version:
Terraform v1.2.3
on darwin_arm64
I have seen some talk about having to do things like
"\"<detailType>\""
in order to make it work, but I've had no luck with that either, so for brevity/readability I've removed all the odd tricks I've seen people use. My thinking is that there's something more basic I am missing here.
Does someone know what I am doing wrong?
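(An aside, not from the original thread: Terraform's jsonencode escapes <, >, and & inside strings as \u003c, \u003e, and \u0026, so the <detailType> placeholder may not reach EventBridge intact. A sketch of the same transformer with the template written as a plain heredoc, which avoids that escaping; untested here, so treat it as a lead rather than a confirmed fix:)

input_transformer {
  input_paths = {
    "detailType" = "$.detail-type",
  }
  # A heredoc keeps the <detailType> placeholder literal;
  # jsonencode would emit it as "\u003cdetailType\u003e".
  input_template = <<EOF
{
  "detailType": "<detailType>"
}
EOF
}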
I created an AWS step function using Terraform. For now, the step function has only one Lambda function:
resource "aws_iam_role_policy" "sfn_policy" {
policy = jsonencode(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Resource": "*"
},
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction",
"lambda:InvokeAsync"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [ "states:StartExecution" ],
"Resource": "*"
}
]
}
)
role = aws_iam_role.processing_lambda_role.id
}
resource "aws_sfn_state_machine" "sfn_state_machine_zip_files" {
name = local.zip_files_step_function_name
role_arn = aws_iam_role.processing_lambda_role.arn
definition = <<EOF
{
"Comment": "Process Incoming Zip Files",
"StartAt": "ProcessIncomingZipFiles",
"States": {
"ProcessIncomingZipFiles": {
"Type": "Task",
"Resource": "${aws_lambda_function.process_zip_files_lambda.arn}",
"ResultPath": "$.Output",
"End": true
}
}
}
EOF
}
This is how the role is initially defined:
resource "aws_iam_role" "processing_lambda_role" {
name = local.name
path = "/service-role/"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Effect = "Allow"
Principal = { Service = "lambda.amazonaws.com" }
Action = "sts:AssumeRole"
}
]
})
}
Why do I get this error message even though the policies already include the AssumeRole policy? I also tried removing one of the sts:AssumeRole policies, but the error was still there.
"Neither the global service principal states.amazonaws.com, nor the regional one is authorized to assume the provided role."
AWS docs Reference: https://aws.amazon.com/premiumsupport/knowledge-center/step-functions-iam-role-troubleshooting/
The role aws_iam_role.processing_lambda_role can only be assumed by a Lambda function, so your aws_sfn_state_machine.sfn_state_machine_zip_files can't assume it. You have to change the Principal in the role from:
Principal = { Service = "lambda.amazonaws.com" }
to
Principal = { Service = "states.amazonaws.com" }
You may still have other issues, depending on what exactly you want to do, but the reported error is due to what I mentioned.
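If you really do want one role shared by both the Lambda function and the state machine, the trust policy can list both service principals. A sketch (separate roles per service are the more common pattern):

assume_role_policy = jsonencode({
  Version = "2012-10-17"
  Statement = [
    {
      Effect = "Allow"
      # Either service may assume this role.
      Principal = { Service = ["lambda.amazonaws.com", "states.amazonaws.com"] }
      Action = "sts:AssumeRole"
    }
  ]
})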
I am having issues with my S3 bucket policy. Terraform seems to add the policy correctly, and I even verified in AWS that it shows the exact policy set in policy.tpl, but it keeps saying there are changes.
I've tried changing the action and resource into arrays, which I've heard may help, and tried removing the "Version" from the policy and the Sid; it keeps saying there are changes every time I run it.
policy.tf
resource "aws_s3_bucket_policy" "bucket" {
bucket = aws_s3_bucket.bucket.id
policy = local.policy
}
locals.tf
locals {
  template_dir = "${path.module}/templates"
  template_vars = {
    encrypt          = var.s3_require_encryption_enabled
    bucket_arn       = aws_s3_bucket.bucket.arn
    extra_statements = var.s3_bucket_policy
  }
  policy = templatefile("${local.template_dir}/policy.tpl", local.template_vars)
}
templates/policy.tpl
{
  "Version": "2008-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Deny",
      "Principal": "*",
      "Action": "s3:*",
      "Resource": "${bucket_arn}/*",
      "Condition": {
        "Bool": {
          "aws:SecureTransport": "false"
        }
      }
    }
  ]
}
In AWS:
{
  "Version": "2008-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Deny",
      "Principal": "*",
      "Action": "s3:*",
      "Resource": "arn:aws:s3:::test-bucket-us-east-1/*",
      "Condition": {
        "Bool": {
          "aws:SecureTransport": "false"
        }
      }
    }
  ]
}
The plan says:
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  ~ update in-place

Terraform will perform the following actions:

  # aws_s3_bucket_policy.bucket will be updated in-place
  ~ resource "aws_s3_bucket_policy" "bucket" {
        bucket = "test-bucket-us-east-1"
        id     = "test-bucket-us-east-1"
      + policy = jsonencode(
            {
              + Statement = [
                  + {
                      + Action    = "s3:*"
                      + Condition = {
                          + Bool = {
                              + "aws:SecureTransport" = "false"
                            }
                        }
                      + Effect    = "Deny"
                      + Principal = "*"
                      + Resource  = "arn:aws:s3:::test-bucket-us-east-1/*"
                      + Sid       = ""
                    },
                ]
              + Version = "2008-10-17"
            }
        )
    }

Plan: 0 to add, 1 to change, 0 to destroy.
Based on the comments, the underlying bucket policy had issues.
PutBucketPolicy has a Content-MD5 header:
"The MD5 hash of the request body. For requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is calculated automatically."
So the aws_s3_bucket_policy resource was trying to update the policy.
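As a general note, perpetual diffs on aws_s3_bucket_policy are often caused by formatting alone, since the JSON stored by AWS and the JSON produced by a template rarely match byte for byte. Building the policy with data.aws_iam_policy_document (or jsonencode) normalizes it; a sketch of the same deny statement written that way (my rewrite, not from the thread):

data "aws_iam_policy_document" "bucket" {
  statement {
    effect  = "Deny"
    actions = ["s3:*"]
    principals {
      type        = "*"
      identifiers = ["*"]
    }
    resources = ["${aws_s3_bucket.bucket.arn}/*"]
    # Deny any request that does not use TLS.
    condition {
      test     = "Bool"
      variable = "aws:SecureTransport"
      values   = ["false"]
    }
  }
}

resource "aws_s3_bucket_policy" "bucket" {
  bucket = aws_s3_bucket.bucket.id
  policy = data.aws_iam_policy_document.bucket.json
}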
I was using Terraform to set up S3 buckets in different regions and configure replication between them.
It was working properly until I added KMS.
I created two KMS keys, one for the source and one for the destination.
Now, while applying the replication configuration, there is an option to pass the destination key for the destination bucket, but I am not sure how to apply the key at the source.
Any help would be appreciated.
provider "aws" {
alias = "east"
region = "us-east-1"
}
resource "aws_s3_bucket" "destination-bucket" {
bucket = ""destination-bucket"
provider = "aws.east"
acl = "private"
region = "us-east-1"
versioning {
enabled = true
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = "${var.kms_cmk_dest_arn}"
sse_algorithm = "aws:kms"
}
}
}
}
resource "aws_s3_bucket" "source-bucket" {
bucket = "source-bucket"
acl = "private"
versioning {
enabled = true
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = "${var.kms_cmk_arn}"
sse_algorithm = "aws:kms"
}
}
}
replication_configuration {
role = "${aws_iam_role.replication.arn}"
rules {
status = "Enabled"
destination {
bucket = "${aws_s3_bucket.source-bucket.arn}"
storage_class = "STANDARD"
replica_kms_key_id = "${var.kms_cmk_dest_arn}"
}
source_selection_criteria {
sse_kms_encrypted_objects {
enabled = true
}
}
}
}
}
resource "aws_iam_role" "replication" {
name = "cdd-iam-role-replication"
permissions_boundary = "arn:aws:iam::${var.account_id}:policy/ServiceRoleBoundary"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
POLICY
}
resource "aws_iam_role_policy" "replication" {
name = "cdd-iam-role-policy-replication"
role = "${aws_iam_role.replication.id}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetReplicationConfiguration",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.source-bucket.arn}"
]
},
{
"Action": [
"s3:GetObjectVersion",
"s3:GetObjectVersionAcl"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.source-bucket.arn}/*"
]
},
{
"Action": [
"s3:ReplicateObject",
"s3:ReplicateDelete"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.destination-bucket.arn}/*"
}
]
}
POLICY
}
In case you're using a Customer Managed Key (CMK) for S3 encryption, you need extra configuration.
The AWS S3 documentation mentions that the CMK owner must grant the source bucket owner permission to use the CMK.
https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html#replication-kms-cross-acct-scenario
Also, a good article summarizing S3 cross-region replication configuration:
https://medium.com/@devopslearning/100-days-of-devops-day-44-s3-cross-region-replication-crr-8c58ae8c68d4
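Along the same lines, the replication role itself needs permissions on both keys in addition to the S3 actions shown in the question. A sketch of the extra statements to append to the role policy's Statement array (variable names follow the question; my addition, paraphrasing the linked AWS docs):

{
  "Action": ["kms:Decrypt"],
  "Effect": "Allow",
  "Resource": "${var.kms_cmk_arn}"
},
{
  "Action": ["kms:Encrypt"],
  "Effect": "Allow",
  "Resource": "${var.kms_cmk_dest_arn}"
}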
If I understand you correctly, you've got two S3 Buckets in two different regions within the same account.
One way I've done this in the past is to plan/apply the KMS keys to both regions first.
Then on a separate plan/apply, I used Terraform's data sources:
data "aws_kms_key" "source_credentials_encryption_key" {
key_id = "alias/source-encryption-key"
}
data "aws_kms_key" "destination_credentials_encryption_key" {
provider = aws.usEast
key_id = "alias/destination-encryption-key"
}
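(aws.usEast here assumes an aliased provider for the destination region, along the lines of:)

provider "aws" {
  alias  = "usEast"
  region = "us-east-1"
}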
And used the data source for the replication configuration like so:
replication_configuration {
  role = aws_iam_role.replication_role.arn
  rules {
    status = "Enabled"
    destination {
      # Replicate into the destination bucket, encrypting with the destination-region key.
      bucket             = aws_s3_bucket.destination_bucket.arn
      storage_class      = "STANDARD"
      replica_kms_key_id = data.aws_kms_key.destination_credentials_encryption_key.arn
    }
    source_selection_criteria {
      sse_kms_encrypted_objects {
        enabled = true
      }
    }
  }
}