aws lambda function getting access denied when getObject from s3 - amazon-web-services

I am getting an access denied error from the S3 AWS service in my Lambda function.
This is the code:
// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var gm = require('gm').subClass({ imageMagick: true }); // Enable ImageMagick integration.

exports.handler = function(event, context) {
    var srcBucket = event.Records[0].s3.bucket.name;
    // Object key may have spaces or unicode non-ASCII characters.
    var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
    /*
    {
        originalFilename: <string>,
        versions: [
            {
                size: <number>,
                crop: [x,y],
                max: [x, y],
                rotate: <number>
            }
        ]
    }*/
    var fileInfo;
    var dstBucket = "xmovo.transformedimages.develop";
    try {
        //TODO: Decompress and decode the returned value
        fileInfo = JSON.parse(key);
        //download s3File

        // get reference to S3 client
        var s3 = new AWS.S3();

        // Download the image from S3 into a buffer.
        s3.getObject({
                Bucket: srcBucket,
                Key: key
            },
            function (err, response) {
                if (err) {
                    console.log("Error getting from s3: >>> " + err + "::: Bucket-Key >>>" + srcBucket + "-" + key + ":::Principal>>>" + event.Records[0].userIdentity.principalId, err.stack);
                    return;
                }

                // Infer the image type.
                var img = gm(response.Body);
                var imageType = null;
                img.identify(function (err, data) {
                    if (err) {
                        console.log("Error image type: >>> " + err);
                        deleteFromS3(srcBucket, key);
                        return;
                    }
                    imageType = data.format;

                    //foreach of the versions requested
                    async.each(fileInfo.versions, function (currentVersion, callback) {
                        //apply transform
                        async.waterfall([async.apply(transform, response, currentVersion), uploadToS3, callback]);
                    }, function (err) {
                        if (err) console.log("Error on execution of waterfall: >>> " + err);
                        else {
                            //when all done then delete the original image from srcBucket
                            deleteFromS3(srcBucket, key);
                        }
                    });
                });
            });
    }
    catch (ex) {
        context.fail("exception through: " + ex);
        deleteFromS3(srcBucket, key);
        return;
    }

    function transform(response, version, callback) {
        var imageProcess = gm(response.Body);
        if (version.rotate != 0) imageProcess = imageProcess.rotate("black", version.rotate);
        if (version.size != null) {
            if (version.crop != null) {
                //crop the image from the coordinates
                imageProcess = imageProcess.crop(version.size[0], version.size[1], version.crop[0], version.crop[1]);
            }
            else {
                //find the bigger and resize proportioned the other dimension
                var widthIsMax = version.size[0] > version.size[1];
                var maxValue = Math.max(version.size[0], version.size[1]);
                imageProcess = (widthIsMax) ? imageProcess.resize(maxValue) : imageProcess.resize(null, maxValue);
            }
        }
        //finally convert the image to jpg 90%
        imageProcess.toBuffer("jpg", { quality: 90 }, function (err, buffer) {
            if (err) callback(err);
            callback(null, version, "image/jpeg", buffer);
        });
    }

    function deleteFromS3(bucket, filename) {
        s3.deleteObject({
            Bucket: bucket,
            Key: filename
        });
    }

    function uploadToS3(version, contentType, data, callback) {
        // Stream the transformed image to a different S3 bucket.
        var dstKey = fileInfo.originalFilename + "_" + version.size + ".jpg";
        s3.putObject({
            Bucket: dstBucket,
            Key: dstKey,
            Body: data,
            ContentType: contentType
        }, callback);
    }
};
This is the error on Cloudwatch:
AccessDenied: Access Denied
This is the stack error:
at Request.extractError (/var/runtime/node_modules/aws-sdk/lib/services/s3.js:329:35)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)
at Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:596:14)
at Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:21:10)
at AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)
at /var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:37:9)
at Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:598:12)
at Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:115:18)
There is no other description or info.
The S3 bucket permissions allow everyone to put, list, and delete.
What can I do to access the S3 bucket?
PS: in the Lambda event properties the principal is correct and has administrative privileges.

Interestingly enough, AWS returns 403 (access denied) when the file does not exist. Be sure the target file is in the S3 bucket.
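If you are not sure the object is there, a quick sanity check (a minimal sketch; the bucket and key below are placeholders) is to call headObject on the exact key your Lambda computes and log the result:

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

// Placeholder bucket/key: substitute the values your Lambda actually uses.
s3.headObject({ Bucket: 'my-bucket', Key: 'path/to/file.jpg' }, function (err, data) {
    if (err) console.log('Object missing or inaccessible:', err.code); // NotFound or Forbidden
    else console.log('Object exists, size:', data.ContentLength);
});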

If you are specifying the Resource, don't forget to add the subfolder specification as well. Like this:
"Resource": [
"arn:aws:s3:::BUCKET-NAME",
"arn:aws:s3:::BUCKET-NAME/*"
]

Your Lambda does not have the required privileges (s3:GetObject).
Go to the IAM dashboard and check the role associated with your Lambda execution. If you used the AWS wizard, it automatically creates a role called oneClick_lambda_s3_exec_role. Click on Show Policy. It should list the allowed S3 actions; make sure s3:GetObject is included.
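For reference, a minimal sketch of an execution role policy that includes it (BUCKET-NAME is a placeholder; the wizard-generated role typically also contains CloudWatch Logs permissions):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:GetObject"],
            "Resource": "arn:aws:s3:::BUCKET-NAME/*"
        }
    ]
}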

I ran into this issue and after hours of IAM policy madness, the solution was to:
Go to S3 console
Click bucket you are interested in.
Click 'Properties'
Unfold 'Permissions'
Click 'Add more permissions'
Choose 'Any Authenticated AWS User' from dropdown. Select 'Upload/Delete' and 'List' (or whatever you need for your lambda).
Click 'Save'
Done.
Your carefully written IAM role policies don't matter, neither do specific bucket policies (I've written those too to make it work). Or they just don't work on my account, who knows.
[EDIT]
After a lot of tinkering the above approach is not the best. Try this:
Keep your role policy as in the helloV post.
Go to S3. Select your bucket. Click Permissions. Click Bucket Policy.
Try something like this:
{
    "Version": "2012-10-17",
    "Id": "Lambda access bucket policy",
    "Statement": [
        {
            "Sid": "All on objects in bucket lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME/*"
        },
        {
            "Sid": "All on bucket by lambda",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::AWSACCOUNTID:root"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::BUCKET-NAME"
        }
    ]
}
This worked for me and does not require you to share the bucket with all authenticated AWS users (which most of the time is not ideal).

If you have encryption set on your S3 bucket (such as AWS KMS), you may need to make sure the IAM role applied to your Lambda function is added to the list of IAM > Encryption keys > region > key > Key Users for the corresponding key that you used to encrypt your S3 bucket at rest.
For example, I added the CyclopsApplicationLambdaRole role that I have applied to my Lambda function as a Key User in IAM for the same AWS KMS key that I used to encrypt my S3 bucket. Don't forget to select the correct region for your key when you open up the Encryption keys UI.
In short: find the execution role you've applied to your Lambda function, find the key you used to add encryption to your S3 bucket, then in IAM > Encryption keys choose your region, click on the key name, and add the role as a Key User for the key specified in S3.
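If you manage the permission from the role side instead of the key's Key Users list, a hedged sketch of the statement to add to the Lambda role's policy (the key ARN is a placeholder) is:

{
    "Effect": "Allow",
    "Action": ["kms:Decrypt"],
    "Resource": "arn:aws:kms:us-east-1:ACCOUNT-ID:key/KEY-ID"
}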

If all the other policy ducks are in a row, S3 will still return an Access Denied message if the object doesn't exist AND the requester doesn't have ListBucket permission on the bucket.
From https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html:
...If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.
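So if you want a 404 ("no such key") instead of a misleading 403 for missing objects, also grant s3:ListBucket on the bucket itself (a sketch; BUCKET-NAME is a placeholder):

{
    "Effect": "Allow",
    "Action": "s3:ListBucket",
    "Resource": "arn:aws:s3:::BUCKET-NAME"
}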

I too ran into this issue; I fixed it by providing s3:GetObject* in the ACL, as the call was attempting to obtain a version of that object.

I tried to execute a basic blueprint Python Lambda function [example code] and I had the same issue. My execution role was lambda_basic_execution.
I went to S3 > (my bucket name here) > Permissions.
Because I'm a beginner, I used the Policy Generator provided by Amazon rather than writing JSON myself: http://awspolicygen.s3.amazonaws.com/policygen.html
My JSON looks like this:
{
    "Id": "Policy153536723xxxx",
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Stmt153536722xxxx",
            "Action": [
                "s3:GetObject"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::tokabucket/*",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::82557712xxxx:role/lambda_basic_execution"
                ]
            }
        }
    ]
}
And then the code executed nicely.

I solved my problem by following all the instructions from the AWS article How do I allow my Lambda execution role to access my Amazon S3 bucket?:
Create an AWS Identity and Access Management (IAM) role for the Lambda function that grants access to the S3 bucket.
Modify the IAM role's trust policy (a sample trust policy is sketched below).
Set the IAM role as the Lambda function's execution role.
Verify that the bucket policy grants access to the Lambda function's execution role.
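For the trust policy step, a minimal sketch of the trust relationship that lets the Lambda service assume the role looks like this:

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": { "Service": "lambda.amazonaws.com" },
            "Action": "sts:AssumeRole"
        }
    ]
}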

I was trying to read a file from S3 and create a new file by changing the content of the file read (Lambda + Node). Reading the file from S3 did not have any problem. As soon as I tried writing to the S3 bucket I got an 'Access Denied' error.
I tried everything listed above but couldn't get rid of 'Access Denied'. Finally I was able to get it working by giving 'List Object' permission to everyone on my bucket.
Obviously this is not the best approach, but nothing else worked.

After searching for a long time I saw that my bucket policy only allowed read access and not put access:
"Version": "2012-10-17",
"Statement": [
{
"Sid": "PublicListGet",
"Effect": "Allow",
"Principal": "*",
"Action": [
"s3:List*",
"s3:Get*",
"s3:Put*"
],
"Resource": [
"arn:aws:s3:::bucketName",
"arn:aws:s3:::bucketName/*"
]
}
]
}
Another possible issue is that in order to fetch objects from a different region you need to initialize a new S3 client with the other region's name, like:
const getS3Client = (region) => new S3({ region })
I used this function to get an S3 client based on the region.
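For example (a minimal sketch with the AWS SDK for JavaScript; the region, bucket and key are placeholders):

const AWS = require('aws-sdk');
const getS3Client = (region) => new AWS.S3({ region });

// The bucket lives in eu-west-1, so build a client for that region before fetching.
const s3 = getS3Client('eu-west-1');
s3.getObject({ Bucket: 'my-eu-bucket', Key: 'some/key.json' }, (err, data) => {
    if (err) console.log(err);
    else console.log(data.ContentType);
});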

I was struggling with this issue for hours. I was using AmazonS3EncryptionClient and nothing I did helped. Then I noticed that the client is actually deprecated, so I thought I'd try switching to the builder model they have:
var builder = AmazonS3EncryptionClientBuilder.standard()
    .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(encryptionMaterials))
if (accessKey.nonEmpty && secretKey.nonEmpty)
    builder = builder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey.get, secretKey.get)))
builder.build()
And... that solved it. Looks like Lambda has trouble injecting the credentials in the old model, but works well in the new one.

I was getting the same "AccessDenied: Access Denied" error while cropping S3 images using a Lambda function. I updated the S3 bucket policy and the IAM role inline policy as per the document linked below.
But I was still getting the same error. Then I realised I was trying to give "public-read" access on a private bucket. After removing ACL: 'public-read' from S3.putObject the problem got resolved.
https://aws.amazon.com/premiumsupport/knowledge-center/access-denied-lambda-s3-bucket/

I had this error message in the AWS Lambda environment when using boto3 with Python:
botocore.exceptions.ClientError: An error occurred (AccessDenied) when calling the GetObject operation: Access Denied
It turns out I needed an extra permission because I was using object tags. If your objects have tags you will need
both s3:GetObject and s3:GetObjectTagging to get the object.
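A hedged sketch of the combined statement (BUCKET-NAME is a placeholder):

{
    "Effect": "Allow",
    "Action": ["s3:GetObject", "s3:GetObjectTagging"],
    "Resource": "arn:aws:s3:::BUCKET-NAME/*"
}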

I faced the same problem when creating a Lambda function that needed to read S3 bucket content. I created the Lambda function and the S3 bucket using AWS CDK. To solve this within AWS CDK, I used this bit of magic from the docs.
Resources that use execution roles, such as lambda.Function, also implement IGrantable, so you can grant them access directly instead of granting access to their role. For example, if bucket is an Amazon S3 bucket, and function is a Lambda function, the code below grants the function read access to the bucket.
bucket.grantRead(function);
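As a minimal sketch (CDK v2 in JavaScript; the construct names and asset path are made up), that looks like:

const cdk = require('aws-cdk-lib');
const s3 = require('aws-cdk-lib/aws-s3');
const lambda = require('aws-cdk-lib/aws-lambda');

class MyStack extends cdk.Stack {
    constructor(scope, id, props) {
        super(scope, id, props);
        const bucket = new s3.Bucket(this, 'Bucket');
        const fn = new lambda.Function(this, 'Handler', {
            runtime: lambda.Runtime.NODEJS_18_X,
            handler: 'index.handler',
            code: lambda.Code.fromAsset('lambda'),
        });
        // Adds s3:GetObject*, s3:GetBucket* and s3:List* on the bucket to the function's execution role.
        bucket.grantRead(fn);
    }
}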

Related

AWS S3 Access Denied from EC2 and Lambda

I'm trying to get or list files from an S3 bucket. The bucket is not set up as private and has no specific permissions added.
I'm trying to access it from EC2, configured with a role that has full S3 access; this worked before.
I'm also trying to access it from Lambda, configured with a role that has full S3 access; this is new and never worked before.
According to the IAM simulator this should be allowed.
This is an excerpt from my Lambda (python):
import json
import boto3
from datetime import datetime

def lambda_handler(event, context):
    bucket = 'mybucketname'  # this is the name itself, no url or arn or anything
    # check if file exists
    s3client = boto3.client('s3')
    key = 'mypath/' + 'anotherbitofpath' + '/' + 'index.html'
    print(f"key = {key}")
    objs = s3client.list_objects_v2(
        Bucket=bucket,
        Prefix=key
    )
    print(f"objs = {objs}")
    if any(w['Key'] == key for w in objs.get('Contents', [])):
        print("Exists!")
    else:
        print("Doesn't exist")
many thanks
I implemented this exact use case. I can access S3 objects from a Lambda function. The only difference is I implemented my code in Java. This method that tags objects works perfectly in a Lambda function.
private void tagExistingObject(S3Client s3, String bucketName, String key, String label, String LabelValue) {
    try {
        GetObjectTaggingRequest getObjectTaggingRequest = GetObjectTaggingRequest.builder()
                .bucket(bucketName)
                .key(key)
                .build();
        GetObjectTaggingResponse response = s3.getObjectTagging(getObjectTaggingRequest);

        // Get the existing immutable list - cannot modify this list.
        List<Tag> existingList = response.tagSet();
        ArrayList<Tag> newTagList = new ArrayList<>(existingList);

        // Create a new tag.
        Tag myTag = Tag.builder()
                .key(label)
                .value(LabelValue)
                .build();

        // push new tag to list.
        newTagList.add(myTag);
        Tagging tagging = Tagging.builder()
                .tagSet(newTagList)
                .build();

        PutObjectTaggingRequest taggingRequest = PutObjectTaggingRequest.builder()
                .key(key)
                .bucket(bucketName)
                .tagging(tagging)
                .build();
        s3.putObjectTagging(taggingRequest);
        System.out.println(key + " was tagged with " + label);
    } catch (S3Exception e) {
        System.err.println(e.awsErrorDetails().errorMessage());
        System.exit(1);
    }
}
The role I use has full access to S3 and there are no issues performing S3 operations from a Lambda function.
Update the bucket policy so that it specifies the ARN of the Lambda function's IAM role (execution role) as a Principal that has access to the action s3:GetObject. You can use a bucket policy similar to the following:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::YourAWSAccount:role/AccountARole"
            },
            "Action": [
                "s3:GetObject",
                "s3:GetObjectVersion"
            ],
            "Resource": [
                "arn:aws:s3:::YourBucketName/*"
            ]
        }
    ]
}

Can I export data from s3 to dynamoDB using lambda?

I want to build a Lambda process that automatically sends data to DynamoDB when a file arrives in S3. But since DynamoDB is not offered as a destination, what should I do? (The permission is admin.)
The best way would be to set up notifications on your S3 bucket to trigger for new objects. The notifications would launch your Lambda function, which would then update DynamoDB.
If you already have objects in your bucket, you could use S3 Batch Operations to process all of them with your Lambda function.
You should understand how to work with a Lambda event source (event trigger). Here the event source is S3: once an object is stored, S3 triggers an event to the Lambda function. To get this working, you have to add permission on the Lambda for the S3 event. Check it out:
Using Lambda Function with Amazon S3
Now, every object put into S3 will trigger an event to Lambda, something like telling the Lambda about the newly arrived S3 object. You can check the event object from the Lambda code with this sample code:
exports.handler = async (event) => {
    event.Records.forEach((record, i) => {
        if (record.eventName == 'ObjectCreated:Put')
            console.log(record);
    })
    return;
}
Test a file upload to your S3 bucket and go to CloudWatch to check your Lambda log.
Next, if you want to store the file content in DynamoDB, you will add policies to the Lambda role and write some lines in the Lambda function. Here is a sample policy adding the s3:GetObject and dynamodb:PutItem permissions to the Lambda role:
{
    "Sid": "Stmt1583413548180",
    "Action": [
        "s3:GetObject"
    ],
    "Effect": "Allow",
    "Resource": "your_s3Bucket_ARN"
},
{
    "Sid": "Stmt1583413573162",
    "Action": [
        "dynamodb:PutItem"
    ],
    "Effect": "Allow",
    "Resource": "your_dynamodbTable_ARN"
}
And this is a sample of the Lambda code:
const AWS = require('aws-sdk');
const TextDecoder = require("util").TextDecoder;
let s3 = new AWS.S3();
let dynamoDB = new AWS.DynamoDB.DocumentClient();

exports.handler = async (event) => {
    let records = [];
    event.Records.forEach((record) => {
        if (record.eventName == 'ObjectCreated:Put')
            records.push(fileContent(record.s3));
    });
    if (records.length > 0)
        return Promise.all(records).then(() => {
            console.log("All events completed");
            return "All events completed";
        }).catch((e) => {
            console.log("The tasks error: ", e);
            throw "The tasks error";
        });
    return "All events completed";
}

/* Get the file content and put a new dynamodb item */
function fileContent(obj) {
    let params = {
        Bucket: obj.bucket.name,
        Key: obj.object.key
    }
    return s3.getObject(params).promise().then((content) => {
        console.log("GetObject succeeded");
        content = new TextDecoder("utf-8").decode(content.Body);
        let Item = {
            Key: obj.object.key,
            dataContent: content
        }
        return dynamoDB.put({
            TableName: 'table_name',
            Item: Item
        }).promise();
    })
}
Well, let me summarize the steps:
Add permission for the S3 event on your Lambda function
Add the IAM policy to the Lambda role for the actions s3:GetObject and dynamodb:PutItem
Update your Lambda function code to export the S3 file to a DynamoDB item

How to get/generate aws quicksight secure dashboard url

I want to embed a QuickSight dashboard into an application. I have gone through the AWS QuickSight documents, but I did not find where to get the secure signed dashboard URL.
In order to generate a QuickSight secure dashboard URL, follow the steps below:
Step 1: Create a new Identity Pool. Go to https://console.aws.amazon.com/cognito/home?region=us-east-1 and click 'Create new Identity Pool'.
Give an appropriate name.
Go to the Authentication Providers section and select Cognito.
Give the User Pool ID (your user pool ID) and App Client ID (go to App Clients in the user pool and copy the ID).
Click 'Create Pool'. Then click 'Allow' to create the roles of the identity pool in IAM.
Step 2: Assign Custom policy to the Identity Pool Role
Create a custom policy with the below JSON.
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": "quicksight:RegisterUser",
            "Resource": "*",
            "Effect": "Allow"
        },
        {
            "Action": "quicksight:GetDashboardEmbedUrl",
            "Resource": "*",
            "Effect": "Allow"
        },
        {
            "Action": "sts:AssumeRole",
            "Resource": "*",
            "Effect": "Allow"
        }
    ]
}
Note: if you want to restrict the user to only one dashboard, replace the * with the dashboard ARN in quicksight:GetDashboardEmbedUrl.
Then go to the roles in IAM,
select the IAM role of the Identity Pool, and assign the custom policy to the role.
Step 3: Configuration for generating the temporary IAM (STS) user
Log in to your application with the user credentials.
To create the temporary IAM user, we use Cognito credentials.
When the user logs in, Cognito generates 3 tokens - IDToken, AccessToken, RefreshToken. These tokens will be sent to your application server.
For creating a temporary IAM user, we use the Cognito AccessToken, and the credentials will look like below.
AWS.config.region = 'us-east-1';
AWS.config.credentials = new AWS.CognitoIdentityCredentials({
    IdentityPoolId: "Identity pool ID",
    Logins: {
        'cognito-idp.us-east-1.amazonaws.com/UserPoolID': AccessToken
    }
});
For generating temporary IAM credentials, we call the sts.assumeRole method with the below parameters.
var params = {
    RoleArn: "Cognito Identity role arn",
    RoleSessionName: "Session name"
};
sts.assumeRole(params, function (err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else {
        console.log(data);
    }
});
You can add additional parameters like duration (in seconds) for the user.
Now, we will get the AccessKeyId, SecretAccessKey and Session Token of the temporary user.
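A small sketch of pulling those values out of the assumeRole response (the property names follow the STS API):

sts.assumeRole(params, function (err, data) {
    if (err) return console.log(err, err.stack);
    // data.Credentials holds the temporary user's keys; Step 5 plugs these into AWS.config.update().
    var AccessKeyId = data.Credentials.AccessKeyId;
    var SecretAccessKey = data.Credentials.SecretAccessKey;
    var SessionToken = data.Credentials.SessionToken;
});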
Step 4: Register the user in QuickSight
With the help of the same Cognito credentials used in Step 3, we register the user in QuickSight by using the quicksight.registerUser method with the below parameters.
var params = {
    AwsAccountId: "account id",
    Email: 'email',
    IdentityType: 'IAM',
    Namespace: 'default',
    UserRole: ADMIN | AUTHOR | READER | RESTRICTED_AUTHOR | RESTRICTED_READER,
    IamArn: 'Cognito Identity role arn',
    SessionName: 'session name given in the assume role creation',
};
quicksight.registerUser(params, function (err, data1) {
    if (err) console.log("err register user"); // an error occurred
    else {
        // console.log("Register User1");
    }
});
Now the user will be registered in quicksight.
Step 5: Update the AWS configuration with the new credentials.
The below code shows how to configure AWS.config() with the new credentials generated in Step 3.
AWS.config.update({
    accessKeyId: AccessKeyId,
    secretAccessKey: SecretAccessKey,
    sessionToken: SessionToken,
    region: Region
});
Step 6: Generate the embed URL for dashboards:
Using the credentials generated in Step 3, we call quicksight.getDashboardEmbedUrl with the below parameters.
var params = {
    AwsAccountId: "account ID",
    DashboardId: "dashboard Id",
    IdentityType: "IAM",
    ResetDisabled: true,
    SessionLifetimeInMinutes: between 15 to 600 minutes,
    UndoRedoDisabled: True | False
}
quicksight.getDashboardEmbedUrl(params,
    function (err, data) {
        if (!err) {
            console.log(data);
        } else {
            console.log(err);
        }
    });
Now, we will get the embed url for the dashboard.
Call QuickSightEmbedding.embedDashboard from the front end with the help of the above generated URL.
The result will be the dashboard embedded in your application with filter controls.
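A minimal front-end sketch with the amazon-quicksight-embedding-sdk (the container id and sizes are placeholders):

// Assumes the amazon-quicksight-embedding-sdk script is loaded on the page.
var options = {
    url: embedUrl, // the URL returned by getDashboardEmbedUrl
    container: document.getElementById('dashboardContainer'),
    height: '700px',
    width: '100%'
};
var dashboard = QuickSightEmbedding.embedDashboard(options);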
This link will give you what you need from the AWS CLI: https://aws.amazon.com/blogs/big-data/embed-interactive-dashboards-in-your-application-with-amazon-quicksight/
This is the step 3 AWS CLI command to give you the embed URL (I was able to execute it):
aws quicksight get-dashboard-embed-url --aws-account-id (your account ID) --dashboard-id (your dashboard ID) --identity-type IAM
There are many other dependencies to enable the embedded dashboard per the AWS documents. I have not been able to do that successfully. Good luck, and let me know if you make it happen!
PHP implementation
(in addition to Siva Sumanth's answer)
https://gist.github.com/evgalak/d0d1adf099e2d7bff741c16a89bf30ba

AWS generate dynamic credential for S3 folder level access?

I'm new to AWS and still figuring out how to do things.
Part of my web application uses AWS S3 for file storage, but I want each user to only be able to access specific folders (for CRUD) in the bucket.
The backend server will track which folders the user is able to access.
I know it is possible to define policies that allow access to specific folders (by matching the prefix of objects), but can I generate these policies dynamically and get credentials with these policies attached (probably with Cognito)? Then these credentials could be passed to the client side to enable access to the S3 folders.
I'm wondering if it is possible to do that and what services are required to achieve this.
You should change your approach: each time you want to share a file with one of your users, you should check your database for their permissions (the folders they have access to) and, if everything on your side checks out, generate a presigned URL for access to that object.
How presigned URLs work:
When you generate a presigned URL for accessing an object, you can set a time limit too; after that time, the URL no longer works and is expired.
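For example, with the AWS SDK for JavaScript (a minimal sketch; the bucket, key and expiry are placeholders):

var AWS = require('aws-sdk');
var s3 = new AWS.S3();

// Generate a URL that lets this user download the object for the next 15 minutes only.
var url = s3.getSignedUrl('getObject', {
    Bucket: 'my-company',
    Key: 'home/David/report.pdf',
    Expires: 900 // seconds
});
console.log(url);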
For more information about the presigned URL, read the following documents on Amazon Web services website:
Generate a Pre-signed Object URL Using the AWS SDK for Java
Generate a Pre-signed Object URL Using AWS SDK for .NET
Also, if you want to create users and assign the right policy to give them access to their folder, you can follow these instructions:
You can use the IAM API to create a user for each of your users and attach the right policy to each of them.
For example, for creating the new user, you should use the following API
/* The following create-user command creates an IAM user named Bob in the current account. */
var params = {
    UserName: "Bob"
};
iam.createUser(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data);               // successful response
    /*
    data = {
        User: {
            Arn: "arn:aws:iam::123456789012:user/Bob",
            CreateDate: <Date Representation>,
            Path: "/",
            UserId: "AKIAIOSFODNN7EXAMPLE",
            UserName: "Bob"
        }
    }
    */
});
For more info about the Create user API, read the following
https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateUser.html
After creating a user, you should create a policy for each of them with CreatePolicy API.
var params = {
    PolicyDocument: 'STRING_VALUE', /* required */
    PolicyName: 'STRING_VALUE',     /* required */
    Description: 'STRING_VALUE',
    Path: 'STRING_VALUE'
};
iam.createPolicy(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data);               // successful response
});
For more info about the Create policy read the following doc:
https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreatePolicy.html
And finally, you should assign the policy you created before to each user by the AttachUserPolicy API.
/* The following command attaches the AWS managed policy named AdministratorAccess to the IAM user named Alice. */
var params = {
    PolicyArn: "arn:aws:iam::aws:policy/AdministratorAccess",
    UserName: "Alice"
};
iam.attachUserPolicy(params, function(err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log(data);               // successful response
});
For more info about the AttachUserPolicy API read the following doc:
https://docs.aws.amazon.com/IAM/latest/APIReference/API_AttachUserPolicy.html
The last part is about which policy you should create and assign to each of them; we use the following policy for listing objects in each folder:
{
    "Sid": "AllowListingOfUserFolder",
    "Action": ["s3:ListBucket"],
    "Effect": "Allow",
    "Resource": ["arn:aws:s3:::my-company"],
    "Condition": {"StringLike": {"s3:prefix": ["home/David/*"]}}
}
And the following policy for actions in each folder:
{
    "Sid": "AllowAllS3ActionsInUserFolder",
    "Effect": "Allow",
    "Action": ["s3:*"],
    "Resource": ["arn:aws:s3:::my-company/home/David/*"]
}
For more detailed info about that policies read the following article by Jim Scharf:
https://aws.amazon.com/blogs/security/writing-iam-policies-grant-access-to-user-specific-folders-in-an-amazon-s3-bucket/

AWS SDK What context do applications run in for ACL?

We're using the AWS SDK (.NET) and have successfully uploaded files through our program using PutObjectRequest. I know how to set the ACL permissions on the file once it's created, but when trying to Get the file using GetObjectRequest our application is getting "Access Denied". I realize that I don't know what the user ID is for the application that's running. How can I make sure my application has the permissions needed to read the file, without using "public" rights? (Setting the ACL on the file to public works for the application.)
Is there a way to make the application retrieve a file AS a certain user or group?
Any AWS API call request needs an access key and secret key pair, which can be set from:
Hard coded in the app,
AWS configuration file (by default it is in ~/.aws/credentials or C:\Users\*USERNAME*\.aws\credentials), or
Instance role for EC2 instances.
For #1 and #2, you can check your IAM user permissions at https://console.aws.amazon.com/iam/home?region=us-east-1#users.
For #3, you can check your IAM role at https://console.aws.amazon.com/iam/home?region=us-east-1#roles.
Make sure the user/role has enough permission to read your S3 object. You can attach this policy to the user/role:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Stmt1455021573875",
            "Action": [
                "s3:GetObject",
                "s3:GetObjectAcl"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::your-bucket-name/*"
        }
    ]
}
Adjust the above policy. Your app should have read object access now.
Btw, you can set ACL while creating a resource. You can find the documentation on http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-dot-net-sdk.html.
static string bucketName = "*** Provide existing bucket name ***";
static string newBucketName = "*** Provide a name for a new bucket ***";
static string newKeyName = "*** Provide a name for a new key ***";

IAmazonS3 client;
client = new AmazonS3Client(Amazon.RegionEndpoint.USEast1);

// Retrieve ACL from one of the owner's buckets
S3AccessControlList acl = client.GetACL(new GetACLRequest
{
    BucketName = bucketName,
}).AccessControlList;

// Describe grant for full control for owner.
S3Grant grant1 = new S3Grant
{
    Grantee = new S3Grantee { CanonicalUser = acl.Owner.Id },
    Permission = S3Permission.FULL_CONTROL
};

// Describe grant for write permission for the LogDelivery group.
S3Grant grant2 = new S3Grant
{
    Grantee = new S3Grantee { URI = "http://acs.amazonaws.com/groups/s3/LogDelivery" },
    Permission = S3Permission.WRITE
};

PutBucketRequest request = new PutBucketRequest()
{
    BucketName = newBucketName,
    BucketRegion = S3Region.US,
    Grants = new List<S3Grant> { grant1, grant2 }
};
PutBucketResponse response = client.PutBucket(request);

PutObjectRequest objectRequest = new PutObjectRequest()
{
    ContentBody = "Object data for simple put.",
    BucketName = newBucketName,
    Key = newKeyName,
    Grants = new List<S3Grant> { grant1 }
};
PutObjectResponse objectResponse = client.PutObject(objectRequest);