AWS.S3.upload() 403 Error When Attempting Multipart Upload - amazon-web-services

TL;DR
When uploading a file directly from the browser using the s3.upload() method of the AWS SDK for JavaScript in the Browser, combined with temporary IAM credentials generated by a call to AWS.STS.getFederationToken(), everything works fine for non-multipart uploads, and for the first part of a multipart upload.
But when s3.upload() attempts to send the second part of a multipart upload, S3 responds with a 403 Access Denied error.
Why?
The Context
I'm implementing an uploader in my app that will enable multipart (chunked) uploads directly from the browser to my S3 bucket.
To achieve this, I'm using the s3.upload() method of the AWS SDK for Javascript in the Browser, which I understand to be little more than sugar over new AWS.S3.ManagedUpload().
A simple illustration of what I'm attempting can be found here: https://aws.amazon.com/blogs/developer/announcing-the-amazon-s3-managed-uploader-in-the-aws-sdk-for-javascript/
I'm also using AWS.STS.getFederationToken() as a means to vend temporary IAM user credentials from my API layer to authorize the uploads.
The 1,2,3:
The user initiates an upload by choosing a file via a standard HTML <input type="file">.
This triggers an initial request to my API layer to ensure the user has the necessary privileges on my own system to perform this action. If so, my server calls AWS.STS.getFederationToken() with a Policy param that scopes their privileges down to nothing more than uploading the file to the key provided, and then returns the resulting temporary creds to the browser.
Now that the browser has the temp creds it needs, it can go about using them to create a new AWS.S3 client and then execute the AWS.S3.upload() method to perform a (supposedly) automagical multipart upload of the file.
The Code
api.myapp.com/vendUploadCreds.js
This is the API layer method called that generates and vends the temporary upload creds. At this point in the process the account has already been authenticated and authorized to receive the creds and upload the file.
module.exports = function vendUploadCreds(request, response) {

    var account = request.params.account;
    var file = request.params.file;
    var bucket = 'cdn.myapp.com';

    var sts = new AWS.STS({
        accessKeyId : process.env.MY_AWS_ACCESS_KEY_ID,
        secretAccessKey : process.env.MY_AWS_SECRET_ACCESS_KEY
    });

    /// The following policy is *exactly* the same as the S3 policy
    /// attached to the IAM user that executes this STS request.

    var policy = {
        Version : '2012-10-17',
        Statement : [
            {
                Effect : 'Allow',
                Action : [
                    's3:ListBucket',
                    's3:ListBucketMultipartUploads',
                    's3:ListBucketVersions',
                    's3:ListMultipartUploadParts',
                    's3:AbortMultipartUpload',
                    's3:GetObject',
                    's3:GetObjectVersion',
                    's3:PutObject',
                    's3:PutObjectAcl',
                    's3:PutObjectVersionAcl',
                    's3:DeleteObject',
                    's3:DeleteObjectVersion'
                ],
                Resource : [
                    'arn:aws:s3:::' + bucket + '/' + account._id + '/files/' + file.name
                ],
                Condition : {
                    StringEquals : {
                        's3:x-amz-acl' : ['private']
                    }
                }
            }
        ]
    };

    sts.getFederationToken({
        DurationSeconds : 129600, /// 36 hours
        Name : account._id + '-uptoken',
        Policy : JSON.stringify(policy)
    }, function(err, data) {
        if (err) return console.log(err, err.stack); /// an error occurred
        response.send(data);
    });

}
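For reference (my annotation, not part of the original post), the data object passed to response.send() above follows the STS GetFederationToken response shape, roughly as sketched below; the browser-side code reads AccessKeyId, SecretAccessKey, and SessionToken out of the Credentials block (all values here are placeholders):

/// Approximate shape of the AWS.STS.getFederationToken() response ("data" above):
{
    Credentials : {
        AccessKeyId     : 'ASIA...',
        SecretAccessKey : '...',
        SessionToken    : '...',
        Expiration      : '2017-12-11T22:53:09.000Z'
    },
    FederatedUser : {
        FederatedUserId : '<accountId>:<account._id>-uptoken',
        Arn             : 'arn:aws:sts::<accountId>:federated-user/<account._id>-uptoken'
    },
    PackedPolicySize : 42
}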
console.myapp.com/uploader.js
This is a truncated illustration of the uploader on the browser-side that first calls the vendUploadCreds API method and then uses the resulting temporary creds to execute the multipart upload.
uploader.getUploadCreds = function(account, file) {

    /// A request is sent to api.myapp.com/vendUploadCreds
    /// Upon successful response, the creds are returned.

    request('https://api.myapp.com/vendUploadCreds', {
        params : {
            account : account,
            file : file
        }
    }, function(error, data) {
        upload.credentials = data.credentials;
        uploader.uploadFile(upload);
    });

}

uploader.uploadFile = function(upload) {

    var uploadID = upload.id;

    /// The `upload` object coming through via the args has
    /// a `credentials` property containing the creds obtained
    /// via the `vendUploadCreds` method above.

    var credentials = new AWS.Credentials({
        accessKeyId : upload.credentials.AccessKeyId,
        secretAccessKey : upload.credentials.SecretAccessKey,
        sessionToken : upload.credentials.SessionToken
    });

    AWS.config.region = 'us-east-1';

    var s3 = new AWS.S3({
        credentials : credentials,
        signatureVersion : 'v2', /// 'v4' also attempted
        params : {
            Bucket : 'cdn.myapp.com'
        }
    });

    var uploader = s3.upload({
        Key : upload.key,
        ACL : 'private',
        ContentType : upload.file.type,
        Body : upload.file
    }, {
        queueSize : 3,
        partSize : 1024 * 1024 * 5 /// 5 MB parts (the S3 minimum)
    });

    uploader.on('httpUploadProgress', function(event) {
        var percent = Math.ceil((event.loaded / event.total) * 100);
        console.log('Uploaded ' + percent + '% of ' + upload.key);
    });

    uploader.send(function(error, result) {
        console.log(error, result);
    });

}
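Not part of the original question, but since the vended policy grants s3:AbortMultipartUpload, here's a minimal sketch of cancelling an in-flight upload, assuming uploader is the AWS.S3.ManagedUpload instance returned by s3.upload() above and cancelButton is a hypothetical UI element:

/// Sketch only: cancelling a managed upload that is in progress.
cancelButton.addEventListener('click', function() {
    /// abort() stops the queued parts and issues AbortMultipartUpload,
    /// which is why that action is granted in the vended policy.
    uploader.abort();
});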
cdn.myapp.com S3 Bucket CORS Configuration
So far as I can tell, this is wide open, so CORS shouldn't be the issue?
<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <CORSRule>
        <AllowedOrigin>*</AllowedOrigin>
        <AllowedMethod>GET</AllowedMethod>
        <AllowedMethod>PUT</AllowedMethod>
        <AllowedMethod>POST</AllowedMethod>
        <AllowedMethod>DELETE</AllowedMethod>
        <MaxAgeSeconds>3000</MaxAgeSeconds>
        <ExposeHeader>ETag</ExposeHeader>
        <AllowedHeader>*</AllowedHeader>
    </CORSRule>
</CORSConfiguration>
The Error
Okay, so when I attempt to upload a file, it gets really confusing:
Any file under 5 MB uploads just fine. Files under 5 MB (the minimum part size for an S3 multipart upload) don't require a multipart upload, so s3.upload() sends them as a standard PUT request. Makes sense, and they succeed just fine.
Any file over 5 MB seems to upload fine, but only for the first part. When s3.upload() attempts to send the second part, S3 responds with a 403 Access Denied error.
I hope you're a fan of info, because here's a dump of the error that I get from Chrome when I attempt to upload Astrud Gilberto's melancholy classic "So Nice (Summer Samba)" (MP3, 6.6 MB):
General
Request URL:https://s3.amazonaws.com/cdn.myapp.com/5a2cbda70b9b741661ad98df/files/Astrud-Gilberto-So-Nice-1512903188573.mp3?partNumber=2&uploadId=ljaviv9n25aRKwc4HKGhBbbXTWI3wSGZwRRi39fPSEvU2dcM9G7gO6iu5w7va._dMTZil4e_b53Iy5ngojJqRr5F6Uo_ZXuF27yaqizeARmUVf5ZVeah8ZjYwkZV8C0i3rhluYoxFHUPxlLMjaKLww--
Request Method:PUT
Status Code:403 Forbidden
Remote Address:52.216.165.77:443
Referrer Policy:no-referrer-when-downgrade
Response Headers
Access-Control-Allow-Methods:GET, PUT, POST, DELETE
Access-Control-Allow-Origin:*
Access-Control-Expose-Headers:ETag
Access-Control-Max-Age:3000
Connection:close
Content-Type:application/xml
Date:Sun, 10 Dec 2017 10:53:12 GMT
Server:AmazonS3
Transfer-Encoding:chunked
Vary:Origin, Access-Control-Request-Headers, Access-Control-Request-Method
x-amz-id-2:0Mzo7b/qj0r5Is7aJIIJ/U2VxTTulWsjl5kJpTnEhy/B0fQDlRuANcursnxI71LA16AdePVSc/s=
x-amz-request-id:DA008A5116E0058F
Request Headers
Accept:*/*
Accept-Encoding:gzip, deflate, br
Accept-Language:en-US,en;q=0.9
Authorization:AWS ASIAJAR5KXKAOPTC64PQ:Wo9lbflZuVVS9+UTTDSjU0iPUbI=
Cache-Control:no-cache
Connection:keep-alive
Content-Length:1314943
Content-Type:application/octet-stream
DNT:1
Host:s3.amazonaws.com
Origin:http://132.12.23.145:8080
Pragma:no-cache
Referer:http://132.12.23.145:8080/
User-Agent:Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36
X-Amz-Date:Sun, 10 Dec 2017 10:53:09 GMT
x-amz-security-token:FQoDYXdzENT//////////wEaDK9srK2+5FN91W+T+SLSA/LdEwpOiY7wDkgggOMhuGEiqIXAQrFMk/EqvZFl8Npqx414WsL9E310rj5mU1RGXsxuN+ers1r6NVPpJIlXSDG7bnwlGabejNvDL9vMX5HJHGbZOEVUoaL60/T5NM+0TZtH61vHAEVmRVFKOB0tSez8TEU1jQ2cJME0THn5RuV/6CuIpA9dlEYO7/ajB5UKT3F1rBkt12b0DeWmKG2pvTJRwa8nrsF6Hk6dk1B1Hl1fUwAh9rD17O9Roi7MFLKisPH+96WX08liC8k+n+kPPOox6ZZM/lOMwlNinDjLc2iC+JD/6uxyAGpNbQ7OHAUsF7DOiMvw6Nv6PrImrBvnK439BhLOk1VXCfxxmtTWGim8TD1w1EciZcJhsuCMpDF8fMnhF/JFw3KNOJXHUtpTGRjNbOPcPojVs3FgIt+9MllIA0pGMr2bYmA3HvKewnhD2qeKkG3DPDIbpwuRoY4wIXCP5OclmoHp5nE5O94aRIvkBvS1YmqDQO+jTiI7/O7vlX63q9sGqdIA4nwzh5ASTRJhC2rKgxepFirEB53dCev8i9f1pwXG3/4H3TvPCLVpK94S7/csNJexJP75bPBpo4nDeIbOBKKIMuUDK1pQsyuGwuUolKS00QU=
X-Amz-User-Agent:aws-sdk-js/2.164.0 callback
Query String Params
partNumber:2
uploadId:ljaviv9n25aRKwc4HKGhBbbXTWI3wSGZwRRi39fPSEvU2dcM9G7gO6iu5w7va._dMTZil4e_b53Iy5ngojJqRr5F6Uo_ZXuF27yaqizeARmUVf5ZVeah8ZjYwkZV8C0i3rhluYoxFHUPxlLMjaKLww--
Actual Response Body
And here's the body of the response from S3:
<?xml version="1.0" encoding="UTF-8"?>
<Error>
    <Code>AccessDenied</Code>
    <Message>Access Denied</Message>
    <RequestId>8277A4969E955274</RequestId>
    <HostId>XtQ2Ezv0Wa81Rm2jymB5ZwTe+OHfwTcnNapYMgceqZCJeb75YwOa1AZZ5/10CAeVgmfeP0BFXnM=</HostId>
</Error>
The Questions
It's obviously not an issue with the creds created by the sts.getFederationToken() request, because if it were then the smaller (non-multipart) uploads would fail as well, right?
It's obviously not an issue with the CORS configuration on the cdn.myapp.com bucket, because if it were then the smaller (non-multipart) uploads would fail as well, right?
Why would S3 accept partNumber=1 of a multipart upload, and then 403 on the partNumber=2 of the same upload?

A Solution
After many hours of wrestling with this, I figured out that the issue was with the Condition block of the IAM policy that I was sending through as the Policy param of my AWS.STS.getFederationToken() request. Specifically, AWS.S3.upload() only sends the x-amz-acl header on the initial request, the call to S3.initiateMultipartUpload.
The x-amz-acl header is not included in the subsequent PUT requests for the actual parts of the upload.
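For illustration (this sketch is mine, not from the original post), the lower-level calls that ManagedUpload makes look roughly like this: createMultipartUpload accepts an ACL parameter (which becomes the x-amz-acl header on that one request), while uploadPart has no ACL parameter at all, so the header never appears on the part uploads (chunk below is a placeholder for a slice of the file):

/// Rough sketch of what AWS.S3.ManagedUpload does internally (illustrative only):
s3.createMultipartUpload({
    Bucket : 'cdn.myapp.com',
    Key    : upload.key,
    ACL    : 'private'             /// becomes the x-amz-acl header, sent once
}, function(err, data) {
    s3.uploadPart({
        Bucket     : 'cdn.myapp.com',
        Key        : upload.key,
        UploadId   : data.UploadId,
        PartNumber : 2,
        Body       : chunk         /// no ACL parameter exists here, so no x-amz-acl header
    }, function(err, part) { /* ... */ });
});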
I had the following condition on my IAM Policy, which I was using to ensure that any uploads must have an ACL of 'private':
Condition : {
    StringEquals : {
        's3:x-amz-acl' : ['private']
    }
}
So the initial call to S3.initiateMultipartUpload was fine, but the subsequent PUTs for the parts failed because they didn't carry the x-amz-acl header.
The solution was to edit the policy I was attaching to the temporary user and move the s3:PutObject permission into its own statement, and then adjust the condition to apply only if the targeted value exists. The final policy looks like so:
var policy = {
    Version : '2012-10-17',
    Statement : [
        {
            Effect : 'Allow',
            Action : [
                's3:PutObject'
            ],
            Resource : [
                'arn:aws:s3:::' + bucket + '/' + account._id + '/files/' + file.name
            ],
            Condition : {
                StringEqualsIfExists : {
                    's3:x-amz-acl' : ['private']
                }
            }
        },
        {
            Effect : 'Allow',
            Action : [
                's3:AbortMultipartUpload'
            ],
            Resource : [
                'arn:aws:s3:::' + bucket + '/' + account._id + '/files/' + file.name
            ]
        }
    ]
};
Hopefully that'll save someone else from wasting three days on this.

Related

Cloudfront Malformed Policy Error with AWS Cloudfront-Signer V3

I'm having an issue with the AWS CloudFront cookie signer (v3) and custom policies. I'm currently using @aws-sdk/cloudfront-signer v3.254.0. I have followed the official docs on how to create and handle signed cookies; it works as long as I don't use custom policies.
Setup
I use a custom lambda via an API Gateway to obtain the Set-Cookie header with my signed cookies. These cookies will be attached to a further file-request via my AWS Cloudfront instance. In order to avoid CORS errors, I have set up custom domains for the API Gateway as well as for the Cloudfront instance.
A minified example of the signing and the return value looks as follows:
// Expiration time
const getExpTime = new Date(Date.now() + 5 * (60 * 60 * 1000)).toISOString();

// Cookie-Signer
const signedCookie = getSignedCookies({
    keyPairId: "MY-KEYPAIR-ID",
    privateKey: "MY-PRIVATE-KEY",
    url: "https://cloudfront.example.com/path-to-file/file.m3u8",
    dateLessThan: getExpTime,
});

// Response
const response = {
    statusCode: 200,
    isBase64Encoded: false,
    body: JSON.stringify({ url: url, bucket: bucket, key: key }),
    headers: {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "https://example.com",
        "Access-Control-Allow-Credentials": true,
        "Access-Control-Allow-Methods": "OPTIONS,POST,GET",
    },
    multiValueHeaders: {
        "Set-Cookie": [
            `CloudFront-Expires=${signedCookie["CloudFront-Expires"]}; Domain=example.com; Path=/${path}/`,
            `CloudFront-Signature=${signedCookie["CloudFront-Signature"]}; Domain=example.com; Path=/${path}/`,
            `CloudFront-Key-Pair-Id=${signedCookie["CloudFront-Key-Pair-Id"]}; Domain=example.com; Path=/${path}/`,
        ],
    },
};
This works well if I request a single file from my S3 bucket. However, I want to stream video files from S3 via CloudFront, and according to the AWS docs, wildcard characters are only allowed with custom policies. I need the wildcard to grant access to the entire folder containing my video chunks. Again following the official docs, I have updated my lambda with:
// Expiration time
const getExpTime = new Date(Date.now() + 5 * (60 * 60 * 1000)).getTime();

// Custom Policy
const policyString = JSON.stringify({
    Statement: [
        {
            Resource: "https://cloudfront.example.com/path-to-file/*",
            Condition: {
                DateLessThan: { "AWS:EpochTime": getExpTime },
            },
        },
    ],
});

// Cookie signing
const signedCookie = getSignedCookies({
    keyPairId: "MY-KEYPAIR-ID",
    privateKey: "MY-PRIVATE-KEY",
    policy: policyString,
    url: "https://cloudfront.example.com/path-to-file/*",
});
which results in a Malformed Policy error.
What confuses me is that the getSignedCookies() method requires the url property even though I'm using a custom policy with the Resource parameter. Since the Resource parameter is optional, I've also tried without it, which led to the same error.
To rule out that something is wrong with the wildcard character, I've also run a test pointing at the exact file while using the custom policy. Although this works without the custom policy, it fails with the Malformed Policy error when the custom policy is used.
Since there is also no example of how to use the CloudFront cookie signer (v3) with custom policies, I'd be very grateful if someone could tell me how I'm supposed to type this out!
Cheers! 🙌

AWS S3 Generate a presigned url with a md5 check

I'm looking to generate a pre-signed URL with AWS S3.
It works fine with some conditions (MIME type, for example), but I'm unable to use 'Content-MD5'.
I use the Node.js SDK and put the MD5 in the Fields object.
const options = {
    Bucket: bucket,
    Expires: expires,
    ContentType: 'multipart/form-data',
    Conditions: [{ key }],
    Fields: {
        'Content-MD5': params.md5,
    },
} as PresignedPost.Params;

if (acl) {
    options.Conditions.push({ acl });
}
if (params.mimeType) {
    options.Conditions.push({ contentType: params.mimeType });
}
But when I upload the file afterwards, I would like AWS to check the uploaded file against the MD5 given in the presigned request; instead I always get this error:
<?xml version="1.0" encoding="UTF-8"?>
<Error>
    <Code>AccessDenied</Code>
    <Message>Invalid according to Policy: Policy Condition failed: ["eq", "$Content-MD5", "<md5>"]</Message>
    <RequestId>497312AFEEF83235</RequestId>
    <HostId>KY9RxpGZzRog7hjlDk3whjAbItG/mwhpItYDL7rUNNH4BCXMfmLZsbZIPKivmSZZ3VkWxlgstOk=</HostId>
</Error>
My MD5 is generated like this in the browser (just after recording a video):
const reader = new FileReader();
reader.readAsBinaryString(blob);
reader.onloadend = () => {
    const mdsum = CryptoJS.MD5(reader.result.toString());
    resolve(CryptoJS.enc.Base64.stringify(mdsum));
};
Maybe it's not the way it works ?
Edit:
If I add the following to the upload form data (the MD5 hash is the same as the one set in the presigned request):
formData.append('Content-MD5', encodeURI(fields['Content-MD5']));
the error becomes
<?xml version="1.0" encoding="UTF-8"?>
<Error>
    <Code>BadDigest</Code>
    <Message>The Content-MD5 you specified did not match what we received.</Message>
    <ExpectedDigest>2b36c76525c8d3a6dada59a6ad2867a7</ExpectedDigest>
    <CalculatedDigest>+RifURVLd61O6QCT+SzhBg==</CalculatedDigest>
    <RequestId>B4FF38D0FCC2E8F2</RequestId>
    <HostId>yS7q200rJpBu48RNcGzsb1oGbDUrN8UK9+gkg6jGMl+EJSGeyQaSCfwfcMRUeNlJYapfmF304Oc=</HostId>
</Error>
answer:
const reader = new FileReader();
reader.readAsBinaryString(blob);
reader.onloadend = () => {
    resolve(CryptoJS.enc.Base64.stringify(CryptoJS.MD5(CryptoJS.enc.Latin1.parse(reader.result.toString()))));
};
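For completeness, here's a hedged sketch (not from the original answer) of how the corrected digest then travels with the presigned POST: assuming presigned is the object returned by s3.createPresignedPost() with the options above, and md5 is the Base64 digest computed as in the answer, the same value is appended to the form data so S3 can verify the uploaded body against the signed policy:

/// Sketch: browser-side upload using the presigned POST fields plus the MD5.
const formData = new FormData();
Object.keys(presigned.fields).forEach(function(name) {
    formData.append(name, presigned.fields[name]);
});
formData.append('Content-MD5', md5);   /// must match the value signed into the policy
formData.append('file', blob);         /// the file field has to come last
fetch(presigned.url, { method : 'POST', body : formData });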

AWS Textract InvalidParameterException

I have a .NET Core client application using Amazon Textract with S3, SNS, and SQS as per the AWS document "Detecting and Analyzing Text in Multipage Documents" (https://docs.aws.amazon.com/textract/latest/dg/async.html).
I created an AWS role with the AmazonTextractServiceRole policy and added the following trust relationship as per the documentation (https://docs.aws.amazon.com/textract/latest/dg/api-async-roles.html):
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "textract.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}
I subscribed the SQS queue to the SNS topic and gave the Amazon SNS topic permission to send messages to the Amazon SQS queue, as per the AWS documentation.
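Roughly, the queue access policy described in that documentation looks like the sketch below (queue ARN is a placeholder; note it grants only sqs:SendMessage, which becomes relevant in the answer further down):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": { "Service": "sns.amazonaws.com" },
            "Action": "sqs:SendMessage",
            "Resource": "arn:aws:sqs:us-west-2:xxxxxxxxxxxx:MyTextractQueue",
            "Condition": {
                "ArnEquals": { "aws:SourceArn": "arn:aws:sns:us-west-2:xxxxxxxxxxxx:AmazonTextract-My-Topic" }
            }
        }
    ]
}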
All resources, including the S3 bucket, SNS topic, and SQS queue, are in the same us-west-2 region.
The following method fails with a generic InvalidParameterException:
Request has invalid parameters
But if the NotificationChannel section is commented out, the code works fine and returns the correct job ID.
The error message gives no clear picture of which parameter is at fault. Any help is highly appreciated.
public async Task<string> ScanDocument()
{
    string roleArn = "aws:iam::xxxxxxxxxxxx:instance-profile/MyTextractRole";
    string topicArn = "aws:sns:us-west-2:xxxxxxxxxxxx:AmazonTextract-My-Topic";
    string bucketName = "mybucket";
    string filename = "mytestdoc.pdf";

    var request = new StartDocumentAnalysisRequest();
    var notificationChannel = new NotificationChannel();
    notificationChannel.RoleArn = roleArn;
    notificationChannel.SNSTopicArn = topicArn;

    var s3Object = new S3Object
    {
        Bucket = bucketName,
        Name = filename
    };
    request.DocumentLocation = new DocumentLocation
    {
        S3Object = s3Object
    };

    request.FeatureTypes = new List<string>() { "TABLES", "FORMS" };
    request.NotificationChannel = notificationChannel; /* Commenting out this line makes the code work */

    var response = await this._textractService.StartDocumentAnalysisAsync(request);
    return response.JobId;
}
Debugging Invalid AWS Requests
The AWS SDK validates your request object locally, before dispatching it to the AWS servers. This validation can fail with unhelpfully opaque errors, like the OP's.
As the SDK is open source, you can inspect the source to help narrow down the invalid parameter.
Before we look at the code: the SDK (and its documentation) are actually generated from special JSON files that describe the API, its requirements, and how to validate them.
I'm going to use the Node.js SDK as an example, but I'm sure similar approaches work for the other SDKs, including .NET.
In our case (AWS Textract), the latest API version is 2018-06-27. Sure enough, the JSON source file is on GitHub, here.
In my case, experimentation narrowed the issue down to the ClientRequestToken. The error was an opaque InvalidParameterException. I searched for it in the SDK source JSON file, and sure enough, on line 392:
"ClientRequestToken": {
"type": "string",
"max": 64,
"min": 1,
"pattern": "^[a-zA-Z0-9-_]+$"
},
A whole bunch of undocumented requirements!
In my case the token I was using violated the regex (pattern in the above source code). Changing my token code to satisfy the regex solved the problem.
I recommend this approach for these sorts of opaque type errors.
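As a hedged illustration of that fix (the function and input below are made up, not from the original answer), a token derived from a file name can be coerced into the documented pattern and length limit like this:

// Sketch: build a ClientRequestToken that satisfies ^[a-zA-Z0-9-_]+$ and max length 64.
function makeClientRequestToken(fileName) {
    return fileName
        .replace(/[^a-zA-Z0-9_-]/g, '-')   // drop characters the pattern rejects
        .slice(0, 64);                     // respect the documented maximum of 64
}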
After a long day analyzing the issue, I was able to resolve it. As per the documentation, the topic should only require the SendMessage action on the SQS queue, but after changing the permission to all SQS actions it started working. Still, the AWS error message is really misleading and confusing.
You would need to change the permissions to all SQS actions and then use code like the below:
import time
import boto3

textract = boto3.client('textract')  # Textract client (region/credentials from your environment)

def startJob(s3BucketName, objectName):
    response = textract.start_document_text_detection(
        DocumentLocation={
            'S3Object': {
                'Bucket': s3BucketName,
                'Name': objectName
            }
        })
    return response["JobId"]

def isJobComplete(jobId):
    # For production use cases, use SNS based notification
    # Details at: https://docs.aws.amazon.com/textract/latest/dg/api-async.html
    time.sleep(5)
    response = textract.get_document_text_detection(JobId=jobId)
    status = response["JobStatus"]
    print("Job status: {}".format(status))

    while(status == "IN_PROGRESS"):
        time.sleep(5)
        response = textract.get_document_text_detection(JobId=jobId)
        status = response["JobStatus"]
        print("Job status: {}".format(status))

    return status

def getJobResults(jobId):
    pages = []
    response = textract.get_document_text_detection(JobId=jobId)
    pages.append(response)
    print("Resultset page received: {}".format(len(pages)))
    nextToken = None
    if('NextToken' in response):
        nextToken = response['NextToken']

    while(nextToken):
        response = textract.get_document_text_detection(JobId=jobId, NextToken=nextToken)
        pages.append(response)
        print("Resultset page received: {}".format(len(pages)))
        nextToken = None
        if('NextToken' in response):
            nextToken = response['NextToken']

    return pages
Invoking Textract with Python, I received the same error until I truncated the ClientRequestToken down to 64 characters:
response = client.start_document_text_detection(
    DocumentLocation={
        'S3Object': {
            'Bucket': bucket,
            'Name': fileName
        }
    },
    ClientRequestToken=fileName[:64],
    NotificationChannel={
        "SNSTopicArn": "arn:aws:sns:us-east-1:AccountID:AmazonTextractXYZ",
        "RoleArn": "arn:aws:iam::AccountId:role/TextractRole"
    }
)
print('Processing started : %s' % json.dumps(response))

s3 SignedUrl x-amz-security-token

const AWS = require('aws-sdk');

export function main (event, context, callback) {
    const s3 = new AWS.S3();
    const data = JSON.parse(event.body);

    const s3Params = {
        Bucket: process.env.mediaFilesBucket,
        Key: data.name,
        ContentType: data.type,
        ACL: 'public-read',
    };

    const uploadURL = s3.getSignedUrl('putObject', s3Params);

    callback(null, {
        statusCode: 200,
        headers: {
            'Access-Control-Allow-Origin': '*'
        },
        body: JSON.stringify({ uploadURL: uploadURL }),
    })
}
When I test it locally it works fine, but after deployment the signed URL includes an x-amz-security-token query parameter, and then I get an Access Denied response. How can I get rid of this x-amz-security-token?
I was having the same issue. Everything was working flawlessly using serverless-offline, but when I deployed to Lambda I started receiving AccessDenied issues on the URL. When comparing the URLs returned by the serverless-offline and AWS deployments, I noticed the only difference was the inclusion of the X-Amz-Security-Token as a query string parameter. After some digging I discovered the token being assigned was based upon the assumed role the Lambda function had. All I had to do was grant the appropriate S3 policies to that role and it worked.
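For example, a minimal policy along these lines attached to the Lambda's execution role is the kind of grant meant above (a sketch; the bucket name is a placeholder, and s3:PutObjectAcl is included because the presigned request sets ACL: 'public-read'):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:PutObject", "s3:PutObjectAcl"],
            "Resource": "arn:aws:s3:::<mediaFilesBucket>/*"
        }
    ]
}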
I just solved a very similar, probably the same, issue as you have. I say probably because you don't say what deployment entails for you. I am assuming you are deploying to Lambda, but you may not be, so this may or may not apply; if you are using temporary credentials it will.
I initially used the method you use above, then tried the npm module aws-signature-v4 to see if it behaved differently, and was getting the same error you are.
You will need the token; it is required when you have signed a request with temporary credentials. In Lambda's case the credentials are in the runtime, including the session token, which you need to pass. The same is most likely true elsewhere as well, but I'm not sure; I haven't used EC2 in a few years.
Buried in the docs (and sorry, I cannot find the place this is stated) it is pointed out that some services require the session token to be processed with the other canonical query params. The module I was using tacked it on at the end, as the sig v4 instructions seem to imply, so I modified it so the token is part of the canonical query string, and it works.
We've updated the live version of the aws-signature-v4 module to reflect this change and now it works nicely for signing your s3 requests.
Signing is discussed here.
I would use the module I did as I have a feeling the sdk is doing the wrong thing for some reason.
Usage example (this is wrapped in a multipart upload, hence the part number and upload ID):
function createBaseUrl( bucketName, uploadId, partNumber, objectKey ) {
    let url = sig4.createPresignedS3URL( objectKey, {
        method: "PUT",
        bucket: bucketName,
        expires: 21600,
        query: `partNumber=${partNumber}&uploadId=${uploadId}`
    });
    return url;
}
I was facing the same issue; I'm creating a signed URL using the Boto3 library in Python 3.7.
Although this is not a recommended way to solve it, it worked for me.
The request method should be POST, with content-type=['multipart/form-data'].
Create a client like this:
# Do not hard code credentials
client = boto3.client(
    's3',
    # Hard coded strings as credentials, not recommended.
    aws_access_key_id='YOUR_ACCESS_KEY',
    aws_secret_access_key='YOUR_SECRET_ACCESS_KEY'
)
Return response
bucket_name = BUCKET
acl = {'acl': 'public-read-write'}
file_path = str(file_name)  # file you want to upload
response = client.generate_presigned_post(bucket_name,
                                           file_path,
                                           Fields={"Content-Type": ""},
                                           Conditions=[acl,
                                                       {"Content-Type": ""},
                                                       ["starts-with", "$success_action_status", ""],
                                                       ],
                                           ExpiresIn=3600)

Bypassing need for x-amz-cf-id header inclusion in S3 auth in cloudfront

I have a not completely orthodox CF->S3 setup. The relevant components here are:
Cloudfront distribution with origin s3.ap-southeast-2.amazonaws.com
Lambda@Edge function (Origin Request) that adds an S3 authorisation (version 2) query string (signed using the S3 policy the function uses).
The request returned from Lambda is completely correct. If I log the uri, host, and query string, I get the file I am requesting. However, if I access it through the CloudFront link directly, the request fails because it no longer uses the AWSAccessKeyID; instead it opts to use x-amz-cf-id (but uses the same Signature, Amz-Security-Token, etc). CORRECTION: it may not replace it, but be required in addition to it.
I know this is the case because I have returned both the StringToSign and the SignatureProvided. These both match the Lambda response except for the AWSAccessKeyID, which has been replaced with the x-amz-cf-id.
This is a very specific question, obviously. I may have to look at remodelling this architecture, but I would prefer not to. There are several requirements which have led me down this not completely regular setup.
I believe the AWSAccessKeyID => x-amz-cf-id replacement is the result of two mechanisms:
First, you need to configure CloudFront to forward the query parameters to the origin. Without that, it will strip all parameters. If you use S3 signed URLs, make sure to also cache based on all parameters as otherwise you'll end up without any access control.
Second, CloudFront attaches the x-amz-cf-id to the requests that are not going to an S3 origin. You can double-check at the CloudFront console the origin type and you need to make sure it is reported as S3. I have a blog post describing it in detail.
But adding the S3 signature to all the requests with Lambda@Edge defeats the purpose. If you want to keep the bucket private and only allow CloudFront to access it, then use an Origin Access Identity; that is precisely the use-case it exists for.
So it seems like with Authentication V2 or V4, the x-amz-cf-id header that's appended to the origin request, and that is inaccessible to the Lambda@Edge origin request function, must be included in the authentication string. This is not possible.
The simple solution is to use the built-in S3 origin integration in CloudFront, with a Lambda@Edge origin request function that switches the bucket if, like me, that's your desired goal. For each bucket you want to use, add the following policy to allow your CF distribution to access the objects within the bucket.
{
    "Version": "2008-10-17",
    "Id": "PolicyForCloudFrontPrivateContent",
    "Statement": [
        {
            "Sid": "1",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity <CloudfrontID>"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::<bucket-name>/*"
        }
    ]
}
CloudfrontID refers to the ID under Origin Access Identity, not the Amazon S3 Canonical ID.
x-amz-cf-id is a reserved header of CloudFront and can be obtained from the event as event['Records'][0]['cf']['config']['requestId']. You don't have to include x-amz-cf-id in the Authentication V4 calculation yourself.
I had a similar task of returning an S3 signed URL from a CloudFront origin request Lambda@Edge function. Here is what I found:
If your S3 bucket does not have dots in the name, you can use an S3 origin in CloudFront, use a domain name in the form of <bucket_name>.s3.<region>.amazonaws.com, and generate the signed URL e.g. via getSignedUrl from @aws-sdk/s3-request-presigner. CloudFront should be configured to pass the URL query to the origin. Do not grant CloudFront access to the S3 bucket in this case: the presigned URL will grant access to the bucket.
However, when your bucket does have dots in the name, the signed URL produced by the function will be a path-style URL and you will need to use a CloudFront custom origin with the s3.<region>.amazonaws.com domain. When using a custom origin, CloudFront adds the "x-amz-cf-id" header to the request to the origin. Quite inconveniently, the value of the header must be signed. However, provided you do not change the origin domain in the Lambda@Edge return value, CloudFront seems to use the same value for the "x-amz-cf-id" header as passed to the lambda event in event.Records[0].cf.config.requestId. You can then generate the S3 signed URL with the value of the header. With AWS JavaScript SDK v3 this can be done using S3Client.middlewareStack.add.
Here is an example of a JavaScript Lambda@Edge function producing an S3 signed URL with the "x-amz-cf-id" header:
const {S3Client, GetObjectCommand} = require("@aws-sdk/client-s3");
const {getSignedUrl} = require("@aws-sdk/s3-request-presigner");

exports.handler = async function handler(event, context) {
    console.log('Request: ', JSON.stringify(event));

    let bucketName = 'XXX';
    let fileName = 'XXX';
    let bucketRegion = 'XXX';

    // Pre-requisite: this Lambda@Edge function has 's3:GetObject' permission for bucket ${bucketName}, otherwise you will get AccessDenied
    const command = new GetObjectCommand({
        Bucket: bucketName, Key: fileName,
    });

    const s3Client = new S3Client({region: bucketRegion});
    s3Client.middlewareStack.add((next, context) => async (args) => {
        args.request.headers["x-amz-cf-id"] = event.Records[0].cf.config.requestId;
        return await next(args);
    }, {
        step: "build", name: "addXAmzCfIdHeaderMiddleware",
    });

    let signedS3Url = await getSignedUrl(s3Client, command, {
        signableHeaders: new Set(["x-amz-cf-id"]), unhoistableHeaders: new Set(["x-amz-cf-id"])
    });

    let parsedUrl = new URL(signedS3Url);

    const request = event.Records[0].cf.request;
    if (!request.origin.custom || request.origin.custom.domainName != parsedUrl.hostname) {
        return {
            status: '500',
            body: `CloudFront should use custom origin configured to the matching domain '${parsedUrl.hostname}'.`,
            headers: {
                'content-type': [{key: 'Content-Type', value: 'text/plain; charset=UTF-8',}]
            }
        };
    }

    request.querystring = parsedUrl.search.substring(1); // drop '?'
    request.uri = parsedUrl.pathname;

    console.log('Response: ', JSON.stringify(request));
    return request;
}