I'm trying to upload files to my S3 bucket using a presigned URL generated by a Lambda function. Everything works fine via Postman, but the browser-based application fails with "SignatureDoesNotMatch".
My Lambda function's region is ap-southeast-1,
but a similar function works fine in ap-south-1 (which is in the same timezone as mine). Any idea why this is happening? Could it have anything to do with a timezone difference between the server and the client?
Please see my code below:
<script>
$(document).one('submit', '#memberForm', function (e) {
e.preventDefault();
$.get("<FUNCTION URL>", function (data) {
var getUrl = data.uploadURL;
var fileName = data.fileName;
var theFormFile = $('#fileLogo').get()[0].files[0];
if (theFormFile != null) {
console.log(theFormFile);
$.ajax({
type: 'PUT',
url: getUrl,
contentType: 'binary/octet-stream',
processData: false,
crossDomain: true,
data: theFormFile,
success: function () {
alert('Yeehaaaw');
},
error: function (e) {
console.log(e);
alert('File NOT uploaded');
console.log(arguments);
}
});
} else {
$("#memberForm").submit();
}
});
return false;
});
</script>
My code for URL generation is as follows:
'use strict'
const AWS = require('aws-sdk')
AWS.config.update({ region: process.env.AWS_REGION || 'ap-southeast-1' })
const s3 = new AWS.S3()
// Main Lambda entry point
exports.handler = async (event) => {
console.log("execution started")
var contentType=event["queryStringParameters"]['contentType']
var path=event["queryStringParameters"]['path']
const result = await getUploadURL(contentType,path)
console.log('Result: ', result)
return result
}
const getContentType=function(contentType){
switch(contentType) {
case "png":
return "image/png"
case "jpg":
return "image/jpeg"
case "pdf":
return "application/pdf"
default:
return "application/json"
}
}
const getExtension=function(contentType){
switch(contentType) {
case "png":
return "png"
case "jpg":
return "jpg"
case "pdf":
return "pdf"
default:
return `${contentType}`
}
}
const getUploadURL = async function(contentType,path) {
console.log(`Content type is ${contentType}`)
const actionId = parseInt(Math.random()*10000000)
var type=getContentType(contentType);
var ext= getExtension(contentType);
const s3Params = {
Bucket: process.env.UploadBucket,
Key: `${path}/${actionId}.${ext}`,
ContentType: type,// Update to match whichever content type you need to upload
ACL: 'public-read', // Enable this setting to make the object publicly readable - only works if the bucket can support public objects,
Expires: 300
}
console.log('getUploadURL: ', s3Params)
return new Promise((resolve, reject) => {
// Get signed URL
resolve({
"statusCode": 200,
"isBase64Encoded": false,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": JSON.stringify({
"uploadURL": s3.getSignedUrl('putObject', s3Params),
"fileName": `${actionId}.${ext}`
})
})
})
}
Also, the same works when I try with Postman.
I resolved this by adding the signature version:
const s3 = new AWS.S3({
    signatureVersion: 'v4'
});
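Only the S3 client construction near the top of the Lambda changes; s3Params and the getSignedUrl call stay exactly as above. A minimal sketch of the updated header:
'use strict'
const AWS = require('aws-sdk')
AWS.config.update({ region: process.env.AWS_REGION || 'ap-southeast-1' })
// Construct the client with Signature Version 4 so the presigned PUT URLs are SigV4-signed.
const s3 = new AWS.S3({ signatureVersion: 'v4' })
// getContentType, getExtension and getUploadURL remain exactly as before.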
I'm trying to send a request with an image to a lambda function through API gateway.
I'm using this piece of code to parse the form-data-object received by my lambda function. I then upload the image to S3, but when downloading and opening the image from S3, I see that it's corrupt.
I have tried the following npm packages:
parse-multipart
parse-multipart-data
Both do not work, because I get an empty parts array. The piece of code I use does get results in the array, but the buffers seem to be corrupt.
The problem seems to be in this line of code:
Buffer.from(item.slice(item.search(/Content-Type:\s.+/g) + item.match(/Content-Type:\s.+/g)[0].length + 4, -4), 'binary')
Does anyone have a solution for me?
You can use busboy to parse the multipart form data before uploading to S3 as shown below:
// This code uses ES module (ESM) syntax, not CommonJS.
// So, make sure you add "type": "module" to your package.json.
import {S3Client, PutObjectCommand} from '@aws-sdk/client-s3';
import Busboy from 'busboy';
// Initialize the clients outside the function handler to take advantage of execution environment reuse.
const s3Client = new S3Client({region: process.env.AWS_REGION});
// Function handler.
export async function handler(event) {
const {
ContentType: contentType = '',
File: file = '',
} = await FORM.parse(event['body'], event['headers']);
try {
// Adds an object to a bucket. We must have `WRITE` permissions on a bucket to add an object to it.
await s3Client.send(new PutObjectCommand({
Body: file,
Bucket: 'BUCKET_NAME',
ContentType: contentType,
Key: 'SOME_KEY',
}));
return {
isBase64Encoded: false,
statusCode: 200,
body: JSON.stringify({
message: 'Everything is gonna be alright.',
}),
}
} catch (e) {
return {
isBase64Encoded: false,
statusCode: 404,
body: JSON.stringify(e),
}
}
}
const FORM = {
parse(body, headers) {
return new Promise((resolve, reject) => {
const data = {};
const buffer = Buffer.from(body, 'base64');
const bb = Busboy({
headers: Object.keys(headers).reduce((newHeaders, key) => {
// busboy expects lower-case headers.
newHeaders[key.toLowerCase()] = headers[key];
return newHeaders;
}, {}),
limits: {
fileSize: 10485760, // Set as desired.
files: 1,
},
});
bb.on('file', (name, stream, info) => {
const chunks = [];
stream.on('data', (chunk) => {
if (name === 'File') {
chunks.push(chunk);
}
}).on('limit', () => {
reject(new Error('File size limit has been reached.'));
}).on('close', () => {
if (name === 'File') {
data[name] = Buffer.concat(chunks);
data['ContentType'] = info.mimeType;
}
});
});
bb.on('error', (err) => {
reject(err);
});
bb.on('close', () => {
resolve(data);
});
bb.end(buffer);
});
}
};
I'd like a user to be able to upload either a JPG or a PNG image to an S3 bucket.
I am using a Lambda function which only lets me presign .jpg images for S3, and it works great for that one file type. How do I add an additional file type to presign, for example .png images? Do I really need to write a new Lambda where I just change .jpg to .png, or can I do it somehow in my existing code below?
const AWS = require('aws-sdk')
AWS.config.update({ region: process.env.REGION })
const s3 = new AWS.S3();
const uploadBucket = 'xxx-bucket'
exports.handler = async (event) => {
const result = await getUploadURL()
console.log('Result: ', result)
return result
};
const getUploadURL = async function() {
console.log('getUploadURL started')
let actionId = Date.now()
var s3Params = {
Bucket: uploadBucket,
Key: `${actionId}.jpg`,
ContentType: 'image/jpeg',
CacheControl: 'max-age=31104000',
ACL: 'public-read',
};
return new Promise((resolve, reject) => {
// Get signed URL
let uploadURL = s3.getSignedUrl('putObject', s3Params)
resolve({
"statusCode": 200,
"isBase64Encoded": false,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": JSON.stringify({
"uploadURL": uploadURL,
"photoFilename": `${actionId}.jpg`
})
})
})
}
Your options are as follows:
Make a new Lambda, as you suggested, to handle PNG separately.
Pass parameters to your getUploadURL function through your event, something like:
exports.handler = async event => {
const { filetype } = event.body // or pathParams, query string, etc.
const result = await getUploadURL(filetype)
console.log('Result: ', result)
return result
};
const getUploadURL = async filetype => {
console.log('getUploadURL started')
let actionId = Date.now()
var s3Params = {
Bucket: uploadBucket,
Key: `${actionId}.${filetype}`,
ContentType: `image/${filetype === 'jpg'?'jpeg':'png'}`,
CacheControl: 'max-age=31104000',
ACL: 'public-read',
};
...
The call to S3.getSignedUrl() requires {Bucket: 'bucket', Key: 'key'} at a minimum for a putObject operation. So if you don't want to sacrifice the filename extension and/or the content-type attribute, those are the only options.
AWS docs
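If you go with the second option, you may also want to whitelist the extensions so a caller can't request an arbitrary content type. A minimal sketch (the ALLOWED map is illustrative, not part of the original code):
const ALLOWED = { jpg: 'image/jpeg', png: 'image/png' }
const getUploadURL = async filetype => {
  const contentType = ALLOWED[filetype]
  if (!contentType) {
    throw new Error(`Unsupported file type: ${filetype}`)
  }
  let actionId = Date.now()
  var s3Params = {
    Bucket: uploadBucket,
    Key: `${actionId}.${filetype}`,
    ContentType: contentType,
    CacheControl: 'max-age=31104000',
    ACL: 'public-read',
  };
  // ...same getSignedUrl call and response shape as in the original function
}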
I am trying to post a file to S3 using createPresignedPost. The file is posted to my bucket, but the file size constraint is not being respected. Here is my code; the file upload is a base64-encoded string.
function postObjectSignedUrl(req) {
const key = `${req + "/" + uuid.v4()}`;
return new Promise(function (resolve, reject) {
const params = {
Bucket: 'base',
Expires: 60 * 60, // in seconds,
Fields: {
key: key,
},
conditions: [
['content-length-range', 0,1000000]
]
}
s3.createPresignedPost(params, (err, data) => {
if (err) {
reject(err)
} else {
resolve(data);
}
})
})
}
My client side code is the following:
var data = new FormData();
const getUrl = await getSignedUrl();
const keys = getUrl["fields"];
$.each(keys, function(key,value){
data.append(key,value);
});
data.append("file", profilePic);
try {
const result = await fetch(getUrl["url"], {
method: "POST",
mode: "cors",
headers: {
'Access-Control-Allow-Origin': '*',
},
body: data
})
if (result.status === 204){
}
} catch (err) {
console.log(err, " error ")
}
Params attributes in the Node.js SDK are normally UpperCamelCase, so you have to change "conditions" to "Conditions".
BTW, you can change your URL generator code as follows :)
function postObjectSignedUrl(req) {
    const key = `${req + "/" + uuid.v4()}`;
    const params = {
        Bucket: 'base',
        Expires: 60 * 60, // in seconds
        Fields: {
            key: key,
        },
        Conditions: [
            ['content-length-range', 0, 1000000]
        ]
    };
    // createPresignedPost does not return an AWS.Request, so there is no .promise();
    // wrap the callback in a Promise instead.
    return new Promise((resolve, reject) => {
        s3.createPresignedPost(params, (err, data) => (err ? reject(err) : resolve(data)));
    });
}
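On the client side you then await the promise and use the returned url and fields exactly as in your existing code, for example (assuming "profile" is the key prefix passed as req):
const presignedPost = await postObjectSignedUrl("profile");
// presignedPost.url is the POST target; presignedPost.fields (key, policy, signature, ...)
// must all be appended to the FormData before the file itself.
With Conditions capitalized, the content-length-range condition becomes part of the signed policy, so S3 will reject any upload outside the 0-1000000 byte range.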
I tried to create an API for uploading & retrieving images with NestJS. Images should be stored on S3.
What I currently have:
Controller
@Post()
@UseInterceptors(FileFieldsInterceptor([
    {name: 'photos', maxCount: 10},
]))
async uploadPhoto(@UploadedFiles() files): Promise<void> {
    await this.s3Service.savePhotos(files.photos)
}

@Get('/:id')
@Header('content-type', 'image/jpeg')
async getPhoto(@Param() params,
               @Res() res) {
    const photoId = PhotoId.of(params.id)
    const photoObject = await this.s3Service.getPhoto(photoId)
    res.send(photoObject)
}
S3Service
async savePhotos(photos: FileUploadEntity[]): Promise<any> {
return Promise.all(photos.map(photo => {
const filePath = `${moment().format('YYYYMMDD-hhmmss')}${Math.floor(Math.random() * (1000))}.jpg`
const params = {
Body: photo.buffer,
Bucket: Constants.BUCKET_NAME,
Key: filePath,
}
return new Promise((resolve) => {
this.client.putObject(params, (err: any, data: any) => {
if (err) {
logger.error(`Photo upload failed [err=${err}]`)
ExceptionHelper.throw(ErrorCodes.SERVER_ERROR_UNCAUGHT_EXCEPTION)
}
logger.info(`Photo upload succeeded [filePath=${filePath}]`)
return resolve()
})
})
}))
}
async getPhoto(photoId: PhotoId): Promise<AWS.S3.Body> {
const object: S3.GetObjectOutput = await this.getObject(S3FileKey.of(`${Constants.S3_PHOTO_PATH}/${photoId.value}`))
.catch(() => ExceptionHelper.throw(ErrorCodes.RESOURCE_NOT_FOUND_PHOTO)) as S3.GetObjectOutput
logger.info(JSON.stringify(object.Body))
return object.Body
}
async getObject(s3FilePath: S3FileKey): Promise<S3.GetObjectOutput> {
logger.info(`Retrieving object from S3 s3FilePath=${s3FilePath.value}]`)
return this.client.getObject({
Bucket: Constants.BUCKET_NAME,
Key: s3FilePath.value
}).promise()
.catch(err => {
logger.error(`Could not retrieve object from S3 [err=${err}]`)
ExceptionHelper.throw(ErrorCodes.SERVER_ERROR_UNCAUGHT_EXCEPTION)
}) as S3.GetObjectOutput
}
The photo object actually ends up in S3, but when I download it I can't open it.
Same for the GET => can't be displayed.
What general mistake(s) am I making here?
I'm not sure which values you are returning to your consumer and which values they use to fetch the image again; could you post what the actual response looks like and what the request is, and verify that the FQDN and path match?
It seems you forgot about ACL as well, i.e. the resources you upload this way are not public-read by default.
BTW you could use aws SDK there:
import { Injectable } from '@nestjs/common'
import * as AWS from 'aws-sdk'
import { InjectConfig } from 'nestjs-config'
import { AwsConfig } from '../../config/aws.config'
import UploadedFile from '../interfaces/uploaded-file'
export const UPLOAD_WITH_ACL = 'public-read'
@Injectable()
export class ImageUploadService {
s3: AWS.S3
bucketName
cdnUrl
constructor(@InjectConfig() private readonly config) {
const awsConfig = (this.config.get('aws') || { bucket: '', secretKey: '', accessKey: '', cdnUrl: '' }) as AwsConfig // read from envs
this.bucketName = awsConfig.bucket
this.cdnUrl = awsConfig.cdnUrl
AWS.config.update({
accessKeyId: awsConfig.accessKey,
secretAccessKey: awsConfig.secretKey,
})
this.s3 = new AWS.S3()
}
upload(file: UploadedFile): Promise<string> {
return new Promise((resolve, reject) => {
const params: AWS.S3.Types.PutObjectRequest = {
Bucket: this.bucketName,
Key: `${Date.now().toString()}_${file.originalname}`,
Body: file.buffer,
ACL: UPLOAD_WITH_ACL,
}
this.s3.upload(params, (err, data: AWS.S3.ManagedUpload.SendData) => {
if (err) {
return reject(err)
}
resolve(`${this.cdnUrl}/${data.Key}`)
})
})
}
}
For anyone having the same troubles, I finally figured it out:
I enabled binary support on API Gateway (<your-gateway> Settings -> Binary Media Types -> */*) and then returned all responses from lambda base64 encoded. API Gateway will do the decode automatically before returning the response to the client.
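Without serverless-express, the equivalent for a plain handler is to base64-encode the body yourself and set isBase64Encoded, roughly like this (a sketch, assuming the S3 Body is a Buffer):
return {
  statusCode: 200,
  isBase64Encoded: true, // API Gateway decodes this before returning it to the client
  headers: { 'Content-Type': 'image/jpeg' },
  body: photoObject.Body.toString('base64'),
}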
With serverless-express you can enable the automatic base64 encoding easily at server creation:
const BINARY_MIME_TYPES = [
'application/javascript',
'application/json',
'application/octet-stream',
'application/xml',
'font/eot',
'font/opentype',
'font/otf',
'image/jpeg',
'image/png',
'image/svg+xml',
'text/comma-separated-values',
'text/css',
'text/html',
'text/javascript',
'text/plain',
'text/text',
'text/xml',
]
async function bootstrap() {
const expressServer = express()
const nestApp = await NestFactory.create(AppModule, new ExpressAdapter(expressServer))
await nestApp.init()
return serverlessExpress.createServer(expressServer, null, BINARY_MIME_TYPES)
}
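The exported Lambda handler then just proxies incoming events to that server, for example (a sketch, assuming serverlessExpress above is the aws-serverless-express package):
let cachedServer
export async function handler(event, context) {
  // Reuse the server across invocations of the same execution environment.
  if (!cachedServer) {
    cachedServer = await bootstrap()
  }
  return serverlessExpress.proxy(cachedServer, event, context, 'PROMISE').promise
}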
In the Controller, you're now able to just return the S3 response body:
@Get('/:id')
async getPhoto(@Param() params,
               @Res() res) {
const photoId = PhotoId.of(params.id)
const photoObject: S3.GetObjectOutput = await this.s3Service.getPhoto(photoId)
res
.set('Content-Type', 'image/jpeg')
.send(photoObject.Body)
}
Hope this helps somebody!
I am working on a Node application which uses AWS. Now I want to get all Cognito users, but as per the docs it returns the first 60 users, and I want all of them. Can you assist me with this? The docs mention passing PaginationToken (string), but I don't know what to pass in it.
Here is what I have done so far:
exports.handler = (event, context, callback) => {
const requestBody = JSON.parse(event.body);
var params = {
"UserPoolId": "****************",
"Limit": 60,
"PaginationToken" : (what to pass here????),
}
const cognitoidentityserviceprovider = new AWS.CognitoIdentityServiceProvider();
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
if (err) {
callback(null, { headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" }, body: JSON.stringify({ statusCode: 405, data: err }) });
} else {
console.log(data);
let userdata = [];
for(let i=0; i<data.Users.length;i++){
// console.log(data.Users[i].Attributes);
userdata.push(getAttributes(data.Users[i].Attributes));
}
callback(null, { headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" }, body: JSON.stringify({ statusCode: 200, data: userdata }) });
}
});
};
function getAttributes(attributes){
let jsonObj = {};
attributes.forEach((obj) => {
jsonObj[obj.Name] = obj.Value;
});
return jsonObj;
}
In your response you should see a property called PaginationToken. If you make the same call but include this value in your params you will receive the next 60 users. Here's the concept:
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
// data.Users is the first 60 users
params.PaginationToken = data.PaginationToken;
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
// data.Users is the next 60 users
});
});
You might want to consider switching to promises and async/await if your environment supports it. That would make this code easier to read and write.
const data = await cognitoidentityserviceprovider.listUsers(params).promise();
params.PaginationToken = data.PaginationToken;
const data2 = await cognitoidentityserviceprovider.listUsers(params).promise();
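Putting it together, here is a sketch of a loop that keeps calling listUsers until no PaginationToken comes back, collecting all users along the way (on the very first call you simply omit PaginationToken):
async function listAllUsers(baseParams) {
  const cognitoidentityserviceprovider = new AWS.CognitoIdentityServiceProvider();
  const users = [];
  let paginationToken;
  do {
    const params = { ...baseParams };
    if (paginationToken) {
      params.PaginationToken = paginationToken;
    }
    const data = await cognitoidentityserviceprovider.listUsers(params).promise();
    users.push(...data.Users);
    paginationToken = data.PaginationToken; // absent once the last page has been returned
  } while (paginationToken);
  return users;
}
// e.g. const allUsers = await listAllUsers({ UserPoolId: "****************", Limit: 60 });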