Handling multipart/form-data in AWS Lambda

I'm trying to send a request with an image to a lambda function through API gateway.
I'm using this piece of code to parse the form-data-object received by my lambda function. I then upload the image to S3, but when downloading and opening the image from S3, I see that it's corrupt.
I have tried the following npm packages:
parse-multipart
parse-multipart-data
Neither works: with both I get an empty parts array. The piece of code I'm using now does produce entries in the array, but the buffers seem to be corrupt.
The problem seems to be in this line of code:
Buffer.from(item.slice(item.search(/Content-Type:\s.+/g) + item.match(/Content-Type:\s.+/g)[0].length + 4, -4), 'binary')
Does anyone have a solution for me?

You can use busboy to parse the multipart form data before uploading to S3 as shown below:
// This code is written in ECMAScript 6 (ES6), not CommonJS syntax.
// So, make sure you add "type": "module" to your package.json.
import {S3Client, PutObjectCommand} from '@aws-sdk/client-s3';
import Busboy from 'busboy';
// Initialize the clients outside the function handler to take advantage of execution environment reuse.
const s3Client = new S3Client({region: process.env.AWS_REGION});
// Function handler.
export async function handler(event) {
const {
ContentType: contentType = '',
File: file = '',
} = await FORM.parse(event['body'], event['headers']);
try {
// Adds an object to a bucket. We must have `WRITE` permissions on a bucket to add an object to it.
await s3Client.send(new PutObjectCommand({
Body: file,
Bucket: 'BUCKET_NAME',
ContentType: contentType,
Key: 'SOME_KEY',
}));
return {
isBase64Encoded: false,
statusCode: 200,
body: JSON.stringify({
message: 'Everything is gonna be alright.',
}),
}
} catch (e) {
return {
isBase64Encoded: false,
statusCode: 404,
body: JSON.stringify(e),
}
}
}
const FORM = {
parse(body, headers) {
return new Promise((resolve, reject) => {
const data = {};
const buffer = Buffer.from(body, 'base64');
const bb = Busboy({
headers: Object.keys(headers).reduce((newHeaders, key) => {
// busboy expects lower-case headers.
newHeaders[key.toLowerCase()] = headers[key];
return newHeaders;
}, {}),
limits: {
fileSize: 10485760, // Set as desired.
files: 1,
},
});
bb.on('file', (name, stream, info) => {
const chunks = [];
stream.on('data', (chunk) => {
if (name === 'File') {
chunks.push(chunk);
}
}).on('limit', () => {
reject(new Error('File size limit has been reached.'));
}).on('close', () => {
if (name === 'File') {
data[name] = Buffer.concat(chunks);
data['ContentType'] = info.mimeType;
}
});
});
bb.on('error', (err) => {
reject(err);
});
bb.on('close', () => {
resolve(data);
});
bb.end(buffer);
});
}
};
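Note that FORM.parse above assumes the event body arrives base64-encoded. Whether that is true depends on how API Gateway is set up (proxy integration, binary media types), so a small guard like the following sketch, which assumes the standard proxy event shape, avoids decoding a plain body as base64:
// Sketch: pick the decoding based on what API Gateway reports for this event.
const bodyToBuffer = (event) =>
    Buffer.from(event.body, event.isBase64Encoded ? 'base64' : 'binary');
If the image still comes out corrupt, it is usually because the binary payload was mangled before it reached the function, e.g. when the request's content type is not registered as a binary media type on the API.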

Related

Modify Cloudfront origin response with Lambda - read-only headers

I have a CloudFront distribution with a single React site, which is hosted in S3. The origin is connected via the REST API. To properly handle queries, I use custom error responses to map 403 and 404 statuses to 200 and route them to the root. The root object is index.html and everything seems to be fine.
Now I have a task to add another site to the distribution, which should be accessible through a subdirectory.
To do this I have to set a root object for the subdirectory and catch 404 and 403 responses and redirect them to that root object. I've already set up the origin and behaviour.
I tried to use these manuals:
example
source
but it seems that something went wrong.
The first approach (CloudFrontSubdirectoryIndex) doesn't seem to work at all (the function is not invoked and no rewrite happens), so I tried a CloudFront Function and it seems to work fine.
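For reference, the rewrite is roughly of this shape (a minimal sketch of a viewer-request CloudFront Function, based on the common index-rewrite example rather than my exact code):
function handler(event) {
    var request = event.request;
    var uri = request.uri;
    // Append index.html when the URI points at a directory rather than a file.
    if (uri.endsWith('/')) {
        request.uri += 'index.html';
    } else if (!uri.includes('.')) {
        request.uri += '/index.html';
    }
    return request;
}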
The last step is to handle 404 and 403 responses.
Here is the function from the manual:
'use strict';
const http = require('https');
const indexPage = 'index.html';
exports.handler = async (event, context, callback) => {
const cf = event.Records[0].cf;
const request = cf.request;
const response = cf.response;
const statusCode = response.status;
// Only replace 403 and 404 requests typically received
// when loading a page for a SPA that uses client-side routing
const doReplace = request.method === 'GET'
&& (statusCode == '403' || statusCode == '404');
const result = doReplace
? await generateResponseAndLog(cf, request, indexPage)
: response;
callback(null, result);
};
async function generateResponseAndLog(cf, request, indexPage){
const domain = cf.config.distributionDomainName;
const appPath = getAppPath(request.uri);
const indexPath = `/${appPath}/${indexPage}`;
const response = await generateResponse(domain, indexPath);
console.log('response: ' + JSON.stringify(response));
return response;
}
async function generateResponse(domain, path){
try {
// Load HTML index from the CloudFront cache
const s3Response = await httpGet({ hostname: domain, path: path });
const headers = s3Response.headers ||
{
'content-type': [{ value: 'text/html;charset=UTF-8' }]
};
return {
status: '200',
headers: wrapAndFilterHeaders(headers),
body: s3Response.body
};
} catch (error) {
return {
status: '500',
headers:{
'content-type': [{ value: 'text/plain' }]
},
body: 'An error occurred loading the page'
};
}
}
function httpGet(params) {
return new Promise((resolve, reject) => {
http.get(params, (resp) => {
console.log(`Fetching ${params.hostname}${params.path}, status code : ${resp.statusCode}`);
let result = {
headers: resp.headers,
body: ''
};
resp.on('data', (chunk) => { result.body += chunk; });
resp.on('end', () => { resolve(result); });
}).on('error', (err) => {
console.log(`Couldn't fetch ${params.hostname}${params.path} : ${err.message}`);
reject(err, null);
});
});
}
// Get the app path segment e.g. candidates.app, employers.client etc
function getAppPath(path){
if(!path){
return '';
}
if(path[0] === '/'){
path = path.slice(1);
}
const segments = path.split('/');
// will always have at least one segment (may be empty)
return segments[0];
}
// Cloudfront requires header values to be wrapped in an array
function wrapAndFilterHeaders(headers){
const allowedHeaders = [
'content-type',
'content-length',
'last-modified',
'date',
'etag'
];
const responseHeaders = {};
if(!headers){
return responseHeaders;
}
for(var propName in headers) {
// only include allowed headers
if(allowedHeaders.includes(propName.toLowerCase())){
var header = headers[propName];
if (Array.isArray(header)){
// assume already 'wrapped' format
responseHeaders[propName] = header;
} else {
// fix to required format
responseHeaders[propName] = [{ value: header }];
}
}
}
return responseHeaders;
}
When I try to implement this solution (attaching the function to the origin response) I get:
The Lambda function result failed validation: The function tried to add, delete, or change a read-only header.
Here is a list of restricted headers, but I'm not modifying any of them.
If I try not to attach any headers to a response at all, the message is the same.
If I try to attach all headers, CloudFront says that I'm modifying a blacklisted header.
The objects in the bucket have only one piece of customized metadata: Cache-Control: no-cache.
It seemed like a quick task, but I've been stuck on it for two days already.
Any help will be appreciated.
UPD: I've searched the logs and found
ERROR Validation error: Lambda function result failed validation, the function tried to delete read-only header, headerName : Transfer-Encoding.
I'm a little bit confused. This header is not present in the origin response, but CloudFront is telling me that I deleted it...
I tried to find the value of the "Transfer-Encoding" header that should come from the origin (S3), but it seems to have disappeared. And CloudFront says this header is essential.
So I just hard-coded it and everything works fine.
'use strict';
const http = require('https');
const indexPage = 'index.html';
exports.handler = async (event, context, callback) => {
const cf = event.Records[0].cf;
const request = cf.request;
const response = cf.response;
const statusCode = response.status;
// Only replace 403 and 404 requests typically received
// when loading a page for a SPA that uses client-side routing
const doReplace = request.method === 'GET'
&& (statusCode == '403' || statusCode == '404');
const result = doReplace
? await generateResponseAndLog(cf, request, indexPage)
: response;
callback(null, result);
};
async function generateResponseAndLog(cf, request, indexPage){
const domain = cf.config.distributionDomainName;
const appPath = getAppPath(request.uri);
const indexPath = `/${appPath}/${indexPage}`;
const response = await generateResponse(domain, indexPath);
console.log('response: ' + JSON.stringify(response));
return response;
}
async function generateResponse(domain, path){
try {
// Load HTML index from the CloudFront cache
const s3Response = await httpGet({ hostname: domain, path: path });
const headers = s3Response.headers ||
{
'content-type': [{ value: 'text/html;charset=UTF-8' }]
};
s3Response.headers['transfer-encoding'] = 'chunked';
return {
status: '200',
headers: wrapAndFilterHeaders(headers),
body: s3Response.body
};
} catch (error) {
return {
status: '500',
headers:{
'content-type': [{ value: 'text/plain' }]
},
body: 'An error occurred loading the page'
};
}
}
function httpGet(params) {
return new Promise((resolve, reject) => {
http.get(params, (resp) => {
console.log(`Fetching ${params.hostname}${params.path}, status code : ${resp.statusCode}`);
let result = {
headers: resp.headers,
body: ''
};
resp.on('data', (chunk) => { result.body += chunk; });
resp.on('end', () => { resolve(result); });
}).on('error', (err) => {
console.log(`Couldn't fetch ${params.hostname}${params.path} : ${err.message}`);
reject(err, null);
});
});
}
// Get the app path segment e.g. candidates.app, employers.client etc
function getAppPath(path){
if(!path){
return '';
}
if(path[0] === '/'){
path = path.slice(1);
}
const segments = path.split('/');
// will always have at least one segment (may be empty)
return segments[0];
}
// Cloudfront requires header values to be wrapped in an array
function wrapAndFilterHeaders(headers){
const allowedHeaders = [
'content-type',
'content-length',
'content-encoding',
'transfer-encoding',
'last-modified',
'date',
'etag'
];
const responseHeaders = {};
if(!headers){
return responseHeaders;
}
for(var propName in headers) {
// only include allowed headers
if(allowedHeaders.includes(propName.toLowerCase())){
var header = headers[propName];
if (Array.isArray(header)){
// assume already 'wrapped' format
responseHeaders[propName] = header;
} else {
// fix to required format
responseHeaders[propName] = [{ value: header }];
}
}
}
return responseHeaders;
}

AWS - PresignedUrl Upload Error on Browser, Works in Postman

I'm trying to upload files to my S3 bucket via a presigned URL Lambda function. Everything works fine via Postman, but the browser-based application fails with "SignatureDoesNotMatch".
My Lambda function region is ap-southeast-1.
but a similar function works fine in ap-south-1 (which is in the same timezone as mine). Any idea why this is happening? Could this have anything to do with the timezone difference between the server and client?
Please see my code below:
<script>
$(document).one('submit', '#memberForm', function (e) {
e.preventDefault();
$.get("<FUNCTION URL>", function (data) {
var getUrl = data.uploadURL;
var fileName = data.fileName;
var theFormFile = $('#fileLogo').get()[0].files[0];
if (theFormFile != null) {
console.log(theFormFile);
$.ajax({
type: 'PUT',
url: getUrl,
contentType: 'binary/octet-stream',
processData: false,
crossDomain: true,
data: theFormFile,
success: function () {
alert('Yeehaaaw');
},
error: function (e) {
console.log(e);
alert('File NOT uploaded');
console.log(arguments);
}
});
} else {
$("#memberForm").submit();
}
});
return false;
});
</script>
My Code for Url Generation is as below:
'use strict'
const AWS = require('aws-sdk')
AWS.config.update({ region: process.env.AWS_REGION || 'ap-southeast-1' })
const s3 = new AWS.S3()
// Main Lambda entry point
exports.handler = async (event) => {
console.log("execution started")
var contentType=event["queryStringParameters"]['contentType']
var path=event["queryStringParameters"]['path']
const result = await getUploadURL(contentType,path)
console.log('Result: ', result)
return result
}
const getContentType=function(contentType){
switch(contentType) {
case "png":
return "image/png"
case "jpg":
return "image/jpeg"
case "pdf":
return "application/pdf"
default:
return "application/json"
}
}
const getExtension=function(contentType){
switch(contentType) {
case "png":
return "png"
case "jpg":
return "jpg"
case "pdf":
return "pdf"
default:
return `${contentType}`
}
}
const getUploadURL = async function(contentType,path) {
console.log(`Content type is ${contentType}`)
const actionId = parseInt(Math.random()*10000000)
var type=getContentType(contentType);
var ext= getExtension(contentType);
const s3Params = {
Bucket: process.env.UploadBucket,
Key: `${path}/${actionId}.${ext}`,
ContentType: type,// Update to match whichever content type you need to upload
ACL: 'public-read', // Enable this setting to make the object publicly readable - only works if the bucket can support public objects,
Expires: 300
}
console.log('getUploadURL: ', s3Params)
return new Promise((resolve, reject) => {
// Get signed URL
resolve({
"statusCode": 200,
"isBase64Encoded": false,
"headers": {
"Access-Control-Allow-Origin": "*"
},
"body": JSON.stringify({
"uploadURL": s3.getSignedUrl('putObject', s3Params),
"fileName": `${actionId}.${ext}`
})
})
})
}
Also, the same works when I try it with Postman.
I resolved this by adding the signature version:
const s3=new AWS.S3({
signatureVersion:'v4'
});
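Putting it together with the region setting already used above, the client construction would look roughly like this (same AWS SDK v2 assumptions as the rest of the answer):
const s3 = new AWS.S3({
    region: process.env.AWS_REGION || 'ap-southeast-1',
    signatureVersion: 'v4', // sign presigned URLs with Signature Version 4
});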

NestJS: Image Upload & Serve API

I tried to create an API for uploading & retrieving images with NestJS. Images should be stored on S3.
What I currently have:
Controller
@Post()
@UseInterceptors(FileFieldsInterceptor([
{name: 'photos', maxCount: 10},
]))
async uploadPhoto(@UploadedFiles() files): Promise<void> {
await this.s3Service.savePhotos(files.photos)
}
@Get('/:id')
@Header('content-type', 'image/jpeg')
async getPhoto(@Param() params,
@Res() res) {
const photoId = PhotoId.of(params.id)
const photoObject = await this.s3Service.getPhoto(photoId)
res.send(photoObject)
}
S3Service
async savePhotos(photos: FileUploadEntity[]): Promise<any> {
return Promise.all(photos.map(photo => {
const filePath = `${moment().format('YYYYMMDD-hhmmss')}${Math.floor(Math.random() * (1000))}.jpg`
const params = {
Body: photo.buffer,
Bucket: Constants.BUCKET_NAME,
Key: filePath,
}
return new Promise((resolve) => {
this.client.putObject(params, (err: any, data: any) => {
if (err) {
logger.error(`Photo upload failed [err=${err}]`)
ExceptionHelper.throw(ErrorCodes.SERVER_ERROR_UNCAUGHT_EXCEPTION)
}
logger.info(`Photo upload succeeded [filePath=${filePath}]`)
return resolve()
})
})
}))
}
async getPhoto(photoId: PhotoId): Promise<AWS.S3.Body> {
const object: S3.GetObjectOutput = await this.getObject(S3FileKey.of(`${Constants.S3_PHOTO_PATH}/${photoId.value}`))
.catch(() => ExceptionHelper.throw(ErrorCodes.RESOURCE_NOT_FOUND_PHOTO)) as S3.GetObjectOutput
logger.info(JSON.stringify(object.Body))
return object.Body
}
async getObject(s3FilePath: S3FileKey): Promise<S3.GetObjectOutput> {
logger.info(`Retrieving object from S3 s3FilePath=${s3FilePath.value}]`)
return this.client.getObject({
Bucket: Constants.BUCKET_NAME,
Key: s3FilePath.value
}).promise()
.catch(err => {
logger.error(`Could not retrieve object from S3 [err=${err}]`)
ExceptionHelper.throw(ErrorCodes.SERVER_ERROR_UNCAUGHT_EXCEPTION)
}) as S3.GetObjectOutput
}
The photo object actually ends up in S3, but when I download it I can't open it.
Same for the GET => can't be displayed.
What general mistake(s) am I making here?
I'm not sure what values you are returning to your consumer and which values they use to get the image again; could you post what the actual response looks like and what the request is, and verify that the FQDN and path match?
It seems you forgot about ACL as well, i.e. the resources you upload this way are not public-read by default.
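Concretely, that is one extra field on the existing params object (a sketch; it only takes effect if the bucket allows public ACLs):
const params = {
    Body: photo.buffer,
    Bucket: Constants.BUCKET_NAME,
    Key: filePath,
    ACL: 'public-read', // make the uploaded object publicly readable
};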
BTW, you could use the AWS SDK there:
import { Injectable } from '@nestjs/common'
import * as AWS from 'aws-sdk'
import { InjectConfig } from 'nestjs-config'
import { AwsConfig } from '../../config/aws.config'
import UploadedFile from '../interfaces/uploaded-file'
export const UPLOAD_WITH_ACL = 'public-read'
@Injectable()
export class ImageUploadService {
s3: AWS.S3
bucketName
cdnUrl
constructor(@InjectConfig() private readonly config) {
const awsConfig = (this.config.get('aws') || { bucket: '', secretKey: '', accessKey: '', cdnUrl: '' }) as AwsConfig // read from envs
this.bucketName = awsConfig.bucket
this.cdnUrl = awsConfig.cdnUrl
AWS.config.update({
accessKeyId: awsConfig.accessKey,
secretAccessKey: awsConfig.secretKey,
})
this.s3 = new AWS.S3()
}
upload(file: UploadedFile): Promise<string> {
return new Promise((resolve, reject) => {
const params: AWS.S3.Types.PutObjectRequest = {
Bucket: this.bucketName,
Key: `${Date.now().toString()}_${file.originalname}`,
Body: file.buffer,
ACL: UPLOAD_WITH_ACL,
}
this.s3.upload(params, (err, data: AWS.S3.ManagedUpload.SendData) => {
if (err) {
return reject(err)
}
resolve(`${this.cdnUrl}/${data.Key}`)
})
})
}
}
For anyone having the same troubles, I finally figured it out:
I enabled binary support on API Gateway (<your-gateway> Settings -> Binary Media Types -> */*) and then returned all responses from the Lambda base64-encoded. API Gateway does the decoding automatically before returning the response to the client.
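Without serverless-express, a hand-built proxy response for a binary payload would look roughly like this sketch (the helper name is mine, not part of any library):
// Sketch: wrap raw bytes in a Lambda proxy response that API Gateway can decode.
const toBinaryProxyResponse = (bodyBuffer, contentType) => ({
    statusCode: 200,
    headers: { 'Content-Type': contentType },
    body: bodyBuffer.toString('base64'), // encode the object bytes as base64
    isBase64Encoded: true,               // API Gateway decodes this before responding
});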
With serverless-express you can enable the automatic base64 encoding easily at server creation:
const BINARY_MIME_TYPES = [
'application/javascript',
'application/json',
'application/octet-stream',
'application/xml',
'font/eot',
'font/opentype',
'font/otf',
'image/jpeg',
'image/png',
'image/svg+xml',
'text/comma-separated-values',
'text/css',
'text/html',
'text/javascript',
'text/plain',
'text/text',
'text/xml',
]
async function bootstrap() {
const expressServer = express()
const nestApp = await NestFactory.create(AppModule, new ExpressAdapter(expressServer))
await nestApp.init()
return serverlessExpress.createServer(expressServer, null, BINARY_MIME_TYPES)
}
In the Controller, you're now able to just return the S3 response body:
@Get('/:id')
async getPhoto(@Param() params,
@Res() res) {
const photoId = PhotoId.of(params.id)
const photoObject: S3.GetObjectOutput = await this.s3Service.getPhoto(photoId)
res
.set('Content-Type', 'image/jpeg')
.send(photoObject.Body)
}
Hope this helps somebody!

AWS Cognito users list: Lambda

I am working on a Node application which uses AWS. Now I want to get all Cognito users, but as per the docs it returns the first 60 users, and I want all of them. Can you assist me with this? The docs mention passing PaginationToken (string), but I don't know what to pass in it.
Here is what I have done so far:
exports.handler = (event, context, callback) => {
const requestBody = JSON.parse(event.body);
var params = {
"UserPoolId": "****************",
"Limit": 60,
"PaginationToken" : (what to pass here????),
}
const cognitoidentityserviceprovider = new AWS.CognitoIdentityServiceProvider();
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
if (err) {
callback(null, { headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" }, body: JSON.stringify({ statusCode: 405, data: err }) });
} else {
console.log(data);
let userdata = [];
for(let i=0; i<data.Users.length;i++){
// console.log(data.Users[i].Attributes);
userdata.push(getAttributes(data.Users[i].Attributes));
}
callback(null, { headers: { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*" }, body: JSON.stringify({ statusCode: 200, data: userdata }) });
}
});
};
function getAttributes(attributes){
let jsonObj = {};
attributes.forEach((obj) => {
jsonObj[obj.Name] = obj.Value;
});
return jsonObj;
}
In your response you should see a property called PaginationToken. If you make the same call but include this value in your params you will receive the next 60 users. Here's the concept:
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
// data.Users is the first 60 users
params.PaginationToken = data.PaginationToken;
cognitoidentityserviceprovider.listUsers(params, (err, data) => {
// data.Users is the next 60 users
});
});
You might want to consider switching to promises and async/await if your environment supports it. That would make this code easier to read and write.
const data = await cognitoidentityserviceprovider.listUsers(params).promise();
params.PaginationToken = data.PaginationToken;
const data2 = await cognitoidentityserviceprovider.listUsers(params).promise();
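If the goal is really all users, the same idea just loops until Cognito stops returning a token; a rough sketch using the same client and params as above (run inside an async function):
let allUsers = [];
let params = { UserPoolId: '****************', Limit: 60 };
do {
    const page = await cognitoidentityserviceprovider.listUsers(params).promise();
    allUsers = allUsers.concat(page.Users);
    params.PaginationToken = page.PaginationToken; // undefined on the last page
} while (params.PaginationToken);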

Lambda function s3.getObject returns "Internal server error"

This code works just fine locally using Node.js. Images download from S3 and are written to file.
However, in Lambda (using Node.js 8.10) I'm getting "Internal Server Error" when testing the function, with this in the logs:
"Execution failed due to configuration error: Malformed Lambda proxy response"
I am using the lambda proxy response in the callback, but clearly some AWS SDK error with S3 is not getting caught.
I do have a role set up with S3 full access that the Lambda uses.
What am I missing with my first Lambda function? I've followed the docs and tutorials correctly and it is not working.
const async = require('async')
const aws = require('aws-sdk')
const fs = require('fs')
const exec = require('child_process').exec
const bucket = 'mybucket'
const s3Src = 'bucket_prefix'
const s3Dst = 'new_prefix'
const local = `${__dirname}/local/`
aws.config.region = 'us-west-2'
const s3 = new aws.S3()
exports.handler = async (event, context, callback) => {
const outputImage = 'hello_world.png'
const rack = JSON.parse(event.body)
const images = rack.images
async.waterfall([
function download(next) {
let downloaded = 0
let errors = false
let errorMessages = []
for (let i = 0; i < images.length; i++) {
let key = `${s3Src}/${images[i].prefix}/${images[i].image}`,
localImage = `${local}${images[i].image}`
getBucketObject(bucket, key, localImage).then(() => {
++downloaded
if (downloaded === images.length) { // js is non blocking, need to check if all images have been downloaded. If so, then go to next function
if (errors) {
next(errorMessages.join(' '))
} else {
next(null)
}
}
}).catch(error => {
errorMessages.push(`${error} - ${localImage}`)
++downloaded
errors = true
})
}
function getBucketObject(bucket, key, dest) {
return new Promise((resolve, reject) => {
let ws = fs.createWriteStream(dest)
ws.once('error', (err) => {
return reject(err)
})
ws.once('finish', () => {
return resolve(dest)
})
let s3Stream = s3.getObject({
Bucket: bucket,
Key: key
}).createReadStream()
s3Stream.pause() // Under load this will prevent first few bytes from being lost
s3Stream.on('error', (err) => {
return reject(err)
})
s3Stream.pipe(ws)
s3Stream.resume()
})
}
}
], err => {
if (err) {
let response = {
"statusCode": 400,
"headers": {
"my_header": "my_value"
},
"body": JSON.stringify(err),
"isBase64Encoded": false
}
callback(null, response)
} else {
let response = {
"statusCode": 200,
"headers": {
"my_header": "my_value"
},
"body": JSON.stringify(`<img src="${local}${outputImage}" />`),
"isBase64Encoded": false
}
callback(null, response)
}
}
)
}
A response should always be sent to the callback function. Your code sends a response only on error; that's why the Lambda executor thinks your code failed.
BTW, shouldn't your functions in async.waterfall be separated with a comma, as two tasks?
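The shape API Gateway expects is the same on every path; as a rough sketch (doWork here is a hypothetical stand-in, not your code), every branch should end in a well-formed proxy response:
exports.handler = (event, context, callback) => {
    doWork(event) // hypothetical async work that returns a Promise
        .then((result) => callback(null, {
            statusCode: 200,
            body: JSON.stringify(result),
            isBase64Encoded: false,
        }))
        .catch((err) => callback(null, { // still a valid proxy response, just an error status
            statusCode: 500,
            body: JSON.stringify({ message: err.message }),
            isBase64Encoded: false,
        }));
};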
Locally I've been running Node.js 10.10 and Lambda is currently at 8.10; that is a big part of it, I'm sure. In the end I had to remove the async keyword from the handler, and I had to move the getBucketObject function out of the waterfall. Once I made those adjustments it started working. Another issue was that the downloaded images needed to go into the "/tmp" directory.
const aws = require('aws-sdk')
const async = require('async')
const fs = require('fs')
const bucket = 'mybucket'
const s3Src = 'mys3src'
const local = '/tmp/'
aws.config.region = 'us-west-2'
const s3 = new aws.S3()
exports.handler = (event, context, callback) => {
const outputImage = 'hello_world.png'
async.waterfall([
function download(next) {
let downloaded = 0,
errorMessages = []
for (let i = 0; i < event['images'].length; i++) {
let key = `${s3Src}/${event['images'][i]['prefix']}/${event['images'][i]['image']}`,
localImage = `${local}${event['images'][i]['image']}`
getBucketObject(bucket, key, localImage).then(() => {
downloaded++
if (downloaded === event['images'].length) {
if (errorMessages.length > 0) {
next(errorMessages.join(' '))
} else {
console.log('All downloaded')
next(null)
}
}
}).catch(error => {
downloaded++
errorMessages.push(`${error} - ${localImage}`)
if (downloaded === event['images'].length) {
next(errorMessages.join(' '))
}
})
}
}
], err => {
if (err) {
console.error(err)
callback(null, {
"statusCode": 400,
"body": JSON.stringify(err),
"isBase64Encoded": false
})
} else {
console.log('event image created!')
callback(null, {
"statusCode": 200,
"body": JSON.stringify(`<img src="${local}${outputImage}" />`),
"isBase64Encoded": false
})
}
}
)
}
function getBucketObject(bucket, key, dest) {
return new Promise((resolve, reject) => {
let ws = fs.createWriteStream(dest)
ws.once('error', (err) => {
return reject(err)
})
ws.once('finish', () => {
return resolve(dest)
})
let s3Stream = s3.getObject({
Bucket: bucket,
Key: key
}).createReadStream()
s3Stream.pause() // Under load this will prevent first few bytes from being lost
s3Stream.on('error', (err) => {
return reject(err)
})
s3Stream.pipe(ws)
s3Stream.resume()
})
}