React Native upload to S3 with presigned URL

I've been trying with no luck to upload an image to S3 from React Native using a pre-signed URL. Here is my code:
Generating the pre-signed URL in Node:
const s3 = new aws.S3();
const s3Params = {
  Bucket: bucket,
  Key: fileName,
  Expires: 60,
  ContentType: 'image/jpeg',
  ACL: 'public-read'
};
return s3.getSignedUrl('putObject', s3Params);
Here is the React Native request to S3:
var file = {
  uri: game.pictureToSubmitUri,
  type: 'image/jpeg',
  name: 'image.jpg',
};

const xhr = new XMLHttpRequest();
var body = new FormData();
body.append('file', file);
xhr.open('PUT', signedRequest);
xhr.onreadystatechange = () => {
  if (xhr.readyState === 4) {
    if (xhr.status === 200) {
      alert('Posted!');
    } else {
      alert('Could not upload file.');
    }
  }
};
xhr.send(body);
game.pictureToSubmitUri = assets-library://asset/asset.JPG?id=A282A2C5-31C8-489F-9652-7D3BD5A1FAA4&ext=JPG
signedRequest = https://my-bucket.s3-us-west-1.amazonaws.com/8bd2d4b9-3206-4bff-944d-e06f872d8be3?AWSAccessKeyId=AKIAIOLHQY4GAXN26FOQ&Content-Type=image%2Fjpeg&Expires=1465671117&Signature=bkQIp5lgzuYrt2vyl7rqpCXPcps%3D&x-amz-acl=public-read
Error message:
<Code>SignatureDoesNotMatch</Code>
<Message>
The request signature we calculated does not match the signature you provided. Check your key and signing method.
</Message>
I can successfully curl an image to S3 using the generated URL, and I seem to be able to successfully post to requestb.in from RN (however, I can only see the raw data on requestb.in, so I'm not 100% sure the image is properly there).
Based on all this, I've narrowed my issue down to: 1) my image is not correctly uploading, period, or 2) somehow the way S3 wants my request is different than how it is coming in.
Any help would be muuuuuucchhhh appreciated!
UPDATE
Can successfully post from RN to S3 if the body is just text ({'data': 'foo'}). Perhaps AWS does not like multipart data? How can I send just the file in RN???

To upload to a pre-signed S3 URL on both iOS and Android, use the react-native-blob-util lib.
Code snippet:
import RNBlobUtil from 'react-native-blob-util'
const preSignedURL = 'pre-signed url'
const pathToImage = '/path/to/image.jpg' // without file:// scheme at the beginning
const headers = {}
RNBlobUtil.fetch('PUT', preSignedURL, headers, RNBlobUtil.wrap(pathToImage))
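If the URL was signed with a ContentType (as in the question's Node snippet), the same Content-Type header generally has to be sent on the PUT or S3 rejects the signature. A minimal sketch, assuming the URL was signed for image/jpeg, would replace the empty headers object above with:
const headers = { 'Content-Type': 'image/jpeg' } // must match the ContentType used when signing the URL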
Edited 19 Oct 2022: swapped the unsupported RN Fetch Blob for the React Native Blob Util package.

FormData creates a multipart/form-data request, but an S3 PUT Object request expects the raw file as its body.
You just need to send your file in the request body without wrapping it in FormData:
function uploadFile(file, signedRequest, url) {
  const xhr = new XMLHttpRequest();
  xhr.open('PUT', signedRequest);
  xhr.onreadystatechange = function() {
    if (xhr.readyState === 4) {
      if (xhr.status === 200) {
        alert(url);
      } else {
        alert('Could not upload file.');
      }
    }
  };
  xhr.send(file);
}
See https://devcenter.heroku.com/articles/s3-upload-node for a browser example. Also make sure the Content-Type header matches the one used when the URL was signed.
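For instance, if the URL was signed with ContentType: 'image/jpeg' as in the question, the matching header can be set on the XHR before sending; a small sketch:
xhr.open('PUT', signedRequest);
xhr.setRequestHeader('Content-Type', 'image/jpeg'); // must match the ContentType used to sign the URL
xhr.send(file);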

"rn-fetch-blob": 0.12.0,
"react-native": 0.61.5
This code works for both Android & iOS
const response = await RNFetchBlob.fetch(
  'PUT',
  presignedUrl,
  {
    'Content-Type': undefined
  },
  RNFetchBlob.wrap(file.path.replace('file://', '')),
)
Note: {'Content-Type': undefined} is needed for iOS.

Sorry if none of the above worked for anybody; it took me five crazy days of no results to get this working. So, say you have an endpoint on your server that generates the signed URL for requests coming from the React Native side, the React side, or any other web front end. I'll cover both React Native and React (the web approach can also serve HTML and Angular pages).
WEB APPROACH
UPLOAD IMAGE TO S3 BUCKET WITH PRESIGNED URL
/*
Function to carry out the actual PUT request to S3 using the signed request from the app.
*/
function uploadFile(file, signedRequest, url){
  // document.getElementById('preview').src = url; // THE PREVIEW PORTION
  // document.getElementById('avatar-url').value = url;
  const xhr = new XMLHttpRequest();
  xhr.open('PUT', signedRequest);
  xhr.onreadystatechange = () => {
    if (xhr.readyState === 4) {
      if (xhr.status === 200) {
        document.getElementById('preview').src = url;
        // document.getElementById('avatar-url').value = url;
      } else {
        alert('Could not upload file.');
      }
    }
  };
  xhr.send(file);
}
/*
Function to get the temporary signed request from the app.
If request successful, continue to upload the file using this signed
request.
*/
function getSignedRequest(file){
  const xhr = new XMLHttpRequest();
  xhr.open('GET', 'http://localhost:1234' + `/sign-s3?file-name=${file.name}&file-type=${file.type}`);
  xhr.setRequestHeader('Access-Control-Allow-Headers', '*');
  xhr.setRequestHeader('Content-type', 'application/json');
  xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
  xhr.onreadystatechange = () => {
    if (xhr.readyState === 4) {
      if (xhr.status === 200) {
        const response = JSON.parse(xhr.responseText);
        uploadFile(file, response.signedRequest, response.url);
      } else {
        alert('Could not get signed URL.');
      }
    }
  };
  xhr.send();
}
/*
Function called when file input updated. If there is a file selected, then
start upload procedure by asking for a signed request from the app.
*/
function initUpload(){
  const files = document.getElementById('file-input').files;
  const file = files[0];
  if (file == null){
    return alert('No file selected.');
  }
  getSignedRequest(file);
}
/*
Bind listeners when the page loads.
*/
// check that the user is actually on the profile page
// just ensure that the id 'profile-page' exists in your html
if (document.getElementById('profile-page')) {
  document.addEventListener('DOMContentLoaded', () => {
    // here is your upload trigger button effect
    document.getElementById('file-input').onchange = initUpload;
  });
}
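The web flow above assumes a /sign-s3 endpoint on the server that responds with { signedRequest, url }. A minimal sketch of such a route using the v2 SDK; the Express setup, bucket env var, and port are assumptions:
const aws = require('aws-sdk');
const express = require('express');
const app = express();
const s3 = new aws.S3();

// Returns a presigned PUT URL plus the public URL the object will have
app.get('/sign-s3', (req, res) => {
  const fileName = req.query['file-name'];
  const fileType = req.query['file-type'];
  const s3Params = {
    Bucket: process.env.S3_BUCKET,
    Key: fileName,
    Expires: 60,
    ContentType: fileType,
    ACL: 'public-read'
  };
  s3.getSignedUrl('putObject', s3Params, (err, signedRequest) => {
    if (err) return res.status(500).end();
    res.json({
      signedRequest,
      url: `https://${process.env.S3_BUCKET}.s3.amazonaws.com/${fileName}`
    });
  });
});

app.listen(1234);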

FOR REACT NATIVE I WILL NOT BE USING ANY 3RD-PARTY LIBS.
I have my pick-image function that picks the image and uploads it using XHR:
const pickImage = async () => {
  let result = await ImagePicker.launchImageLibraryAsync({
    // mediaTypes: ImagePicker.MediaTypeOptions.All,
    allowsEditing: true,
    aspect: [4, 3],
    quality: 1,
    base64: true
  });
  console.log(result);

  if (!result.cancelled) {
    // setImage(result.uri);
    let base64Img = `data:image/jpg;base64,${result.base64}`;
    // ImagePicker saves the taken photo to disk and returns a local URI to it
    let localUri = result.uri;
    let filename = localUri.split('/').pop();

    // Infer the type of the image
    let match = /\.(\w+)$/.exec(filename);
    let type = match ? `image/${match[1]}` : `image`;

    // Upload the image using the fetch and FormData APIs
    let formData = new FormData();
    // Assume "photo" is the name of the form field the server expects
    formData.append('file', { uri: base64Img, name: filename, type });

    const xhr = new XMLHttpRequest();
    xhr.open('GET', ENVIRONMENTS.CLIENT_API + `/sign-s3?file-name=${filename}&file-type=${type}`);
    xhr.setRequestHeader('Access-Control-Allow-Headers', '*');
    xhr.setRequestHeader('Content-type', 'application/json');
    // xhr.setRequestHeader('Content-type', 'multipart/form-data');
    xhr.setRequestHeader('Access-Control-Allow-Origin', '*');
    xhr.setRequestHeader('X-Amz-ACL', 'public-read') //added
    xhr.setRequestHeader('Content-Type', type) //added
    xhr.onreadystatechange = () => {
      if (xhr.readyState === 4) {
        if (xhr.status === 200) {
          const response = JSON.parse(xhr.responseText);
          alert(JSON.stringify(response.signedRequest, response.url))
          // uploadFile(file, response.signedRequest, response.url);
          // this.setState({imagename: file.name})
          const xhr2 = new XMLHttpRequest();
          xhr2.open('PUT', response.signedRequest);
          xhr2.setRequestHeader('Access-Control-Allow-Headers', '*');
          xhr2.setRequestHeader('Content-type', 'application/json');
          // xhr2.setRequestHeader('Content-type', 'multipart/form-data');
          xhr2.setRequestHeader('Access-Control-Allow-Origin', '*');
          // xhr2.setRequestHeader('X-Amz-ACL', 'public-read') //added
          xhr2.setRequestHeader('Content-Type', type) //added
          xhr2.onreadystatechange = () => {
            if (xhr2.readyState === 4) {
              if (xhr2.status === 200) {
                alert("successful upload ")
              } else {
                // alert('Could not upload file.');
                var error = new Error(xhr.responseText)
                error.code = xhr.status;
                for (var key in response) error[key] = response[key]
                alert(error)
              }
            }
          };
          xhr2.send(result.base64)
        } else {
          alert('Could not get signed URL.');
        }
      }
    };
    xhr.send();
  }
};
Then somewhere in the render method:
<View style={{ flex: 1, alignItems: 'center', justifyContent: 'center' }}>
  <Button title="Pick an image from camera roll" onPress={pickImage} />
  {image && <Image source={{ uri: image }} style={{ width: 200, height: 200 }} />}
</View>
Hope it helps anyone who doesn't want sleepless nights like me.

import React from 'react'
import axios from 'axios'
import { Button, SafeAreaView } from 'react-native'
import { launchImageLibrary } from 'react-native-image-picker'

const Home = () => {
  const getImageFromLibrary = async () => {
    const result = await launchImageLibrary()
    const { type, uri } = result.assets[0]
    // Read the picked image back as a blob via XHR
    const blob = await new Promise((resolve, reject) => {
      const xhr = new XMLHttpRequest()
      xhr.onload = function () {
        resolve(xhr.response)
      }
      xhr.onerror = function () {
        reject(new TypeError('Network request failed'))
      }
      xhr.responseType = 'blob'
      xhr.open('GET', uri, true)
      xhr.send(null)
    })
    // Send your blob off to the presigned url (obtained from your backend)
    const res = await axios.put(presignedUrl, blob)
  }

  return (
    <SafeAreaView>
      <Button onPress={getImageFromLibrary} title="Get from library" />
    </SafeAreaView>
  )
}

export default Home
Your BE that creates the pre-signed url can look something like this (pseudo code):
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner')
const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3')
const { v4: uuidv4 } = require('uuid')
const moment = require('moment')

const BUCKET_NAME = process.env.BUCKET_NAME
const REGION = process.env.AWS_REGION

const s3Client = new S3Client({
  region: REGION
})

const body = JSON.parse(request.body)
const { type } = body

const uniqueName = uuidv4()
const date = moment().format('MMDDYYYY')
const fileName = `${uniqueName}-${date}`

const params = {
  Bucket: BUCKET_NAME,
  Key: fileName,
  ContentType: type
}

try {
  const command = new PutObjectCommand(params)
  const signedUrl = await getSignedUrl(s3Client, command, {
    expiresIn: 60
  })
  response.send({ url: signedUrl, fileName })
} catch (err) {
  console.log('ERROR putPresignedUrl : ', err)
  response.send(err)
}
I am using aws-sdk v3, which is nice because the packages are smaller. I create a filename on the BE and send it to the FE. For the params, you don't need anything other than those three. Also, I never did anything with CORS and my bucket is completely private. Again, the BE code is pseudo-code-ish, so you will need to edit a few spots.
Lastly, trying to use React Native's built-in fetch doesn't work here; it's not the same fetch you use in React on the web. Use an XHR request as shown above, otherwise you cannot create a blob.
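One more note: since the URL above is signed with ContentType: type, the PUT from the app generally has to send the same Content-Type header or S3 may reject the signature. A small sketch, reusing blob and type from the picker code and assuming presignedUrl is the url returned by the BE:
const res = await axios.put(presignedUrl, blob, {
  headers: { 'Content-Type': type }, // must match the ContentType the URL was signed with
})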

First, install two libraries: react-native-fs and base64-arraybuffer. Then read the image as base64, decode it into an ArrayBuffer, and upload that:
import axios from 'axios';
import RNFS from 'react-native-fs';
import {decode} from 'base64-arraybuffer';

try {
  RNFS.readFile(fileUri, 'base64').then(data => {
    const arrayBuffer = decode(data);
    axios
      .put(sThreeApiUrl.signedUrl, arrayBuffer, {
        headers: {
          'Content-Type': 'image/jpeg',
          'Content-Encoding': 'base64',
        },
      })
      .then(res => {
        if (res.status == 200) {
          console.log('image is uploaded successfully');
        }
      });
  });
} catch (error) {
  console.log('this is error', error);
}
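This assumes sThreeApiUrl was fetched beforehand from a signing endpoint on your server; a small sketch of that step, where the endpoint name and response shape are assumptions:
// Hypothetical endpoint that returns { signedUrl } for a given file name and type
const getSignedUrlFromServer = async (fileName, fileType) => {
  const res = await axios.get('https://your-api.example.com/sign-s3', {
    params: { 'file-name': fileName, 'file-type': fileType },
  });
  return res.data; // e.g. { signedUrl: 'https://your-bucket.s3.amazonaws.com/...' }
};

const sThreeApiUrl = await getSignedUrlFromServer('image.jpg', 'image/jpeg');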

Related

Handling multipart/form-data in aws lambda

I'm trying to send a request with an image to a lambda function through API gateway.
I'm using this piece of code to parse the form-data-object received by my lambda function. I then upload the image to S3, but when downloading and opening the image from S3, I see that it's corrupt.
I have tried the following npm packages:
parse-multipart
parse-multipart-data
Both do not work, because I get an empty parts array. The piece of code I use does get results in the array, but the buffers seem to be corrupt.
The problem seems to be in this line of code:
Buffer.from(item.slice(item.search(/Content-Type:\s.+/g) + item.match(/Content-Type:\s.+/g)[0].length + 4, -4), 'binary')
Does anyone have a solution for me?
You can use busboy to parse the multipart form data before uploading to S3 as shown below:
// This code is written in ECMAScript 6 (ES6), not CommonJS syntax.
// So, make sure you add "type": "module" in your package.json.
import {S3Client, PutObjectCommand} from '@aws-sdk/client-s3';
import Busboy from 'busboy';
// Initialize the clients outside the function handler to take advantage of execution environment reuse.
const s3Client = new S3Client({region: process.env.AWS_REGION});
// Function handler.
export async function handler(event) {
const {
ContentType: contentType = '',
File: file = '',
} = await FORM.parse(event['body'], event['headers']);
try {
// Adds an object to a bucket. We must have `WRITE` permissions on a bucket to add an object to it.
await s3Client.send(new PutObjectCommand({
Body: file,
Bucket: 'BUCKET_NAME',
ContentType: contentType,
Key: 'SOME_KEY',
}));
return {
isBase64Encoded: false,
statusCode: 200,
body: JSON.stringify({
message: 'Everything is gonna be alright.',
}),
}
} catch (e) {
return {
isBase64Encoded: false,
statusCode: 404,
body: JSON.stringify(e),
}
}
}
const FORM = {
parse(body, headers) {
return new Promise((resolve, reject) => {
const data = {};
const buffer = Buffer.from(body, 'base64');
const bb = Busboy({
headers: Object.keys(headers).reduce((newHeaders, key) => {
// busboy expects lower-case headers.
newHeaders[key.toLowerCase()] = headers[key];
return newHeaders;
}, {}),
limits: {
fileSize: 10485760, // Set as desired.
files: 1,
},
});
bb.on('file', (name, stream, info) => {
const chunks = [];
stream.on('data', (chunk) => {
if (name === 'File') {
chunks.push(chunk);
}
}).on('limit', () => {
reject(new Error('File size limit has been reached.'));
}).on('close', () => {
if (name === 'File') {
data[name] = Buffer.concat(chunks);
data['ContentType'] = info.mimeType;
}
});
});
bb.on('error', (err) => {
reject(err);
});
bb.on('close', () => {
resolve(data);
});
bb.end(buffer);
});
}
};
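One assumption in the parser above is that event['body'] arrives base64-encoded, which is the case when API Gateway treats the payload as binary (isBase64Encoded is true). A small sketch of guarding for both cases before handing the body to busboy:
// Decode only when API Gateway flags the body as base64-encoded
const rawBody = event.isBase64Encoded
  ? Buffer.from(event.body, 'base64')
  : Buffer.from(event.body, 'utf8');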

Modify Cloudfront origin response with Lambda - read-only headers

I have a CloudFront distribution with a single React site, which is hosted in S3. The origin is connected via the REST API. To properly handle queries, I use custom error responses that map 403 and 404 statuses to 200 and route them to the root. The root object is index.html and everything seems to be fine.
Now I have a task to add another site to the distribution, which should be accessible through a subdirectory.
To do this I have to set a root object for the subdirectory and catch 404 and 403 responses and transfer them to that root object. I've already set up the origin and behaviour.
I tried to follow these manuals:
example
source
but it seems that something went wrong
The first approach (CloudFrontSubdirectoryIndex) does not seem to work at all (the function is not invoked and no rewrite happens), so I tried a CloudFront Function and it seems to work fine.
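For reference, a minimal sketch of a viewer-request CloudFront Function for the subdirectory index rewrite; this is an assumption about the kind of function used, not the exact code from the question:
function handler(event) {
  var request = event.request;
  var uri = request.uri;
  // Append index.html to directory-style requests, e.g. /sub-app/ or /sub-app/some/route
  if (uri.endsWith('/')) {
    request.uri = uri + 'index.html';
  } else if (!uri.includes('.')) {
    request.uri = uri + '/index.html';
  }
  return request;
}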
The last step is to handle 404 and 403 responses.
Here is the function from the manual:
'use strict';
const http = require('https');
const indexPage = 'index.html';
exports.handler = async (event, context, callback) => {
const cf = event.Records[0].cf;
const request = cf.request;
const response = cf.response;
const statusCode = response.status;
// Only replace 403 and 404 requests typically received
// when loading a page for a SPA that uses client-side routing
const doReplace = request.method === 'GET'
&& (statusCode == '403' || statusCode == '404');
const result = doReplace
? await generateResponseAndLog(cf, request, indexPage)
: response;
callback(null, result);
};
async function generateResponseAndLog(cf, request, indexPage){
const domain = cf.config.distributionDomainName;
const appPath = getAppPath(request.uri);
const indexPath = `/${appPath}/${indexPage}`;
const response = await generateResponse(domain, indexPath);
console.log('response: ' + JSON.stringify(response));
return response;
}
async function generateResponse(domain, path){
try {
// Load HTML index from the CloudFront cache
const s3Response = await httpGet({ hostname: domain, path: path });
const headers = s3Response.headers ||
{
'content-type': [{ value: 'text/html;charset=UTF-8' }]
};
return {
status: '200',
headers: wrapAndFilterHeaders(headers),
body: s3Response.body
};
} catch (error) {
return {
status: '500',
headers:{
'content-type': [{ value: 'text/plain' }]
},
body: 'An error occurred loading the page'
};
}
}
function httpGet(params) {
return new Promise((resolve, reject) => {
http.get(params, (resp) => {
console.log(`Fetching ${params.hostname}${params.path}, status code : ${resp.statusCode}`);
let result = {
headers: resp.headers,
body: ''
};
resp.on('data', (chunk) => { result.body += chunk; });
resp.on('end', () => { resolve(result); });
}).on('error', (err) => {
console.log(`Couldn't fetch ${params.hostname}${params.path} : ${err.message}`);
reject(err, null);
});
});
}
// Get the app path segment e.g. candidates.app, employers.client etc
function getAppPath(path){
if(!path){
return '';
}
if(path[0] === '/'){
path = path.slice(1);
}
const segments = path.split('/');
// will always have at least one segment (may be empty)
return segments[0];
}
// Cloudfront requires header values to be wrapped in an array
function wrapAndFilterHeaders(headers){
const allowedHeaders = [
'content-type',
'content-length',
'last-modified',
'date',
'etag'
];
const responseHeaders = {};
if(!headers){
return responseHeaders;
}
for(var propName in headers) {
// only include allowed headers
if(allowedHeaders.includes(propName.toLowerCase())){
var header = headers[propName];
if (Array.isArray(header)){
// assume already 'wrapped' format
responseHeaders[propName] = header;
} else {
// fix to required format
responseHeaders[propName] = [{ value: header }];
}
}
}
return responseHeaders;
}
When I try to implement this solution (attach the function to the origin response) I get:
The Lambda function result failed validation: The function tried to add, delete, or change a read-only header.
Here is a list of restricted headers, but I'm not modifying any of them.
If I try not to attach any headers to a response at all, the message is the same.
If I try to attach all headers, CloudFront says that I'm modifying a blacklisted header.
Objects in the bucket have only one piece of customized metadata: Cache-Control: no-cache.
It seemed like a quick task, but I've been stuck for two days already.
Any help will be appreciated.
UPD: I've searched the logs and found
ERROR Validation error: Lambda function result failed validation, the function tried to delete read-only header, headerName : Transfer-Encoding.
I'm a little bit confused. This header is not present in the origin response, but CF is telling me that I deleted it...
I tried to find the value of the "Transfer-Encoding" header that should come from the origin (S3), but it seems to have disappeared. And CloudFront says that this header is essential.
So I just hard-coded it and everything works fine.
'use strict';
const http = require('https');
const indexPage = 'index.html';
exports.handler = async (event, context, callback) => {
const cf = event.Records[0].cf;
const request = cf.request;
const response = cf.response;
const statusCode = response.status;
// Only replace 403 and 404 requests typically received
// when loading a page for a SPA that uses client-side routing
const doReplace = request.method === 'GET'
&& (statusCode == '403' || statusCode == '404');
const result = doReplace
? await generateResponseAndLog(cf, request, indexPage)
: response;
callback(null, result);
};
async function generateResponseAndLog(cf, request, indexPage){
const domain = cf.config.distributionDomainName;
const appPath = getAppPath(request.uri);
const indexPath = `/${appPath}/${indexPage}`;
const response = await generateResponse(domain, indexPath);
console.log('response: ' + JSON.stringify(response));
return response;
}
async function generateResponse(domain, path){
try {
// Load HTML index from the CloudFront cache
const s3Response = await httpGet({ hostname: domain, path: path });
const headers = s3Response.headers ||
{
'content-type': [{ value: 'text/html;charset=UTF-8' }]
};
s3Response.headers['transfer-encoding'] = 'chunked';
return {
status: '200',
headers: wrapAndFilterHeaders(headers),
body: s3Response.body
};
} catch (error) {
return {
status: '500',
headers:{
'content-type': [{ value: 'text/plain' }]
},
body: 'An error occurred loading the page'
};
}
}
function httpGet(params) {
return new Promise((resolve, reject) => {
http.get(params, (resp) => {
console.log(`Fetching ${params.hostname}${params.path}, status code : ${resp.statusCode}`);
let result = {
headers: resp.headers,
body: ''
};
resp.on('data', (chunk) => { result.body += chunk; });
resp.on('end', () => { resolve(result); });
}).on('error', (err) => {
console.log(`Couldn't fetch ${params.hostname}${params.path} : ${err.message}`);
reject(err, null);
});
});
}
// Get the app path segment e.g. candidates.app, employers.client etc
function getAppPath(path){
if(!path){
return '';
}
if(path[0] === '/'){
path = path.slice(1);
}
const segments = path.split('/');
// will always have at least one segment (may be empty)
return segments[0];
}
// Cloudfront requires header values to be wrapped in an array
function wrapAndFilterHeaders(headers){
const allowedHeaders = [
'content-type',
'content-length',
'content-encoding',
'transfer-encoding',
'last-modified',
'date',
'etag'
];
const responseHeaders = {};
if(!headers){
return responseHeaders;
}
for(var propName in headers) {
// only include allowed headers
if(allowedHeaders.includes(propName.toLowerCase())){
var header = headers[propName];
if (Array.isArray(header)){
// assume already 'wrapped' format
responseHeaders[propName] = header;
} else {
// fix to required format
responseHeaders[propName] = [{ value: header }];
}
}
}
return responseHeaders;
}

Image Upload with AWS using ID returned from JWT

update
OK, so I am having an authentication issue with passport/JWT when trying to grab the header after it got set at login. JWT should return an ID, and I am trying to grab that ID and use it to update a user profile with a collection image upload. Here is where it gets weird. I get this error in the console:
you are not valid
node:internal/errors:464
ErrorCaptureStackTrace(err);
^
Error [ERR_HTTP_HEADERS_SENT]: Cannot set headers after they are sent to the client
at new NodeError (node:internal/errors:371:5)
at ServerResponse.setHeader (node:_http_outgoing:576:11)
at ServerResponse.header (C:\Users\tquig\OneDrive\Documents\GitHub\Team2\backend\node_modules\express\lib\response.js:776:10)
at ServerResponse.send (C:\Users\tquig\OneDrive\Documents\GitHub\Team2\backend\node_modules\express\lib\response.js:170:12)
at ServerResponse.json (C:\Users\tquig\OneDrive\Documents\GitHub\Team2\backend\node_modules\express\lib\response.js:267:15)
at C:\Users\tquig\OneDrive\Documents\GitHub\Team2\backend\routes\users.js:239:29
at processTicksAndRejections (node:internal/process/task_queues:96:5) {
code: 'ERR_HTTP_HEADERS_SENT'
}
Now Insomnia gives me back this error:
"AwsError: MissingRequiredParameter: Missing required key 'Key' in params"
I am not sure whether AWS just hates the JWT token, or whether my JWT ID isn't really being returned properly and I am not doing it right in general.
Here is the AWS middleware:
const S3 = require('aws-sdk/clients/s3')
const fs = require('fs')
const bucketName = process.env.bucketName
const region = process.env.bucketRegion
const accessKeyId = process.env.AWSAccessKeyId
const secretAccessKey = process.env.AWSSecretKey
const s3 = new S3({
region,
accessKeyId,
secretAccessKey
})
function uploadFile(file){
const fileStream = fs.createReadStream(file.path)
const uploadParams = {
Bucket: bucketName,
Body: fileStream,
key: file.filename
}
return s3.upload(uploadParams).promise()
}
exports.uploadFile = uploadFile
//downloads a file from s3
//not tested yet
/*
function getFileStream(fileKey){
const downloadParams = {
key: fileKey
bucket: bucketName
}
return s3.getObject(downloadParams).createReadStream()
}
exports.getFileStream = getFileStream
*/
Here is the authentication code:
require('dotenv').config();
const jwt = require('jsonwebtoken');
const mongoose = require('mongoose');
// middleware functionality to check logged in user
module.exports = async (req, res, next) => {
try{
const token = req.cookies.jwt;
if(!token) return res.status(401).json({errorMessage: "Unauthorized"});
const verified = jwt.verify(token, process.env.secretKey);
req.user = verified.id;
next();
} catch (err){
console.error(err);
res.status(401).json({errorMessage: "Unauthorized"});
}
}
and here is the route:
const storage = multer.diskStorage({
destination: function (req, file, cb) {
cb(null, "uploads");
},
filename: function (req, file, cb) {
cb(
null,
file.fieldname + "-" + Date.now() + path.extname(file.originalname)
);
},
});
const upload = multer({
storage: storage,
fileFilter: (req, file, cb) => {
if (file.mimetype == "image/png" || file.mimetype == "image/jpg" || file.mimetype == "image/jpeg") {
cb(null, true);
} else {
cb(null, false);
return cb(new Error('Only .png, .jpg and .jpeg format allowed!'));
}
} });
router.post("/collections", requireLogin, upload.single("myImage"), async (req, res) => {
const obj = {
img: {
data: req.file.filename,
contentType: req.file.contentType
}
}
const newCollection = new collections({
imgName: req.file.filename,
image: obj.img
});
const findBool = user.findById(req.body.id)
.then(() => res.json('found user'))
.catch(err => res.status(400).json('UserIdError: ' + err));
//need another check before uploading to aws to prevent unauthorized uploads
if (findBool){
//upload to aws
await uploadFile(req.file).then(() => res.json('AWS upload Complete'))
.catch(err => res.status(400).json('AwsError: ' + err))
//mongodb upload
try {
await user.findbyId(req.body.id).insertOne(newCollection)
} catch (error) {
res.status(400).json('updateError: ' + error)
}
//delete file from local storage
unlinkFile(file.path)
}
});

Google Storage + JQuery-File-Upload + Django + Signed URL, how should I change submit() and relevant options?

I have the following JS code, which uses the signed-url API to get signed URLs for uploading content to Google Storage via a Django API.
When I use it with the following code :
xhr.open("PUT", data.signed_url);
xhr.setRequestHeader('Content-Type', file.type);
xhr.send(file);
It works fine and I am able to upload very large files to Google Storage. But obviously, when I do that, I cannot use any of the progress-bar features of jQuery-File-Upload.
Can you please suggest how I should alter data.submit(), where I should put it, and how I should change the options or settings prior to submitting? Should I be overriding the add or submit callback?
I feel that support for Google Storage is missing from jQuery-File-Upload, as the only example covers only the obsolete Google Blobstore: https://github.com/blueimp/jQuery-File-Upload/wiki/Google-App-Engine
$("#fileupload").fileupload({
dataType: 'json',
type: 'PUT',
sequentialUploads: true,
submit: function(e, data) {
var $this = $(this);
$.each(data.files, function(index, file) {
// pack our data to get signature url
var formData = new FormData();
formData.append('filename', file.name);
formData.append('type', file.type);
formData.append('size', file.size);
// Step 3: get our signature URL
$.ajax({
url: '/api/getsignedurl/',
type: 'POST',
processData: false,
contentType: false,
dataType: 'json',
headers: {
'X-CSRFToken': Cookies.get('csrftoken'),
},
primary_data: data,
data: formData
}).done(function (data) {
// Step 5: got our url, push to GCS
const xhr = new XMLHttpRequest();
if ('withCredentials' in xhr) {
console.log("With credentials");
xhr.open("PUT", data.signed_url, true);
}
else if (typeof XDomainRequest !== 'undefined') {
console.log("With domainrequest");
xhr = new XDomainRequest();
xhr.open("PUT", data.signed_url);
}
else {
console.log("With null");
xhr = null;
}
//What shall I do to make the following work for uploading GS
this.primary_data.url = data.signed_url;
this.primary_data.headers={'Content-Type': file.type};
this.primary_data.submit();
xhr.onload = () => {
const status = xhr.status;
if (status === 200) {
} else {
alert("Failed to upload 1: " + status);
}
};
xhr.onerror = () => {
alert("Failed to upload 2");
};
//When the code below uncommented, it uploads to GS succesfully.
//xhr.setRequestHeader('Content-Type', file.type);
//xhr.send(file);
});
});
},
Also, this is my CORS setup for the GS bucket.
[
{
"origin": ["*"],
"responseHeader": ["Content-Type", "Access-Control-Allow-Origin"],
"method": ["GET", "PUT", "OPTIONS"],
"maxAgeSeconds": 60
}
]

How to pass data from the response headers to the ApolloProvider with SSR?

I work with an application built with Next.js and Apollo. I receive a token in the GraphQL response headers. I can read this token on the server side, and I need to pass it to the ApolloProvider on the client side.
cache.writeData({
data: {
isLoggedIn: false
}
});
const afterwareLink = new ApolloLink((operation, forward) => {
return forward(operation).map(response => {
const context = operation.getContext();
const {
response: { headers },
} = context;
if (headers) {
const authorization = headers.get('authorization');
if (authorization) {
console.log(authorization);
// what to do next? I need somehow set isLoggedIn to true...
}
}
return response;
});
});
const link = ApolloLink.from([
afterwareLink,
new RetryLink(),
httpLink
]);
const client = new ApolloClient({
link,
cache
});
function App({ children }){
return (
<ApolloProvider client={client}>
{children}
</ApolloProvider>
);
}
I have tried to set the context inside the ApolloLink:
operation.setContext({ isLoggedIn: true });
I can't write directly to the cache because the page is rendered on the server side.