I use React Native with GraphQL. I can currently upload a single photo to AWS successfully, but I want to upload multiple files at once. If I run a loop, I can upload multiple files to AWS one at a time, as below:
const onValid = ({ caption }) => {
  // wrap each selected photo in a ReactNativeFile (apollo-upload-client)
  // so it can be sent as a GraphQL Upload
  const uploadPhotoArray = selectPhoto.map((sp, index) => {
    return new ReactNativeFile({
      uri: sp,
      name: `${index}.jpg`,
      type: "image/jpeg",
    });
  });
  for (let i = 0; i < uploadPhotoArray.length; i++) {
    uploadPhotoMutation({
      variables: {
        caption,
        file: uploadPhotoArray[i],
      },
    });
  }
};
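One aside on the loop: it fires the mutations without awaiting them, so failures go unnoticed and there is no "all done" point. If you keep one file per mutation, here is a minimal sketch that awaits them all (assuming onValid may be made async):

const onValid = async ({ caption }) => {
  const uploadPhotoArray = selectPhoto.map(
    (sp, index) => new ReactNativeFile({ uri: sp, name: `${index}.jpg`, type: "image/jpeg" })
  );
  // the mutate function returned by useMutation resolves with the mutation result
  const results = await Promise.all(
    uploadPhotoArray.map((file) => uploadPhotoMutation({ variables: { caption, file } }))
  );
  console.log(`${results.length} uploads finished`);
};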
But the problem is that if I upload 2 images, it creates two rows on AWS and in the backend. In order to make one row (one post) with 2 images, I made the backend's file column an array rather than a string. However, I think the problem is on the frontend. I wanted to build uploadPhotoArray as an array, like below:
Array [
ReactNativeFile {
"name": "0.jpg",
"type": "image/jpeg",
"uri": "file:///storage/emulated/0/DCIM/Camera/20220306_020216.jpg",
},
ReactNativeFile {
"name": "1.jpg",
"type": "image/jpeg",
"uri": "file:///storage/emulated/0/DCIM/Camera/20220305_201130.jpg",
},
]
and then tried to run uploadPhotoMutation with this array:
uploadPhotoMutation({
  variables: {
    caption,
    file: uploadPhotoArray,
  },
});
That should pass the array data to the backend, but it doesn't seem to be working. If I can't pass array data to the backend, meaning only one-by-one uploads are possible, then I need to assemble the incoming data into an array on the backend, but that's also hard for me. If you want to clarify my question, I can answer in real time, and chat is also possible. Please give me any idea. :(
Backend code:
const fileUrl = fileArrayCheck
  ? await uploadFileToS3(file, loggedInUser.id, "uploads")
  : await uploadSingleFileToS3(file, loggedInUser.id, "uploads");
export const uploadSingleFileToS3 = async (file, userId, folderName) => {
  console.log(file);
  AWS.config.update({
    credentials: {
      accessKeyId: process.env.AWS_KEY,
      secretAccessKey: process.env.AWS_SECRET,
    },
  });
  // graphql-upload resolves each file to { filename, createReadStream, ... }
  const { filename, createReadStream } = await file;
  const readStream = createReadStream();
  const objectName = `${folderName}/${userId}-${Date.now()}-${filename}`;
  const { Location } = await new AWS.S3()
    .upload({
      Bucket: "chungchunonuploads",
      Key: objectName,
      ACL: "public-read",
      Body: readStream,
    })
    .promise();
  return [Location];
};
export const uploadFileToS3 = async (filesToUpload, userId, folderName) => {
  // the callback must return each upload's promise so Promise.all can wait on them
  const uploadPromises = filesToUpload.map((file) =>
    uploadSingleFileToS3(file, userId, folderName)
  );
  return Promise.all(uploadPromises);
};
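A likely reason passing uploadPhotoArray fails: the mutation variable is typed as a single Upload in the schema, so an array fails validation. apollo-upload-client does support lists of files when the schema declares one. A minimal sketch under that assumption; the names files and uploadPhoto are illustrative and must match your own schema:

import { gql } from "@apollo/client";

// assumes the server uses graphql-upload and the mutation is declared as:
//   uploadPhoto(caption: String, files: [Upload!]!): Photo
const UPLOAD_PHOTO_MUTATION = gql`
  mutation uploadPhoto($caption: String, $files: [Upload!]!) {
    uploadPhoto(caption: $caption, files: $files) {
      id
    }
  }
`;

// one mutation call with the whole array; the resolver then receives an array
// of file promises, which is exactly what uploadFileToS3 above maps over
uploadPhotoMutation({
  variables: {
    caption,
    files: uploadPhotoArray,
  },
});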
I'm trying to send a request with an image to a Lambda function through API Gateway.
I'm using this piece of code to parse the form-data object received by my Lambda function. I then upload the image to S3, but when downloading and opening the image from S3, I see that it's corrupt.
I have tried the following npm packages:
parse-multipart
parse-multipart-data
Both do not work, because I get an empty parts array. The piece of code I use does get results in the array, but the buffers seem to be corrupt.
The problem seems to be in this line of code:
Buffer.from(item.slice(item.search(/Content-Type:\s.+/g) + item.match(/Content-Type:\s.+/g)[0].length + 4, -4), 'binary')
Does anyone have a solution for me?
You can use busboy to parse the multipart form data before uploading to S3 as shown below:
// This code is written as an ECMAScript module (ESM), not CommonJS,
// so make sure you add "type": "module" to your package.json.
import {S3Client, PutObjectCommand} from '@aws-sdk/client-s3';
import Busboy from 'busboy';
// Initialize the clients outside the function handler to take advantage of execution environment reuse.
const s3Client = new S3Client({region: process.env.AWS_REGION});
// Function handler.
export async function handler(event) {
  const {
    ContentType: contentType = '',
    File: file = '',
  } = await FORM.parse(event['body'], event['headers']);
  try {
    // Adds an object to a bucket. We must have `WRITE` permissions on a bucket to add an object to it.
    await s3Client.send(new PutObjectCommand({
      Body: file,
      Bucket: 'BUCKET_NAME',
      ContentType: contentType,
      Key: 'SOME_KEY',
    }));
    return {
      isBase64Encoded: false,
      statusCode: 200,
      body: JSON.stringify({
        message: 'Everything is gonna be alright.',
      }),
    };
  } catch (e) {
    return {
      isBase64Encoded: false,
      statusCode: 404,
      body: JSON.stringify(e),
    };
  }
}
const FORM = {
  parse(body, headers) {
    return new Promise((resolve, reject) => {
      const data = {};
      // API Gateway delivers the raw request body base64-encoded.
      const buffer = Buffer.from(body, 'base64');
      const bb = Busboy({
        headers: Object.keys(headers).reduce((newHeaders, key) => {
          // busboy expects lower-case headers.
          newHeaders[key.toLowerCase()] = headers[key];
          return newHeaders;
        }, {}),
        limits: {
          fileSize: 10485760, // Set as desired.
          files: 1,
        },
      });
      bb.on('file', (name, stream, info) => {
        const chunks = [];
        stream.on('data', (chunk) => {
          if (name === 'File') {
            chunks.push(chunk);
          }
        }).on('limit', () => {
          reject(new Error('File size limit has been reached.'));
        }).on('close', () => {
          if (name === 'File') {
            data[name] = Buffer.concat(chunks);
            data['ContentType'] = info.mimeType;
          }
        });
      });
      bb.on('error', (err) => {
        reject(err);
      });
      bb.on('close', () => {
        resolve(data);
      });
      bb.end(buffer);
    });
  }
};
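If you want to sanity-check FORM.parse locally, you can hand-build a multipart body and base64-encode it the way API Gateway does (this assumes the API is configured so the Lambda receives a base64-encoded body). A sketch under those assumptions, run inside an async function in the same module; the boundary and bytes are arbitrary stand-ins:

const boundary = 'test-boundary'; // arbitrary
const fileBytes = Buffer.from([0x89, 0x50, 0x4e, 0x47]); // stand-in for real image bytes
const body = Buffer.concat([
  Buffer.from(
    `--${boundary}\r\n` +
    'Content-Disposition: form-data; name="File"; filename="test.png"\r\n' +
    'Content-Type: image/png\r\n\r\n'
  ),
  fileBytes,
  Buffer.from(`\r\n--${boundary}--\r\n`),
]).toString('base64');

// the field name must be 'File' to match the handler's filter above
const { ContentType, File } = await FORM.parse(body, {
  'Content-Type': `multipart/form-data; boundary=${boundary}`,
});
console.log(ContentType, File.length); // image/png 4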
I can upload an array of images to AWS S3 successfully without any issues, but I want to show these images in my application. I can upload images to my S3 bucket, but I also want to add the metadata, or the URL, to DynamoDB so I can show these images along with the other data in my application.
Any support will be appreciated.
import { View, Text, Image, StyleSheet, ScrollView, Button, TextInput, Platform, PermissionsAndroid, ToastAndroid } from 'react-native';
import React, {useState, useEffect} from "react";
import { RadioButton } from 'react-native-paper';
import { useForm, Controller } from "react-hook-form";
import ImagePicker from 'react-native-image-crop-picker';
import DropDownPicker from 'react-native-dropdown-picker';
import { DataStore, Storage, API, graphqlOperation, Auth } from 'aws-amplify';
import { createService } from '../graphql/mutations';
import { RNS3 } from 'react-native-aws3';
const AddService = () => {
  const [checked, setChecked] = React.useState('OFFERED');
  const { control, handleSubmit, formState: { errors } } = useForm({});
  const [images, setImages] = useState([]);

  const uploadButtonClick = () => {
    // kick off one upload per selected image, then wait for all of them
    const promises = images.map((image) => uploadImageToS3(image));
    Promise.all(promises).then(uploadedImgs => {
      console.log('Yayy, all images are uploaded successfully', uploadedImgs);
    });
  };
  const uploadImageToS3 = async image => {
    const options = {
      keyPrefix: "uploads/",
      bucket: "alkhair-serviceimages142621-dev",
      region: "us-east-1",
      accessKey: "",
      secretKey: "",
      successActionStatus: 201
    };
    const file = {
      uri: `${image.path}`,
      name: image.path.substring(image.path.lastIndexOf('/') + 1), // extracting filename from image path
      type: image.mime,
    };
    // I had to work around here to get the image URL and add it to DynamoDB, but I can
    // only add one image. I need to add the images the user uploads, max of 4 images.
    setImageUri(`http://${options.bucket}.s3.amazonaws.com/${options.keyPrefix}${file.name}`);
    return new Promise((resolve, reject) => {
      RNS3.put(file, options)
        .then(res => {
          if (res.status === 201) {
            const {postResponse} = res.body;
            resolve({
              src: postResponse.location,
            });
          } else {
            console.log('error uploading to s3', res);
          }
        })
        .catch(err => {
          console.log('error uploading to s3', err);
          reject(err);
        });
    });
  };
  const onSubmit = async (data) => {
    const createNewService = {
      type: checked,
      name: data.serviceName,
      description: data.serviceDescription,
      image: [], // I need the array of image URLs (already uploaded to S3) here
      serviceProviderName: data.serviceProviderName,
      serviceProviderAddress: data.serviceProviderAddress,
      serviceProviderPhone: data.serviceProviderPhone,
      notes: data.serviceProviderNotes,
      serviceAreaId: areaLabel.value,
      serviceCategoryId: categLabel.value,
    };
    API.graphql(graphqlOperation(createService, {input: createNewService}));
  };
  const pickImage = async () => {
    let isStoragePermitted = await requestExternalWritePermission();
    if (isStoragePermitted) {
      ImagePicker.openPicker({
        multiple: true,
        waitAnimationEnd: false,
        includeExif: true,
        forceJpg: true,
        compressImageQuality: 0.8,
        maxFiles: 6,
        includeBase64: true,
        showsSelectedCount: true,
        mediaType: 'photo',
      }).then(imgs => {
        if (imgs.length <= 6) {
          setImages([...images, ...imgs]);
        } else {
          setImages([...images]);
          ToastAndroid.show("Maximum of 6 images allowed", ToastAndroid.SHORT);
        }
      });
    }
  };
  const [areaOpen, setAreaOpen] = useState(false);
  const [areaValue, setAreaValue] = useState(null);
  const [area, setArea] = useState([
    {label: 'مصر القديمة', value: 'AAA'},
    {label: 'المعادي', value: 'BBB'},
    {label: 'القاهرة الجديدة', value: 'CCC'}
  ]);
  const [serviceOpen, setServiceOpen] = useState(false);
  const [serviceValue, setServiceValue] = useState(null);
  const [service, setService] = useState([
    {label: 'خدمات مواصلات ', value: '09339a8d'},
    {label: 'اخرى', value: 'b4d227e3'}
  ]);
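One way to get the whole array into DynamoDB is to wait for all the uploads, collect the resolved locations, and pass that array as the image field. A minimal sketch, assuming the image field is declared as a list of strings ([String]) in the GraphQL schema:

// a minimal sketch, assuming `image: [String]` in the Service schema
const uploadAllImages = async () => {
  const uploaded = await Promise.all(images.map((image) => uploadImageToS3(image)));
  return uploaded.map((u) => u.src); // uploadImageToS3 resolves { src: postResponse.location }
};

const onSubmit = async (data) => {
  const imageUrls = await uploadAllImages();
  const createNewService = {
    type: checked,
    name: data.serviceName,
    description: data.serviceDescription,
    image: imageUrls, // the whole array of S3 locations
    // ...the remaining fields as above
  };
  await API.graphql(graphqlOperation(createService, { input: createNewService }));
};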
I'd like a user to be able to upload either a JPG or PNG image to an S3 bucket.
I am using a Lambda function which lets me presign only .jpg uploads for S3, and it works great for just one file type. How do I add an additional file type to presign, for example .png images too? Do I really need to write a new Lambda where I just change the .jpg to .png, or can I do it somehow in my existing code below?
const AWS = require('aws-sdk')
AWS.config.update({ region: process.env.REGION })
const s3 = new AWS.S3();
const uploadBucket = 'xxx-bucket'

exports.handler = async (event) => {
  const result = await getUploadURL()
  console.log('Result: ', result)
  return result
};

const getUploadURL = async function() {
  console.log('getUploadURL started')
  let actionId = Date.now()
  var s3Params = {
    Bucket: uploadBucket,
    Key: `${actionId}.jpg`,
    ContentType: 'image/jpeg',
    CacheControl: 'max-age=31104000',
    ACL: 'public-read',
  };
  return new Promise((resolve, reject) => {
    // Get signed URL
    let uploadURL = s3.getSignedUrl('putObject', s3Params)
    resolve({
      "statusCode": 200,
      "isBase64Encoded": false,
      "headers": {
        "Access-Control-Allow-Origin": "*"
      },
      "body": JSON.stringify({
        "uploadURL": uploadURL,
        "photoFilename": `${actionId}.jpg`
      })
    })
  })
}
Your options are as follows:
Make a new Lambda, as you suggested, to handle the PNG separately.
Pass parameters to your getUploadURL function through your event, something like:
exports.handler = async event => {
  const { filetype } = JSON.parse(event.body) // or from pathParameters, query string, etc.
  const result = await getUploadURL(filetype)
  console.log('Result: ', result)
  return result
};

const getUploadURL = async filetype => {
  console.log('getUploadURL started')
  let actionId = Date.now()
  var s3Params = {
    Bucket: uploadBucket,
    Key: `${actionId}.${filetype}`,
    ContentType: `image/${filetype === 'jpg' ? 'jpeg' : 'png'}`,
    CacheControl: 'max-age=31104000',
    ACL: 'public-read',
  };
  ...
The call to S3.getSignedUrl() requires {Bucket: 'bucket', Key: 'key'} at a minimum for a putObject operation, so if you don't want to sacrifice the filename extension and/or the content-type attribute, those are the only options.
AWS docs
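For completeness, here is a hypothetical client-side flow for the second option. The endpoint URL and pngBlob are assumptions; the key detail is that the PUT's Content-Type must match the ContentType that was signed, or S3 rejects the upload with a signature mismatch:

// hypothetical usage, assuming the Lambda sits behind a proxy-integration
// route that receives { filetype } in the JSON body
const res = await fetch('https://YOUR_API_ENDPOINT/upload-url', {
  method: 'POST',
  body: JSON.stringify({ filetype: 'png' }),
});
const { uploadURL, photoFilename } = await res.json();

await fetch(uploadURL, {
  method: 'PUT',
  headers: { 'Content-Type': 'image/png' }, // must match the signed ContentType
  body: pngBlob, // a Blob/File holding the image bytes
});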
I am working on an animation project to add subtitles to what my character says. I can get the mp3 file from AWS Polly with no issue.
However, when I want to get the marks for each word separately, it doesn't work. I checked the inspector tab, and I can see some params being passed in the request to polly.aws. Any idea how I can get the JSON/mark-up file so I know the start and end of each word and sentence?
const AWS = require('aws-sdk')
const Fs = require('fs')

const Polly = new AWS.Polly({
  signatureVersion: 'v4',
  region: 'us-east-1'
})

// # this part works fine
let params = {
  'Text': 'Hi, my name is Soley. We are building something amazing!',
  'OutputFormat': 'mp3',
  'VoiceId': 'Matthew'
}
// # from chrome's network tab:
// # and is there a way to get mp3 and mark-up text at the same time?
// "text": "Hi, my name is Soley. We are building something amazing!",
// "textContentType": "text",
// "voiceId": "Matthew",
// "languageCode": "en-US",
// "engine": "standard",
// "outputFormat": "json-8000",
// "lexiconNames": [],
// "speechMarksTypes": [
// "word",
// "sentence"
// ]
Polly.synthesizeSpeech(params, (err, data) => {
  if (err) {
    console.log(err)
  } else if (data) {
    console.log(data)
    if (data.AudioStream instanceof Buffer) {
      Fs.writeFile("speech." + params.OutputFormat, data.AudioStream, function (err) {
        if (err) {
          return console.log(err)
        }
        console.log("The file was saved!")
      })
    }
  }
})
Some useful links to check: https://aws.amazon.com/blogs/aws/new-amazon-polly-speech-marks/
Using the CLI also works fine: https://docs.aws.amazon.com/polly/latest/dg/speechmarkexamples.html but I want it in Node.js.
Oh, I think I found something:
let params = {
  'Text': 'Hi, my name is Soley. We are building something amazing!',
  'OutputFormat': 'json',
  'VoiceId': 'Matthew',
  'SpeechMarkTypes': ['word', 'sentence']
}
Thanks to the Java sample: https://docs.aws.amazon.com/polly/latest/dg/SynthesizeSpeechMarksSample.html
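To answer the "at the same time" question from the comments: you can't get the mp3 and the speech marks in a single call, since OutputFormat takes one value, so two requests are needed. A minimal sketch (run inside an async function), reusing the Polly client from above:

const base = {
  Text: 'Hi, my name is Soley. We are building something amazing!',
  VoiceId: 'Matthew',
};
const [audio, marks] = await Promise.all([
  Polly.synthesizeSpeech({ ...base, OutputFormat: 'mp3' }).promise(),
  Polly.synthesizeSpeech({
    ...base,
    OutputFormat: 'json',
    SpeechMarkTypes: ['word', 'sentence'],
  }).promise(),
]);
Fs.writeFileSync('speech.mp3', audio.AudioStream);
// the marks come back as newline-delimited JSON, one object per word/sentence,
// each with a time field (ms) plus start/end character offsets
Fs.writeFileSync('speech.marks.jsonl', marks.AudioStream);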
I want to return the connectionId to a client after the client connects to an AWS WebSocket API.
I'm using apigwManagementApi.postToConnection to send a response to the client, but I always get an unhelpful error message.
I've already tried debugging and searching Google, but I can't find a solution for this.
patch.js
require('aws-sdk/lib/node_loader');
var AWS = require('aws-sdk/lib/core');
var Service = AWS.Service;
var apiLoader = AWS.apiLoader;

apiLoader.services['apigatewaymanagementapi'] = {};
AWS.ApiGatewayManagementApi = Service.defineService('apigatewaymanagementapi', ['2018-11-29']);
Object.defineProperty(apiLoader.services['apigatewaymanagementapi'], '2018-11-29', {
  get: function get() {
    var model = {
      "metadata": {
        "apiVersion": "2018-11-29",
        "endpointPrefix": "execute-api",
        "signingName": "execute-api",
        "serviceFullName": "AmazonApiGatewayManagementApi",
        "serviceId": "ApiGatewayManagementApi",
        "protocol": "rest-json",
        "jsonVersion": "1.1",
        "uid": "apigatewaymanagementapi-2018-11-29",
        "signatureVersion": "v4"
      },
      "operations": {
        "PostToConnection": {
          "http": {
            "requestUri": "/@connections/{connectionId}",
            "responseCode": 200
          },
          "input": {
            "type": "structure",
            "members": {
              "Data": {
                "type": "blob"
              },
              "ConnectionId": {
                "location": "uri",
                "locationName": "connectionId"
              }
            },
            "required": [
              "ConnectionId",
              "Data"
            ],
            "payload": "Data"
          }
        }
      },
      "shapes": {}
    }
    model.paginators = {
      "pagination": {}
    }
    return model;
  },
  enumerable: true,
  configurable: true
});
module.exports = AWS.ApiGatewayManagementApi;
index.js
const AWS = require('aws-sdk');
require('./patch.js');

exports.handler = async (event) => {
  const connectionId = event.requestContext.connectionId;
  const apigwManagementApi = new AWS.ApiGatewayManagementApi({
    apiVersion: '2018-11-29',
    endpoint: event.requestContext.domainName + '/' + event.requestContext.stage
  });
  await apigwManagementApi.postToConnection({ ConnectionId: connectionId, Data: connectionId }).promise();
  return {};
};
client.js
const WebSocket = require('ws');
const ws = new WebSocket('wss://****');

ws.on('open', () => {
  console.log('connected ===================>');
  ws.on('message', data => console.warn(`From server: ${data}`));
});
Error in CloudWatch:
{
  "errorMessage": "410",
  "errorType": "UnknownError",
  "stackTrace": [
    "Object.extractError (/var/runtime/node_modules/aws-sdk/lib/protocol/json.js:48:27)",
    "Request.extractError (/var/runtime/node_modules/aws-sdk/lib/protocol/rest_json.js:52:8)",
    "Request.callListeners (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:105:20)",
    "Request.emit (/var/runtime/node_modules/aws-sdk/lib/sequential_executor.js:77:10)",
    "Request.emit (/var/runtime/node_modules/aws-sdk/lib/request.js:683:14)",
    "Request.transition (/var/runtime/node_modules/aws-sdk/lib/request.js:22:10)",
    "AcceptorStateMachine.runTo (/var/runtime/node_modules/aws-sdk/lib/state_machine.js:14:12)",
    "/var/runtime/node_modules/aws-sdk/lib/state_machine.js:26:10",
    "Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:38:9)",
    "Request.<anonymous> (/var/runtime/node_modules/aws-sdk/lib/request.js:685:12)"
  ]
}
I don't know why, but if I try this in a custom route, the code works.
Does anyone know how to solve this?
I'd suggest looking into this example from AWS; there is an on-connect response for subprotocol confirmation, but I think any payload can be provided.
The most important bit is the route integration settings in the template, basically the following two lines in the route integration properties:
IntegrationMethod: POST
ConnectionType: INTERNET
With those set, the response will be sent to the connected client.
The only way I've found to make this work is to use a DynamoDB table to store connections, then set up a trigger from the table back to a Lambda function.
There are a few catches, though. This Lambda function won't work like your index.js file above: you'll have to run npm install --save aws-sdk in a folder with your index.js file, zip it, and upload that to the Lambda function so the SDK is bundled locally.
You will also need to set up a user with proper access and put the credentials into your Lambda function.
Note: if you see a 410 error, it means the connection is no longer there, so you're going in the right direction at that point.
const AWS = require('aws-sdk');
require('./patch.js');
var log = console.log;

AWS.config.update({
  accessKeyId: "YOURDATAHERE",
  secretAccessKey: "YOURDATAHERE"
});

let send = undefined;

function init() {
  const apigwManagementApi = new AWS.ApiGatewayManagementApi({
    apiVersion: '2018-11-29',
    endpoint: "HARDCODEYOURENDPOINTHERE"
  });
  send = async (connectionId, data) => {
    await apigwManagementApi.postToConnection({ ConnectionId: connectionId, Data: `${data}` }).promise();
  };
}

exports.handler = async (event, context) => {
  init();
  console.log('Received event:', JSON.stringify(event, null, 2));
  for (const record of event.Records) {
    //console.log(record.eventID);
    console.log(record.eventName);
    console.log('DynamoDB Record: %j', record.dynamodb);
    if (record.eventName == "INSERT") {
      var connectionId = record.dynamodb.NewImage.connectionId.S;
      try {
        await send(connectionId, connectionId);
      } catch (err) {
        log("Error", err);
      }
      log("sent");
    }
  }
  return `Successfully processed ${event.Records.length} records.`;
};
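As the question itself notes, the same call works from a custom route; the usual explanation for the 410 on $connect is that API Gateway doesn't complete the WebSocket handshake until the $connect handler returns, so the connection isn't postable yet. A minimal sketch of a custom "whoami" route handler that echoes the connectionId back; the route name is illustrative:

const AWS = require('aws-sdk');
require('./patch.js');

// handler for a custom 'whoami' route; by the time a custom route fires,
// the handshake is complete, so postToConnection can reach the client
exports.handler = async (event) => {
  const { connectionId, domainName, stage } = event.requestContext;
  const api = new AWS.ApiGatewayManagementApi({
    apiVersion: '2018-11-29',
    endpoint: `${domainName}/${stage}`,
  });
  await api.postToConnection({ ConnectionId: connectionId, Data: connectionId }).promise();
  return { statusCode: 200 };
};

The client would then request it with ws.send(JSON.stringify({ action: 'whoami' })) once the socket is open.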