Cypress.Commands.add('verifyfilesinS3', (filename) => {
  const AWS = require('aws-sdk');
  var fileExists;
  AWS.config.update({
    accessKeyId: "",
    accessSecretKey: "",
    region: ""
  });
  const s3 = new AWS.S3();
  const params = {
    Bucket: "",
    Key: ""
  };
  const result = s3.headObject(params, (err, data) => { // Check if the file exists
    if (err) {
      if (err.code === 'NotFound') {
        console.log(`File "${params.Key}" does not exist in the "${params.Bucket}" bucket.`);
        fileExists = 0;
      } else {
        console.error(err);
        fileExists = err;
      }
    } else {
      fileExists = 2;
      console.log(`File "${params.Key}" exists in the "${params.Bucket}" bucket.`);
    }
  });
  return fileExists;
});
I first tried hardcoding the values directly to test, but it still fails with the error "CredentialsError: Missing credentials in config, if using AWS_CONFIG_FILE, set AWS_SDK_LOAD_CONFIG=1".
When I run the same code on its own in Node.js, it works as expected.
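For reference, here is a minimal sketch of the same check with the callback wrapped in a promise; the placeholder credential and bucket values are assumptions, not values from the question. Two things stand out in the code above: the v2 config key is secretAccessKey, not accessSecretKey, which alone would explain the CredentialsError; and fileExists is returned before the asynchronous headObject callback runs, so it will always be undefined even with valid credentials.

Cypress.Commands.add('verifyfilesinS3', (filename) => {
  const AWS = require('aws-sdk');
  AWS.config.update({
    accessKeyId: '<ACCESS_KEY_ID>',
    secretAccessKey: '<SECRET_ACCESS_KEY>', // correct v2 key name
    region: '<REGION>'
  });
  const s3 = new AWS.S3();
  const params = { Bucket: '<BUCKET>', Key: filename };
  // Wrap the callback API in a promise so Cypress waits for the result.
  return new Cypress.Promise((resolve, reject) => {
    s3.headObject(params, (err) => {
      if (err && err.code === 'NotFound') return resolve(false);
      if (err) return reject(err);
      resolve(true);
    });
  });
});

Cypress treats a returned promise as the command's subject, so a test can then write cy.verifyfilesinS3('file.json').should('be.true') and the assertion waits for the check to resolve.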
const S3 = require('aws-sdk/clients/s3');
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const { S3Client, GetObjectCommand } = require("@aws-sdk/client-s3");
const dotenv = require('dotenv');
dotenv.config();

const bucketName = process.env.AWS_BUCKET_NAME;
const region = process.env.AWS_BUCKET_REGION;
const accessKeyId = process.env.AWS_ACCESS_KEY;
const secretAccessKey = process.env.AWS_SECRET_KEY;

const s3 = new S3({
  region,
  accessKeyId,
  secretAccessKey
});
router.get("/:id", async (req, res) => {
  try {
    const post = await Post.findById(req.params.id);
    const getObjectParams = {
      Bucket: bucketName,
      Key: post.photo,
    };
    const command = new GetObjectCommand(getObjectParams);
    const url = await getSignedUrl(s3, command, { expiresIn: 3600 });
    post.imageUrl = url;
    res.status(200).json(post);
  } catch (err) {
    console.error('errorrr', err);
    res.status(500).json(err);
  }
});
Here is my code. I have console-logged post, getObjectParams, and command, and everything is there, but url never gets logged; instead the catch block logs "Cannot read properties of undefined (reading 'clone')".
What is the issue here?
I think the problem is with the getSignedUrl function, but I am not sure what it is.
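A likely cause, offered as a guess rather than a confirmed diagnosis: getSignedUrl from @aws-sdk/s3-request-presigner expects a v3 client, but the s3 above is a v2 client from aws-sdk/clients/s3, and mixing the two produces exactly this kind of undefined-property error inside the presigner. A minimal sketch using the v3 S3Client that is already imported; note that v3 also requires the credentials to be nested under a credentials key:

const s3 = new S3Client({
  region,
  credentials: {
    accessKeyId,
    secretAccessKey,
  },
});

// The route handler can then stay as written:
const command = new GetObjectCommand({ Bucket: bucketName, Key: post.photo });
const url = await getSignedUrl(s3, command, { expiresIn: 3600 });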
What is the way to get the response body of a JSON file uploaded to S3, so the Lambda function can print it?
I used the following code, but it only retrieves the Content-Type. Any suggestions for this, please?
// console.log('Loading function');
const aws = require('aws-sdk');
const s3 = new aws.S3({ apiVersion: '2006-03-01' });

exports.handler = async (event, context) => {
  // console.log('Received event:', JSON.stringify(event, null, 2));
  // Get the object from the event and show its content type
  const bucket = event.Records[0].s3.bucket.name;
  const key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
  const params = {
    Bucket: bucket,
    Key: key,
  };
  try {
    const { ContentType } = await s3.getObject(params).promise();
    console.log('CONTENT TYPE:', ContentType);
    console.log('Body: ', ); // I want to print the response body here when a JSON file is uploaded
    return ContentType;
  } catch (err) {
    console.log(err);
    const message = `Error getting object ${key} from bucket ${bucket}. Error: ` + err;
    console.log(message);
    throw new Error(message);
  }
};
The return value of getObject contains a Body field:

const { ContentType, Body } = await s3.getObject(params).promise();
console.log('CONTENT TYPE:', ContentType);
console.log('Body: ', Body);
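In SDK v2 on Node.js, Body comes back as a Buffer, so printing an uploaded JSON file usually means decoding and parsing it first. A short sketch, assuming the object is UTF-8 encoded JSON:

// Body is a Buffer; decode it before parsing (assumes UTF-8 JSON content).
const json = JSON.parse(Body.toString('utf-8'));
console.log('Parsed body:', json);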
I am trying to disable users from my Node.js backend using the AWS SDK v3. Everything works normally except for the disable/enable user commands.
I have tried everything I know; here is my code snippet:
const {
  CognitoIdentityProviderClient,
  AdminEnableUserCommand,
  AdminDisableUserCommand,
} = require("@aws-sdk/client-cognito-identity-provider");

const aws_creds = {
  accessKeyId: process.env.ACCESS_KEY_ID,
  secretAccessKey: process.env.SECRET_ACCESS_KEY,
};

const cognitoConfig = {
  region: process.env.REGION,
  credentials: aws_creds,
};

const cognito_v3 = new CognitoIdentityProviderClient(cognitoConfig);
Then, in my route, I am running this try/catch block:
try {
  if (status === "enable") {
    const enableUserCommand = new AdminEnableUserCommand(userDetails);
    const enableUserResults = await cognito_v3.send(enableUserCommand);
    return res.status(200).json(enableUserResults);
  }
  const disableUserCommand = new AdminDisableUserCommand(userDetails);
  const disableUserResults = await cognito_v3.send(disableUserCommand);
  return res.status(200).json(disableUserResults);
} catch (err) {
  console.log(err);
  return res.status(400).json(err);
}
However, the console log for err shows this:
TypeError: Cannot read property 'byteLength' of undefined
    at Object.fromArrayBuffer (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\util-buffer-from\dist\cjs\index.js:6:60)
    at castSourceData (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\hash-node\dist\cjs\index.js:29:31)
    at Hash.update (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\hash-node\dist\cjs\index.js:12:26)
    at hmac (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\signature-v4\dist\cjs\credentialDerivation.js:60:10)
    at Object.getSigningKey (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\signature-v4\dist\cjs\credentialDerivation.js:32:29)
    at SignatureV4.getSigningKey (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\signature-v4\dist\cjs\SignatureV4.js:139:39)
    at SignatureV4.signRequest (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\signature-v4\dist\cjs\SignatureV4.js:98:73)
    at async C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\middleware-signing\dist\cjs\middleware.js:14:22
    at async StandardRetryStrategy.retry (C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\middleware-retry\dist\cjs\defaultStrategy.js:56:46)
    at async C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\node_modules\@aws-sdk\middleware-logger\dist\cjs\loggerMiddleware.js:6:22
    at async C:\Users\SER-01\Documents\ctr\simsim\simsim-backend\lambdas\simsim-auth\src\routes\auth.js:232:33 {
  '$metadata': { attempts: 1, totalRetryDelay: 0 }
}
Any idea why?
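One hedged guess based on the trace: the failure happens inside SigV4 signing while hashing the secret key, and "byteLength of undefined" there typically means the credentials resolved to undefined, for example because the environment variables were never loaded in that process. A quick guard to confirm, using the same variable names as above:

// Fail fast if the env vars are missing; undefined credentials are a common
// cause of "Cannot read property 'byteLength' of undefined" during signing.
if (!process.env.ACCESS_KEY_ID || !process.env.SECRET_ACCESS_KEY) {
  throw new Error('AWS credentials are not set in the environment');
}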
I am following the solution proposed by Trivikr for adding support for the s3.getSignedUrl API, which is not currently available in the newer v3. I am trying to create a signed URL for getting an object from a bucket.
Just for convenience, the code is added below:
const { S3, GetObjectCommand } = require("@aws-sdk/client-s3"); // 1.0.0-gamma.2 version
const { S3RequestPresigner } = require("@aws-sdk/s3-request-presigner"); // 0.1.0-preview.2 version
const { createRequest } = require("@aws-sdk/util-create-request"); // 0.1.0-preview.2 version
const { formatUrl } = require("@aws-sdk/util-format-url"); // 0.1.0-preview.1 version
const fetch = require("node-fetch");

(async () => {
  try {
    const region = "us-east-1";
    const Bucket = `SOME_BUCKET_NAME`;
    const Key = `SOME_KEY_VALUE`;
    const credentials = {
      accessKeyId: ACCESS_KEY_HERE,
      secretAccessKey: SECRET_KEY_HERE,
      sessionToken: SESSION_TOKEN_HERE
    };
    const S3Client = new S3({ region, credentials, signatureVersion: 'v4' });
    console.log('1'); // for quick debugging
    const signer = new S3RequestPresigner({ ...S3Client.config });
    console.log('2');
    const request = await createRequest(
      S3Client,
      new GetObjectCommand({ Key, Bucket })
    );
    console.log('3');
    let signedUrl = formatUrl(await signer.presign(request));
    console.log(signedUrl);
    let response = await fetch(signedUrl);
    console.log("Response", response);
  } catch (e) {
    console.error(e);
  }
})();
I successfully create the S3Client and signer, but on creating the request I get the following error:
clientStack.concat(...).filter is not a function
Is there anything I am doing wrong?
Please also note that I am using webpack for bundling.
To add my example in TypeScript:
import { S3Client, GetObjectCommand, S3ClientConfig } from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';

const s3Configuration: S3ClientConfig = {
  credentials: {
    accessKeyId: '<ACCESS_KEY_ID>',
    secretAccessKey: '<SECRET_ACCESS_KEY>'
  },
  region: '<REGION>',
};
const s3 = new S3Client(s3Configuration);
const command = new GetObjectCommand({ Bucket: '<BUCKET>', Key: '<KEY>' });
const url = await getSignedUrl(s3, command, { expiresIn: 15 * 60 }); // expiresIn is in seconds
console.log('Presigned URL: ', url);
RESOLVED
I ended up successfully making the signed URLs by installing the beta versions of the packages rather than the preview (default) ones.
I have been trying to launch a new EC2 instance, as well as add a piece of string data to my SQS queue, through Lambda in response to an object-upload event in my S3 bucket.
I have been able to update my SQS queue successfully, but have been unable to launch the new EC2 instance. Despite setting the Lambda timeout to the maximum of 5 minutes and increasing the memory allocation, an operation-timeout error keeps surfacing.
My code is below. Can anyone point out the potential causes of such an error? While I have included my whole piece of code for reference, the part concerning the launch is towards the end.
Thank you so much!
console.log('Loading function');

var fs = require('fs');
var async = require('async');
var aws = require('aws-sdk');
var s3 = new aws.S3({ apiVersion: '2006-03-01' });
var sqs = new aws.SQS({ apiVersion: '2012-11-05' });
var ecs = new aws.ECS({ apiVersion: '2014-11-13' });
var ec2 = new aws.EC2({ apiVersion: '2015-10-01' });

// Check if the given key suffix matches a suffix in the whitelist.
// Return true if it matches, false otherwise.
exports.checkS3SuffixWhitelist = function(key, whitelist) {
  if (!whitelist) { return true; }
  if (typeof whitelist == 'string') { return key.match(whitelist + '$'); }
  if (Object.prototype.toString.call(whitelist) === '[object Array]') {
    for (var i = 0; i < whitelist.length; i++) {
      if (key.match(whitelist[i] + '$')) { return true; }
    }
    return false;
  }
  console.log(
    'Unsupported whitelist type (' + Object.prototype.toString.call(whitelist) +
    ') for: ' + JSON.stringify(whitelist)
  );
  return false;
};
exports.handler = function(event, context) {
  // console.log('Received event:', JSON.stringify(event, null, 2));
  console.log('Received event:');

  // Read in the configuration file
  var config = JSON.parse(fs.readFileSync('config.json', 'utf8'));
  if (!config.hasOwnProperty('s3_key_suffix_whitelist')) {
    config.s3_key_suffix_whitelist = false;
  }
  console.log('Config: ' + JSON.stringify(config));

  var name = event.Records[0].s3.object.key;
  if (!exports.checkS3SuffixWhitelist(name, config.s3_key_suffix_whitelist)) {
    context.fail('Suffix for key: ' + name + ' is not in the whitelist');
  }

  // Get the object from the event and show its key
  var bucket = event.Records[0].s3.bucket.name;
  var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
  var params = {
    Bucket: bucket,
    Key: key
  };
  s3.getObject(params, function(err, data) {
    if (err) {
      console.log(err);
      var message = "Error getting object " + key + " from bucket " + bucket +
        ". Make sure they exist and your bucket is in the same region as this function.";
      console.log(message);
      context.fail(message);
    } else {
      console.log('CONTENT TYPE:', key);
      context.succeed(key);
    }
  });
  // Send the image key as a message to SQS and start a new instance on EC2
  async.waterfall([
    function(next) {
      var params = {
        MessageBody: JSON.stringify(event),
        QueueUrl: config.queue
      };
      console.log("IN QUEUE FUNCTION");
      sqs.sendMessage(params, function(err, data) {
        if (err) { console.warn('Error while sending message: ' + err); }
        else { console.info('Message sent, ID: ' + data.MessageId); }
        next(err);
      });
    },
    function(next) {
      console.log("INITIALIZING EC2");
      var params = {
        ImageId: 'ami-e559b485',
        MinCount: 1,
        MaxCount: 1,
        DryRun: true,
        InstanceType: 't2.micro',
        KeyName: 'malpem2102',
        SubnetId: 'subnet-e8607e8d'
      };
      ec2.runInstances(params, function(err, data) {
        if (err) {
          console.log(err, err.stack);
          context.fail('Error', "Error getting file: " + err);
          return;
        } else {
          var instanceId = data.Instances[0].InstanceId;
          console.log("Created instance ", instanceId);
          context.succeed("Created instance");
        }
      });
    }
  ], function(err) {
    if (err) {
      context.fail('An error has occurred: ' + err);
    } else {
      context.succeed('Successfully processed Amazon S3 URL.');
    }
  });
};
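Two hedged observations on the code above, offered as likely causes rather than a confirmed diagnosis: runInstances is called with DryRun: true, which only validates permissions and always returns an error (DryRunOperation on success), so no instance can ever launch until it is removed; and context.succeed(key) inside the s3.getObject callback can end the invocation before the async.waterfall finishes. With the dry run disabled, the launch parameters would look like this:

// Sketch: same parameters as in the question, with the dry run turned off.
var params = {
  ImageId: 'ami-e559b485',
  MinCount: 1,
  MaxCount: 1,
  DryRun: false, // DryRun: true never launches; it only checks permissions
  InstanceType: 't2.micro',
  KeyName: 'malpem2102',
  SubnetId: 'subnet-e8607e8d'
};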