Lambda function triggered continuously by ObjectRemoved event

I created a Lambda function that deletes a given thumbnail, and I set a trigger on the ObjectRemoved event so that the thumbnail image is automatically deleted whenever the original file is deleted from a given S3 bucket.
However, while analyzing the monthly bill I realized that, for some reason, this Lambda had been invoked hundreds of millions of times and would not stop being triggered. I had to disable the trigger on the Lambda to stop it.
The problem is that I have not created or deleted any file in that bucket, so I wonder how the Lambda function could keep being triggered continuously.
Any help is appreciated.
Thanks.
Edit:
My AWS Lambda code
var aws = require('aws-sdk');
var s3 = new aws.S3();

exports.handler = function (event, context) {
    console.log('Received event:', JSON.stringify(event, null, 2));

    // Get the bucket and key of the object that was removed.
    const bucket = event.Records[0].s3.bucket.name;
    const key = event.Records[0].s3.object.key;

    // Originals live under "<folder>/<fileName>"; the matching
    // thumbnail lives under "<folder>/thumbnails/<fileName>".
    const path = key.split('/');
    const folder = path[0];
    const fileName = path[1];
    const deleteKey = folder + '/thumbnails/' + fileName;

    // Delete the thumbnail from the same bucket.
    s3.deleteObject({ Bucket: bucket, Key: deleteKey }, function (err, data) {
        if (err) {
            console.log('Error deleting object ' + deleteKey + ' from bucket ' + bucket +
                '. Make sure they exist and your bucket is in the same region as this function.');
            context.fail('Error deleting file: ' + err);
        } else {
            context.succeed();
        }
    });
};
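One thing worth noting about the handler above: it deletes objects in the same bucket that the ObjectRemoved notification is attached to, so if that notification is configured on the whole bucket with no prefix or suffix filter, the function's own deleteObject calls can generate further ObjectRemoved events and keep re-triggering it. A minimal guard, as a sketch (it assumes thumbnails live under a ".../thumbnails/..." prefix, as in the code above), is to ignore events whose key already points at a thumbnail; alternatively, scope the S3 event notification with a prefix/suffix filter so thumbnail keys never reach the function.

// Sketch of a guard at the top of the handler: skip ObjectRemoved events for keys
// that are already thumbnails, so the function's own deleteObject call cannot
// re-trigger it (assumes the ".../thumbnails/..." layout used above).
var removedKey = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, ' '));
if (removedKey.split('/').indexOf('thumbnails') !== -1) {
    console.log('Ignoring ObjectRemoved event for thumbnail key: ' + removedKey);
    context.succeed();
    return;
}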

Related

Lambda function is not triggered for all the S3 image uploads

I have an S3 bucket as below:
myBucket
a/
b/
c/
where myBucket is an S3 bucket and a, b, c are the key prefixes (folders) inside that bucket.
I upload images into a/. The S3 event notification sends a message to SQS, which in turn triggers a Lambda function that removes the image background and uploads the result into the b/ folder.
The problem is that if, for example, I upload a folder containing around 26 images to S3, the Lambda is triggered for only 22 or 23 of them, and only those images get processed.
For some reason S3 is not triggering the Lambda for all of the images, or is this something I should configure in my Lambda function?
Here is my function code:
exports.handler = async (event, context, callback) => {
    try {
        console.log(event.Records[0]);
        var json = JSON.parse(event.Records[0]['body']);
        console.log('json: ' + json);
        json = JSON.parse(json['Message']);
        json = json['Records'][0]['s3'];
        console.log(json);
        var srcBucket = json['bucket']['name'];
        console.log('srcBucket: ' + srcBucket);
        var srcKey = decodeURIComponent(json['object']['key'].replace(/\+/g, ' '));
        console.log('srcKey: ' + srcKey);
        var str = (srcKey.split('/').pop()).split('.')[0];
        console.log('str: ' + str);
        if (str != '') {
            var folderPath = srcKey.substr(srcKey.indexOf('/') + 1).split('.')[0];
            folderPath = folderPath.substring(0, folderPath.lastIndexOf('/'));
            console.log('folderPath: ' + folderPath);
            const params1 = { Bucket: srcBucket, Key: srcKey };
            var origimage = await s3.getObject(params1).promise();
            var destObject = await origimage.Body;
            var destKey = 'removebg/' + folderPath + '/' + str + '.jpg';
            var options = {
                'method': 'POST',
                'url': 'https://api.remove.bg/v1.0/removebg',
                'headers': {
                    'X-Api-Key': 'xxxxxxxxxxx'
                },
                formData: {
                    'image_file': destObject,
                    'size': 'auto'
                },
                encoding: null
            };
            request(options, function(error, response, body) {
                if (error) {
                    console.log(error);
                    sendmessage(error, 'Error removing image background', arn, srcBucket + '/' + srcKey, destBucket + destKey);
                }
                var params = { Bucket: destBucket, Key: destKey, Body: body };
                s3.upload(params, function(err, data) {
                    if (err) {
                        console.log('Error uploading data: ', err);
                        sendmessage(err, 'Error uploading transparent image to s3', arn, srcBucket + '/' + srcKey, destBucket + destKey);
                    }
                    else { console.log('Successfully uploaded data to ' + destBucket); }
                });
            });
        }
    }
    catch (e) {
        console.log(e);
    }
    callback(null, 'All done!');
};
Please let me know. Thanks in advance.
I think your problem lies in these lines:
console.log(event.Records[0]);
var json = JSON.parse(event.Records[0]['body']);
console.log('json: '+json);
json = JSON.parse(json['Message']);
json = json['Records'][0]['s3'];
Your function is only looking at the first record that is provided to it. Multiple records can be delivered to the Lambda function in a single invocation (an SQS event source sends messages in batches), so your function should loop through the Records entries and process every event that is provided.
It should do something like:
for (const record of event.Records) {
    console.log(record);
    var json = JSON.parse(record['body']);
    console.log('json: ' + json);
    json = JSON.parse(json['Message']);
    json = json['Records'][0]['s3'];
    // ... process this record's S3 object here ...
}

Lambda not deleting DynamoDB records when triggered with CloudWatch Events

I am trying to delete items in my DynamoDB table using a Lambda triggered by a CloudWatch Events rule. The Lambda scans the DynamoDB table and deletes all expired items. My code seems to work when I test it with a test event in the console (i.e. it deletes all the expired items), but when the Lambda is triggered automatically by the CloudWatch event it does not delete anything, even though I can see that the Lambda is being invoked.
exports.handler = async function () {
    var params = {
        TableName: TABLE_NAME
    }
    try {
        const data = await docClient.scan(params).promise();
        const items = data.Items;
        if (items.length != 0) {
            Promise.all(items.map(async (item) => {
                const expirationDT = new Date(item.ExpiresAt);
                const now = new Date();
                if (now > expirationDT) {
                    console.log("Deleting item with otc: " + item.Otc + " and name: " + item.SecretName);
                    const deleteParams = {
                        TableName: TABLE_NAME,
                        Key: {
                            "Otc": item.Otc,
                            "SecretName": item.SecretName,
                        },
                    };
                    try {
                        await docClient.delete(deleteParams).promise();
                    } catch (err) {
                        console.log("The Secret was not deleted due to: ", err.message);
                    }
                }
            }))
        }
    } catch (err) {
        console.log("The items were not able to be scanned due to: ", err.message)
    }
}
I know that using DynamoDB TTL is an option, but I need these deletions to be somewhat precise, and TTL can sometimes take up to 48 hours; I am aware I can use a filter when retrieving records to counteract that. I am just wondering what is wrong with my code here.
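For reference, such a filter could look roughly like this with the DocumentClient; it is only a sketch and assumes ExpiresAt is stored as an ISO-8601 string.

// Sketch: let DynamoDB filter out unexpired items during the scan
// (assumes ExpiresAt is stored as an ISO-8601 string such as "2023-01-01T00:00:00Z").
const scanParams = {
    TableName: TABLE_NAME,
    FilterExpression: 'ExpiresAt <= :now',
    ExpressionAttributeValues: { ':now': new Date().toISOString() }
};
const expiredItems = (await docClient.scan(scanParams).promise()).Items;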
You need to await the Promise.all call, or your Lambda will end execution before it resolves:
await Promise.all(items.map(async (item) => {
const expirationDT = new Date(item.ExpiresAt);
const now = new Date();
// ...

How to format a bucket name in AWS for saving

Hi, I have a Lambda function that is trying to save to a bucket:
exports.handler = async (event) => {
    console.log('starting');
    const { Client } = require('pg');
    const client = new Client();
    const AWS = require('aws-sdk');
    const s3 = new AWS.S3();
    var bucketName = 'arn:aws:s3:us-east-1::my_bucket_name';
    var keyName = 'prova.txt';
    var content = 'This is a sample text file';
    var params = { 'Bucket': bucketName, 'Key': keyName, 'Body': content };
    try {
        console.log('saving...');
        const data = await s3.putObject(params).promise();
        console.log("Successfully saved object to " + bucketName + "/" + keyName);
    } catch (err) {
        console.log('err');
        console.log(err);
    }
};
But I get the error below. Do you know what I'm doing wrong?
message: "Access point ARN resource should begin with 'accesspoint/'",
code: 'InvalidAccessPointARN',
time: 2020-03-21T12:38:33.370Z
}
END RequestId: 31aba537-c25a-45bf-877e-0be8e8f98c95
REPORT RequestId: 31aba537-c25a-45bf-877e-0be8e8f98c95 Duration: 4543.02 ms Billed Duration: 4600 ms Memory Size: 128 MB Max Memory Used: 83 MB Init Duration: 107.67 ms
Your bucket is not accessible at the moment.
Go to your S3 bucket, then navigate to the "Access points" tab.
Create an access point from there.
I believe you need an Internet access point; to keep things simple, untick "Block all public access" (not recommended for security).
Once it is created, open the access point details and use the "Access point ARN" from there.
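For reference, a rough sketch of what the Bucket parameter can look like; the region, account ID, and access point name below are placeholders.

// Option 1: a full access point ARN, as suggested above. It must include the
// account ID and the 'accesspoint/' resource prefix (placeholder values shown).
var params = {
    Bucket: 'arn:aws:s3:us-east-1:123456789012:accesspoint/my-access-point',
    Key: 'prova.txt',
    Body: 'This is a sample text file'
};

// Option 2: for a plain bucket, simply pass the bucket name itself.
var paramsPlainBucket = {
    Bucket: 'my_bucket_name',
    Key: 'prova.txt',
    Body: 'This is a sample text file'
};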

UpdateFunctionCode in Lambda does not update the code

I have followed this blog to update the code of a Lambda function using a JAR file stored in an S3 bucket. The execution succeeded, but it does not update the code of the target Lambda function.
Code snippet:
console.log('Loading function');

var AWS = require('aws-sdk');
var lambda = new AWS.Lambda();

exports.handler = function(event, context) {
    var functionName = "runJarFile";
    var bucket = "jarfiletest2";
    var key = "lambda-java-example-0.0.1-SNAPSHOT.jar.zip";
    console.log("uploaded to lambda function: " + functionName);
    var params = {
        FunctionName: functionName,
        S3Key: key,
        S3Bucket: bucket,
        Publish: true
    };
    lambda.updateFunctionCode(params, function(err, data) {
        if (err) {
            console.log(err, err.stack);
            context.fail(err);
        } else {
            console.log(data);
            context.succeed(data);
        }
    });
};
Thanks in advance
It's difficult to comment on this without knowing the details of the destination function. What is the output of the GetFunction API call for that Lambda, before and after calling UpdateFunctionCode?
I am interested in the SHA-256 hash of the code and the last-modified timestamp from that API call, before and after calling UpdateFunctionCode:
{
...
"CodeSha256": "5tT2qgzYUHoqwR616pZ2dpkn/0J1FrzJmlKidWaaCgk=",
"LastModified": "2019-09-24T18:20:35.054+0000"
...
}
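A rough sketch of how those two fields can be read with the Node.js SDK, using the function name from the question:

var AWS = require('aws-sdk');
var lambda = new AWS.Lambda();

// Print the deployed code hash and last-modified timestamp of the target function,
// before and after running the updater, to see whether the code actually changed.
lambda.getFunction({ FunctionName: 'runJarFile' }, function (err, data) {
    if (err) {
        console.log(err, err.stack);
    } else {
        console.log('CodeSha256:   ' + data.Configuration.CodeSha256);
        console.log('LastModified: ' + data.Configuration.LastModified);
    }
});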
If the values are exactly the same, can you add this check, as per the blog post, to see whether the bucket and the object exist?
if (bucket == "YOUR_BUCKET_NAME" && key == "YOUR_CODE.zip" && version) {
    // your code
} else {
    context.succeed("skipping zip " + key + " in bucket " + bucket + " with version " + version);
}
Please try removing 'Publish: true' so that the latest version is used, not a specific published version.

Creating a Lambda function to create thumbnails for an AWS S3 bucket is not working

I am learning to use AWS Lambda functions. What I am trying to do is: when I upload an image (JPEG) file to the S3 bucket, the image should be resized. But it is not working. See what I have done below.
I created a folder, then created a node_modules folder inside it, and then created a file called CreateThumbnail.js inside the main folder.
This is CreateThumbnail.js:
// dependencies
var async = require('async');
var AWS = require('aws-sdk');
var gm = require('gm')
    .subClass({ imageMagick: true }); // Enable ImageMagick integration.
var util = require('util');

// constants
var MAX_WIDTH = 100;
var MAX_HEIGHT = 100;

// get reference to S3 client
var s3 = new AWS.S3();

exports.handler = function(event, context, callback) {
    // Read options from the event.
    console.log("Reading options from event:\n", util.inspect(event, { depth: 5 }));
    var srcBucket = event.Records[0].s3.bucket.name;
    // Object key may have spaces or unicode non-ASCII characters.
    var srcKey =
        decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
    var dstBucket = srcBucket + "resized";
    var dstKey = "resized-" + srcKey;

    // Sanity check: validate that source and destination are different buckets.
    if (srcBucket == dstBucket) {
        callback("Source and destination buckets are the same.");
        return;
    }

    // Infer the image type.
    var typeMatch = srcKey.match(/\.([^.]*)$/);
    if (!typeMatch) {
        callback("Could not determine the image type.");
        return;
    }
    var imageType = typeMatch[1];
    if (imageType != "jpg" && imageType != "png") {
        callback(`Unsupported image type: ${imageType}`);
        return;
    }

    // Download the image from S3, transform, and upload to a different S3 bucket.
    async.waterfall([
        function download(next) {
            // Download the image from S3 into a buffer.
            s3.getObject({
                Bucket: srcBucket,
                Key: srcKey
            },
            next);
        },
        function transform(response, next) {
            gm(response.Body).size(function(err, size) {
                // Infer the scaling factor to avoid stretching the image unnaturally.
                var scalingFactor = Math.min(
                    MAX_WIDTH / size.width,
                    MAX_HEIGHT / size.height
                );
                var width = scalingFactor * size.width;
                var height = scalingFactor * size.height;

                // Transform the image buffer in memory.
                this.resize(width, height)
                    .toBuffer(imageType, function(err, buffer) {
                        if (err) {
                            next(err);
                        } else {
                            next(null, response.ContentType, buffer);
                        }
                    });
            });
        },
        function upload(contentType, data, next) {
            // Stream the transformed image to a different S3 bucket.
            s3.putObject({
                Bucket: dstBucket,
                Key: dstKey,
                Body: data,
                ContentType: contentType
            },
            next);
        }
    ], function (err) {
        if (err) {
            console.error(
                'Unable to resize ' + srcBucket + '/' + srcKey +
                ' and upload to ' + dstBucket + '/' + dstKey +
                ' due to an error: ' + err
            );
        } else {
            console.log(
                'Successfully resized ' + srcBucket + '/' + srcKey +
                ' and uploaded to ' + dstBucket + '/' + dstKey
            );
        }
        callback(null, "message");
    });
};
Then I zipped the folder, created a function in the AWS Lambda console, and uploaded the zip file from the UI as shown in the screenshot.
Then I added the S3 trigger as in the screenshot.
I also created the role with the correct permissions and policies.
But when I upload a JPG file to the S3 bucket, it is neither resized nor is a thumbnail created. What could be wrong here?
This is the function policy:
Lambda functions send their debug information to Amazon CloudWatch Logs. Examine the log file to determine what went wrong.
If there is no log, then either the Lambda function never executed, or the Lambda function was not given sufficient permissions to write to CloudWatch Logs.
See: Accessing Amazon CloudWatch Logs for AWS Lambda
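For reference, the execution role typically needs CloudWatch Logs permissions along these lines (roughly what the managed AWSLambdaBasicExecutionRole policy grants):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
            "Resource": "arn:aws:logs:*:*:*"
        }
    ]
}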