I have followed this blog to update the code of a Lambda function using a jar file stored in an S3 bucket. The execution succeeded, but it is not updating the code of the target Lambda function.
Code snippet
console.log('Loading function');
var AWS = require('aws-sdk');
var lambda = new AWS.Lambda();

exports.handler = function(event, context) {
    var functionName = "runJarFile";
    var bucket = "jarfiletest2";
    var key = "lambda-java-example-0.0.1-SNAPSHOT.jar.zip";
    console.log("uploaded to lambda function: " + functionName);
    var params = {
        FunctionName: functionName,
        S3Key: key,
        S3Bucket: bucket,
        Publish: true
    };
    lambda.updateFunctionCode(params, function(err, data) {
        if (err) {
            console.log(err, err.stack);
            context.fail(err);
        } else {
            console.log(data);
            context.succeed(data);
        }
    });
};
Thanks in advance
It's difficult to comment on this without knowing the details of the destination function. What's the output of the GetFunction API call for that Lambda, before and after calling UpdateFunctionCode?
I am interested to see the SHA-256 hash of the code, and the last-modified timestamp of that API call before and after calling UpdateFunctionCode:
{
...
"CodeSha256": "5tT2qgzYUHoqwR616pZ2dpkn/0J1FrzJmlKidWaaCgk=",
"LastModified": "2019-09-24T18:20:35.054+0000"
...
}
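For reference, a minimal sketch (using the same Node.js SDK as your snippet, with the function name taken from it) that prints those two fields; run it once before and once after the update and compare:

var lambda = new AWS.Lambda();
lambda.getFunction({ FunctionName: "runJarFile" }, function (err, data) {
    if (err) console.log(err, err.stack);
    else console.log(data.Configuration.CodeSha256, data.Configuration.LastModified);
});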
If the values are exactly the same, can you add this check, as per the blog post, to verify that the bucket and the object exist?
if (bucket == "YOUR_BUCKET_NAME" && key == "YOUR_CODE.zip" && version) {
    // your code
} else {
    context.succeed("skipping zip " + key + " in bucket " + bucket + " with version " + version);
}
Please try removing 'Publish: true' so the update goes to the latest version ($LATEST) rather than publishing a fixed version.
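That is, a params object along these lines (a sketch based on your snippet):

var params = {
    FunctionName: functionName,
    S3Key: key,
    S3Bucket: bucket
    // Publish omitted: the new code updates $LATEST without publishing a fixed version
};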
I am trying to create a Lambda function which will check if a particular repository exists in CodeCommit.
The Lambda service role has admin privileges. Below is the code.
The Lambda is unable to call the getRepository method. It is neither throwing an exception nor succeeding. Any help on this?
console.log("Before calling cc") is the last printed statement. After that I am not getting any success or error log.
const CloudFormation = require('aws-sdk/clients/cloudformation');
const Codecommit = require('aws-sdk/clients/codecommit');

exports.handler = async (event) => {
    try {
        console.log("event", event);
        console.log("event", JSON.stringify(event));
        var repositoryName = event.detail.repositoryName;
        var cfn = new CloudFormation({
            region: "ap-northeast-1"
        });
        var cc = new Codecommit({
            region: "ap-northeast-1"
        });
        const stackName = repositoryName + "-infra-stack";
        var cloneUrl;
        console.log("RepositoryName" + repositoryName);
        console.log("StackName" + stackName);
        var codeCommitParam = {
            repositoryName: repositoryName
        };
        try {
            console.log("Before calling cc");
            cc.getRepository(codeCommitParam, function(err, data) {
                if (err) {
                    console.log(err, err.stack);
                } else {
                    console.log(data.repositoryMetadata.cloneUrlHttp);
                    cloneUrl = data.repositoryMetadata.cloneUrlHttp;
                    console.log("Clone url " + cloneUrl);
                    checkStackDescription();
                }
            });
        } catch (error) {
            console.log(error);
        }
    } catch (error) {
        console.log(error);
    }
};
I believe this comes down to the callback in the Lambda being invoked asynchronously, so the Lambda finishes before the callback gets a chance to process the response.
Try awaiting the call instead, using the syntax below.
console.log("Before calling cc")
let result = await cc.getRepository(codeCommitParam).promise();
console.log(result);
Be aware that the promise rejects if the call fails, so wrap the await in a try/catch if you need to handle errors.
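Putting it together, a minimal sketch of the relevant part of the handler with the rejection handled (event shape, client, and params taken from your snippet):

try {
    console.log("Before calling cc");
    const result = await cc.getRepository(codeCommitParam).promise();
    console.log(result.repositoryMetadata.cloneUrlHttp);
} catch (err) {
    console.log(err, err.stack); // the promise rejects on error
}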
I am trying to use the AWS mobile backend (using a Lambda function) to insert into DynamoDB (also configured in the mobile backend), but with no success so far.
The relevant code:
'use strict';
console.log("Loading function");
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({ region: process.env.MOBILE_HUB_PROJECT_REGION });

exports.handler = function(event, context, callback) {
    var responseCode = 200;
    var requestBody, pathParams, queryStringParams, headerParams, stage,
        stageVariables, cognitoIdentityId, httpMethod, sourceIp, userAgent,
        requestId, resourcePath;
    console.log("request: " + JSON.stringify(event));

    // Request Body
    requestBody = event.body;
    if (requestBody !== undefined && requestBody !== null) {
        // Set 'test-status' field in the request to test sending a specific response status code (e.g., 503)
        responseCode = JSON.parse(requestBody)['test-status'];
    }

    // Path Parameters
    pathParams = event.path;
    // Query String Parameters
    queryStringParams = event.queryStringParameters;
    // Header Parameters
    headerParams = event.headers;

    if (event.requestContext !== null && event.requestContext !== undefined) {
        var requestContext = event.requestContext;
        // API Gateway Stage
        stage = requestContext.stage;
        // Unique Request ID
        requestId = requestContext.requestId;
        // Resource Path
        resourcePath = requestContext.resourcePath;
        var identity = requestContext.identity;
        // Amazon Cognito User Identity
        cognitoIdentityId = identity.cognitoIdentityId;
        // Source IP
        sourceIp = identity.sourceIp;
        // User-Agent
        userAgent = identity.userAgent;
    }

    // API Gateway Stage Variables
    stageVariables = event.stageVariables;
    // HTTP Method (e.g., POST, GET, HEAD)
    httpMethod = event.httpMethod;

    // TODO: Put your application logic here...
    let params = {
        Item: {
            "prop1": 0,
            "prop2": "text"
        },
        TableName: "testTable"
    };
    docClient.put(params, function(data, err) {
        if (err)
            responseCode = 500;
        else {
            responseCode = 200;
            context.succeed(data);
        }
    });

    // For demonstration purposes, we'll just echo these values back to the client
    var responseBody = {
        requestBody: requestBody,
        pathParams: pathParams,
        queryStringParams: queryStringParams,
        headerParams: headerParams,
        stage: stage,
        stageVariables: stageVariables,
        cognitoIdentityId: cognitoIdentityId,
        httpMethod: httpMethod,
        sourceIp: sourceIp,
        userAgent: userAgent,
        requestId: requestId,
        resourcePath: resourcePath
    };
    var response = {
        statusCode: responseCode,
        headers: {
            "x-custom-header": "custom header value"
        },
        body: JSON.stringify(responseBody)
    };
    console.log("response: " + JSON.stringify(response));
    context.succeed(response);
};
This doesn't put the item into the table for some reason.
I granted the necessary permissions via the function's role; is there anything I am missing?
**responseCode is only for testing purposes.
Edit:
Tried the approach from "AWS node.js lambda request dynamodb but no response (no err, no return data)" and that doesn't work either.
Edit2:
Added the full handler code. (It is the default code generated when creating your first AWS Lambda.)
I have refactored some bits of your code to make it much simpler and use async/await (make sure to select Node 8.10 or later as the runtime for your function) instead of callbacks. I also got rid of the context and callback parameters, as they were needed for older versions of Node.js; once you're on Node 8+, async/await should be the default option.
Also, it is possible to chain .promise() onto docClient.put, so you can easily await on it, making your code much simpler. I have left only the DynamoDB part (which is what is relevant to your question):
'use strict';
console.log("Loading function");
const AWS = require('aws-sdk');
const docClient = new AWS.DynamoDB.DocumentClient({ region: process.env.MOBILE_HUB_PROJECT_REGION });

exports.handler = async (event) => {
    let params = {
        Item: {
            "prop1": 0,
            "prop2": "text"
        },
        TableName: "testTable"
    };
    try {
        await docClient.put(params).promise();
    } catch (e) {
        console.log(e);
        return {
            message: e.message
        };
    }
    return { message: 'Data inserted successfully' };
};
Things to keep in mind if it still does not work:
Make sure your Lambda function has the right permissions to insert items into DynamoDB (AmazonDynamoDBFullAccess will do it).
You ALWAYS have to provide the partition key when inserting items into DynamoDB. In your example, the JSON only has two properties: prop1 and prop2. If neither of them is the partition key, your code will certainly fail (see the sketch after this list).
Make sure your table also exists.
If your code fails, just check the CloudWatch logs, as any exception is now captured and printed out on the console.
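For illustration, a minimal sketch of a put that includes a partition key (assuming, hypothetically, that the table's partition key is a string attribute named id):

// Hypothetical: assumes testTable's partition key is a string attribute named "id"
let params = {
    TableName: "testTable",
    Item: {
        "id": "item-123",  // partition key: must always be present
        "prop1": 0,
        "prop2": "text"
    }
};
await docClient.put(params).promise();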
The reason why no data is written to the table is that the call to DynamoDB put is asynchronous and will return by calling your callback. But during that time, the rest of the code continues to execute, and your function eventually finishes before the call to DynamoDB has a chance to complete.
You can use the await / async keywords to make your code synchronous:
async function writeToDynamoDB(params) {
    return new Promise((resolve, reject) => {
        docClient.put(params, function(err, data) { // aws-sdk callbacks are (err, data)
            if (err)
                reject(err);
            else
                resolve(data);
        });
    });
}

let params = ...
var data = await writeToDynamoDB(params); // must itself run inside an async function
You can find sample code I wrote (in TypeScript) at https://github.com/sebsto/maxi80-alexa/blob/master/lambda/src/DDBController.ts
I created a Lambda function for deleting a given thumbnail, and I set a trigger on the ObjectRemoved event in order to automatically delete a thumbnail image when the original file is deleted from a given AWS S3 bucket.
However, by analyzing the monthly bill, I realized that for some reason the Lambda was called hundreds of millions of times and wouldn't stop being triggered. I had to disable the trigger on the Lambda to stop it.
The problem is I have not created or deleted any file in that bucket, so I wonder how it is possible that the Lambda function continued to be triggered continuously.
Any help is appreciated.
Thanks.
Edit:
My AWS Lambda code
var aws = require('aws-sdk');
var s3 = new aws.S3();

exports.handler = function (event, context) {
    console.log('Received event:', JSON.stringify(event, null, 2));

    // Get the object from the event and show its content type
    const bucket = event.Records[0].s3.bucket.name;
    const key = event.Records[0].s3.object.key;
    const path = key.split('/');
    const folder = path[0];
    const fileName = path[1];
    const deleteKey = folder + '/thumbnails/' + fileName;

    s3.deleteObject({ Bucket: bucket, Key: deleteKey }, function (err, data) {
        if (err) {
            console.log('Error deleting object ' + deleteKey + ' from bucket ' + bucket + '. Make sure they exist and your bucket is in the same region as this function.');
            context.fail('Error getting file: ' + err);
        } else {
            context.succeed();
        }
    });
};
Good day guys.
I have a simple question: how do I download an image from an S3 bucket to a Lambda function's temp folder for processing? Basically, I need to attach it to an email (this I can do when testing locally).
I have tried:
s3.download_file(bucket, key, '/tmp/image.png')
as well as (not sure which parameters will help me get the job done):
s3.getObject(params, (err, data) => {
    if (err) {
        console.log(err);
        const message = `Error getting object ${key} from bucket ${bucket}.`;
        console.log(message);
        callback(message);
    } else {
        console.log('CONTENT TYPE:', data.ContentType);
        callback(null, data.ContentType);
    }
});
Like I said, simple question, which for some reason I can't find a solution for.
Thanks!
You can get the image using the AWS S3 API, then write it to the /tmp folder using fs.
var fs = require('fs'); // needed for writing to /tmp

var params = { Bucket: "BUCKET_NAME", Key: "OBJECT_KEY" };
s3.getObject(params, function(err, data) {
    if (err) {
        console.error(err.code, "-", err.message);
        return callback(err);
    }
    fs.writeFile('/tmp/filename', data.Body, function(err) {
        if (err) {
            console.log(err.code, "-", err.message);
            return callback(err);
        }
        callback(null, '/tmp/filename'); // file written successfully
    });
});
Out of curiosity, why do you need to write the file in order to attach it? It seems kind of redundant to write the file to disk just so that you can then read it back from disk.
If you're writing it straight to the filesystem you can also do it with streams. It may be a little faster/more memory friendly, especially in a memory-constrained environment like Lambda.
var fs = require('fs');
var path = require('path');

var params = {
    Bucket: "mybucket",
    Key: "image.png"
};

var tempFileName = path.join('/tmp', 'downloadedimage.png');
var tempFile = fs.createWriteStream(tempFileName);

s3.getObject(params).createReadStream().pipe(tempFile);
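One caveat (an assumption about how the file is used afterwards): the pipe completes asynchronously, so if the handler needs to read the file right after, wait for the write stream to finish, e.g.:

tempFile.on('finish', function () {
    // /tmp/downloadedimage.png is now fully written and safe to read
});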
// Using Node.js version 10.0 or later and promises
const fsPromise = require('fs').promises;

try {
    const params = {
        Bucket: 's3Bucket',
        Key: 'file.txt',
    };
    const data = await s3.getObject(params).promise();
    await fsPromise.writeFile('/tmp/file.txt', data.Body);
} catch (err) {
    console.log(err);
}
I was having the same problem, and the issue was that I was using Runtime.NODEJS_12_X in my AWS Lambda.
When I switched over to NODEJS_14_X it started working for me :').
Also, the /tmp prefix is required; it will write directly to /tmp/file.ext.
I am creating a CloudWatch event which, at a specific time in the future, is supposed to call an AWS Lambda function. I am using the AWS Node.js SDK as described here: http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/index.html
The code block to create the CloudWatch event looks like this:
module.exports.createReservationReminder = function (reservationModel, user, restaurant) {
    return new Promise(function (resolve, reject) {
        const ruleName = "rsv_" + reservationModel.reservationId;
        const description = "Reservation reminder of `" + user.name + "` # `" + restaurant.title + "` on `" + reservationModel.time + "`";
        let reservationTime = reservationModel.time;
        let lambdaFunctionName = module.exports.buildLamdaFunctionArn("restaurant");
        let alertTime = moment(reservationTime).tz(AppConfig.defaultTimezone).subtract( // create the alert 45 minutes before the reservation
            45,
            'minutes'
        );
        let lambda = new AWS.Lambda({
            accessKeyId: AppConfig.accessKeyId,
            secretAccessKey: AppConfig.secretAccessKey,
            region: AppConfig.region
        });
        let scheduleExpression1 = "cron(" + alertTime.utc().format('m H D MMM ? YYYY') + ")";
        let ruleParams = {
            Name: ruleName,
            Description: description,
            ScheduleExpression: scheduleExpression1,
            State: 'ENABLED',
        };
        cloudwatchevents.deleteRule({ Name: ruleName }, function (err, deleteRuleData) { // remove if a previous rule was created halfway
            cloudwatchevents.putRule(ruleParams, function (err, ruleData) { // create the rule
                if (err) {
                    reject(err);
                } else {
                    let lambdaPermission = {
                        FunctionName: lambdaFunctionName,
                        StatementId: ruleName,
                        Action: 'lambda:InvokeFunction',
                        Principal: 'events.amazonaws.com',
                        SourceArn: ruleData.RuleArn
                    };
                    let removePermission = {
                        FunctionName: lambdaFunctionName,
                        StatementId: ruleName,
                    };
                    // now, to create the rule's target, we need to add permission to the lambda
                    lambda.removePermission(removePermission, function (err, removeLambdaData) { // remove if a rule of the same name was previously added as a permission to this lambda; ignore if a "rule not found" error is thrown
                        lambda.addPermission(lambdaPermission, function (err, lamdaData) { // now add the permission
                            if (err) {
                                reject(err); // FAIL: throws PolicyLengthExceededException after ~50 CloudWatch events are registered to this lambda function
                            } else {
                                let targetParams = {
                                    Rule: ruleName,
                                    Targets: [
                                        {
                                            Arn: module.exports.buildLamdaFunctionArn("restaurant"),
                                            Id: ruleName,
                                            Input: JSON.stringify({
                                                func: "notifyUserOfUpcomingReservation",
                                                data: {
                                                    reservationId: reservationModel.reservationId
                                                }
                                            }),
                                        },
                                    ]
                                };
                                cloudwatchevents.putTargets(targetParams, function (err, targetData) {
                                    if (err) {
                                        reject(err);
                                    } else {
                                        resolve(targetData);
                                    }
                                });
                            }
                        });
                    });
                }
            });
        });
    });
};
The above function works fine the first ~50 times (so I can easily create reminders for 50 reservations). However, it will always fail eventually with:
PolicyLengthExceededException
Lambda function access policy is limited to 20 KB.
HTTP Status Code: 400
Which makes sense, as the policy document cannot grow indefinitely.
So what is the correct way to approach this problem: creating unlimited CloudWatch event reminders with a Lambda function target?
Create a role and add that policy or permission to the role; your Lambda can then assume the role and run.
You can use the AWS STS module for that, rather than creating and removing a permission each time.
STS assumes the role temporarily and then executes the code.
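A minimal sketch of that idea (the role ARN is a placeholder you would replace with your own):

var AWS = require('aws-sdk');
var sts = new AWS.STS();
sts.assumeRole({
    RoleArn: 'arn:aws:iam::123456789012:role/YOUR_INVOKE_ROLE', // hypothetical role
    RoleSessionName: 'reservation-reminder'
}, function (err, data) {
    if (err) return console.log(err, err.stack);
    // use the temporary credentials returned in data.Credentials
    var lambda = new AWS.Lambda({
        accessKeyId: data.Credentials.AccessKeyId,
        secretAccessKey: data.Credentials.SecretAccessKey,
        sessionToken: data.Credentials.SessionToken
    });
});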