handshake inactivity TimeOut error in aws lambda and Rds - amazon-web-services

var mysql = require('mysql');
var config = require('./config.json');

// Connection settings come from config.json so they can differ per
// environment without code changes.
var poolOptions = {
    host: config.dbhost,
    user: config.dbuser,
    password: config.dbpassword,
    database: config.dbname
};

// Module-scope pool: created once and reused across warm Lambda invocations.
var pool = mysql.createPool(poolOptions);
module.exports.handler = (event, context, callback) => {
    // Don't keep the Lambda alive waiting for the pool's idle TCP
    // connections to close; return as soon as the callback fires.
    context.callbackWaitsForEmptyEventLoop = false;

    pool.getConnection(function (err, connection) {
        if (err) {
            // Could not obtain a connection (e.g. handshake timeout to RDS).
            return callback(err, null);
        }

        connection.query("Select * from allBlogs", function (error, results, fields) {
            // Return the connection to the pool as soon as the query completes.
            connection.release();

            // BUG FIX: check `error` BEFORE touching `results` -- on a failed
            // query `results` is undefined and reading `.length` throws,
            // which would mask the real error.
            if (error) {
                return callback(
                    { statusCode: 400, headers: { "Access-Control-Allow-Origin": "*" }, body: JSON.stringify(error) },
                    null
                );
            }

            console.log(results);

            // Convert each row's comma-separated `tag` column into an array.
            for (var i = 0; i < results.length; i++) {
                results[i].tag = results[i].tag.split(",");
            }

            callback(null, {
                statusCode: 200,
                headers: { "Access-Control-Allow-Origin": "*" },
                body: JSON.stringify(results)
            });
        });
    });
};
I am using AWS Lambda, API Gateway, and an RDS database, but I am facing a handshake inactivity timeout error. The code above, written in Node.js, is what I am running.

Since the Lambda is in the VPC, a likely reason why it times out while trying to connect to the RDS instance is that a Lambda function in a VPC has neither internet access nor a public IP. From the docs:
Connecting a function to a public subnet does not give it internet access or a public IP address.
Since the question does not provide VPC details nor specifics of the RDS setup, it should be pointed out that if your RDS is not in the VPC, your lambda will fail to connect to a public RDS without a NAT gateway .
On the other hand, if the RDS instance is in a different VPC, then a peering connection between the two VPCs can be established to enable communication between your Lambda function and the RDS instance.

Related

AWS Enabling CORS for my API triggering a Lambda

I managed to create an AWS Lambda that does two things: writes to a DynamoDB table and sends an SMS to a mobile number. Then I call this Lambda through an API Gateway POST call. It works great from the Test section in the AWS console, but it gives an error both in Postman and on my own website. I added a callback to handle CORS in the Lambda, enabled CORS on my API via the console, and deployed it, but I still get errors:
Errors via postman call: {
"message": "Internal server error"
}
Errors via my website (jquery ajax POST call): Lambda calling failed: {"readyState":4,"responseText":"{"message": "Internal server error"}","responseJSON":{"message":"Internal server error"},"status":500,"statusText":"error"}
This is my lambda code
const AWS = require('aws-sdk');

// Module-scope service clients: created once, reused across warm invocations.
const dynamodb = new AWS.DynamoDB();
const SNS = new AWS.SNS();

// Target DynamoDB table name (redacted).
const tableName = "#####";

// SNS publish request: destination number (redacted) and the message body.
let params = {
    Message: 'Someone wrote!',
    PhoneNumber: 'mynumber####'
};
exports.handler = (event, context, callback) => {
dynamodb.putItem({
"TableName": tableName,
"Item" : {
"Id": {
N: event.Id
},
"Type": {
S: event.Type
}
}
}, function(err, data) {
if (err) {
console.log('Error putting item into dynamodb failed: '+err);
}
else {
console.log('Success in writing, now starting to send SMS');
return new Promise((resolve, reject) => {
SNS.publish(params, function(err, data) {
if(err) {
console.log("Error in sending sms alarm");
reject(err);
}
else {
console.log("SMS alarm sent!");
resolve(data);
}
})
})
}
});
callback(null, {
statusCode: 200,
headers: {
"Access-Control-Allow-Headers" : "Content-Type",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "OPTIONS,POST,GET"
},
body: JSON.stringify('Hello from Lambda!'),
});
};
What am I doing wrong? I don't think permissions on lambda are the problem here, since testing it on console works both on writing on dynamo both sending the sms to my cellphone.
If I click from here on API Endpoint I get the same error {"message": "Internal server error"}
SOLUTION: instead of making an AWS HTTP API, make an AWS REST API. It is more complex but offers more customization for CORS, letting you configure the CORS settings and headers yourself.

AWS How to Invoke SSM Param Store using Private DNS Endpoint from Lamda function Nodejs

Hi have requirement where credential needs to be stored in SSM Param store and will be read by Lambda function which sits inside an VPC, and all the subnets inside my VPC is public subnet.
So when I am calling SSM Param store using below code I am getting timed out error.
const AWS = require('aws-sdk');

// Pin the SDK to the region that hosts the parameter store.
AWS.config.update({ region: 'us-east-1' });

// Shared SSM client, reused across warm invocations.
const parameterStore = new AWS.SSM();
exports.handler = async (event, context, callback) => {
console.log('calling param store');
const param = await getParam('/my/param/name')
console.log('param : ',param);
//Send API Response
return {
statusCode: '200',
body: JSON.stringify('able to connect to param store'),
headers: {
'Content-Type': 'application/json',
},
};
};
/**
 * Promise wrapper around the callback-style SSM getParameter call so the
 * handler can `await` it.
 * @param {string} param - full parameter name, e.g. '/my/param/name'
 * @returns {Promise<object>} resolves with the raw GetParameter response
 */
const getParam = (param) =>
    new Promise((resolve, reject) => {
        parameterStore.getParameter({ Name: param }, (err, data) => {
            if (err) {
                reject(err);
            } else {
                resolve(data);
            }
        });
    });
So I created vpc endpoint for Secrets Manager which has with Private DNS name enabled.
Still I am getting timed out error for above code.
Do I need change Lambda code to specify Private DNS Endpoint in Lambda function
Below Image contains outbound rule for subnet NACL
Below Image contains outbound rule for Security Group
I managed to fix this issue. The root cause of this problem was all the subnets were public subnet. Since VPC endpoints are accessed privately without internet hence the subnets associated with Lambda function should be private subnet.
Here are the steps I took to fix this issue:
Created a NAT Gateway in side VPC and assigned one elastic IP to it
Created new route table and pointed all the traffics to NAT gateway created in steps 1
Attached new route table to couple of subnets (which made them private)
then attached only private subnets to Lambda function
Other than this IAM role associated with Lambda function should have below 2 policy to access SSM Param store
AmazonSSMReadOnlyAccess
AWSLambdaVPCAccessExecutionRole

access all ec2 cross region via lambda

I have a Lambda function for automatic AMI backups. Is it possible to execute the Lambda across regions, so it takes automatic backups of all the EC2 instances in my account?
One lambda function execution for all ec2 across region
var aws = require('aws-sdk');

// BUG FIX: the original line used the comma operator
// (`aws.config.region = 'us-east-1','ap-south-1','eu-central-1';`), which
// assigns only the first string -- a single default region can never span
// several regions. Configure one region here; to work across regions,
// create a per-region client with `new aws.EC2({region: '...'})`.
aws.config.region = 'us-east-1';

var ec2 = new aws.EC2();

// Timestamp pieces used to build unique AMI names.
// BUG FIX: declared with `var` -- the originals were implicit globals.
var now = new Date();
var date = now.toISOString().substring(0, 10);
var hours = now.getHours();
var minutes = now.getMinutes();
exports.handler = function(event, context) {
var instanceparams = {
Filters: [{
Name: 'tag:Backup',
Values: [
'yes'
]
}]
}
ec2.describeInstances(instanceparams, function(err, data) {
if (err) console.log(err, err.stack);
else {
for (var i in data.Reservations) {
for (var j in data.Reservations[i].Instances) {
instanceid = data.Reservations[i].Instances[j].InstanceId;
nametag = data.Reservations[i].Instances[j].Tags
for (var k in data.Reservations[i].Instances[j].Tags) {
if (data.Reservations[i].Instances[j].Tags[k].Key == 'Name') {
name = data.Reservations[i].Instances[j].Tags[k].Value;
}
}
console.log("Creating AMIs of the Instance: ", name);
var imageparams = {
InstanceId: instanceid,
Name: name + "_" + date + "_" + hours + "-" + minutes,
NoReboot: true
}
ec2.createImage(imageparams, function(err, data) {
if (err) console.log(err, err.stack);
else {
image = data.ImageId;
console.log(image);
var tagparams = {
Resources: [image],
Tags: [{
Key: 'DeleteOn',
Value: 'yes'
}]
};
ec2.createTags(tagparams, function(err, data) {
if (err) console.log(err, err.stack);
else console.log("Tags added to the created AMIs");
});
}
});
}
}
}
});
}
where aws.config.region is the region configuration. It only works for the current region (the one in which the Lambda is deployed).
This line:
var ec2 = new aws.EC2();
connects to the Amazon EC2 service in the region where the Lambda function is running.
You can modify it to connect to another region:
var ec2 = new AWS.EC2({apiVersion: '2006-03-01', region: 'us-west-2'});
Thus, your program could loop through a list of regions (from ec2.describeRegions), creating a new EC2 client for the given region, then running the code you already have.
See: Setting the AWS Region - AWS SDK for JavaScript
In your Lambda role, you need to add a policy that gives the Lambda function the necessary permissions to access EC2 in the other accounts. Typically you can add the ARNs of the EC2 instances you want access to, or you can specify "*", which grants permissions on all instances.
Also, in the other accounts where the EC2 instances are running, you need to add an IAM policy that grants access to your Lambda role; note that you need to provide the Lambda role's ARN.
In this way your Lambda role will have policy to access EC2 and cross account EC2 will have policy which grant's access to Lambda role.
Without this in place you might need to do heavy lifting of configuring IP's of each EC2 in each account.
Yes and you also need to point EC2 object to a region where the instance is running,
Any code (including a Lambda function) can create a client that connects to a different region.

AWS Elastic search service - IAM Role (Refresh session token - Node.js)

I'm using AWS Elastic search service and written a Node.js wrapper (runs within a ECS dockerized container) . IAM roles are used to create a singleton connection with Elastic search.
I'm trying to refresh my session token by checking AWS.config.credentials.needsRefresh() before every request — however it always returns true, even after the session has expired. Unsurprisingly, AWS is responding with a 403 error. Any ideas will be greatly appreciated.
var AWS = require('aws-sdk');
var config = require('config');
var connectionClass = require('http-aws-es');
var elasticsearch = require('elasticsearch');
// BUG FIX: the original copied the resolved access key / secret / session
// token into a static `new AWS.Credentials(...)` object. A static
// credentials instance has no refresh() implementation, so once the STS
// session token expired it could never be renewed, and every signed request
// then failed with a 403. Keep the SDK's provider-managed credentials
// object (which knows how to refresh itself) and only set the region here.
AWS.config.getCredentials(function() {
    AWS.config.update({
        region: 'us-east-1'
    });
}
)
// Elasticsearch client that signs requests with AWS credentials read from
// the environment, via the http-aws-es connection class.
var clientOptions = {
    host: `${config.get('elasticSearch.host')}`,
    log: 'debug',
    connectionClass: connectionClass,
    amazonES: {
        credentials: new AWS.EnvironmentCredentials('AWS')
    }
};

var client = new elasticsearch.Client(clientOptions);

module.exports = client;

API timeouts in AWS Lambda?

I'm trying to create a lambda function in AWS which will create a new Stripe token:
import stripePackage from 'stripe';
const stripe = stripePackage('...');
module.exports.create = (event, context, callback) => {
    // Create a Stripe card token using the standard test card number.
    stripe.tokens.create({
        card: {
            "number": 4242424242424242,
            "exp_month": '02',
            "exp_year": '22',
            "cvc": '123'
        }
    }, (err, token) => {
        if (err) {
            console.log(err);
            // BUG FIX: return here -- the original fell through and invoked
            // the callback a second time with a 200 after the 400.
            return callback(null, {
                statusCode: 400,
                body: "error"
            });
        }
        console.log(token);
        callback(null, {
            statusCode: 200,
            body: "ok"
        });
    });
}
However, this will time out every time. I have a security group for outbound connections as follows:
Ports Destination
All 0.0.0.0/0
However the only thing I seem to be able to connect to are other AWS services. How can I open my Lambda function up to connections outside AWS?
You either need to remove the Lambda function from your VPC (if it doesn't need VPC resource access then adding it to the VPC only introduces performance issues anyway), or you need to make sure the Lambda function is in a private subnet of your VPC and that subnet has a route to a NAT Gateway.