How to fix aws-iot-device-sdk disconnect behaviour after switching internet connection?

I am trying to set up aws-iot-device-sdk-js with reconnect behaviour after the wifi network is switched, and it is taking around 20 minutes to do so. I am not sure where I am wrong, and the docs don't have anything regarding the issue I am having either.
I have tried going through the package docs and changing the keepalive time, but the output is still the same: the offline event fires only after about 20 minutes, and then the device reconnects.
const awsIot = require("aws-iot-device-sdk").device;
const certs = require("./certs_config");

const device = awsIot({
  keyPath: certs.KEYPATH,
  certPath: certs.CERTPATH,
  caPath: certs.CAPATH,
  deviceId: "rt.bottle.com.np",
  host: "aot2wgmcbqwsa-ats.iot.ap-south-1.amazonaws.com",
  region: "ap-south-1",
  keepalive: 60
});
let delay = 4000;
let count = 0;
const minimumDelay = 250;

if (Math.max(delay, minimumDelay) !== delay) {
  console.log('substituting ' + minimumDelay + 'ms delay for ' + delay + 'ms...');
}

setInterval(function () {
  count++;
  device.publish('topic', JSON.stringify({
    count
  }));
}, Math.max(delay, minimumDelay)); // clip to minimum
device.on('connect', function () {
  console.log('connect');
});
device.on('close', function () {
  console.log('close');
});
device.on('reconnect', function () {
  console.log('reconnect');
});
device.on('offline', function () {
  console.log('offline');
});
device.on('error', function (error) {
  console.log('error', error);
});
device.on('message', function (topic, payload) {
  console.log('message', topic, payload.toString());
});
In the AWS console I am getting this message:
Mqtt connection lost. Reconnect. Error code: 4. AMQJS0004E Ping timed out.
after around 1.5 minutes of the network switch, but in the Node server setup, as you can see in the code above, the offline event is only received after around 20 minutes. I want to get the error/offline/disconnect event as soon as the device disconnects or goes offline (i.e. when the error appears in the AWS console), as expected.

I am currently using the simulateNetworkFailure function to handle the network-switch issue I was having. I hope it helps others with the same issue.
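Roughly, the idea is the sketch below. It assumes the simulateNetworkFailure() method mentioned above is available on the device instance, and onNetworkChange is a hypothetical hook for however you detect the interface change (that part is platform-specific and not shown here):

// Sketch, not a drop-in solution: onNetworkChange is a hypothetical
// callback wired to whatever OS-level network-change detection you use.
function onNetworkChange() {
  // Tear the MQTT connection down immediately instead of waiting for
  // the keepalive/ping timeout; this fires 'offline'/'close' right
  // away and lets the SDK's reconnect logic kick in.
  device.simulateNetworkFailure();
}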

Related

How to set up a Socket.io server in AWS EC2 and then connect remotely to the server?

Good morning.
I created a simple server for a chat using Socket.io. The next screen (a screenshot, not shown here) would be the client interface, where you can see some inputs.
On localhost, on my computer, it works fine; the server log messages confirm this.
I uploaded the server to an EC2 instance running Ubuntu, and it works fine there in the same way, as the EC2 server logs show.
The main problem is when I try to connect from my computer to the Socket.io server on EC2. I get this error in my client-side file on my computer:
socket.io.js:3888 GET
https://mydomain:80/socket.io/?EIO=4&transport=polling&t=NQB59WG
Here is my server code:
const path = require("path");
const express = require("express");
const socketio = require("socket.io");

const app = express();
app.set('port', 3000);

// static files
app.use(express.static(path.join(__dirname, "public")));

// start the server
const server = app.listen(app.get('port'), () => {
  console.log("server on port", app.get('port'));
});

const io = socketio(server);

// websockets
io.on('connection', (socket) => {
  console.log("new connection " + socket.id);
  socket.on('chat:message', (data) => {
    console.log(data);
    io.sockets.emit("chat:server", data);
  });
  socket.on('chat:typing', (data) => {
    console.log(data);
    socket.broadcast.emit("chat:typing", data);
  });
});
Here is my client code:
const socket = io("https://domain:80");

// DOM elements
let message = document.getElementById("message");
let username = document.getElementById("username");
let btn = document.getElementById("button");
let output = document.getElementById("output");
let actions = document.getElementById("actions");

btn.addEventListener('click', function() {
  console.log({
    username: username.value,
    message: message.value
  });
  socket.emit("chat:message", {
    username: username.value,
    message: message.value
  });
});

message.addEventListener('keypress', function() {
  socket.emit("chat:typing", username.value);
});

socket.on("chat:server", function(data) {
  console.log(data);
  actions.innerHTML = "";
  output.innerHTML += "<p><strong>" + data.username + "</strong>: " + data.message + " </p>";
});

socket.on("chat:typing", function(data) {
  console.log(data);
  // "esta escribiendo" is Spanish for "is typing"
  actions.innerHTML = "<p><strong>" + data + "</strong> esta escribiendo </p>";
});
And here are the instance inbound rules for the ports (screenshot not shown).
When I tried to connect from my computer to the EC2 instance, I tried several ways to connect, like this:
const socket = io("https://url.org:80");
const socket = io("https://url.org");
const socket = io("https://ipaddres:80");
const socket = io("https://ipaddres");
const socket = io("ec2-xxxx.compute-1.amazonaws.com");
const socket = io("ec2-xxxx.compute-1.amazonaws.com:80");
Nothing works. Any help?
In my case the solution was to do two things:
I used my domain without the port:
const socket = io("https://url.org");
The EC2 instance runs behind nginx, so I modified the response headers to add this property:
Access-Control-Allow-Origin: *
And it worked.
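For completeness, the same header can also be sent by the Node process itself: Socket.IO v3+ accepts a cors option at construction time. This is a sketch under that version assumption; the answer above solved it at the nginx layer instead:

// Sketch, assuming Socket.IO v3+: have the server send
// Access-Control-Allow-Origin itself instead of adding it in nginx.
const io = socketio(server, {
  cors: {
    origin: "*" // same effect as the nginx header above; restrict in production
  }
});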

Pubsub push subscription not acknowledging messages

This is my setup.
Subscription A is a push subscription that POSTs messages to a Cloud Run deployment.
That deployment exposes an HTTP endpoint, processes the message, posts the result to Topic B, and responds 200 to subscription A's POST request. The whole process takes ~1.5 seconds.
Therefore, for every message in subscription A, I should end up with 1 message in Topic B.
This is what my code looks like.
My app starts an Express server:
const express = require('express');
const bodyParser = require('body-parser');
const _ = require('lodash');
const startBrowser = require('./startBrowser');
const tab = require('./tab');
const createMessage = require('./publishMessage');
const domain = 'https://example.com';
require('dotenv').config();

const app = express();
app.use(bodyParser.json());
const port = process.env.PORT || 8080;

app.listen(port, async () => {
  console.log('Listening on port', port);
});
The endpoint where all the magic happens
app.post('/', async (req, res) => {
  // Define the success and fail functions, which respond with status 200 and 500 respectively
  const failed = () => res.status(500).send();
  const completed = async () => {
    const response = await res.status(200).send();
    if (response && res.writableEnded) {
      console.log('successfully responded 200');
    }
  };

  // Process the data coming from subscription A
  let pubsubMessage = decodeBase64Json(req.body.message.data);
  let parsed = await processor(pubsubMessage);

  // Post the processed data to topic B
  let messageId = await postParsedData(parsed);
  if (messageId) {
    // ACK the message once the data has been processed and posted to topic B.
    completed();
  } else {
    console.log('Didnt get a message id');
    // failed();
  }
});
// Define the function that posts data to topic B
const postParsedData = async (parsed) => {
  if (!_.isEmpty(parsed)) {
    const topicName = 'topic-B';
    const messageIdInternal = await createMessage(parsed, topicName);
    return messageIdInternal;
  } else {
    console.log('Parsed is Empty');
    return null;
  }
};
function decodeBase64Json(data) {
  return JSON.parse(Buffer.from(data, 'base64').toString());
}
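For context, this is roughly the shape of the POST body a push subscription delivers (the field values here are made up for illustration); decodeBase64Json(req.body.message.data) recovers the original JSON payload:

{
  "message": {
    "data": "eyJmb28iOiJiYXIifQ==",
    "messageId": "1234567890",
    "publishTime": "2020-01-01T00:00:00.000Z"
  },
  "subscription": "projects/my-project/subscriptions/subscription-A"
}

Here "data" is the base64 encoding of {"foo":"bar"}.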
Execution time is about ~1.5 seconds, and I can see the successful responses logged on Cloud Run every ~1.5 seconds. That adds up to about ~2,400 messages/hour (per Cloud Run instance).
Topic B is getting new messages at ~2,400 messages/hour, but subscription A's acknowledgement rate is ~200 messages/hour, which leads to the messages being re-delivered many times.
Subscription A's Acknowledgement deadline is 600 seconds.
The request timeout period in Cloud run is 300 seconds.
I've tried ACKing messages before they're published to topic-B or even before parsing, but I'm getting the same result.
Edit: added a screenshot of the pending messages and processed messages (not shown here). Many more messages are processed than ACKed. It should be 1:1.
Thanks for your help
Solution: this error could not be reproduced by GCP support, and it didn't happen with larger numbers of Cloud Run instances. The solution is simply to increase the number of worker instances.
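For reference, one way to raise the instance count is a single gcloud command; the service name and region below are hypothetical:

gcloud run services update my-service --region=us-central1 --min-instances=3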
You need to await your completed() function call, like this:
....
if (messageId) {
  // ACK the message once the data has been processed and posted to topic B.
  await completed();
} else {
  console.log('Didnt get a message id');
  // failed();
}

Does AWS SQS Long poll return early if a message is received?

If I use SQS long polling and set the "WaitTimeSeconds" to say 10 seconds and "MaxNumberOfMessages" to 1, and a single message is delivered to the queue after say 0.1 seconds, will the call to sqs.receiveMessage() return immediately at that point, or should it not return until the 10 seconds of "WaitTimeSeconds" have elapsed?
In my testing the call to sqs.receiveMessage() seems to not return until the full duration of "WaitTimeSeconds" has elapsed.
Here is the code:
// Load the AWS SDK for Node.js
var AWS = require("aws-sdk");

const fmReqQ = "https://sqs.ap-southeast-2.amazonaws.com/myactid/fmReqQ";
const fmRspQ = "https://sqs.ap-southeast-2.amazonaws.com/myactid/fmRspQ";
const SydneyRegion = "ap-southeast-2";

var credentials = new AWS.SharedIniFileCredentials({ profile: "myprofile" });
AWS.config.credentials = credentials;

// Set the region
AWS.config.update({ region: SydneyRegion });

// Create an SQS service object
var sqs = new AWS.SQS({ apiVersion: "2012-11-05" });

async function sendRequest() {
  var sendParams = {
    MessageBody: "Information of 12/11/2016.",
    QueueUrl: fmReqQ,
  };
  try {
    const data = await sqs.sendMessage(sendParams).promise();
    console.log("Success, request MessageId: ", data.MessageId);
  } catch (err) {
    console.log("Error", err);
  }
}
async function doModelling() {
  console.time("modelling");
  await sendRequest();
  await receiveResponse();
  console.timeEnd("modelling");
}

async function receiveResponse() {
  var receiveParams = {
    AttributeNames: ["SentTimestamp"],
    MaxNumberOfMessages: 1,
    MessageAttributeNames: ["All"],
    QueueUrl: fmRspQ,
    WaitTimeSeconds: 1,
  };
  let data = null;
  try {
    data = await sqs.receiveMessage(receiveParams).promise();
    console.log("Success, response MessageId: ", data);
  } catch (err) {
    console.log("Error", err);
  }
}

doModelling();
When I set "WaitTimeSeconds: 3" I get output:
Success, request MessageId: e5079c2a-050f-4681-aa8c-77b05ac7da7f
Success, response MessageId: {
ResponseMetadata: { RequestId: '1b4d6a6b-eaa2-59ea-a2c3-3d9b6fadbb3f' }
}
modelling: 3.268s
When I set "WaitTimeSeconds: 10" I get output:
Success, request MessageId: bbf0a429-b2f7-46f2-b9dd-38833b0c462a
Success, response MessageId: {
ResponseMetadata: { RequestId: '64bded2d-5398-5ca2-86f8-baddd6d4300a' }
}
modelling: 10.324s
Notice how the elapsed time durations match the WaitTimeSeconds.
From reading about AWS SQS long polling, the documentation says it will "return messages as soon as they become available."
I don't seem to be seeing the messages "as soon as they become available"; instead, the sqs.receiveMessage() call always seems to take the full duration set in WaitTimeSeconds.
As you can see in the sample code, I have set MaxNumberOfMessages to 1.
Using ReceiveMessage() with Long Polling will return as soon as there is at least one message in the queue.
I'm not a Node person, but here's how I tested it:
Created an Amazon SQS queue
In one window, I ran:
aws sqs receive-message --queue-url https://sqs.ap-southeast-2.amazonaws.com/123/foo --visibility-timeout 1 --wait-time-seconds 10
Then, in another window, I ran:
aws sqs send-message --queue-url https://sqs.ap-southeast-2.amazonaws.com/123/foo --message-body bar
The receive-message command returned very quickly after I used the send-message command.
It is possible that your tests are impacted by messages being 'received' but marked as 'invisible', and remaining invisible for later tests since your code does not call DeleteMessage(). I avoided this by specifically stating --visibility-timeout 1, which made the message immediately reappear on the queue for the next test.
The number of messages being requested (--max-number-of-messages) does not impact this result. It returns as soon as there is at least one message available.
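In Node, the equivalent cleanup might look like the sketch below (an illustration reusing the asker's sqs client and fmRspQ queue, not code from the answer): delete each received message so it does not sit invisible and skew later test runs.

// Sketch: delete a received message so it doesn't stay 'invisible'
// between tests (mirrors the --visibility-timeout 1 trick above).
async function receiveAndDelete() {
  const data = await sqs.receiveMessage({
    QueueUrl: fmRspQ,
    MaxNumberOfMessages: 1,
    WaitTimeSeconds: 10,
  }).promise();
  if (data.Messages && data.Messages.length > 0) {
    await sqs.deleteMessage({
      QueueUrl: fmRspQ,
      ReceiptHandle: data.Messages[0].ReceiptHandle,
    }).promise();
  }
  return data;
}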
When I set "WaitTimeSeconds" to 0, I seem to get the behaviour that I am after:
Success, request MessageId: 9f286e22-1a08-4532-88ba-06c88be3dbc3
Success, response MessageId: {
ResponseMetadata: { RequestId: '5c264e27-0788-5772-a990-19d78c8b2565' }
}
modelling: 307.884ms
The value I was specifying for "WaitTimeSeconds" determined the duration of the sqs.receiveMessage() call because there were no messages on my queue (note the empty responses above), so the call waited for the full duration specified by "WaitTimeSeconds" before returning.

DB Connection Unavailable Lambda with Cloud Watch Event Rules

I'm having issues connecting to MongoDB when I use a CloudWatch Events rule to trigger my Lambdas to keep them warm, and I hit the same issue when I tried using serverless-plugin-warmup. Does anyone have any ideas as to why this would be happening? I also whitelist IPs for my database and use an Elastic IP for my Lambda functions. Could the CloudWatch Events rules be causing the Lambdas to use a different IP?
{"error":{"name":"MongoError","message":"no connection available"}}
I wrap my functions with the following to make sure the database is connected before running any code:
const mongoose = require('mongoose');
mongoose.Promise = global.Promise;

let cachedDB = null;

module.exports = fn => (...args) => {
  const [, context] = args;
  context.callbackWaitsForEmptyEventLoop = false;
  if (cachedDB && cachedDB.readyState != 0 && cachedDB.readyState != 3) {
    fn(...args);
  } else {
    mongoose.connect(process.env.MONGO_URI);
    mongoose.connection.on('error', err => {
      console.log('Connection Error');
      console.log(err);
    });
    mongoose.connection.once('open', () => {
      cachedDB = mongoose.connection;
      fn(...args);
    });
  }
};
You need to handle warmup events as a special case in your handler. Basically, your handler should return right away when it is invoked via a warmup event.
Following the example given in serverless-plugin-warmup, you can do it this way:
module.exports.lambdaToWarm = function(event, context, callback) {
  // Immediate response for the WarmUp plugin
  if (event.source === 'serverless-plugin-warmup') {
    console.log('WarmUP - Lambda is warm!')
    return callback(null, 'Lambda is warm!')
  }
  // add lambda logic after
}
Notice that there is an if statement at the beginning to check whether it's a warmup event. If it is, the handler returns successfully right away.
This check should be at the very beginning so that the handler doesn't have to connect to MongoDB at all.
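Combining this with the connection wrapper from the question, a sketch might look like the following. It assumes the same wrapper shape and the serverless-plugin-warmup event source; the early return happens before any MongoDB work:

// Sketch: short-circuit warmup events before the DB wrapper runs.
module.exports = fn => (...args) => {
  const [event, context, callback] = args;

  // Warmup invocations return immediately and never touch MongoDB.
  if (event && event.source === 'serverless-plugin-warmup') {
    return callback(null, 'Lambda is warm!');
  }

  context.callbackWaitsForEmptyEventLoop = false;
  // ... existing cachedDB / mongoose.connect logic from above ...
  fn(...args);
};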

AWS javascript SDK request.js send request function execution time gradually increases

I am using aws-sdk to push data to a Kinesis stream, using PutRecord to achieve real-time data pushes.
I am observing the same delay with putRecords as well, in the case of batch writes.
I have tried this out with 4 records, where I am not crossing any shard limit.
Below is my Node.js HTTP agent configuration. The default maxSockets value is set to Infinity.
Agent {
  domain: null,
  _events: { free: [Function] },
  _eventsCount: 1,
  _maxListeners: undefined,
  defaultPort: 80,
  protocol: 'http:',
  options: { path: null },
  requests: {},
  sockets: {},
  freeSockets: {},
  keepAliveMsecs: 1000,
  keepAlive: false,
  maxSockets: Infinity,
  maxFreeSockets: 256 }
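As an aside, note the keepAlive: false above: every request opens a fresh connection. With the AWS SDK for JavaScript v2, a keep-alive agent can be passed through httpOptions. A sketch under that assumption (not part of the original post):

// Sketch: reuse TLS connections across SDK requests instead of
// opening a new socket (and handshake) for every putRecord call.
const https = require('https');
const AWS = require('aws-sdk');

AWS.config.update({
  httpOptions: {
    agent: new https.Agent({ keepAlive: true })
  }
});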
Below is my code.
I am using the following code to trigger the putRecord call:
event.Records.forEach(function(record) {
  var payload = new Buffer(record.kinesis.data, 'base64').toString('ascii');
  // put record request
  evt = transformEvent(payload);
  promises.push(writeRecordToKinesis(kinesis, streamName, evt));
});
The event structure is:
evt = {
  Data: new Buffer(JSON.stringify(payload)),
  PartitionKey: payload.PartitionKey,
  StreamName: streamName,
  SequenceNumberForOrdering: dateInMillis.toString()
};
This event is used in the put request.
function writeRecordToKinesis(kinesis, streamName, evt) {
  console.time('WRITE_TO_KINESIS_EXECUTION_TIME');
  var deferred = Q.defer();
  try {
    kinesis.putRecord(evt, function(err, data) {
      if (err) {
        console.warn('Kinesis putRecord %j', err);
        deferred.reject(err);
      } else {
        console.log(data);
        deferred.resolve(data);
      }
      console.timeEnd('WRITE_TO_KINESIS_EXECUTION_TIME');
    });
  } catch (e) {
    console.error('Error occurred while writing data to Kinesis: ' + e);
    deferred.reject(e);
  }
  return deferred.promise;
}
Below is the output for 3 messages.
WRITE_TO_KINESIS_EXECUTION_TIME: 2026ms
WRITE_TO_KINESIS_EXECUTION_TIME: 2971ms
WRITE_TO_KINESIS_EXECUTION_TIME: 3458ms
Here we can see a gradual increase in response time and function execution time.
I have added counters in the aws-sdk request.js class, and I can see the same pattern there as well.
Below is the code snippet from the aws-sdk request.js class which executes the put request:
send: function send(callback) {
  console.time('SEND_REQUEST_TO_KINESIS_EXECUTION_TIME');
  if (callback) {
    this.on('complete', function (resp) {
      console.timeEnd('SEND_REQUEST_TO_KINESIS_EXECUTION_TIME');
      callback.call(resp, resp.error, resp.data);
    });
  }
  this.runTo();
  return this.response;
},
Output for send request:
SEND_REQUEST_TO_KINESIS_EXECUTION_TIME: 1751ms
SEND_REQUEST_TO_KINESIS_EXECUTION_TIME: 1816ms
SEND_REQUEST_TO_KINESIS_EXECUTION_TIME: 2761ms
SEND_REQUEST_TO_KINESIS_EXECUTION_TIME: 3248ms
Here you can see it increasing gradually.
Can anyone please suggest how I can reduce this delay?
3 seconds to push a single record to Kinesis is not at all acceptable.