Connection refused when connecting to redis on EC2 instance - amazon-web-services

I am trying to connect to a local Redis database on an EC2 instance from a Lambda function. However, when I execute the code, I get the following error in the logs:
{
    "errorType": "Error",
    "errorMessage": "Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
    "code": "ECONNREFUSED",
    "stack": [
        "Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
        "    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1106:14)"
    ],
    "errno": "ECONNREFUSED",
    "syscall": "connect",
    "address": "127.0.0.1",
    "port": 6379
}
The security group has the following entries:
Type: Custom TCP Rule
Port: 6379
Source: <my security group name>
Type: Custom TCP Rule
Port: 6379
Source: 0.0.0.0/0
My Lambda function has the following code.
'use strict';
const Redis = require('redis');

module.exports.hello = async event => {
    var redis = Redis.createClient({
        port: 6379,
        host: '127.0.0.1',
        password: ''
    });
    redis.on('connect', function () {
        console.log("Redis client conected : ");
    });
    redis.set('age', 38, function (err, reply) {
        console.log(err);
        console.log(reply);
    });
    return {
        statusCode: 200,
        body: JSON.stringify(
            {
                message: 'The lambda function is called..!!',
                input: event,
                redis: redis.get('age')
            },
            null,
            2
        ),
    };
};
Please let me know where I am going wrong.

First thing: your Lambda is trying to connect to localhost, so this will not work. You have to use the public or private IP of the Redis instance.
You also need to make sure of the following:
The Lambda should be in the same VPC as your EC2 instance
The Lambda's security group should allow outbound traffic
The Lambda should have a subnet assigned
The EC2 instance's security group should allow the Lambda to connect to Redis on port 6379
Then connect to the instance's IP instead of localhost:
const redis = require('redis');

const redis_client = redis.createClient({
    host: 'your_instance_IP',
    port: 6379
});

exports.handler = (event, context, callback) => {
    redis_client.set("foo", "bar");
    redis_client.get("foo", function (err, reply) {
        redis_client.unref();
        callback(null, reply);
    });
};
You can also look into this: how-should-i-connect-to-a-redis-instance-from-an-aws-lambda-function
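Putting those points together, a corrected handler might look like the following minimal sketch. The private IP 10.0.0.12 is a placeholder, and the promisified calls assume the callback-based node_redis v3 API, so the reply is awaited before the response is returned (unlike the original redis.get('age') call):

'use strict';
const { promisify } = require('util');
const redis = require('redis');

// Placeholder: use your EC2 instance's private IP, not 127.0.0.1.
const client = redis.createClient({ host: '10.0.0.12', port: 6379 });
const getAsync = promisify(client.get).bind(client);
const setAsync = promisify(client.set).bind(client);

module.exports.hello = async (event) => {
    await setAsync('age', '38');
    const age = await getAsync('age'); // wait for the reply before building the response
    return {
        statusCode: 200,
        body: JSON.stringify({
            message: 'The lambda function is called..!!',
            input: event,
            redis: age
        }, null, 2),
    };
};

Creating the client outside the handler also lets warm invocations reuse the connection.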

On Ubuntu Server 20.04 LTS I was seeing a similar error after a reboot of the EC2 server, which in our case runs an Express app (a Node.js app installed with nvm) started via a cron job, using passport.js with sessions stored in Redis:
Redis error: Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379
at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1144:16) {
errno: 'ECONNREFUSED',
code: 'ECONNREFUSED',
syscall: 'connect',
address: '127.0.0.1',
port: 6379
}
What resolved it for me (since my Node.js app runs as the ubuntu user, I needed to make that path available) was to extend the PATH within /etc/crontab:
Run sudo nano /etc/crontab. Comment out the original PATH line in there so you can switch back if required (my original PATH was set to PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin) and append the location of the bin directory you need to refer to, in the format:
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/home/ubuntu/.nvm/versions/node/v12.20.0/bin
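For context, the cron entry that launches the app might then look something like this sketch (the @reboot schedule, user, and paths are assumptions; only the PATH line above comes from the actual setup):

# hypothetical /etc/crontab entry; with the nvm bin directory on PATH, plain `node` resolves
@reboot ubuntu cd /home/ubuntu/myapp && node server.js >> /home/ubuntu/myapp/app.log 2>&1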
And the error disappeared for me. For reference, here is the Redis session setup:
// redisInit.js
const session = require('express-session');
const redis = require('redis');
const RedisStore = require('connect-redis')(session);

const { redisSecretKey } = process.env;

const redisClient = redis.createClient();
redisClient.on('error', (err) => {
    console.log('Redis error: ', err);
});

const redisSession = session({
    secret: redisSecretKey,
    name: 'some_redis_store_name',
    resave: true,
    saveUninitialized: true,
    cookie: { secure: false },
    store: new RedisStore({
        host: 'localhost', port: 6379, client: redisClient, ttl: 86400
    })
});

module.exports = redisSession;
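For completeness, this module might be wired into the Express app roughly as in the sketch below (the file layout, passport setup, and port are assumptions, since they are not shown above):

// app.js -- hypothetical wiring of the Redis-backed session middleware
const express = require('express');
const passport = require('passport');
const redisSession = require('./redisInit');

const app = express();
app.use(redisSession);             // sessions are stored in Redis
app.use(passport.initialize());
app.use(passport.session());       // passport deserializes the user from the Redis-backed session

app.listen(3000);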

Related

strapi admin not running in aws

I just finished my project in Strapi and deployed it to AWS. When I open my public IPv4 address on port 1337 it says 'server is running successfully', but when I try to log in to the admin panel it just keeps spinning and never shows the panel.
server.js
module.exports = ({ env }) => ({
    host: env('HOST', '0.0.0.0'),
    port: env.int('PORT', 1337),
    cron: { enabled: true },
    url: env('URL', 'http://localhost'),
    admin: {
        auth: {
            secret: env('ADMIN_JWT_SECRET', 'MY_JWT_SECRET'),
        },
    },
});
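No answer is recorded here, but one thing to check (an assumption, not a confirmed fix) is that url is still http://localhost; when the admin panel is reached via the instance's public address, Strapi generally needs url to reflect that address and the admin panel to be rebuilt afterwards (npm run build). A sketch, with the public address left as a placeholder:

// config/server.js -- hypothetical change; replace the placeholder with your public IPv4 or domain
module.exports = ({ env }) => ({
    host: env('HOST', '0.0.0.0'),
    port: env.int('PORT', 1337),
    cron: { enabled: true },
    url: env('URL', 'http://<your-public-ip>:1337'),
    admin: {
        auth: {
            secret: env('ADMIN_JWT_SECRET', 'MY_JWT_SECRET'),
        },
    },
});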

Cert fails in type-orm but works fine with node-postgres

I want to connect to a Postgres data source using type-orm like this:
it("can connect using type-orm", async () => {
const signer = new AWS.RDS.Signer({
region: REGION,
hostname: DB_INSTANCE,
port: DB_PORT,
username: DB_USER
})
const AppDataSource = new DataSource({
type: "postgres",
host: DB_HOST,
port: DB_PORT,
username: DB_USER,
password: async () => signer.getAuthToken({}),
database: DB_DATABASE,
entities: [],
synchronize: false,
logging: false,
ssl: {
ca: fs.readFileSync('./cert.pem').toString(),
},
})
await AppDataSource.initialize()
});
But it fails with:
Hostname/IP does not match certificate's altnames: Host: localhost. is not in the cert's altnames: DNS: <TRUE AWS altnames here>
What is interesting is that the exact same credentials and certificate work fine with node-postgres:
it("can connect using node-postgres", async () => {
const signer = new AWS.RDS.Signer({
region: REGION,
hostname: DB_INSTANCE,
port: DB_PORT,
username: DB_USER
})
const client = new Client({
user: DB_USER,
host: DB_HOST,
database: DB_DATABASE,
password: signer.getAuthToken({}),
port: DB_PORT,
ssl: {
ca: fs.readFileSync('./cert.pem').toString(),
},
});
await client.connect();
await client.end();
})
So the certificate should be fine? I'm connecting to localhost:5432, which is a forwarded port on a jump server.
It seems type-orm performs stricter hostname validation. To bypass it, I added rejectUnauthorized: false in the extra option:
it("can connect using type-orm", async () => {
const AppDataSource = new DataSource({
type: "postgres",
host: env.DB_HOST,
port: env.DB_PORT,
username: env.DB_USER,
password: 'password123',
database: env.DB_DATABASE,
entities: [],
synchronize: false,
logging: false,
ssl: {
ca: fs.readFileSync('./cert.pem').toString(),
},
extra: {
ssl: {
// Disregard mismatch between localhost and rds.amazonaws.com
rejectUnauthorized: false
}
}
})
await AppDataSource.initialize()
});
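If disabling verification entirely feels too broad, another option (a sketch, not from the original answer) is a custom checkServerIdentity callback, which Node's TLS layer accepts in the same ssl object that node-postgres and extra.ssl pass through; the chain is still checked against cert.pem and only the known hostname mismatch from the jump-server tunnel is tolerated. The RDS endpoint below is a placeholder:

const tls = require('tls');
const fs = require('fs');

// Hypothetical: validate the certificate against the real RDS endpoint
// instead of the tunnel's 'localhost', keeping chain verification intact.
const ssl = {
    ca: fs.readFileSync('./cert.pem').toString(),
    checkServerIdentity: (host, cert) =>
        tls.checkServerIdentity('<your-db-instance>.<region>.rds.amazonaws.com', cert)
};

The same object can be passed as ssl to node-postgres, or under extra: { ssl } for the DataSource.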

Flask app is giving proxy error: ENOTFOUND

Trying to run a Flask project which uses Grunt.
Gruntfile.js has the following configuration:
connect: {
options: {
port: 9000,
// Change this to '0.0.0.0' to access the server from outside.
//hostname: 'localhost',
hostname: '0.0.0.0',
livereload: 35728
},
proxies: [{
context: '/api',
host: 'backend',
port: 5000,
changeOrigin: true
}],
app.py has the following:
app.run(host='127.0.0.1', port='9000', debug=True) #host='0.0.0.0'
serverURL has the following configuration:
.constant('serverURL', 'http://127.0.0.1:9000/api');
Client shows this:
Started connect web server on http://0.0.0.0:9000
But in the client window I receive this:
Running "watch" task
Waiting...
>> Proxy error: ENOTFOUND
>> Proxy error: ENOTFOUND
Could anyone tell me what the reason behind this is?
In the proxies, try changing the host from backend to 0.0.0.0.
connect: {
options: {
port: 9000,
// Change this to '0.0.0.0' to access the server from outside.
//hostname: 'localhost',
hostname: '0.0.0.0',
livereload: 35728
},
proxies: [{
context: '/api',
host: '0.0.0.0',
port: 5000,
changeOrigin: true
}],
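For reference, the proxies array only takes effect once grunt-connect-proxy's middleware is registered and the configureProxies task runs before connect; that wiring is not shown in the question, so the sketch below is an assumption about how it usually looks:

// Gruntfile.js -- hypothetical proxy wiring for grunt-connect-proxy
var proxySnippet = require('grunt-connect-proxy/lib/utils').proxyRequest;

grunt.initConfig({
    connect: {
        options: { /* port, hostname, livereload and proxies as above */ },
        livereload: {
            options: {
                middleware: function (connect) {
                    return [
                        proxySnippet,           // forwards /api/* to the Flask backend
                        connect.static('app')   // serves the static frontend
                    ];
                }
            }
        }
    }
});

grunt.loadNpmTasks('grunt-connect-proxy');
grunt.registerTask('serve', ['configureProxies', 'connect:livereload', 'watch']);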

ERROR: The request could not be satisfied CloudFront

I am struggling to send emails from my server hosted on AWS Elastic Beanstalk, using certificates from CloudFront. I am using Nodemailer to send the emails; it works in my local environment but fails once deployed to AWS.
Email Code:
const transporter = nodemailer.createTransport({
    host: 'mail.email.co.za',
    port: 587,
    auth: {
        user: 'example#email.co.za',
        pass: 'email#22'
    },
    secure: false,
    tls: { rejectUnauthorized: false },
    debug: true
});

const mailOptions = {
    from: 'example#email.co.za',
    to: email,
    subject: 'Password Reset OTP',
    text: `${OTP}`
}

try {
    const response = await transporter.sendMail(mailOptions)
    return { error: false, message: 'OTP successfully sent', response }
} catch (e) {
    return { error: false, message: 'Problems sending OTP, Please try again' }
}
Error from AWS:
The request could not be satisfied
504 ERROR
The request could not be satisfied. CloudFront attempted to establish a connection with the origin, but either the attempt failed or the origin closed the connection.
NB: The code runs fine on my local environment.

Meteor deploy error (mup): pushing meteor app bundle to server failed

I am trying to deploy a meteor app to an AWS server, but am getting this message:
Started TaskList: Configuring App
[52.41.84.125] - Pushing the Startup Script
nodemiral:sess:52.41.84.125 copy file - src: /Users/Olivia/.nvm/versions/node/v7.8.0/lib/node_modules/mup/lib/modules/meteor/assets/templates/start.sh, dest: /opt/CanDu/config/start.sh, vars: {"appName":"CanDu","useLocalMongo":0,"port":80,"bind":"0.0.0.0","logConfig":{"opts":{"max-size":"100m","max-file":10}},"docker":{"image":"abernix/meteord:base","imageFrontendServer":"meteorhacks/mup-frontend-server","imagePort":80},"nginxClientUploadLimit":"10M"} +0ms
[52.41.84.125] x Pushing the Startup Script: FAILED Failure
Previously I had been able to deploy using mup, but now I am getting this message. The only major thing I've changed is the Python path in my .noderc. I am also able to SSH into my amazon server directly from the terminal. My mup file is:
module.exports = {
    servers: {
        one: {
            host: '##.##.##.###',
            username: 'ec2-user',
            pem: '/Users/Olivia/.ssh/oz-pair.pem'
            // password:
            // or leave blank to authenticate from ssh-agent
        }
    },
    meteor: {
        name: 'CanDu',
        path: '/Users/Olivia/repos/bene_candu_v2',
        servers: {
            one: {}
        },
        buildOptions: {
            serverOnly: true,
            mobileSettings: {
                public: {
                    "astronomer": {
                        "appId": "<key>",
                        "disableUserTracking": false,
                        "disableRouteTracking": false,
                        "disableMethodTracking": false
                    },
                    "googleMaps": "<key>",
                    "facebook": {
                        "permissions": ["email", "public_profile", "user_friends"]
                    }
                },
            },
        },
        env: {
            ROOT_URL: 'http://ec2-##-##-##-###.us-west-2.compute.amazonaws.com',
            MONGO_URL: 'mongodb://. . .'
        },
        /*ssl: {
            crt: '/opt/keys/server.crt', // this is a bundle of certificates
            key: '/opt/keys/server.key', // this is the private key of the certificate
            port: 443, // 443 is the default value and it's the standard HTTPS port
            upload: false
        },*/
        docker: {
            image: 'abernix/meteord:base'
        },
        deployCheckWaitTime: 60
    }
};
I have checked to make sure there are no trailing commas, and have tried increasing the wait time, etc. The error message I'm getting is pretty unhelpful. Does anyone have any insight? Thank you so much!