Cert fails in type-orm but works fine with node-postgres - amazon-iam

I want to connect to a Postgres data source using type-orm like this:
// Attempt to connect with TypeORM, authenticating with an RDS IAM auth token.
it("can connect using type-orm", async () => {
// RDS.Signer generates short-lived IAM auth tokens for the database user.
const signer = new AWS.RDS.Signer({
region: REGION,
hostname: DB_INSTANCE,
port: DB_PORT,
username: DB_USER
})
const AppDataSource = new DataSource({
type: "postgres",
host: DB_HOST,
port: DB_PORT,
username: DB_USER,
// TypeORM accepts a function here, so a fresh token is produced whenever
// the driver needs a password (tokens expire after ~15 minutes).
password: async () => signer.getAuthToken({}),
database: DB_DATABASE,
entities: [],
synchronize: false,
logging: false,
ssl: {
// RDS CA bundle; providing a `ca` enables full TLS verification,
// including the hostname check that fails below.
ca: fs.readFileSync('./cert.pem').toString(),
},
})
// Fails with "Hostname/IP does not match certificate's altnames" because the
// connection goes to localhost (a forwarded port), not the RDS hostname.
await AppDataSource.initialize()
});
But it fails with:
Hostname/IP does not match certificate's altnames: Host: localhost. is not in the cert's altnames: DNS: <TRUE AWS altnames here>
What is interesting is that the exact same credentials/certificate works fine with node-postgres:
// Same credentials and certificate, but connecting with node-postgres directly.
it("can connect using node-postgres", async () => {
// RDS.Signer generates short-lived IAM auth tokens for the database user.
const signer = new AWS.RDS.Signer({
region: REGION,
hostname: DB_INSTANCE,
port: DB_PORT,
username: DB_USER
})
const client = new Client({
user: DB_USER,
host: DB_HOST,
database: DB_DATABASE,
// Token is generated once, synchronously, at client construction time.
password: signer.getAuthToken({}),
port: DB_PORT,
ssl: {
// NOTE(review): this succeeds where TypeORM fails, which suggests the two
// drivers apply different default TLS identity checks — worth confirming
// against node-postgres' ssl defaults.
ca: fs.readFileSync('./cert.pem').toString(),
},
});
await client.connect();
await client.end();
})
So the certificate should be fine? I'm connecting to localhost:5432, which is a port forwarded through a jump server.

It seems type-orm performs stricter hostname validation. To bypass it, I added rejectUnauthorized in the extra option:
// Workaround: pass driver-level ssl options through TypeORM's `extra` object.
it("can connect using type-orm", async () => {
const AppDataSource = new DataSource({
type: "postgres",
host: env.DB_HOST,
port: env.DB_PORT,
username: env.DB_USER,
password: 'password123',
database: env.DB_DATABASE,
entities: [],
synchronize: false,
logging: false,
ssl: {
ca: fs.readFileSync('./cert.pem').toString(),
},
extra: {
ssl: {
// Disregard mismatch between localhost and rds.amazonaws.com
// WARNING(review): rejectUnauthorized: false disables ALL certificate
// validation (chain of trust included), not just the hostname check —
// a custom `checkServerIdentity` that only skips the hostname match
// would be safer. TODO confirm against Node's tls docs.
rejectUnauthorized: false
}
}
})
await AppDataSource.initialize()
});

Related

strapi admin not running in aws

I just finished my project in Strapi and deployed it on AWS. When I open my public IPv4 address on port 1337 it says 'server is running successfully', but when I try to log in to the admin panel it just keeps spinning and never shows the panel.
server.js
// Strapi server configuration (config/server.js).
module.exports = ({ env }) => ({
// Bind on all interfaces so the server is reachable from outside the host.
host: env('HOST', '0.0.0.0'),
port: env.int('PORT', 1337),
cron: { enabled: true},
// NOTE(review): `url` defaults to 'http://localhost' here; the admin panel
// uses this to reach the backend, so a browser on another machine would try
// its OWN localhost — presumably why the admin login spins. Should likely be
// the server's public URL — TODO confirm.
url: env('URL', 'http://localhost'),
admin: {
auth: {
// JWT secret for admin-panel sessions; falls back to a hard-coded value.
secret: env('ADMIN_JWT_SECRET', 'MY_JWT_SECRET'),
},
},
});

AWS CDK Custom Domain for Private API Gateway Not Working

Trying to associate a custom domain name (ie api.internal.domain.com) in a private hosted zone to a private API gateway deployment using CDK. Here's what I have so far.
PRIVATE_HOSTED_ZONE_NAME=internal.domain.com
PRIVATE_HOSTED_ZONE_ID=xxxxxxx
API_HOST_RECORD=api
FULL_DNS_RECORD_NAME = `${API_HOST_RECORD}.${PRIVATE_HOSTED_ZONE_NAME}`
// get the private hosted zone for internal.domain.com
const privateHostedZone = route53.PrivateHostedZone.fromHostedZoneAttributes(this, 'PrivateHostedZone', {
zoneName: PRIVATE_HOSTED_ZONE_NAME,
hostedZoneId: PRIVATE_HOSTED_ZONE_ID,
});
// get the cert for *.internal.domain.com
const certificate = acm.Certificate.fromCertificateArn(this, 'Certificate', `${process.env.API_SSL_CERTIFICATE_ARN}`);
// create a VPC endpoint for API gateway
const restApiVpcEndpoint = new ec2.InterfaceVpcEndpoint(this, 'RestApiVpcEndpoint', {
vpc,
privateDnsEnabled: true,
subnets: vpc.selectSubnets({
subnetType: ec2.SubnetType.PRIVATE,
}),
service: {
name: 'com.amazonaws.us-east-1.execute-api',
port: 443,
},
});
// custom resource to retrieve ENI IPs for NLB
// https://repost.aws/questions/QUjISNyk6aTA6jZgZQwKWf4Q/how-to-connect-a-load-balancer-and-an-interface-vpc-endpoint-together-using-cdk
// NOTE(review): PhysicalResourceId.of(Date.now()...) yields a new physical id
// on every synth, which forces this custom resource to re-run on each deploy.
const eni = new cr.AwsCustomResource(this, 'DescribeNetworkInterfaces', {
onCreate: {
service: 'EC2',
action: 'describeNetworkInterfaces',
parameters: {
NetworkInterfaceIds: restApiVpcEndpoint.vpcEndpointNetworkInterfaceIds,
},
physicalResourceId: cr.PhysicalResourceId.of(Date.now().toString()),
},
onUpdate: {
service: 'EC2',
action: 'describeNetworkInterfaces',
parameters: {
NetworkInterfaceIds: restApiVpcEndpoint.vpcEndpointNetworkInterfaceIds,
},
physicalResourceId: cr.PhysicalResourceId.of(Date.now().toString()),
},
policy: {
statements: [
new iam.PolicyStatement({
actions: ['ec2:DescribeNetworkInterfaces'],
resources: ['*'],
}),
],
},
});
// create API Gateway
const restApiGateway = new apigateway.LambdaRestApi(this, 'RestApiGateway', {
handler: apiLambda,
proxy: false,
endpointConfiguration: {
types: [apigateway.EndpointType.PRIVATE],
vpcEndpoints: [restApiVpcEndpoint],
},
domainName: {
domainName: FULL_DNS_RECORD_NAME,
certificate: certificate,
},
// Resource policy: explicit DENY for any call NOT arriving via the VPC
// endpoint, plus a blanket ALLOW. The DENY wins for non-endpoint traffic.
policy: new iam.PolicyDocument({
statements: [
new iam.PolicyStatement({
principals: [new iam.AnyPrincipal()],
actions: ['execute-api:Invoke'],
resources: ['execute-api:/*'],
effect: iam.Effect.DENY,
conditions: {
StringNotEquals: {
'aws:SourceVpce': restApiVpcEndpoint.vpcEndpointId,
},
},
}),
new iam.PolicyStatement({
principals: [new iam.AnyPrincipal()],
actions: ['execute-api:Invoke'],
resources: ['execute-api:/*'],
effect: iam.Effect.ALLOW,
}),
],
}),
deployOptions: {
dataTraceEnabled: true,
tracingEnabled: true,
},
});
// workaround for private api gateway with custom domain
const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {
vpc: vpc,
internetFacing: false,
});
const lbHttpsListener = lb.addListener('lbListener', {
port: 443,
certificates: [certificate],
});
// NOTE(review): these are the VPC endpoint ENIs' private IPs; if the endpoint
// is ever recreated the IPs change and the target group goes stale — verify.
const ip1 = eni.getResponseField('NetworkInterfaces.0.PrivateIpAddress');
const ip2 = eni.getResponseField('NetworkInterfaces.1.PrivateIpAddress');
// NOTE(review): the default target-group health check hits '/' and expects
// 200-399; the endpoint answers 403 (ForbiddenException, as seen in the curl
// output below) for requests without a matching Host header, which would
// explain the Unhealthy targets and the 504 — TODO confirm and adjust the
// health-check matcher or path accordingly.
lbHttpsListener.addTargets('Target', {
port: 443,
targets: [new albTargets.IpTarget(ip1), new albTargets.IpTarget(ip2)],
});
// Alias api.internal.domain.com -> the internal ALB in the private zone.
new route53.ARecord(this, 'CustomDomainAliasRecord', {
zone: privateHostedZone,
recordName: API_HOST_RECORD,
target: route53.RecordTarget.fromAlias(new targets.LoadBalancerTarget(lb)),
});
After deployment, I can successfully curl directly to the API Gateway from an EC2 instance in the same VPC. I can also resolve the custom DNS host (api.internal.domain.com).
But when I curl api.internal.domain.com, I get a 504 Gateway Time-out. Starting from Route 53, the alias to the load balancer is configured correctly. The load balancer targets have the correct IP addresses, but all health checks are Unhealthy.
Also, when I curl the IPs directly (curl -v -k https://10.0.176.185), I get the following response.
< HTTP/1.1 403 Forbidden
< Server: Server
< Date: Mon, 06 Jun 2022 02:16:23 GMT
< Content-Type: application/json
< Content-Length: 23
< Connection: keep-alive
< x-amzn-RequestId: aeda494b-a60d-4a42-be14-816e8ff93aa0
< x-amzn-ErrorType: ForbiddenException
< x-amz-apigw-id: TRx6xEYxxxxx // also strange, I don't know which API Gateway ID this is
<
* TLSv1.2 (IN), TLS header, Supplemental data (23):
* Connection #0 to host 10.0.176.185 left intact
{"message":"Forbidden"}
I tried using different headers, setting API keys and usage plans, messing with the resource policy, changing the health check to allow 200-499. Nothing seems to work. Hoping someone dealt with this issue before.
I have a strong suspicion that something is not right between the VPC endpoint and Private API gateway, but can't find any solutions.
References:
https://gist.github.com/skorfmann/6941326b2dd75f52cb67e1853c5f8601
https://github.com/aws-samples/serverless-samples/tree/main/apigw-private-custom-domain-name
https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-private-api-test-invoke-url.html

Flask app is giving proxy error: ENOTFOUND

Trying to run a Flask project which uses Grunt.
Gruntfile.js has following configuration:
connect: {
options: {
port: 9000,
// Change this to '0.0.0.0' to access the server from outside.
//hostname: 'localhost',
hostname: '0.0.0.0',
livereload: 35728
},
proxies: [{
context: '/api',
host: 'backend',
port: 5000,
changeOrigin: true
}],
app.py has following:
app.run(host='127.0.0.1', port='9000', debug=True) #host='0.0.0.0'
ServerURL has following configuration:
.constant('serverURL', 'http://127.0.0.1:9000/api');
Client shows this:
Started connect web server on http://0.0.0.0:9000
But in Client window I receive this:
Running "watch" task
Waiting...
>> Proxy error: ENOTFOUND
>> Proxy error: ENOTFOUND
Could anyone tell me what is the reason behind this?
In the proxies, try changing the host from backend to 0.0.0.0.
connect: {
options: {
port: 9000,
// Change this to '0.0.0.0' to access the server from outside.
//hostname: 'localhost',
hostname: '0.0.0.0',
livereload: 35728
},
proxies: [{
context: '/api',
host: '0.0.0.0',
port: 5000,
changeOrigin: true
}],

Connection refused when connecting to redis on EC2 instance

I am trying to connect to local redis database on EC2 instance from a lambda function. However when I try to execute the code, I get the following error in the logs
{
"errorType": "Error",
"errorMessage": "Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
"code": "ECONNREFUSED",
"stack": [
"Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
" at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1106:14)"
],
"errno": "ECONNREFUSED",
"syscall": "connect",
"address": "127.0.0.1",
"port": 6379
}
The security group has the following entries
Type: Custom TCP Rule
Port: 6379
Source: <my security group name>
Type: Custom TCP Rule
Port: 6379
Source: 0.0.0.0/0
My Lambda function has the following code.
'use strict';
const Redis = require('redis');
// Lambda handler: writes a test key to Redis and returns a JSON response.
module.exports.hello = async event => {
// NOTE(review): 127.0.0.1 here is the Lambda execution environment itself,
// not the EC2 instance running Redis — presumably the cause of ECONNREFUSED.
var redis = Redis.createClient({
port: 6379,
host: '127.0.0.1',
password: ''
});
redis.on('connect', function(){
console.log("Redis client conected : " );
});
// Callback-style command; it is not awaited, so the handler can return
// before the write completes — TODO confirm this is intended.
redis.set('age', 38, function(err, reply) {
console.log(err);
console.log(reply);
});
return {
statusCode: 200,
body: JSON.stringify(
{
message: 'The lambda function is called..!!',
input: event,
// NOTE(review): without a callback this likely yields the command-queue
// boolean, not the stored value — verify against the redis client docs.
redis: redis.get('age')
},
null,
2
),
};
};
Please let me know where I am going wrong.
First thing: your Lambda is trying to connect to localhost, so this will not work. You have to use the public or private IP address of the Redis instance.
But you still need to make sure of these things:
The Lambda must be in the same VPC as your EC2 instance
Its security group should allow outbound traffic
It must be assigned a subnet
The instance's security group must allow the Lambda to connect to Redis
// Answer example: connect to the Redis instance by its IP, not localhost.
const redis = require('redis');
const redis_client = redis.createClient({
// Replace with the Redis instance's public or private IP address.
host: 'you_instance_IP',
port: 6379
});
// Callback-style Lambda handler: set a key, read it back, return the value.
exports.handler = (event, context, callback) => {
redis_client.set("foo", "bar");
redis_client.get("foo", function(err, reply) {
// unref() lets the process exit without explicitly closing the socket.
redis_client.unref();
callback(null, reply);
});
};
You can also look into this how-should-i-connect-to-a-redis-instance-from-an-aws-lambda-function
On Ubuntu Server 20.04 LTS I saw a similar error after rebooting the EC2 server. In our case the server runs an Express app started via a cron job — a Node.js app (installed with nvm) that uses Passport.js with sessions stored in Redis:
Redis error: Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379
at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1144:16) {
errno: 'ECONNREFUSED',
code: 'ECONNREFUSED',
syscall: 'connect',
address: '127.0.0.1',
port: 6379
}
What resolved it for me — since my Node.js app runs as the ubuntu user, I needed to make the Node binary path available to cron — was appending its location to the PATH inside /etc/crontab:
sudo nano /etc/crontab — comment out the original PATH line in there so you can switch back if required (my original PATH was: PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin) and append the location of the bin directory you need, in the format:
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/home/ubuntu/.nvm/versions/node/v12.20.0/bin
And the error disappeared for me
// redisInit.js
// Express session middleware backed by Redis.
const session = require('express-session');
const redis = require('redis');
// NOTE(review): calling require('connect-redis')(session) is the
// connect-redis v3/v4 API — newer major versions changed this — verify.
const RedisStore = require('connect-redis')(session);
const { redisSecretKey } = process.env;
// No host/port given: defaults to 127.0.0.1:6379 on the same machine.
const redisClient = redis.createClient();
redisClient.on('error', (err) => {
console.log('Redis error: ', err);
});
const redisSession = session({
secret: redisSecretKey,
name: 'some_redis_store_name',
resave: true,
saveUninitialized: true,
// secure: false — cookie is also sent over plain HTTP.
cookie: { secure: false },
store: new RedisStore(
{
// ttl is in seconds: sessions expire after one day.
host: 'localhost', port: 6379, client: redisClient, ttl: 86400
}
)
});
module.exports = redisSession;

Meteor deploy error (mup): pushing meteor app bundle to server failed

I am trying to deploy a meteor app to an AWS server, but am getting this message:
Started TaskList: Configuring App
[52.41.84.125] - Pushing the Startup Script
nodemiral:sess:52.41.84.125 copy file - src: /
Users/Olivia/.nvm/versions/node/v7.8.0/lib/node_modules/mup/lib/modules/meteor/assets/templates/start.sh, dest: /opt/CanDu/config/start.sh, vars: {"appName":"CanDu","useLocalMongo":0,"port":80,"bind":"0.0.0.0","logConfig":{"opts":{"max-size":"100m","max-file":10}},"docker":{"image":"abernix/meteord:base","imageFrontendServer":"meteorhacks/mup-frontend-server","imagePort":80},"nginxClientUploadLimit":"10M"} +0ms
[52.41.84.125] x Pushing the Startup Script: FAILED Failure
Previously I had been able to deploy using mup, but now I am getting this message. The only major thing I've changed is the Python path in my .noderc. I am also able to SSH into my amazon server directly from the terminal. My mup file is:
module.exports = {
servers: {
one: {
host: '##.##.##.###',
username: 'ec2-user',
pem: '/Users/Olivia/.ssh/oz-pair.pem'
// password:
// or leave blank for authenticate from ssh-agent
}}meteor: {
name: 'CanDu',
path: '/Users/Olivia/repos/bene_candu_v2',
servers: {
one: {}
},
buildOptions: {
serverOnly: true,
mobileSettings: {
public: {
"astronomer": {
"appId": "<key>",
"disableUserTracking": false,
"disableRouteTracking": false,
"disableMethodTracking": false
},
"googleMaps": "<key>",
"facebook":{
"permissions":["email","public_profile","user_friends"]
}
},
},
},
env: {
ROOT_URL: 'http://ec2-##-##-##-###.us-west-2.compute.amazonaws.com',
MONGO_URL: 'mongodb://. . . "
},
/*ssl: {
crt: '/opt/keys/server.crt', // this is a bundle of certificates
key: '/opt/keys/server.key', // this is the private key of the certificate
port: 443, // 443 is the default value and it's the standard HTTPS port
upload: false
},*/
docker: {
image: 'abernix/meteord:base'
},
deployCheckWaitTime: 60
}
};
And I have checked to make sure there are no trailing commas, and have tried increasing the wait time, etc. The error message I'm getting is pretty unhelpful. Does anyone have any insight? Thank you so much!