Deploying EmberJS to AWS using SSH + RSync

I've managed to deploy a simple todo app onto AWS with S3 using this site:
http://emberigniter.com/deploy-ember-cli-app-amazon-s3-linux-ssh-rsync/
However, when I attempt the next step in the tutorial (Deploying with SSH and Rsync), I run into the following error:
gzipping **/*.{js,css,json,ico,map,xml,txt,svg,eot,ttf,woff,woff2}
ignoring null
✔ assets/ember-user-app-d41d8cd98f00b204e9800998ecf8427e.css
✔ assets/vendor-d41d8cd98f00b204e9800998ecf8427e.css
✔ assets/ember-user-app-45a9825ab0116a8007bb48645b09f060.js
✔ crossdomain.xml
✔ robots.txt
✔ assets/vendor-d008595752c8e859a04200ceb9a77874.js
gzipped 6 files ok
|
+- upload
| |
| +- rsync
- Uploading using rsync...
- Permission denied (publickey,gssapi-keyex,gssapi-with-mic).
rsync: connection unexpectedly closed (0 bytes received so far) [sender]
rsync error: unexplained error (code 255) at /BuildRoot/Library/Caches/com.apple.xbs/Sources/rsync/rsync-47/rsync/io.c(453) [sender=2.6.9]
The following is my config/deploy.js
module.exports = function(deployTarget) {
  var ENV = {
    build: {
      environment: deployTarget
    },
    's3-index': {
      accessKeyId: "<myKeyID>",
      secretAccessKey: "<mySecret>",
      bucket: "emberjsft",
      region: "ap-southeast-1",
      allowOverwrite: true
    },
    's3': {
      accessKeyId: "<myKeyID>",
      secretAccessKey: "<mySecret>",
      bucket: "emberjsft",
      region: "ap-southeast-1"
    },
    'ssh-index': {
      remoteDir: "/var/www/",
      username: "ec2-user",
      host: "ec2-<elastic-ip>.ap-southeast-1.compute.amazonaws.com",
      privateKeyFile: "/Users/imac/MY_AWS_PEMFILE.pem",
      allowOverwrite: true
    },
    rsync: {
      dest: "/var/www/",
      username: "ec2-user",
      host: "ec2-<elastic-ip>.ap-southeast-1.compute.amazonaws.com",
      delete: false
    }
    // include other plugin configuration that applies to all deploy targets here
  };

  if (deployTarget === 'development') {
    ENV.build.environment = 'development';
    // configure other plugins for development deploy target here
  }

  if (deployTarget === 'staging') {
    ENV.build.environment = 'production';
    // configure other plugins for staging deploy target here
  }

  if (deployTarget === 'production') {
    ENV.build.environment = 'production';
    // configure other plugins for production deploy target here
  }

  // Note: if you need to build some configuration asynchronously, you can return
  // a promise that resolves with the ENV object instead of returning the
  // ENV object synchronously.
  return ENV;
};
How should I resolve this issue?
Thanks

I've just spent the last hour fighting the same issue as you. I was able to mostly fix it by running ssh-add /home/user/.ssh/example-key.pem and removing privateKeyFile from the config.
I still get an error thrown after the transfer ends, but I can confirm all files successfully transferred to my EC2 box despite the error.
deploy.js
module.exports = function (deployTarget) {
  var ENV = {
    build: {
      environment: deployTarget
    },
    'ssh-index': {
      remoteDir: "/var/www/",
      username: "ubuntu",
      host: "52.xx.xx.xx",
      allowOverwrite: true
    },
    rsync: {
      host: "ubuntu@52.xx.xx.xx",
      dest: "/var/www/",
      recursive: true,
      delete: true
    }
  };
  return ENV;
};

In your deploy.js file you need to put your actual credentials in place of the accessKeyId placeholder. The same applies to secretAccessKey, and for host you need to put your Elastic IP address.

myKeyID and mySecret should live in a .env file and be accessed here via process.env.myKeyID and process.env.mySecret.
It is not good practice to hard-code the keys in the deploy.js file.
Best practice would be to read them from a secret store such as Consul.
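As a minimal sketch of the .env approach (assuming the dotenv package is installed; the variable names mirror the placeholders above, and the .env file is never committed to version control):
// config/deploy.js — sketch assuming `npm install dotenv` and a .env file
// containing lines like `myKeyID=...` and `mySecret=...`
require('dotenv').config();

module.exports = function(deployTarget) {
  var ENV = {
    's3-index': {
      accessKeyId: process.env.myKeyID,
      secretAccessKey: process.env.mySecret,
      // ...rest of the s3-index config unchanged
    },
    's3': {
      accessKeyId: process.env.myKeyID,
      secretAccessKey: process.env.mySecret,
      // ...rest of the s3 config unchanged
    }
  };
  return ENV;
};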

Amplify GraphQL No Credentials Error in NextJs Application

My Next.js application works perfectly fine locally, but after pushing the frontend to AWS I'm getting the runtime error No Credentials in my callback.js API route.
There is no build-time error, so I assume the aws-exports.js file is generated correctly during the build, but I don't know whether it is built with the required details, like the API key, on AWS.
I'm using API key authentication by default. I'm not using amplify add auth because I have a requirement to use custom auth. I know Amplify auth is the recommended way, but I still need to use my custom method.
I have already tried multiple suggestions, like disabling Analytics as suggested in a couple of other discussions, but none of them worked for me. I have built my project multiple times from scratch, re-installing all the dependencies, but no luck.
callback.js API
import { API, graphqlOperation } from 'aws-amplify';
import { getAuth } from "../../../../src/graphql/queries";
import { createAuth } from "../../../../src/graphql/mutations";

export default async function callback(req, res) {
  const record = await API.graphql(graphqlOperation(getAuth, { emailId: "abc@gmail.com" }));
  res.status(200).json({ record });
}
aws-exports.js
/* eslint-disable */
// WARNING: DO NOT EDIT. This file is automatically generated by AWS Amplify. It will be overwritten.
const awsmobile = {
  "aws_project_region": "us-east-1",
  "aws_appsync_graphqlEndpoint": "https://dummyUrl.appsync-api.us-east-1.amazonaws.com/graphql",
  "aws_appsync_region": "us-east-1",
  "aws_appsync_authenticationType": "API_KEY",
  "aws_appsync_apiKey": "da2-************"
};
export default awsmobile;
_app.js
import { Amplify } from 'aws-amplify';
import config from "../aws-exports";

Amplify.configure(config);

function MyApp({ Component, pageProps: { session, ...pageProps } }) {
  // App logic
}

export default MyApp;
GraphQL Schema
type Auth @model @auth(rules: [{ allow: public }]) {
  emailId: ID! @primaryKey
  name: String
  screen_name: String
  profile_img: String
  userSession: String
  tokenType: String
  accessToken: String
  accessSecret: String
  refreshToken: String
  accessScope: String
}
Package.json
"dependencies": {
"#emoji-mart/data": "^1.0.6",
"#emoji-mart/react": "^1.0.1",
"aes256": "^1.1.0",
"aws-amplify": "^4.3.37",
"emoji-mart": "^5.2.2",
"formidable": "^2.0.1",
"js-cookie": "^3.0.1",
"next": "12.3.1",
"react": "18.2.0",
"react-datepicker": "^4.8.0",
"react-dom": "18.2.0",
},
Amplify.yml
version: 1
backend:
  phases:
    build:
      commands:
        - '# Execute Amplify CLI with the helper script'
        - amplifyPush --simple
frontend:
  phases:
    preBuild:
      commands:
        - yarn install
    build:
      commands:
        - yarn run build
  artifacts:
    baseDirectory: .next
    files:
      - '**/*'
  cache:
    paths:
      - node_modules/**/*
Edited
I've found how the server-side process works with Amplify and GraphQL. Please refer to this. On the server side, you need to pass the API key explicitly into the GraphQL request, as that page describes.
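A minimal sketch of that fix for the callback.js route above, assuming aws-amplify v4, where API.graphql accepts an authMode option (the GRAPHQL_AUTH_MODE enum from @aws-amplify/api is the typed equivalent of the string used here):
import { API } from 'aws-amplify';
import { getAuth } from "../../../../src/graphql/queries";

export default async function callback(req, res) {
  // Force API key auth for this server-side call instead of
  // letting Amplify look for (missing) user credentials.
  const record = await API.graphql({
    query: getAuth,
    variables: { emailId: "abc@gmail.com" },
    authMode: 'API_KEY',
  });
  res.status(200).json({ record });
}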
==========
I'm using AppSync myself, but with pure AppSync directives, so let me just provide the reference below. Please confirm your rule follows this form:
{ allow: public, provider: apiKey }
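Applied to the Auth type from the question, only the directive line changes; a sketch (verify the provider field against your Amplify CLI version):
type Auth @model @auth(rules: [{ allow: public, provider: apiKey }]) {
  emailId: ID! @primaryKey
  # ...remaining fields unchanged
}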

In Pulumi, when defining a GCP CloudBuild Trigger, what do I use as kmsKeyName for a managed secret?

My goal is to create a GCP CloudBuild Trigger using Pulumi. I'm using the TypeScript client.
When creating a Google-managed secret (as opposed to customer-managed) I don't use KMS.
What would I put into the required (!) variable build.secrets[0].kmsKeyName? This is trivial when using KMS, but I found no "default" or "global" KMS name that would work when running the trigger with a Google-managed secret. I can create the trigger with a "fake" KMS name, but it doesn't run, complaining with:
Failed to trigger build: generic::invalid_argument: invalid build: invalid secrets: kmsKeyName "?WHAT TO PUT HERE?" is not a valid KMS key resource.
Thank you in advance for any suggestions.
import * as gcp from "@pulumi/gcp";

const ghToken = new gcp.secretmanager.Secret("gh-token", {
  secretId: "gh-token",
  replication: {
    automatic: true,
  },
});

const ghTokenSecretVersion = new gcp.secretmanager.SecretVersion("secret-version", {
  secret: ghToken.id,
  secretData: "the-secret-token",
});

const cloudBuild = new gcp.cloudbuild.Trigger("trigger-name", {
  github: {
    owner: "the-org",
    name: "repo-name",
    push: {
      branch: "^main$"
    }
  },
  build: {
    substitutions: {
      "_SERVICE_NAME": "service-name",
      "_DEPLOY_REGION": "deploy-region",
      "_GCR_HOSTNAME": "gcr.io",
    },
    steps: [
      {
        id: "Build",
        name: "gcr.io/cloud-builders/docker",
        entrypoint: "bash",
        args: [
          "-c",
          `docker build --no-cache
            -t $_GCR_HOSTNAME/$PROJECT_ID/$REPO_NAME/$_SERVICE_NAME:$COMMIT_SHA
            --build-arg GH_TOKEN=$$GH_TOKEN
            .
            -f Dockerfile
          `,
        ],
        secretEnvs: ["GH_TOKEN"],
      },
    ],
    tags: ["my-tag"],
    secrets: [
      {
        kmsKeyName: "?WHAT TO PUT HERE?",
        secretEnv: {
          "GH_TOKEN": ghTokenSecretVersion.secretData
        }
      }
    ]
  },
});
I don't think you can use a Secret Manager secret with Cloud Build through Pulumi. I solved it by creating a KMS key and encrypting my data using gcp.kms.SecretCiphertext. Here's what it looks like:
import * as gcp from "@pulumi/gcp";
import * as pulumi from "@pulumi/pulumi";

export const keyRing = new gcp.kms.KeyRing("keyring", {
  location: "global",
}, { protect: true });

export const secretsEncryptionKey = new gcp.kms.CryptoKey("secrets-key", {
  keyRing: keyRing.id,
  rotationPeriod: "100000s",
}, { protect: true });

const config = new pulumi.Config();

export const githubTokenCiphertext = new gcp.kms.SecretCiphertext("github-token", {
  cryptoKey: secretsEncryptionKey.id,
  plaintext: config.requireSecret("github-token"),
});

const cloudBuild = new gcp.cloudbuild.Trigger("trigger-name", {
  github: {...},
  build: {
    ...,
    secrets: [
      {
        kmsKeyName: githubTokenCiphertext.cryptoKey,
        secretEnv: {
          "GH_TOKEN": githubTokenCiphertext.ciphertext,
        }
      }
    ]
  },
});
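As an aside, newer versions of the @pulumi/gcp provider also expose Cloud Build's native Secret Manager integration through build.availableSecrets, which sidesteps KMS entirely for Google-managed secrets. A hedged sketch, assuming a provider version that supports the field (worth verifying against the provider docs); it reuses ghTokenSecretVersion from the question's code:
import * as gcp from "@pulumi/gcp";

const trigger = new gcp.cloudbuild.Trigger("trigger-name", {
  github: { owner: "the-org", name: "repo-name", push: { branch: "^main$" } },
  build: {
    availableSecrets: {
      secretManagers: [{
        env: "GH_TOKEN",
        // Must resolve to the full resource path:
        // projects/<project>/secrets/<secret>/versions/<version>
        versionName: ghTokenSecretVersion.id,
      }],
    },
    steps: [{
      id: "Build",
      name: "gcr.io/cloud-builders/docker",
      entrypoint: "bash",
      args: ["-c", "docker build --build-arg GH_TOKEN=$$GH_TOKEN ."],
      secretEnvs: ["GH_TOKEN"],
    }],
  },
});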

How to setup AWS-SDK credentials in NextJS

I need to upload some files to S3 from a Next.js application. Since this runs server-side, I am under the impression that simply setting environment variables should work, but it doesn't. I know there are alternatives, like assigning a role to EC2, but I want to use an accessKeyId and secretKey.
This is my next.config.js
module.exports = {
  env: {
    //..others
    AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID
  },
  serverRuntimeConfig: {
    //..others
    AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY
  }
}
This is my config/index.js
export default {
  //...others
  awsClientID: process.env.AWS_ACCESS_KEY_ID,
  awsClientSecret: process.env.AWS_SECRET_ACCESS_KEY
}
This is how I use in my code:
import AWS from 'aws-sdk'
import config from '../config'

AWS.config.update({
  accessKeyId: config.awsClientID,
  secretAccessKey: config.awsClientSecret,
});

const S3 = new AWS.S3()

const params = {
  Bucket: "bucketName",
  Key: "some key",
  Body: fileObject,
  ContentType: fileObject.type,
  ACL: 'public-read'
}

await S3.upload(params).promise()
I am getting this error:
Unhandled Rejection (CredentialsError): Missing credentials in config, if using AWS_CONFIG_FILE, set AWS_SDK_LOAD_CONFIG=1
If I hard code the credentials in code, it works fine.
How can I make it work correctly?
Looks like the Vercel docs are currently outdated (they show AWS SDK v2 instead of v3). You can pass a credentials object to the AWS service client when you instantiate it. Use environment variable names that are not reserved, for example by prefixing them with the name of your app.
.env.local
YOUR_APP_AWS_ACCESS_KEY_ID=[your key]
YOUR_APP_AWS_SECRET_ACCESS_KEY=[your secret]
Add these env variables to your Vercel deployment settings (or Netlify, etc) and pass them in when you start up your AWS service client.
import { S3Client } from '@aws-sdk/client-s3'
...
const s3 = new S3Client({
  region: 'us-east-1',
  credentials: {
    accessKeyId: process.env.TRENDZY_AWS_ACCESS_KEY_ID ?? '',
    secretAccessKey: process.env.TRENDZY_AWS_SECRET_ACCESS_KEY ?? '',
  },
})
(note: the ?? '' fallback is an undefined check so TypeScript stays happy)
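Since the question is ultimately about uploading, here is a matching v3 upload sketch (PutObjectCommand; the bucket, key, and fileObject are placeholders carried over from the question). For large files, the Upload helper from @aws-sdk/lib-storage is the closer equivalent of v2's S3.upload():
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3'

const s3 = new S3Client({ /* region and credentials as above */ })

// Single-part v3 counterpart of the question's S3.upload(params).promise()
await s3.send(new PutObjectCommand({
  Bucket: 'bucketName',
  Key: 'some-key',
  Body: fileObject,
  ContentType: fileObject.type,
  ACL: 'public-read',
}))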
Are you possibly hosting this app via Vercel?
As per the Vercel docs, some env variables are reserved by Vercel:
https://vercel.com/docs/concepts/projects/environment-variables#reserved-environment-variables
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
Maybe that's the reason why it is not getting those env vars.
I was able to work around this by adding my custom env variables into .env.local and then referencing those variables:
AWS.config.update({
  'region': 'us-east-1',
  'credentials': {
    'accessKeyId': process.env.MY_AWS_ACCESS_KEY,
    'secretAccessKey': process.env.MY_AWS_SECRET_KEY
  }
});
As a last step, you need to add these in the Vercel UI. This is obviously not an ideal solution and is not recommended by AWS:
https://vercel.com/support/articles/how-can-i-use-aws-sdk-environment-variables-on-vercel
If I'm not mistaken, you want to make AWS_ACCESS_KEY_ID a runtime variable as well. Currently, it is a build-time variable, which won't be accessible in your Node application.
// replace this
env: {
  //..others
  AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID
},

// with this
module.exports = {
  serverRuntimeConfig: {
    //..others
    AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID
  }
}
Reference: https://nextjs.org/docs/api-reference/next.config.js/environment-variables
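One detail the answer leaves implicit: serverRuntimeConfig values are not read from process.env at runtime but through next/config. A minimal sketch, assuming the config above:
// Server-side only: serverRuntimeConfig is empty on the client
import getConfig from 'next/config'
import AWS from 'aws-sdk'

const { serverRuntimeConfig } = getConfig()

AWS.config.update({
  accessKeyId: serverRuntimeConfig.AWS_ACCESS_KEY_ID,
  secretAccessKey: serverRuntimeConfig.AWS_SECRET_ACCESS_KEY,
})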

Connection refused when connecting to redis on EC2 instance

I am trying to connect to local redis database on EC2 instance from a lambda function. However when I try to execute the code, I get the following error in the logs
{
  "errorType": "Error",
  "errorMessage": "Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
  "code": "ECONNREFUSED",
  "stack": [
    "Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379",
    "    at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1106:14)"
  ],
  "errno": "ECONNREFUSED",
  "syscall": "connect",
  "address": "127.0.0.1",
  "port": 6379
}
The security group has the following entries
Type: Custom TCP Rule
Port: 6379
Source: <my security group name>
Type: Custom TCP Rule
Port: 6379
Source: 0.0.0.0/0
My Lambda function has the following code.
'use strict';
const Redis = require('redis');

module.exports.hello = async event => {
  var redis = Redis.createClient({
    port: 6379,
    host: '127.0.0.1',
    password: ''
  });

  redis.on('connect', function() {
    console.log("Redis client connected: ");
  });

  redis.set('age', 38, function(err, reply) {
    console.log(err);
    console.log(reply);
  });

  return {
    statusCode: 200,
    body: JSON.stringify(
      {
        message: 'The lambda function is called..!!',
        input: event,
        redis: redis.get('age')
      },
      null,
      2
    ),
  };
};
Please let me know where I am going wrong.
First thing: your Lambda is trying to connect to localhost, so this will not work. You have to use the public or private IP of the Redis instance.
But you still need to make sure of these things:
The Lambda should be in the same VPC as your EC2 instance
It should allow outbound traffic in its security group
It should be assigned a subnet
Your instance's security group should allow the Lambda to connect to Redis
const redis = require('redis');
const redis_client = redis.createClient({
  host: 'your_instance_IP',
  port: 6379
});

exports.handler = (event, context, callback) => {
  redis_client.set("foo", "bar");
  redis_client.get("foo", function(err, reply) {
    redis_client.unref();
    callback(null, reply);
  });
};
You can also look into this how-should-i-connect-to-a-redis-instance-from-an-aws-lambda-function
On Linux Ubuntu Server 20.04 LTS I was seeing a similar error after a reboot of the EC2 server, which for our use case runs an Express app via a cron job: a Node.js app (installed with nvm) using passport.js with sessions stored in Redis:
Redis error: Error: Redis connection to 127.0.0.1:6379 failed - connect ECONNREFUSED 127.0.0.1:6379
at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1144:16) {
errno: 'ECONNREFUSED',
code: 'ECONNREFUSED',
syscall: 'connect',
address: '127.0.0.1',
port: 6379
}
What resolved it for me was to add the Node bin location to the PATH within /etc/crontab, since my Node.js app runs as the ubuntu user and needed that path available:
sudo nano /etc/crontab
Just comment out the original PATH in there so you can switch back if required (my original PATH was set to PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin) and append the location of the bin you need to refer to, in the format:
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/home/ubuntu/.nvm/versions/node/v12.20.0/bin
And the error disappeared for me
// redisInit.js
const session = require('express-session');
const redis = require('redis');
const RedisStore = require('connect-redis')(session);

const { redisSecretKey } = process.env;
const redisClient = redis.createClient();

redisClient.on('error', (err) => {
  console.log('Redis error: ', err);
});

const redisSession = session({
  secret: redisSecretKey,
  name: 'some_redis_store_name',
  resave: true,
  saveUninitialized: true,
  cookie: { secure: false },
  store: new RedisStore({
    host: 'localhost', port: 6379, client: redisClient, ttl: 86400
  })
});

module.exports = redisSession;
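For completeness, a minimal sketch of how that module might be wired into the Express app (the file paths and port are hypothetical):
// app.js — usage sketch for the redisInit.js module above
const express = require('express');
const redisSession = require('./redisInit');

const app = express();
app.use(redisSession);

app.get('/', (req, res) => {
  // Session state now persists in Redis between requests
  req.session.views = (req.session.views || 0) + 1;
  res.send('Views this session: ' + req.session.views);
});

app.listen(3000);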

Meteor deploy error (mup): pushing meteor app bundle to server failed

I am trying to deploy a meteor app to an AWS server, but am getting this message:
Started TaskList: Configuring App
[52.41.84.125] - Pushing the Startup Script
nodemiral:sess:52.41.84.125 copy file - src: /Users/Olivia/.nvm/versions/node/v7.8.0/lib/node_modules/mup/lib/modules/meteor/assets/templates/start.sh, dest: /opt/CanDu/config/start.sh, vars: {"appName":"CanDu","useLocalMongo":0,"port":80,"bind":"0.0.0.0","logConfig":{"opts":{"max-size":"100m","max-file":10}},"docker":{"image":"abernix/meteord:base","imageFrontendServer":"meteorhacks/mup-frontend-server","imagePort":80},"nginxClientUploadLimit":"10M"} +0ms
[52.41.84.125] x Pushing the Startup Script: FAILED Failure
Previously I had been able to deploy using mup, but now I am getting this message. The only major thing I've changed is the Python path in my .noderc. I am also able to SSH into my Amazon server directly from the terminal. My mup file is:
module.exports = {
  servers: {
    one: {
      host: '##.##.##.###',
      username: 'ec2-user',
      pem: '/Users/Olivia/.ssh/oz-pair.pem'
      // password:
      // or leave blank to authenticate from ssh-agent
    }
  },

  meteor: {
    name: 'CanDu',
    path: '/Users/Olivia/repos/bene_candu_v2',
    servers: {
      one: {}
    },
    buildOptions: {
      serverOnly: true,
      mobileSettings: {
        public: {
          "astronomer": {
            "appId": "<key>",
            "disableUserTracking": false,
            "disableRouteTracking": false,
            "disableMethodTracking": false
          },
          "googleMaps": "<key>",
          "facebook": {
            "permissions": ["email", "public_profile", "user_friends"]
          }
        },
      },
    },
    env: {
      ROOT_URL: 'http://ec2-##-##-##-###.us-west-2.compute.amazonaws.com',
      MONGO_URL: 'mongodb://. . .'
    },
    /*ssl: {
      crt: '/opt/keys/server.crt', // this is a bundle of certificates
      key: '/opt/keys/server.key', // this is the private key of the certificate
      port: 443, // 443 is the default value and it's the standard HTTPS port
      upload: false
    },*/
    docker: {
      image: 'abernix/meteord:base'
    },
    deployCheckWaitTime: 60
  }
};
And I have checked to make sure there are no trailing commas, and have tried increasing the wait time, etc. The error message I'm getting is pretty unhelpful. Does anyone have any insight? Thank you so much!