Adonis: How to connect with SSL (with a certificate.crt file) to MySQL? - adonis.js

I just created a Managed Database on Digital Ocean that requires an SSL connection. How can I do that with Adonis?

This code works for me:
/**************************************************************************
* IMPORTS
***************************************************************************/
// NPM
const fs = require('fs')
// Providers
const Env = use('Env')
/**************************************************************************
* CONFIG > DATABASE
***************************************************************************/
// MySQL connection settings, each overridable through environment variables.
const mysqlConnection = {
  host: Env.get('DB_HOST', 'localhost'),
  port: Env.get('DB_PORT', ''),
  user: Env.get('DB_USER', 'root'),
  password: Env.get('DB_PASSWORD', ''),
  database: Env.get('DB_DATABASE', 'adonis'),
}

// Knex-style database configuration consumed by Adonis' Database provider.
const config = {
  connection: Env.get('DB_CONNECTION', 'mysql'),
  mysql: {
    client: 'mysql',
    connection: mysqlConnection,
    debug: Env.get('DB_DEBUG', false),
  },
}

// Add certificate for production environment
if (Env.get('NODE_ENV') === 'production') {
  config.mysql.connection.ssl = {
    ca: fs.readFileSync(__dirname + '/certs/ca-database.crt'),
  }
}

module.exports = config

Related

Error: Expected private key to be an Uint8Array with length 32

Following the guide from https://ethereum.org/vi/developers/tutorials/hello-world-smart-contract/
I am getting this error when trying to run my deploy script. I am absolutely lost as to why this is not working as I have copied every piece of code directly from the guide.
My hardhat.config.js
require('dotenv').config();
require("#nomiclabs/hardhat-ethers");
const { API_URL, PRIVATE_KEY } = process.env;
/**
* #type import('hardhat/config').HardhatUserConfig
*/
module.exports = {
solidity: "0.7.3",
defaultNetwork: "ropsten",
networks: {
hardhat: {},
ropsten: {
url: API_URL,
accounts: [`0x${PRIVATE_KEY}`]
}
},
}
My deploy.js
// Deploys the HelloWorld contract and prints the address it lands on.
// `ethers` is injected globally by the @nomiclabs/hardhat-ethers plugin.
async function main() {
  const factory = await ethers.getContractFactory("HelloWorld");
  // Start deployment, returning a promise that resolves to a contract object
  const deployed = await factory.deploy("Hello World!");
  console.log("Contract deployed to address:", deployed.address);
}

main()
  .then(() => process.exit(0))
  .catch((error) => {
    console.error(error);
    process.exit(1);
  });
my .env
API_URL = "https://eth-ropsten.alchemyapi.io/v2/[REDACTED]"
PRIVATE_KEY = "[REDACTED]" // my private key goes here, not including the 0x
It compiles fine but gives me the error when I use the command
npx hardhat run scripts/deploy.js --network ropsten
You don't need the 0x in the private key, just put the exact key you got from metamask :)
https://github.com/ethereumjs/ethereumjs-tx
As per usage example we need to add chain name while creating Transaction.
const tx = new Tx(txObject, { chain: 'rinkeby' })

How to set up a Socket.io server in AWS EC2 and then connect remotely to the server?

Good morning.
I created a simple server for a chat using Socket.io. In the next screen, which would be the client interface, you can see some inputs
In localhost, in my computer works fine. Here are the logs messages from the server:
I uploaded the server to an EC2 instance with Ubuntu, and it works fine there in the same way.
Logs from the server in EC2
The main problem is when I tried to connect from my computer to the Socket.io server in EC2. I got this error from my client side file in my computer:
socket.io.js:3888 GET
https://mydomain:80/socket.io/?EIO=4&transport=polling&t=NQB59WG
Here is my server code:
// Express app serving the static chat client, with Socket.IO attached on top.
const path = require("path");
const express = require("express");
const socketio = require("socket.io");

const app = express();
app.set('port', 3000);

// Static files
app.use(express.static(path.join(__dirname, "public")));

// Start the HTTP server first so Socket.IO can attach to it.
const server = app.listen(app.get('port'), function () {
  console.log("server on port", app.get('port'));
});

const io = socketio(server);

// Websockets
io.on('connection', function (socket) {
  console.log("new connection " + socket.id);

  // Chat messages are echoed to every connected client, sender included.
  socket.on('chat:message', function (data) {
    console.log(data);
    io.sockets.emit("chat:server", data);
  });

  // Typing notifications go to everyone except the sender.
  socket.on('chat:typing', function (data) {
    console.log(data);
    socket.broadcast.emit("chat:typing", data);
  });
});
Here is my client code:
// Socket.IO chat client — wires the DOM inputs to the chat events.
const socket = io("https://domain:80");

// DOM elements
const message = document.getElementById("message");
const username = document.getElementById("username");
const btn = document.getElementById("button");
const output = document.getElementById("output");
const actions = document.getElementById("actions");

// Send the current username/message pair to the server.
btn.addEventListener('click', () => {
  const payload = {
    username: username.value,
    message: message.value
  };
  console.log(payload);
  socket.emit("chat:message", payload);
});

// Tell everyone else this user is typing.
message.addEventListener('keypress', () => {
  socket.emit("chat:typing", username.value);
});

// A chat message arrived (possibly our own, echoed back by the server).
socket.on("chat:server", (data) => {
  console.log(data);
  actions.innerHTML = "";
  output.innerHTML += "<p><strong>" + data.username + "</strong>: " + data.message + " </p>";
});

// Someone else is typing.
socket.on("chat:typing", (data) => {
  console.log(data);
  actions.innerHTML = "<p><strong>" + data + "</strong> esta escribiendo </p>";
});
And here are the instance inboud rules for ports:
When I tried to connect from my computer to the EC2 instance, I tried several ways to connect, like this:
const socket = io("https://url.org:80");
const socket = io("https://url.org");
const socket = io("https://ipaddres:80");
const socket = io("https://ipaddres");
const socket = io("ec2-xxxx.compute-1.amazonaws.com");
const socket = io("ec2-xxxx.compute-1.amazonaws.com:80");
Nothing works. Any help?
In my case the solution was to do two things:
I used my domain without a port:
const socket = io("https://url.org");
The EC2 instance works with nginx; I modified the header to add this property:
Access-Control-Allow-Origin: *
And it worked

How to dynamically change Apollo Web Socket Link URI?

Currently I've set up Apollo's web socket link like so:
// WebSocket transport for GraphQL subscriptions. The auth token travels
// twice: in the URI query string and in the connectionParams payload.
const wsLink = new WebSocketLink({
  // NOTE(review): getToken() is evaluated once here, at link construction —
  // a token that expires later is not refreshed in this URI.
  uri: `ws://example.com/graphql?token=${getToken()}`,
  options: {
    reconnect: true,
    // Re-evaluated on each (re)connect, so this side carries a fresh token.
    connectionParams: (): ConnectionParams => ({
      authToken: getToken(),
    }),
  },
});
This works fine while the connection lasts, but fails when the connection needs to be re-established if the token in the query string has expired.
The way the infra I'm dealing with is set up requires this token to be set as a query param in the URI. How can I dynamically change the URI so that I may provide a new token when the connection needs to be re-established?
You can set property wsLink.subscriptionClient.url manually (or create a new subscriptionClient instance?) in function setContext https://www.apollographql.com/docs/link/links/context/.
For example:
// Pseudocode sketch ("..." marks elided code): refresh the websocket URL
// before each operation via an apollo-link-context link placed in front of
// the websocket link.
import { setContext } from 'apollo-link-context'
...
const wsLink = your code...
// Mutate the underlying SubscriptionClient's url so the NEXT (re)connect
// uses a freshly generated token.
const authLink = setContext(() => {
wsLink.subscriptionClient.url = `ws://example.com/graphql?token=${getToken()}`
})
...
// authLink must come before wsLink in the chain so the URL is updated
// before the websocket link handles the operation.
const config = {
link: ApolloLink.from([
authLink,
wsLink
]),
...
}

Cherrypy configuration for cherrypy.quickstart

This is the configuration for the /projects endpoint. What modification do I have to make to add an endpoint "/projects/all" on the same port?
# CherryPy site configuration: one 'global' block for the server socket plus
# per-mount-point settings for the /projects endpoint.
conf = {
    'global': {
        'server.socket_host': "127.0.0.1",
        'server.socket_port': config.SERVER_PORT,
    },
    '/projects': {
        # Dispatch on HTTP verb (GET/POST/...) instead of URL path segments.
        'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
        'tools.response_headers.on': True,
        'tools.response_headers.headers': [('Content-Type', 'application/json')],
    },
}

webapp = ApiApp()
# Exposes ProjectsApi under the /projects URL.
webapp.projects = ProjectsApi()
cherrypy.quickstart(webapp, '/', conf)

Rewrite cookie paths when using grunt-connect-proxy

During development I use grunt-connect-proxy to make some remote APIs locally available. This works just fine, except that the rewrite rules that I use are not applied to cookies:
// Requests whose URL starts with /publicPath are forwarded to localhost:8080.
proxies: [{
context: '/publicPath',
host: 'localhost',
port: 8080,
rewrite: {
// Rewrites only the REQUEST URL — Set-Cookie paths in the response are
// not touched, which is the problem described below.
'^/publicPath': '/privatePath'
}
}]
If the remote API sets a cookie with path /privatePath it must be rewritten to /publicPath.
E.g. When using Apache httpd I'd use the ProxyPassReverseCookiePath-Directive. How do I do it with grunt-connect-proxy?
Thanks to this answer in "Rewrite response headers with node-http-proxy" I managed to figure it out. I created the following middleware:
/**
 * Connect middleware that rewrites the path inside Set-Cookie response
 * headers, so cookies issued by the backend for /privatePath become valid
 * under the publicly exposed /publicPath prefix — the middleware equivalent
 * of Apache's ProxyPassReverseCookiePath directive.
 *
 * @param {Object} req  incoming request (only req.url is read)
 * @param {Object} res  response; its writeHead is wrapped for proxy requests
 * @param {Function} next  next middleware in the chain
 */
function rewriteSetCookie(req, res, next) {
    // Only touch responses for requests that actually go through the proxy.
    var isProxyRequest = req.url.lastIndexOf('/publicPath', 0) === 0;
    if (isProxyRequest) {
        // Intercept writeHead so headers can be swapped just before they
        // are written out.
        var oldWriteHead = res.writeHead;
        res.writeHead = function () {
            var cookie = res.getHeader('Set-Cookie');
            if (cookie) {
                // getHeader returns a plain string when a single cookie was
                // set and an array for several — normalize before mapping,
                // otherwise .map() throws on the single-cookie case.
                var cookies = Array.isArray(cookie) ? cookie : [cookie];
                res.setHeader('Set-Cookie', cookies.map(function (item) {
                    // Simple first-occurrence replace; adjust if the path
                    // string can legitimately appear elsewhere in the value.
                    return item.replace(/\/privatePath/, '/publicPath');
                }));
            }
            oldWriteHead.apply(res, arguments);
        };
    }
    next();
}
Just for reference here is the full configuration so that you can see how to use the middleware:
// grunt-contrib-connect configuration showing where the middleware plugs in.
connect: {
server: {
options: {
hostname: 'localhost',
base: 'myBaseDir',
open: true,
middleware: function (connect, options) {
if (!Array.isArray(options.base)) {
options.base = [options.base];
}
// Setup the proxy. rewriteSetCookie is placed before proxySnippet so its
// writeHead hook is installed before the proxy writes the response.
var middlewares = [rewriteSetCookie, proxySnippet];
// ^^^^^^^^^^^^^^^^- Here it is used!
// Serve static files.
options.base.forEach(function(base) {
middlewares.push(connect.static(base));
});
// Make directory browse-able.
var directory = options.directory || options.base[options.base.length - 1];
middlewares.push(connect.directory(directory));
return middlewares;
}
},
proxies: [{
context: '/publicPath',
host: 'localhost',
port: 8080,
rewrite: {
'^/publicPath': '/privatePath'
}
}]
}
}