NodeJS: Native c++ module with multi-threading (openmp) - c++

I have a very basic native module for NodeJS which essentially wraps a more complicated c++ program which includes multithreading using OpenMP (it's a Monte Carlo simulation). This c++ program works fine on its own and when called from python using a cython module to wrap the c++ code. However, when I compile it to a native node module (using /OpenMP as the parameter to the MSBuild compiler) it only uses one thread.
The following is my binding.gyp for reference:
{ "targets": [
{
"target_name": "LossForecast",
"sources": [ "NodeLossForecast.cpp", "src/AutoDiff.cpp" ],
"include_dirs":["src", "<!(node -e \"require('nan')\")"],
'conditions': [
['OS=="win"',
{
# MSVC ignores 'cflags' (gyp only applies it to gcc/clang-style
# toolchains), which is why /openmp never reached the compiler and
# the addon ran single-threaded. Flags for cl.exe must go through
# 'msvs_settings'.
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,        # /EHsc
'Optimization': 2,             # /O2
'AdditionalOptions': ['/openmp']
}
}
}
]
]
} ]}

So, I am writing down my files for running a simple node-addon-api pi program using OpenMP. Although node-addon-api is experimental, it will soon be released as an official API for Node.js. It's very simple.
This is for Windows only. And you can confirm whether it is running on multiple processors.
binding.gyp
{
"targets": [
{
"target_name": "nodeOpenMP",
"defines": [
"VERSION=0.0.1",
"NAPI_DISABLE_CPP_EXCEPTIONS"
],
"sources": [
"module.cc"
],
"include_dirs": [
# `echo %cd%` only works under cmd.exe and breaks when node-gyp runs
# from a different working directory; node-addon-api publishes its
# include path directly, which is portable.
"<!@(node -p \"require('node-addon-api').include\")"
],
"conditions": [
[
'OS=="win"',
{
'msvs_settings': {
'VCCLCompilerTool' : {
'AdditionalOptions' : ['/MT','/openmp']
}
}
}
]
]
}
]
}
You have to add VCCLCompilerTool and then under AdditionalOptions add `/openmp` as mentioned above.
Here's a simple PI program that I wrote,
module.cc
#include <napi.h>
#include <omp.h>
#include <iostream>
#include <windows.h> // for GetCurrentProcessorNumber()
int getThreads(){
// Determine the OpenMP team size: every thread in the parallel region
// contributes 1 to the reduction, so the total equals the thread count.
omp_set_num_threads(3);
int threadCount = 0;
#pragma omp parallel reduction(+:threadCount)
threadCount += 1;
return threadCount;
}
// Approximate pi by numerically integrating 4/(1+x^2) over [0,1]
// with an OpenMP parallel-for reduction.
//
// numOfThreads  - requested OpenMP team size
// numberOfSteps - number of rectangles in the midpoint rule
double GetPi(short numOfThreads,long numberOfSteps){
double sum = 0.0;
const double step = 1.0/(double) numberOfSteps;
omp_set_num_threads(numOfThreads);
#pragma omp parallel
{
// Each thread reports which processor it is currently scheduled on.
std::cout << "This thread is running on processor: " << GetCurrentProcessorNumber() << "\n";
#pragma omp for reduction(+:sum)
for(long i = 0; i < numberOfSteps; i++) {
// Midpoint of the i-th sub-interval.
double x = ( i + 0.5 ) * step;
sum += 4.0 / (1 + x*x);
}
}
// The expected sample output shows "...(not processors): 3" — the original
// format string was missing the ": " separator.
std::cout << "Total no. of threads (not processors): " << getThreads() << std::endl;
// sum is already a double; no cast needed.
return step * sum;
}
// JS entry point: pi(numThreads, numSteps) -> number.
// Validates arguments and delegates to GetPi().
Napi::Value calculatePi(const Napi::CallbackInfo& info ){
Napi::Env env = info.Env();
// check for no. of arguments
if(info.Length()<2){
Napi::TypeError::New(env, "Wrong number of arguments").ThrowAsJavaScriptException();
return env.Null();
}
if (!info[0].IsNumber() || !info[1].IsNumber()) {
Napi::TypeError::New(env, "Wrong arguments").ThrowAsJavaScriptException();
return env.Null();
}
// Convert to integral values explicitly instead of taking DoubleValue()
// and silently narrowing through GetPi's short/long parameters.
short numThreads = static_cast<short>(info[0].As<Napi::Number>().Int32Value());
long numSteps = static_cast<long>(info[1].As<Napi::Number>().Int32Value());
double pi = GetPi(numThreads, numSteps);
return Napi::Number::New(env, pi);
}
// Addon initializer: called once when the module is loaded.
Napi::Object Init(Napi::Env env, Napi::Object exports){
// register the functions that are to be exported
// JS sees this as require(...).pi(numThreads, numSteps).
exports.Set(Napi::String::New(env, "pi"), Napi::Function::New(env, calculatePi));
return exports;
}
// Register the addon under the name "nodeOpenMP".
NODE_API_MODULE(nodeOpenMP, Init);
testOMP.js
const omp = require("./build/Release/nodeOpenMP");
const numThreads = 4, numSteps = 1000000;
// Actually invoke the addon's exported pi() — the original only logged the
// two inputs and never called the native function being demonstrated.
console.log(omp.pi(numThreads, numSteps));
Simply copy paste the files above accordingly. Don't forget to install node-addon-api(locally) and node-gyp(globally) and then run:
node-gyp configure build && node --no-warnings testOMP.js
You should get an output like this:
This thread is running on processor: 3
This thread is running on processor: 3
This thread is running on processor: 0
This thread is running on processor: 3
Total no. of threads (not processors): 3
I am thinking of soon making a cross-platform npm package node-openmp.
Follow this repo and contribute. I am open to any contributions.
Himujjal/node-openmp

Related

Debugging CUDA kernels with VSCode

I am trying to debug a CUDA application on VSCode.
Configuration:
Ubuntu 20.04
VSCode 1.56.2
CUDA 11.3
gcc/g++ 9.3
For this, I have the following (up to date) extensions:
C/C++
Nsight Visual Studio Code Edition
I have no problem compiling/running my program. However when I'm debugging, I am able to use the debugger correctly in CPU side but not on GPU side. Indeed, when I'm trying to add a breakpoint anywhere in the kernel, running the program always moves my breakpoint to the closing bracket and I can't see the variables.
Here are the files:
add.cu
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
// Launched as add<<<N, 1>>> (one block of one thread per element),
// so blockIdx.x is the element index.
__global__
void add(float *x, float *y)
{
y[blockIdx.x] = x[blockIdx.x] + y[blockIdx.x];
}
int main(void)
{
const int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
// NOTE(review): the return codes of the CUDA calls below are never
// checked — consider wrapping them for error reporting.
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
// <<<N, 1>>> = N blocks of a single thread each.
add<<<N, 1>>>(x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
CMakeLists.txt
cmake_minimum_required(VERSION 3.10)
# set the project name
project(add CUDA)
# add the executable
add_executable(add add.cu)
# Device-side debugging requires nvcc's -G flag; without it cuda-gdb cannot
# bind breakpoints inside kernels (they slide to the closing brace and
# device variables are invisible).
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_options(add PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:-G>)
endif()
.vscode/launch.json
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
// "cuda-gdb" is supplied by the Nsight Visual Studio Code Edition extension.
"name": "CUDA C++: Launch",
"type": "cuda-gdb",
"request": "launch",
// Assumes an out-of-source build in ./build — confirm it matches your build dir.
"program": "${workspaceFolder}/build/add"
}
]
}
I'm compiling this way:
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Debug ..
cmake --build .
Every program I try does the exact same thing on any kernel breakpoint.
What do I miss here?
I finally solved it by forcing -G flag when in debug mode in cmake, adding the following lines after add_executable:
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
target_compile_options(add PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:-G>)
endif()
With this, debugging on device works.
Thanks @RobertCrovella for the tip.

Can't compile STK (The Synthesis Toolkit) with node-gyp (C++)

I'm trying to make an audio software (a DAW) using Electron to create the window and c++ to play audio / generate audio / apply audio effects.
I have been searching for a simple, powerful, and cross-platform library to play and process audio, and I've found The Synthesis Toolkit and I'm really happy with it.
Here is the code (it's from the STK demo programs):
#include "BeeThree.h"
#include "RtAudio.h"
using namespace stk;
// The TickData structure holds all the class instances and data that
// are shared by the various processing functions.
struct TickData {
Instrmnt *instrument;
StkFloat frequency;
StkFloat scaler;
long counter;
bool done;
// Default constructor.
TickData()
: instrument(0), scaler(1.0), counter(0), done( false ) {}
};
// This tick() function handles sample computation only. It will be
// called automatically when the system needs a new buffer of audio
// samples.
int tick( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
double streamTime, RtAudioStreamStatus status, void *userData )
{
TickData *data = (TickData *) userData;
register StkFloat *samples = (StkFloat *) outputBuffer;
for ( unsigned int i=0; i<nBufferFrames; i++ ) {
*samples++ = data->instrument->tick();
if ( ++data->counter % 2000 == 0 ) {
data->scaler += 0.025;
data->instrument->setFrequency( data->frequency * data->scaler );
}
}
if ( data->counter > 80000 )
data->done = true;
return 0;
}
int main()
{
// Set the global sample rate and rawwave path before creating class instances.
Stk::setSampleRate( 44100.0 );
Stk::setRawwavePath("./engine/rawwaves/");
TickData data;
RtAudio dac;
// Figure out how many bytes in an StkFloat and setup the RtAudio stream.
RtAudio::StreamParameters parameters;
parameters.deviceId = dac.getDefaultOutputDevice();
parameters.nChannels = 1;
RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
unsigned int bufferFrames = RT_BUFFER_SIZE;
try {
dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
}
catch ( RtAudioError& error ) {
error.printMessage();
// Jumping to cleanup is safe here: TickData's ctor sets instrument to 0,
// and deleting a null pointer is a no-op.
goto cleanup;
}
try {
// Define and load the BeeThree instrument
data.instrument = new BeeThree();
}
catch ( StkError & ) {
goto cleanup;
}
data.frequency = 220.0;
data.instrument->noteOn( data.frequency, 0.5 );
try {
dac.startStream();
}
catch ( RtAudioError &error ) {
error.printMessage();
goto cleanup;
}
// Block waiting until callback signals done.
// Each Enter keypress adjusts the pitch scaler heard in the tick() callback.
std::cin.get();
data.scaler = 0.025;
std::cin.get();
data.scaler = -1;
std::cin.get();
// Shut down the callback and output stream.
try {
dac.closeStream();
}
catch ( RtAudioError &error ) {
error.printMessage();
}
cleanup:
delete data.instrument;
return 0;
}
I managed to compile this simple demo program with g++, using this command:
g++ -D__LITTLE_ENDIAN__ -D__LINUX_ALSA__ ./engine/engine.cpp -o ./engine/engi -I./engine/include/ -L./engine/lib/ -lstk -lpthread -lasound -lm
But when I try to compile it into an engine.node file with node-gyp, I get this error:
paulux#Paulux-Laptop:~/Documents/Code/FyneWav$ node-gyp build
/usr/bin/ld : can't find -lstk
collect2: error: ld returned 1 exit status
Here's my binding.gyp file:
{
"targets": [
{
"target_name": "engine",
"sources": ["./engine/engine.cpp"],
"cflags_cc" :["-fexceptions"],
"include_dirs": [
"./engine/include/"
],
'link_settings': {
"libraries": [
"-lpthread", "-lasound" , "-lm",
# node-gyp invokes the linker from the build/ directory, so a
# relative -L path does not resolve ("cannot find -lstk").
# Anchor it to the project root instead of hard-coding an
# absolute, machine-specific path.
"-L<(module_root_dir)/engine/lib", "-lstk"
],
},
"defines": [
"__LITTLE_ENDIAN__", "__LINUX_ALSA__"
]
}
]
}
My folders looks like this:
root
|- package-lock.json
|- package.json
|- README.md
|- binding.gyp
|- 10.1.4 (includes for v8 NodeJS addon)
|- engine
|- engine.cpp
|- include (all include files from *STK/include* archive)
|- lib
|- libstk.a (lib from the *STK/src/Release* archive)
I tried not to link stk in the binding.gyp file, but then I loaded the engine.node module in electron, I got:
Uncaught Error: /home/paulux/Documents/Code/FyneWav/build/Release/engine.node:
undefined symbol: _ZN3stk3Stk17sampleRateChangedEdd
So, the question is:
How can I link stk in node-gyp, and, if we can't, how can I make a engine.node file using other compilers (like g++ for example) ?
Finally! I found it by myself!
The answer is really dumb: in my binding.gyp file, I just had to replace
-L./engine/lib by
-L/home/paulux/Documents/Code/fynewav/engine/lib.
I just had to change from a relative path to an absolute one...
And it took me a day to figure it out...
And I hate myself =)

How to use functions, written on c++, in node.js

I have a basic c++ file.
I have a node server. One of the functions there gets a number, makes calculation and returns another number. And I want to make this function work faster rewriting it on c++. So I want to be able to call the function written on c++ in a .cpp file from .js file.
When I write node index.js all .cpp files should be compiled, then functions from them should be "require();" in .js files and then I want to be able to use them in .js file calling as common functions: e.g. calc(number, param);. How do I do it?
I tried to read some articles about that and watched some videos on YouTube and made something, but when it starts I get a lot of errors like
gyp ERR! find Python Python is not set from command line or npm configuration
gyp ERR! find Python Python is not set from environment variable PYTHON
gyp ERR! find Python checking if "python" can be used
gyp ERR! find Python - "python" is not in PATH or produced an error
As I understand, it wants me to install python on my computer. But what for? I do not need python, I want to compile and execute c++ from node.js.
My implementation:
index.js
var testlib = require('./build/Release/testlib');
// Ask the native addon to add 1000 to 20; the result arrives through a
// node-style (err, res) callback.
testlib.addThousandToNumber(20, function (err, res) {
  if (err) {
    console.error(err);
  } else {
    console.log(res);
  }
});
package.json
{
"name": "testlib",
"version": "1.0.0",
"description": "",
"main": "run.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"install": "node-gyp clean configure build"
},
"author": "",
"license": "ISC",
"dependencies": {
"nan": "^2.14.1",
"node-gyp": "^7.0.0"
}
}
binding.gyp
{
"targets": [{
"target_name": "testlib",
"sources": ["testlib.cpp", "testworker.cpp"],
# The nan include expansion was missing its closing ")" before the quote.
"include_dirs": ["<!(node -e \"require('nan')\")"]
}]
}
testlib.cpp
#include <nan.h>
#include "testworker.h"
// Async addon entry: addThousandToNumber(number, callback).
// Fixes vs. original: method name typo ("addThoudsand..."), missing '>'
// in As<v8::Function>(), export-name case mismatch with the JS caller,
// and the NODU_MODULE -> NODE_MODULE typo.
NAN_METHOD(addThousandToNumber)
{
auto number = Nan::To<int>(info[0]).FromJust();
auto *callback = new Nan::Callback(info[1].As<v8::Function>());
// Queue the work on libuv's thread pool; the callback fires when done.
Nan::AsyncQueueWorker(new TestWorker(number, callback));
}
NAN_MODULE_INIT(init)
{
// Export under exactly the name index.js calls: testlib.addThousandToNumber.
Nan::SetMethod(target, "addThousandToNumber", addThousandToNumber);
}
NODE_MODULE(testlib, init);
testworker.cpp
#include "testworker.h"
// Runs on the libuv worker thread: add 1000 to the stored number.
// Must not touch V8 state from here.
void TestWorker::Execute()
{
for (int i = 0; i < 1000; i++)
{
myNumber++;
}
}
// Runs back on the main thread after Execute() succeeds; delivers the
// result to JS via the node-style (err, result) callback.
void TestWorker::HandleOKCallback()
{
Nan::HandleScope scope;
// Box the computed int into a v8 value. (Original declared `NumberJS`
// but then referenced `numberJS` — a compile error from the case mismatch.)
auto numberJS = Nan::New(myNumber);
v8::Local<v8::Value> argv[] = {Nan::Undefined(), numberJS};
myCallback->Call(2, argv);
}
testworker.h
#include <nan.h>
// Worker that adds 1000 to `number` off the main thread and reports the
// result through `callback`.
class TestWorker : public Nan::AsyncWorker
{
public:
TestWorker(int number, Nan::Callback * callback) :
Nan::AsyncWorker(callback), myNumber(number), myCallback(callback) { }
// Nan::AsyncWorker's own destructor deletes `callback`; deleting it here
// too (as the original did, also missing its ';') would be a double free.
~TestWorker() { }
void Execute();
void HandleOKCallback();
private:
int myNumber;
// Non-owning alias of the base-class callback.
Nan::Callback * myCallback;
};
May be this is what you are looking for: cmake-js
"CMake.js is a Node.js/io.js native addon build tool which works (almost) exactly like node-gyp, but instead of gyp, it is based on CMake build system"
If your primary goal is just to make some high-performance calculations in C++ with the web interface, you might be interested in compiling and linking your function directly into a HTTP engine, like in Node++:
https://github.com/rekmus/nodepp
// Node++ request router: dispatches on the request path.
void npp_app_main()
{
if ( REQ("") ) // landing
{
// ...
}
else if ( REQ("calc") ) // calculate
{
int number;
// get number from the query string:
if ( QSI("number", &number) )
{
int result = calc(number, param);
// return result to the client:
OUT("{\"result\":%d}", result);
// NOTE(review): the content type is set after OUT() writes the body —
// confirm against the Node++ docs that headers may be set after output.
RES_CONTENT_TYPE_JSON;
}
}
else
{
// Unknown path: reply 404.
RES_STATUS(404);
}
}
Besides the calc time, the engine latency is around 5-20 µs on an average PC (Linux). The added benefit is that Node++ is fairly independent (requires only OS, GCC and OpenSSL if you want HTTPS).

NodeJS: node-gyp compile with the equivalent gcc -lm option

I'm new in developing nodejs addons in C++.
I have a static c lib that I include in my addon, the lib is needed to talk to a custom hardware, but some functions of this library uses math.h. So when I compile in a C example I do: gcc main.c libmackrnp6_0_2_fPIC.a -o test -lm, no problem there, but when I include this lib in the cpp addon I have a problem with the pow function.
How can I compile the addon sudo node-gyp configure build with the -lm option (gcc equivalent)?
binding.gyp
{
"targets": [
{
"target_name": "octotuner",
"sources": [
"params.cpp",
# NOTE(review): params.cpp includes "headerV6.h" (capital V) — confirm the
# on-disk file name; the case mismatch matters on case-sensitive filesystems.
"headerv6.h"
],
"libraries": ["/home/nvidia/webrf/api/libmackrnp6_0_2_fPIC.a"],
"link_settings": {
"libraries": [
"-lm"
]
},
}
]
}
params.cpp
#include <node.h>
#include <iostream>
#include <stdio.h>
extern "C" {
#include "headerV6.h"
}
using namespace v8;
using namespace std;
// Calls the vendor library's parametros() and returns its JSON output
// to JavaScript as a V8 string.
void getParams(const FunctionCallbackInfo<Value>& args) {
Isolate* isolate = args.GetIsolate();
int status = 0, numplaca = 0;
unsigned char json[9999] = ""; //<-- the lib function needs this (I know)
unsigned char flagTuner = 0;
int timeOut = 20; //segundos (seconds)
// status is an out-parameter; NOTE(review): it is never checked here —
// consider surfacing failures to JS.
parametros(json, flagTuner, numplaca, timeOut, &status); // <-- this uses pow
std::string sJson(reinterpret_cast<char*>(json));
Local<String> retval = String::NewFromUtf8(isolate, sJson.c_str());
args.GetReturnValue().Set(retval);
}
// Module init: expose getParams to JavaScript as "getRF".
void Initialize(Local<Object> exports) {
NODE_SET_METHOD(exports, "getRF", getParams);
}
// Register the addon under the name "octotuner".
NODE_MODULE(octotuner, Initialize);
teste.js
const rf = require('./build/Release/octotuner');
console.log(rf.getRF())
Testing
sudo node teste.js
Output:
{
"Numero_da_Placa":0,
"Comando_OK":1,
"Tuner0":{
"Canal_Fisico":25,
"QUALIDADE_LA":"-",
"QUALIDADE_LB":"-",
"QUALIDADE_LC":"-",
"BER_LA":"-",
"BER_LB":"-",
"BER_LC":"-",
"Potencia":-1.95,
"SNR":3.9,
"Modo":"8K",
"Intervalo_de_Guarda":"1/8",
"Modulacao_LA":"QPSK",
"Taxa_do_Codigo_LA":"2/3",
"Entrelacamento_LA":400,
"Segmentos_LA":1,
"Modulacao_LB":"64-QAM",
"Taxa_do_Codigo_LB":"3/4",
"Entrelacamento_LB":200,
"Segmentos_LB":3,
"Modulacao_LC":"Error",
"Taxa_do_Codigo_LC":"Error",
"Entrelacamento_LC":"Error",
"Segmentos_LC":"Error"
}
}
C program that uses the same library (not in addon, just for testing):
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "headerV6.h"
int main(int argc, char *argv[]){
int i = 0, status = 0, numplaca = 0;
char json[9999] = {0};
unsigned char flagTuner = 0;
int timeOut = 20; //segundos
parametros(json, flagTuner, numplaca, timeOut, &status);
printf("%s", json);
return 0;
}
Compiled with: gcc params.c libmackrnp6_0_2_fPIC.a -o teste
Compile FAIL (that's why I think the problem with the addon is that it's not linking math lib)
(metadados.o): na função `recoverDataNum':
metadados.c:(.text+0x138): referência indefinida para `pow'
metadados.c:(.text+0x1d8): referência indefinida para `pow'
collect2: error: ld returned 1 exit status
Compiled with: gcc params.c libmackrnp6_0_2_fPIC.a -o teste -lm
Compile OK
Testing:
sudo ./teste
{
"Numero_da_Placa":0,
"Comando_OK":1,
"Tuner0":{
"Canal_Fisico":25,
"QUALIDADE_LA":100.0,
"QUALIDADE_LB":100.0,
"QUALIDADE_LC":"-",
"BER_LA":0.0000e+00,
"BER_LB":0.0000e+00,
"BER_LC":"-",
"Potencia":-19.50,
"SNR":37.9,
"Modo":"8K",
"Intervalo_de_Guarda":"1/8",
"Modulacao_LA":"QPSK",
"Taxa_do_Codigo_LA":"2/3",
"Entrelacamento_LA":400,
"Segmentos_LA":1,
"Modulacao_LB":"64-QAM",
"Taxa_do_Codigo_LB":"3/4",
"Entrelacamento_LB":200,
"Segmentos_LB":12,
"Modulacao_LC":"-",
"Taxa_do_Codigo_LC":"-",
"Entrelacamento_LC":"-",
"Segmentos_LC":"-"
}
}
Solved
I've used the following binding.gyp and I've got a new version of the thirdy part library and it's working now! Thanks.
{
"targets": [
{
"target_name": "octotuner",
"sources": [
"params.cpp"
],
"include_dirs": [
"./so"
],
"link_settings": {
# NOTE(review): the absolute library path is machine-specific; consider
# "<(module_root_dir)/so/libmackrnp6_0_2_fPIC.a" for portability.
"libraries": ["/home/nvidia/webrf/api/so/libmackrnp6_0_2_fPIC.a","-lm"],
}
}
]
}

Pass data from OpenCV C++ to NodeJS/JS | Electron

I am trying to do a POC for Video Processing application with the following stack and struck with passing processed media stream from c++ application to Electron Front end GUI.
Electron
|
Nodejs
|
C++ Application
C++ Application will read the IP/Webcam(using OpenCV only to fetch data) and process the input stream(not with OpenCV). I am trying to figure out a way to send that stream from C++ to Electron GUI(NodeJS/JS) with good fps. Right now I compiled my C++ app using node-gyp and installed it as node package.
Also, I don't want to change my C++ Application too much( like including OpenCV as node package), because later I will use that C++ Application alone for integrating with another application.
The Challenge:
We want to execute our heavy lifting code in a separate worker thread while also sending results (stream data chunks) back to the main thread during execution.
NAN (Native Abstractions for Node.js) already provides an approach to do this with (AsyncProgressWorker).
However, we can not know if the HandleProgressCallback is actually invoked during the execution to send back our results. This can happen when our run time is simply to fast and therefore the callback is never executed.
Proposed Solution:
We simply collect our stream output in a stack (StackCollect). We attempt to clear this stack immediately and send the stream results back to the main thread (if possible) - (StackDrain). If we don't have the time to clear the stack immediately we drain (whats left) at the end of the execution run (HandleOKCallback).
Implementation Example:
demo.cpp (our C++ node/electron addon):
#include <nan.h>
#include <node.h>
#include <v8.h>
#include <iostream>
#include <string>
#include <vector>
#include <mutex>
#include <chrono>
#include <thread>
// Async worker that streams string chunks back to JS while running.
// `result` (base-class callback) fires once at the end; `chunk` fires
// per streamed item, drained from a mutex-guarded stack.
class vSync_File : public Nan::AsyncProgressWorker {
public:
~vSync_File();
vSync_File(Nan::Callback * result, Nan::Callback * chunk);
// Worker-thread entry point; `chunk` is the progress channel to the main thread.
void Execute(const Nan::AsyncProgressWorker::ExecutionProgress& chunk);
void HandleOKCallback();
void HandleProgressCallback(const char *tout, size_t tout_size);
//needed for stream data collection
void StackCollect(std::string & str_chunk, const Nan::AsyncProgressWorker::ExecutionProgress& tchunk);
//drain stack
void StackDrain();
private:
Nan::Callback * chunk;
//stores stream data - use other data types for different output
std::vector<std::string> stack;
//mutex - guards `stack`, which both threads touch
std::mutex m;
};
// `result` is handed to the AsyncProgressWorker base (which owns and frees it);
// `chunk` is owned by this class and released in the destructor below.
vSync_File::vSync_File(Nan::Callback * result, Nan::Callback * chunk)
: Nan::AsyncProgressWorker(result), chunk(chunk) {}
vSync_File::~vSync_File() {
delete chunk;
}
// Worker thread: queue a chunk, then ping the main thread so
// HandleProgressCallback gets a chance to drain the stack.
void vSync_File::StackCollect(std::string & str_chunk, const Nan::AsyncProgressWorker::ExecutionProgress& tchunk) {
std::lock_guard<std::mutex> guardme(m);
stack.push_back(str_chunk);
//attempt drain
// The Send() payload is a dummy; real data travels via the guarded stack.
std::string dummy = "NA";
tchunk.Send(dummy.c_str(), dummy.length());
}
//Dump out stream data
void vSync_File::StackDrain() {
std::lock_guard<std::mutex> guardme(m);
for (uint i = 0; i < stack.size(); i++) {
std::string th_chunk = stack[i];
v8::Local<v8::String> chk = Nan::New<v8::String>(th_chunk).ToLocalChecked();
v8::Local<v8::Value> argv[] = { chk };
chunk->Call(1, argv, this->async_resource);
}
stack.clear();
}
//Our main job in a nice worker thread
// Produces 20 fake stream chunks, one every 300 ms.
void vSync_File::Execute(const Nan::AsyncProgressWorker::ExecutionProgress& tchunk) {
//simulate some stream output
for (unsigned int i = 0; i < 20; i++) {
std::string out_chunk;
out_chunk = "Simulated stream data " + std::to_string(i);
std::this_thread::sleep_for(std::chrono::milliseconds(300)); //so our HandleProgressCallback is invoked, otherwise we are too fast in our example here
this->StackCollect(out_chunk, tchunk);
}
}
//Back at the main thread - if we have time stream back the output
void vSync_File::HandleProgressCallback(const char *tout, size_t tout_size) {
Nan::HandleScope scope;
// The payload (tout) is intentionally ignored; drain the shared stack instead.
this->StackDrain();
}
//Back at the main thread - we are done
void vSync_File::HandleOKCallback () {
this->StackDrain(); //drain leftovers from stream stack
v8::Local<v8::String> result_mess = Nan::New<v8::String>("done reading").ToLocalChecked();
v8::Local<v8::Value> argv[] = { result_mess };
// `callback` is the base-class "result" callback passed at construction.
callback->Call(1, argv, this->async_resource);
}
// JS entry: get_stream_data(resultCb, chunkCb).
// Both Nan::Callback allocations are owned by vSync_File / its base class.
NAN_METHOD(get_stream_data) {
Nan::Callback *result = new Nan::Callback(info[0].As<v8::Function>());
Nan::Callback *chunk = new Nan::Callback(info[1].As<v8::Function>());
AsyncQueueWorker(new vSync_File(result, chunk));
}
// Module init: export get_stream_data to JavaScript.
NAN_MODULE_INIT(Init) {
//we want stream data
Nan::Set(target, Nan::New<v8::String>("get_stream_data").ToLocalChecked(),
Nan::GetFunction(Nan::New<v8::FunctionTemplate>(get_stream_data)).ToLocalChecked());
}
// Register the addon as "stream_c_electron".
NODE_MODULE(stream_c_electron, Init)
index.js (electron implementation example):
const stream_c_electron = require('./build/linux_x64/stream_c_electron.node');
// First callback fires once when the worker finishes; the second fires for
// every streamed chunk.
stream_c_electron.get_stream_data(
  function (res) {
    //we are done
    console.log(res);
  },
  function (chk) {
    console.log("a line streamed");
    console.log(chk);
  }
);
package.json:
{
"name": "stream_c_electron",
"version": "1.0.0",
"description": "stream from c++ node addon demo",
"main": "index.js",
"scripts": {
"start": "electron .",
"build_this": "HOME=~/.electron-gyp node-gyp rebuild --target=2.0.8 --arch=x64 --dist-url=https://atom.io/download/electron",
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "11AND2",
"license": "MIT",
"dependencies": {
"nan": "2.11.0"
},
"devDependencies": {
"electron": "2.0.8"
}
}
binding.gyp:
{
"targets": [
{
"target_name": "stream_c_electron",
"sources": [ "c_src/demo.cpp" ],
"conditions": [
[
'OS=="linux"',
{
"cflags": ["-Wall", "-std=c++11"],
'product_dir' : 'linux_x64',
"include_dirs": [
"<!(node -e \"require('nan')\")"
]
}
]
]
}
]
}
You have to compile your c++ stuff as a static library with emscripten and load it in via import MyLib from "./MyLib"; or with require and run with node --experimental-modules --napi-modules main.mjs. Basically the idea is that the V8 engine is able to read your native code. It's also incredibly fast compared to pure javascript code.
It's actually pretty easy when you know what to do. Have a look to this sample code. It basically uses native c++ libpng library for javascript. The only tricky thing is actually interfacing c++ with javascript.
https://github.com/skanti/png-decoder-javascript/tree/devel