I am trying to do a PoC for a video processing application with the following stack and am stuck on passing the processed media stream from the C++ application to the Electron front-end GUI.
Electron
|
Nodejs
|
C++ Application
The C++ application will read from an IP/web camera (using OpenCV only to fetch the data) and process the input stream (not with OpenCV). I am trying to figure out a way to send that stream from C++ to the Electron GUI (Node.js/JS) with good fps. Right now I have compiled my C++ app with node-gyp and installed it as a node package.
Also, I don't want to change my C++ application too much (like including OpenCV as a node package), because later I will use that C++ application on its own to integrate with another application.
The Challenge:
We want to execute our heavy lifting code in a separate worker thread while also sending results (stream data chunks) back to the main thread during execution.
NAN (Native Abstractions for Node.js) already provides an approach for this with AsyncProgressWorker.
However, we cannot know whether HandleProgressCallback is actually invoked during execution to send back our results. This can happen when our run time is simply too fast, so the callback is never executed.
Proposed Solution:
We simply collect our stream output in a stack (StackCollect). We attempt to clear this stack immediately and send the stream results back to the main thread if possible (StackDrain). If we don't have time to clear the stack immediately, we drain what's left at the end of the execution run (HandleOKCallback).
Implementation Example:
demo.cpp (our C++ node/electron addon):
#include <nan.h>
#include <node.h>
#include <v8.h>
#include <iostream>
#include <string>
#include <vector>
#include <mutex>
#include <chrono>
#include <thread>
class vSync_File : public Nan::AsyncProgressWorker {
public:
~vSync_File();
vSync_File(Nan::Callback * result, Nan::Callback * chunk);
void Execute(const Nan::AsyncProgressWorker::ExecutionProgress& chunk);
void HandleOKCallback();
void HandleProgressCallback(const char *tout, size_t tout_size);
//needed for stream data collection
void StackCollect(std::string & str_chunk, const Nan::AsyncProgressWorker::ExecutionProgress& tchunk);
//drain stack
void StackDrain();
private:
Nan::Callback * chunk;
//stores stream data - use other data types for different output
std::vector<std::string> stack;
//mutex
std::mutex m;
};
vSync_File::vSync_File(Nan::Callback * result, Nan::Callback * chunk)
: Nan::AsyncProgressWorker(result), chunk(chunk) {}
vSync_File::~vSync_File() {
delete chunk;
}
void vSync_File::StackCollect(std::string & str_chunk, const Nan::AsyncProgressWorker::ExecutionProgress& tchunk) {
std::lock_guard<std::mutex> guardme(m);
stack.push_back(str_chunk);
//attempt drain
std::string dummy = "NA";
tchunk.Send(dummy.c_str(), dummy.length());
}
//Dump out stream data
void vSync_File::StackDrain() {
std::lock_guard<std::mutex> guardme(m);
for (size_t i = 0; i < stack.size(); i++) {
std::string th_chunk = stack[i];
v8::Local<v8::String> chk = Nan::New<v8::String>(th_chunk).ToLocalChecked();
v8::Local<v8::Value> argv[] = { chk };
chunk->Call(1, argv, this->async_resource);
}
stack.clear();
}
//Our main job in a nice worker thread
void vSync_File::Execute(const Nan::AsyncProgressWorker::ExecutionProgress& tchunk) {
//simulate some stream output
for (unsigned int i = 0; i < 20; i++) {
std::string out_chunk;
out_chunk = "Simulated stream data " + std::to_string(i);
std::this_thread::sleep_for(std::chrono::milliseconds(300)); //so our HandleProgressCallback is invoked, otherwise we are too fast in our example here
this->StackCollect(out_chunk, tchunk);
}
}
//Back at the main thread - if we have time stream back the output
void vSync_File::HandleProgressCallback(const char *tout, size_t tout_size) {
Nan::HandleScope scope;
this->StackDrain();
}
//Back at the main thread - we are done
void vSync_File::HandleOKCallback () {
this->StackDrain(); //drain leftovers from stream stack
v8::Local<v8::String> result_mess = Nan::New<v8::String>("done reading").ToLocalChecked();
v8::Local<v8::Value> argv[] = { result_mess };
callback->Call(1, argv, this->async_resource);
}
NAN_METHOD(get_stream_data) {
Nan::Callback *result = new Nan::Callback(info[0].As<v8::Function>());
Nan::Callback *chunk = new Nan::Callback(info[1].As<v8::Function>());
AsyncQueueWorker(new vSync_File(result, chunk));
}
NAN_MODULE_INIT(Init) {
//we want stream data
Nan::Set(target, Nan::New<v8::String>("get_stream_data").ToLocalChecked(),
Nan::GetFunction(Nan::New<v8::FunctionTemplate>(get_stream_data)).ToLocalChecked());
}
NODE_MODULE(stream_c_electron, Init)
index.js (electron implementation example):
const stream_c_electron = require('./build/linux_x64/stream_c_electron.node');
stream_c_electron.get_stream_data(function(res) {
//we are done
console.log(res);
}, function(chk) {
console.log("a line streamed");
console.log(chk);
});
package.json:
{
"name": "stream_c_electron",
"version": "1.0.0",
"description": "stream from c++ node addon demo",
"main": "index.js",
"scripts": {
"start": "electron .",
"build_this": "HOME=~/.electron-gyp node-gyp rebuild --target=2.0.8 --arch=x64 --dist-url=https://atom.io/download/electron",
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "11AND2",
"license": "MIT",
"dependencies": {
"nan": "2.11.0"
},
"devDependencies": {
"electron": "2.0.8"
}
}
binding.gyp:
{
"targets": [
{
"target_name": "stream_c_electron",
"sources": [ "c_src/demo.cpp" ],
"conditions": [
[
'OS=="linux"',
{
"cflags": ["-Wall", "-std=c++11"],
'product_dir' : 'linux_x64',
"include_dirs": [
"<!(node -e \"require('nan')\")"
]
}
]
]
}
]
}
You have to compile your C++ code as a static library with emscripten and load it in via import MyLib from "./MyLib"; or with require, and run it with node --experimental-modules --napi-modules main.mjs. Basically, the idea is that the V8 engine is able to read your native code. It's also incredibly fast compared to pure JavaScript code.
It's actually pretty easy once you know what to do. Have a look at this sample code; it basically uses the native C++ libpng library from JavaScript. The only tricky part is interfacing C++ with JavaScript.
https://github.com/skanti/png-decoder-javascript/tree/devel
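For illustration only, here is a minimal N-API sketch of that interfacing step; this is not code from the repository above, and decode_width() is a hypothetical function assumed to come from the statically linked library.
#include <node_api.h>
// Hypothetical function assumed to be provided by the statically linked C++ library.
extern "C" int decode_width(const char *path);
// Expose decode_width() to JavaScript: read the JS string argument,
// call the native function, and wrap the int result as a JS number.
static napi_value DecodeWidth(napi_env env, napi_callback_info info) {
  size_t argc = 1;
  napi_value argv[1];
  napi_get_cb_info(env, info, &argc, argv, nullptr, nullptr);
  char path[256];
  size_t len;
  napi_get_value_string_utf8(env, argv[0], path, sizeof(path), &len);
  napi_value result;
  napi_create_int32(env, decode_width(path), &result);
  return result;
}
static napi_value Init(napi_env env, napi_value exports) {
  napi_value fn;
  napi_create_function(env, "decodeWidth", NAPI_AUTO_LENGTH, DecodeWidth, nullptr, &fn);
  napi_set_named_property(env, exports, "decodeWidth", fn);
  return exports;
}
NAPI_MODULE(NODE_GYP_MODULE_NAME, Init)
The addon built from this is then just require()'d from JavaScript and called like any other function.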
Related
I'm trying to make audio software (a DAW), using Electron to create the window and C++ to play audio, generate audio, and apply audio effects.
I have been searching for a simple, powerful, and cross-platform library to play and process audio, and I've found The Synthesis Toolkit and I'm really happy with it.
Here is the code (it's from the STK demo programs):
#include "BeeThree.h"
#include "RtAudio.h"
using namespace stk;
// The TickData structure holds all the class instances and data that
// are shared by the various processing functions.
struct TickData {
Instrmnt *instrument;
StkFloat frequency;
StkFloat scaler;
long counter;
bool done;
// Default constructor.
TickData()
: instrument(0), scaler(1.0), counter(0), done( false ) {}
};
// This tick() function handles sample computation only. It will be
// called automatically when the system needs a new buffer of audio
// samples.
int tick( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
double streamTime, RtAudioStreamStatus status, void *userData )
{
TickData *data = (TickData *) userData;
register StkFloat *samples = (StkFloat *) outputBuffer;
for ( unsigned int i=0; i<nBufferFrames; i++ ) {
*samples++ = data->instrument->tick();
if ( ++data->counter % 2000 == 0 ) {
data->scaler += 0.025;
data->instrument->setFrequency( data->frequency * data->scaler );
}
}
if ( data->counter > 80000 )
data->done = true;
return 0;
}
int main()
{
// Set the global sample rate and rawwave path before creating class instances.
Stk::setSampleRate( 44100.0 );
Stk::setRawwavePath("./engine/rawwaves/");
TickData data;
RtAudio dac;
// Figure out how many bytes in an StkFloat and setup the RtAudio stream.
RtAudio::StreamParameters parameters;
parameters.deviceId = dac.getDefaultOutputDevice();
parameters.nChannels = 1;
RtAudioFormat format = ( sizeof(StkFloat) == 8 ) ? RTAUDIO_FLOAT64 : RTAUDIO_FLOAT32;
unsigned int bufferFrames = RT_BUFFER_SIZE;
try {
dac.openStream( &parameters, NULL, format, (unsigned int)Stk::sampleRate(), &bufferFrames, &tick, (void *)&data );
}
catch ( RtAudioError& error ) {
error.printMessage();
goto cleanup;
}
try {
// Define and load the BeeThree instrument
data.instrument = new BeeThree();
}
catch ( StkError & ) {
goto cleanup;
}
data.frequency = 220.0;
data.instrument->noteOn( data.frequency, 0.5 );
try {
dac.startStream();
}
catch ( RtAudioError &error ) {
error.printMessage();
goto cleanup;
}
// Block waiting until callback signals done.
std::cin.get();
data.scaler = 0.025;
std::cin.get();
data.scaler = -1;
std::cin.get();
// Shut down the callback and output stream.
try {
dac.closeStream();
}
catch ( RtAudioError &error ) {
error.printMessage();
}
cleanup:
delete data.instrument;
return 0;
}
I managed to compile this simple demo program with g++, using this command:
g++ -D__LITTLE_ENDIAN__ -D__LINUX_ALSA__ ./engine/engine.cpp -o ./engine/engi -I./engine/include/ -L./engine/lib/ -lstk -lpthread -lasound -lm
But when I try to compile it into an engine.node file with node-gyp, I get this error:
paulux@Paulux-Laptop:~/Documents/Code/FyneWav$ node-gyp build
/usr/bin/ld : can't find -lstk
collect2: error: ld returned 1 exit status
Here's my binding.gyp file:
{
"targets": [
{
"target_name": "engine",
"sources": ["./engine/engine.cpp"],
"cflags_cc" :["-fexceptions"],
"include_dirs": [
"./engine/include/"
],
'link_settings': {
"libraries": [
"-lpthread", "-lasound" , "-lm",
"-L./engine/lib/", "-lstk"
],
},
"defines": [
"__LITTLE_ENDIAN__", "__LINUX_ALSA__"
]
}
]
}
My folder structure looks like this:
root
|- package-lock.json
|- package.json
|- README.md
|- binding.gyp
|- 10.1.4 (includes for v8 NodeJS addon)
|- engine
|- engine.cpp
|- include (all include files from *STK/include* archive)
|- lib
|- libstk.a (lib from the *STK/src/Release* archive)
I tried not linking stk in the binding.gyp file, but then when I loaded the engine.node module in Electron, I got:
Uncaught Error: /home/paulux/Documents/Code/FyneWav/build/Release/engine.node:
undefined symbol: _ZN3stk3Stk17sampleRateChangedEdd
So, the question is:
How can I link stk in node-gyp, and, if we can't, how can I make an engine.node file using another compiler (g++, for example)?
Finally! I found it by myself!
The answer is really dumb: in my binding.gyp file, I just had to replace
-L./engine/lib with
-L/home/paulux/Documents/Code/fynewav/engine/lib.
I just had to change from a relative path to an absolute one...
And it took me a day to figure it out...
And I hate myself =)
I have a basic C++ file.
I have a node server. One of the functions there gets a number, makes a calculation, and returns another number. I want to make this function work faster by rewriting it in C++, so I want to be able to call a function written in C++ (in a .cpp file) from a .js file.
When I run node index.js, all the .cpp files should be compiled, then functions from them should be require()'d in the .js files, and then I want to be able to use them in the .js file by calling them like ordinary functions, e.g. calc(number, param);. How do I do it?
I tried to read some articles about that and watched some videos on YouTube and made something, but when it starts I get a lot of errors like
gyp ERR! find Python Python is not set from command line or npm configuration
gyp ERR! find Python Python is not set from environment variable PYTHON
gyp ERR! find Python checking if "python" can be used
gyp ERR! find Python - "python" is not in PATH or produced an error
As I understand it, it wants me to install Python on my computer. But what for? I do not need Python; I want to compile and execute C++ from Node.js.
My implementation:
index.js
var testlib = require('./build/Release/testlib');
testlib.addThousandToNumber(20, function(err, res)
{
if (err)
console.error(err);
else
console.log(res);
});
package.json
"name": "testlib",
"version": "1.0.0",
"description": "",
"main": "run.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"install": "node-gyp clean configure build"
},
"author": "",
"license": "ISC",
"dependencies": {
"nan": "^2.14.1",
"node-gyp": "^7.0.0"
}
}
binding.gyp
"targets": [{
"target_name": "testlib",
"sources": ["testlib.cpp", "testworker.cpp"],
"include_dirs": ["<!(node -e \"require('nan')\""]
}]
}
testlib.cpp
#include <nan.h>
#include "testworker.h"
NAN_METHOD(addThousandToNumber)
{
auto number = Nan::To<int>(info[0]).FromJust();
auto *callback = new Nan::Callback(info[1].As<v8::Function>());
Nan::AsyncQueueWorker(new TestWorker(number, callback));
}
NAN_MODULE_INIT(init)
{
Nan::SetMethod(target, "addThousandToNumber", addThousandToNumber);
}
NODE_MODULE(testlib, init)
testworker.cpp
#include "testworker.h"
void TestWorker::Execute()
{
for (int i = 0; i < 1000; i++)
{
myNumber++;
}
}
void TestWorker::HandleOKCallback()
{
Nan::HandleScope scope;
auto numberJS = Nan::New(myNumber);
v8::Local<v8::Value> argv[] = {Nan::Undefined(), numberJS};
myCallback->Call(2, argv);
}
testworker.h
#include <nan.h>
class TestWorker : public Nan::AsyncWorker
{
public:
TestWorker(int number, Nan::Callback * callback) :
Nan::AsyncWorker(callback), myNumber(number), myCallback(callback) { }
~TestWorker() { } // the Nan::AsyncWorker base class deletes callback
void Execute();
void HandleOKCallback();
private:
int myNumber;
Nan::Callback * myCallback;
};
Maybe this is what you are looking for: cmake-js
"CMake.js is a Node.js/io.js native addon build tool which works (almost) exactly like node-gyp, but instead of gyp, it is based on the CMake build system."
If your primary goal is just to do some high-performance calculations in C++ behind a web interface, you might be interested in compiling and linking your function directly into an HTTP engine, like in Node++:
https://github.com/rekmus/nodepp
void npp_app_main()
{
if ( REQ("") ) // landing
{
// ...
}
else if ( REQ("calc") ) // calculate
{
int number;
// get number from the query string:
if ( QSI("number", &number) )
{
int result = calc(number, param);
// return result to the client:
OUT("{\"result\":%d}", result);
RES_CONTENT_TYPE_JSON;
}
}
else
{
RES_STATUS(404);
}
}
Besides the calc time, the engine latency is around 5-20 µs on an average PC (Linux). The added benefit is that Node++ is fairly independent (requires only OS, GCC and OpenSSL if you want HTTPS).
So lately I've been getting into OpenCV with C++.
I've built up a few libraries and apps that I would like to export over to Node.js, but I can't figure it out for the life of me.
I tried to check out how it's done in the repo below, but it was a lot to take in, especially since this is my first add-on.
https://github.com/peterbraden/node-opencv/blob/master/binding.gyp
I don't mind whether it's with NAN or N-API; I'm just hoping for something simple where it's easy to see what goes where and why.
Here is a simple OpenCV function that just opens up an image that I am trying to use as an addon with Node:
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
int ShowImage()
{
String imageName("./image.png");
Mat image;
image = imread(imageName, IMREAD_COLOR);
namedWindow("Display window", WINDOW_AUTOSIZE);
imshow("Display window", image);
waitKey(0);
return 0;
}
There are three main files that you will need.
binding.gyp
module.cpp
index.js
binding.gyp
For me, the hardest part was figuring out how to include OpenCV in the project. I don't know if this is correct or not, but I treated the binding.gyp file like a makefile in a typical C++ project. With that in mind, this is what my binding.gyp file looks like.
{
"targets": [{
"target_name": "module",
'include_dirs': [
'.',
'/usr/local/lib',
],
'cflags': [
'-std=c++11',
],
'link_settings': {
'libraries': [
'-L/usr/local/lib',
'-lopencv_core',
'-lopencv_imgproc',
'-lopencv_highgui'
],
},
"sources": [ "./src/module.cpp",
"./src/ImageProcessing.cpp" ]
}]
}
The ImageProcessing.cpp file that I wrote needed C++11, which is why I added that flag; it is not necessary for OpenCV to work.
The key part of the binding.gyp file is link_settings. This is how you actually link OpenCV into your project.
Also make sure to include all of your source files in the sources list (I forgot to include my ImageProcessing.cpp file initially).
module.cpp
I used N-API, so my module.cpp file looks like this:
#include <node_api.h>
#include "ImageProcessing.hpp"
#include "opencv.hpp"
template <typename T>
ostream& operator<<(ostream& output, std::vector<T> const& values)
{
for (auto const& value : values)
{
output << value;
}
return output;
}
napi_value processImages(napi_env env, napi_callback_info info)
{
napi_status status;
size_t argc = 3;
napi_value argv[3];
status = napi_get_cb_info(env, info, &argc, argv, NULL, NULL);
char PathName[100];
size_t result;
status = napi_get_value_string_utf8(env, argv[0], PathName, 100, &result);
char FileName1[100];
status = napi_get_value_string_utf8(env, argv[1], FileName1, 100, &result);
char FileName2[100];
status = napi_get_value_string_utf8(env, argv[2], FileName2, 100, &result);
vector< vector<Point> > Anchors; //to store coordinates of all anchor points
vector< vector<Point> > Regions[4]; //to store coordinates of all corners of all pages
vector<int> Parameters; // image processing parameters
vector<string> FileList1;
vector<string> FileList2;
Mat TemplateROI[NUM_SHEET][4];
Mat Result1, Result2;
string FileName;
string testName = FileName1;
int i;
// The first function to be called only at startup of the program
// provide the path to folder where the data and reference image files are saved
getAnchorRegionRoI(PathName, &Anchors, Regions, &Parameters, TemplateROI);
vector< vector<int> > Answers;
if (Parameters.at(0)) {
namedWindow("Display1", CV_WINDOW_AUTOSIZE);
namedWindow("Display2", CV_WINDOW_AUTOSIZE);
}
napi_value outer;
status = napi_create_array(env, &outer);
//This will need to be changed to watch for new files and then process them
Answers = scanBothSides(FileName1, FileName2, "./Output/", &Result1, &Result2, &Anchors, Regions, Parameters, TemplateROI);
for(int k = 0; k<Answers.size(); k++){
napi_value inner;
status = napi_create_array(env, &inner);
int j;
for(j = 0; j<Answers[k].size(); j++){
napi_value test;
napi_create_int32(env, Answers[k][j], &test);
napi_set_element(env,inner, j, test);
}
napi_value index;
napi_create_int32(env, k, &index);
napi_set_element(env,inner, j, index);
napi_set_element(env,outer, k, inner);
}
if (Parameters.at(0)) {
if (!Result1.empty() && !Result2.empty()) {
FileName = "./Output/" + string("O ") + FileList1[i];
imwrite(FileName, Result1);
FileName = "./Output/" + string("O ") + FileList2[i];
imwrite(FileName, Result2);
resize(Result1, Result1, Size(772, 1000));
resize(Result2, Result2, Size(772, 1000));
imshow("Display1", Result1);
imshow("Display2", Result2);
waitKey(0);
}
}
if (status != napi_ok)
{
napi_throw_error(env, NULL, "Failed to parse arguments");
}
//return PathName;
return outer;
}
napi_value Init(napi_env env, napi_value exports)
{
napi_status status;
napi_value fn;
status = napi_create_function(env, NULL, 0, processImages, NULL, &fn);
if (status != napi_ok)
{
napi_throw_error(env, NULL, "Unable to wrap native function");
}
status = napi_set_named_property(env, exports, "processImages", fn);
if (status != napi_ok)
{
napi_throw_error(env, NULL, "Unable to populate exports");
}
return exports;
}
NAPI_MODULE(NODE_GYP_MODULE_NAME, Init)
This is the file that interfaces between C/C++ and Node.
I had trouble with the opencv.hpp file being found, so I just moved it into my working directory for now. This is why I used quotes instead of angle brackets to include it.
Working with the N-API took a little getting used to, so make sure you read the docs here.
index.js
And finally here is my index.js file
const express = require('express');
const app = express();
const addon = require('./build/Release/module');
const value = "./Data/";
let FileName1 = "./Images/Back1.jpg";
let FileName2 = "./Images/Front1.jpg";
let result = addon.processImages(value, FileName1, FileName2);
console.log("Results: "+result);
app.listen(3000, () => console.log('Example app listening on port 3000!'))
So all you have to do is require your module from the build/Release folder and then call it like any other js function.
Take a look at the module.cpp code again and you will see that in the init function you use the n-api to create a new function. I called mine processImages. This name matches the name of the processImages function at the top of the module.cpp file. Finally in my index.js file I call addon.processImages().
Tips:
I installed node-gyp globally by running npm install -g node-gyp
I compiled my code using the following command: node-gyp configure build
Try getting a simple n-api project working first then add in openCV. I used this tutorial to get started
I created a simple script which will compile OpenCV 3.4 with opencv_contrib (SIFT, SURF available) for statically linking with Native Abstractions for Node.js.
rm -rf 3rdparty/opencv
mkdir -p 3rdparty/opencv
rm -rf tmp
mkdir tmp
cd tmp
rm -rf opencv-master
rm -rf opencv_contrib-master
git clone --branch 3.4 --depth 1 https://github.com/opencv/opencv.git opencv-master
git clone --branch 3.4 --depth 1 https://github.com/opencv/opencv_contrib.git opencv_contrib-master
mkdir build
cd build
cmake \
-DCMAKE_INSTALL_PREFIX="$(pwd)/../../3rdparty/opencv\#3.4" \
-DBUILD_SHARED_LIBS=OFF \
-DENABLE_PRECOMPILED_HEADERS=YES \
-DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib-master/modules \
../opencv-master
cmake --build .
make install
It will help you get started with your computer vision project. By the way, the SIFT algorithm no longer requires a license for use, as its patent expired on 2020-03-06. The binding.gyp below shows how the statically built OpenCV is then referenced:
{
"targets": [
{
"target_name": "addon",
"cflags": [
"-std=c++11",
"-stdlib=libc++"
],
"cflags_cc!": [
"-fno-rtti",
"-fno-exceptions"
],
"xcode_settings": {
"GCC_ENABLE_CPP_RTTI": "YES",
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
},
"include_dirs": [
"../../3rdparty/opencv/#3.4/include",
"<!(node -e \"require('nan')\")"
],
'libraries': [
"<!#(node utils/find-libs.js)",
"-framework OpenCL"
],
"sources": [
"./src/main.cc",
"./src/lib/MainAddon.cc"
],
}
]
}
The full source code is fairly large, so it is published in this GitHub repository.
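As a quick sanity check of the static build, here is a hedged sketch (not code from the repository above) that detects SIFT keypoints; it assumes the xfeatures2d/nonfree contrib module ended up enabled in the build, and the image path is just an example.
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <vector>
int main() {
    // Load any test image in grayscale (path is an example).
    cv::Mat img = cv::imread("sample.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return 1;
    // SIFT lives in the contrib xfeatures2d module in OpenCV 3.4.
    cv::Ptr<cv::xfeatures2d::SIFT> sift = cv::xfeatures2d::SIFT::create();
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    sift->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
    std::cout << "SIFT keypoints found: " << keypoints.size() << std::endl;
    return 0;
}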
I have a very basic native module for Node.js which essentially wraps a more complicated C++ program that uses multithreading via OpenMP (it's a Monte Carlo simulation). This C++ program works fine on its own and when called from Python using a Cython module to wrap the C++ code. However, when I compile it to a native node module (passing /openmp to the MSBuild compiler), it only uses one thread.
The following is my binding.gyp for reference:
{ "targets": [
{
"target_name": "LossForecast",
"sources": [ "NodeLossForecast.cpp", "src/AutoDiff.cpp" ],
"include_dirs":["src", "<!(node -e \"require('nan')\")"],
'conditions': [
['OS=="win"',
{
'cflags': ["/EHsc", "/O2", "/openmp"]
}
]
]
} ]}
So, I am writing down my files for running a simple node-addon-api pi program using OpenMP. Although node-addon-api is experimental, it will soon be released as an official API for Node.js. It's very simple.
This is for Windows only, and I can confirm whether it is running on multiple processors.
binding.gyp
{
"targets": [
{
"target_name": "nodeOpenMP",
"defines": [
"VERSION=0.0.1",
"NAPI_DISABLE_CPP_EXCEPTIONS"
],
"sources": [
"module.cc"
],
"include_dirs": [
"<!(echo %cd%/node_modules/node-addon-api)"
],
"conditions": [
[
'OS=="win"',
{
'msvs_settings': {
'VCCLCompilerTool' : {
'AdditionalOptions' : ['/MT','/openmp']
}
}
}
]
]
}
]
}
You have to add VCCLCompilerTool and then, under AdditionalOptions, add '/openmp' as shown above.
Here's a simple pi program that I wrote:
module.cc
#include <napi.h>
#include <omp.h>
#include <iostream>
#include <windows.h> // for GetCurrentProcessorNumber()
int getThreads(){
// Get number of threads
omp_set_num_threads(3);
int n = 0;
#pragma omp parallel reduction(+:n)
n += 1;
return n;
}
double GetPi(short numOfThreads,long numberOfSteps){
long i;
double pi, sum = 0.0;
double step = 1.0/(double) numberOfSteps;
omp_set_num_threads(numOfThreads);
#pragma omp parallel
{
std::cout << "This thread is running on processor: " << GetCurrentProcessorNumber() << "\n";
double x;
#pragma omp for reduction(+:sum)
for(i = 0; i < numberOfSteps; i++) {
x = ( i + 0.5 ) * step;
sum += 4.0 / (1 + x*x);
}
}
std::cout << "Total no. of threads (not processors)" <<getThreads() << std::endl;
pi = step * (double)sum;
return pi;
}
Napi::Value calculatePi(const Napi::CallbackInfo& info ){
Napi::Env env = info.Env();
// check for no. of arguments
if(info.Length()<2){
Napi::TypeError::New(env, "Wrong number of arguments").ThrowAsJavaScriptException();
return env.Null();
}
if (!info[0].IsNumber() || !info[1].IsNumber()) {
Napi::TypeError::New(env, "Wrong arguments").ThrowAsJavaScriptException();
return env.Null();
}
double numThreads = info[0].As<Napi::Number>().DoubleValue();
double numSteps = info[1].As<Napi::Number>().DoubleValue();
double pi = GetPi(numThreads, numSteps);
Napi::Number PI = Napi::Number::New(env, pi);
return PI;
}
Napi::Object Init(Napi::Env env, Napi::Object exports){
// register the functions that are to be exported
exports.Set(Napi::String::New(env, "pi"), Napi::Function::New(env, calculatePi));
return exports;
}
NODE_API_MODULE(nodeOpenMP, Init);
testOMP.js
const omp = require("./build/Release/nodeOpenMP");
const numThreads = 4, numSteps = 1000000;
console.log( omp.pi(numThreads, numSteps) );
Simply copy and paste the files above accordingly. Don't forget to install node-addon-api (locally) and node-gyp (globally) and then run:
node-gyp configure build && node --no-warnings testOMP.js
You should get an output like this:
This thread is running on processor: 3
This thread is running on processor: 3
This thread is running on processor: 0
This thread is running on processor: 3
Total no. of threads (not processors): 3
I am thinking of soon making a cross-platform npm package node-openmp.
Follow this repo and contribute. I am open to any contributions.
Himujjal/node-openmp
Both tools are available over here: https://github.com/ninjablocks/433Utils/tree/master/RPi_utils
I really want a simple interface to manage my 433MHz devices, but I can't find a good one.
So I have worked all day now trying to make a wrapper for Node.js around the RCSwitch class, with 2 simple methods:
- send[code]
- receive[callback[code]]
I get this error when I try to make a new instance of the RCSwitch class:
node: symbol lookup error:
/root/nodemodule/example/build/Release/kaku.node:
undefined symbol: _ZN8RCSwitchC1Ev
It compiles perfectly with node-gyp but when I execute node it fails.
Now I use exec to execute sendCommand with the code (ugly, I know).
And I tried to make the RFSniffer work like this:
./RFSniffer > rfsniffer.log
Then tail -f the rfsniffer.log.
But RFSniffer won't give me any data.
So my question is: can anybody help me get RFSniffer working with tail -f?
Or even better, can someone help me fix the C++ addon for Node.js? :)
Here is the wrapper code:
#include "RCSwitch.h"
#include <node.h>
#include <v8.h>
using namespace v8;
Handle<Value> CodeSend(const Arguments& args) {
HandleScope scope;
int PIN = 0;
RCSwitch mySwitch = RCSwitch();
mySwitch.enableTransmit(PIN);
mySwitch.send(args[0]->IntegerValue(), 24);
return scope.Close(True());
}
Handle<Value> CodeRecieve(const Arguments& args) {
HandleScope scope;
// Entry check
if (args.Length() != 2) {
ThrowException(Exception::TypeError(String::New("Wrong number of arguments")));
return scope.Close(Undefined());
}
Local<String> name= args[0]->ToString();
Local<String> msg = name;
Local<Function> cb = Local<Function>::Cast(args[1]);
const unsigned argc = 1;
Local<Value> argv[argc] = { Local<Value>::New(msg) };
cb->Call(Context::GetCurrent()->Global(), argc, argv);
return scope.Close(Undefined());
}
extern "C" {
static void init(Handle<Object> target) {
if( wiringPiSetup() == -1 ) {
ThrowException( Exception::TypeError( String::New( "rcswitch: GPIO initialization failed" ) ) );
return;
}
NODE_SET_METHOD(target, "Send", CodeSend);
NODE_SET_METHOD(target, "Recieve", CodeRecieve);
}
NODE_MODULE(kaku, init);
}
nodejs code:
var addon = require('./build/Release/kaku');
console.log(addon.Send(1234));
addon.Recieve(1234, function (val) {
console.log(val);
});
I had the same problem as you, and the reason why ./RFSniffer > rfsniffer.log doesn't work is that the printf() output in the RFSniffer code is not flushed.
Try this source code:
/*
RF_Sniffer
Hacked from http://code.google.com/p/rc-switch/
by #justy to provide a handy RF code sniffer
*/
#include "RCSwitch.h"
#include <stdlib.h>
#include <stdio.h>
RCSwitch mySwitch;
int main(int argc, char *argv[]) {
// This pin is not the first pin on the RPi GPIO header!
// Consult https://projects.drogon.net/raspberry-pi/wiringpi/pins/
// for more information.
int PIN = 2;
if(wiringPiSetup() == -1)
return 0;
mySwitch = RCSwitch();
mySwitch.enableReceive(PIN); // Receiver on interrupt 0 => that is pin #2
while(1) {
if (mySwitch.available()) {
int value = mySwitch.getReceivedValue();
if (value == 0) {
printf("Unknown encoding");
} else {
printf("Received %i\n", mySwitch.getReceivedValue() );
}
fflush(stdout); // Add this line to flush the previous printf()
mySwitch.resetAvailable();
}
}
exit(0);
}
And if you run the RFSniffer tool with sudo permissions, you can execute it with:
sudo ./RFSniffer | sudo tee rfsniffer.log
OR
sudo sh -c './RFSniffer >> rfsniffer.log'