My problem is that I access the Kinect with the following code:
#include "libfreenect.hpp"
#include <iostream>
freenect_context* ctx;
freenect_device* dev;
void freenect_threadfunc(freenect_device* dev, void* v_depth, uint32_t timestamp){
short* d = (short*) v_depth;
std::cout << d[0] << std::endl;
}
int main(int argc, char const *argv[])
{
if (freenect_init(&ctx, NULL) < 0) {
std::cout << "freenect_init() failed!" << std::endl;
return 1;
}
if (freenect_open_device(ctx, &dev, 0) < 0) {
std::cout << "No device found!" << std::endl;
freenect_shutdown(ctx);
return 1;
}
freenect_set_depth_callback(dev, freenect_threadfunc);
freenect_set_depth_mode(dev, freenect_find_depth_mode(FREENECT_RESOLUTION_MEDIUM, FREENECT_DEPTH_11BIT));
freenect_start_depth(dev);
while (true) {
}
return 0;
}
But for some reason I don't know, the callback function freenect_threadfunc never executes. When I run freenect-glview, an example provided by OpenKinect, everything works fine.
Thank you for your help.
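A likely cause, though this is an educated guess since the rest of the setup isn't shown: the empty while (true) loop never pumps libfreenect's USB event loop, so the library has no chance to dispatch the depth callback; freenect-glview keeps calling freenect_process_events() internally. A minimal sketch of the loop:
// Pump USB events so libfreenect can dispatch the depth callback;
// without this, freenect_threadfunc is never invoked.
while (freenect_process_events(ctx) >= 0) {
// depth frames arrive asynchronously via freenect_threadfunc
}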
I'm making an API and want to allow app developers to register their own callback function with any return type and any number of parameters. The code below is an example I expect to work. I wonder how to declare ?<?> userfunc in MyAPI.h.
// MyAPI.h
namespace API {
/**
* Executes callback function after a random time in its own thread.
* @param userfunc User-defined callback function.
**/
void SetCallback(?<?> userfunc);
}
// application.cpp
#include "MyAPI.h"
#include <chrono>
#include <format>
#include <iostream>
#include <thread>
std::string get_current_datetime() {
const auto now = std::chrono::system_clock::now();
return std::format("{:%d-%m-%Y %H:%M:%OS}", now);
}
bool callback_1(bool* triggered, std::string* registration_time) {
*triggered = true;
std::cout << "CALLBACK_1" << std::endl;
std::cout << "registered: " << *registration_time << std::endl;
std::cout << "triggered : " << get_current_datetime() << std::endl;
return true;
}
int callback_2(int* triggered) {
*triggered = 1;
std::cout << "CALLBACK_2" << std::endl;
return 1;
}
int main(void) {
bool is_triggered_bool = false;
int is_triggered_int = 0;
std::string curr = get_current_datetime();
API::SetCallback(std::bind(&callback_1, &is_triggered_bool, &curr));
API::SetCallback(std::bind(&callback_2, &is_triggered_int));
std::this_thread::sleep_for(std::chrono::seconds(3)); // give the callbacks time to fire
if (is_triggered_bool && is_triggered_int == 1) return 0;
else return -1;
}
Any advice will be appreciated.
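One portable way to get this shape, sketched here with type erasure rather than as a definitive design: declare SetCallback to take std::function<void()>. std::bind (or a lambda) packages the arguments at the call site, and std::function accepts callables of any return type, discarding the result:
// MyAPI.h (sketch)
#include <functional>
namespace API {
/**
* Executes the callback after a random delay in its own thread.
* @param userfunc Any callable invocable with no arguments; bind the
* arguments at the call site. The return value is discarded.
**/
void SetCallback(std::function<void()> userfunc);
}
// MyAPI.cpp (hypothetical implementation)
#include "MyAPI.h"
#include <chrono>
#include <random>
#include <thread>
namespace API {
void SetCallback(std::function<void()> userfunc) {
std::thread([userfunc = std::move(userfunc)] {
// Hypothetical "random time": sleep up to two seconds, then invoke.
std::mt19937 rng{std::random_device{}()};
std::uniform_int_distribution<int> delay_ms(0, 2000);
std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms(rng)));
userfunc();
}).detach();
}
}
With that declaration, both SetCallback(std::bind(&callback_1, &is_triggered_bool, &curr)) and SetCallback(std::bind(&callback_2, &is_triggered_int)) from application.cpp compile unchanged.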
I passed a structure pointer to a function. What I need is to get the string "Connection refused" (returned from ssh_get_error()) into the main function.
#include <libssh/libssh.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <cstring>
using namespace std;
typedef struct rtns {
bool state;
char* desc;
} rtns;
int connect_host(rtns *con_host) {
const char* desc;
char desc2[1000];
ssh_session my_ssh_session;
int rc;
my_ssh_session = ssh_new();
if (my_ssh_session == NULL)
exit(-1);
ssh_options_set(my_ssh_session, SSH_OPTIONS_HOST, "localhost");
rc = ssh_connect(my_ssh_session);
if (rc != SSH_OK) {
(*con_host).state = false;
desc = ssh_get_error(my_ssh_session);
strcpy(desc2, ssh_get_error(my_ssh_session));
(*con_host).desc = strdup(desc2);
cout << "from connect_host function->" <<con_host->desc << "\n";
} else {
(*con_host).state = true;
}
ssh_disconnect(my_ssh_session);
ssh_free(my_ssh_session);
}
int main() {
rtns con_host;
rtns *p = (rtns*) malloc(sizeof(struct rtns));
p = (rtns*) malloc(20);
connect_host(&con_host);
cout << "from main function->" << *(con_host.desc) << "\n\n";
}
Output:
from connect_host function->Connection refused
from main function->C
The problem is that I am only getting one character, "C", in the main function. I suspect I am doing the memory allocation wrong, but I don't know how to do it correctly. I also tried moving the malloc call into connect_host, but that didn't work either.
Think about what you are printing. You pass a char to the stream, not a char*:
cout << "from main function->" << *(con_host.desc) << "\n\n";
// *(con_host.desc) = *(char *) = char
// Above is a simplification but you get the point.
So long as con_host.desc is a null-terminated C string, you can print it like this:
cout << "from main function->" << con_host.desc << "\n\n";
Here is a live example.
It's not a memory allocation error; it's caused by the output statement. Change it to
cout << "from main function->" << con_host.desc << "\n\n";
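As an aside on the memory-allocation worry, separate from the printing fix: the two malloc calls on p in main are never used and simply leak (the second even overwrites the first pointer). con_host itself lives on the stack, and desc is allocated by strdup() inside connect_host, so the caller only has to free it. connect_host is also declared int but returns nothing; make it void or return a status. A sketch of a cleaned-up main:
int main() {
rtns con_host;
con_host.state = false;
con_host.desc = NULL;
connect_host(&con_host);
if (!con_host.state && con_host.desc) {
cout << "from main function->" << con_host.desc << "\n\n";
free(con_host.desc); // release the buffer strdup() allocated
}
}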
I am trying to use boost::latch in my program to block until all the threads finish or a timeout expires. My code is as follows; ctpl is a thread pool library adapted from https://github.com/vit-vit/CTPL.
#include <boost/thread/latch.hpp>
#include <CTPL/ctpl.h>
#include <mutex>
#include <iostream>
using namespace std;
int main(int argc, char **argv) {
ctpl::thread_pool outer_tp(100);
ctpl::thread_pool inner_tp(5, 5000);
auto out_func = [&inner_tp](int outer_id, int outer_invoke_idx) {
int num_batch = 20;
boost::latch latch_(num_batch);
auto func = [&latch_, &outer_invoke_idx](int inner_id, int inner_invoke_idx) {
try {
std::cout << "outer: " << outer_invoke_idx << ", inner: " << inner_invoke_idx << endl;
} catch (exception &ex) { cout << "error: " << ex.what() << endl; }
latch_.count_down();
};
for (int i = 0; i < num_batch; ++i) {
inner_tp.push(func, i);
}
latch_.wait_for(boost::chrono::milliseconds(1));
};
for (int i = 0; i < 5000; ++i) outer_tp.push(out_func, i);
outer_tp.stop(true);
return 0;
}
g++ -std=c++11 test.cpp -lboost_system -lpthread -lboost_chrono -lboost_thread
However, I get the following error message:
bool boost::latch::count_down(boost::unique_lock<boost::mutex>&): Assertion `count_ > 0' failed.
If I use latch_.wait() instead of latch_.wait_for(), or set a very long wait time, the code works without error. Hence I guess the timeout leads to this error. Does anyone know how to fix it?
There seem to be a few issues with your code. Capturing latch_ in the inner tasks by reference is one of them: when wait_for() times out, out_func returns and destroys the stack-allocated latch, and any inner task that runs afterwards calls count_down() on a dead latch, which is what trips the count_ > 0 assertion. Replacing it with a shared_ptr to boost::latch fixes that. A similar problem exists with outer_invoke_idx, which is also captured by reference. Fixing those and waiting for inner_tp to finish makes your test work just fine.
Here is modified test case that works for me:
#include <boost/thread/latch.hpp>
#include <memory>
#include <CTPL/ctpl.h>
#include <mutex>
#include <iostream>
using namespace std;
int main(int argc, char **argv) {
ctpl::thread_pool outer_tp(100);
ctpl::thread_pool inner_tp(5, 5000);
auto out_func = [&inner_tp](int outer_id, int outer_invoke_idx) {
int num_batch = 20;
auto latch_ = std::make_shared<boost::latch>(num_batch);
auto func = [latch_, outer_invoke_idx](int inner_id, int inner_invoke_idx) {
try {
std::cout << "outer: " << outer_invoke_idx << ", inner: " << inner_invoke_idx << endl;
} catch (exception &ex) { cout << "error: " << ex.what() << endl; }
latch_->count_down();
};
for (int i = 0; i < num_batch; ++i) {
inner_tp.push(func, i);
}
latch_->wait_for(boost::chrono::milliseconds(1));
};
for (int i = 0; i < 5000; ++i) outer_tp.push(out_func, i);
outer_tp.stop(true);
inner_tp.stop(true);
std::cout << "EXITING!!!" << std::endl;
return 0;
}
I am trying to use libuv to launch a process in a cross-platform way. To test the library, I have written a small C++ application that calls sleep 1 and then exits. The problem is that sometimes (~5% of the time) it crashes with the following error from libuv:
EFAULT bad address in system call argument
Here is my code:
#include <iostream>
#include <string>
#include <memory>
#include <cstring>
#include <uv.h>
char* a;
char* b;
char** args;
std::string error_to_string(int const& error) {
return std::string(uv_err_name(error)) +
" " +
std::string(uv_strerror(error));
}
void on_exit(uv_process_t* req, int64_t exit_status, int term_signal) {
std::cout << "I'm back! " << std::endl;
std::cout << "exit_status " << exit_status
<< " term_signal " << term_signal << std::endl;
uv_close((uv_handle_t*)req, nullptr);
}
int main(int argc, const char** argv) {
auto* loop = new uv_loop_t();
uv_loop_init(loop);
auto* process = new uv_process_t();
uv_process_options_t options = {};
a = new char[100];
b = new char[100];
strcpy(a, "sleep\0");
strcpy(b, "1\0");
args = new char*[2];
args[0] = a;
args[1] = b;
options.exit_cb = on_exit;
options.file = "sleep";
options.args = args;
std::cout << "Going to sleep..." << std::endl;
int const r = uv_spawn(loop, process, &options);
if (r < 0) {
std::cout << error_to_string(r) << std::endl;
return 1;
}
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
I am using libuv 1.11.0, Clang 4.0.1, and C++14.
Can you spot my mistake?
Your args array is set up wrong. It should be:
args = new char*[3];
args[0] = a;
args[1] = b;
args[2] = NULL;
Otherwise uv_spawn doesn't know where the end of the array is.
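For reference, a minimal way to build the array (a sketch that also drops the manual strcpy buffers); uv_process_options_t.args expects a NULL-terminated array of writable char*:
static char arg0[] = "sleep"; // writable copies of the argv strings
static char arg1[] = "1";
static char* args[] = { arg0, arg1, NULL }; // NULL marks the end of argv
options.file = "sleep";
options.args = args;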
I am trying to use clang+llvm 3.6 to JIT-compile several C functions (each can eventually be very large).
Unfortunately, the function pointer that LLVM provides makes the program segfault.
So far I have following code:
#include <iostream>
#include <clang/CodeGen/CodeGenAction.h>
#include <clang/Basic/DiagnosticOptions.h>
#include <clang/Basic/TargetInfo.h>
#include <clang/Basic/SourceManager.h>
#include <clang/Frontend/CompilerInstance.h>
#include <clang/Frontend/CompilerInvocation.h>
#include <clang/Frontend/FrontendDiagnostic.h>
#include <clang/Frontend/TextDiagnosticPrinter.h>
#include <clang/Frontend/Utils.h>
#include <clang/Parse/ParseAST.h>
#include <clang/Lex/Preprocessor.h>
#include <llvm/Analysis/Passes.h>
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include <llvm/ExecutionEngine/MCJIT.h>
#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/IR/Verifier.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/Bitcode/ReaderWriter.h>
#include <llvm/Support/ManagedStatic.h>
#include <llvm/Support/MemoryBuffer.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/Support/raw_os_ostream.h>
#include <llvm/Linker/Linker.h>
int main(int argc, char *argv[]) {
using namespace llvm;
using namespace clang;
static const char* clangArgv [] = {"program", "-x", "c", "string-input"};
static const int clangArgc = sizeof (clangArgv) / sizeof (clangArgv[0]);
// C functions to be compiled (they could eventually be extremely large)
std::map<std::string, std::string> func2Source;
func2Source["getOne"] = "int getOne() {return 1;}";
func2Source["getTwo"] = "int getTwo() {return 2;}";
llvm::InitializeAllTargets();
llvm::InitializeAllAsmPrinters();
std::unique_ptr<llvm::Linker> linker;
std::unique_ptr<llvm::LLVMContext> context(new llvm::LLVMContext());
std::unique_ptr<llvm::Module> module;
/**
* add each C function to the same module
*/
for (const auto& p : func2Source) {
const std::string& source = p.second;
IntrusiveRefCntPtr<DiagnosticOptions> diagOpts = new DiagnosticOptions();
TextDiagnosticPrinter *diagClient = new TextDiagnosticPrinter(llvm::errs(), &*diagOpts); // will be owned by diags
IntrusiveRefCntPtr<DiagnosticIDs> diagID(new DiagnosticIDs());
IntrusiveRefCntPtr<DiagnosticsEngine> diags(new DiagnosticsEngine(diagID, &*diagOpts, diagClient));
ArrayRef<const char *> args(clangArgv + 1, // skip program name
clangArgc - 1);
std::unique_ptr<CompilerInvocation> invocation(createInvocationFromCommandLine(args, diags));
if (invocation.get() == nullptr) {
std::cerr << "Failed to create compiler invocation" << std::endl;
exit(1);
}
CompilerInvocation::setLangDefaults(*invocation->getLangOpts(), IK_C,
LangStandard::lang_unspecified);
invocation->getFrontendOpts().DisableFree = false; // make sure we free memory (by default it does not)
// Create a compiler instance to handle the actual work.
CompilerInstance compiler;
compiler.setInvocation(invocation.release());
// Create the compilers actual diagnostics engine.
compiler.createDiagnostics(); //compiler.createDiagnostics(argc, const_cast<char**> (argv));
if (!compiler.hasDiagnostics()) {
std::cerr << "No diagnostics" << std::endl;
exit(1);
}
// Create memory buffer with source text
std::unique_ptr<llvm::MemoryBuffer> buffer = llvm::MemoryBuffer::getMemBufferCopy(source, "SIMPLE_BUFFER");
if (buffer.get() == nullptr) {
std::cerr << "Failed to create memory buffer" << std::endl;
exit(1);
}
// Remap auxiliary name "string-input" to memory buffer
PreprocessorOptions& po = compiler.getInvocation().getPreprocessorOpts();
po.addRemappedFile("string-input", buffer.release());
// Create and execute the frontend to generate an LLVM bitcode module.
clang::EmitLLVMOnlyAction action(context.get());
if (!compiler.ExecuteAction(action)) {
std::cerr << "Failed to emit LLVM bitcode" << std::endl;
exit(1);
}
std::unique_ptr<llvm::Module> module1 = action.takeModule();
if (module1.get() == nullptr) {
std::cerr << "No module" << std::endl;
exit(1);
}
if (linker.get() == nullptr) {
module.reset(module1.release());
linker.reset(new llvm::Linker(module.get()));
} else {
if (linker->linkInModule(module1.release())) {
std::cerr << "LLVM failed to link module" << std::endl;
exit(1);
}
}
}
llvm::InitializeNativeTarget();
llvm::Module* m = module.get();
std::string errStr;
std::unique_ptr<llvm::ExecutionEngine> executionEngine(EngineBuilder(std::move(module))
.setErrorStr(&errStr)
.setEngineKind(EngineKind::JIT)
.setMCJITMemoryManager(std::unique_ptr<SectionMemoryManager>(new SectionMemoryManager()))
.setVerifyModules(true)
.create());
if (!executionEngine.get()) {
std::cerr << "Could not create ExecutionEngine: " + errStr << std::endl;
exit(1);
}
executionEngine->finalizeObject();
/**
* Lets try to use each function
*/
for (const auto& p : func2Source) {
const std::string& funcName = p.first;
llvm::Function* func = m->getFunction(funcName);
if (func == nullptr) {
std::cerr << "Unable to find function '" << funcName << "' in LLVM module" << std::endl;
exit(1);
}
// Validate the generated code, checking for consistency.
llvm::raw_os_ostream os(std::cerr);
bool failed = llvm::verifyFunction(*func, &os);
if (failed) {
std::cerr << "Failed to verify function '" << funcName << "' in LLVM module" << std::endl;
exit(1);
}
#if 1
func->dump(); // Dump the function for exposition purposes.
// JIT the function, returning a function pointer.
void *fPtr = executionEngine->getPointerToFunction(func); ///// BAD function pointer!!!!
// Cast it to the right type (takes no arguments, returns a double) so we
// can call it as a native function.
int (*funcPtr)();
*(int **) (&funcPtr) = *(int **) fPtr;
int v = (*funcPtr)();
std::cout << "return: " << v << std::endl;
#else // THIS DOES NOT WORK EITHER:
// JIT the function, returning a function pointer.
uint64_t fPtr = executionEngine->getFunctionAddress(funcName); ///// BAD function pointer!!!!
if (fPtr == 0) {
std::cerr << "Unable to find function '" << funcName << "' in LLVM module" << std::endl;
exit(1);
}
int (*funcPtr)();
*(int **) (&funcPtr) = *(int **) fPtr;
int v = (*funcPtr)();
std::cout << "return: " << v << std::endl;
#endif
}
}
Can anyone help me pinpoint the problem?
(I'm running this on Linux, Ubuntu 15.04.)
This assignment is incredibly messed up:
*(int **) (&funcPtr) = *(int **) fPtr;
Not only does it violate strict-aliasing to write an int* and then use it as a function pointer on the next line, but a data pointer is often not large enough to hold an entire code pointer.
The safe approach is either
memcpy(&funcPtr, &fPtr, sizeof funcPtr);
or
funcPtr = reinterpret_cast<decltype(funcPtr)>(fPtr);
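Applied to the code in the question, the getFunctionAddress branch becomes (a sketch; getFunctionAddress returns a plain uint64_t, which can be cast straight to the right signature):
uint64_t addr = executionEngine->getFunctionAddress(funcName);
if (addr == 0) {
std::cerr << "Unable to find function '" << funcName << "' in LLVM module" << std::endl;
exit(1);
}
auto funcPtr = reinterpret_cast<int (*)()>(addr);
std::cout << "return: " << funcPtr() << std::endl;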