I have a problem with OpenSSL and the EVP functions:
When I execute the following code
#include <openssl/evp.h>
#include <openssl/engine.h>
#include <openssl/err.h> // for ERR_get_error / ERR_error_string_n
#include <array>
#include <iostream>

void hash(ENGINE* eng) {
    EVP_MD_CTX *_mdctx(EVP_MD_CTX_create());
    int ret = EVP_DigestInit_ex(_mdctx, EVP_sha512(), eng);
    EVP_MD_CTX_destroy(_mdctx);
    if(1 == ret) {
        std::cout << "Finished successfully (with eng=" << eng << ")" << std::endl;
        return;
    } else {
        std::array<char, 256> err_str;
        ERR_error_string_n(ERR_get_error(), err_str.data(), err_str.size());
        std::cout << "Error at Digest (engine: " << ENGINE_get_id(eng) << "): " << err_str.data() << std::endl;
    }
}

int main(void) {
    ENGINE_load_builtin_engines();
    hash(nullptr);
    for(ENGINE *eng = ENGINE_get_first(); eng != nullptr; eng = ENGINE_get_next(eng)) {
        hash(eng);
    }
}
I get the following output:
Finished successfully (with eng=0)
Error at Digest (engine: rdrand): error:260BA093:engine routines:ENGINE_get_digest:unimplemented digest
Error at Digest (engine: dynamic): error:06080086:digital envelope routines:EVP_DigestInit_ex:initialization error
I understand that rdrand doesn't support digests, but why do I get an initialization error when I use the dynamic engine? In particular, why does it work when I call EVP_DigestInit_ex with eng=nullptr?
The code can be compiled with g++ example.cpp -std=c++17 -Wall -Wextra -Werror -pedantic -O2 -lssl -lcrypto. I am using g++ 6.3.0 and OpenSSL 1.1.0f.
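As an illustration, one way to skip engines that don't implement the digest in the first place is to ask the engine for it directly. A minimal sketch, assuming OpenSSL 1.1.x, where ENGINE_get_digest returns NULL if the engine has no implementation for the given NID:

#include <openssl/engine.h>
#include <openssl/obj_mac.h> // NID_sha512

// Sketch: true if EVP_DigestInit_ex(ctx, EVP_sha512(), eng) can
// plausibly succeed with this engine.
bool engine_supports_sha512(ENGINE* eng) {
    return eng == nullptr // nullptr selects the default software implementation
        || ENGINE_get_digest(eng, NID_sha512) != nullptr;
}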
I am trying to run a very simple example of the std::get_time() function, but the parse is failing.
#include <iostream>
#include <sstream>
#include <locale>
#include <iomanip>

int main()
{
    std::tm t = {};
    std::istringstream ss("04-02-2022 3:04:32");
    ss >> std::get_time(&t, "%m-%d-%Y %H:%M:%S");
    if (ss.fail()) {
        std::cout << "Parse failed\n";
        if ((ss.rdstate() & std::istringstream::failbit) != 0)
            std::cerr << "Error failbit\n";
        if ((ss.rdstate() & std::istringstream::goodbit) != 0) // note: goodbit is 0, so this can never be true
            std::cerr << "Error goodbit\n";
        if ((ss.rdstate() & std::istringstream::eofbit) != 0)
            std::cerr << "Error eofbit\n";
        if ((ss.rdstate() & std::istringstream::badbit) != 0)
            std::cerr << "Error badbit\n";
    }
    else {
        std::cout << std::put_time(&t, "%c") << '\n';
    }
}
I am using Ubuntu 20.04 in Windows via WSL, which has g++ 9.4 installed, and I am compiling with the terminal command:
g++ -std=c++17 date.cpp -o date
The error bit that is set is std::istringstream::failbit.
Any ideas what is going on?
I was able to reproduce the failure in gcc 9.4. It works if you change 3 to 03 in the hour field.
Even though %H is not supposed to require a leading zero, this appears to be a gcc bug where it does. See Bug 78714. Despite being about %b, this comment in that ticket goes into detail about how the same issue also affects %H and other placeholders.
The code works fine in gcc 12.1.
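Until you can move to a fixed gcc, a minimal sketch of a workaround, assuming the bug only bites on fields without a leading zero, is to normalize the input before handing it to std::get_time; the pad_time_fields helper below is hypothetical, not a standard facility:

#include <cctype>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// Hypothetical helper: zero-pad every single-digit numeric field,
// e.g. "04-02-2022 3:04:32" -> "04-02-2022 03:04:32".
std::string pad_time_fields(const std::string& in)
{
    std::string out;
    for (std::size_t i = 0; i < in.size(); ++i) {
        bool field_start = (i == 0) || !std::isdigit(static_cast<unsigned char>(in[i - 1]));
        bool single_digit = std::isdigit(static_cast<unsigned char>(in[i]))
            && (i + 1 == in.size() || !std::isdigit(static_cast<unsigned char>(in[i + 1])));
        if (field_start && single_digit)
            out += '0';
        out += in[i];
    }
    return out;
}

int main()
{
    std::tm t = {};
    std::istringstream ss(pad_time_fields("04-02-2022 3:04:32"));
    ss >> std::get_time(&t, "%m-%d-%Y %H:%M:%S");
    std::cout << (ss.fail() ? "Parse failed\n" : "Parsed OK\n");
}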
I have checked many Stack Overflow posts, but no answers solve my problem.
I get two errors:
g++ .\main.cpp -fopenmp -o test
.\main.cpp:12:14: error: 'std::this_thread' has not been declared
12 | std::this_thread::sleep_for(chrono::seconds(20000) );
.\main.cpp:12:37: error: 'chrono' has not been declared
12 | std::this_thread::sleep_for(chrono::seconds(20000) );
My current G++ version is:
g++.exe (MinGW.org GCC Build-2) 9.2.0
The code is:
#include <iostream>
#include <chrono>
#include <thread>
#include <omp.h>

int main()
{
    omp_set_num_threads(4);
    #pragma omp parallel
    {
        std::this_thread::sleep_for(chrono::seconds(20000) );
        std::cout << "Number of available threads: " << omp_get_num_threads() << std::endl;
        std::cout << "Current thread number: " << omp_get_thread_num() << std::endl;
        std::cout << "Hello, World!" << std::endl;
    }
    return 0;
}
I have already tried -std=c++11, -std=c++14, and -std=c++17.
I'm not sure it's right, but regarding your second error: can you replace
std::this_thread::sleep_for(chrono::seconds(20000) );
with
std::this_thread::sleep_for(std::chrono::seconds(20000));
I think the first error follows from the second, but I'm not sure.
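If the first error persists even with std::chrono fully qualified, it may be worth checking whether the toolchain provides std::this_thread at all: MinGW.org builds of g++ are known to lack std::thread/std::this_thread support, unlike MinGW-w64 with the posix thread model. A minimal probe, compiled without OpenMP:

// probe.cpp -- checks that the toolchain provides std::this_thread.
// Build: g++ -std=c++11 probe.cpp -o probe
#include <chrono>
#include <thread>

int main()
{
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    return 0;
}

If this fails to compile with the same "has not been declared" error, the problem is the toolchain rather than the OpenMP code.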
For a legacy project I have to compile our code into a dylib with gcc, which is then linked into a clang app, starting with Intel-based macOS 10.15 (Catalina). I work with Big Sur 11.4 on a Mac mini 2020 M1.
For testing purposes, I have created a small C++ project with header
foo.h
#pragma once

class Foo
{
public:
    Foo();
    virtual ~Foo();
    void throw_runtime_error(const char* msg);
    int bar(int a);
};
and code foo.cpp
#include <iostream>
#include <stdexcept>
#include "foo.h"

using namespace std;

Foo::Foo()
{
    cout << "Foo:Foo() called" << endl;
}

Foo::~Foo()
{
    cout << "Foo:~Foo() called" << endl;
}

void Foo::throw_runtime_error(const char* msg)
{
    cout << "throwing runtime_error " << msg << endl;
    throw runtime_error( msg );
}

int Foo::bar(int a)
{
    cout << "Foo::bar(" << a << ") called" << endl;
    return a + 1;
}
To create the dylib in a build subdir, I use the Homebrew-installed g++ 10.2 with
g++-10 -arch x86_64 -fexceptions -dynamiclib -I../include ../src/foo.cpp -o ../bin/libMyDylib.dylib
To test how this works together with clang 12.2.0, I have created a main.cpp in the bin subdir
#include "../include/foo.h"
#include <iostream>
#include <stdexcept>
int main()
{
    Foo foo;
    try
    {
        foo.throw_runtime_error("Hello you ugly little creatures!");
    }
    catch( const std::runtime_error &r )
    {
        std::cout << "Caught foo runtime_error, what = " << r.what() << std::endl;
    }
    catch( const std::exception &x )
    {
        std::cout << "Caught foo exception, what = " << x.what() << std::endl;
    }
    catch(...)
    {
        std::cout << "Caught foo exception, but without what()" << std::endl;
    }
    return foo.bar(42);
}
Now, if I use gcc-10 to compile main.cpp and link it with libMyDylib.dylib via g++-10 -arch x86_64 -L/Users/macos/bin -lMyDylib main.cpp -o main, the exception handler in main.cpp catches the std::runtime_error properly.
If I instead use clang 12.2 (plain g++ on macOS is Apple's clang) via g++ -arch x86_64 -L/Users/macos/bin -lMyDylib main.cpp -o main,
then the output shows that only the catch (...) handler is triggered.
./main
Foo:Foo() called
throwing runtime_error Hello you ugly little creatures!
Caught foo exception, but without what()
Foo::bar(42) called
Foo:~Foo() called
So, can someone please give me a hint about what I am missing in order to get 'cross-compiler' exceptions handled properly?
Thanks a lot!
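A hedged pointer for debugging this: catching an exception by type across the library boundary only works if both binaries agree on the C++ runtime, and Homebrew GCC links against its own libstdc++ while Apple's clang defaults to libc++. One way to check what each binary actually links against is otool:
otool -L bin/libMyDylib.dylib
otool -L main
If the dylib lists GCC's libstdc++ and main lists the system libc++, then the std::runtime_error thrown in the dylib is, as far as RTTI matching is concerned, a different type from the std::runtime_error the clang-built main tries to catch, which would explain why only catch (...) fires.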
Currently I am testing a vendor-provided shared library on Linux;
the following is the simple source:
#include <iostream>

using namespace std;

extern int test1();
extern int test2();

int main()
{
    cout << "hello world" << endl;
    return 0; // early return: nothing below this line is ever executed

    cout << "Test 1" << endl;
    test1();
    cout << "Test 2" << endl;
    test2();
    return 0;
}
I compile and link like this:
g++ -g -Wall -fPIC -D_DEBUG -o test -I./include32 src/xxx.cpp src/yyy.cpp src/test.cpp -L./lib32 -lshared1 -lshared2
I get the following output when I run it:
hello world
*** glibc detected *** ./test: double free or corruption (!prev): 0x00000000077ec30 ***
What I don't get is: since I only print "hello world" and then return 0, I never call any function in libshared1.so or libshared2.so. So why does this glibc error happen? Does it mean the shared libraries already have a problem when they are loaded into memory, given that main never calls test1() or test2(), which are what actually call into libshared1.so and libshared2.so?
Any suggestions or comments are most appreciated!
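For context on the loading question: even if main never calls into a linked shared library, any global or static object defined in that library has its constructor run by the dynamic loader when the library is loaded, before main starts, so buggy initialization code can crash a program that never calls a single library function. A minimal sketch, with hypothetical names, that reproduces the pattern:

// buggy_lib.cpp -- hypothetical vendor library; build with:
//   g++ -shared -fPIC buggy_lib.cpp -o libbuggy.so
#include <cstdlib>

struct LoadTimeInit {
    LoadTimeInit() {
        // Runs at load time, before main() of any program linked
        // against (or dlopen-ing) libbuggy.so.
        void* p = std::malloc(16);
        std::free(p);
        // std::free(p); // a second free here would reproduce
        //               // "double free or corruption" at load time
    }
};

static LoadTimeInit init_at_load; // constructed by the dynamic loader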
Edit:
#include <iostream>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>

using namespace std;

int main()
{
    cout << "hello world 3 " << endl;

    void *handle2;
    handle2 = dlopen ("/usr/local/lib/xxx.so", RTLD_LAZY);
    if (!handle2) {
        fprintf (stderr, "%s\n", dlerror());
        exit(1);
    }

    cout << "hello world 1 " << endl;

    void *handle3;
    handle3 = dlopen ("/usr/local/lib/yyy.so", RTLD_LAZY);
    if (!handle3) {
        fprintf (stderr, "%s\n", dlerror());
        exit(1);
    }

    cout << "hello world" << endl;
}
Compile:
g++ -g -Wall -rdynamic -o test src/test.cpp -ldl
Output:
hello world 3
hello world 1
Segmentation fault (core dumped)
Does the vendor really provide a damaged shared library?!
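A hedged suggestion for narrowing this down: the segmentation fault happens inside the second dlopen, before any library function is called, so running under gdb and taking a backtrace should show which load-time initializer or relocation in yyy.so is faulting:
gdb ./test
(gdb) run
(gdb) bt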
I have a GUI application with a producer thread and an OpenGL thread; the OpenGL thread needs to call CUDA functions, and the producer needs to call cudaMemcpy etc.
No matter what I do, I can't seem to get the CUDA driver API to work. Every time I try to use these functions I get a cudaErrorMissingConfiguration.
I want to use multi-threaded CUDA; what is the paradigmatic way to accomplish this?
Original
void program::initCuda()
{
    CUresult a;
    pctx = 0;
    cudaSafeCall(cudaSetDevice(0));
    cudaSafeCall(cudaGLSetGLDevice(0));
    a = cuInit(0);
    cudaSafeCall(cudaFree(0));
    cout << "cuInit :" << a << endl;
    assert(a == cudaSuccess);
    //a = cuCtxGetCurrent(pctx);
    a = cuCtxCreate(pctx, CU_CTX_SCHED_AUTO, 0);
    cout << "GetContext :" << a << endl;
    assert(a == cudaSuccess);
    // Fails with cudaErrorMissingConfiguration
    a = cuCtxPopCurrent(pctx);
    cout << "cuCtxPopCurrent :" << a << endl;
    assert(a == cudaSuccess);
    cout << "Initialized CUDA" << endl;
}
Revised
void glStream::initCuda()
{
    CUresult a;
    pctx = 0;
    cudaSafeCall(cudaSetDevice(0));
    cudaSafeCall(cudaGLSetGLDevice(0));
    cudaFree(0); // http://stackoverflow.com/questions/10415204/how-to-create-a-cuda-context seems to indicate that cudaSetDevice should make a context.
    a = cuCtxGetCurrent(pctx);
    cout << "GetContext :" << a << endl;
    assert(a == cudaSuccess);
    a = cuCtxPopCurrent(pctx);
    cout << "cuCtxPopCurrent :" << a << endl;
    assert(a == cudaSuccess);
    cout << "Initialized CUDA" << endl;
}
The simplest version of your second code should look like this:
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

int main(void)
{
    CUresult a;
    CUcontext pctx;

    cudaSetDevice(0); // runtime API creates context here

    a = cuCtxGetCurrent(&pctx);
    std::cout << "GetContext : " << a << std::endl;
    assert(a == CUDA_SUCCESS);

    a = cuCtxPopCurrent(&pctx);
    std::cout << "cuCtxPopCurrent : " << a << std::endl;
    assert(a == CUDA_SUCCESS);

    std::cout << "Initialized CUDA" << std::endl;
    return 0;
}
which yields the following on OS X 10.6 with CUDA 5.0:
$ g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib driver.cc -lcuda -lcudart
$ ./a.out
GetContext :0
cuCtxPopCurrent :0
Initialized CUDA
ie. "just works". Here the context is lazily initiated by the cudaSetDevice call (note I incorrectly asserted that cudaSetDevice doesn't establish a context, but at least in CUDA 5 it appears to. This behaviour may have changed when the runtime API was revised in CUDA 4).
Alternatively, you can use the driver API to initiate the context:
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

int main(void)
{
    CUresult a;
    CUcontext pctx;
    CUdevice device;

    cuInit(0);
    a = cuDeviceGet(&device, 0); // assign the result so the printed status is meaningful
    std::cout << "DeviceGet : " << a << std::endl;

    a = cuCtxCreate(&pctx, CU_CTX_SCHED_AUTO, device); // explicit context here
    std::cout << "CtxCreate : " << a << std::endl;
    assert(a == CUDA_SUCCESS);

    a = cuCtxPopCurrent(&pctx);
    std::cout << "cuCtxPopCurrent : " << a << std::endl;
    assert(a == CUDA_SUCCESS);

    std::cout << "Initialized CUDA" << std::endl;
    return 0;
}
which also "just works":
$ g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib driver.cc -lcuda -lcudart
$ ./a.out
DeviceGet : 0
CtxCreate : 0
cuCtxPopCurrent : 0
Initialized CUDA
What you shouldn't do is mix both as in your first example. All I can suggest is to try both of these, confirm they work for you, and then adapt the call sequences to whatever it is you are actually trying to achieve.
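On the multi-threading part of the question: since CUDA 4.0 the runtime API shares a single context per device across all host threads, so each thread can simply select the device and issue runtime calls without any manual context push/pop. A minimal sketch, assuming CUDA 4.0 or later (buffer size and thread count are arbitrary):

#include <cuda_runtime.h>
#include <iostream>
#include <thread>

// Each host thread selects device 0 and issues runtime API calls;
// all of them share the device's primary context.
void worker(int id)
{
    cudaSetDevice(0); // binds this thread to device 0's context
    void* buf = nullptr;
    cudaError_t err = cudaMalloc(&buf, 1 << 20);
    if (err == cudaSuccess) {
        cudaMemset(buf, 0, 1 << 20); // any runtime call works from any thread
        cudaFree(buf);
    }
    std::cout << "thread " << id << ": " << cudaGetErrorString(err) << std::endl;
}

int main()
{
    std::thread t1(worker, 1), t2(worker, 2);
    t1.join();
    t2.join();
    return 0;
}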