How to use Boost.Interprocess to stream data to other applications?

The main application needs to update the shared memory at a high frequency, and several consuming applications need to read the streamed data from it. The main and consuming applications are separate processes.
How can this be implemented with Boost.Interprocess?

producer:
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/cstdint.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
#include <cassert>
#include <iostream>

struct shared_data_t {
    boost::uint32_t data;
    boost::interprocess::interprocess_mutex mutex;
};

/***************************************************************************/
/* producer */

int main(int argc, char** argv) {
    const char* shname = "_unique_object_name_";

    // shared_ptr with a custom "deleter": removes the shared memory object on scope exit
    boost::shared_ptr<const char> remover(
        shname,
        boost::interprocess::shared_memory_object::remove
    );

    try {
        boost::interprocess::shared_memory_object shared_object(
            boost::interprocess::create_only,
            shname,
            boost::interprocess::read_write
        );
        shared_object.truncate(sizeof(shared_data_t));

        boost::interprocess::mapped_region region(
            shared_object,
            boost::interprocess::read_write
        );

        // Construct the shared structure in place
        shared_data_t* data = new (region.get_address()) shared_data_t;
        assert(data);

        const boost::uint32_t count = 0x1000;
        for (boost::uint32_t idx = 0; idx < count; ++idx) {
            {
                boost::interprocess::scoped_lock<
                    boost::interprocess::interprocess_mutex
                > lock(data->mutex);
                data->data = idx;
            }
            boost::this_thread::sleep(boost::posix_time::seconds(1));
        }
    } catch (boost::interprocess::interprocess_exception& e) {
        std::cout << e.what() << std::endl;
        return 1;
    }
    return 0;
}
consumer:
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/cstdint.hpp>
#include <boost/thread.hpp>
#include <cassert>
#include <iostream>

struct shared_data_t {
    boost::uint32_t data;
    boost::interprocess::interprocess_mutex mutex;
};

/***************************************************************************/
/* consumer */

int main(int argc, char** argv) {
    try {
        // Open and map read-write: locking the interprocess_mutex writes to the
        // mapping, so a read-only mapping would fault.
        boost::interprocess::shared_memory_object shared_object(
            boost::interprocess::open_only,
            "_unique_object_name_",
            boost::interprocess::read_write
        );
        boost::interprocess::mapped_region region(
            shared_object,
            boost::interprocess::read_write
        );

        // The producer already constructed the object; just reinterpret the mapping
        // instead of placement-new'ing over the live mutex.
        shared_data_t* data = static_cast<shared_data_t*>(region.get_address());
        assert(data);

        while (true) {
            {
                boost::interprocess::scoped_lock<
                    boost::interprocess::interprocess_mutex
                > lock(data->mutex);
                std::cout << "ping: " << data->data << std::endl;
            }
            boost::this_thread::sleep(boost::posix_time::milliseconds(100));
        }
    } catch (boost::interprocess::interprocess_exception& e) {
        std::cout << e.what() << std::endl;
        return 1;
    }
    return 0;
}
man:
http://www.boost.org/doc/libs/1_43_0/doc/html/interprocess/synchronization_mechanisms.html


Boost interprocess: string in a *not managed* shared memory?

I know that constructing a string in shared memory needs an allocator.
That's fine, but I can't figure out how to do it, because all the examples use a managed shared memory, whose get_segment_manager() is used to build the allocator (if I'm not wrong).
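For reference, a minimal sketch of that managed idiom (the segment name, its size, and the object name "msg" are my own choices, not from the question):

#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/containers/string.hpp>
#include <iostream>
#include <string>

namespace bip = boost::interprocess;

// Allocator that hands out memory from the managed segment
using char_alloc = bip::allocator<char, bip::managed_shared_memory::segment_manager>;
using shm_string = bip::basic_string<char, std::char_traits<char>, char_alloc>;

int main() {
    bip::shared_memory_object::remove("ManagedDemo");
    bip::managed_shared_memory segment(bip::create_only, "ManagedDemo", 64 * 1024);

    // The allocator is built from the segment manager; the string's characters
    // are then allocated inside the shared segment.
    char_alloc alloc(segment.get_segment_manager());
    shm_string* s = segment.construct<shm_string>("msg")("hello from shared memory", alloc);

    std::cout << *s << "\n";
    bip::shared_memory_object::remove("ManagedDemo");
}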
Let's see this example copied from here: https://www.boost.org/doc/libs/1_77_0/doc/html/interprocess/synchronization_mechanisms.html#interprocess.synchronization_mechanisms.conditions.conditions_anonymous_example
doc_anonymous_condition_shared_data.hpp
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/sync/interprocess_condition.hpp>

struct trace_queue
{
    enum { LineSize = 100 };

    trace_queue()
        : message_in(false)
    {}

    //Mutex to protect access to the queue
    boost::interprocess::interprocess_mutex mutex;

    //Condition to wait when the queue is empty
    boost::interprocess::interprocess_condition cond_empty;

    //Condition to wait when the queue is full
    boost::interprocess::interprocess_condition cond_full;

    //Items to fill
    char items[LineSize];

    //Is there any message
    bool message_in;
};
Main process
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <iostream>
#include <cstdio>
#include "doc_anonymous_condition_shared_data.hpp"

using namespace boost::interprocess;

int main ()
{
    //Erase previous shared memory and schedule erasure on exit
    struct shm_remove
    {
        shm_remove() { shared_memory_object::remove("MySharedMemory"); }
        ~shm_remove(){ shared_memory_object::remove("MySharedMemory"); }
    } remover;

    //Create a shared memory object.
    shared_memory_object shm
        (create_only           //only create
        ,"MySharedMemory"      //name
        ,read_write            //read-write mode
        );

    try{
        //Set size
        shm.truncate(sizeof(trace_queue));

        //Map the whole shared memory in this process
        mapped_region region
            (shm            //What to map
            ,read_write     //Map it as read-write
            );

        //Get the address of the mapped region
        void * addr = region.get_address();

        //Construct the shared structure in memory
        trace_queue * data = new (addr) trace_queue;

        const int NumMsg = 100;

        for(int i = 0; i < NumMsg; ++i){
            scoped_lock<interprocess_mutex> lock(data->mutex);
            if(data->message_in){
                data->cond_full.wait(lock);
            }
            if(i == (NumMsg-1))
                std::sprintf(data->items, "%s", "last message");
            else
                std::sprintf(data->items, "%s_%d", "my_trace", i);

            //Notify to the other process that there is a message
            data->cond_empty.notify_one();

            //Mark message buffer as full
            data->message_in = true;
        }
    }
    catch(interprocess_exception &ex){
        std::cout << ex.what() << std::endl;
        return 1;
    }

    return 0;
}
Second process:
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <iostream>
#include <cstring>
#include "doc_anonymous_condition_shared_data.hpp"

using namespace boost::interprocess;

int main ()
{
    //Open the shared memory object.
    shared_memory_object shm
        (open_only             //only open
        ,"MySharedMemory"      //name
        ,read_write            //read-write mode
        );

    try{
        //Map the whole shared memory in this process
        mapped_region region
            (shm            //What to map
            ,read_write     //Map it as read-write
            );

        //Get the address of the mapped region
        void * addr = region.get_address();

        //Obtain a pointer to the shared structure
        trace_queue * data = static_cast<trace_queue*>(addr);

        //Print messages until the other process marks the end
        bool end_loop = false;
        do{
            scoped_lock<interprocess_mutex> lock(data->mutex);
            if(!data->message_in){
                data->cond_empty.wait(lock);
            }
            if(std::strcmp(data->items, "last message") == 0){
                end_loop = true;
            }
            else{
                //Print the message
                std::cout << data->items << std::endl;

                //Notify the other process that the buffer is empty
                data->message_in = false;
                data->cond_full.notify_one();
            }
        }
        while(!end_loop);
    }
    catch(interprocess_exception &ex){
        std::cout << ex.what() << std::endl;
        return 1;
    }

    return 0;
}
I'd like to replace char items[LineSize]; with a more convenient string in the trace_queue struct.
How can I do that without the Managed Shared Memory?
Or is this simply not recommended without the managed Boost libraries?
"Or is this simply not recommended without the managed Boost libraries?"
I cannot recommend it. It's fine to do it unmanaged, but I'd 100% suggest the exact approach they gave with the fixed char array. What's wrong with that?
You cannot have your cake and eat it. You can't wish for "high-level dynamic strings" and "no heap management overhead" magically at the same time.
That said, you may be able to find some trade-offs. Specifically, you might want to emulate something like a polymorphic memory resource in such a shared byte array. Then you could use std::pmr::string on top of that. Tragedy has it that memory_resource isn't shared-memory safe.
SIMPLIFY
However, I suppose all you need is some nice abstraction, where the interface uses C++ vocabulary types. Why not simplify the entire deal to that point?
Here's a quick draft:
struct trace_queue {
  private:
    bip::interprocess_mutex     mutex;
    bip::interprocess_condition cond;
    std::array<char, 300>       buffer{};
    bool message_in{false}; // Is there any message

    auto wait(bool state) {
        bip::scoped_lock lock(mutex);
        cond.wait(lock, [=, this] { return message_in == state; });
        return lock;
    }

  public:
    void send(std::string_view msg) {
        auto lock = wait(false); // !message_in
        auto n = std::min(buffer.size(), msg.size());
        std::fill(buffer.begin(), buffer.end(), '\0');
        std::copy_n(msg.data(), n, buffer.begin());

        message_in = true;
        cond.notify_one();
    }

    std::string receive() {
        auto lock = wait(true); // message_in
        std::string msg(buffer.data(), strnlen(buffer.data(), buffer.size()));

        message_in = false;
        cond.notify_one();
        return msg;
    }
};
In my opinion the code is already easier to read. And it's certainly easier to use! The entire server side:
// Create a shared memory object.
bip::shared_memory_object shm(bip::create_only, "MySharedMemory", bip::read_write);
shm.truncate(sizeof(trace_queue));

// Map the whole shared memory in this process
bip::mapped_region region(shm, bip::read_write);
trace_queue& data = *new (region.get_address()) trace_queue;

for (int i = 0; i < 99; ++i)
    data.send("my_trace_" + std::to_string(i));
data.send("TEARDOWN");
And the client side:
bip::shared_memory_object shm(bip::open_only, "MySharedMemory", bip::read_write);
bip::mapped_region region(shm, bip::read_write);
trace_queue& data = *static_cast<trace_queue*>(region.get_address());

while (true) {
    auto msg = data.receive();
    if (msg == "TEARDOWN")
        break;
    std::cout << msg << "\n";
}
See it Live On Coliru
#include <algorithm>
#include <array>
#include <cstring>
#include <string>
#include <string_view>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/sync/interprocess_condition.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <iostream>

namespace bip = boost::interprocess;

struct trace_queue {
  private:
    bip::interprocess_mutex     mutex;
    bip::interprocess_condition cond;
    std::array<char, 300>       buffer{};
    bool message_in{false}; // Is there any message

    auto wait(bool state) {
        bip::scoped_lock lock(mutex);
        cond.wait(lock, [=, this] { return message_in == state; });
        return lock;
    }

  public:
    void send(std::string_view msg) {
        auto lock = wait(false); // !message_in
        auto n = std::min(buffer.size(), msg.size());
        std::fill(buffer.begin(), buffer.end(), '\0');
        std::copy_n(msg.data(), n, buffer.begin());

        message_in = true;
        cond.notify_one();
    }

    std::string receive() {
        auto lock = wait(true); // message_in
        std::string msg(buffer.data(), strnlen(buffer.data(), buffer.size()));

        message_in = false;
        cond.notify_one();
        return msg;
    }
};

int main(int argc, char**) {
    try {
        if (argc < 2) {
            // Erase previous shared memory and schedule erasure on exit
            struct shm_remove {
                shm_remove() { bip::shared_memory_object::remove("MySharedMemory"); }
                ~shm_remove() { bip::shared_memory_object::remove("MySharedMemory"); }
            } remover;

            // Create a shared memory object.
            bip::shared_memory_object shm(bip::create_only, "MySharedMemory", bip::read_write);
            shm.truncate(sizeof(trace_queue));

            // Map the whole shared memory in this process
            bip::mapped_region region(shm, bip::read_write);
            trace_queue& data = *new (region.get_address()) trace_queue;

            for (int i = 0; i < 99; ++i)
                data.send("my_trace_" + std::to_string(i));
            data.send("TEARDOWN");
        } else {
            bip::shared_memory_object shm(bip::open_only, "MySharedMemory", bip::read_write);
            bip::mapped_region region(shm, bip::read_write);
            trace_queue& data = *static_cast<trace_queue*>(region.get_address());

            while (true) {
                auto msg = data.receive();
                if (msg == "TEARDOWN")
                    break;
                std::cout << msg << "\n";
            }
        }
    } catch (std::exception const& ex) {
        std::cout << ex.what() << std::endl;
        return 1;
    }
}
Output, as expected:

How can I take a value from thread without future?

I wrote a program that counts all words in the .log files in a directory using multiple threads and prints the result on the screen. The first command-line argument is the directory to search for .log files; the second is the number of threads (4 by default).
I used a thread pool for this program.
ThreadPool.h
#ifndef THREAD_POOL_H
#define THREAD_POOL_H

#include <boost/thread/condition_variable.hpp>
#include <boost/thread.hpp>
#include <future> // I don't know how to work with boost future
#include <queue>
#include <vector>
#include <functional>

class ThreadPool
{
public:
    using Task = std::function<void()>; // Our task

    explicit ThreadPool(int num_threads)
    {
        start(num_threads);
    }

    ~ThreadPool()
    {
        stop();
    }

    template<class T>
    auto enqueue(T task) -> std::future<decltype(task())>
    {
        // packaged_task wraps any Callable target
        auto wrapper = std::make_shared<std::packaged_task<decltype(task()) ()>>(std::move(task));
        {
            boost::unique_lock<boost::mutex> lock{ mutex_p };
            tasks_p.emplace([=] {
                (*wrapper)();
            });
        }
        event_p.notify_one();
        return wrapper->get_future();
    }

    //void enqueue(Task task)
    //{
    //    {
    //        boost::unique_lock<boost::mutex> lock { mutex_p };
    //        tasks_p.emplace(std::move(task));
    //        event_p.notify_one();
    //    }
    //}

private:
    std::vector<boost::thread> threads_p; // Worker threads
    std::queue<Task> tasks_p;             // Tasks to run
    boost::condition_variable event_p;
    boost::mutex mutex_p;
    bool isStop = false;

    void start(int num_threads)
    {
        for (int i = 0; i < num_threads; ++i)
        {
            // Add a worker thread
            threads_p.emplace_back([=] {
                while (true)
                {
                    // Task to do
                    Task task;
                    {
                        boost::unique_lock<boost::mutex> lock(mutex_p);
                        event_p.wait(lock, [=] { return isStop || !tasks_p.empty(); });

                        // If we have finished all tasks
                        if (isStop && tasks_p.empty())
                            break;

                        // Take a new task from the queue
                        task = std::move(tasks_p.front());
                        tasks_p.pop();
                    }
                    // Execute the task
                    task();
                }
            });
        }
    }

    void stop() noexcept
    {
        {
            boost::unique_lock<boost::mutex> lock(mutex_p);
            isStop = true;
            event_p.notify_all();
        }
        for (auto& thread : threads_p)
        {
            thread.join();
        }
    }
};

#endif
main.cpp
#include "ThreadPool.h"
#include <iostream>
#include <iomanip>
#include <Windows.h>
#include <vector>
#include <map>
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
namespace bfs = boost::filesystem;
int count_words(const std::string& filename)
{
int counter = 0;
std::ifstream file(filename);
std::string buffer;
while (file >> buffer)
{
++counter;
}
return counter;
}
int main(int argc, const char* argv[])
{
bfs::path path = argv[1];
// If this path is exist and if this is dir
if (bfs::exists(path) && bfs::is_directory(path))
{
// Number of threads. Default = 4
int n = (argc == 3 ? atoi(argv[2]) : 4);
ThreadPool pool(n);
// Container to store all filenames and number of words inside them
std::map<bfs::path, int> all_files_and_sums;
// Iterate all files in dir
for (auto& p : bfs::directory_iterator(path)) {
// Takes only .txt files
if (p.path().extension() == ".log") {
// Future for taking value from here
auto fut = pool.enqueue([&p, &all_files_and_sums]() {
// In this lambda function I count all words in file and return this value
int result = count_words(p.path().string());
std::cout << "TID " << GetCurrentThreadId() << "\n";
return result;
});
// "filename = words in this .txt file"
all_files_and_sums[p.path()] = fut.get();
}
}
int result = 0;
for (auto& k : all_files_and_sums)
{
std::cout << k.first << "- " << k.second << "\n";
result += k.second;
}
std::cout << "Result: " << result << "\n";
}
else
std::perror("Dir is not exist");
}
And this solution works correctly. But if there are many files in the directory, it runs very slowly. I think it's because of the futures. How can I take values from different threads without futures?
(P.S.) Sorry for my English.
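A minimal sketch, assuming the slowdown comes from calling fut.get() immediately after enqueue(): that get() blocks until the task just submitted finishes, so the pool effectively processes one file at a time. Enqueueing everything first and collecting the futures afterwards keeps the workers busy. This is meant as a drop-in replacement for the directory loop in main.cpp above; the names pending and file are illustrative:

// Sketch: enqueue all tasks first, then collect results, so workers run in parallel.
std::vector<std::pair<bfs::path, std::future<int>>> pending;

for (auto& p : bfs::directory_iterator(path)) {
    if (p.path().extension() == ".log") {
        bfs::path file = p.path();  // copy; the directory_entry is reused by the iterator
        pending.emplace_back(file, pool.enqueue([file] {
            return count_words(file.string());
        }));
    }
}

for (auto& entry : pending)
    all_files_and_sums[entry.first] = entry.second.get();  // blocks only here, after all tasks are queued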

boost interprocess message_queue and fork

I am trying to communicate with a forked child process using a message queue from the Boost.Interprocess library. When the child process calls receive, it throws an exception with the message
boost::interprocess_exception::library_error
I am using GCC 6.3 on Debian 9 x64.
#include <iostream>
#include <unistd.h>
#include <boost/interprocess/ipc/message_queue.hpp>
#include <memory>

int main(int argc, char* argv[])
{
    using namespace boost::interprocess;
    const char* name = "foo-552b8ae9-6037-4b77-aa0d-d4dc9dad790b";
    const int max_num_msg = 100;
    const int max_msg_size = 32;
    bool is_child = false;

    message_queue::remove(name);
    auto mq = std::make_unique<message_queue>(create_only, name, max_num_msg, max_msg_size);

    auto child_pid = fork();
    if (child_pid == -1)
    {
        std::cout << "fork failed" << std::endl;
        return -1;
    }
    else if (child_pid == 0)
    {
        is_child = true;
    }

    if (is_child)
    {
        // does the child need to reopen it?
        mq.reset( new message_queue(open_only, name) );
    }

    int send_num = 0;
    while (true)
    {
        unsigned int priority = 0;
        if (is_child)
        {
            message_queue::size_type bytes = 0;
            try
            {
                int num;
                // Always throws. What is wrong ???????
                mq->receive(&num, sizeof(num), bytes, priority);
                std::cout << num << std::endl;
            }
            catch (const std::exception& e)
            {
                std::cout << "Receive caused exception " << e.what() << std::endl;
            }
            sleep(1);
        }
        else
        {
            mq->send(&send_num, sizeof(send_num), priority);
            send_num++;
            sleep(5);
        }
    }
    return 0;
}
Also, in the child process, is it required to reopen the message queue created by the parent process? I tried it both ways and neither worked; I get the same exception on receive.
The problem is that your receive buffer is smaller than max_msg_size: message_queue::receive refuses a buffer that cannot hold the largest possible message and throws instead. Assuming 4-byte integers, this should work:
int num[8];
mq.receive(num, sizeof(num), bytes, priority);
std::cout << *num << std::endl;
Also, I see no reason to play fast and loose with the actual queue instance. Just create it per process:
#include <boost/interprocess/ipc/message_queue.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <iostream>
#include <memory>
#include <unistd.h>

int main() {
    namespace bip = boost::interprocess;
    const char *name = "foo-552b8ae9-6037-4b77-aa0d-d4dc9dad790b";

    {
        const int max_num_msg = 100;
        const int max_msg_size = 32;
        bip::message_queue::remove(name);
        bip::message_queue mq(bip::create_only, name, max_num_msg, max_msg_size);
    }

    auto child_pid = fork();
    if (child_pid == -1) {
        std::cout << "fork failed" << std::endl;
        return -1;
    }

    bip::message_queue mq(bip::open_only, name);

    if (bool const is_child = (child_pid == 0)) {
        while (true) {
            unsigned int priority = 0;
            bip::message_queue::size_type bytes = 0;
            try {
                int num[8];
                mq.receive(num, sizeof(num), bytes, priority);
                std::cout << *num << std::endl;
            } catch (const bip::interprocess_exception &e) {
                std::cout << "Receive caused exception " << boost::diagnostic_information(e, true) << std::endl;
            }
            sleep(1);
        }
    } else {
        // parent
        int send_num = 0;
        while (true) {
            unsigned int priority = 0;
            mq.send(&send_num, sizeof(send_num), priority);
            send_num++;
            sleep(5);
        }
    }
}

POSIX semaphore doesn't work under high contention/load

Using C++11 on Linux kernel 4.4.0-57, I'm trying to run two busy-looping processes (say p1, p2) pinned (pthread_setaffinity_np) to the same core, and to enforce an interleaved execution order using a POSIX semaphore (semaphore.h) and sched_yield(). But it did not work out well.
Below is the parent code (parent-task) that spawns two processes, each executing the child-task code.
#include <stdio.h>
#include <cstdlib>
#include <errno.h>      // errno
#include <iostream>     // cout cerr
#include <string>
#include <semaphore.h>  // semaphore
#include <fcntl.h>      // O_CREAT
#include <unistd.h>     // fork
#include <string.h>     // strdup
#include <sys/types.h>
#include <sys/wait.h>   // wait()

int init_semaphore(){
    std::string sname = "/SEM_CORE";
    sem_t* sem = sem_open( sname.c_str(), O_CREAT, 0644, 1 );
    if ( sem == SEM_FAILED ) {
        std::cerr << "sem_open failed!\n";
        return -1;
    }
    sem_init( sem, 0, 1 );
    return 0;
}

// Fork and exec child-task.
// Return pid of child
int fork_and_exec( std::string pname, char* cpuid ){
    int pid = fork();
    if ( pid == 0 ) {
        // Child
        char* const params[] = { "./child-task", "99", strdup( pname.c_str() ), cpuid, NULL };
        execv( params[0], params );
        exit(0);
    }
    else {
        // Parent
        return pid;
    }
}

int main( int argc, char* argv[] ) {
    if ( argc <= 1 )
        printf( "Usage ./parent-task <cpuid> \n" );

    char* cpuid = argv[1];
    std::string pnames[2] = { "p111", "p222" };

    init_semaphore();

    int childid[ 2 ] = { 0 };
    int i = 0;
    for( std::string pname : pnames ){
        childid[ i++ ] = fork_and_exec( pname, cpuid );
    }

    for ( i = 0; i < 2; i++ )
        if ( waitpid( childid[i], NULL, 0 ) < 0 )
            perror( "waitpid() failed.\n" );

    return 0;
}
The child-task code looks like this:
#include <cstdlib>
#include <stdio.h>
#include <sched.h>
#include <pthread.h>
#include <stdint.h>
#include <errno.h>
#include <semaphore.h>
#include <iostream>
#include <sys/types.h>
#include <fcntl.h>   // O_CREAT

sem_t* sm;

int set_cpu_affinity( int cpuid ) {
    pthread_t current_thread = pthread_self();
    cpu_set_t cpuset;
    CPU_ZERO( &cpuset );
    CPU_SET( cpuid, &cpuset );
    return pthread_setaffinity_np( current_thread,
                                   sizeof( cpu_set_t ), &cpuset );
}

int lookup_semaphore() {
    sm = sem_open( "/SEM_CORE", O_RDWR );
    if ( sm == SEM_FAILED ) {
        std::cerr << "sem_open failed!" << std::endl;
        return -1;
    }
    return 0;
}

int main( int argc, char* argv[] ) {
    printf( "Usage: ./child-task <PRIORITY> <PROCESS-NAME> <CPUID>\n" );
    printf( "Setting SCHED_RR and priority to %d\n", atoi( argv[1] ) );

    set_cpu_affinity( atoi( argv[3] ) );
    lookup_semaphore();

    int res;
    uint32_t n = 0;
    while ( 1 ) {
        n += 1;
        if ( !( n % 1000 ) ) {
            res = sem_wait( sm );
            if( res != 0 ) {
                printf(" sem_wait %s. errno: %d\n", argv[2], errno);
            }
            printf( "Inst:%s RR Prio %s running (n=%u)\n", argv[2], argv[1], n );
            fflush( stdout );

            sem_post( sm );
            sched_yield();
        }
        sched_yield();
    }
    sem_close( sm );
}
In the child-task code, I use if ( !( n % 1000 ) ) to experiment with reducing the contention/load from waiting on and posting the semaphore. The outcome is that with n % 1000, one of the child processes is always in the Sleep state (according to top) while the other executes properly. However, if I use n % 10000, i.e. less load/contention, both processes run and print their output interleaved, which is my expected outcome.
Does anyone know if this is a limitation of semaphore.h, or whether there's a better way to ensure the processes' execution order?
Updated: I made a simple example with threads and a semaphore. Note that sched_yield may help avoid unnecessary wakeups of the thread that is not "in turn" to do work, but yielding is not a guarantee. I also show an example with a mutex/condvar that is guaranteed to work, no yield necessary.
#include <stdexcept>
#include <semaphore.h>
#include <pthread.h>
#include <sched.h>
#include <thread>
#include <iostream>

using std::thread;
using std::cout;

sem_t sem;
int count = 0;
const int NR_WORK_ITEMS = 10;

void do_work(int worker_id)
{
    cout << "Worker " << worker_id << '\n';
}

void foo(int work_on_odd)
{
    int result;
    int contention_count = 0;
    while (count < NR_WORK_ITEMS)
    {
        result = sem_wait(&sem);
        if (result) {
            throw std::runtime_error("sem_wait failed!");
        }

        if (count % 2 == work_on_odd)
        {
            do_work(work_on_odd);
            count++;
        }
        else
        {
            contention_count++;
        }

        result = sem_post(&sem);
        if (result) {
            throw std::runtime_error("sem_post failed!");
        }

        result = sched_yield();
        if (result < 0) {
            throw std::runtime_error("yield failed!");
        }
    }
    cout << "Worker " << work_on_odd << " terminating. Nr of redundant wakeups from sem_wait: "
         << contention_count << '\n';
}

int main()
{
    int result = sem_init(&sem, 0, 1);
    if (result) {
        throw std::runtime_error("sem_init failed!");
    }

    thread t0 = thread(foo, 0);
    thread t1 = thread(foo, 1);

    t0.join();
    t1.join();

    return 0;
}
Here is one way to do it with condition variables and mutexes. Translating from C++ std threads to pthreads should be trivial. To do it between processes, you would have to use a pthread mutex type that can be shared between processes. Maybe the condvar and the mutex can both be placed in shared memory, to achieve the same thing I do below with threads.
See also the manpage pthread_condattr_setpshared (3) or
http://manpages.ubuntu.com/manpages/wily/man3/pthread_condattr_setpshared.3posix.html
On the other hand, maybe it is simpler to just use a SOCK_STREAM unix domain socket between the two worker processes, and block on the socket until the peer worker pings you (i.e. sends one char) over it.
#include <cassert>
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <unistd.h>

using std::thread;
using std::condition_variable;
using std::mutex;
using std::unique_lock;
using std::cout;

condition_variable cv;
mutex mtx;
int count;

void dowork(int arg)
{
    std::thread::id this_id = std::this_thread::get_id();
    cout << "Arg: " << arg << ", thread id: " << this_id << '\n';
}

void tfunc(int work_on_odd)
{
    assert(work_on_odd < 2);

    // count has static storage duration, so it is accessed directly rather than captured
    auto check_can_work = [work_on_odd](){ return ((count % 2) == work_on_odd); };

    while (count < 10)
    {
        unique_lock<mutex> lk(mtx);
        cv.wait(lk, check_can_work);

        dowork(work_on_odd);
        count++;

        cv.notify_one();
        // Lock is unlocked automatically here, but with threads and condvars,
        // it is actually better to unlock manually before notify_one.
    }
}

int main()
{
    count = 0;

    thread t0 = thread(tfunc, 0);
    thread t1 = thread(tfunc, 1);

    sleep(1);
    cv.notify_one();

    t0.join();
    t1.join();
}
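For completeness, a minimal sketch of the process-shared variant mentioned above: the mutex and condition variable get the PTHREAD_PROCESS_SHARED attribute and live in an anonymous shared mapping that a forked child inherits. Error handling is omitted and the turn-taking protocol is only illustrative.

#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

struct shared_state {
    pthread_mutex_t mtx;
    pthread_cond_t  cv;
    int             turn;   // 0 or 1: whose turn it is
};

int main() {
    // Anonymous shared mapping: visible to both parent and forked child.
    auto* st = static_cast<shared_state*>(mmap(nullptr, sizeof(shared_state),
        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0));

    pthread_mutexattr_t ma;
    pthread_mutexattr_init(&ma);
    pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
    pthread_mutex_init(&st->mtx, &ma);

    pthread_condattr_t ca;
    pthread_condattr_init(&ca);
    pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
    pthread_cond_init(&st->cv, &ca);

    st->turn = 0;

    int me = (fork() == 0) ? 1 : 0;   // child works on turn 1, parent on turn 0
    for (int i = 0; i < 5; ++i) {
        pthread_mutex_lock(&st->mtx);
        while (st->turn != me)
            pthread_cond_wait(&st->cv, &st->mtx);
        printf("process %d, iteration %d\n", me, i);
        st->turn = 1 - me;            // hand over the turn
        pthread_cond_signal(&st->cv);
        pthread_mutex_unlock(&st->mtx);
    }
    if (me == 0) wait(nullptr);       // parent reaps the child
    return 0;
}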

C++ callstack on windows access violation

I have been trying to make a Windows application dump the call stack in the event of a crash (bad memory access or division by zero) or an unhandled standard C++ exception.
I have built StackWalker, linked it into my application, and compiled the application with /EHa.
#include "win/StackWalker.h"
extern int runapp(int argc, char **argv);
// The exception filter function:
LONG WINAPI ExpFilter(EXCEPTION_POINTERS* pExp, DWORD dwExpCode) {
StackWalker sw;
sw.ShowCallstack(GetCurrentThread(), pExp->ContextRecord);
return EXCEPTION_EXECUTE_HANDLER;
}
int main(int argc, char *argv[]) {
__try
{
return runapp(argc, argv);
}
__except (ExpFilter(GetExceptionInformation(), GetExceptionCode()))
{
}
}
The real program is started via runapp() since it is not possible to instantiate objects that require unwinding (destruction) directly inside a __try scope.
My problem is that nothing is caught when I force my program to crash using this code:
int *data1 = 0;
*data1 = 0;
In other words, it just crashes "normally"
Does anybody have a hint?
The /EHa switch tells the compiler that you want to handle SEH exceptions inside a C++ try/catch block. In your code you use an SEH exception handler (__try/__except) instead. This is a working approach that I am using:
dbgutils.h
#pragma once

#include <eh.h>
#include <windows.h>
#include <string>
#include <sstream>
#include <iomanip>
#include <boost/optional.hpp>
#include "StackWalker.h"

class CSO3SEHException
{
public:
    CSO3SEHException(unsigned int nCode, EXCEPTION_POINTERS* pEx);
    std::string what();
    std::string stack();
private:
    std::string m_sWhat, m_sStack;
    std::string seName(const unsigned int& nCode);
    boost::optional<std::string> seInfo(unsigned int nCode, EXCEPTION_POINTERS* pEx);
    void seStack(EXCEPTION_POINTERS* pEx);
    void seExceptionInfo(unsigned int nCode, EXCEPTION_POINTERS* pEx);
};

class CCustomStackWalker : public StackWalker
{
public:
    CCustomStackWalker(std::stringstream* ss);
protected:
    virtual void OnOutput(LPCSTR szText);
private:
    std::stringstream* m_sOut;
};

void _so3_seh_translate(unsigned int code, _EXCEPTION_POINTERS *ep);
void ReportSEHException(CSO3SEHException& ex);
dbgutils.cpp
#include "dbgutils.h"
CCustomStackWalker::CCustomStackWalker(std::stringstream* ss)
{
m_sOut = ss;
}
void CCustomStackWalker::OnOutput(LPCSTR szText)
{
size_t sLen = strlen(szText);
std::string s = std::string(szText, sLen);
(*m_sOut) << s << std::endl;
}
CSO3SEHException::CSO3SEHException(unsigned int nCode, EXCEPTION_POINTERS* pEx)
{
seExceptionInfo(nCode, pEx);
seStack(pEx);
}
std::string CSO3SEHException::what()
{
return(m_sWhat);
}
std::string CSO3SEHException::stack()
{
return(m_sStack);
}
std::string CSO3SEHException::seName(const unsigned int& nCode)
{
switch (nCode)
{
case EXCEPTION_ACCESS_VIOLATION: return ("Access Violation");
case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return ("Range Check");
case EXCEPTION_BREAKPOINT: return ("Breakpoint");
case EXCEPTION_DATATYPE_MISALIGNMENT: return ("Datatype misaligment");
case EXCEPTION_ILLEGAL_INSTRUCTION: return ("Illegal instruction");
case EXCEPTION_INT_DIVIDE_BY_ZERO: return ("Divide by zero");
case EXCEPTION_INT_OVERFLOW: return ("Integer overflow");
case EXCEPTION_PRIV_INSTRUCTION: return ("Privileged instruction");
case EXCEPTION_STACK_OVERFLOW: return ("Stack overflow");
default: return("UNKNOWN EXCEPTION");
}
}
boost::optional<std::string> CSO3SEHException::seInfo(unsigned int nCode, EXCEPTION_POINTERS* pEx)
{
std::stringstream ss;
if (EXCEPTION_ACCESS_VIOLATION == nCode)
{
ss << (pEx->ExceptionRecord->ExceptionInformation[0] ? "write " : " read");
ss << std::hex << std::setfill('0');
ss << " of address 0x" << std::setw(2*sizeof(void*)) << (unsigned)pEx->ExceptionRecord->ExceptionInformation[1];
return(ss.str());
}
return(nullptr);
}
void CSO3SEHException::seStack(EXCEPTION_POINTERS* pEx)
{
std::stringstream ss;
CCustomStackWalker sw(&ss);
sw.ShowCallstack(GetCurrentThread(), pEx->ContextRecord);
m_sStack = ss.str();
}
void CSO3SEHException::seExceptionInfo(unsigned int nCode, EXCEPTION_POINTERS* pEx)
{
std::stringstream ss;
ss << seName(nCode);
ss << std::hex << std::setfill('0');
ss << " at 0x" << std::setw(2*sizeof(void*)) << pEx->ExceptionRecord->ExceptionAddress;
auto pSInfo = seInfo(nCode, pEx);
if (pSInfo)
ss << *pSInfo;
m_sWhat = ss.str();
}
void _so3_seh_translate(unsigned int code, _EXCEPTION_POINTERS *ep)
{
throw CSO3SEHException(code, ep);
}
void ReportSEHException(CSO3SEHException& ex)
{
std::string sError = ex.what();
std::string sStack = ex.stack();
//do some error reporting here
}
somewhere in your code:
//You have to call _set_se_translator in all threads
_set_se_translator(_so3_seh_translate);

try
{
    //do something exception-prone
}
catch (CSO3SEHException & pSEH)
{
    ReportSEHException(pSEH);
}
catch (std::exception& err)
{
    //handle c++ exceptions
}
The following solution works cross-platform (with the inclusion of StackWalker). Unfortunately, it only catches crashes across all threads on POSIX systems.
If someone has a solution for catching crashes/exceptions in all threads on Windows, please tell.
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <exception>   // std::set_terminate
#include <iostream>

#ifdef WINDOWS_OS
#include <windows.h>
#include "StackWalker.h"
#include <DbgHelp.h>

void seg_handler(int sig)
{
    unsigned int i;
    void * stack[ 100 ];
    unsigned short frames;
    SYMBOL_INFO * symbol;
    HANDLE process;

    process = GetCurrentProcess();
    SymInitialize( process, NULL, TRUE );

    frames = CaptureStackBackTrace( 0, 100, stack, NULL );
    symbol = ( SYMBOL_INFO * )calloc( sizeof( SYMBOL_INFO ) + 256 * sizeof( char ), 1 );
    symbol->MaxNameLen = 255;
    symbol->SizeOfStruct = sizeof( SYMBOL_INFO );

    for( i = 0; i < frames; i++ ) {
        SymFromAddr( process, ( DWORD64 )( stack[ i ] ), 0, symbol );
        printf( "%i: %s - 0x%0X\n", frames - i - 1, symbol->Name, symbol->Address );
    }

    free( symbol );

    StackWalker sw;
    sw.ShowCallstack(GetCurrentThread());
    exit(1);
}

void std_handler( void ) {
    seg_handler(1);
}

#else
#include <execinfo.h>
#include <unistd.h>

void seg_handler(int sig) {
    void *array[10];
    size_t size;

    // get void*'s for all entries on the stack
    size = backtrace(array, 10);

    // print out all the frames to stderr
    fprintf(stderr, "Error: signal %d:\n", sig);
    backtrace_symbols_fd(array, size, STDERR_FILENO);
    exit(1);
}

void std_handler( void )
{
    void *trace_elems[20];
    int trace_elem_count(backtrace( trace_elems, 20 ));
    char **stack_syms(backtrace_symbols( trace_elems, trace_elem_count ));
    for ( int i = 0 ; i < trace_elem_count ; ++i )
    {
        std::cout << stack_syms[i] << "\n";
    }
    free( stack_syms );
    exit(1);
}
#endif

int main(int argc, char *argv[]) {
    signal(SIGSEGV, seg_handler);
    std::set_terminate( std_handler );

    // Main Program

    // Crash
    int *a = 0;
    *a = 1;
}