I've used the program answered in this link with some modifications.
Below is my modified code:
#include <linux/netlink.h>
#include <netlink/netlink.h>
#include <netlink/route/qdisc.h>
#include <netlink/route/qdisc/plug.h>
#include <netlink/socket.h>
#include <atomic>
#include <csignal>
#include <iostream>
#include <stdexcept>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <thread>
#include <queue>
#include <chrono>
/**
* Netlink route socket.
*/
struct Socket {
Socket() : handle{nl_socket_alloc()} {
if (handle == nullptr) {
throw std::runtime_error{"Failed to allocate socket!"};
}
if (int err = nl_connect(handle, NETLINK_ROUTE); err < 0) {
throw std::runtime_error{"Unable to connect netlink socket: " +
std::string{nl_geterror(err)}};
}
}
Socket(const Socket &) = delete;
Socket &operator=(const Socket &) = delete;
Socket(Socket &&) = delete;
Socket &operator=(Socket &&) = delete;
~Socket() { nl_socket_free(handle); }
struct nl_sock *handle;
};
/**
* Read all links from netlink socket.
*/
struct LinkCache {
explicit LinkCache(Socket *socket) : handle{nullptr} {
if (int err = rtnl_link_alloc_cache(socket->handle, AF_UNSPEC, &handle);
err < 0) {
throw std::runtime_error{"Unable to allocate link cache: " +
std::string{nl_geterror(err)}};
}
}
LinkCache(const LinkCache &) = delete;
LinkCache &operator=(const LinkCache &) = delete;
LinkCache(LinkCache &&) = delete;
LinkCache &operator=(LinkCache &&) = delete;
~LinkCache() { nl_cache_free(handle); }
struct nl_cache *handle;
};
/**
* Link (such as "eth0" or "wlan0").
*/
struct Link {
Link(LinkCache *link_cache, const std::string &iface)
: handle{rtnl_link_get_by_name(link_cache->handle, iface.c_str())} {
if (handle == nullptr) {
throw std::runtime_error{"Link does not exist:" + iface};
}
}
Link(const Link &) = delete;
Link &operator=(const Link &) = delete;
Link(Link &&) = delete;
Link &operator=(Link &&) = delete;
~Link() { rtnl_link_put(handle); }
struct rtnl_link *handle;
};
/**
* Queuing discipline.
*/
struct QDisc {
QDisc(const std::string &iface, const std::string &kind)
: handle{rtnl_qdisc_alloc()} {
if (handle == nullptr) {
throw std::runtime_error{"Failed to allocate qdisc!"};
}
struct rtnl_tc *tc = TC_CAST(handle);
// Set link
LinkCache link_cache{&socket};
Link link{&link_cache, iface};
rtnl_tc_set_link(tc, link.handle);
// Set parent qdisc
uint32_t parent = 0;
if (int err = rtnl_tc_str2handle("root", &parent); err < 0) {
throw std::runtime_error{"Unable to parse handle: " +
std::string{nl_geterror(err)}};
}
rtnl_tc_set_parent(tc, parent);
// Set kind (e.g. "plug")
if (int err = rtnl_tc_set_kind(tc, kind.c_str()); err < 0) {
throw std::runtime_error{"Unable to set kind: " +
std::string{nl_geterror(err)}};
}
}
QDisc(const QDisc &) = delete;
QDisc &operator=(const QDisc &) = delete;
QDisc(QDisc &&) = delete;
QDisc &operator=(QDisc &&) = delete;
~QDisc() {
if (int err = rtnl_qdisc_delete(socket.handle, handle); err < 0) {
std::cerr << "Unable to delete qdisc: " << nl_geterror(err) << std::endl;
}
rtnl_qdisc_put(handle);
}
void send_msg() {
int flags = NLM_F_CREATE;
if (int err = rtnl_qdisc_add(socket.handle, handle, flags); err < 0) {
throw std::runtime_error{"Unable to add qdisc: " +
std::string{nl_geterror(err)}};
}
}
Socket socket;
struct rtnl_qdisc *handle;
};
/**
* Queuing discipline for plugging traffic.
*/
class Plug {
public:
Plug(const std::string &iface, uint32_t limit, std::string msg)
: qdisc_{iface, "plug"} {
rtnl_qdisc_plug_set_limit(qdisc_.handle, limit);
qdisc_.send_msg();
// set_enabled(enabled_);
set_msg(msg);
}
// void set_enabled(bool enabled) {
// if (enabled) {
// rtnl_qdisc_plug_buffer(qdisc_.handle);
// } else {
// rtnl_qdisc_plug_release_one(qdisc_.handle);
// }
// qdisc_.send_msg();
// enabled_ = enabled;
// }
void set_msg(std::string msg) {
if (msg == "buffer") {
int ret = rtnl_qdisc_plug_buffer(qdisc_.handle);
//std::cout<<strerror(ret);
} else if(msg == "commit") {
int ret = rtnl_qdisc_plug_release_one(qdisc_.handle);
//std::cout<<strerror(ret);
} else {
int ret = rtnl_qdisc_plug_release_indefinite(qdisc_.handle);
//std::cout<<strerror(ret);
}
qdisc_.send_msg();
}
// bool is_enabled() const { return enabled_; }
private:
QDisc qdisc_;
// bool enabled_;
};
std::atomic<bool> quit{false};
void exit_handler(int /*signal*/) { quit = true; }
// this function busy wait on job queue until there's something
//and calls release operation i.e. unplug qdisc to release output packets
//generated for a particular epoch
void transmit_ckpnt(std::queue<int> &job_queue, Plug &plug){
while(true){
while(!job_queue.empty()){
int id = job_queue.front();
job_queue.pop();
std::string s = std::to_string(id);
std::cout<<"called from parallel thread "<<s<<"\n";
//release buffer
plug.set_msg("commit");
}
}
}
int main() {
std::string iface{"veth-host"};
constexpr uint32_t buffer_size = 10485760;
// bool enabled = true;
Plug plug{iface, buffer_size, "buffer"};
/**
* Set custom exit handler to ensure destructor runs to delete qdisc.
*/
struct sigaction sa {};
sa.sa_handler = exit_handler;
sigfillset(&sa.sa_mask);
sigaction(SIGINT, &sa, nullptr);
pid_t wpid;
int status = 0;
std::queue<int> job_queue;
int ckpnt_no = 1;
std::thread td(transmit_ckpnt, std::ref(job_queue), std::ref(plug));
plug.set_msg("indefinite");
while(true){
//plug the buffer at start of the epoch
plug.set_msg("buffer");
//wait for completion of epoch
sleep(4);
job_queue.push(ckpnt_no);
ckpnt_no += 1;
}
plug.set_msg("indefinite");
td.join();
// while (!quit) {
// std::cout << "Plug set to " << plug.is_enabled() << std::endl;
// std::cout << "Press <Enter> to continue.";
// std::cin.get();
// plug.set_enabled(!plug.is_enabled());
// }
return EXIT_SUCCESS;
}
Walkthrough of the code: This program creates a plug/unplug qdisc. During a plug operation the network packets are buffered, and during an unplug operation the packets are released from the first plug (the front of the queuing discipline) up to the second plug in the qdisc. The program works correctly as long as plug and unplug operations strictly alternate. But I want to use it the way it was built to be used, i.e. as described in this link:
TCQ_PLUG_BUFFER (epoch i)
TCQ_PLUG_BUFFER (epoch i+1)
TCQ_PLUG_RELEASE_ONE (for epoch i)
TCQ_PLUG_BUFFER (epoch i+2)
..............................so on
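For reference, this is how that epoch sequence maps onto the Plug wrapper from the listing above (only an illustration of the intended call order, not new functionality):
// Intended call order, expressed with the Plug class from the code above:
plug.set_msg("buffer");   // TCQ_PLUG_BUFFER       (epoch i)
plug.set_msg("buffer");   // TCQ_PLUG_BUFFER       (epoch i+1)
plug.set_msg("commit");   // TCQ_PLUG_RELEASE_ONE  (for epoch i)
plug.set_msg("buffer");   // TCQ_PLUG_BUFFER       (epoch i+2)
// ... and so on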
In my program, the main thread starts buffering at the beginning of every epoch and then continues execution. The job thread takes a job id from the job queue and releases the buffered packets from the head of the queue up to the next plug. But this gives the error below:
./a.out: /lib/x86_64-linux-gnu/libnl-3.so.200: no version information available (required by ./a.out)
./a.out /usr/lib/x86_64-linux-gnu/libnl-route-3.so.200: no version information available (required by ./a.out)
called from parallel thread 1
called from parallel thread 2
called from parallel thread 3
called from parallel thread 4
called from parallel thread 5
called from parallel thread 6
called from parallel thread 7
terminate called after throwing an instance of 'std::runtime_error'
what(): Unable to add qdisc: Message sequence number mismatch
Aborted
I am unable to understand what this error means and why it occurs. When the release was performed sequentially in the main thread everything worked, but it fails now that a separate thread performs the release: that thread simply checks whether job_queue is empty, performs the release operation while there is something in the queue, and busy-waits when the queue is empty.
The expected sequence counter is stored by libnl as part of the nl_sock struct (reference). When multiple threads call libnl functions, this can cause inconsistencies, such as a data race (two threads writing to the sequence counter at the same time) or a race condition (a time-of-check-to-time-of-use problem, where one thread checks whether the counter satisfies some condition and then performs some operation, but in between the other thread modifies the counter). See here for more details on data races and race conditions.
Sidenote: Both g++ and clang++ support the -fsanitize=thread flag, which automatically inserts additional debug code into the binary that can help to detect this kind of data race (reference). Though in this case it might not be as useful, since you would also have to get libnl compiled with this flag, which might not be easy.
From the libnl documentation (reference):
The next step is to check the sequence number of the message against
the currently expected sequence number. The application may provide
its own sequence number checking algorithm by setting the callback
function NL_CB_SEQ_CHECK to its own implementation. In fact, calling
nl_socket_disable_seq_check() to disable sequence number checking will
do nothing more than set the NL_CB_SEQ_CHECK hook to a function which
always returns NL_OK.
This leaves us with the following options:
Use a mutex to guard all access to libnl functions that might modify the sequence counter.
Disable sequence counter checking using nl_socket_disable_seq_check.
From my point of view, 1) is the more robust solution. If you care more about performance than robustness, then you could go with 2).
Option 1: Use a mutex to guard access to libnl functions
Include the mutex header from the standard library:
#include <mutex>
In the Plug class, add a std::mutex as a member:
class Plug {
...
private:
std::mutex seq_counter_mutex_;
...
};
At the beginning of set_msg, use a std::lock_guard for acquiring the mutex for the duration of the function. This ensures that only one thread can enter the function at the same time:
void set_msg(std::string msg) {
std::lock_guard guard{seq_counter_mutex_};
...
}
Option 2: Disable sequence number checking
In the Socket class, at the end of the constructor you can disable sequence counter checking with:
nl_socket_disable_seq_check(handle);
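As a minimal sketch (assuming the Socket struct from the question is otherwise unchanged), the constructor would then look like this:
struct Socket {
  Socket() : handle{nl_socket_alloc()} {
    if (handle == nullptr) {
      throw std::runtime_error{"Failed to allocate socket!"};
    }
    if (int err = nl_connect(handle, NETLINK_ROUTE); err < 0) {
      throw std::runtime_error{"Unable to connect netlink socket: " +
                               std::string{nl_geterror(err)}};
    }
    // Disable libnl's sequence number checking; replies are then
    // accepted regardless of the expected sequence counter.
    nl_socket_disable_seq_check(handle);
  }
  // ... rest of the struct as before ...
};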
Related
I called IMFSourceReader::ReadSample and found that it gets stuck when it cannot read data.
So I tried to terminate the thread with TerminateThread(), but it returned 0, indicating failure.
How can I terminate the stuck thread?
This is my sample code:
#include <iostream>
#include <vector>
#include <codecvt>
#include <string>
#include <thread>
#include <mutex>
#include <chrono>
#include <condition_variable>
#include <Windows.h>
using namespace std::chrono_literals;
class MyObject
{
private:
...
std::thread *t;
std::mutex m;
std::condition_variable cv;
std::thread::native_handle_type handle;
int getsample(uint8_t* data)
{
// call a ReadSample
hr = pVideoReader->ReadSample(
MF_SOURCE_READER_ANY_STREAM, // Stream index.
0, // Flags.
&streamIndex, // Receives the actual stream index.
&flags, // Receives status flags.
&llTimeStamp, // Receives the time stamp.
&pSample // Receives the sample or NULL.
);
...
return 0;
}
int myfunc_wrapper(uint8_t* data)
{
int ret = 0;
BOOL bpass = 0;
if (t == nullptr) {
t = new std::thread([this, &data, &ret]()
{
ret = this->getsample(data);
this->cv.notify_one();
});
handle = t->native_handle();
t->detach();
}
{
std::unique_lock<std::mutex> l(this->m);
if (this->cv.wait_for(l, 2500ms) == std::cv_status::timeout) {
bpass = TerminateThread(handle, 0);
if (bpass == 0) {
std::cout << "TerminateThread Fail! " << GetLastError() << std::endl;
}
throw std::runtime_error("Timeout Fail 2500 ms");
}
}
delete t;
t = nullptr;
}
public:
int my_func(uint8_t* raw_data)
{
bool timedout = false;
try {
if (myfunc_wrapper(raw_data) != 0)
return -1;
}
catch (std::runtime_error& e) {
std::cout << e.what() << std::endl;
timedout = true;
}
if (timedout)
return -1;
return 0;
}
};
int main()
{
uint8_t data[512];
MyObject* obj = new MyObject();
while (true)
{
obj->my_func(data);
}
return 0;
}
Output:
TerminateThread Fail! 6
Timeout Fail 2500 ms
TerminateThread Fail! 6
Timeout Fail 2500 ms
...
I also tried to use pthread_cancel, but it does not compile because of a type error:
no suitable constructor exists to convert from "std::thread::native_handle_type" to "__ptw32_handle_t"
handle = t->native_handle();
...
pthread_cancel(handle); // no suitable constructor exists to convert
The reason it failed to terminate is that the native handle is no longer valid after detaching. One way around this is to call OpenThread with the thread id to obtain a new handle.
To get the thread id, you can use the thread's native handle before detaching, like this:
DWORD nativeId = GetThreadId(t->native_handle());
t->detach();
After that, just open a new handle to the thread to terminate it:
HANDLE hThread = OpenThread(THREAD_TERMINATE, FALSE, nativeId);
if (hThread)
{
BOOL result = TerminateThread(hThread, 0);
CloseHandle(hThread);
}
But you should not do this; consider other ways to signal the thread to terminate on its own, as sketched below.
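For illustration only (the flag and loop below are hypothetical, not part of the question's code): a cooperative shutdown usually means the worker polls a flag between blocking calls and exits on its own. Note this only helps if ReadSample returns periodically, for example when the source reader is used asynchronously or with short reads; it cannot interrupt a call that never returns.
#include <atomic>

std::atomic<bool> stop_requested{false};  // hypothetical shared flag

void capture_loop()
{
    while (!stop_requested.load())
    {
        // Call ReadSample() here and handle the sample/flags.
        // The loop only exits cooperatively, after the current call returns.
    }
}

// Instead of TerminateThread(), the owner would do:
//   stop_requested.store(true);
//   t->join();   // which also requires not detaching the thread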
The Question
What's a good way to increment a counter and signal once that counter reaches a given value (i.e., signaling a function that is waiting in block_until_full, below)? It's a lot like asking for a semaphore. The involved processes are communicating via shared memory (/dev/shm), and I'm currently trying to avoid using a library (like Boost).
Initial Solution
Declare a struct that contains a SignalingIncrementingCounter. This struct is allocated in shared memory, and a single process sets up the shared memory with this struct before the other processes begin. The SignalingIncrementingCounter contains the following three fields:
A plain old int to represent the counter's value.
Note: Due to the MESI caching protocol, we are guaranteed that if one CPU core modifies the value, the updated value will be reflected in the other caches once the value is read from them.
A pthread mutex to guard the reading and incrementing of the integer counter
A pthread condition variable to signal when the integer has reached a desirable value
Other Solutions
Instead of using an int, I also tried using std::atomic<int>. I tried defining this field directly as a member of the SignalingIncrementingCounter class, and I also tried constructing it inside the struct at run time with placement new. Neither seemed to work any better than the plain int.
The following should work.
The Implementation
I include most of the code, but I leave out parts of it for the sake of brevity.
signaling_incrementing_counter.h
#include <atomic>
#include <pthread.h>
struct SignalingIncrementingCounter {
public:
void init(const int upper_limit_);
void reset_to_empty();
void increment(); // only valid when counting up
void block_until_full(const char * comment = {""});
private:
int upper_limit;
volatile int value;
pthread_mutex_t mutex;
pthread_cond_t cv;
};
signaling_incrementing_counter.cpp
#include <pthread.h>
#include <cerrno>
#include <cstdio>
#include <ctime>
#include <stdexcept>
#include "signaling_incrementing_counter.h"
void SignalingIncrementingCounter::init(const int upper_limit_) {
upper_limit = upper_limit_;
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
int retval = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
if (retval) {
throw std::runtime_error("Error while setting sharedp field for mutex");
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
pthread_mutex_init(&mutex, &attr);
pthread_mutexattr_destroy(&attr);
}
{
pthread_condattr_t attr;
pthread_condattr_init(&attr);
pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
pthread_cond_init(&cv, &attr);
pthread_condattr_destroy(&attr);
}
value = 0;
}
void SignalingIncrementingCounter::reset_to_empty() {
pthread_mutex_lock(&mutex);
value = 0;
// No need to signal, because in my use-case, there is no function that unblocks when the value changes to 0
pthread_mutex_unlock(&mutex);
}
void SignalingIncrementingCounter::increment() {
pthread_mutex_lock(&mutex);
fprintf(stderr, "incrementing\n");
++value;
if (value >= upper_limit) {
pthread_cond_broadcast(&cv);
}
pthread_mutex_unlock(&mutex);
}
void SignalingIncrementingCounter::block_until_full(const char * comment) {
struct timespec max_wait = {0, 0};
pthread_mutex_lock(&mutex);
while (value < upper_limit) {
int val = value;
printf("blocking until full, value is %i, for %s\n", val, comment);
clock_gettime(CLOCK_REALTIME, &max_wait);
max_wait.tv_sec += 5; // wait 5 seconds
const int timed_wait_rv = pthread_cond_timedwait(&cv, &mutex, &max_wait);
if (timed_wait_rv)
{
switch(timed_wait_rv) {
case ETIMEDOUT:
break;
default:
throw std::runtime_error("Unexpected error encountered. Investigate.");
}
}
}
pthread_mutex_unlock(&mutex);
}
Using either an int or std::atomic works.
One of the great things about the std::atomic interface is that it plays quite nicely with the int "interface". So, the code is almost exactly the same. One can switch between each implementation below by adding a #define USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER true.
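For instance (just to make the switch concrete; where exactly the define lives is up to you), selecting the plain-int implementation could look like:
// Pick the implementation before including the header
// (or pass it on the command line, e.g. -DUSE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER=true).
#define USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER true
#include "signaling_incrementing_counter.h"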
I'm not so sure about statically creating the std::atomic in shared memory, so I use placement new to allocate it. My guess is that relying on the static allocation would work, but it may technically be undefined behavior. Figuring that out is beyond the scope of my question, but a comment on that topic would be quite welcome.
signaling_incrementing_counter.h
#include <atomic>
#include <pthread.h>
#include "gpu_base_constants.h"
struct SignalingIncrementingCounter {
public:
/**
* We will either count up or count down to the given limit. Once the limit is reached, whatever is waiting on this counter will be signaled and allowed to proceed.
*/
void init(const int upper_limit_);
void reset_to_empty();
void increment(); // only valid when counting up
void block_until_full(const char * comment = {""});
// We don't have a use-case for the block_until_non_full
private:
int upper_limit;
#if USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER
volatile int value;
#else // USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER
std::atomic<int> value;
std::atomic<int> * value_ptr;
#endif // USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER
pthread_mutex_t mutex;
pthread_cond_t cv;
};
signaling_incrementing_counter.cpp
#include <pthread.h>
#include <cerrno>
#include <cstdio>
#include <ctime>
#include <stdexcept>
#include "signaling_incrementing_counter.h"
void SignalingIncrementingCounter::init(const int upper_limit_) {
upper_limit = upper_limit_;
#if !USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER
value_ptr = new(&value) std::atomic<int>(0);
#endif // USE_INT_IN_SHARED_MEMORY_FOR_SIGNALING_COUNTER
{
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
int retval = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
if (retval) {
throw std::runtime_error("Error while setting sharedp field for mutex");
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
pthread_mutex_init(&mutex, &attr);
pthread_mutexattr_destroy(&attr);
}
{
pthread_condattr_t attr;
pthread_condattr_init(&attr);
pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
pthread_cond_init(&cv, &attr);
pthread_condattr_destroy(&attr);
}
reset_to_empty(); // should be done at end, since mutex functions are called
}
void SignalingIncrementingCounter::reset_to_empty() {
int mutex_rv = pthread_mutex_lock(&mutex);
if (mutex_rv) {
throw std::runtime_error("Unexpected error encountered while grabbing lock. Investigate.");
}
value = 0;
// No need to signal, because there is no function that unblocks when the value changes to 0
pthread_mutex_unlock(&mutex);
}
void SignalingIncrementingCounter::increment() {
fprintf(stderr, "incrementing\n");
int mutex_rv = pthread_mutex_lock(&mutex);
if (mutex_rv) {
throw std::runtime_error("Unexpected error encountered while grabbing lock. Investigate.");
}
++value;
fprintf(stderr, "incremented\n");
if (value >= upper_limit) {
pthread_cond_broadcast(&cv);
}
pthread_mutex_unlock(&mutex);
}
void SignalingIncrementingCounter::block_until_full(const char * comment) {
struct timespec max_wait = {0, 0};
int mutex_rv = pthread_mutex_lock(&mutex);
if (mutex_rv) {
throw std::runtime_error("Unexpected error encountered while grabbing lock. Investigate.");
}
while (value < upper_limit) {
int val = value;
printf("blocking during increment until full, value is %i, for %s\n", val, comment);
/*const int gettime_rv =*/ clock_gettime(CLOCK_REALTIME, &max_wait);
max_wait.tv_sec += 5;
const int timed_wait_rv = pthread_cond_timedwait(&cv, &mutex, &max_wait);
if (timed_wait_rv)
{
switch(timed_wait_rv) {
case ETIMEDOUT:
break;
default:
pthread_mutex_unlock(&mutex);
throw std::runtime_error("Unexpected error encountered. Investigate.");
}
}
}
pthread_mutex_unlock(&mutex);
}
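To tie this together, here is a rough usage sketch (the shared memory name, the limit, and the error handling are placeholders, not from the original code) of how one process could place the struct in POSIX shared memory, initialize it once, and then use increment() and block_until_full():
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <new>
#include "signaling_incrementing_counter.h"

int main() {
    // Creator process: set up the shared memory segment (hypothetical name).
    int fd = shm_open("/signaling_counter_demo", O_CREAT | O_RDWR, 0600);
    if (fd < 0) return 1;
    if (ftruncate(fd, sizeof(SignalingIncrementingCounter)) != 0) return 1;

    void *mem = mmap(nullptr, sizeof(SignalingIncrementingCounter),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (mem == MAP_FAILED) return 1;

    // Construct the counter in place and initialize it exactly once.
    auto *counter = new (mem) SignalingIncrementingCounter;
    counter->init(4);  // e.g. wait until 4 increments have happened

    counter->increment();               // called from the producing processes
    counter->block_until_full("demo");  // called from the waiting process

    munmap(mem, sizeof(SignalingIncrementingCounter));
    close(fd);
    return 0;
}
The non-creator processes would skip O_CREAT, ftruncate and the placement new, simply shm_open()/mmap() the same name, cast the mapping to SignalingIncrementingCounter*, and call increment() or block_until_full().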
I wrote a program to capture packets using the WinPcap library. (How to compile WinPcap with VS2010? Please see this link: http://www.rhyous.com/2011/11/12/how-to-compile-winpcap-with-visual-studio-2010/)
I want to capture packets (using pcap_loop()) and at the same time process them in the function void D. The line int x = 1; in D is just a simple placeholder for the functions that would process the packet. Thread t calls D; thread tt calls startCap(), which does the capturing.
To emphasize the problem: when I debug the program, it stops at pcap_loop() in thread tt until pcap_loop() finishes. I set the count parameter of pcap_loop() so that it runs without end, because I need continuous packet capture. As a result, the program never reaches the next statement, int x = 1;. I want int x = 1; to run while thread tt is running. In short, I want both threads t and tt to run at the same time, but the program only ever runs thread tt and never gets back to thread t.
PS 1: int x = 1; should preferably stay inside void D.
PS 2: There are no errors while compiling or debugging, but the program stays inside pcap_loop(), which runs in thread tt.
Did I make my question clear?
The whole code:
Data.h
#ifndef DATA_H
#define DATA_H
#include <pcap/pcap.h>
#include <vector>
using namespace std;
struct PcapDevice
{
string name;
string description;
};
class Data
{
public:
Data();
~Data();
bool findDevices(vector<PcapDevice> &deviceList );
bool openDevices(const char * device);
void processPacket();
void startCap();
private:
pcap_t* inData;
char errbuf[256];
bool isCapturing;
pcap_if_t * m_pDevs;
};
#endif // DATA_H
Data.cpp
#include "Data.h"
#include <iostream>
// define pcap callback
void capture_callback_handler(unsigned char *userData, const struct pcap_pkthdr* pkthdr, const unsigned char * packet)
{
((Data*) userData)->processPacket();
}
Data::Data()
{
memset(errbuf,0,PCAP_ERRBUF_SIZE);
isCapturing = false;
inData = NULL;
m_pDevs = NULL;
}
Data::~Data()
{
}
// process the packet
void Data::processPacket()
{
return ;
}
// find local adapter
bool Data::findDevices(vector<PcapDevice> &deviceList )
{
m_pDevs = NULL;
int res = pcap_findalldevs(&m_pDevs, errbuf);
if(0 == res)
{
pcap_if_t * pIter = m_pDevs;
while(pIter != NULL)
{
PcapDevice device;
device.description = pIter->description;
device.name = pIter->name;
deviceList.push_back(device);
pIter = pIter->next;
}
return true;
}
else
{
printf("PCAP: no devices found\n");
}
return false;
}
// open the adapter
bool Data::openDevices(const char *device)
{
if ( (inData = pcap_open_live(device, 8192, 1, 512, errbuf)) == NULL)
{
return false;
}
return true;
}
// start the process of capturing the packet
void Data::startCap()
{
if ( inData == NULL ){
fprintf(stderr, "ERROR: no source set\n" );
return;
}
// free the list of device(adapter)
pcap_freealldevs(m_pDevs);
Data* data = this;
isCapturing = true;
// capture in the loop
if ( pcap_loop(inData, -1, capture_callback_handler, (unsigned char *) data) == -1)
{
fprintf(stderr, "ERROR: %s\n", pcap_geterr(inData) );
isCapturing = false;
}
}
main.cpp
#include <WinSock2.h>
#include <Windows.h>
#include <time.h>
#include "Data.h"
#include <boost\thread\thread.hpp>
#include <boost\function.hpp>
struct parameter
{
Data* pData;
};
void D(void* pParam)
{
// init the parameter
parameter* pUserParams = (parameter*)pParam;
boost::function<void()> f;
// the capture thread will be started
f = boost::bind(&Data::startCap, pUserParams->pData);
boost::thread tt(f);
tt.join();
// I want to work on the packet at the same time, the code "int x=1" is only an easy example
// and it represents a series of functions that can process the packet. I want to run those function as the thread tt runs.
int x = 1;
}
void main()
{
Data oData;
parameter pPara ;
pPara.pData = &oData;
std::vector<PcapDevice> DevList;
oData.findDevices(DevList);
int num = DevList.size()-1;
oData.openDevices(DevList[num].name.c_str());
boost::thread t(D,(void*)&pPara);
t.join();
}
Calling tt.join() will wait until the thread finishes (that is, until startCap() returns) before executing the next statement.
You could simply put your int x = 1; before the join(); however, the thread may not even have started at that point. If you want to ensure the thread is running, or has reached a certain point, before processing int x = 1;, you can use a condition_variable:
The condition_variable class is a synchronization primitive that can be used to block a thread, or multiple threads at the same time, until:
a notification is received from another thread
void Data::startCap(std::condition_variable& cv)
{
if ( inData == NULL ){
fprintf(stderr, "ERROR: no source set\n" );
return;
}
// free the list of device(adapter)
pcap_freealldevs(m_pDevs);
Data* data = this;
isCapturing = true;
// Notify others that we are ready to begin capturing packets
cv.notify_one();
// capture in the loop
if ( pcap_loop(inData, -1, capture_callback_handler, (unsigned char *) data) == -1)
{
fprintf(stderr, "ERROR: %s\n", pcap_geterr(inData) );
isCapturing = false;
}
}
void D(void* pParam)
{
    // init the parameter
    parameter* pUserParams = (parameter*)pParam;
    // Create the condition_variable and a mutex to wait on it
    std::condition_variable cv;
    std::mutex m;
    // Pass the condition_variable by reference to the thread
    boost::thread tt(&Data::startCap, pUserParams->pData, std::ref(cv));
    // Wait until the thread notifies us it's ready
    // (a real implementation should also use a flag/predicate to cope with
    // spurious wake-ups and with a notify that happens before the wait)
    {
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock);
    }
    // Process packets etc.
    int x = 1;
    // Wait for the thread to finish
    tt.join();
}
Now D() will start the thread tt and then wait until startCap() has reached a certain point (where it calls notify_one()) before continuing to do other things.
I am trying to understand the implementation of a thread pool using pthreads shown below. When I comment out the for loop in main, the program gets stuck; after adding some logging it seems to get stuck in the join call in the ThreadPool destructor.
I am unable to understand why this is happening. Is there some deadlock scenario here?
This may be naive, but can someone help me understand why this happens and how to correct it?
Thanks a lot !!!
#include <stdio.h>
#include <queue>
#include <unistd.h>
#include <pthread.h>
#include <malloc.h>
#include <stdlib.h>
// Base class for Tasks
// run() should be overloaded and expensive calculations done there
// showTask() is for debugging and can be deleted if not used
class Task {
public:
Task() {}
virtual ~Task() {}
virtual void run()=0;
virtual void showTask()=0;
};
// Wrapper around std::queue with some mutex protection
class WorkQueue {
public:
WorkQueue() {
// Initialize the mutex protecting the queue
pthread_mutex_init(&qmtx,0);
// wcond is a condition variable that's signaled
// when new work arrives
pthread_cond_init(&wcond, 0);
}
~WorkQueue() {
// Cleanup pthreads
pthread_mutex_destroy(&qmtx);
pthread_cond_destroy(&wcond);
}
// Retrieves the next task from the queue
Task *nextTask() {
// The return value
Task *nt = 0;
// Lock the queue mutex
pthread_mutex_lock(&qmtx);
// Check if there's work
if (finished && tasks.size() == 0) {
// If not return null (0)
nt = 0;
} else {
// Not finished, but there are no tasks, so wait for
// wcond to be signalled
if (tasks.size()==0) {
pthread_cond_wait(&wcond, &qmtx);
}
// get the next task
nt = tasks.front();
if(nt){
tasks.pop();
}
// For debugging
if (nt) nt->showTask();
}
// Unlock the mutex and return
pthread_mutex_unlock(&qmtx);
return nt;
}
// Add a task
void addTask(Task *nt) {
// Only add the task if the queue isn't marked finished
if (!finished) {
// Lock the queue
pthread_mutex_lock(&qmtx);
// Add the task
tasks.push(nt);
// signal there's new work
pthread_cond_signal(&wcond);
// Unlock the mutex
pthread_mutex_unlock(&qmtx);
}
}
// Mark the queue finished
void finish() {
pthread_mutex_lock(&qmtx);
finished = true;
// Signal the condition variable in case any threads are waiting
pthread_cond_signal(&wcond);
pthread_mutex_unlock(&qmtx);
}
// Check if there's work
bool hasWork() {
//printf("task queue size is %d\n",tasks.size());
return (tasks.size()>0);
}
private:
std::queue<Task*> tasks;
bool finished;
pthread_mutex_t qmtx;
pthread_cond_t wcond;
};
// Function that retrieves a task from a queue, runs it and deletes it
void *getWork(void* param) {
Task *mw = 0;
WorkQueue *wq = (WorkQueue*)param;
while (mw = wq->nextTask()) {
mw->run();
delete mw;
}
pthread_exit(NULL);
}
class ThreadPool {
public:
// Allocate a thread pool and set them to work trying to get tasks
ThreadPool(int n) : _numThreads(n) {
int rc;
printf("Creating a thread pool with %d threads\n", n);
threads = new pthread_t[n];
for (int i=0; i< n; ++i) {
rc = pthread_create(&(threads[i]), 0, getWork, &workQueue);
if (rc){
printf("ERROR; return code from pthread_create() is %d\n", rc);
exit(-1);
}
}
}
// Wait for the threads to finish, then delete them
~ThreadPool() {
workQueue.finish();
//waitForCompletion();
for (int i=0; i<_numThreads; ++i) {
pthread_join(threads[i], 0);
}
delete [] threads;
}
// Add a task
void addTask(Task *nt) {
workQueue.addTask(nt);
}
// Tell the tasks to finish and return
void finish() {
workQueue.finish();
}
// Checks if there is work to do
bool hasWork() {
return workQueue.hasWork();
}
private:
pthread_t * threads;
int _numThreads;
WorkQueue workQueue;
};
// stdout is a shared resource, so protected it with a mutex
static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;
// Debugging function
void showTask(int n) {
pthread_mutex_lock(&console_mutex);
pthread_mutex_unlock(&console_mutex);
}
// Task to compute fibonacci numbers
// It's more efficient to use an iterative algorithm, but
// the recursive algorithm takes longer and is more interesting
// than sleeping for X seconds to show parrallelism
class FibTask : public Task {
public:
FibTask(int n) : Task(), _n(n) {}
~FibTask() {
// Debug prints
pthread_mutex_lock(&console_mutex);
printf("tid(%d) - fibd(%d) being deleted\n", pthread_self(), _n);
pthread_mutex_unlock(&console_mutex);
}
virtual void run() {
// Note: it's important that this isn't contained in the console mutex lock
long long val = innerFib(_n);
// Show results
pthread_mutex_lock(&console_mutex);
printf("Fibd %d = %lld\n",_n, val);
pthread_mutex_unlock(&console_mutex);
// The following won't work in parrallel:
// pthread_mutex_lock(&console_mutex);
// printf("Fibd %d = %lld\n",_n, innerFib(_n));
// pthread_mutex_unlock(&console_mutex);
}
virtual void showTask() {
// More debug printing
pthread_mutex_lock(&console_mutex);
printf("thread %d computing fibonacci %d\n", pthread_self(), _n);
pthread_mutex_unlock(&console_mutex);
}
private:
// Slow computation of fibonacci sequence
// To make things interesting, and perhaps imporove load balancing, these
// inner computations could be added to the task queue
// Ideally set a lower limit on when that's done
// (i.e. don't create a task for fib(2)) because thread overhead makes it
// not worth it
long long innerFib(long long n) {
if (n<=1) { return 1; }
return innerFib(n-1) + innerFib(n-2);
}
long long _n;
};
int main(int argc, char *argv[])
{
// Create a thread pool
ThreadPool *tp = new ThreadPool(10);
// Create work for it
/*for (int i=0;i<100; ++i) {
int rv = rand() % 40 + 1;
showTask(rv);
tp->addTask(new FibTask(rv));
}*/
delete tp;
printf("\n\n\n\n\nDone with all work!\n");
}
The design is more or less OK-ish, but implementation-wise it contains several things that are a bit overcomplicated and may introduce instabilities. I guess your program deadlocks when you comment out the for loop because you should use pthread_cond_broadcast instead of pthread_cond_signal in your WorkQueue::finish() method.
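As a minimal sketch of that change (only finish() from the question's WorkQueue; everything else unchanged), broadcasting wakes every worker blocked in pthread_cond_wait():
// Mark the queue finished and wake *all* waiting workers
void finish() {
    pthread_mutex_lock(&qmtx);
    finished = true;
    // Broadcast instead of signal, so every thread blocked in
    // pthread_cond_wait() inside nextTask() gets a chance to return.
    pthread_cond_broadcast(&wcond);
    pthread_mutex_unlock(&qmtx);
}
Note that nextTask() in the question still falls through to tasks.front() after the wait even when the queue is empty, so it also has to re-check finished and the queue size after waking up; the reworked version below side-steps this by pushing one NULL task per thread.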
Note: I usually implement thread pool termination by placing NUM_THREADS NULL items into the work queue, and I set a finished flag only so that I can check it in my addTask() method, because after finish() I usually don't allow adding new tasks and return false from addTask() (or sometimes assert).
Another note: it's best to encapsulate threads into classes; that has several benefits and makes porting to multiple platforms easier.
There may be other bugs too, as I haven't executed your program, just read through your code.
EDIT: Here is a reworked version. I made some modifications to your code, but I don't guarantee that it works. Fingers crossed... :-)
#include <stdio.h>
#include <queue>
#include <unistd.h>
#include <pthread.h>
#include <malloc.h>
#include <stdlib.h>
#include <assert.h>
// Reusable thread class
class Thread
{
public:
Thread()
{
state = EState_None;
handle = 0;
}
virtual ~Thread()
{
assert(state != EState_Started);
}
void start()
{
assert(state == EState_None);
// in case of thread create error I usually FatalExit...
if (pthread_create(&handle, NULL, threadProc, this))
abort();
state = EState_Started;
}
void join()
{
// A started thread must be joined exactly once!
// This requirement could be eliminated with an alternative implementation but it isn't needed.
assert(state == EState_Started);
pthread_join(handle, NULL);
state = EState_Joined;
}
protected:
virtual void run() = 0;
private:
static void* threadProc(void* param)
{
Thread* thread = reinterpret_cast<Thread*>(param);
thread->run();
return NULL;
}
private:
enum EState
{
EState_None,
EState_Started,
EState_Joined
};
EState state;
pthread_t handle;
};
// Base task for Tasks
// run() should be overloaded and expensive calculations done there
// showTask() is for debugging and can be deleted if not used
class Task {
public:
Task() {}
virtual ~Task() {}
virtual void run()=0;
virtual void showTask()=0;
};
// Wrapper around std::queue with some mutex protection
class WorkQueue
{
public:
WorkQueue() {
pthread_mutex_init(&qmtx,0);
// wcond is a condition variable that's signaled
// when new work arrives
pthread_cond_init(&wcond, 0);
}
~WorkQueue() {
// Cleanup pthreads
pthread_mutex_destroy(&qmtx);
pthread_cond_destroy(&wcond);
}
// Retrieves the next task from the queue
Task *nextTask() {
// The return value
Task *nt = 0;
// Lock the queue mutex
pthread_mutex_lock(&qmtx);
while (tasks.empty())
pthread_cond_wait(&wcond, &qmtx);
nt = tasks.front();
tasks.pop();
// Unlock the mutex and return
pthread_mutex_unlock(&qmtx);
return nt;
}
// Add a task
void addTask(Task *nt) {
// Lock the queue
pthread_mutex_lock(&qmtx);
// Add the task
tasks.push(nt);
// signal there's new work
pthread_cond_signal(&wcond);
// Unlock the mutex
pthread_mutex_unlock(&qmtx);
}
private:
std::queue<Task*> tasks;
pthread_mutex_t qmtx;
pthread_cond_t wcond;
};
// Thanks to the reusable thread class implementing threads is
// simple and free of pthread api usage.
class PoolWorkerThread : public Thread
{
public:
PoolWorkerThread(WorkQueue& _work_queue) : work_queue(_work_queue) {}
protected:
virtual void run()
{
while (Task* task = work_queue.nextTask())
task->run();
}
private:
WorkQueue& work_queue;
};
class ThreadPool {
public:
// Allocate a thread pool and set them to work trying to get tasks
ThreadPool(int n) {
printf("Creating a thread pool with %d threads\n", n);
for (int i=0; i<n; ++i)
{
threads.push_back(new PoolWorkerThread(workQueue));
threads.back()->start();
}
}
// Wait for the threads to finish, then delete them
~ThreadPool() {
finish();
}
// Add a task
void addTask(Task *nt) {
workQueue.addTask(nt);
}
// Asking the threads to finish, waiting for the task
// queue to be consumed and then returning.
void finish() {
for (size_t i=0,e=threads.size(); i<e; ++i)
workQueue.addTask(NULL);
for (size_t i=0,e=threads.size(); i<e; ++i)
{
threads[i]->join();
delete threads[i];
}
threads.clear();
}
private:
std::vector<PoolWorkerThread*> threads;
WorkQueue workQueue;
};
// stdout is a shared resource, so protected it with a mutex
static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;
// Debugging function
void showTask(int n) {
pthread_mutex_lock(&console_mutex);
pthread_mutex_unlock(&console_mutex);
}
// Task to compute fibonacci numbers
// It's more efficient to use an iterative algorithm, but
// the recursive algorithm takes longer and is more interesting
// than sleeping for X seconds to show parrallelism
class FibTask : public Task {
public:
FibTask(int n) : Task(), _n(n) {}
~FibTask() {
// Debug prints
pthread_mutex_lock(&console_mutex);
printf("tid(%d) - fibd(%d) being deleted\n", (int)pthread_self(), (int)_n);
pthread_mutex_unlock(&console_mutex);
}
virtual void run() {
// Note: it's important that this isn't contained in the console mutex lock
long long val = innerFib(_n);
// Show results
pthread_mutex_lock(&console_mutex);
printf("Fibd %d = %lld\n",(int)_n, val);
pthread_mutex_unlock(&console_mutex);
// The following won't work in parrallel:
// pthread_mutex_lock(&console_mutex);
// printf("Fibd %d = %lld\n",_n, innerFib(_n));
// pthread_mutex_unlock(&console_mutex);
// this thread pool implementation doesn't delete
// the tasks so we perform the cleanup here
delete this;
}
virtual void showTask() {
// More debug printing
pthread_mutex_lock(&console_mutex);
printf("thread %d computing fibonacci %d\n", (int)pthread_self(), (int)_n);
pthread_mutex_unlock(&console_mutex);
}
private:
// Slow computation of fibonacci sequence
// To make things interesting, and perhaps imporove load balancing, these
// inner computations could be added to the task queue
// Ideally set a lower limit on when that's done
// (i.e. don't create a task for fib(2)) because thread overhead makes it
// not worth it
long long innerFib(long long n) {
if (n<=1) { return 1; }
return innerFib(n-1) + innerFib(n-2);
}
long long _n;
};
int main(int argc, char *argv[])
{
// Create a thread pool
ThreadPool *tp = new ThreadPool(10);
// Create work for it
for (int i=0;i<100; ++i) {
int rv = rand() % 40 + 1;
showTask(rv);
tp->addTask(new FibTask(rv));
}
delete tp;
printf("\n\n\n\n\nDone with all work!\n");
}
I think you have a race condition there...
When you remove the for loop, the pool is destructed as soon as it is created, so there is no time for the threads to start waiting on the queue. Try putting a sleep there and you'll see.
I implemented a threadpool library which is used widely among all our services, so here comes some advice:
You are using C++, so there's no need to use pthreads; just use Boost, or std::thread if available
Don't signal, push empty tasks instead (pushing a task requires a signal, of course)
Use boost::function or std::function instead of a base class (see the sketch below)
Cope with spurious wake-ups (your code doesn't seem to handle them)
pthread_cond_signal wakes up only one thread; you must use pthread_cond_broadcast if you want to notify them all. That said, I'd recommend, again, sticking to Boost's condition variables (@pasztorpisti got it right here, he's got my upvote)
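To illustrate the std::function / empty-task suggestions (a rough sketch, not the library mentioned above), a task queue could look like this, with an empty function acting as the stop marker:
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

class FunctionQueue {
public:
    // Push a task; an empty std::function is used as the "stop" marker.
    void push(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(m_);
        tasks_.push(std::move(task));
        cv_.notify_one();
    }
    // Blocks until a task is available (the while loop copes with
    // spurious wake-ups) and returns it.
    std::function<void()> pop() {
        std::unique_lock<std::mutex> lock(m_);
        while (tasks_.empty())
            cv_.wait(lock);
        std::function<void()> task = std::move(tasks_.front());
        tasks_.pop();
        return task;
    }
private:
    std::queue<std::function<void()>> tasks_;
    std::mutex m_;
    std::condition_variable cv_;
};

// Worker loop: run tasks until the empty "stop" task is popped.
// while (std::function<void()> task = queue.pop()) task();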
I am using the Boost library to develop asynchronous UDP communication. Data received on the receiver side is processed by another thread, and my problem is that when I read the received data in that other thread, rather than in the receiver thread itself, I get modified or updated data that is not what it is supposed to be.
My code works on an unsigned character buffer array on both the sender and the receiver side, because I need to treat the unsigned character buffer as a packet of data,
e.g. buffer[2] = Engine_start_ID
/* global buffer to store the incoming data */
unsigned char received_buffer[200];
/*
global buffer accessed by another thread,
which contains a copy of the received_buffer
*/
unsigned char read_hmi_buffer[200];
boost::mutex hmi_buffer_copy_mutex;
void udpComm::start_async_receive() {
udp_socket.async_receive_from(
boost::asio::buffer(received_buffer, max_length), remote_endpoint,
boost::bind(&udpComm::handle_receive_from, this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred));
}
/* the data received is stored in the unsigned char received_buffer data buffer*/
void udpComm::handle_receive_from(const boost::system::error_code& error,
size_t bytes_recvd) {
if (!error && bytes_recvd > 0) {
received_bytes = bytes_recvd;
hmi_buffer_copy_mutex.lock();
memcpy(&read_hmi_buffer[0], &received_buffer[0], received_bytes);
hmi_buffer_copy_mutex.unlock();
/* the data received here is correct, because I printed it
   to the console and checked it
*/
cout<<(int)read_hmi_buffer[2]<<endl;
}
start_async_receive();
}
/* io_service is running in a thread
*/
void udpComm::run_io_service() {
udp_io_service.run();
usleep(1000000);
}
The above code is the asynchronous UDP communication, which runs in its own thread.
/* My second thread function is */
void thread_write_to_datalink()
{
    hmi_buffer_copy_mutex.lock();
    /* here is where my problem begins */
    cout << (int) read_hmi_buffer[2] << endl;
    hmi_buffer_copy_mutex.unlock();
    /* the data has already changed by this point */
    serial.write_to_serial(read_hmi_buffer, 6);
}
/* threads from my main function
are as below */
int main() {
receive_from_hmi.start_async_receive();
boost::thread thread_receive_from_hmi(&udpComm::run_io_service,
&receive_from_hmi);
boost::thread thread_serial(&thread_write_to_datalink);
thread_serial.join();
thread_receive_from_hmi.join();
return 0;
}
/* The Serial_manager class contains functions for writting and reading from serial port*/
#include <iostream>
#include <boost/thread.hpp>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
using namespace boost::asio;
class Serial_manager {
public:
Serial_manager(boost::asio::io_service &serial_io_service,char *dev_name);
void open_serial_port();
void write_to_serial(void *data, int size);
size_t read_from_serial(void *data, int size);
void handle_serial_exception(std::exception &ex);
virtual ~Serial_manager();
void setDeviceName(char* deviceName);
protected:
io_service &port_io_service;
serial_port datalink_serial_port;
bool serial_port_open;
char *device_name;
};
void Serial_manager::setDeviceName(char* deviceName) {
device_name = deviceName;
}
Serial_manager::Serial_manager(boost::asio::io_service &serial_io_service,char *dev_name):
port_io_service(serial_io_service),
datalink_serial_port(serial_io_service) {
device_name = dev_name;
serial_port_open = false;
open_serial_port();
}
void Serial_manager::open_serial_port() {
bool temp_port_status = false;
bool serial_port_msg_printed = false;
do {
try {
datalink_serial_port.open(device_name);
temp_port_status = true;
} catch (std::exception &ex) {
if (!serial_port_msg_printed) {
std::cout << "Exception-check the serial port device "
<< ex.what() << std::endl;
serial_port_msg_printed = true;
}
datalink_serial_port.close();
temp_port_status = false;
}
} while (!temp_port_status);
serial_port_open = temp_port_status;
std::cout <<std::endl <<"serial port device opened successfully"<<std::endl;
datalink_serial_port.set_option(serial_port_base::baud_rate(115200));
datalink_serial_port.set_option(
serial_port_base::flow_control(
serial_port_base::flow_control::none));
datalink_serial_port.set_option(
serial_port_base::parity(serial_port_base::parity::none));
datalink_serial_port.set_option(
serial_port_base::stop_bits(serial_port_base::stop_bits::one));
datalink_serial_port.set_option(serial_port_base::character_size(8));
}
void Serial_manager::write_to_serial(void *data, int size) {
boost::asio::write(datalink_serial_port, boost::asio::buffer(data, size));
}
size_t Serial_manager::read_from_serial(void *data, int size) {
return boost::asio::read(datalink_serial_port, boost::asio::buffer(data, size));
}
void Serial_manager::handle_serial_exception(std::exception& ex) {
std::cout << "Exception-- " << ex.what() << std::endl;
std::cout << "Cannot access data-link, check the serial connection"
<< std::endl;
datalink_serial_port.close();
open_serial_port();
}
Serial_manager::~Serial_manager() {
// TODO Auto-generated destructor stub
}
I think my problem is really about thread synchronization and notification, and I would be happy if you could help me. You should not worry about the sender; it works perfectly, as I have already checked that the data arrives in the receiver thread. I hope you understand my question.
Edit: Here is the modification. My whole idea here is to develop a simulation of manual flight control, so according to my design I have a client application that sends commands over UDP. On the receiver side I intended to use 3 threads: one thread receives input from the sticks, i.e. void start_hotas(); the second thread receives commands from the sender (client), i.e. void udpComm::run_io_service(); and the third is void thread_write_to_datalink().
/* a thread that listens for input from sticks*/
void start_hotas() {
Hotas_manager hotasobj;
__s16 event_value; /* value */
__u8 event_number; /* axis/button number */
while (1) {
hotasobj.readData_from_hotas();
event_number = hotasobj.getJoystickEvent().number;
event_value = hotasobj.getJoystickEvent().value;
if (hotasobj.isAxisPressed()) {
if (event_number == 0) {
aileron = (float) event_value / 32767;
} else if (event_number == 1) {
elevator = -(float) event_value / 32767;
} else if (event_number == 2) {
rudder = (float) event_value / 32767;
} else if (event_number == 3) {
brake_left = (float) (32767 - event_value) / 65534;
} else if (event_number == 4) {
} else if (event_number == 6) {
} else if (event_number == 10) {
} else if (event_number == 11) {
} else if (event_number == 12) {
}
} else if (hotasobj.isButtonPressed()) {
}
usleep(1000);
}
}
/*
* Hotas.h
*
* Created on: Jan 31, 2013
* Author: metec
*/
#define JOY_DEV "/dev/input/js0"
#include <iostream>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <boost/thread.hpp>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <linux/joystick.h>
bool message_printed = false;
bool message2_printed = false;
class Hotas_manager {
public:
Hotas_manager();
virtual ~Hotas_manager();
void open_hotas_device();
/*
*
* read from hotas input
* used to the updated event data and status of the joystick from the
* the file.
*
*/
void readData_from_hotas();
js_event getJoystickEvent() {
return joystick_event;
}
int getNumOfAxis() {
return num_of_axis;
}
int getNumOfButtons() {
return num_of_buttons;
}
bool isAxisPressed() {
return axis_pressed;
}
bool isButtonPressed() {
return button_pressed;
}
int* getAxis() {
return axis;
}
char* getButton() {
return button;
}
private:
int fd;
js_event joystick_event;
bool hotas_connected;
int num_of_axis;
int num_of_buttons;
int version;
char devName[80];
/*
* the the variables below indicates
* the state of the joystick.
*/
int axis[30];
char button[30];
bool button_pressed;
bool axis_pressed;
};
Hotas_manager::Hotas_manager() {
// TODO Auto-generated constructor stub
hotas_connected = false;
open_hotas_device();
std::cout << "joystick device detected" << std::endl;
}
Hotas_manager::~Hotas_manager() {
// TODO Auto-generated destructor stub
}
void Hotas_manager::open_hotas_device() {
bool file_open_error_printed = false;
while (!hotas_connected) {
if ((fd = open(JOY_DEV, O_RDONLY)) > 0) {
ioctl(fd, JSIOCGAXES, num_of_axis);
ioctl(fd, JSIOCGBUTTONS, num_of_buttons);
ioctl(fd, JSIOCGVERSION, version);
ioctl(fd, JSIOCGNAME(80), devName);
/*
* NON BLOCKING MODE
*/
ioctl(fd, F_SETFL, O_NONBLOCK);
hotas_connected = true;
} else {
if (!file_open_error_printed) {
std::cout << "hotas device not detected. check "
"whether it is "
"plugged" << std::endl;
file_open_error_printed = true;
}
close(fd);
hotas_connected = false;
}
}
}
void Hotas_manager::readData_from_hotas() {
int result;
result = read(fd, &joystick_event, sizeof(struct js_event));
if (result > 0) {
switch (joystick_event.type & ~JS_EVENT_INIT) {
case JS_EVENT_AXIS:
axis[joystick_event.number] = joystick_event.value;
axis_pressed = true;
button_pressed = false;
break;
case JS_EVENT_BUTTON:
button[joystick_event.number] = joystick_event.value;
button_pressed = true;
axis_pressed = false;
break;
}
message2_printed = false;
message_printed = false;
} else {
if (!message_printed) {
std::cout << "problem in reading the stick file" << std::endl;
message_printed = true;
}
hotas_connected = false;
open_hotas_device();
if (!message2_printed) {
std::cout << "stick re-connected" << std::endl;
message2_printed = true;
}
}
}
I updated the main function to run the 3 threads.
int main() {
boost::asio::io_service receive_from_hmi_io;
udpComm receive_from_hmi(receive_from_hmi_io, 6012);
receive_from_hmi.setRemoteEndpoint("127.0.0.1", 6011);
receive_from_hmi.start_async_receive();
boost::thread thread_receive_from_hmi(&udpComm::run_io_service,
&receive_from_hmi);
boost::thread thread_serial(&thread_write_to_datalink);
boost::thread thread_hotas(&start_hotas);
thread_hotas.join();
thread_serial.join();
thread_receive_from_hmi.join();
return 0;
}
The void thread_write_to_datalink() function also writes the data coming from the hotas_manager (joysticks).
void thread_write_to_datalink() {
/*
* boost serial communication
*/
boost::asio::io_service serial_port_io;
Serial_manager serial(serial_port_io, (char*) "/dev/ttyUSB0");
cout << "aileron " << "throttle " << "elevator " << endl;
while (1) {
// commands from udp communication
serial.write_to_serial(read_hmi_buffer, 6);
// data come from joystick inputs
//cout << aileron<<" "<<throttle<<" "<<elevator<< endl;
memcpy(&buffer_manual_flightcontrol[4], &aileron, 4);
memcpy(&buffer_manual_flightcontrol[8], &throttle, 4);
memcpy(&buffer_manual_flightcontrol[12], &elevator, 4);
unsigned char temp;
try {
serial.write_to_serial(buffer_manual_flightcontrol, 32);
//serial.write_to_serial(buffer_manual_flightcontrol, 32);
} catch (std::exception& exp) {
serial.handle_serial_exception(exp);
}
try {
serial.write_to_serial(buffer_payloadcontrol, 20);
} catch (std::exception& exp) {
serial.handle_serial_exception(exp);
}
usleep(100000);
}
}
My question is: how can I better design the synchronization of these 3 threads? If your answer is that I do not need 3 threads, please tell me how.
Let's back up a little from multi-threading: your program mixes synchronous and asynchronous operations. You don't need to do this, as it will only cause confusion. You can asynchronously write the buffer read from the UDP socket to the serial port. This can all be achieved with a single thread running the io_service, eliminating any concurrency concerns.
You will need to add buffer management to keep the data read from the socket in scope for the lifetime of the async_write on the serial port; study the async UDP server as an example. Also study the documentation, specifically the requirements on buffer lifetime for async_write:
buffers
One or more buffers containing the data to be written.
Although the buffers object may be copied as necessary, ownership of
the underlying memory blocks is retained by the caller, which must
guarantee that they remain valid until the handler is called.
Once you have completed that design, then you can move to more advanced techniques such as a thread pool or multiple io_services.
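A rough sketch of that single-threaded design (names and sizes are illustrative, error handling and the serial-port setup from the question are omitted, and serial_port_ is an assumed member of udpComm): the UDP receive handler copies the datagram into a heap buffer that stays alive until the serial write completes:
void udpComm::handle_receive_from(const boost::system::error_code& error,
                                  size_t bytes_recvd) {
    if (!error && bytes_recvd > 0) {
        // Copy the datagram into a buffer that outlives this handler
        // (needs <vector> and <boost/make_shared.hpp>).
        auto data = boost::make_shared<std::vector<unsigned char> >(
            received_buffer, received_buffer + bytes_recvd);
        // Queue the asynchronous serial write; the shared_ptr captured by
        // the completion handler keeps the buffer alive until it is done.
        boost::asio::async_write(
            serial_port_, boost::asio::buffer(*data),
            [data](const boost::system::error_code&, std::size_t) {});
    }
    start_async_receive();  // keep listening for the next datagram
}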
You need to make your access to read_hmi_buffer synchronized.
Therefore you need a mutex (std::mutex, pthread_mutex_t, or the Windows equivalent) to lock whenever a piece of code reads or writes that buffer.
See this question for a few explanations of the concept and links to other tutorials.
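As a small sketch that reuses the hmi_buffer_copy_mutex and names already present in the question, the writer thread could take a snapshot of the buffer while holding the lock and then do the slow serial I/O on the copy:
void thread_write_to_datalink() {
    unsigned char local_copy[200];
    {
        // Same mutex the receive handler locks before it fills read_hmi_buffer.
        boost::lock_guard<boost::mutex> guard(hmi_buffer_copy_mutex);
        memcpy(local_copy, read_hmi_buffer, sizeof(local_copy));  // needs <cstring>
    }
    // Work on a consistent snapshot, without holding the lock during I/O.
    serial.write_to_serial(local_copy, 6);
}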