I'm trying to make a multithreaded proxy checker in C++. When I start the threads and take the lock, all threads wait until the current request is finished. I tried removing the locks, but that doesn't help either. I'm using the cpr library to make the requests; the documentation can be found here: https://whoshuu.github.io/cpr/advanced-usage.html.
Reproducible example:
#include <stdio.h>
#include <pthread.h>
#include <iostream>
#include <queue>
#include <mutex>
#include <cpr/cpr.h>
#include <fmt/format.h>
#define NUMT 10
using namespace std;
using namespace fmt;
std::mutex mut;
std::queue<std::string> q;
void* Checker(void* arg) {
while (!q.empty()) {
mut.lock();
//get a webhook at https://webhook.site
string protocol = "socks4";
string proxyformatted = format("{0}://{1}", protocol, q.front());
auto r = cpr::Get(cpr::Url{ "<webhook url>" },
cpr::Proxies{ {"http", proxyformatted}, {"https", proxyformatted} });
q.pop();
mut.unlock();
}
return NULL;
}
int main(int argc, char** argv) {
q.push("138.201.134.206:5678");
q.push("185.113.7.87:5678");
q.push("5.9.16.126:5678");
q.push("88.146.196.181:4153");
pthread_t tid[NUMT]; int i;
int thread_args[NUMT];
for (i = 0; i < NUMT; i++) {
thread_args[i] = i;
pthread_create(&tid[i], NULL, Checker, (void*) &thread_args);
}
for (i = 0; i < NUMT; i++) {
pthread_join(tid[i], NULL);
fprintf(stderr, "Thread %d terminated\n", i);
}
return 0;
}
Thanks in advance.
I suggest implementing a wrapper class for your queue that hides the mutex.
That class can provide push(std::string s) and bool pop(std::string& s), where pop returns true and populates s if the queue wasn't empty, or false otherwise. Then your worker threads can simply loop:
std::string s;
while(q.pop(s)) {
...
}
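A minimal sketch of such a wrapper (the class name and details here are illustrative, not taken from the original code):
#include <mutex>
#include <queue>
#include <string>

// The mutex lives inside the class, so callers never hold it while doing
// slow work such as the HTTP request.
class SafeQueue {
    std::mutex mut_;
    std::queue<std::string> q_;
public:
    void push(std::string s) {
        std::lock_guard<std::mutex> lock(mut_);
        q_.push(std::move(s));
    }
    // Returns false if the queue was empty, otherwise pops the front into s.
    bool pop(std::string& s) {
        std::lock_guard<std::mutex> lock(mut_);
        if (q_.empty()) return false;
        s = std::move(q_.front());
        q_.pop();
        return true;
    }
};
With this, each worker locks only for the brief pop, and the cpr::Get call runs outside the critical section, so the threads can actually make requests in parallel.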
Sorry for my English; if something is not clear, please ask me. I am having trouble making this application work with Windows Forms ("ThreadTeste" stands in for "MyForm1"):
#include <chrono>
#include <iostream>
#include <thread>
using namespace std;
using namespace std::chrono;
class ThreadTeste
{
public:
void loop()
{
for(int i = 0 ; i < 5 ; i++)
{
cout << i << endl;
this_thread::sleep_for(seconds(1));
}
}
thread getThread()
{
return thread(&ThreadTeste::loop, this);
}
ThreadTeste()
{
thread myThread = getThread();
myThread.detach();
}
};
int main(int argc, char *argv[])
{
ThreadTeste* t = new ThreadTeste();
while(true)
{
cout << "Working" << endl;
this_thread::sleep_for(seconds(1));
}
}
//That works!!
If you want to output 0 through 4 first and then output "Working", you cannot use detach. detach means the main thread does not wait for the child thread to finish executing; the two are no longer related, and the child runs on its own.
You could use join instead.
ThreadTeste()
{
thread myThread = getThread();
myThread.join();
}
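Note that joining in the constructor blocks construction until loop() has finished, so "Working" only starts printing after the count. If you want both to run concurrently and still avoid detach, one option (a sketch of mine, not part of the answer above) is to keep the thread as a member and join it in the destructor:
#include <chrono>
#include <iostream>
#include <thread>

class ThreadTeste
{
    std::thread myThread;   // owned by the object instead of detached
public:
    void loop()
    {
        for (int i = 0; i < 5; i++)
        {
            std::cout << i << std::endl;
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    }
    ThreadTeste() : myThread(&ThreadTeste::loop, this) {}
    ~ThreadTeste()
    {
        if (myThread.joinable())
            myThread.join();    // wait for the worker before the object goes away
    }
};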
In my main program I am copying a string buffer into a boost ring buffer and then trying to consume that data in a created thread and write it to a file. In the main thread I am also writing the same data to a file, but the input and output files do not match.
I feel I am doing something incredibly stupid. Please help. Also, any suggestions to improve the code would be really appreciated.
#include <iostream>
#include <vector>
#include <boost/circular_buffer.hpp>
#include <numeric>
#include <assert.h>
#include <thread>
#include <mutex>
#include <chrono>
#include <time.h>
#include <cstdint>
#include <fstream>
#include <string>
#include <cstring>
using std::cin;
using std::cout;
using std::endl;
using std::fstream;
using std::string;
#define SOME_FIXED_HARDCODED_NUMBER 40980
class MyClass {
public:
std::vector<int8_t> vec;
public:
MyClass(std::vector<int8_t> v){ vec = v; }
};
boost::circular_buffer<MyClass> cb(300);
int waiting = 1;
std::mutex my_mutex;
FILE *out_file;
FILE *in_file;
void foo()
{
while (waiting) {
std::unique_lock<std::mutex> lock(my_mutex);
if (!cb.size() || waiting == 0) {
lock.unlock();
continue;
}
if (!waiting)
break;
MyClass local_buf = cb.front();
cb.pop_front();
fwrite(local_buf.vec.data(), 1, local_buf.vec.size(), out_file);
}
}
int main(int argc, char* argv[])
{
out_file = fopen("output_data.raw", "w");
in_file = fopen("input_data.raw", "w");
std::thread th1(foo);
char *buf = {"abc"};
int counter = 0;
std::vector<int8_t> mem;
mem.insert(mem.end(), buf, buf + strlen(buf));
while (counter < SOME_FIXED_HARDCODED_NUMBER)
{
{
std::unique_lock<std::mutex> lock(my_mutex);
/* if the circular buffer is full then wait for consumer to pull the data */
while (cb.full()) {
lock.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
std::unique_lock<std::mutex> lock(my_mutex);
}
cb.push_front(MyClass(mem));
fwrite(mem.data(), 1, mem.size(), in_file);
}
counter++;
}
waiting = 0;
th1.join();
fclose(out_file);
fclose(in_file);
return 0;
}
while (cb.full()) {
lock.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
>>> std::unique_lock<std::mutex> lock(my_mutex);
}
The marked unique_lock doesn't do anything, as it goes out of scope immediately and unlocks the mutex. Hence, once you leave the loop, the mutex is not locked and you have a race condition. Instead, you should use lock.lock() to relock the mutex.
There are a few more bugs. You are not waiting for your foo thread to actually drain the buffer; it will stop as soon as the waiting flag is set by the main thread. Also, waiting should be an atomic.
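As a rough sketch, reusing the variable names from the question, the fixed producer wait loop and a consumer loop that drains the buffer could look like this:
// producer side: relock the same lock object instead of declaring a new one
std::unique_lock<std::mutex> lock(my_mutex);
while (cb.full()) {
    lock.unlock();
    std::this_thread::sleep_for(std::chrono::milliseconds(1000));
    lock.lock();    // relock before re-checking cb.full()
}
cb.push_front(MyClass(mem));
fwrite(mem.data(), 1, mem.size(), in_file);

// consumer side: keep draining until the flag is cleared AND the buffer is empty
std::atomic<int> waiting{1};   // now atomic; needs <atomic>

void foo()
{
    while (true) {
        std::unique_lock<std::mutex> lock(my_mutex);
        if (cb.empty()) {
            if (!waiting) break;   // producer finished and nothing is left
            continue;              // lock is released at the end of the iteration
        }
        MyClass local_buf = cb.front();
        cb.pop_front();
        lock.unlock();             // don't hold the mutex during file I/O
        fwrite(local_buf.vec.data(), 1, local_buf.vec.size(), out_file);
    }
}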
I've implemented thread pooling following the answer of Kerrek SB in this question.
I've implemented an MPMC queue for the functions and a vector of threads for the workers.
Everything worked perfectly, except that I don't know how to terminate the program: at the end, if I just call thread.join, the thread is still waiting for more tasks to do, so it will not join and the main thread will not continue.
Any idea how to end the program correctly?
For completeness, this is my code:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
std::function<void()> pop();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition()
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification immediately, the consumer will try to get the lock, so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
std::function<void()> Function_pool::pop()
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() { return !m_function_queue.empty(); });
auto func = m_function_queue.front();
m_function_queue.pop();
return func;
// Lock will be released
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
void example_function()
{
std::cout << "bla" << std::endl;
}
void infinite_loop_func()
{
while (true)
{
std::function<void()> func = func_pool.pop();
func();
}
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(infinite_loop_func));
}
//here we should send our functions
func_pool.push(example_function);
for (int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
int i;
std::cin >> i;
}
Your problem is located in infinite_loop_func, which is an infinite loop and consequently doesn't terminate. I've read the other answer, which suggests throwing an exception; however, I don't like that approach, since exceptions should not be used for regular control flow.
The best way to solve this is to deal with the stop condition explicitly. For example:
std::atomic<bool> acceptsFunctions;
Adding this to the function pool allows you to clearly track its state and to assert that no new functions are being added when you destruct.
std::optional<std::function<void()>> Function_pool::pop()
Returning an empty optional (or an empty std::function in C++14 and before) allows you to deal with an empty queue. You have to handle that case, as a condition_variable can have spurious wakeups.
With this, m_data_condition.notify_all() can be used to wake all threads.
Finally, we have to fix the infinite loop, as it doesn't cover overcommitment, while at the same time allowing you to execute all functions still in the queue:
while (func_pool.acceptsFunctions || func_pool.containsFunctions())
{
auto f = func_pool.pop();
if (!f)
{
func_pool.m_data_condition.wait_for(1s);
continue;
}
auto &function = *f;
function();
}
I'll leave it up to you to implement containsFunctions() and to clean up the code (infinite_loop_func as a member function?). Note that with a counter, you could even deal with background tasks being spawned.
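A possible sketch of such a pop(), assuming the pool has gained the acceptsFunctions flag described above (the member names are illustrative, not the original code; requires #include <optional> and C++17):
std::optional<std::function<void()>> Function_pool::pop()
{
    std::unique_lock<std::mutex> lock(m_lock);
    // wait until there is work or the pool is shutting down
    m_data_condition.wait(lock, [this] {
        return !m_function_queue.empty() || !acceptsFunctions;
    });
    if (m_function_queue.empty())
        return std::nullopt;   // woken up only for shutdown, nothing to run
    auto func = std::move(m_function_queue.front());
    m_function_queue.pop();
    return func;
}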
You can always use a specific exception type to signal to infinite_loop_func that it should return...
class quit_worker_exception: public std::exception {};
Then change infinite_loop_func to...
void infinite_loop_func ()
{
while (true) {
std::function<void()> func = func_pool.pop();
try {
func();
}
catch (quit_worker_exception &ex) {
return;
}
}
}
With the above changes you could then use (in main)...
/*
* Enqueue `thread_pool.size()' function objects whose sole job is
* to throw an instance of `quit_worker_exception' when invoked.
*/
for (int i = 0; i < thread_pool.size(); i++)
func_pool.push([](){ throw quit_worker_exception(); });
/*
* Now just wait for each worker to terminate having received its
* quit_worker_exception.
*/
for (int i = 0; i < thread_pool.size(); i++)
thread_pool.at(i).join();
Each instance of infinite_loop_func will dequeue one function object which, when called, throws a quit_worker_exception causing it to return.
Following [JVApen](https://stackoverflow.com/posts/51382714/revisions)'s suggestion, I'm posting my code in case anyone wants a working version:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <cassert>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
std::atomic<bool> m_accept_functions;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
void done();
void infinite_loop_func();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition(), m_accept_functions(true)
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification immediately, the consumer will try to get the lock , so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
void Function_pool::done()
{
std::unique_lock<std::mutex> lock(m_lock);
m_accept_functions = false;
lock.unlock();
// when we send the notification immediately, the consumer will try to get the lock , so unlock asap
m_data_condition.notify_all();
//notify all waiting threads.
}
void Function_pool::infinite_loop_func()
{
std::function<void()> func;
while (true)
{
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() {return !m_function_queue.empty() || !m_accept_functions; });
if (!m_accept_functions && m_function_queue.empty())
{
//lock will be release automatically.
//finish the thread loop and let it join in the main thread.
return;
}
func = m_function_queue.front();
m_function_queue.pop();
//release the lock
}
func();
}
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
class quit_worker_exception : public std::exception {};
void example_function()
{
std::cout << "bla" << std::endl;
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(&Function_pool::infinite_loop_func, &func_pool));
}
//here we should send our functions
for (int i = 0; i < 50; i++)
{
func_pool.push(example_function);
}
func_pool.done();
for (unsigned int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
}
My code acquires images and processes them. Performance is critical, so I've tried my hand at multithreading. Currently, I've only made the acquiring part a separate thread. I'm implementing a simple FIFO buffer using std::queue that stores the acquired images. The acquisition function AcquireImages writes raw image data to this buffer indefinitely until user interruption. The processing function, ProcessImages, reads the buffer and processes the image data (currently in the main thread, but I'm planning to make this a separate thread as well once I've ironed out the issues). Here's my code (modified to form a minimal, complete, and verifiable example):
#include <iostream>
#include <vector>
#include <queue>
#include <atomic>
#include <thread>
#define NUM_CAMERAS 2
void AcquireImages(std::queue<unsigned char*> &rawImageQueue, std::atomic<bool> &quit)
{
unsigned char* rawImage{};
while (!quit)
{
for (int camera = 0; camera < NUM_CAMERAS; camera++)
{
switch (camera)
{
case 0:
rawImage = (unsigned char*)"Cam0Image";
break;
case 1:
rawImage = (unsigned char*)"Cam1Image";
break;
default:
break;
}
rawImageQueue.push(std::move(rawImage));
}
}
}
int ProcessImages(const std::vector<unsigned char*> &rawImageVec, const int count)
{
// Do something to the raw image vector
if (count > 10)
{
return 1;
}
else
{
return 0;
} // In my application, this function only returns non-zero upon user interception.
}
int main()
{
// Preparation
std::vector<unsigned char*> rawImageVec;
rawImageVec.reserve(NUM_CAMERAS);
std::queue<unsigned char*> rawImageQueue;
int count{};
const unsigned int nThreads = 1; // this might grow later
std::atomic<bool> loopFlags[nThreads];
std::thread threads[nThreads];
// Start threads
for (int i = 0; i < nThreads; i++) {
loopFlags[i] = false;
threads[i] = std::thread(AcquireImages, rawImageQueue, ref(loopFlags[i]));
}
// Process images
while (true)
{
// Process the images
for (int cam{}; cam < NUM_CAMERAS; ++cam)
{
rawImageVec.push_back(rawImageQueue.front());
rawImageQueue.pop();
}
int processResult = ProcessImages(move(rawImageVec), count);
if (processResult)
{
std::cout << "Leaving while loop.\n"; // In my application this is triggered by the user
break;
}
rawImageVec.clear();
++count;
}
// Shutdown other threads
for (auto & flag : loopFlags) {
flag = true;
}
// Wait for threads to actually finish.
for (auto& thread : threads) {
thread.join();
}
return 0;
}
Some of you may have already noticed my blunder. What I know is that this program throws an exception at rawImageVec.push_back(rawImageQueue.front());.
The output after throwing the exception reads as follows:
Debug Assertion Failed!
Program: C:\WINDOWS\SYSTEM32\MSVCP140D.dll
File: c:\program files (x86)\microsoft visual studio 14.0\vc\include\deque
Line: 329
Expression: deque iterator not dereferencable
I understand the cause of the issue is probably that I'm reading something that is shared with another thread (Am I correct?). How do I resolve this?
I followed Praetorian's advice in the comments: after checking whether rawImageQueue is empty, I see that it's always empty. I'm not sure what's causing this.
Here is a generalized example of producer/consumer on a shared queue. The idea is that if you're writing to and reading from a data structure from multiple threads, you need some kind of protection around the accesses.
For this, the example below uses condition variables and a mutex.
#include <thread>
#include <iostream>
#include <chrono>
#include <queue>
#include <mutex>
#include <vector>
#include <condition_variable>
#include <algorithm>
using namespace std::chrono_literals;
using std::vector;
using std::thread;
using std::unique_lock;
using std::mutex;
using std::condition_variable;
using std::queue;
class WorkQueue
{
condition_variable work_available;
mutex work_mutex;
queue<int> work;
public:
void push_work(int item)
{
unique_lock<mutex> lock(work_mutex);
bool was_empty = work.empty();
work.push(item);
lock.unlock();
if (was_empty)
{
work_available.notify_one();
}
}
int wait_and_pop()
{
unique_lock<mutex> lock(work_mutex);
while (work.empty())
{
work_available.wait(lock);
}
int tmp = work.front();
work.pop();
return tmp;
}
};
int main() {
WorkQueue work_queue;
auto producer = [&]() {
while (true) {
work_queue.push_work(10);
std::this_thread::sleep_for(2ms);
}
};
vector<thread> producers;
producers.push_back(std::thread(producer));
producers.push_back(std::thread(producer));
producers.push_back(std::thread(producer));
producers.push_back(std::thread(producer));
std::thread consumer([&]() {
while (true)
{
int work_to_do = work_queue.wait_and_pop();
std::cout << "Got some work: " << work_to_do << std::endl;
}
});
std::for_each(producers.begin(), producers.end(), [](thread &p) {
p.join();
});
consumer.join();
}
Your case is relatively simple, as it seems you have just one producer and one consumer. Also, image processing sounds quite slow (slow enough not to worry about thread contention), and you're switching from a single-threaded version, so there's probably no need to bother with highly efficient lock-free implementations.
I'd recommend studying this pseudocode: https://en.wikipedia.org/wiki/Producer%E2%80%93consumer_problem#Using_monitors, and then learning about condition variables if you need to: http://en.cppreference.com/w/cpp/thread/condition_variable.
I have a program which takes several input from the user and based on the input it posts the work to a thread pool like the following:
while (getline (file, input)){
if(input =='a'){
ioService_.post(boost::bind(&myClass::myFunction, this, input, counter));
} //end if
else if(input=='b'){
ioService_.post(boost::bind(importedFunction, input));
}
else{
break;
}
}//end while
threadpool.join_all();
}
The function importedFunction is imported from another class, and the code works correctly with it, but for myFunction it does not post the work.
I was originally binding the function as this:
ioService_.post(boost::bind(myClass::myFunction, input, counter));
and it was giving me an error which is:
/usr/local/include/boost/bind/bind.hpp:75:22: Type 'void
(multithreading::*)(std::__1::basic_string, int)' cannot be used
prior to '::' because it has no members
and then I changed it to this, which solved the error:
ioService_.post(boost::bind(&myClass::myFunction, this, input, counter));
but now the work is not submitted and I don't know why.
I suspect you may have forgotten to run() the io_service.
If so, add
ioService_.run();
somewhere.
And perhaps read up on io_service::work to keep the service alive between tasks. It is possible that the service threads completed run() before you posted the work.
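A minimal sketch of that pattern (the names here are illustrative; adapt them to your ioService_ and threadpool members):
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <iostream>
#include <memory>

int main()
{
    boost::asio::io_service ioService;
    // the work object keeps run() from returning while no handlers are queued yet
    auto work = std::make_unique<boost::asio::io_service::work>(ioService);

    boost::thread_group threadpool;
    for (unsigned i = 0; i < 4; ++i)
        threadpool.create_thread([&ioService] { ioService.run(); });

    ioService.post([] { std::cout << "task ran" << std::endl; });

    work.reset();           // no more work is coming; run() returns once the queue drains
    threadpool.join_all();  // wait for the pool to finish the outstanding tasks
    return 0;
}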
I have faced the same issue. It is because of io_service::work: constructing the work object notifies the io_service that work is starting, which keeps run() from returning until the work object is destroyed.
Here is a working example, if someone else needs it.
#include <iostream>
#include <unistd.h>
#include <thread>
#include <vector>
#include <algorithm>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/move/move.hpp>
#include <boost/make_unique.hpp>
namespace asio = boost::asio;
typedef boost::packaged_task<int> task_t;
typedef boost::shared_ptr<task_t> ptask_t;
class Task
{
public:
int execute(std::string command)
{
//TODO actual logic
std::cout<< "\nThread:" << command << std::endl;
int sum = 0;
for(int i = 0; i < 5; i++)
{
sum+=i;
}
return sum;
}
};
void push_job(Task* worker, std::string seconds, boost::asio::io_service& io_service
, std::vector<boost::shared_future<int> >& pending_data) {
ptask_t task = boost::make_shared<task_t>(boost::bind(&Task::execute, worker, seconds));
boost::shared_future<int> fut(task->get_future());
pending_data.push_back(fut);
io_service.post(boost::bind(&task_t::operator(), task));
}
int main()
{
Task* taskPtr = new Task();
boost::asio::io_service io_service;
boost::thread_group threads;
std::unique_ptr<boost::asio::io_service::work> service_work;
service_work = boost::make_unique<boost::asio::io_service::work>(io_service);
for (int i = 0; i < boost::thread::hardware_concurrency() ; ++i)
{
threads.create_thread(boost::bind(&boost::asio::io_service::run,
&io_service));
}
std::vector<boost::shared_future<int> > pending_data; // vector of futures
push_job(taskPtr, "4", io_service, pending_data);
push_job(taskPtr, "5", io_service, pending_data);
push_job(taskPtr, "6", io_service, pending_data);
push_job(taskPtr, "7", io_service, pending_data);
boost::wait_for_all(pending_data.begin(), pending_data.end());
int total_sum = 0;
for(auto result : pending_data){
total_sum += result.get();
}
std::cout<< "Total sum: "<< total_sum << std::endl;
// release the work guard and join the pool so the io_service threads exit cleanly
service_work.reset();
threads.join_all();
delete taskPtr;
return 0;
}