Consider a situation where you need to maintain 256 TCP connections to devices, just for occasionally sending commands. I want to do this in parallel (it needs to block until it gets the response). I'm trying to use QThreadPool for this purpose, but I have some doubts about whether it is possible.
I tried to use QRunnable, but I'm not sure how sockets will behave between threads (should a socket only be used in the thread that created it?).
I'm also worried about the efficiency of this solution, so I would be glad if somebody could propose some alternatives, not necessarily using Qt.
Below I'm posting some snippets of the code.
class Task : public QRunnable {
public:
Task(){
//creating TaskSubclass instance and socket in it
}
private:
TaskSubclass *sub;
void run() override {
//some debug info and variable setting...
sub->doSomething( args );
return;
}
};
class TaskSubclass {
Socket *sock; // socket instance
//...
void doSomething( args )
{
//writing to socket here
}
};
class MainProgram : public QObject{
Q_OBJECT
private:
QThreadPool *pool;
Task *tasks;
public:
MainProgram(){
pool = new QThreadPool(this);
//create tasks here
}
void run(){
//decide which task to start
pool->start(&tasks[i]); // i = index of the chosen task
}
};
My favorite solution for this problem is to multiplex your sockets using select(). That way you don't need to create additional threads, and it is a very "POSIX" way to do it.
See for example this tutorial:
http://www.binarytides.com/multiple-socket-connections-fdset-select-linux/
Or this related question:
Using select(..) on client
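For a rough idea of what that looks like, here is a minimal sketch of select()-based multiplexing over already connected POSIX sockets (plain C++, no Qt; the function name and the assumption that the 256 descriptors are already connected are just for illustration):
#include <sys/select.h>
#include <unistd.h>
#include <vector>

// Wait until any of the given sockets has data, then read it.
void poll_sockets(const std::vector<int>& fds)
{
    fd_set readfds;
    FD_ZERO(&readfds);
    int maxfd = -1;
    for (int fd : fds) {
        FD_SET(fd, &readfds);
        if (fd > maxfd) maxfd = fd;
    }

    // Blocks until at least one socket is readable.
    if (select(maxfd + 1, &readfds, nullptr, nullptr, nullptr) > 0) {
        for (int fd : fds) {
            if (FD_ISSET(fd, &readfds)) {
                char buf[1024];
                ssize_t n = read(fd, buf, sizeof(buf));
                (void)n; // handle the device's response here
            }
        }
    }
}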
As OMD_AT has already pointed out, the best solution is to use select() and let the kernel do the job for you :-)
Here you have an example of an async approach and a synchronous multi-threaded approach.
In this example we create 10 connections to a Google web service and make a simple GET request to the server; we measure how long all connections in each approach take to receive the response from the Google server.
Be aware that you should use a faster web server (e.g. one on localhost) for a real test, because network latency has a big impact on the result.
#include <QCoreApplication>
#include <QTcpSocket>
#include <QtConcurrent/QtConcurrentRun>
#include <QElapsedTimer>
#include <QAtomicInt>
class Task : public QRunnable
{
public:
Task() : QRunnable() {}
static QAtomicInt counter;
static QElapsedTimer timer;
virtual void run() override
{
QTcpSocket* socket = new QTcpSocket();
socket->connectToHost("www.google.com", 80);
socket->write("GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n");
socket->waitForReadyRead();
if(!--counter) {
qDebug("Multiple Threads elapsed: %lld nanoseconds", timer.nsecsElapsed());
}
}
};
QAtomicInt Task::counter;
QElapsedTimer Task::timer;
int main(int argc, char *argv[])
{
QCoreApplication app(argc, argv);
// init
int connections = 10;
Task::counter = connections;
QElapsedTimer timer;
/// Async via One Thread (Select)
// handle the data
auto dataHandler = [&timer,&connections](QByteArray data) {
Q_UNUSED(data);
if(!--connections) qDebug(" Single Threads elapsed: %lld nanoseconds", timer.nsecsElapsed());
};
// create 10 connection to google.com and send an http get request
timer.start();
for(int i = 0; i < connections; i++) {
QTcpSocket* socket = new QTcpSocket();
socket->connectToHost("www.google.com", 80);
socket->write("GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n");
QObject::connect(socket, &QTcpSocket::readyRead, [dataHandler,socket]() {
dataHandler(socket->readAll());
});
}
/// Async via Multiple Threads
Task::timer.start();
for(int i = 0; i < connections; i++) {
QThreadPool::globalInstance()->start(new Task());
}
return app.exec();
}
Prints:
Multiple Threads elapsed: 62324598 nanoseconds
Single Threads elapsed: 63613967 nanoseconds
Although an answer is already accepted, I would like to share my own approach.
What I understood from your question: having 256 currently active connections, from time to time you send a request (a "command", as you named it) to one of them and wait for the response. Meanwhile, you want to make this process multithreaded, and though you said "It needs to block until it gets the response", I assume you meant blocking the thread that handles the request-response exchange, not the main thread.
If I indeed understood the question correctly, here is how I suggest doing it with Qt:
#include <functional>
#include <QObject> // need to add "QT += core" in .pro
#include <QTcpSocket> // QT += network
#include <QtConcurrent> // QT += concurrent
#include <QFuture>
#include <QFutureWatcher>
class CommandSender : public QObject
{
Q_OBJECT
public:
// Sends a command via connection and blocks
// until the response arrives or timeout occurs
// then passes the response to a handler
// when the handler is done - unblocks
void SendCommand(
QTcpSocket* connection,
const Command& command,
void(*responseHandler)(Response&&))
{
const int timeout = 1000; // milliseconds, set it to -1 if you want no timeouts
// Sending a command (blocking)
connection->write(command.ToByteArray()); // see QByteArray for more details
if (!connection->waitForBytesWritten(timeout)) {
qDebug() << connection->errorString();
emit error(connection);
return;
}
// Waiting for a response (blocking)
QDataStream in{ connection };
QString message;
do {
if (!connection->waitForReadyRead(timeout)) {
qDebug() << connection->errorString();
emit error(connection);
return;
}
in.startTransaction();
in >> message;
} while (!in.commitTransaction());
responseHandler(Response{ message }); // Translate message to a response and handle it
}
// Non-blocking version of SendCommand
void SendCommandAsync(
QTcpSocket* connection,
const Command& command,
void(*responseHandler) (Response&&))
{
QFutureWatcher<void>* watcher = new QFutureWatcher<void>{ this };
connect(watcher, &QFutureWatcher<void>::finished, [this, connection, watcher] ()
{
emit done(connection);
watcher->deleteLater();
});
// Does not block,
// emits "done" when finished
QFuture<void> future
= QtConcurrent::run(this, &CommandSender::SendCommand, connection, command, responseHandler);
watcher->setFuture(future);
}
signals:
void done(QTcpSocket* connection);
void error(QTcpSocket* connection);
};
Now you can send a command to a socket using a separate thread taken from a thread pool: under the hood, QtConcurrent::run() uses the global QThreadPool instance that Qt provides for you. That thread blocks until it gets a response back and then handles it with responseHandler. Meanwhile, your main thread, which manages all your commands and sockets, stays unblocked. Just catch the done() signal, which tells you that the response was received and handled successfully.
One thing to note: the asynchronous version sends the request only when there is a free thread in the thread pool, and waits otherwise. Of course, that is the behavior of any thread pool (that is exactly the point of the pattern), but do not forget about it.
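For illustration, a call site could look roughly like this; it is only a sketch, where Command, Response and the already connected QTcpSocket are the types assumed in the snippet above, and sendOneCommand is a made-up helper:
#include <QDebug>

// Hypothetical usage of the CommandSender sketch above.
void sendOneCommand(CommandSender* sender, QTcpSocket* socket, const Command& command)
{
    QObject::connect(sender, &CommandSender::done, [](QTcpSocket* connection) {
        qDebug() << "command finished on" << connection->peerAddress();
    });
    QObject::connect(sender, &CommandSender::error, [](QTcpSocket* connection) {
        qDebug() << "command failed:" << connection->errorString();
    });

    // Runs SendCommand on a pool thread; the main thread stays responsive.
    sender->SendCommandAsync(socket, command, [](Response&& response) {
        Q_UNUSED(response); // handle the response here (worker-thread context)
    });
}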
Also, I was writing this code without Qt at hand, so it may contain some errors.
Edit: As it turned out, this is not thread safe, as sockets are not reentrant in Qt.
What you can do about it is to associate a mutex with each socket and lock it every time you call one of the socket's functions. This can be done easily by creating a wrapper around the QTcpSocket class. Please correct me if I'm wrong.
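A rough sketch of such a wrapper, assuming you only ever touch the socket through it, could look like this (GuardedSocket and writeAndWait are made-up names):
#include <QMutex>
#include <QMutexLocker>
#include <QTcpSocket>

// Sketch of a mutex-guarded socket wrapper (illustrative only).
class GuardedSocket
{
public:
    explicit GuardedSocket(QTcpSocket* socket) : sock(socket) {}

    // Serializes access so two threads never touch the socket at once.
    bool writeAndWait(const QByteArray& data, int timeoutMs)
    {
        QMutexLocker locker(&mutex);
        sock->write(data);
        return sock->waitForBytesWritten(timeoutMs);
    }

private:
    QMutex mutex;
    QTcpSocket* sock;
};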
Related
I use the following code to talk to a USB-serial port device:
#include "masterthread.h"
#include <QtSerialPort/QSerialPort>
#include <QTime>
#include "Windows.h"
#include "Psapi.h"
#include <QDebug>
QT_USE_NAMESPACE
MasterThread::MasterThread(QObject *parent)
: QThread(parent), waitTimeout(0), quit(false)
{
}
MasterThread::~MasterThread()
{
mutex.lock();
quit = true;
cond.wakeOne();
mutex.unlock();
wait();
}
void MasterThread::run()
{
bool currentPortNameChanged = false;
QSerialPort serial;
serial.setPortName("COM3");
serial.setBaudRate(57600);
serial.setStopBits(static_cast<QSerialPort::StopBits>(1));
serial.setDataBits(static_cast<QSerialPort::DataBits>(8));
serial.setParity(static_cast<QSerialPort::Parity>(0));
serial.open(QIODevice::ReadWrite);
//Tell the serial port connected device to start talking
//--------------------------------------
const char init[] = { 0x0d, 0x0d, 0x0d };
serial.write(init, sizeof(init));
const char* cmd = "mavlink stop\n";
serial.write(cmd, strlen(cmd));
serial.write(init, 2);
cmd = "uorb start";
serial.write(cmd, strlen(cmd));
serial.write(init, 2);
cmd = "sh /etc/init.d/rc.usb\n";
serial.write(cmd, strlen(cmd));
serial.write(init, 4);
serial.waitForBytesWritten(100);
int i = 0;
int j = 0;
forever
{
//Write test data out
//-----------------------------
QByteArray test(2000, 't');
serial.write(test);
bool check = serial.waitForBytesWritten(100);
if (!check)
{
qDebug() << "FAIL: " << j++;
}
if (serial.waitForReadyRead(20))
{
QByteArray responseData = serial.readAll();
while (serial.waitForReadyRead(10))
responseData += serial.readAll();
QString response(responseData);
qDebug() << response;
}
QThread::msleep(20);
//Print memory usage
//---------------------------------------------------
if (i++ % 10 == 0)
{
PROCESS_MEMORY_COUNTERS memcount;
if (!GetProcessMemoryInfo(GetCurrentProcess(), &memcount, sizeof(memcount))) return;
qDebug()<<"----------------------------" << memcount.WorkingSetSize / 1024 << "KB memory used";
}
} // end forever
qDebug() << "Exiting forever loop";
}
with a simple main.cpp as:
#include <QApplication>
#include "masterthread.h"
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
MasterThread thread;
thread.start();
return app.exec();
}
But the memory usage keeps increasing, by about 5~10 MB per hour, as if there is some leak.
The device is supposed to stay connected for days and weeks...
What am I doing wrong here? I am on Qt 5.6, Windows 7, debug build.
Many Qt components have an implicit dependency on the event loop.
While you are starting the main thread's event loop with the call to app.exec(), you are not handling events generated by the QObjects created inside MasterThread::run(). The details and nuances of event handling in Qt are very well described on this page: https://wiki.qt.io/Threads_Events_QObjects#Threads_and_QObjects
But the solution boils down to this: if you want queued-up Qt events to be processed in a thread that is busy with a long-running task, you should call QCoreApplication::processEvents() from time to time. This will prevent Qt events from queueing up endlessly.
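Applied to the code in the question, that amounts to something like the sketch below (only the relevant part of the loop is shown; everything else stays as it is):
#include <QCoreApplication>

void MasterThread::run()
{
    // ... serial port setup as in the original run() ...
    forever
    {
        // ... write the test data and read the responses as before ...

        // Drain this thread's event queue from time to time so queued
        // events (including deferred deletions) do not pile up.
        QCoreApplication::processEvents();

        QThread::msleep(20);
    }
}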
EDITED after looking at the Qt 5.7, 5.6 and 5.5 code and reading the docs.
As an answer is already accepted, I will just add some thoughts here, since this is too long for a comment.
To keep things short: the answer you accepted is wrong.
There are two sides to this story. And as SO answers are often taken as-is as long as they work, I'd like to explain myself...
If you look at the code provided, there is nothing wrong with it. All objects are properly stack allocated and should be destroyed automatically.
The point is that Qt Serial Port uses deleteLater(), and the question then becomes how those allocations get freed properly.
If any module/object/code uses deleteLater(), it requires an event loop; if deleteLater() is called on a thread without an event loop, the object will only be deleted after the thread terminates. Since no event loop is running in the code above, processEvents() will not help. In fact, processEvents() is not meant for this at all: the whole idea of deleteLater() is to return from the context that called it and perform the deletion on a later iteration of the event loop, and this is checked in the Qt source code, so calling processEvents() right afterwards without entering a new loop iteration does nothing at all. That is why the answer you accepted is wrong.
Conclusion:
If any object requires a running event loop, that should be EXPLICITLY stated in the documentation, as there is nothing wrong with using a QIODevice in sync mode outside an event loop.
So in my opinion the point is: it's a bug in Qt Serial Port itself, which I suggest you report.
In general it's bad practice in Qt to run never-ending loops.
It's much better and cleaner to use the QObject worker approach: push a worker object to a thread that has a proper event loop running, and so on.
For small 'threaded' tasks it's much better to use QtConcurrent.
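For illustration, dispatching such a small one-off task could look roughly like this (a minimal sketch; launchBackgroundJob is a made-up helper):
#include <QtConcurrent/QtConcurrentRun>
#include <QFuture>

void launchBackgroundJob()
{
    // Runs the lambda on a thread from the global QThreadPool; the returned
    // QFuture can be watched with a QFutureWatcher or simply ignored.
    QFuture<void> future = QtConcurrent::run([]() {
        // do the short-lived background work here
    });
    Q_UNUSED(future);
}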
Proper Workaround:
You will have a thread with a properly running event loop and a timer firing every 20 ms to do your work:
// main thread:
class Worker: public QObject {
Q_OBJECT
public:
Worker() {}
public slots:
void onInit() {
// initialize everything
startTimer(20);
}
protected:
void timerEvent(QTimerEvent *) override {
// do your things every 20ms
}
};
...
QThread * pWorkerThread = new QThread();
pWorkerThread->setObjectName(QString("Serial"));
Worker * pWorker = new Worker();
pWorker->setObjectName(QString("Common Storage Impl"));
pWorker->moveToThread(pWorkerThread);
connect(pWorkerThread, SIGNAL(started()), pWorker, SLOT(onInit()));
connect(pWorkerThread, SIGNAL(finished()), pWorker, SLOT(deleteLater()));
connect(pWorkerThread, SIGNAL(finished()), pWorkerThread, SLOT(deleteLater()));
pWorkerThread->start();
...
I am using the HTTP Server 3 example from Boost as my learning tool (http://www.boost.org/doc/libs/1_53_0/doc/html/boost_asio/examples.html#boost_asio.examples.http_server_3) for asynchronous message handling.
I have taken the example and turned it into a library with a server object I can instantiate in my programs. The only thing I have done to the above example is remove the main.cpp and compile it as a library. It works, to the extent that I can instantiate the server object in my code and pass messages to it from the command line.
Where I am struggling is how to terminate the server gracefully. From the sample code I see this:
server::server(const std::string& address, const std::string& port,
std::size_t thread_pool_size,
Handler &handler)
: thread_pool_size_(thread_pool_size),
signals_(io_service_),
acceptor_(io_service_),
new_connection_(),
request_handler_(handler)
{
// Register to handle the signals that indicate when the server should exit.
// It is safe to register for the same signal multiple times in a program,
// provided all registration for the specified signal is made through Asio.
signals_.add(SIGINT);
signals_.add(SIGTERM);
signals_.async_wait(boost::bind(&server::handle_stop, this));
So an asynchronous wait is set up to listen for those signals and respond to them.
I have implemented this server object in a thread in my program as follows:
class ServerWorker
{
public:
ServerWorker(std::string theHost, std::string thePort)
{
Host = theHost;
Port = thePort;
}
void Start()
{
try
{
MYRequestHandler handler;
int nCores = boost::thread::hardware_concurrency();
mServer = new server(Host, Port, nCores, handler);
mServer->run();
}
catch(std::exception &e) { /* do something */ }
}
void Stop()
{
mServer->stop(); // this should raise a signal and send it to the server
// but don't know how to do it
}
private:
std::string Host;
std::string Port;
server *mServer;
};
TEST(BSGT_LBSSERVER_STRESS, BSGT_SINGLETON)
{
// Launch as server on a new thread
ServerWorker sw(BSGT_DEFAULT_IPADDRESS, BSGT_DEFAULT_PORT_STR);
boost::function<void()> th_func = boost::bind(&ServerWorker::Start, &sw);
boost::thread swThread = boost::thread(th_func);
// DO SOMETHING
// How do I signal the server in the swThread to stop?
}
How do I implement the stop() method on the server object to send the signal to itself? I have tried:
1) raise(SIGTERM) - kills the whole program
2) raise(SIGINT) - kills the whole program
raise() is appropriate for having a process signal itself.
void ServerWorker::Stop()
{
std::raise(SIGTERM);
}
Be aware that raise() is asynchronous. It will issue the signal and return immediately. Hence, control may continue before the io_service processes the enqueued SignalHandler.
void run_server()
{
// Launch as server on a new thread
ServerWorker server_worker(...);
boost::thread worker_thread([&server_worker]() { server_worker.Start(); });
...
// Raises SIGTERM. May return before io_service is stopped.
server_worker.Stop();
// Need to synchronize with worker_thread. The `worker_thread` may still be
// in `ServerWorker::Start()` which would go out of scope. Additionally,
// the `worker_thread` is joinable, so its destructor may invoke
// `std::terminate()`.
worker_thread.join();
}
Here is a minimal example demonstrating using Boost.Asio signal handling, raise(), and synchronization:
#include <cassert>
#include <csignal>
#include <iostream>
#include <thread>
#include <boost/asio.hpp>
int main()
{
boost::asio::io_service io_service;
// Prevent io_service from running out of work.
boost::asio::io_service::work work(io_service);
// Boost.Asio will register an internal handler for SIGTERM.
boost::asio::signal_set signal_set(io_service, SIGTERM);
signal_set.async_wait(
[&io_service](
const boost::system::error_code& error,
int signal_number)
{
std::cout << "Got signal " << signal_number << "; "
"stopping io_service." << std::endl;
io_service.stop();
});
// Raise SIGTERM.
std::raise(SIGTERM);
// By the time raise() returns, Boost.Asio has handled SIGTERM with its
// own internal handler, queuing it internally. At this point, Boost.Asio
// is ready to dispatch this notification to a user signal handler
// (i.e. those provided to signal_set.async_wait()) within the
// io_service event loop.
std::cout << "io_service stopped? " << io_service.stopped() << std::endl;
assert(false == io_service.stopped());
// Initiate thread that will run the io_service. This will invoke
// the queued handler that is ready for completion.
std::thread work_thread([&io_service]() { io_service.run(); });
// Synchronize on the work_thread, letting it run to completion.
work_thread.join();
// The io_service has been explicitly stopped in the async_wait
// handler.
std::cout << "io_service stopped? " << io_service.stopped() << std::endl;
assert(true == io_service.stopped());
}
Output:
io_service stopped? 0
Got signal 15; stopping io_service.
io_service stopped? 1
I'm writing a threaded TcpServer (each client in its own thread) using QTcpServer and QTcpSocket. The client application works correctly and sends data every 3 seconds, but the readyRead() signal never fires, meaning my receive_data() function is never called. When I use socket->waitForReadyRead() and call receive_data() myself, everything works fine. Please have a look at the code below; maybe I made some mistake with the moveToThread / connect functionality Qt offers.
Client.h
#ifndef CLIENT_H
#define CLIENT_H
#include <QThread>
#include <QTcpSocket>
#include <QHostAddress>
#include "PacketDefinitions.h"
#include "tcpserver.h"
class Client : public QObject
{
Q_OBJECT
public:
explicit Client(int socket,TcpServer *parent,bool auto_disconnect = true);
~Client();
bool isGameServer(){return is_gameserver;}
GameServerPacket getGameServerData(){return gameserver;}
void run();
private:
QTcpSocket* client;
TcpServer *parent_server;
int socket;
GameServerPacket gameserver;
ClientPacket clientdata;
bool is_gameserver;
bool auto_disconnect;
QHostAddress client_ip;
quint16 client_port;
signals:
void disconnected(Client *);
private slots:
void remove_from_clientlist();
void receive_data();
void display_error(QAbstractSocket::SocketError error);
};
#endif // CLIENT_H
Client.cpp
#include "client.h"
#include "PacketDefinitions.h"
#include "time.h"
#include <iostream>
Client::Client(int _socket, TcpServer *parent,bool _auto_disconnect)
{
auto_disconnect = _auto_disconnect;
parent_server = parent;
is_gameserver = false;
socket = _socket;
}
void Client::run(){
client = new QTcpSocket();
if(client->setSocketDescriptor(socket) == false){
std::cout << client->errorString().toStdString() << std::endl;
remove_from_clientlist();
return;
}
connect(client,SIGNAL(disconnected()),this,SLOT(remove_from_clientlist()));
if(connect(client,SIGNAL(readyRead()),this,SLOT(receive_data()),Qt::DirectConnection) == false) return;
connect(client,SIGNAL(error(QAbstractSocket::SocketError)),this,SLOT(display_error(QAbstractSocket::SocketError)));
client_ip = client->peerAddress();
client_port = client->peerPort();
std::cout << "New incomming connection " << client->peerAddress().toString().toStdString() << ":" << client->peerPort() << std::endl;
//this works fine
// while(client->waitForReadyRead()){
// receive_data();
// }
}
void Client::receive_data(){
QDataStream stream(client);
stream.setVersion(QDataStream::Qt_5_2);
quint32 magic; stream >> magic;
//interpret data
if(magic == GAMESERVER_MAGIC){
is_gameserver = true;
gameserver.Read(stream);
gameserver.port = client_port;
gameserver.ip = client_ip;
time(&(gameserver.last_update));
parent_server->add_server(gameserver.ip.toString(),gameserver);
std::cout << "GameServer " << gameserver.name << " registerd" << std::endl;
}else if(magic == CLIENT_MAGIC){
is_gameserver = false;
clientdata.Read(stream);
//get nearby servers
GameServerListPacket server_list = parent_server->getServerList(clientdata);
QDataStream outstream(client);
server_list.Write(outstream);
std::cout << "Sending ServerList(" << server_list.server_count << ") to " << client->peerAddress().toString().toStdString() << std::endl;
if(auto_disconnect){
//client->flush();
client->waitForBytesWritten();
}
}else{
std::cout << "Unknown package " << magic << std::endl;
}
//not enough data read, something is wrong; just for debugging
if(client->bytesAvailable()> 0) std::cout << "BytesAvailable " << client->bytesAvailable() << std::endl;
if(auto_disconnect) remove_from_clientlist();//close the connection once the serverlist was deployed
}
In the TcpServer.cpp add_client() is called when newConnection() was emitted by the QTcpServer:
void TcpServer::add_client(){
while(server->hasPendingConnections()){
QTcpSocket *socket = 0;
if(thread_pool.size() < max_connections && (socket = server->nextPendingConnection())){
QThread *thread = new QThread();
Client * client = new Client(socket->socketDescriptor(),this,auto_disconnect);
client->moveToThread(thread);
client->run();
thread->start();
connect(client,SIGNAL(disconnected(Client*)),this,SLOT(remove_client(Client*)));
WRITELOCK(thread_pool.insert(client,thread));
}
}
}
The order of calling client->run() and thread->start() doesn't seem to matter. Some time ago the code (not this exact code) worked fine, but I can't remember what I changed that made it fail. Any help is appreciated!
Thanks in advance
Fabian
Edit 1:
I derived from QTcpServer and reimplemented void incomingConnection(qintptr socketDescriptor), which works fine. I don't use QThreadPool; it's just a QMap, and remove_client(Client*) closes the QTcpSocket, stops the thread and removes it from the map. Everything works fine on Linux; on Windows I get the following error: QSocketNotifier: socket notifiers cannot be disabled from another thread ASSERT failure in QCoreApplication::sendEvent: "Cannot send events to objects owned by a different thread....
It is caused by this remove_client(Client*):
void TcpServer::remove_client(Client *client){
//disconnect(client,SIGNAL(disconnected(Client*)),this,SLOT(remove_client(Client*)));
lock.lockForWrite();
QMap<Client*,QThread*>::iterator itr = thread_pool.find(client);
if(itr != thread_pool.end()){
//delete itr.key(); causes the problem on windows
itr.value()->quit();
itr.value()->wait();
delete itr.value();
thread_pool.erase(itr);
}
lock.unlock();
}
Where and how should I free the Client object? If I used QThreadPool, there would be no way to iterate over the clients in case I want to send a message to more than one of them. I could use a list/map holding only the Client*, but then QThreadPool might delete them right before I want to access them. Any suggestions?
There is a problem with how you move your client object to a new thread. Actually, Client::run() executes in the same thread as TcpServer::add_client().
Also, the QTcpSocket client remains in the default thread, while its container (the Client object) is moved to a new thread. That's why the connection with the Qt::DirectConnection type doesn't work.
Try this:
class Client : public QObject
{
Q_OBJECT
...
public slots:
void run();
...
};
Client::Client(int _socket, TcpServer *parent,bool _auto_disconnect)
{
...
client = new QTcpSocket(this);
}
void Client::run()
{
...
connect(client, SIGNAL(readyRead()), this, SLOT(receive_data()));
...
}
And here's how you should move your client to a new thread:
void TcpServer::add_client()
{
...
QThread *thread = new QThread();
Client * client = new Client(socket->socketDescriptor(),this,auto_disconnect);
client->moveToThread(thread);
connect(thread, SIGNAL(started()), client, SLOT(run()));
thread->start();
...
}
There are a number of things wrong with your code.
1. You have two QTcpSocket objects trying to collect data from the same underlying socket descriptor. You appear to use the first one only to get access to the socket descriptor value, which you then pass to your Client class. You might end up losing data because you won't be able to tell which socket will get what data from the operating system.
If you are creating a class derived from QTcpServer, reimplement QTcpServer::incomingConnection(qintptr socketDescriptor) instead of your existing TcpServer::add_client() function. Since this protected function is called once for every new connection, you don't need any connections to the newConnection() signal, nor do you have to loop while connections are pending. You will then also have only one QTcpSocket attached to each socket descriptor, so you won't lose data.
2. You seem to be using QThreadPool to manage threads. If you make Client a class derived from QRunnable (note that with multiple inheritance involving QObject, QObject must always come first), you don't need to check the maximum number of connections and you can eliminate all the QThread boilerplate.
Taking 1. and 2. into account, your TcpServer::add_client() function will be replaced with:
void TcpServer::incomingConnection(qintptr socketDescriptor){
Client * client = new Client(socketDescriptor,this,auto_disconnect);
connect(client,SIGNAL(disconnected(Client*)),this,SLOT(remove_client(Client*)));
QThreadPool::globalInstance()->start(client);
}
With QThreadPool, there's no need to check whether the max number of threads has been reached or not. If the maximum has been reached, any new calls to start() will queue the next connection until a thread becomes available.
3. The reason your socket is not reading any data unless you call waitForReadyRead() is that you're executing the run() function in the main thread, creating the local socket in the main thread, making a DirectConnection to the Client instance and only then moving client to a different thread. You cannot have direct connections between threads.
You will need to add a local QEventLoop to your run() function to handle all events and signals of your new thread, but remember to connect signals to the loop's quit() slot so the run() function can exit; otherwise your thread will run forever.
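As a rough sketch, and assuming the QRunnable approach from point 2 (so run() executes in the pool thread and the socket is created there), it could look like this; the actual parsing stays whatever your receive_data() already does:
#include <QEventLoop>

void Client::run()
{
    QTcpSocket clientSocket; // lives in the pool thread that executes run()
    if (!clientSocket.setSocketDescriptor(socket)) {
        remove_from_clientlist();
        return;
    }

    QEventLoop loop;
    // Handle incoming data in this thread as it arrives.
    connect(&clientSocket, &QTcpSocket::readyRead, [&clientSocket]() {
        QDataStream stream(&clientSocket);
        // ... same parsing as in receive_data() ...
    });
    // Quit the loop (and thus run()) once the peer disconnects.
    connect(&clientSocket, &QTcpSocket::disconnected, &loop, &QEventLoop::quit);

    loop.exec(); // processes this thread's events until quit() is called
}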
I am writing a server as a Qt console application. I have the server set up to wait for a socket connection, but I also need to allow a user to input commands into the server for managing it. Both are working independently. However, the problem I ran into is that when I'm in a while loop accepting and processing input commands, the server doesn't accept connections.
I have a Socket class, and in its constructor, I have:
connect(server,SIGNAL(newConnection()),this, SLOT(newConnection()));
Right under that in the constructor, I call a function that has a more in-depth version of this for getting commands from the user:
QTextStream qin(stdin, QIODevice::ReadOnly);
QString usrCmd;
while(usrCmd != "exit" && usrCmd != "EXIT") {
//Get command input and process here
}
Inside newConnection(), I just accept the next connection and then use the socket.
QTcpSocket *serverSocket = server->nextPendingConnection();
How can I make it so the server can wait for connections and for user-entered commands at the same time?
The problem with your code is that you are blocking the event loop with your while loop. So the solution is to read from stdin asynchronously. On Linux (and on Mac, I guess) you can use QSocketNotifier to be notified when data arrives on stdin and then read it manually.
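A minimal sketch of that QSocketNotifier approach (POSIX only, assuming Qt 5.7+ for qOverload; everything else here is made up for illustration) could look like this:
#include <QCoreApplication>
#include <QSocketNotifier>
#include <QTextStream>
#include <unistd.h> // STDIN_FILENO

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    // Fires whenever stdin becomes readable, without blocking the event
    // loop, so a QTcpServer in the same application keeps accepting
    // connections.
    QSocketNotifier notifier(STDIN_FILENO, QSocketNotifier::Read);
    QObject::connect(&notifier, qOverload<int>(&QSocketNotifier::activated),
                     [&app](int) {
        QTextStream qin(stdin);
        const QString cmd = qin.readLine();
        if (cmd == "exit" || cmd == "EXIT")
            app.quit();
        // else: process the command here
    });

    return app.exec();
}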
As I am using Windows, I would suggest you do it the following way (which should work on all platforms):
Open a thread for reading data from stdin.
Once you get some data (perhaps a line?), use the Qt signal-slot mechanism to pass it to the main thread for processing without blocking the event loop.
So, this is the pseudocode. MainAppClass should be your existing server class; just edit the constructor to create the new thread, and add a new slot for processing the data.
class Reader: public QThread
{
Q_OBJECT
public:
Reader(QObject * parent = 0 ): QThread(parent){}
void run(void)
{
forever{
std::string data;
std::getline (std::cin, data);
if(data == "exit")
{
emit exitServer();
return;
}
emit dataReady(QString::fromStdString(data));
}
}
signals:
void dataReady(QString data);
void exitServer();
};
class MainAppClass: public QObject
{
Q_OBJECT
public:
MainAppClass()
{
Reader * tr = new Reader(this);
connect(tr, SIGNAL(dataReady(QString)), this, SLOT(processData(QString)));
connect(tr, SIGNAL(exitServer()), this, SLOT(exitServer()));
tr->start();
}
public slots:
void processData(QString data)
{
std::cout << "Command: " << data.toStdString() << std::endl;
}
void exitServer()
{
std::cout << "Exiting..." << std::endl;
}
};
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
MainAppClass myapp; //your server
app.exec();
return 0;
}
Since I already wrote some simple guidelines on how to use QTcpSocket, here is the brief version.
When you get a client QTcpSocket, connect its readyRead() signal to some slot, and read the data from the sender() object. You don't need to read anything in the constructor.
For reading you can use the standard QIODevice functions.
Note: this is pseudocode, and you may need to change a few things (check the state of the stream while reading, save pointers to the sockets in some list, subscribe to the disconnected() signal, call listen() in the constructor, check whether the QTcpServer is listening, etc.).
So, you need to have slot onReadyRead() in your class which will have the following code:
void Server::readyReadSlot()
{
QTcpSocket *client = (QTcpSocket*)sender(); // get socket which emited the signal
while(client->canReadLine()) // read all lines!
// If there is not any lines received (you may not always receive
// whole line as TCP is stream based protocol),
// you will not leave data in the buffer for later processing.
{
QString line = client->readLine();
processLine(line); // or emit new signal if you like
}
}
Inside newConnection() you need to connect readyRead() signal with your slot.
void Server::newConnection()
{
QTcpSocket *clientSocket = server->nextPendingConnection();
connect(clientSocket, SIGNAL(readyRead()), this, SLOT(readyReadSlot()));
}
So I'm starting to do some research on alternatives for implementing a high volume client/server system, and I'm currently looking at Poco's Reactor framework since I'm using Poco for so much of my application frameworks now.
The incoming packet sizes are going to be pretty small, so I think it will work fine from the perspective of reading the data from the clients. But the operations that will be performed based on the client input will be relatively expensive and may need to be offloaded to another process or even another server. And the responses sent back to the client will sometimes be fairly large. So obviously I can't block the reactor thread while that is taking place.
So I'm thinking if I just read the data in the reactor event handler and then pass it to another thread(pool) that processes the data, it would work out better.
What I'm not too sure about is the process for sending the responses back to the client when the operations are complete.
I can't find too much information about the best ways to use the framework. But I've done some testing and it looks like the reactor will fire the WritableNotification event repeatedly while the socket is writable. So would the optimal process be to queue up the data that needs to be sent in the object that receives the WritableNotification events and send small chunks each time the event is received?
Update: When I started testing this, I was horrified to discover that server CPU usage went up to 100% on the core the server app was running on, with just a single connection. But after some digging I found what I was doing wrong: I don't need to register for WritableNotification events when the service handler is created, only when I have data to send. Then, once all of the data has been sent, I should unregister the event handler. This way the reactor doesn't keep calling the event handlers over and over when there is nothing to send. Now my CPU usage stays close to 0 even with 100 connections. Whew!
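In Poco terms that register-on-demand pattern looks roughly like the sketch below. The class name MyServiceHandler and the members (_outbox, _writeRegistered, _socket, _reactor) are assumptions for illustration, not part of the original code:
// Queue outgoing data and register for WritableNotification only while
// something is waiting to be sent.
void MyServiceHandler::send(const std::string& data)
{
    _outbox.append(data);
    if (!_writeRegistered)
    {
        _reactor.addEventHandler(_socket,
            Observer<MyServiceHandler, WritableNotification>(*this, &MyServiceHandler::onWritable));
        _writeRegistered = true;
    }
}

void MyServiceHandler::onWritable(WritableNotification* pNf)
{
    pNf->release();
    // Send as much of the queued data as the socket accepts right now.
    int n = _socket.sendBytes(_outbox.data(), static_cast<int>(_outbox.size()));
    if (n > 0)
        _outbox.erase(0, n);
    if (_outbox.empty())
    {
        // Nothing left to send: unregister so the reactor stops firing
        // WritableNotification needlessly.
        _reactor.removeEventHandler(_socket,
            Observer<MyServiceHandler, WritableNotification>(*this, &MyServiceHandler::onWritable));
        _writeRegistered = false;
    }
}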
I have written a class ServerConnector, copied from SocketConnector, but it does not call connect() on the socket, because the socket is already connected. If a reactor is started with a ServiceHandler for notifications inside the run() function of a TCPServerConnection, the TCPServer class starts a new thread for each connection. So I got a multithreaded version of the reactor pattern, but I do not know whether this is the best way to do it.
The ServerConnector class:
template <class ServiceHandler>
class ServerConnector
{
public:
explicit ServerConnector(StreamSocket& ss):
_pReactor(0),
_socket(ss)
/// Creates a ServerConnector, using the given Socket.
{
}
ServerConnector(StreamSocket& ss, SocketReactor& reactor):
_pReactor(0),
_socket(ss)
/// Creates a ServerConnector, using the given StreamSocket.
/// The ServerConnector registers itself with the given SocketReactor.
{
registerConnector(reactor);
onConnect();
}
virtual ~ServerConnector()
/// Destroys the ServerConnector.
{
unregisterConnector();
}
//
// this part is same with SocketConnector
//
private:
ServerConnector();
ServerConnector(const ServerConnector&);
ServerConnector& operator = (const ServerConnector&);
StreamSocket& _socket;
SocketReactor* _pReactor;
};
The echo service is an ordinary ServiceHandler:
class EchoServiceHandler
{
public:
EchoServiceHandler(StreamSocket& socket, SocketReactor& reactor):
_socket(socket),
_reactor(reactor)
{
_reactor.addEventHandler(_socket, Observer<EchoServiceHandler, ReadableNotification>(*this, &EchoServiceHandler::onReadable));
_reactor.addEventHandler(_socket, Observer<EchoServiceHandler, ErrorNotification>(*this, &EchoServiceHandler::onError));
}
~EchoServiceHandler()
{
_reactor.removeEventHandler(_socket, Observer<EchoServiceHandler, ErrorNotification>(*this, &EchoServiceHandler::onError));
_reactor.removeEventHandler(_socket, Observer<EchoServiceHandler, ReadableNotification>(*this, &EchoServiceHandler::onReadable));
}
void onReadable(ReadableNotification* pNf)
{
pNf->release();
char buffer[4096];
try {
int n = _socket.receiveBytes(buffer, sizeof(buffer));
if (n > 0)
{
_socket.sendBytes(buffer, n);
} else
onError();
} catch( ... ) {
onError();
}
}
void onError(ErrorNotification* pNf)
{
pNf->release();
onError();
}
void onError()
{
_socket.shutdown();
_socket.close();
_reactor.stop();
delete this;
}
private:
StreamSocket _socket;
SocketReactor& _reactor;
};
The EchoReactorConnection class works with the Poco TCPServer class, so the reactor runs in the connection's own thread:
class EchoReactorConnection: public TCPServerConnection
{
public:
EchoReactorConnection(const StreamSocket& s): TCPServerConnection(s)
{
}
void run()
{
StreamSocket& ss = socket();
SocketReactor reactor;
ServerConnector<EchoServiceHandler> sc(ss, reactor);
reactor.run();
std::cout << "exit EchoReactorConnection thread" << std::endl;
}
};
The CppUnit test case is the same as TCPServerTest::testMultiConnections, but uses EchoReactorConnection for the multithreaded reactor:
void TCPServerTest::testMultithreadReactor()
{
ServerSocket svs(0);
TCPServerParams* pParams = new TCPServerParams;
pParams->setMaxThreads(4);
pParams->setMaxQueued(4);
pParams->setThreadIdleTime(100);
TCPServer srv(new TCPServerConnectionFactoryImpl<EchoReactorConnection>(), svs, pParams);
srv.start();
assert (srv.currentConnections() == 0);
assert (srv.currentThreads() == 0);
assert (srv.queuedConnections() == 0);
assert (srv.totalConnections() == 0);
//
// same with TCPServerTest::testMultiConnections()
//
// ....
///
}