I am trying to use the Boost 1.60.0 library with Intel Pin 2.14-71313-msvc12-windows. The following piece of code is the simple implementation I did to try things out:
#define _CRT_SECURE_NO_WARNINGS
#include "pin.H"
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#include <time.h>
#include <boost/lockfree/spsc_queue.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
namespace boost_network{
#include <boost/asio.hpp>
#include <boost/array.hpp>
}
//Buffersize of lockfree queue to use
const int BUFFERSIZE = 1000;
//Tracefiles for error / debug purpose
std::ofstream TraceFile;
//String wrapper for boost queue
class statement {
public:
statement(){ s = ""; }
statement(const std::string &n) : s(n) {}
std::string s;
};
//string queue to store inserts
boost::lockfree::spsc_queue<statement, boost::lockfree::capacity<BUFFERSIZE>> buffer; // need lockfree queue for multithreading
//Pin Lock to synchronize buffer pushes between threads
PIN_LOCK lock;
KNOB<string> KnobOutputFile(KNOB_MODE_WRITEONCE, "pintool", "o", "calltrace.txt", "specify trace file name");
KNOB<BOOL> KnobPrintArgs(KNOB_MODE_WRITEONCE, "pintool", "a", "0", "print call arguments ");
INT32 Usage()
{
cerr << "This tool produces a call trace." << endl << endl;
cerr << KNOB_BASE::StringKnobSummary() << endl;
return -1;
}
VOID ImageLoad(IMG img, VOID *)
{
//save module information (name and load address) as one statement
std::stringstream module_info;
module_info << "'" << IMG_Name(img) << "'; '" << IMG_Name(img).c_str() << "'; " << IMG_LowAddress(img) << ";";
buffer.push(statement(module_info.str()));
}
VOID Fini(INT32 code, VOID *v)
{
}
void do_somenetwork(std::string host, int port, std::string message)
{
boost_network::boost::asio::io_service ios;
boost_network::boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(host), port);
boost_network::boost::asio::ip::tcp::socket socket(ios);
socket.connect(endpoint);
boost_network::boost::system::error_code error;
socket.write_some(boost_network::boost::asio::buffer(message.data(), message.size()), error);
socket.close();
}
void WriteData(void * arg)
{
int popped; //actual amount of popped objects
const int pop_amount = 10000;
statement curr[pop_amount];
string sql_statement = "";
while (1) {
//pop more objects from buffer
while ((popped = buffer.pop(curr, pop_amount)) > 0)
{
//got new statements in buffer to insert into db: clean up statement
sql_statement.clear();
//concat into one statement
for (int i = 0; i < popped; i++){
sql_statement += curr[i].s;
}
do_somenetwork(std::string("127.0.0.1"), 50000, sql_statement);
}
PIN_Sleep(1);
}
}
int main(int argc, char *argv[])
{
PIN_InitSymbols();
//write address of label to TraceFile
TraceFile.open(KnobOutputFile.Value().c_str());
TraceFile << &label << endl;
TraceFile.close();
// Initialize the lock
PIN_InitLock(&lock);
// Initialize pin
if (PIN_Init(argc, argv)) return Usage();
// Register ImageLoad to be called when an image is loaded
IMG_AddInstrumentFunction(ImageLoad, 0);
//Start writer thread
PIN_SpawnInternalThread(WriteData, 0, 0, 0);
PIN_AddFiniFunction(Fini, 0);
// Never returns
PIN_StartProgram();
return 0;
}
When I build the above code, Visual Studio cannot find boost_network::boost::asio::ip and keeps reporting an error saying asio::ip does not exist. I had previously posted this question myself:
Sending data from a boost asio client
and after applying the solution provided there in the same workspace, the code worked fine and I was able to communicate over the network. I am not sure what is going wrong here. Wrapping Boost in a different namespace does not seem to work out, apparently because Boost must live in the default (global) namespace.
However, if I do not wrap the Boost includes in a namespace, then the line
KNOB<BOOL> KnobPrintArgs(KNOB_MODE_WRITEONCE, "pintool", "a", "0", "print call arguments ");
throws an error saying BOOL is ambiguous.
Kindly suggest a viable solution for this situation. I am using Visual Studio 2013.
The same piece of code also works with Pin alone, without the network part, and I can write the data generated by Pin into a flat file.
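One direction I have been considering (untested in this setup, and the file names below are just my own placeholders) is to move the Boost networking code into its own translation unit that never includes pin.H, so that the Pin headers and the Boost/Windows headers are never visible to each other:
// network_send.h (hypothetical name) - plain interface, no Pin or Boost headers
#pragma once
#include <string>
void do_somenetwork(const std::string &host, int port, const std::string &message);

// network_send.cpp - compiled as a separate translation unit, includes Boost but never pin.H
#include "network_send.h"
#include <boost/asio.hpp>
void do_somenetwork(const std::string &host, int port, const std::string &message)
{
    boost::asio::io_service ios;
    boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(host), port);
    boost::asio::ip::tcp::socket socket(ios);
    socket.connect(endpoint);
    boost::system::error_code error;
    socket.write_some(boost::asio::buffer(message.data(), message.size()), error);
    socket.close();
}

// The pintool source would then only #include "network_send.h" and call
// do_somenetwork(...) as before, so neither BOOL definition ever clashes.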
Related
I plan on rewriting this in assembly, so I can't use the C or C++ standard library. The code below runs perfectly; however, I want a thread instead of a second process. If you uncomment /*CLONE_THREAD|*/ on line 25, waitpid returns -1. I would like a blocking call that resumes when my thread is complete, but I couldn't figure out from the man pages what it expects me to do.
#include <sys/wait.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
int globalValue=0;
static int childFunc(void*arg)
{
printf("Global value is %d\n", globalValue);
globalValue += *(int*)&arg;
return 31;
}
int main(int argc, char *argv[])
{
auto stack_size = 1024 * 1024;
auto stack = (char*)mmap(NULL, stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (stack == MAP_FAILED) { perror("mmap"); exit(EXIT_FAILURE); }
globalValue = 5;
auto pid = clone(childFunc, stack + stack_size, /*CLONE_THREAD|*/CLONE_VM|CLONE_SIGHAND|SIGCHLD, (void*)7);
sleep(1); //So main and child printf don't collide
if (pid == -1) { perror("clone"); exit(EXIT_FAILURE); }
printf("clone() returned %d\n", pid);
int status;
int waitVal = waitpid(-1, &status, __WALL);
printf("Expecting 12 got %d. Expecting 31 got %d. ID=%d\n", globalValue, WEXITSTATUS(status), waitVal);
return 0;
}
If you want to call functions asynchronously with threads, I recommend using std::async. Example below:
#include <iostream>
#include <future>
#include <mutex>
#include <condition_variable>
int globalValue = 0; // could also have been std::atomic<int> but I chose a mutex (to also serialize output to std::cout)
std::mutex mtx; // to protect access to data in multithreaded applications you can use mutexes
int childFunc(const int value)
{
std::unique_lock<std::mutex> lock(mtx);
globalValue = value;
std::cout << "Global value set to " << globalValue << "\n";
return 31;
}
int getValue()
{
std::unique_lock<std::mutex> lock(mtx);
return globalValue;
}
int main(int argc, char* argv[])
{
// shared memory stuff is not needed for threads
// launch childFunc asynchronously
// using a lambda function : https://en.cppreference.com/w/cpp/language/lambda
// to call a function asynchronously : https://en.cppreference.com/w/cpp/thread/async
// note I didn't use the C++ thread class; it can also launch things asynchronously
// however async is both a better abstraction and you can return values (and exceptions)
// to the calling thread if you need to (which you do in this case)
std::future<int> future = std::async(std::launch::async, []
{
return childFunc(12);
});
// wait until asynchronous function call is complete
// and get its return value;
int value_from_async = future.get();
std::cout << "Expected global value 12, value = " << getValue() << "\n";
std::cout << "Expected return value from asynchronous process is 31, value = " << value_from_async << "\n";
return 0;
}
I am trying to read some data from stdin in a separate thread from the main thread. The main thread should be able to communicate with this waiting thread by writing to stdin, but when I run the test code (included below) nothing happens, except that the message ('do_some_work' in my test code) is printed on the terminal directly instead of being picked up by the waiting thread.
I have tried a couple of solutions listed on SO, but with no success. My code mimics one of the solutions from the following SO question; that solution works perfectly fine by itself, but when coupled with my stdin-reading thread it does not.
Is it possible to write data into own stdin in Linux
#include <unistd.h>
#include <sys/select.h>
#include <cstring>
#include <string>
#include <iostream>
#include <sstream>
#include <thread>
bool terminate_read = true;
void readStdin() {
static const int INPUT_BUF_SIZE = 1024;
char buf[INPUT_BUF_SIZE];
while (terminate_read) {
fd_set readfds;
struct timeval tv;
int data;
FD_ZERO(&readfds);
FD_SET(STDIN_FILENO, &readfds);
tv.tv_sec=2;
tv.tv_usec=0;
int ret = select(16, &readfds, 0, 0, &tv);
if (ret == 0) {
continue;
} else if (ret == -1) {
perror("select");
continue;
}
data=FD_ISSET(STDIN_FILENO, &readfds);
if (data>0) {
int bytes = read(STDIN_FILENO, buf, INPUT_BUF_SIZE - 1);
if (bytes == -1) {
perror("input poll: read");
continue;
}
if (bytes) {
buf[bytes] = '\0'; // read() does not null-terminate
std::cout << "Execute: " << buf << std::endl;
if (strncmp(buf, "quit", 4)==0) {
std::cout << "quitting reading from stdin." << std::endl;
break;
}
else {
continue;
}
}
}
}
}
int main() {
std::thread threadReadStdin([] () {
readStdin();
});
usleep(1000000);
std::stringstream msg;
msg << "do_some_work" << std::endl;
auto s = msg.str();
write(STDIN_FILENO, s.c_str(), s.size());
usleep(1000000);
terminate_read = false;
threadReadStdin.join();
return 0;
}
A code snippet illustrating how to write to stdin that in turn is read by threadReadStdin would be extremely helpful.
Thanks much in advance!
Edit:
One thing I forgot to mention here is that the code within readStdin() is third-party code, and any communication that takes place has to be on its terms.
Also, I can quite easily redirect std::cin and std::cout to either an fstream or a stringstream. The problem is that when I write to the redirected cin buffer, nothing appears on the reading thread.
Edit2:
This is a single process application and spawning is not an option.
If you want to use a pipe to communicate between different threads in the same program, you shouldn't try using stdin or stdout. Instead, just use the pipe function to create your own pipe. I'll walk you through doing this step-by-step!
Opening the channel
Let's create a helper function to open the channel using pipe. This function takes two ints by reference - the read end and the write end. It tries opening the pipe, and if it can't, it prints an error.
#include <unistd.h>
#include <cstdio>
#include <thread>
#include <string>
void open_channel(int& read_fd, int& write_fd) {
int vals[2];
int errc = pipe(vals);
if(errc) {
fputs("Bad pipe", stderr);
read_fd = -1;
write_fd = -1;
} else {
read_fd = vals[0];
write_fd = vals[1];
}
}
Writing a message
Next, we define a function to write the message. This function is given as a lambda, so that we can pass it directly to the thread.
auto write_message = [](int write_fd, std::string message) {
ssize_t amnt_written = write(write_fd, message.data(), message.size());
if(amnt_written != message.size()) {
fputs("Bad write", stderr);
}
close(write_fd);
};
Reading a message
We should also make a function to read the message. Reading the message will be done on a different thread. This lambda reads the message 1000 bytes at a time, and prints it to standard out.
auto read_message = [](int read_fd) {
constexpr int buffer_size = 1000;
char buffer[buffer_size + 1];
ssize_t amnt_read;
do {
amnt_read = read(read_fd, &buffer[0], buffer_size);
if (amnt_read < 0) break; // read error: don't index buffer with -1
buffer[amnt_read] = 0;
fwrite(buffer, 1, amnt_read, stdout);
} while(amnt_read > 0);
};
Main method
Finally, we can write the main method. It opens the channel, writes the message on one thread, and reads it on the other thread.
int main() {
int read_fd;
int write_fd;
open_channel(read_fd, write_fd);
std::thread write_thread(
write_message, write_fd, "Hello, world!");
std::thread read_thread(
read_message, read_fd);
write_thread.join();
read_thread.join();
}
It seems like I have stumbled upon the answer with the help of very constructive responses from @Jorge Perez, @Remy Lebeau and @Kamil Cuk. This solution is built upon @Jorge Perez's extremely helpful code. For brevity's sake I am not including the whole code; part of it comes from the code I posted and a large part comes from @Jorge Perez's code.
What I have done is take his pipe-based approach and replace STDIN_FILENO with the pipe's read fd using dup. The following link was really helpful:
https://en.wikipedia.org/wiki/Dup_(system_call)
I would really appreciate your input on whether this is a hack or a good enough approach/solution given the constraints I have in production environment code.
int main() {
int read_fd;
int write_fd;
open_channel(read_fd, write_fd);
close(STDIN_FILENO);
if(dup(read_fd) == -1)
return -1;
std::thread write_thread(write_message, write_fd, "Whatsup?");
std::thread threadReadStdin([] () {
readStdin();
});
write_thread.join();
threadReadStdin.join();
return 0;
}
My question
How do I avoid a data race when using two threads to send and receive over an asio::ip::tcp::iostream?
Design
I am writing a program that uses an asio::ip::tcp::iostream for input and output. The program accepts commands from the (remote) user over port 5555 and sends messages over that same TCP connection to the user. Because these events (commands received from the user or messages sent to the user) occur asynchronously, I have separate transmit and receive threads.
In this toy version, the commands are "one", "two" and "quit". Of course "quit" quits the program. The other commands do nothing, and any unrecognized command causes the server to close the TCP connection.
The transmitted messages are simple serial-numbered messages that are sent once per second.
In both this toy version and the real code I'm trying to write, the transmit and receive processes are both using blocking IO, so there doesn't appear to be a good way to use a std::mutex or other synchronization mechanism. (In my attempts, one process would grab the mutex and then block, which isn't going to work for this.)
Build and test
To build and test this, I'm using gcc version 7.2.1 and valgrind 3.13 on a 64-bit Linux machine. Build:
g++ -DASIO_STANDALONE -Wall -Wextra -pedantic -std=c++14 concurrent.cpp -o concurrent -lpthread
To test, I run the server with this command:
valgrind --tool=helgrind --log-file=helgrind.txt ./concurrent
Then I use telnet 127.0.0.1 5555 in another window to create a connection to the server. What helgrind correctly points out is that there is a data race because both runTx and runRx are trying to access the same stream asynchronously:
==16188== Possible data race during read of size 1 at 0x1FFEFFF1CC by thread #1
==16188== Locks held: none
... many more lines elided
concurrent.cpp
#include <asio.hpp>
#include <iostream>
#include <fstream>
#include <thread>
#include <array>
#include <chrono>
class Console {
public:
Console() :
want_quit{false},
want_reset{false}
{}
bool getQuitValue() const { return want_quit; }
int run(std::istream *in, std::ostream *out);
bool wantReset() const { return want_reset; }
private:
int runTx(std::istream *in);
int runRx(std::ostream *out);
bool want_quit;
bool want_reset;
};
int Console::runTx(std::istream *in) {
static const std::array<std::string, 3> cmds{
"quit", "one", "two",
};
std::string command;
while (!want_quit && !want_reset && *in >> command) {
if (command == cmds.front()) {
want_quit = true;
}
if (std::find(cmds.cbegin(), cmds.cend(), command) == cmds.cend()) {
want_reset = true;
std::cout << "unknown command [" << command << "]\n";
} else {
std::cout << command << '\n';
}
}
return 0;
}
int Console::runRx(std::ostream *out) {
for (int i=0; !(want_reset || want_quit); ++i) {
(*out) << "This is message number " << i << '\n';
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
out->flush();
}
return 0;
}
int Console::run(std::istream *in, std::ostream *out) {
want_reset = false;
std::thread t1{&Console::runRx, this, out};
int status = runTx(in);
t1.join();
return status;
}
int main()
{
Console con;
asio::io_service ios;
// IPv4 address, port 5555
asio::ip::tcp::acceptor acceptor(ios,
asio::ip::tcp::endpoint{asio::ip::tcp::v4(), 5555});
while (!con.getQuitValue()) {
asio::ip::tcp::iostream stream;
acceptor.accept(*stream.rdbuf());
con.run(&stream, &stream);
if (con.wantReset()) {
std::cout << "resetting\n";
}
}
}
Yeah, you're sharing the socket that underlies the stream without any synchronization.
Side note: the same goes for the boolean flags, which can easily be "fixed" by changing them to:
std::atomic_bool want_quit;
std::atomic_bool want_reset;
How To Solve
To be honest, I don't think there is a good solution. You said it yourself: the operations are asynchronous, so you'll be in trouble if you try to do them synchronously.
You could try to think of hacks. What if we created a separate stream object based on the same underlying socket (file descriptor)? It's not going to be very easy, as such a stream is not part of Asio.
But we could hack one up using Boost Iostreams:
#define BOOST_IOSTREAMS_USE_DEPRECATED
#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/stream.hpp>
// .... later:
// HACK: procure a _separate `ostream` to prevent the race, using the same fd
namespace bio = boost::iostreams;
bio::file_descriptor_sink fds(stream.rdbuf()->native_handle(), false); // close_on_exit flag is deprecated
bio::stream<bio::file_descriptor_sink> hack_ostream(fds);
con.run(stream, hack_ostream);
Indeed this runs without the race (simultaneous reads and writes on the same socket are fine, as long as you don't share the non-threadsafe Asio object(s) wrapping them).
What I Recommend Instead:
Don't do that. It's a kludge. You're complicating things, apparently in an attempt to avoid using asynchronous code. I'd bite the bullet.
It's not too much work to factor the IO mechanics out from the service logic. You'll end up being free from random limitations (you could consider dealing with multiple clients, you could do without any threading at all etc.).
If you would like to learn about some middle ground, look at stackful coroutines (http://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/reference/spawn.html)
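Just to illustrate the shape of that middle ground, here is a minimal single-threaded sketch of mine (untested here, written against the same io_service-era API as the listing that follows) using boost::asio::spawn with yield_context; it drops the Console logic and only echoes commands while writing the periodic messages:
// link roughly with: -lboost_coroutine -lboost_context -lboost_system -lpthread
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <chrono>
#include <iostream>
#include <memory>
#include <string>

namespace ba = boost::asio;
using ba::ip::tcp;

// Two coroutines per connection (reader and writer), both resumed on the one
// thread running io_service::run(), so no locks are needed.
void session(std::shared_ptr<tcp::socket> sock) {
    ba::io_service& ios = sock->get_io_service();

    // writer coroutine: one numbered message per second
    ba::spawn(ios, [sock](ba::yield_context yield) {
        ba::steady_timer timer(sock->get_io_service());
        boost::system::error_code ec;
        for (int i = 0; ; ++i) {
            std::string msg = "This is message number " + std::to_string(i) + "\n";
            ba::async_write(*sock, ba::buffer(msg), yield[ec]);
            if (ec) break; // peer gone or socket closed
            timer.expires_from_now(std::chrono::seconds(1));
            timer.async_wait(yield[ec]);
            if (ec) break;
        }
    });

    // reader coroutine: consume commands line by line
    ba::spawn(ios, [sock](ba::yield_context yield) {
        ba::streambuf buf;
        boost::system::error_code ec;
        while (true) {
            ba::async_read_until(*sock, buf, '\n', yield[ec]);
            if (ec) break;
            std::istream is(&buf);
            std::string command;
            std::getline(is, command);
            if (command == "quit") { sock->close(); break; }
            std::cout << command << "\n";
        }
    });
}

int main() {
    ba::io_service ios;
    tcp::acceptor acceptor(ios, tcp::endpoint(tcp::v4(), 5555));
    ba::spawn(ios, [&](ba::yield_context yield) {
        for (;;) {
            auto sock = std::make_shared<tcp::socket>(ios);
            acceptor.async_accept(*sock, yield);
            session(sock); // handles any number of clients concurrently
        }
    });
    ios.run(); // everything above runs on this single thread
}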
Listing
Just for reference
Note I refactored to remove the need for pointers. You're not transferring ownership, so a reference will do. In case you didn't know how to pass the reference to a bind/std::thread constructor, the trick is in the std::ref you'll see.
[For stress testing I have greatly reduced the delays.]
Live On Coliru
#include <boost/asio.hpp>
#include <iostream>
#include <fstream>
#include <thread>
#include <array>
#include <chrono>
class Console {
public:
Console() :
want_quit{false},
want_reset{false}
{}
bool getQuitValue() const { return want_quit; }
int run(std::istream &in, std::ostream &out);
bool wantReset() const { return want_reset; }
private:
int runTx(std::istream &in);
int runRx(std::ostream &out);
std::atomic_bool want_quit;
std::atomic_bool want_reset;
};
int Console::runTx(std::istream &in) {
static const std::array<std::string, 3> cmds{
{"quit", "one", "two"},
};
std::string command;
while (!want_quit && !want_reset && in >> command) {
if (command == cmds.front()) {
want_quit = true;
}
if (std::find(cmds.cbegin(), cmds.cend(), command) == cmds.cend()) {
want_reset = true;
std::cout << "unknown command [" << command << "]\n";
} else {
std::cout << command << '\n';
}
}
return 0;
}
int Console::runRx(std::ostream &out) {
for (int i=0; !(want_reset || want_quit); ++i) {
out << "This is message number " << i << '\n';
std::this_thread::sleep_for(std::chrono::milliseconds(1));
out.flush();
}
return 0;
}
int Console::run(std::istream &in, std::ostream &out) {
want_reset = false;
std::thread t1{&Console::runRx, this, std::ref(out)};
int status = runTx(in);
t1.join();
return status;
}
#define BOOST_IOSTREAMS_USE_DEPRECATED
#include <boost/iostreams/device/file_descriptor.hpp>
#include <boost/iostreams/stream.hpp>
int main()
{
Console con;
boost::asio::io_service ios;
// IPv4 address, port 5555
boost::asio::ip::tcp::acceptor acceptor(ios, boost::asio::ip::tcp::endpoint{boost::asio::ip::tcp::v4(), 5555});
while (!con.getQuitValue()) {
boost::asio::ip::tcp::iostream stream;
acceptor.accept(*stream.rdbuf());
{
// HACK: procure a _separate `ostream` to prevent the race, using the same fd
namespace bio = boost::iostreams;
bio::file_descriptor_sink fds(stream.rdbuf()->native_handle(), false); // close_on_exit flag is deprecated
bio::stream<bio::file_descriptor_sink> hack_ostream(fds);
con.run(stream, hack_ostream);
}
if (con.wantReset()) {
std::cout << "resetting\n";
}
}
}
Testing:
netcat localhost 5555 <<<quit
This is message number 0
This is message number 1
This is message number 2
And
commands=( one two one two one two one two one two one two one two three )
while sleep 0.1; do echo ${commands[$(($RANDOM%${#commands}))]}; done | (while netcat localhost 5555; do sleep 1; done)
runs indefinitely, occasionally resetting the connection (when command "three" has been sent).
We've made good progress in getting GRPC running under RHEL 7.
Our application has one rather complicated structure with three levels of nesting, where the outer level uses the "oneof" keyword.
We find that all our other structures run fine, but this one gives us an RPC failure with code=14.
We've simplified this part of the application as much as possible so it can hopefully be recompiled and run easily.
Here's the .proto file, updated to accommodate Uli's question:
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.debug";
option java_outer_classname = "DebugProto";
option objc_class_prefix = "DEBUG";
package DEBUGpackage;
service DEBUGservice {
rpc DEBUG_val_container_get (input_int32_request) returns (outer_container) {}
}
message input_int32_request {
int32 ival = 1;
}
message inner_container {
repeated uint32 val_array = 1;
}
message middle_container {
inner_container vac = 1;
}
message other_container {
int32 other_val = 1;
}
message outer_container {
oneof reply {
middle_container r1 = 1;
other_container r2 = 2;
}
}
(Please note that the java option lines in this .proto file are only there because they appear in the gRPC website examples. Our code is entirely C++, with no Java. We don't know whether that means we can do without some of these "option java..." lines.)
Here's our client source code:
#include <iostream>
#include <memory>
#include <string>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include <thread>
#include <unistd.h>
#include "debug.grpc.pb.h"
using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::inner_container;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;
class DEBUGClient {
public:
explicit DEBUGClient(std::shared_ptr<Channel> channel)
: stub_(DEBUGservice::NewStub(channel)) {}
void DEBUG_val_container_get() {
std::cout << "in DEBUG_val_container_get" << std::endl;
// Data we are sending to the server
input_int32_request val;
val.set_ival(0);
AsyncClientCall* call = new AsyncClientCall;
call->response_reader = stub_->AsyncDEBUG_val_container_get(&call->context, val, &cq_);
call->response_reader->Finish(&call->reply_, &call->status, (void*)call);
}
void AsyncCompleteRpc() {
void* got_tag;
bool ok = false;
while (cq_.Next(&got_tag, &ok)) {
AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
GPR_ASSERT(ok);
if (call->status.ok()) {
if (call->reply_.has_r1()) {
std::cout << call << " DEBUG received: "
<< call->reply_.r1().vac().val_array(0) << std::endl;
}
}
else {
std::cout << call << " RPC failed" << std::endl;
std::cout << " RPC failure code = " << call->status.error_code() << std::endl;
std::cout << " RPC failure message = " << call->status.error_message() << std::endl;
}
delete call;
}
}
private:
struct AsyncClientCall {
outer_container reply_;
ClientContext context;
Status status;
std::unique_ptr<ClientAsyncResponseReader<outer_container>> response_reader;
};
std::unique_ptr<DEBUGservice::Stub> stub_;
CompletionQueue cq_;
};
int main(int argc, char** argv) {
DEBUGClient DEBUG0(grpc::CreateChannel("172.16.17.46:50050", grpc::InsecureChannelCredentials()));
std::thread thread0_ = std::thread(&DEBUGClient::AsyncCompleteRpc, &DEBUG0);
DEBUG0.DEBUG_val_container_get();
sleep(1);
std::cout << "Press control-c to quit" << std::endl << std::endl;
thread0_.join(); //blocks forever
return 0;
}
And, here's our server source code:
#include <memory>
#include <iostream>
#include <string>
#include <thread>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include "debug.grpc.pb.h"
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using DEBUGpackage::inner_container;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;
std::string save_server_address;
class ServerImpl final {
public:
~ServerImpl() {
server_->Shutdown();
cq_->Shutdown();
}
void Run() {
std::string server_address("0.0.0.0:50050");
ServerBuilder builder;
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
builder.RegisterService(&service_);
cq_ = builder.AddCompletionQueue();
server_ = builder.BuildAndStart();
std::cout << "Server listening on " << server_address << std::endl;
save_server_address = server_address;
HandleRpcs();
}
private:
class CallData {
public:
virtual void Proceed() = 0;
};
class DebugGetCallData final : public CallData{
public:
DebugGetCallData(DEBUGservice::AsyncService* service, ServerCompletionQueue* cq)
: service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {
Proceed();
}
void Proceed() {
if (status_ == CREATE) {
status_ = PROCESS;
service_->RequestDEBUG_val_container_get(&ctx_, &request_, &responder_, cq_, cq_, this);
} else if (status_ == PROCESS) {
new DebugGetCallData(service_, cq_);
char *portchar;
portchar = (char *) save_server_address.c_str();
long cq_addr = (long) cq_;
int cq_addr32 = (int) (cq_addr & 0xfffffff);
srand(cq_addr32);
fprintf(stderr, "%s task started\n", portchar); fflush(stderr);
unsigned int return_val = 10;
inner_container ic;
ic.add_val_array(return_val);
middle_container reply_temp;
reply_temp.set_allocated_vac(&ic);
reply_.set_allocated_r1(&reply_temp);
fprintf(stderr, "%s %s task done\n", portchar, "val_container_get"); fflush(stderr);
status_ = FINISH;
responder_.Finish(reply_, Status::OK, this);
} else {
GPR_ASSERT(status_ == FINISH);
}
}
private:
DEBUGservice::AsyncService* service_;
ServerCompletionQueue* cq_;
ServerContext ctx_;
input_int32_request request_;
outer_container reply_;
ServerAsyncResponseWriter<outer_container> responder_;
enum CallStatus { CREATE, PROCESS, FINISH };
CallStatus status_;
};
void HandleRpcs() {
new DebugGetCallData(&service_, cq_.get());
void* tag;
bool ok;
while (true) {
GPR_ASSERT(cq_->Next(&tag, &ok));
GPR_ASSERT(ok);
static_cast<CallData*>(tag)->Proceed();
}
}
std::unique_ptr<ServerCompletionQueue> cq_;
DEBUGservice::AsyncService service_;
std::unique_ptr<Server> server_;
};
int main() {
ServerImpl server;
server.Run();
return 0;
}
The output when I run it looks like this:
[fossum#netsres46 debug]$ DEBUG_client2
in DEBUG_val_container_get
0xb73ff0 RPC failed
RPC failure code = 14
RPC failure message = Endpoint read failed
Press control-c to quit
We ran the server under gdb, and found a place in the generated
file "debug.pb.cc" where if we just comment out one line, it all starts working.
Here's the pertinent piece of the generated file "debug.pb.cc":
middle_container::~middle_container() {
// ##protoc_insertion_point(destructor:DEBUGpackage.middle_container)
SharedDtor();
}
void middle_container::SharedDtor() {
if (this != internal_default_instance()) {
delete vac_; // comment out this one line, to make the problem go away
}
}
The "delete vac_" line appears to be an attempt to delete storage that either has already been deleted, or is about to be deleted somewhere else. Please, can someone look into this? [The files below are still the files we use to generate this code, and to debug the problem to this point]
I have no idea whether I've uncovered a bug in GRPC, or whether I've coded something wrong.
The issue is that you are allocating middle_container reply_temp (and inner_container ic) on the stack in your server, even though set_allocated_* hands their ownership to the enclosing messages. As a result they get destructed as soon as you pass out of the scope. At that time, you have called Finish but not yet waited for its result. Since this is an async server, the data must remain alive until you've received the tag back for it. This is why manually editing your destructor appears to work in your case; you're basically nullifying the destructor (and leaking memory as a result).
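For illustration only, here is a sketch (mine, not compiled against your tree) of the PROCESS branch that avoids handing stack addresses to protobuf, assuming the usual generated accessors for the .proto above (mutable_r1(), mutable_vac(), add_val_array(), set_allocated_*):
} else if (status_ == PROCESS) {
    new DebugGetCallData(service_, cq_);
    // Option 1: let the parent message own its children; protobuf allocates
    // them internally and frees them when reply_ is destroyed
    reply_.mutable_r1()->mutable_vac()->add_val_array(10);
    // Option 2: if set_allocated_* is preferred, pass heap-allocated objects,
    // since the parent takes ownership and will delete them itself:
    //   middle_container* mid = new middle_container;
    //   mid->mutable_vac()->add_val_array(10);
    //   reply_.set_allocated_r1(mid);
    status_ = FINISH;
    responder_.Finish(reply_, Status::OK, this);
}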
I am working on Ubuntu. I want to monitor a folder and print every event that pops up in its subfolders (print the files).
I have the following code, but it doesn't work: when executed, nothing is printed for the events.
With the second piece of code I only see the events from the top-level folder; the events from the subfolders do not pop up.
#include <string>
#include <iostream>
#include <stdio.h>
using namespace std;
std::string exec(char* cmd) {
FILE* pipe = popen(cmd, "r");
if (!pipe) return "ERROR";
char buffer[256];
std::string result = "";
while(!feof(pipe)) {
if(fgets(buffer, 256, pipe) != NULL)
result += buffer;
}
pclose(pipe);
cout<<"result is: "<<result<<endl;
return result;
}
int main()
{
//while(1)
//{
string s=exec((char*)"inotifywait -rme create /home/folder/");
cout << s << endl;
//}
return 0;
}
This code only prints the events from the folder I'm monitoring. It doesn't print the events from each subfolder. I don't know how to improve it for my needs.
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <iostream>
void processNewFiles(int fd, int wd);
int main(int argc, char** argv)
{
const char* dirPath = "/home/folder/" ;//argv[1];
int fd = inotify_init();
int wd = inotify_add_watch(fd, dirPath, IN_CREATE);
if (wd)
{
processNewFiles(fd, wd);
inotify_rm_watch(fd, wd);
}
}
void processNewFiles(int fd, int wd)
{
bool done = false;
do
{
int qLen = 0;
ioctl(fd, FIONREAD, &qLen);
char* buf = new char[qLen];
int num = read(fd, buf, qLen);
if (num == qLen)
{
inotify_event* iev = reinterpret_cast<inotify_event*>(buf);
if (iev->wd == wd && iev->mask & IN_CREATE)
{
std::cout << "New file created: " << iev->name << std::endl;
}
}
delete [] buf;
} while (!done);
}
Your second solution does not work because inotify_add_watch does not work recursively. You would have to add watches for the subdirectories manually (see the sketch below). As this might be annoying, it is also possible to use the inotifywait utility, as you do in your first example.
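To give an idea of what adding the watches manually could look like, here is a rough sketch of mine (not from the original post): it walks the existing tree with nftw and registers one watch per directory. Note that directories created after startup would still need watches added when their IN_CREATE events arrive.
#include <sys/inotify.h>
#include <ftw.h>
#include <unistd.h>
#include <cstdio>
#include <map>
#include <string>

static int g_fd = -1;
static std::map<int, std::string> g_watches; // wd -> directory path

// nftw callback: add a watch for every directory we encounter
static int add_watch_cb(const char* path, const struct stat*, int typeflag, struct FTW*)
{
    if (typeflag == FTW_D) {
        int wd = inotify_add_watch(g_fd, path, IN_CREATE);
        if (wd >= 0)
            g_watches[wd] = path;
        else
            std::perror("inotify_add_watch");
    }
    return 0; // keep walking
}

int main()
{
    g_fd = inotify_init();
    if (g_fd < 0) { std::perror("inotify_init"); return 1; }

    // register the root folder and every existing subdirectory
    if (nftw("/home/folder/", add_watch_cb, 16, FTW_PHYS) == -1) {
        std::perror("nftw");
        return 1;
    }

    alignas(inotify_event) char buf[4096];
    for (;;) {
        ssize_t len = read(g_fd, buf, sizeof(buf));
        if (len <= 0) break;
        for (char* p = buf; p < buf + len; ) {
            inotify_event* ev = reinterpret_cast<inotify_event*>(p);
            if ((ev->mask & IN_CREATE) && ev->len)
                std::printf("created: %s/%s\n", g_watches[ev->wd].c_str(), ev->name);
            p += sizeof(inotify_event) + ev->len;
        }
    }
    close(g_fd);
    return 0;
}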
Your first example is not working because you keep reading from the pipe forever and only print the accumulated result after the loop ends. If you kill the inotifywait process (e.g., if you're the only person on the machine and this is the only inotifywait process, simply with "killall inotifywait"), you will get your output, because you break out of the loop reading from the pipe. If you output something inside the loop, it will work, too.
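For the first example, a small variation that prints each line as soon as inotifywait emits it (instead of collecting everything until the pipe closes) could look like this:
#include <stdio.h>
#include <iostream>

int main()
{
    // -r: recursive, -m: monitor forever, -e create: only report creation events
    FILE* pipe = popen("inotifywait -rme create /home/folder/", "r");
    if (!pipe) { std::cerr << "popen failed\n"; return 1; }

    char buffer[256];
    // fgets blocks until inotifywait prints a line; handle it immediately
    while (fgets(buffer, sizeof(buffer), pipe) != NULL) {
        std::cout << "event: " << buffer;
    }
    pclose(pipe); // only reached if inotifywait exits
    return 0;
}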