RPC failure code 14 in our simple gRPC example program - C++

We've made good progress in getting GRPC running under RHEL 7.
Our application has one rather complicated structure with three levels of nesting, with the outer level using the "oneof" keyword.
We find that all our other structures run fine, but this one gives us an RPC failure with code=14.
We've simplified this part of the application as much as possible so it can hopefully be recompiled and run easily.
Here's the .proto file, updated to accommodate Uli's question:
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.debug";
option java_outer_classname = "DebugProto";
option objc_class_prefix = "DEBUG";
package DEBUGpackage;

service DEBUGservice {
  rpc DEBUG_val_container_get (input_int32_request) returns (outer_container) {}
}

message input_int32_request {
  int32 ival = 1;
}

message inner_container {
  repeated uint32 val_array = 1;
}

message middle_container {
  inner_container vac = 1;
}

message other_container {
  int32 other_val = 1;
}

message outer_container {
  oneof reply {
    middle_container r1 = 1;
    other_container r2 = 2;
  }
}
(Please note that the java lines in this .proto file are only there because they appear in the gRPC website examples. Our code is entirely C++, with no Java. We don't know if that means we can do without some of these "option java..." lines.)
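(For what it's worth: the "option java_..." lines only affect Java code generation, and objc_class_prefix only affects Objective-C, so a C++-only build should be able to drop them. The trimmed header would just be:
syntax = "proto3";
package DEBUGpackage;
with the service and message definitions unchanged below it.)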
Here's our client source code:
#include <iostream>
#include <memory>
#include <string>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include <thread>
#include <unistd.h>
#include "debug.grpc.pb.h"

using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::inner_container;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;

class DEBUGClient {
public:
    explicit DEBUGClient(std::shared_ptr<Channel> channel)
        : stub_(DEBUGservice::NewStub(channel)) {}

    void DEBUG_val_container_get() {
        std::cout << "in DEBUG_val_container_get" << std::endl;
        // Data we are sending to the server
        input_int32_request val;
        val.set_ival(0);
        AsyncClientCall* call = new AsyncClientCall;
        call->response_reader = stub_->AsyncDEBUG_val_container_get(&call->context, val, &cq_);
        call->response_reader->Finish(&call->reply_, &call->status, (void*)call);
    }

    void AsyncCompleteRpc() {
        void* got_tag;
        bool ok = false;
        while (cq_.Next(&got_tag, &ok)) {
            AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
            GPR_ASSERT(ok);
            if (call->status.ok()) {
                if (call->reply_.has_r1()) {
                    std::cout << call << " DEBUG received: "
                              << call->reply_.r1().vac().val_array(0) << std::endl;
                }
            }
            else {
                std::cout << call << " RPC failed" << std::endl;
                std::cout << "    RPC failure code = " << call->status.error_code() << std::endl;
                std::cout << "    RPC failure message = " << call->status.error_message() << std::endl;
            }
            delete call;
        }
    }

private:
    struct AsyncClientCall {
        outer_container reply_;
        ClientContext context;
        Status status;
        std::unique_ptr<ClientAsyncResponseReader<outer_container>> response_reader;
    };

    std::unique_ptr<DEBUGservice::Stub> stub_;
    CompletionQueue cq_;
};

int main(int argc, char** argv) {
    DEBUGClient DEBUG0(grpc::CreateChannel("172.16.17.46:50050", grpc::InsecureChannelCredentials()));
    std::thread thread0_ = std::thread(&DEBUGClient::AsyncCompleteRpc, &DEBUG0);
    DEBUG0.DEBUG_val_container_get();
    sleep(1);
    std::cout << "Press control-c to quit" << std::endl << std::endl;
    thread0_.join(); // blocks forever
    return 0;
}
And, here's our server source code:
#include <memory>
#include <iostream>
#include <string>
#include <thread>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include "debug.grpc.pb.h"
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using DEBUGpackage::inner_container;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;

std::string save_server_address;

class ServerImpl final {
public:
    ~ServerImpl() {
        server_->Shutdown();
        cq_->Shutdown();
    }

    void Run() {
        std::string server_address("0.0.0.0:50050");
        ServerBuilder builder;
        builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
        builder.RegisterService(&service_);
        cq_ = builder.AddCompletionQueue();
        server_ = builder.BuildAndStart();
        std::cout << "Server listening on " << server_address << std::endl;
        save_server_address = server_address;
        HandleRpcs();
    }

private:
    class CallData {
    public:
        virtual void Proceed() = 0;
    };

    class DebugGetCallData final : public CallData {
    public:
        DebugGetCallData(DEBUGservice::AsyncService* service, ServerCompletionQueue* cq)
            : service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {
            Proceed();
        }

        void Proceed() {
            if (status_ == CREATE) {
                status_ = PROCESS;
                service_->RequestDEBUG_val_container_get(&ctx_, &request_, &responder_, cq_, cq_, this);
            } else if (status_ == PROCESS) {
                new DebugGetCallData(service_, cq_);
                char *portchar;
                portchar = (char *) save_server_address.c_str();
                long cq_addr = (long) cq_;
                int cq_addr32 = (int) (cq_addr & 0xfffffff);
                srand(cq_addr32);
                fprintf(stderr, "%s task started\n", portchar); fflush(stderr);
                unsigned int return_val = 10;
                inner_container ic;
                ic.add_val_array(return_val);
                middle_container reply_temp;
                reply_temp.set_allocated_vac(&ic);
                reply_.set_allocated_r1(&reply_temp);
                fprintf(stderr, "%s %s task done\n", portchar, "val_container_get"); fflush(stderr);
                status_ = FINISH;
                responder_.Finish(reply_, Status::OK, this);
            } else {
                GPR_ASSERT(status_ == FINISH);
            }
        }

    private:
        DEBUGservice::AsyncService* service_;
        ServerCompletionQueue* cq_;
        ServerContext ctx_;
        input_int32_request request_;
        outer_container reply_;
        ServerAsyncResponseWriter<outer_container> responder_;
        enum CallStatus { CREATE, PROCESS, FINISH };
        CallStatus status_;
    };

    void HandleRpcs() {
        new DebugGetCallData(&service_, cq_.get());
        void* tag;
        bool ok;
        while (true) {
            GPR_ASSERT(cq_->Next(&tag, &ok));
            GPR_ASSERT(ok);
            static_cast<CallData*>(tag)->Proceed();
        }
    }

    std::unique_ptr<ServerCompletionQueue> cq_;
    DEBUGservice::AsyncService service_;
    std::unique_ptr<Server> server_;
};

int main() {
    ServerImpl server;
    server.Run();
    return 0;
}
The output when I run it looks like this:
[fossum#netsres46 debug]$ DEBUG_client2
in DEBUG_val_container_get
0xb73ff0 RPC failed
RPC failure code = 14
RPC failure message = Endpoint read failed
Press control-c to quit
We ran the server under gdb, and found a place in the generated file "debug.pb.cc" where, if we just comment out one line, it all starts working.
Here's the pertinent piece of the generated file "debug.pb.cc":
middle_container::~middle_container() {
  // @@protoc_insertion_point(destructor:DEBUGpackage.middle_container)
  SharedDtor();
}

void middle_container::SharedDtor() {
  if (this != internal_default_instance()) {
    delete vac_;  // comment out this one line, to make the problem go away
  }
}
The "delete vac_" line appears to be an attempt to delete storage that either has already been deleted, or is about to be deleted somewhere else. Please, can someone look into this? [The files below are still the files we use to generate this code, and to debug the problem to this point]
I have no idea whether I've uncovered a bug in GRPC, or whether I've coded something wrong.

The issue is that you allocate middle_container reply_temp on the stack in your server (and the same goes for inner_container ic), yet set_allocated_vac() and set_allocated_r1() hand ownership of those objects to protobuf. As a result they get destructed as soon as execution passes out of the scope. At that time you have called Finish, but not yet waited for its result. Since this is an async server, the data must remain alive until you've received the tag back for it. This is why manually editing the generated destructor "works" in your case: you're effectively nullifying the destructor (and leaking memory as a result).
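One way to fix it, as a minimal sketch against the API generated from the .proto above: let reply_ own its nested messages through the generated mutable_*() accessors instead of passing set_allocated_*() pointers to stack objects. The PROCESS branch then becomes:
} else if (status_ == PROCESS) {
    new DebugGetCallData(service_, cq_);
    // mutable_r1()/mutable_vac() heap-allocate the nested messages inside
    // reply_, which is a member of DebugGetCallData and therefore stays
    // alive until the FINISH tag comes back from the completion queue.
    reply_.mutable_r1()->mutable_vac()->add_val_array(10);
    status_ = FINISH;
    responder_.Finish(reply_, Status::OK, this);
}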

Related

How to use boost::beast to download a file without blocking, with progress responses

I have started with this example, so I won't post all the code. My objective is to download a large file without blocking my main thread. The second objective is to get notifications so I can update a progress bar. I do have the code working a couple of ways. The first is to just call ioc.run(); and let it go to work; I get the file downloaded. But I cannot find any way to start the session without blocking.
The second way, I can make the calls down to http::async_read_some and the call works, but I cannot get a response that I can use. I don't know if there is a way to pass a lambda that captures.
The #if 0..#else..#endif switches between the methods. I'm sure there is a simple way, but I just cannot see it. I'll clean up the code when I get it working, like setting the local file name. Thanks.
std::size_t on_read_some(boost::system::error_code ec, std::size_t bytes_transferred)
{
    if (ec); // deal with it...
    if (!bValidConnection) {
        std::string_view view((const char*)buffer_.data().data(), bytes_transferred);
        auto pos = view.find("Content-Length:");
        if (pos == std::string_view::npos)
            ; // error
        file_size = std::stoi(view.substr(pos + sizeof("Content-Length:")).data());
        if (!file_size)
            ; // error
        bValidConnection = true;
    }
    else {
        file_pos += bytes_transferred;
        response_call(ec, file_pos);
    }
#if 0
    std::cout << "in on_read_some caller\n";
    http::async_read_some(stream_, buffer_, file_parser_, std::bind(
        response_call,
        std::placeholders::_1,
        std::placeholders::_2));
#else
    std::cout << "in on_read_some inner\n";
    http::async_read_some(stream_, buffer_, file_parser_, std::bind(
        &session::on_read_some,
        shared_from_this(),
        std::placeholders::_1,
        std::placeholders::_2));
#endif
    return buffer_.size();
}
Here's main; messy, but:
struct lambda_type {
    bool bDone = false;
    void operator()(const boost::system::error_code ec, std::size_t bytes_transferred) {
        ;
    }
};

int main(int argc, char** argv)
{
    auto const host = "reserveanalyst.com";
    auto const port = "443";
    auto const target = "/downloads/demo.msi";
    int version = argc == 5 && !std::strcmp("1.0", argv[4]) ? 10 : 11;
    boost::asio::io_context ioc;
    ssl::context ctx{ ssl::context::sslv23_client };
    load_root_certificates(ctx);
    //ctx.load_verify_file("ca.pem");
    auto so = std::make_shared<session>(ioc, ctx);
    so->run(host, port, target, version);
    bool bDone = false;
    auto const lambda = [](const boost::system::error_code ec, std::size_t bytes_transferred) {
        std::cout << "data lambda bytes: " << bytes_transferred << " er: " << ec.message() << std::endl;
    };
    lambda_type lambda2;
    so->set_response_call(lambda);
    ioc.run();
    std::cout << "not in ioc.run()!!!!!!!!" << std::endl;
    so->async_read_some(lambda);
    // pseudo message pump when working.........
    for (;;) {
        std::this_thread::sleep_for(250ms);
        std::cout << "time" << std::endl;
    }
    return EXIT_SUCCESS;
}
And the stuff I've added to the class session:
class session : public std::enable_shared_from_this<session>
{
    using response_call_type = void(*)(boost::system::error_code ec, std::size_t bytes_transferred);
    http::response_parser<http::file_body> file_parser_;
    response_call_type response_call;
    //
    bool bValidConnection = false;
    std::size_t file_pos = 0;
    std::size_t file_size = 0;
public:
    auto& get_result() { return res_; }
    auto& get_buffer() { return buffer_; }
    void set_response_call(response_call_type the_call) { response_call = the_call; }
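(The capturing-lambda problem above comes down to this typedef: a lambda with captures cannot convert to a plain function pointer. The usual fix, which the updated code below adopts, is std::function; a minimal sketch:)
#include <functional>
// std::function can hold capturing lambdas; a raw function pointer cannot
using response_call_type =
    std::function<void(boost::system::error_code ec, std::size_t bytes_transferred)>;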
I've updated this post as I finally put the code to use, and I wanted the old method where I could download either to a file or to a string. A link to how Asio works, a great talk:
CppCon 2016: Michael Caisse, "Asynchronous IO with Boost.Asio"
As for my misunderstanding of how to pass a lambda, see Adam Nevraumont's answer.
There are two ways to compile this, using a type to select the method; both are shown at the beginning of main. You can construct either a file downloader or a string downloader by selecting the type of the beast parser. The parsers don't have the same constructs, so if constexpr compile-time conditionals are used. And I checked: a release build of the downloader is about 1K, so pretty lightweight for what it does. In the case of a small string you don't have to handle the callbacks; either pass an empty lambda or add the likes of:
if (response_call)
    response_call(resp_ok, test);
This looks to be a pretty clean way to get the job done, so I've updated this post as of 11/27/2022.
The code:
//
// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//------------------------------------------------------------------------------
//
// Example: HTTP SSL client, synchronous, usable in a thread with a message pump
// Added code to use from a message pump
// Also usable as body to a file download, or body to string
//
//------------------------------------------------------------------------------
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/version.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/error.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
#include <fstream>
// added: for std::function, numeric_limits, is_same_v, and the thread/sleep in main
#include <functional>
#include <limits>
#include <type_traits>
#include <thread>
#include <chrono>
// the boost shipped certificates
#include <boost/../libs/beast/example/common/root_certificates.hpp>

// TODO add your ssl libs as you would like
#ifdef _M_IX86
#pragma comment(lib, "libcrypto.lib")
#pragma comment(lib, "libssl.lib")
#elif _M_X64
#pragma comment(lib, "libcrypto-3-x64.lib")
#pragma comment(lib, "libssl-3-x64.lib")
#endif

namespace downloader {

namespace beast = boost::beast;  // from <boost/beast.hpp>
namespace http = beast::http;    // from <boost/beast/http.hpp>
namespace net = boost::asio;     // from <boost/asio.hpp>
namespace ssl = net::ssl;        // from <boost/asio/ssl.hpp>
using tcp = net::ip::tcp;        // from <boost/asio/ip/tcp.hpp>

// specialization if using < c++17; see both 'if constexpr' below.
// this is not needed otherwise
//namespace detail {
//    template<typename Type>
//    void open_file(http::parser<false, Type>& p, const char* name, boost::system::error_code& file_open_ec) { }
//    template<>
//    void open_file(http::parser<false, http::file_body>& p, const char* name, boost::system::error_code& file_open_ec) {
//        p.get().body().open(name, boost::beast::file_mode::write, file_open_ec);
//    }
//    template<typename Type>
//    std::string get_string(http::parser<false, Type>& p) { return std::string{}; }
//    template<>
//    std::string get_string(http::parser<false, http::string_body>& p) {
//        return p.get().body();
//    }
//} //namespace detail

enum responses {
    resp_null,
    resp_ok,
    resp_done,
    resp_error,
};

using response_call_type = std::function<void(responses, std::size_t)>;

template<typename ParserType>
struct download {
    // as these can be set with aggregate initialization
    const char* target_ = "/";
    const char* filename_ = "test.txt";
    const char* host_ = "lakeweb.net";
    std::string body_;
    using response_call_type = std::function<void(responses, std::size_t)>;
    response_call_type response_call;
    boost::asio::io_context ioc_;
    ssl::context ctx_{ ssl::context::sslv23_client };
    ssl::stream<tcp::socket> stream_{ ioc_, ctx_ };
    tcp::resolver resolver_{ ioc_ };
    boost::beast::flat_buffer buffer_;
    uint64_t file_size_{};
    int version{ 11 };

    void set_response_call(response_call_type the_call) { response_call = the_call; }
    uint64_t get_file_size() { return file_size_; }
    void stop() { ioc_.stop(); }
    bool stopped() { return ioc_.stopped(); }
    std::string get_body() { return std::move(body_); }

    void run() {
        try {
            // TODO should have a timer in case of a hang
            load_root_certificates(ctx_);
            // Set SNI Hostname (many hosts need this to handshake successfully)
            if (!SSL_set_tlsext_host_name(stream_.native_handle(), host_)) {
                boost::system::error_code ec{ static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category() };
                throw boost::system::system_error{ ec };
            }
            // TODO resolve is deprecated, use endpoint
            auto const results = resolver_.resolve(host_, "443");
            boost::asio::connect(stream_.next_layer(), results.begin(), results.end());
            stream_.handshake(ssl::stream_base::client);
            // Set up an HTTP GET request message
            http::request<http::string_body> req{ http::verb::get, target_, version };
            req.set(http::field::host, host_);
            req.set(http::field::user_agent, "mY aGENT");
            // Send the HTTP request to the remote host
            http::write(stream_, req);
            // Read the header
            boost::system::error_code file_open_ec;
            http::parser<false, ParserType> p;
            p.body_limit((std::numeric_limits<std::uint32_t>::max)());
            //detail::open_file(p, filename_, file_open_ec);
            // or => c++17
            if constexpr (std::is_same_v<ParserType, http::file_body>)
                p.get().body().open(filename_, boost::beast::file_mode::write, file_open_ec);
            http::read_header(stream_, buffer_, p);
            file_size_ = p.content_length().has_value() ? p.content_length().value() : 0;
            // Read the body
            uint64_t test{};
            boost::system::error_code rec;
            for (;;) {
                test += http::read_some(stream_, buffer_, p, rec);
                if (test >= file_size_) {
                    response_call(resp_done, 0);
                    break;
                }
                response_call(resp_ok, test);
            }
            // Gracefully close the stream
            boost::system::error_code ec;
            stream_.shutdown(ec);
            if (ec == boost::asio::error::eof)
            {
                // Rationale:
                // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
                ec.assign(0, ec.category());
            }
            if (ec)
                throw boost::system::system_error{ ec };
            //value = detail::get_string(p);
            // or => c++17
            if constexpr (std::is_same_v<ParserType, http::string_body>)
                body_ = p.get().body();
        }
        catch (std::exception const& e)
        {
            std::cerr << "Error: " << e.what() << std::endl;
            response_call(resp_error, -1);
        }
        ioc_.stop();
    }
};

} //namespace downloader
//comment to test with string body
#define THE_FILE_BODY_TEST
int main(int argc, char** argv)
{
    using namespace downloader;
#ifdef THE_FILE_BODY_TEST
    download<http::file_body> dl{ "/Nasiri%20Abarbekouh_Mahdi.pdf", "test.pdf" };
#else // string body test
    download<http::string_body> dl{ "/robots.txt" };
#endif
    responses dl_response{ resp_null };
    size_t cur_size{};
    auto static const lambda = [&dl_response, &dl, &cur_size](responses response, std::size_t bytes_transferred) {
        if ((dl_response = response) == resp_ok) {
            cur_size += bytes_transferred;
            size_t sizes = dl.get_file_size() - cur_size; // because size is what is left
            // drive your progress bar from here in a GUI app
        }
    };
    dl.set_response_call(lambda);
    std::thread thread{ [&dl]() { dl.run(); } };
    // thread has started, now the pseudo message pump
    bool quit = false; // true: as if a cancel button was pushed; won't finish download
    for (int i = 0; ; ++i) {
        switch (dl_response) { // ad hoc, as if messaged
        case resp_ok:
            std::cout << "from sendmessage: " << cur_size << std::endl;
            dl_response = resp_null;
            break;
        case resp_done:
            std::cout << "from sendmessage: done" << std::endl;
            dl_response = resp_null;
            break;
        case resp_error:
            std::cout << "from sendmessage: error" << std::endl;
            dl_response = resp_null;
        } // switch
        if (!(i % 5))
            std::cout << "in message pump, stopped: " << std::boolalpha << dl.stopped() << std::endl;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        if (quit && i == 10) // the cancel message
            dl.stop();
        if (!(i % 20) && dl.stopped()) { // dl job was quit or error or finished
            std::cout << "dl is stopped" << std::endl;
            break;
        }
    }
#ifdef THE_FILE_BODY_TEST
    std::cout << "file written named: '" << dl.filename_ << "'" << std::endl;
#else
    std::string res = dl.get_body();
    std::cout << "body retrieved:\n" << res << std::endl;
#endif
    if (thread.joinable()) // in the case a thread was never started
        thread.join();
    std::cout << "exiting, program all done" << std::endl;
    return EXIT_SUCCESS;
}
I strongly recommend against using the low-level [async_]read_some functions; instead, use http::[async_]read as intended, with http::response_parser<http::buffer_body>.
I do have an example of that - which is a little bit complicated by the fact that it also uses Boost Process to concurrently decompress the body data, but regardless it should show you how to use it:
How to read data from Internet using multi-threading with connecting only once?
I guess I could tailor it to your specific example given more complete code, but perhaps the above is good enough? Also see "Relay an HTTP message" in libs/beast/example/doc/http_examples.hpp which I used as "inspiration".
Caution: the buffer arithmetic is not intuitive. I think this is unfortunate and should not have been necessary, so pay (very) close attention to these samples for exactly how that's done.
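For reference, a minimal synchronous sketch of that intended pattern, the incremental-read idiom from the Beast docs (here, stream and buffer stand in for your session's members; error handling trimmed):
http::response_parser<http::buffer_body> parser;
http::read_header(stream, buffer, parser);
while (!parser.is_done()) {
    char scratch[4096];
    // point the body at our scratch buffer, then read one chunk
    parser.get().body().data = scratch;
    parser.get().body().size = sizeof(scratch);
    boost::system::error_code ec;
    http::read(stream, buffer, parser, ec);
    if (ec == http::error::need_buffer)
        ec = {}; // expected: it only means the scratch buffer was filled
    if (ec)
        break; // a real error
    std::size_t got = sizeof(scratch) - parser.get().body().size;
    // 'got' body bytes are now in scratch; hand them to a progress callback
}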

gRPC asynchronous rpc throws seg fault

Update: I found out that if I call contactRemoteDone() inside contactRemote(), it does process. But if I call it outside of contactRemote(), right after it, it throws a seg fault. So
for (int x = 0; x < 10; x++) {
    c1->contactRemote(x, request, response[x]);
    c1->contactRemoteDone(x, response[x]);
}
doesn't work.
I tried to write a small program to test my implementation of a gRPC asynchronous service. It throws a seg fault when I try to access the response, and when I used GDB to debug, I could not understand what the backtrace actually means; a Google search did not turn up anything either. The following is my code.
grpc_async_client.h
#include "sundial_grpc.grpc.pb.h"
#include "sundial_grpc.pb.h"
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using sundial_rpc::SundialRequest;
using sundial_rpc::SundialResponse;
using sundial_rpc::Sundial_GRPC_ASYNC;
#ifndef SAC
#define SAC
class TxnManager;
class Sundial_Async_Client{
public:
Sundial_Async_Client(std::string* channel);
Status contactRemote(uint64_t node_id,SundialRequest& request, SundialResponse* response);
Status contactRemoteDone(uint64_t node_id, SundialResponse* response);
private:
//std::unique_ptr<Sundial_GRPC_ASYNC::Stub> stub_[8];
std::unique_ptr<Sundial_GRPC_ASYNC::Stub> stub_;
CompletionQueue cq;
};
#endif
grpc_async_client.cpp
#include "sundial_grpc.grpc.pb.h"
#include "sundial_grpc.pb.h"
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
#include "grpc_async_client.h"
using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using sundial_rpc::SundialRequest;
using sundial_rpc::SundialResponse;
using sundial_rpc::Sundial_GRPC_ASYNC;
Sundial_Async_Client::Sundial_Async_Client(std::string* channel){
for(int i=0; i<2;i++){
if(i==1)
continue;
std::string server_address = channel[i];
printf("async client is connecting to server %s\n",server_address.c_str());
stub_=Sundial_GRPC_ASYNC::NewStub(grpc::CreateChannel(
server_address, grpc::InsecureChannelCredentials()));
}
};
//toDo: more than 2 nodes
Status Sundial_Async_Client:: contactRemote(uint64_t node_id,SundialRequest& request, SundialResponse* response){
ClientContext context;
Status status;
std::unique_ptr<ClientAsyncResponseReader<SundialResponse>> rpc(stub_->PrepareAsynccontactRemote(&context,request,&cq));
rpc->StartCall();
rpc->Finish(response, &status, (void*)1);
printf("sends a request %d \n",node_id);
return status;
}
Status Sundial_Async_Client::contactRemoteDone(uint64_t node_id, SundialResponse* response){
void* got_tag;
bool ok = false;
// Block until the next result is available in the completion queue "cq".
// The return value of Next should always be checked. This return value
// tells us whether there is any kind of event or the cq_ is shutting down.
GPR_ASSERT(cq.Next(&got_tag, &ok));
// Verify that the result from "cq" corresponds, by its tag, our previous
// request.
GPR_ASSERT(got_tag == (void*)1);
// ... and that the request was completed successfully. Note that "ok"
// corresponds solely to the request for updates introduced by Finish().
GPR_ASSERT(ok);
printf("node %d is done\n",node_id);
return Status::OK;
}
grpc_async_server.h
#include "sundial_grpc.grpc.pb.h"
#include "sundial_grpc.pb.h"
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using sundial_rpc::SundialRequest;
using sundial_rpc::SundialResponse;
using sundial_rpc::Sundial_GRPC_ASYNC;
#ifndef SAS
#define SAS
class SundialAsyncServiceImp {
public:
~SundialAsyncServiceImp();
void run();
std::mutex mtx;
private:
// Class encompasing the state and logic needed to serve a request.
class CallData {
public:
// Take in the "service" instance (in this case representing an asynchronous
// server) and the completion queue "cq" used for asynchronous communication
// with the gRPC runtime.
CallData(Sundial_GRPC_ASYNC::AsyncService* service, ServerCompletionQueue* cq);
void Proceed();
private:
// The means of communication with the gRPC runtime for an asynchronous
// server.
Sundial_GRPC_ASYNC::AsyncService* service_;
// The producer-consumer queue where for asynchronous server notifications.
ServerCompletionQueue* cq_;
// Context for the rpc, allowing to tweak aspects of it such as the use
// of compression, authentication, as well as to send metadata back to the
// client.
ServerContext ctx_;
// What we get from the client.
SundialRequest request_;
// What we send back to the client.
SundialResponse response_;
// The means to get back to the client.
ServerAsyncResponseWriter<SundialResponse> responder_;
// Let's implement a tiny state machine with the following states.
enum CallStatus { CREATE, PROCESS, FINISH };
CallStatus status_; // The current serving state.
};
void HandleRpcs();
std::unique_ptr<ServerCompletionQueue> cq_;
Sundial_GRPC_ASYNC::AsyncService service_;
std::unique_ptr<Server> server_;
};
#endif
grpc_async_server.cpp
#include "sundial_grpc.grpc.pb.h"
#include "sundial_grpc.pb.h"
#include <iostream>
#include <memory>
#include <string>
#include <grpcpp/grpcpp.h>
#include "grpc_async_server.h"
#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/ext/proto_server_reflection_plugin.h>
using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using sundial_rpc::SundialRequest;
using sundial_rpc::SundialResponse;
using sundial_rpc::Sundial_GRPC_ASYNC;
SundialAsyncServiceImp::~SundialAsyncServiceImp(){
server_->Shutdown();
// Always shutdown the completion queue after the server.
cq_->Shutdown();
}
void SundialAsyncServiceImp::run(){
uint32_t num_nodes = 0;
std::string port("0.0.0.0:50051");
ServerBuilder builder;
builder.AddListeningPort(port, grpc::InsecureServerCredentials());
builder.RegisterService(&service_);
cq_ = builder.AddCompletionQueue();
server_ = builder.BuildAndStart();
std::cout << "Server listening on " << port << std::endl<<"\n";
HandleRpcs();
}
SundialAsyncServiceImp::CallData::CallData(Sundial_GRPC_ASYNC::AsyncService* service, ServerCompletionQueue* cq):
service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {
Proceed();
}
int processRequest(SundialRequest* request, SundialResponse* response){
if (request->request_type() == SundialRequest::SYS_REQ) {
response->set_response_type( SundialResponse::SYS_RESP );
return 1;
}
int a =0;
//some processing
while(a<100000){
a++;
}
return 1;
}
void SundialAsyncServiceImp::CallData::Proceed(){
if (status_ == CREATE) {
status_ = PROCESS;
service_->RequestcontactRemote(&ctx_, &request_, &responder_, cq_, cq_,
this);
} else if (status_ == PROCESS) {
new CallData(service_, cq_);
int a = processRequest(&request_ , &response_);
status_ = FINISH;
responder_.Finish(response_, Status::OK, this);
} else {
GPR_ASSERT(status_ == FINISH);
delete this;
}
}
void SundialAsyncServiceImp::HandleRpcs(){
new CallData(&service_, cq_.get());
void* tag;
bool ok;
mtx.unlock();
while (true) {
GPR_ASSERT(cq_->Next(&tag, &ok));
GPR_ASSERT(ok);
static_cast<CallData*>(tag)->Proceed();
}
}
main.cpp
#include "grpc_async_server.h"
#include "grpc_async_client.h"
Sundial_Async_Client* c1;
SundialAsyncServiceImp* s1 ;
void * start_sync_rpc_server(void* input){
s1->run();
return NULL;
}
int main(){
std::string server_address("0.0.0.0:50051");
std::string channel_async[2];
channel_async[0]=server_address;
s1 = new SundialAsyncServiceImp();
pthread_t * pthread_rpc1 = new pthread_t;
s1->mtx.lock();
pthread_create(pthread_rpc1, NULL, start_sync_rpc_server,NULL);
s1->mtx.lock();
c1=new Sundial_Async_Client(channel_async);
SundialRequest request;
SundialResponse* response[10];
for(int i=0; i<10; i++){
SundialResponse r;
response[i]=&r;
}
for(int x=0; x<10; x++){
c1->contactRemote(x,request,response[x]);
}
printf("sends all requests out\n");
int y=0;
while(y<10){
c1->contactRemoteDone(y,response[y]);
}
}
It runs smoothly until it reaches the while loop in main, where it throws a seg fault. Under GDB I get the following:
Thread 1 "rundb" received signal SIGSEGV, Segmentation fault.
grpc::internal::InterceptorBatchMethodsImpl::RunInterceptors (
this=this#entry=0x7ffff6a9ec58)
at /usr/local/include/grpcpp/impl/codegen/interceptor_common.h:264
264 RunServerInterceptors();
(gdb) backtrace
#0 grpc::internal::InterceptorBatchMethodsImpl::RunInterceptors (
this=this#entry=0x7ffff6a9ec58)
at /usr/local/include/grpcpp/impl/codegen/interceptor_common.h:264
#1 0x000000000041c8aa in grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, grpc::internal::CallOpSendMessage, grpc::internal::CallOpClientSendClose, grpc::internal::CallOpRecvInitialMetadata, grpc::internal::CallOpRecvMessage<sundial_rpc::SundialResponse>, grpc::internal::CallOpClientRecvStatus>::RunInterceptorsPostRecv (this=0x7ffff6a9eb18)
at /usr/local/include/grpcpp/impl/codegen/call_op_set.h:826
#2 grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata, grpc::internal::CallOpSendMessage, grpc::internal::CallOpClientSendClose, grpc::internal::CallOpRecvInitialMetadata, grpc::internal::CallOpRecvMessage<sundial_rpc::SundialResponse>, grpc::internal::CallOpClientRecvStatus>::FinalizeResult (
this=0x7ffff6a9eb18, tag=0x7fffffffdd18, status=<optimized out>)
at /usr/local/include/grpcpp/impl/codegen/call_op_set.h:920
#3 0x00007ffff7c95f23 in grpc_impl::CompletionQueue::AsyncNextInternal(void**, bool*, gpr_timespec) () from /usr/local/lib/libgrpc++.so.1
#4 0x000000000040ab9c in grpc_impl::CompletionQueue::Next (ok=0x7fffffffdd17,
tag=0x7fffffffdd18, this=0x7ffff6a42078)
at /usr/local/include/grpcpp/impl/codegen/completion_queue_impl.h:179
#5 Sundial_Async_Client::contactRemoteDone (this=0x7ffff6a42070,
node_id=node_id#entry=0, response=response#entry=0x7fffffffdd70)
at grpc/grpc_async_client.cpp:93
--Type <RET> for more, q to quit, c to continue without paging--ret
#6 0x000000000040a6ac in main () at grpc/main.cpp:38
main.cpp:38 is c1->contactRemoteDone(y,response[y]); and grpc_async_client.cpp:93 is GPR_ASSERT(cq.Next(&got_tag, &ok));. I'm really lost on this error message and really wish someone could help me out.
Here:
for (int i = 0; i < 10; i++) {
    SundialResponse r;
    response[i] = &r;
}
Each SundialResponse is being destructed per iteration. Do this instead:
for (int i = 0; i < 10; i++) {
    SundialResponse* r = new SundialResponse();
    response[i] = r;
}
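Note that a plain new leaks unless the responses are deleted later. A sketch of an equally valid variant that simply gives the responses automatic storage outliving the async calls (assuming they are only used inside main):
#include <array>
// The responses live until the end of main(), outlasting the in-flight RPCs.
std::array<SundialResponse, 10> responses;
for (int x = 0; x < 10; x++) {
    c1->contactRemote(x, request, &responses[x]);
}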

C++ listener on event is not getting the event using the socket.io C++ client

I created a sample app for sending/receiving messages between a node.js app running as a socket.io server and a C++ client. Below is the code on the C++ client side:
sio::client io;
socket::ptr current_socket;
string w = "harshil";

io.set_open_listener([&]() {
    io.socket()->emit("message", w); // Able to send a message to the server
});

io.socket()->on("server", sio::socket::event_listener([&](event &e)
{
    cout << __LINE__ << endl; // Cannot print the line :(
}));

io.connect("http://127.0.0.1:8081");
Here you can see that the client is able to send a message to the server, but it cannot receive the message for the "server" event name. Can someone help me with it?
For those of you who are stuck like me, the sample code below should be helpful.
I created separate sample code: a node.js file that acts as the socket.io server, and a main.cpp file that acts as the client.
'use strict';
const express = require('express');
const app = express();
const serverHttp = require('http').Server(app);
const io = require('socket.io')(serverHttp);
const port = 8081;

io.on('connection', function (socket) {
    socket.on('message', function (data) {
        console.log("key received!!!" + data);
        socket.emit('server', 'hello socket io');
        console.log("sent server msg");
    });
});

serverHttp.listen(port, function() {
    console.log("init!!!");
});
The sample server app above receives a request from the client and emits a message back to the client.
#include "sio_client.h"
#include <unistd.h>
#include <functional>
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <string>
#define HIGHLIGHT(__O__) std::cout<<"\e[1;31m"<<__O__<<"\e[0m"<<std::endl
#define EM(__O__) std::cout<<"\e[1;30;1m"<<__O__<<"\e[0m"<<std::endl
#define MAIN_FUNC int main(int argc ,const char* args[])
using namespace sio;
using namespace std;
std::mutex _lock;
std::condition_variable_any _cond;
bool connect_finish = false;
class connection_listener
{
sio::client &handler;
public:
connection_listener(sio::client& h):
handler(h)
{
}
void on_connected()
{
_lock.lock();
_cond.notify_all();
connect_finish = true;
_lock.unlock();
}
void on_close(client::close_reason const& reason)
{
std::cout<<"sio closed "<<std::endl;
exit(0);
}
void on_fail()
{
std::cout<<"sio failed "<<std::endl;
exit(0);
}
};
socket::ptr current_socket;
void bind_events()
{
current_socket->on("server", sio::socket::event_listener_aux([&](string const& name, message::ptr const& data, bool isAck,message::list &ack_resp)
{
_lock.lock();
cout << name << endl;
cout << data->get_string() << endl;
_lock.unlock();
}));
}
MAIN_FUNC
{
sio::client h;
connection_listener l(h);
h.set_open_listener(std::bind(&connection_listener::on_connected, &l));
h.set_close_listener(std::bind(&connection_listener::on_close, &l,std::placeholders::_1));
h.set_fail_listener(std::bind(&connection_listener::on_fail, &l));
h.connect("http://127.0.0.1:8081");
_lock.lock();
if(!connect_finish)
{
cout << "wait\n";
_cond.wait(_lock);
}
_lock.unlock();
current_socket = h.socket();
string nickname;
while (nickname.length() == 0) {
HIGHLIGHT("Type your nickname:");
getline(cin, nickname);
}
current_socket->emit("message", nickname);
bind_events();
sleep(10);
h.sync_close();
h.clear_con_listeners();
return 0;
}
The sample client app above emits a message to the server and receives the message back from the server.

Unix domain sockets with Poco::Net::TCPServer

I created a TCPServer using the Poco::Net::TCPServer framework that uses a unix domain socket, and it seems to work. However, if I close the server and start it again, I get this error:
Net Exception: Address already in use: /tmp/app.SocketTest
What is the right way to deal with this error?
Are the TCPServerConnection, TCPServerConnectionFactory and socket objects cleaned up automatically, or do I need to implement their destructors or destroy them manually?
EDIT
I have two questions here. The first is answered by using remove() on the socket file. The other question is whether the clean-up in the Poco::Net::TCPServer framework is automatic, or whether it has to be implemented manually to prevent memory leaks.
Here is the code for the TCPServer:
#include "Poco/Util/ServerApplication.h"
#include "Poco/Net/TCPServer.h"
#include "Poco/Net/TCPServerConnection.h"
#include "Poco/Net/TCPServerConnectionFactory.h"
#include "Poco/Util/Option.h"
#include "Poco/Util/OptionSet.h"
#include "Poco/Util/HelpFormatter.h"
#include "Poco/Net/StreamSocket.h"
#include "Poco/Net/ServerSocket.h"
#include "Poco/Net/SocketAddress.h"
#include "Poco/File.h"
#include <fstream>
#include <iostream>
using Poco::Net::ServerSocket;
using Poco::Net::StreamSocket;
using Poco::Net::TCPServer;
using Poco::Net::TCPServerConnection;
using Poco::Net::TCPServerConnectionFactory;
using Poco::Net::SocketAddress;
using Poco::Util::ServerApplication;
using Poco::Util::Option;
using Poco::Util::OptionSet;
using Poco::Util::HelpFormatter;
class UnixSocketServerConnection: public TCPServerConnection
/// This class handles all client connections.
{
public:
UnixSocketServerConnection(const StreamSocket& s):
TCPServerConnection(s)
{
}
void run()
{
try
{
/*char buffer[1024];
int n = 1;
while (n > 0)
{
n = socket().receiveBytes(buffer, sizeof(buffer));
EchoBack(buffer);
}*/
std::string message;
char buffer[1024];
int n = 1;
while (n > 0)
{
n = socket().receiveBytes(buffer, sizeof(buffer));
buffer[n] = '\0';
message += buffer;
if(sizeof(buffer) > n && message != "")
{
EchoBack(message);
message = "";
}
}
}
catch (Poco::Exception& exc)
{
std::cerr << "Error: " << exc.displayText() << std::endl;
}
std::cout << "Disconnected." << std::endl;
}
private:
inline void EchoBack(std::string message)
{
std::cout << "Message: " << message << std::endl;
socket().sendBytes(message.data(), message.length());
}
};
class UnixSocketServerConnectionFactory: public TCPServerConnectionFactory
/// A factory
{
public:
UnixSocketServerConnectionFactory()
{
}
TCPServerConnection* createConnection(const StreamSocket& socket)
{
std::cout << "Got new connection." << std::endl;
return new UnixSocketServerConnection(socket);
}
private:
};
class UnixSocketServer: public Poco::Util::ServerApplication
/// The main application class.
{
public:
UnixSocketServer(): _helpRequested(false)
{
}
~UnixSocketServer()
{
}
protected:
void initialize(Application& self)
{
loadConfiguration(); // load default configuration files, if present
ServerApplication::initialize(self);
}
void uninitialize()
{
ServerApplication::uninitialize();
}
void defineOptions(OptionSet& options)
{
ServerApplication::defineOptions(options);
options.addOption(
Option("help", "h", "display help information on command line arguments")
.required(false)
.repeatable(false));
}
void handleOption(const std::string& name, const std::string& value)
{
ServerApplication::handleOption(name, value);
if (name == "help")
_helpRequested = true;
}
void displayHelp()
{
HelpFormatter helpFormatter(options());
helpFormatter.setCommand(commandName());
helpFormatter.setUsage("OPTIONS");
helpFormatter.setHeader("A server application to test unix domain sockets.");
helpFormatter.format(std::cout);
}
int main(const std::vector<std::string>& args)
{
if (_helpRequested)
{
displayHelp();
}
else
{
// set-up unix domain socket
Poco::File socketFile("/tmp/app.SocketTest");
SocketAddress unixSocket(SocketAddress::UNIX_LOCAL, socketFile.path());
// set-up a server socket
ServerSocket svs(unixSocket);
// set-up a TCPServer instance
TCPServer srv(new UnixSocketServerConnectionFactory, svs);
// start the TCPServer
srv.start();
// wait for CTRL-C or kill
waitForTerminationRequest();
// Stop the TCPServer
srv.stop();
}
return Application::EXIT_OK;
}
private:
bool _helpRequested;
};
int main(int argc, char **argv) {
UnixSocketServer app;
return app.run(argc, argv);
}
You don't need to worry about deallocating memory. It is all done by the library.
TCPServer srv(new UnixSocketServerConnectionFactory, svs);
^^^
The instance of UnixSocketServerConnectionFactory is deleted by TCPServer, according to the Poco reference:
The server takes ownership of the TCPServerConnectionFactory and
deletes it when it's no longer needed.
TCPServerConnection* createConnection(const StreamSocket& socket)
{
std::cout << "Got new connection." << std::endl;
return new UnixSocketServerConnection(socket);
^^^
}
Instances of UnixSocketServerConnection are deleted by the Poco library code as well:
As soon as the run() method returns, the server connection object is
destroyed and the connection is automatically closed.
The problem with Poco::File was that the destructor of Poco::File does not remove the file; you have to do that explicitly with the remove() method.
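So a minimal sketch of the fix in main(), before binding, is to remove a stale socket file left over from a previous run; this is what clears the "Address already in use" error on restart:
Poco::File socketFile("/tmp/app.SocketTest");
if (socketFile.exists())
    socketFile.remove(); // clean up the socket file from the previous run
SocketAddress unixSocket(SocketAddress::UNIX_LOCAL, socketFile.path());
ServerSocket svs(unixSocket);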

How to implement websocket++ ping handler?

I'm trying to detect lost connections that closed without sending the close frame, by sending pings from a websocket++ application.
I'm having trouble setting up the handler.
I initially tried to set it up the way the other handlers are set up in the broadcast_server example:
m_server.set_ping_handler(bind(&broadcast_server::on_m_server_ping,this,::_1,::_2));
That gives this error:
note: candidate is:
websocketpp/endpoint.hpp:240:10: note: void websocketpp::endpoint::set_ping_handler(websocketpp::ping_handler) [with connection = websocketpp::connection; config = websocketpp::config::asio_tls_client; websocketpp::ping_handler = std::function, std::basic_string)>]
void set_ping_handler(ping_handler h) {
I thought that setting up a typedef, as in this related problem, would solve it, but putting it outside the class broadcast_server makes it impossible to access m_server.
How can this handler be properly implemented?
Includes & flags
Boost 1.54
#include <websocketpp/config/asio.hpp>
#include <websocketpp/server.hpp>
#include <websocketpp/common/thread.hpp>
typedef websocketpp::server<websocketpp::config::asio_tls> server;
flags
-std=c++0x -I ~/broadcast_server -D_WEBSOCKETPP_CPP11_STL_
-D_WEBSOCKETPP_NO_CPP11_REGEX_ -lboost_regex -lboost_system
-lssl -lcrypto -pthread -lboost_thread
typedef
typedef websocketpp::lib::function<bool(connection_hdl,std::string)> ping_handler;
Solving this is quite easy. First, the definition in websocketpp/connection.hpp:
/// The type and function signature of a ping handler
/**
 * The ping handler is called when the connection receives a WebSocket ping
 * control frame. The string argument contains the ping payload. The payload is
 * a binary string up to 126 bytes in length. The ping handler returns a bool,
 * true if a pong response should be sent, false if the pong response should be
 * suppressed.
 */
typedef lib::function<bool(connection_hdl,std::string)> ping_handler;
gives the basic idea that the function must have the signature:
bool on_ping(connection_hdl hdl, std::string s)
{
    /* Do something */
    return true;
}
Now everything falls into place:
m_server.set_ping_handler(bind(&broadcast_server::on_ping,this,::_1,::_2));
The complete modified example source looks like:
#include <websocketpp/config/asio_no_tls.hpp>
#include <websocketpp/server.hpp>
#include <iostream>
#include <queue>
#include <set>
/*#include <boost/thread.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>*/
#include <websocketpp/common/thread.hpp>

typedef websocketpp::server<websocketpp::config::asio> server;

using websocketpp::connection_hdl;
using websocketpp::lib::placeholders::_1;
using websocketpp::lib::placeholders::_2;
using websocketpp::lib::bind;
using websocketpp::lib::thread;
using websocketpp::lib::mutex;
using websocketpp::lib::unique_lock;
using websocketpp::lib::condition_variable;

/* on_open insert connection_hdl into channel
 * on_close remove connection_hdl from channel
 * on_message queue send to all channels
 */
enum action_type {
    SUBSCRIBE,
    UNSUBSCRIBE,
    MESSAGE
};

struct action {
    action(action_type t, connection_hdl h) : type(t), hdl(h) {}
    action(action_type t, connection_hdl h, server::message_ptr m)
        : type(t), hdl(h), msg(m) {}

    action_type type;
    websocketpp::connection_hdl hdl;
    server::message_ptr msg;
};

class broadcast_server {
public:
    broadcast_server() {
        // Initialize Asio Transport
        m_server.init_asio();

        // Register handler callbacks
        m_server.set_open_handler(bind(&broadcast_server::on_open,this,::_1));
        m_server.set_close_handler(bind(&broadcast_server::on_close,this,::_1));
        m_server.set_message_handler(bind(&broadcast_server::on_message,this,::_1,::_2));
        m_server.set_ping_handler(bind(&broadcast_server::on_ping,this,::_1,::_2));
    }

    void run(uint16_t port) {
        // listen on specified port
        m_server.listen(port);

        // Start the server accept loop
        m_server.start_accept();

        // Start the ASIO io_service run loop
        try {
            m_server.run();
        } catch (const std::exception & e) {
            std::cout << e.what() << std::endl;
        } catch (websocketpp::lib::error_code e) {
            std::cout << e.message() << std::endl;
        } catch (...) {
            std::cout << "other exception" << std::endl;
        }
    }

    void on_open(connection_hdl hdl) {
        unique_lock<mutex> lock(m_action_lock);
        //std::cout << "on_open" << std::endl;
        m_actions.push(action(SUBSCRIBE,hdl));
        lock.unlock();
        m_action_cond.notify_one();
    }

    void on_close(connection_hdl hdl) {
        unique_lock<mutex> lock(m_action_lock);
        //std::cout << "on_close" << std::endl;
        m_actions.push(action(UNSUBSCRIBE,hdl));
        lock.unlock();
        m_action_cond.notify_one();
    }

    void on_message(connection_hdl hdl, server::message_ptr msg) {
        // queue message up for sending by processing thread
        unique_lock<mutex> lock(m_action_lock);
        //std::cout << "on_message" << std::endl;
        m_actions.push(action(MESSAGE,hdl,msg));
        lock.unlock();
        m_action_cond.notify_one();
    }

    bool on_ping(connection_hdl hdl, std::string s)
    {
        /* Do something */
        return true;
    }

    void process_messages() {
        while(1) {
            unique_lock<mutex> lock(m_action_lock);

            while(m_actions.empty()) {
                m_action_cond.wait(lock);
            }

            action a = m_actions.front();
            m_actions.pop();

            lock.unlock();

            if (a.type == SUBSCRIBE) {
                unique_lock<mutex> con_lock(m_connection_lock);
                m_connections.insert(a.hdl);
            } else if (a.type == UNSUBSCRIBE) {
                unique_lock<mutex> con_lock(m_connection_lock);
                m_connections.erase(a.hdl);
            } else if (a.type == MESSAGE) {
                unique_lock<mutex> con_lock(m_connection_lock);

                con_list::iterator it;
                for (it = m_connections.begin(); it != m_connections.end(); ++it) {
                    m_server.send(*it,a.msg);
                }
            } else {
                // undefined.
            }
        }
    }

private:
    typedef std::set<connection_hdl,std::owner_less<connection_hdl>> con_list;

    server m_server;
    con_list m_connections;
    std::queue<action> m_actions;

    mutex m_action_lock;
    mutex m_connection_lock;
    condition_variable m_action_cond;
};

int main() {
    try {
        broadcast_server server_instance;

        // Start a thread to run the processing loop
        thread t(bind(&broadcast_server::process_messages,&server_instance));

        // Run the asio loop with the main thread
        server_instance.run(9002);

        t.join();
    } catch (std::exception & e) {
        std::cout << e.what() << std::endl;
    }
}