I am writing Mosquitto code that consumes messages after subscribing to a particular topic. Now I want to set configuration options for Mosquitto such as:
autosave_interval 100
persistence true
persistence_location /var/lib/mosquitto/
persistence_file mosquitto.db
But I have no idea how to set this configuration in C++. I tried to Google it but could not find anything. Please help. Below is the C++ code for Mosquitto.
myMosq.h
/*
* myMosq.h
*
* Created on: Jul 28, 2016
* Author: nilav
*/
#include <iostream>
#ifndef MYMOSQ_H_
#define MYMOSQ_H_
#include <mosquittopp.h>
#include <mosquitto.h>
using namespace std;
class myMosq : public mosqpp::mosquittopp
{
private:
const char * host;
const char * id;
const char * topic;
int port;
int keepalive;
void on_connect(int rc);
void on_message(const struct mosquitto_message *message);
void on_disconnect(int rc);
void on_subscribe(int mid, int qos_count, const int *granted_qos);
void on_publish(int mid);
void on_unsubscribe(int mid);
public:
myMosq(const char *id, const char * _topic, const char *host, int port);
~myMosq();
bool send_message(string responseMessage);
bool receive_message();
void writeToDatabase(string query);
};
#endif
myMosq.cpp
#include <cstdio>
#include <cstring>
#include <iostream>
#include "myMosq.h"
#include <mosquittopp.h>
#include "Configuration.h"
#include "Databases.h"
using namespace std;
Configuration configuration;
myMosq::myMosq(const char * _id,const char * _topic, const char * _host, int _port) : mosquittopp(_id)
{
mosqpp::lib_init(); // Mandatory initialization for mosquitto library
this->keepalive = 60; // Basic configuration setup for myMosq class
this->id = _id;
this->port = _port;
this->host = _host;
this->topic = _topic;
connect_async(host, // non blocking connection to broker request
port,
keepalive);
loop_start(); // Start thread managing connection / publish / subscribe
}
myMosq::~myMosq() {
loop_stop(); // Kill the thread
mosqpp::lib_cleanup(); // Mosquitto library cleanup
}
bool myMosq::receive_message()
{
int set = subscribe(NULL, configuration.subscriptionTopic.c_str(), 2);
return (set == MOSQ_ERR_SUCCESS); // subscribe() returns an error code, not a bool
}
bool myMosq::send_message(string responseMessage) {
int ret = publish(NULL, configuration.producerTopic.c_str(), responseMessage.size(), responseMessage.c_str(), 1, false);
return (ret == MOSQ_ERR_SUCCESS); // '==', not '=': the original assigned instead of compared
}
void myMosq::on_disconnect(int rc) {
std::cout << ">> myMosq - disconnection(" << rc << ")" << std::endl;
}
void myMosq::on_connect(int rc)
{
if ( rc == 0 ) {
std::cout << ">> myMosq - connected with server" << std::endl;
} else {
std::cout << ">> myMosq - Impossible to connect with server(" << rc << ")" << std::endl;
}
}
void myMosq::on_message(const struct mosquitto_message *message) {
// The payload is not guaranteed to be null-terminated, so use payloadlen.
string str((char *)message->payload, message->payloadlen);
writeToDatabase(str);
}
void myMosq::on_subscribe(int mid, int qos_count, const int *granted_qos)
{
std::cout << ">> subscription succeeded (" << mid << ") " << std::endl;
}
void myMosq::on_publish(int mid) {
std::cout << ">> myMosq - Message (" << mid << ") succeed to be published " << std::endl;
}
void myMosq::writeToDatabase(string query) {
// Stack allocation: the original heap-allocated object was never deleted and leaked.
Databases database(configuration.db,
configuration.dbPort, configuration.username, configuration.password,
configuration.schema);
database.writeDatabase(query);
if (database.responseMessage == "") {
database.responseMessage = "SUCCESS";
}
this->send_message(database.responseMessage);
}
void myMosq::on_unsubscribe(int mid) {
cout << "unsubscribed";
}
The options you are seeing are for the mosquitto broker, which acts almost like a server.
The Mosquitto C++ library is a client library, and those options (e.g. autosave_interval) are not valid for a client. Regarding persistence, the Mosquitto C/C++ client library doesn't offer file persistence currently.
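For reference, those options belong in the broker's configuration file, conventionally /etc/mosquitto/mosquitto.conf on Linux (the exact path may differ on your system), and take effect when the broker is started with that file:

# /etc/mosquitto/mosquitto.conf -- broker-side settings, not settable from a client
autosave_interval 100
persistence true
persistence_location /var/lib/mosquitto/
persistence_file mosquitto.db

You would then run the broker with mosquitto -c /etc/mosquitto/mosquitto.conf; the client code above needs no changes.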
I have created a client-server program based on one of the tests in the gRPC repo.
The UDP code in gRPC is not built on top of its RPC layer, and so there is no notion of stubs, etc.
My code works, though I've noticed that under just mild stress, a huge fraction of messages get dropped, and I'm not sure whether that's entirely due to the lossy nature of UDP or whether it's something about my code.
I have two questions:
Main question: Is there a gRPC way to set deadlines for UDP messages? I am familiar with ClientContext and its deadline feature (a sketch of what I mean follows below), but I don't know how to use it in non-TCP, RPC-less code. If not, what is the best way to achieve this?
Is a drop rate of 50% sensible for UDP communication over localhost?
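For reference, this is the stub-based deadline mechanism I mean; MyService, MyRequest, MyResponse and MyMethod are placeholders for any generated service, and the question is whether anything comparable exists for the raw UDP path:

#include <chrono>
#include <grpcpp/grpcpp.h>

// Ordinary stub-based usage: the call fails with DEADLINE_EXCEEDED if the
// server does not respond within the deadline set on the ClientContext.
void callWithDeadline(MyService::Stub& stub) {
    grpc::ClientContext context;
    context.set_deadline(std::chrono::system_clock::now() +
                         std::chrono::milliseconds(500));
    MyRequest req;
    MyResponse res;
    grpc::Status status = stub.MyMethod(&context, req, &res);
    if (status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) {
        // handle the timeout
    }
}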
My code (It's quite long, so just attaching it for reference. My main question doesn't require reading the code):
#include <netdb.h>
#include <unistd.h>   // write(), close()
#include <cassert>    // assert()
#include <cstring>    // memcpy()
#include <iostream>   // cout, cerr
#include <string>
#include <thread>
#include <vector>
// grpc headers
#include <grpcpp/grpcpp.h>
#include "src/core/lib/iomgr/udp_server.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
using namespace std;
int client_port = 6666;
int server_port = 5555;
int num_of_msgs = 1000;
int listening_port;
int remote_port;
int fd;
int received_msgs_cnt = 0;
vector<bool> is_received(num_of_msgs, false);
enum Role {
CLIENT,
SERVER
};
struct Request {
int id;
};
struct Response {
int id;
};
Role role;
bool udpServerFinished = false;
// Note: hostname and port are unused; the payload is sent over the already
// connect()ed UDP socket stored in the global fd.
void sendUdp(const char *hostname, int port, const char* payload, size_t size) {
(void)hostname; (void)port;
auto transferred = write(fd, (void*)payload, size);
assert(transferred >= 0 && size == (size_t)transferred);
}
/***************************************
* UDP Handler class
* (will be generated by factory class)
* upon receiving a new message, the Read()
* function is invoked
***************************************/
class UdpHandler : public GrpcUdpHandler {
public:
UdpHandler(grpc_fd *emfd, void *user_data):
GrpcUdpHandler(emfd, user_data), emfd_(emfd) {
}
virtual ~UdpHandler() {}
static void startLoop(volatile bool &udpServerFinished) {
grpc_core::ExecCtx exec_ctx;
grpc_millis deadline;
gpr_mu_lock(g_mu);
while (!udpServerFinished) {
deadline = grpc_timespec_to_millis_round_up(gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(10000, GPR_TIMESPAN)));
grpc_pollset_worker *worker = nullptr;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work", grpc_pollset_work(UdpHandler::g_pollset, &worker, deadline)));
gpr_mu_unlock(UdpHandler::g_mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(UdpHandler::g_mu);
}
gpr_mu_unlock(g_mu);
}
static grpc_pollset *g_pollset;
static gpr_mu *g_mu;
public:
static int g_num_listeners;
protected:
bool Read() override {
char read_buffer[512];
ssize_t byte_count;
gpr_mu_lock(UdpHandler::g_mu);
byte_count = recv(grpc_fd_wrapped_fd(emfd()), read_buffer, sizeof(read_buffer), 0);
processIncomingMsg((void*)read_buffer, byte_count);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(UdpHandler::g_mu);
return false;
}
void processIncomingMsg(void* msg, ssize_t size) {
received_msgs_cnt++;
(void)size;
int id;
if (role == Role::CLIENT) {
Response res;
assert(size == sizeof(Response));
memcpy((void*)&res, (void*)msg, size);
id = res.id;
cout << "Msg: response for request " << res.id << endl;
}
else {
Request req;
assert(size == sizeof(Request));
memcpy((void*)&req, (void*)msg, size);
id = req.id;
cout << "Msg: request " << req.id << endl;
// send response
Response res;
res.id = req.id;
sendUdp("127.0.0.1", remote_port, (const char*)&res, sizeof(Response));
}
// check for termination condition (both for client and server)
if (received_msgs_cnt == num_of_msgs) {
cout << "This is the last msg" << endl;
udpServerFinished = true;
}
// mark the id of the current message
is_received[id] = true;
// if this was the last message, print the missing msg ids
if (id == num_of_msgs - 1) {
cout << "missing ids: ";
for (int i = 0; i < num_of_msgs; i++) {
if (is_received[i] == false)
cout << i << ", ";
}
cout << endl;
cout << "% of missing messages: "
<< 1.0 - ((double)received_msgs_cnt / num_of_msgs) << endl;
}
}
void OnCanWrite(void* /*user_data*/, grpc_closure* /*notify_on_write_closure*/) override {
gpr_mu_lock(g_mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(g_mu);
}
void OnFdAboutToOrphan(grpc_closure *orphan_fd_closure, void* /*user_data*/) override {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, orphan_fd_closure, GRPC_ERROR_NONE);
}
grpc_fd *emfd() { return emfd_; }
private:
grpc_fd *emfd_;
};
int UdpHandler::g_num_listeners = 1;
grpc_pollset *UdpHandler::g_pollset;
gpr_mu *UdpHandler::g_mu;
/****************************************
* Factory class (generated UDP handler)
****************************************/
class UdpHandlerFactory : public GrpcUdpHandlerFactory {
public:
GrpcUdpHandler *CreateUdpHandler(grpc_fd *emfd, void *user_data) override {
UdpHandler *handler = new UdpHandler(emfd, user_data);
return handler;
}
void DestroyUdpHandler(GrpcUdpHandler *handler) override {
delete reinterpret_cast<UdpHandler *>(handler);
}
};
/****************************************
* Main function
****************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
string r(argv[1]);
if (r == "client") {
cout << "Client is initializing to send requests!" << endl;
role = Role::CLIENT;
listening_port = client_port;
remote_port = server_port;
}
else if (r == "server") {
cout << "Server is initializing to accept requests!" << endl;
role = Role::SERVER;
listening_port = server_port;
remote_port = client_port;
}
else {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
/********************************************************
* Initialize UDP Listener
********************************************************/
/* Initialize the grpc library. After it's called,
* a matching invocation to grpc_shutdown() is expected. */
grpc_init();
grpc_core::ExecCtx exec_ctx;
UdpHandler::g_pollset = static_cast<grpc_pollset *>(
gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(UdpHandler::g_pollset, &UdpHandler::g_mu);
grpc_resolved_address resolved_addr;
struct sockaddr_storage *addr =
reinterpret_cast<struct sockaddr_storage *>(resolved_addr.addr);
int svrfd;
grpc_udp_server *s = grpc_udp_server_create(nullptr);
grpc_pollset *pollsets[1];
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
addr->ss_family = AF_INET;
grpc_sockaddr_set_port(&resolved_addr, listening_port);
/* setup UDP server */
UdpHandlerFactory handlerFactory;
int rcv_buf_size = 1024;
int snd_buf_size = 1024;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
snd_buf_size, &handlerFactory,
UdpHandler::g_num_listeners) > 0);
svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0);
GPR_ASSERT(getsockname(svrfd, (struct sockaddr *) addr,
(socklen_t *) &resolved_addr.len) == 0);
GPR_ASSERT(resolved_addr.len <= sizeof(struct sockaddr_storage));
pollsets[0] = UdpHandler::g_pollset;
grpc_udp_server_start(s, pollsets, 1, nullptr);
string addr_str = grpc_sockaddr_to_string(&resolved_addr, 1);
cout << "UDP Server listening on: " << addr_str << endl;
thread udpPollerThread(
UdpHandler::startLoop, ref(udpServerFinished));
/********************************************************
* Establish connection to the other side
********************************************************/
struct sockaddr_in serv_addr;
struct hostent *server = gethostbyname("127.0.0.1");
bzero((char *) &serv_addr, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
bcopy((char *) server->h_addr,
(char *) &serv_addr.sin_addr.s_addr,
server->h_length);
serv_addr.sin_port = htons(remote_port);
fd = socket(serv_addr.sin_family, SOCK_DGRAM, 0);
GPR_ASSERT(fd >= 0);
GPR_ASSERT(connect(fd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) == 0);
/********************************************************
* Send requests
********************************************************/
if (role == Role::CLIENT) {
static int counter = 0;
for (int i = 0; i < num_of_msgs; i++) {
Request req;
req.id = counter++;
cout << "Sending request " << req.id << endl;
sendUdp("127.0.0.1", remote_port, (char*)&req, sizeof(Request));
}
}
/********************************************************
* wait for client to finish
********************************************************/
udpPollerThread.join();
/********************************************************
* cleanup
********************************************************/
close(fd);
gpr_free(UdpHandler::g_pollset);
grpc_shutdown();
cout << "finished successfully!" << endl;
return 0;
}
Compiled with:
-std=c++17 -I$(GRPC_DIR) -I$(GRPC_DIR)/third_party/abseil-cpp.
Linked with:
pkg-config --libs grpc++
I created a TCPServer using the Poco::Net::TCPServer framework that uses a unix domain socket, and it seems to work. However, if I close the server and start it again I get this error:
Net Exception: Address already in use: /tmp/app.SocketTest
What is the right way to deal with this error?
Are the TCPServerConnections, TCPServerConnectionFactory and sockets cleaned up automatically, or do I need to implement their destructors or destroy them manually?
EDIT
I have two questions here. The first is answered by calling remove() on the socket file. The other is whether the clean-up in the Poco::Net::TCPServer framework is automatic, or whether it has to be implemented manually to prevent memory leaks.
Here is the code for the TCPServer:
#include "Poco/Util/ServerApplication.h"
#include "Poco/Net/TCPServer.h"
#include "Poco/Net/TCPServerConnection.h"
#include "Poco/Net/TCPServerConnectionFactory.h"
#include "Poco/Util/Option.h"
#include "Poco/Util/OptionSet.h"
#include "Poco/Util/HelpFormatter.h"
#include "Poco/Net/StreamSocket.h"
#include "Poco/Net/ServerSocket.h"
#include "Poco/Net/SocketAddress.h"
#include "Poco/File.h"
#include <fstream>
#include <iostream>
using Poco::Net::ServerSocket;
using Poco::Net::StreamSocket;
using Poco::Net::TCPServer;
using Poco::Net::TCPServerConnection;
using Poco::Net::TCPServerConnectionFactory;
using Poco::Net::SocketAddress;
using Poco::Util::ServerApplication;
using Poco::Util::Option;
using Poco::Util::OptionSet;
using Poco::Util::HelpFormatter;
class UnixSocketServerConnection: public TCPServerConnection
/// This class handles all client connections.
{
public:
UnixSocketServerConnection(const StreamSocket& s):
TCPServerConnection(s)
{
}
void run()
{
try
{
/*char buffer[1024];
int n = 1;
while (n > 0)
{
n = socket().receiveBytes(buffer, sizeof(buffer));
EchoBack(buffer);
}*/
std::string message;
char buffer[1024];
int n = 1;
while (n > 0)
{
// Read at most sizeof(buffer) - 1 bytes so that the null terminator
// below cannot write past the end of the buffer (the original
// overflowed whenever a full buffer was received).
n = socket().receiveBytes(buffer, sizeof(buffer) - 1);
buffer[n] = '\0';
message += buffer;
if((size_t)n < sizeof(buffer) - 1 && message != "")
{
EchoBack(message);
message = "";
}
}
}
catch (Poco::Exception& exc)
{
std::cerr << "Error: " << exc.displayText() << std::endl;
}
std::cout << "Disconnected." << std::endl;
}
private:
inline void EchoBack(const std::string& message)
{
std::cout << "Message: " << message << std::endl;
socket().sendBytes(message.data(), message.length());
}
};
class UnixSocketServerConnectionFactory: public TCPServerConnectionFactory
/// A factory
{
public:
UnixSocketServerConnectionFactory()
{
}
TCPServerConnection* createConnection(const StreamSocket& socket)
{
std::cout << "Got new connection." << std::endl;
return new UnixSocketServerConnection(socket);
}
private:
};
class UnixSocketServer: public Poco::Util::ServerApplication
/// The main application class.
{
public:
UnixSocketServer(): _helpRequested(false)
{
}
~UnixSocketServer()
{
}
protected:
void initialize(Application& self)
{
loadConfiguration(); // load default configuration files, if present
ServerApplication::initialize(self);
}
void uninitialize()
{
ServerApplication::uninitialize();
}
void defineOptions(OptionSet& options)
{
ServerApplication::defineOptions(options);
options.addOption(
Option("help", "h", "display help information on command line arguments")
.required(false)
.repeatable(false));
}
void handleOption(const std::string& name, const std::string& value)
{
ServerApplication::handleOption(name, value);
if (name == "help")
_helpRequested = true;
}
void displayHelp()
{
HelpFormatter helpFormatter(options());
helpFormatter.setCommand(commandName());
helpFormatter.setUsage("OPTIONS");
helpFormatter.setHeader("A server application to test unix domain sockets.");
helpFormatter.format(std::cout);
}
int main(const std::vector<std::string>& args)
{
if (_helpRequested)
{
displayHelp();
}
else
{
// set-up unix domain socket
Poco::File socketFile("/tmp/app.SocketTest");
SocketAddress unixSocket(SocketAddress::UNIX_LOCAL, socketFile.path());
// set-up a server socket
ServerSocket svs(unixSocket);
// set-up a TCPServer instance
TCPServer srv(new UnixSocketServerConnectionFactory, svs);
// start the TCPServer
srv.start();
// wait for CTRL-C or kill
waitForTerminationRequest();
// Stop the TCPServer
srv.stop();
}
return Application::EXIT_OK;
}
private:
bool _helpRequested;
};
int main(int argc, char **argv) {
UnixSocketServer app;
return app.run(argc, argv);
}
You don't need to worry about deallocating memory; it is all done by the library.
TCPServer srv(new UnixSocketServerConnectionFactory, svs);
^^^
The instance of UnixSocketServerConnectionFactory is deleted by TCPServer, according to the Poco reference:
The server takes ownership of the TCPServerConnectionFactory and
deletes it when it's no longer needed.
TCPServerConnection* createConnection(const StreamSocket& socket)
{
std::cout << "Got new connection." << std::endl;
return new UnixSocketServerConnection(socket);
^^^
}
Instances of UnixSocketServerConnection are deleted by the Poco library code as well:
As soon as the run() method returns, the server connection object is
destroyed and the connection is automatically closed.
The problem with Poco::File is that its destructor does not remove the file; you have to do that explicitly with the remove() method.
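A minimal sketch of that fix, using the same socket path as in the question; it goes at the top of UnixSocketServer::main(), before the ServerSocket is created:

// Delete a leftover socket file from a previous run so that bind()
// does not fail with "Address already in use".
Poco::File socketFile("/tmp/app.SocketTest");
if (socketFile.exists())
    socketFile.remove();
SocketAddress unixSocket(SocketAddress::UNIX_LOCAL, socketFile.path());
ServerSocket svs(unixSocket);

The same remove() can also be called after srv.stop() so the file does not linger between runs.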
We've made good progress in getting GRPC running under RHEL 7.
Our application has one rather complicated structure with three levels of nesting, where the outer level uses the "oneof" keyword.
We find that all our other structures run fine, but this one gives us an RPC failure with code=14.
We've simplified this part of the application as much as possible so it can hopefully be recompiled and run easily.
Here's the .proto file, updated to accommodate Uli's question:
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.debug";
option java_outer_classname = "DebugProto";
option objc_class_prefix = "DEBUG";
package DEBUGpackage;
service DEBUGservice {
rpc DEBUG_val_container_get (input_int32_request) returns (outer_container) {}
}
message input_int32_request {
int32 ival = 1;
}
message inner_container {
repeated uint32 val_array = 1;
}
message middle_container {
inner_container vac = 1;
}
message other_container {
int32 other_val = 1;
}
message outer_container {
oneof reply {
middle_container r1 = 1;
other_container r2 = 2;
}
}
(Please note that the java option lines in this .proto file are only there because they appear in the gRPC website examples. Our code is entirely C++, with no Java; we don't know whether that means we can drop some of these "option java..." lines.)
Here's our client source code:
#include <iostream>
#include <memory>
#include <string>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include <thread>
#include <unistd.h>
#include "debug.grpc.pb.h"
using grpc::Channel;
using grpc::ClientAsyncResponseReader;
using grpc::ClientContext;
using grpc::CompletionQueue;
using grpc::Status;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::inner_container;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;
class DEBUGClient {
public:
explicit DEBUGClient(std::shared_ptr<Channel> channel)
: stub_(DEBUGservice::NewStub(channel)) {}
void DEBUG_val_container_get() {
std::cout << "in DEBUG_val_container_get" << std::endl;
// Data we are sending to the server
input_int32_request val;
val.set_ival(0);
AsyncClientCall* call = new AsyncClientCall;
call->response_reader = stub_->AsyncDEBUG_val_container_get(&call->context, val, &cq_);
call->response_reader->Finish(&call->reply_, &call->status, (void*)call);
}
void AsyncCompleteRpc() {
void* got_tag;
bool ok = false;
while (cq_.Next(&got_tag, &ok)) {
AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
GPR_ASSERT(ok);
if (call->status.ok()) {
if (call->reply_.has_r1()) {
std::cout << call << " DEBUG received: "
<< call->reply_.r1().vac().val_array(0) << std::endl;
}
}
else {
std::cout << call << " RPC failed" << std::endl;
std::cout << " RPC failure code = " << call->status.error_code() << std::endl;
std::cout << " RPC failure message = " << call->status.error_message() << std::endl;
}
delete call;
}
}
private:
struct AsyncClientCall {
outer_container reply_;
ClientContext context;
Status status;
std::unique_ptr<ClientAsyncResponseReader<outer_container>> response_reader;
};
std::unique_ptr<DEBUGservice::Stub> stub_;
CompletionQueue cq_;
};
int main(int argc, char** argv) {
DEBUGClient DEBUG0(grpc::CreateChannel("172.16.17.46:50050", grpc::InsecureChannelCredentials()));
std::thread thread0_ = std::thread(&DEBUGClient::AsyncCompleteRpc, &DEBUG0);
DEBUG0.DEBUG_val_container_get();
sleep(1);
std::cout << "Press control-c to quit" << std::endl << std::endl;
thread0_.join(); //blocks forever
return 0;
}
And, here's our server source code:
#include <memory>
#include <iostream>
#include <string>
#include <thread>
#include <grpc++/grpc++.h>
#include <grpc/support/log.h>
#include "debug.grpc.pb.h"
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
using grpc::Server;
using grpc::ServerAsyncResponseWriter;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCompletionQueue;
using grpc::Status;
using DEBUGpackage::inner_container;
using DEBUGpackage::input_int32_request;
using DEBUGpackage::middle_container;
using DEBUGpackage::outer_container;
using DEBUGpackage::DEBUGservice;
std::string save_server_address;
class ServerImpl final {
public:
~ServerImpl() {
server_->Shutdown();
cq_->Shutdown();
}
void Run() {
std::string server_address("0.0.0.0:50050");
ServerBuilder builder;
builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
builder.RegisterService(&service_);
cq_ = builder.AddCompletionQueue();
server_ = builder.BuildAndStart();
std::cout << "Server listening on " << server_address << std::endl;
save_server_address = server_address;
HandleRpcs();
}
private:
class CallData {
public:
virtual void Proceed() = 0;
};
class DebugGetCallData final : public CallData{
public:
DebugGetCallData(DEBUGservice::AsyncService* service, ServerCompletionQueue* cq)
: service_(service), cq_(cq), responder_(&ctx_), status_(CREATE) {
Proceed();
}
void Proceed() {
if (status_ == CREATE) {
status_ = PROCESS;
service_->RequestDEBUG_val_container_get(&ctx_, &request_, &responder_, cq_, cq_, this);
} else if (status_ == PROCESS) {
new DebugGetCallData(service_, cq_);
char *portchar;
portchar = (char *) save_server_address.c_str();
long cq_addr = (long) cq_;
int cq_addr32 = (int) (cq_addr & 0xfffffff);
srand(cq_addr32);
fprintf(stderr, "%s task started\n", portchar); fflush(stderr);
unsigned int return_val = 10;
inner_container ic;
ic.add_val_array(return_val);
middle_container reply_temp;
reply_temp.set_allocated_vac(&ic);
reply_.set_allocated_r1(&reply_temp);
fprintf(stderr, "%s %s task done\n", portchar, "val_container_get"); fflush(stderr);
status_ = FINISH;
responder_.Finish(reply_, Status::OK, this);
} else {
GPR_ASSERT(status_ == FINISH);
}
}
private:
DEBUGservice::AsyncService* service_;
ServerCompletionQueue* cq_;
ServerContext ctx_;
input_int32_request request_;
outer_container reply_;
ServerAsyncResponseWriter<outer_container> responder_;
enum CallStatus { CREATE, PROCESS, FINISH };
CallStatus status_;
};
void HandleRpcs() {
new DebugGetCallData(&service_, cq_.get());
void* tag;
bool ok;
while (true) {
GPR_ASSERT(cq_->Next(&tag, &ok));
GPR_ASSERT(ok);
static_cast<CallData*>(tag)->Proceed();
}
}
std::unique_ptr<ServerCompletionQueue> cq_;
DEBUGservice::AsyncService service_;
std::unique_ptr<Server> server_;
};
int main() {
ServerImpl server;
server.Run();
return 0;
}
The output when I run it looks like this:
[fossum#netsres46 debug]$ DEBUG_client2
in DEBUG_val_container_get
0xb73ff0 RPC failed
RPC failure code = 14
RPC failure message = Endpoint read failed
Press control-c to quit
We ran the server under gdb, and found a place in the generated
file "debug.pb.cc" where if we just comment out one line, it all starts working.
Here's the pertinent piece of the generated file "debug.pb.cc":
middle_container::~middle_container() {
// @@protoc_insertion_point(destructor:DEBUGpackage.middle_container)
SharedDtor();
}
void middle_container::SharedDtor() {
if (this != internal_default_instance()) {
delete vac_; // comment out this one line, to make the problem go away
}
}
The "delete vac_" line appears to be an attempt to delete storage that either has already been deleted, or is about to be deleted somewhere else. Please, can someone look into this? [The files below are still the files we use to generate this code, and to debug the problem to this point]
I have no idea whether I've uncovered a bug in GRPC, or whether I've coded something wrong.
The issue is that you allocate middle_container reply_temp on the stack in your server, and set_allocated_r1() transfers ownership of it to the protobuf message, which will later try to delete it. The object is also destructed as soon as execution leaves the enclosing scope; at that point you have called Finish but not yet waited for its result, and since this is an async server, the data must remain alive until you've received the tag back for it. This is why manually editing the generated destructor appears to work: you're effectively nullifying the destructor (and leaking memory as a result).
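A minimal sketch of the fix, assuming the generated DEBUGpackage API from the .proto above: let protobuf own the nested messages through the generated mutable_*() accessors instead of handing it stack addresses.

// Inside DebugGetCallData::Proceed(), PROCESS branch:
// mutable_r1() and mutable_vac() create heap-owned nested messages inside
// reply_, so nothing on the stack is ever deleted by the protobuf runtime.
unsigned int return_val = 10;
reply_.mutable_r1()->mutable_vac()->add_val_array(return_val);
status_ = FINISH;
responder_.Finish(reply_, Status::OK, this);

Equivalently, you could new the middle_container and inner_container and then use set_allocated_*(), since ownership is transferred either way; the stack variant is the one that crashes.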
I am trying to perform the XMPP Handshake Flow described at https://developers.google.com/cloud-print/docs/rawxmpp.
This allows the device to subscribe and receive notifications.
As of now, I have explored the following options:
1) libcurl
2) Gloox C/C++
3) TXMPP
4) Libjingle
Which option would be a good choice to start with? Library support and ongoing maintenance are major factors for me.
Following is my solution using Gloox C/C++ library to perform XMPP Handshake Flow:
#include <cassert>
#include <iostream>
#include <boost/make_shared.hpp>
#include <iq.h>
#include <parser.h>
#include <base64.h>
#include <connectiontcpclient.h>
#include <connectiontls.h>
#include <connectiondatahandler.h>
#include <connectionhttpproxy.h>
#include <logsink.h>
#include <client.h>
#include <connectionsocks5proxy.h>
using namespace gloox;
using namespace std;
const string proxyHost = ""; //specify proxy server name
const int proxyPort = 0; //specify proxy port number
const string xmppHost = "talk.google.com";
const int xmppPort = 5222;
Client *c;
ConnectionBase *client_ ;
ConnectionTCPClient* conn0;
ConnectionHTTPProxy* conn2;
class Bot: public ConnectionDataHandler, public TagHandler, public TLSHandler {
public:
Bot(): parser_(this)
{
conn0 = new ConnectionTCPClient(this, log_, proxyHost, proxyPort);
conn2 = new ConnectionHTTPProxy( this, conn0, log_, xmppHost, xmppPort); // assign the global rather than shadowing it with a local
client_ = conn0;
ConnectionError ce = ConnNoError;
ce = conn2->connect();
assert(ce == ConnNoError);
conn2->receive();
}
virtual void handleConnect(const ConnectionBase* con) {
send("<stream:stream to=\"gmail.com\" xml:lang=\"en\" version=\"1.0\" xmlns:stream=\"http://etherx.jabber.org/streams\" xmlns=\"jabber:client\">\r\n");
}
virtual void handleReceivedData(const ConnectionBase* con, const string& data) {
cerr << "[recv] " << data << endl;
string copied = data;
int pos = parser_.feed(copied);
assert(pos < 0);
}
virtual void handleTag(Tag* tag) {
if (tag->name() == "stream" && tag->xmlns() == "http://etherx.jabber.org/streams") {
sid_ = tag->findAttribute("id");
} else{
if (tag->name() == "features") {
if (tag->hasChild("starttls", "xmlns", "urn:ietf:params:xml:ns:xmpp-tls")) {
send(Tag("starttls", "xmlns", "urn:ietf:params:xml:ns:xmpp-tls").xml());
}
else if (tag->hasChild("mechanisms", "xmlns", "urn:ietf:params:xml:ns:xmpp-sasl") && tag->findChild("mechanisms")->hasChildWithCData(
"mechanism", "X-OAUTH2"))
{
Tag a("auth", "xmlns", "urn:ietf:params:xml:ns:xmpp-sasl");
a.addAttribute("mechanism", "X-OAUTH2");
a.addAttribute("service", "chromiumsync");
a.addAttribute("allow-generated-jid", "true");
a.addAttribute("client-uses-full-bind-result", "true");
a.addAttribute("auth", "http://www.google.com/talk/protocol/auth");
string credential;
credential.append("\0", 1);
credential.append(""); //Specify Bare JID
credential.append("\0", 1);
credential.append(""); //Specify Access Token
a.setCData(Base64::encode64(credential));
send(a.xml());
}
else if (tag->hasChild("bind", "xmlns", "urn:ietf:params:xml:ns:xmpp-bind")) {
Tag iq("iq", "xmlns", "jabber:client");
iq.addAttribute("type", "set");
iq.addAttribute("id", "0");
Tag *bind = new Tag("bind", "xmlns", "urn:ietf:params:xml:ns:xmpp-bind");
Tag *resource = new Tag("resource");
resource->setCData("GCPResource");
bind->addChild(resource);
iq.addChild(bind);
send(iq.xml());
}
}
else if (tag->name() == "proceed" && tag->xmlns() == "urn:ietf:params:xml:ns:xmpp-tls") {
ConnectionTLS* encryption_client = new ConnectionTLS(this, conn0, log_);
encryption_client->registerTLSHandler(this);
client_ = encryption_client;
ConnectionError ret = encryption_client->connect();
assert(ret == ConnNoError);
}
else if (tag->name() == "success" && tag->xmlns() == "urn:ietf:params:xml:ns:xmpp-sasl") {
send("<stream:stream to=\"gmail.com\" xml:lang=\"en\" version=\"1.0\" xmlns:stream=\"http://etherx.jabber.org/streams\" xmlns=\"jabber:client\">\r\n");
}
else if (tag->name() == "iq") {
if (tag->hasChild("bind", "xmlns", "urn:ietf:params:xml:ns:xmpp-bind")) {
resource_ = tag->findChild("bind")->findChild("jid")->cdata();
Tag iq("iq");
iq.addAttribute("type", "set");
iq.addAttribute("id", "1");
iq.addChild(new Tag("session", "xmlns", "urn:ietf:params:xml:ns:xmpp-session"));
send(iq.xml());
//Step 2: Subscribing for notifications
if (tag->hasAttribute("type", "result")) {
Tag iq("iq");
iq.addAttribute("type", "set");
iq.addAttribute("to", ""); //Specify Bare JID
iq.addAttribute("id", "3");
Tag *bind = new Tag("subscribe", "xmlns", "google:push");
Tag *resource = new Tag("item");
resource->addAttribute("channel", "cloudprint.google.com");
resource->addAttribute("from", "cloudprint.google.com");
bind->addChild(resource);
iq.addChild(bind);
send(iq.xml());
}
}
}
}
}
virtual void handleEncryptedData(const TLSBase* tls,
const string& data) {
cout << "handleEncryptedData" << endl;
}
virtual void handleDecryptedData(const TLSBase* tls,
const string& data) {
cout << "handleDecryptedData" << endl;
}
virtual void handleHandshakeResult(const TLSBase* tls, bool,
CertInfo& cert) {
cout << "handleHandshakeResult" << endl;
}
virtual void handleDisconnect(const ConnectionBase* con, ConnectionError) {
cout << "handleDisconnect" << endl;
}
private:
LogSink log_;
string sid_;
string resource_;
Parser parser_;
ConnectionBase *client_;
void send(const string &data) {
cerr << "[send] " << data << endl;
client_->send(data);
}
};
int main() {
Bot bot;
}
I am getting these errors:
Error 2 error LNK2005: "class std::basic_string<char,struct std::char_traits<char>,class std::allocator<char> > __cdecl ToString(int)" (?ToString@@YA?AV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@H@Z) already defined in Sender.obj C:\CSE687\Project3_TT_1\Repository\Repository.obj
Error 3 error LNK2005: "private: static int Sender::count" (?count@Sender@@0HA) already defined in Sender.obj C:\CSE687\Project3_TT_1\Repository\Repository.obj
Error 4 error LNK1169: one or more multiply defined symbols found C:\CSE687\Project3_TT_1\Debug\Repository.exe
from these three pieces of code:
#ifndef SEND_H
#define SEND_H
/////////////////////////////////////////////////////////////////
// Sender.cpp - Demonstration of concurrent socket connectors //
// ver 2 //
// Jim Fawcett, CSE687 - Object Oriented Design, Spring 2013 //
/////////////////////////////////////////////////////////////////
/*
* This Sender expects to write lines of text only.
* So message framing is done by lines.
*
* For HTTP like protocols the Sender should send lines for each
* header attribute and bytes in the body, if there is one,
* specified by a last header line something like:
* content_length : 1024
* where 1024 is a stand-in for whatever you want your block
* size to be.
*
*/
/*
* Required files:
* - Sender.cpp, Sockets.h, Sockets.cpp,
* Threads.h, Threads.cpp, Locks.h, Locks.cpp
* BlockingQueue.h, BlockingQueue.cpp
*
* Maintenance History:
* ver 1.1 - 30 Mar 2013
* - changed Sendthread from terminating to default
* - minor changes to error handling
* ver 1.0 - 29 Mar 2013
* - first release
*/
#include "../sockets/Sockets.h"
#include "../Threads/Threads.h"
#include "../Threads/Locks.h"
#include "../BlockingQueue/BlockingQueue.h"
#include <string>
#include <iostream>
#include <sstream>
///////////////////////////////////////////////////
// SendThread thread
class SendThread : public threadBase
{
public:
SendThread(Socket s, BlockingQueue<std::string>& q) : s_(s), q_(q) {}
std::string& status() { return status_; }
private:
void run()
{
status_ = "good";
doLog("send thread running");
std::string msg;
do
{
doLog("send thread enqing msg");
msg = q_.deQ();
if(!s_.writeLine(msg))
{
sout << "\n bad status in sending thread";
status_ = "bad";
break;
}
} while(msg != "stop");
s_.disconnect();
}
std::string status_;
Socket s_;
BlockingQueue<std::string>& q_;
};
std::string ToString(int i)
{
std::ostringstream conv;
conv << i;
return conv.str();
}
class Sender
{
public:
Sender() {};
Sender(int numMsgs) : numMsgs_(numMsgs) { myCount = ++count; }
int id() { return myCount; }
void start(std::string ip, int port)
{
sout << locker << "\n Sender #" << id() << " started" << unlocker;
pSt = new SendThread(s_, q_);
pSt->start();
if(!s_.connect(ip, port))
{
sout << locker << "\n couldn't connect to " << ip << ":" << port << "\n\n" << unlocker;
delete pSt;
return;
}
else
{
std::string logMsg = "\n connected to " + ip + ":" + ToString(port);
doLog(logMsg.c_str());
}
doLog("starting Sender");
std::string msg;
for(int i=0; i<numMsgs_; ++i)
{
doLog("sending message");
msg = "sender#" + ToString(id()) + ": msg#" + ToString(i);
sout << locker << "\n " << msg.c_str() << unlocker;
q_.enQ(msg);
::Sleep(10 * id()); // sleep time increases with each additional Sender
if(pSt->status() == "bad")
break;
}
q_.enQ("stop");
msg = "sender#" + ToString(id()) + ": stop";
sout << "\n " + msg;
pSt->join();
delete pSt;
}
private:
Socket s_;
BlockingQueue<std::string> q_;
SendThread* pSt;
static int count;
int myCount;
int numMsgs_;
};
int Sender::count = 0;
///////////////////////////////////////////////////
// DemoThread is used to get two or more senders
// running concurrently from a single process, to
// make testing easier.
class DemoThread : public threadBase
{
public:
DemoThread(Sender sndr) : sndr_(sndr) {}
private:
void run()
{
sndr_.start("127.0.0.1", 8080);
}
Sender sndr_;
};
#endif
and:
#ifndef REPOS_H
#define REPOS_H
/////////////////////////////////////////////////////////////////
// Repository.h - Demonstration of repository action using a //
// socket receiver with concurrent clients //
// //
// Thomas P. Taggart //
// tptaggar@syr.edu //
// CSE687, Object Oriented Design, Spring 2013 //
/////////////////////////////////////////////////////////////////
/*
* Required files:
* - Receiver.h, Receiver.cpp, Sockets.h, Sockets.cpp,
* Threads.h, Threads.cpp, Locks.h, Locks.cpp
* BlockingQueue.h, BlockingQueue.cpp
*/
#include "../Sockets/Sockets.h"
#include "../Threads/Threads.h"
#include "../Threads/Locks.h"
#include "../Receiver/Receiver.h"
#include "../Sender/Sender.h"
#include "../BlockingQueue/BlockingQueue.h"
#include <string>
class Repository
{
public:
Repository() {};
~Repository() {};
Sender* getSender();
Receiver* getReceiver();
private:
Sender* repoSender;
Receiver* repoReceiver;
};
#endif
and:
/////////////////////////////////////////////////////////////////
// Repository.cpp - Demonstration of repository action using //
// a socket receiver with concurrent clients //
// //
// Thomas P. Taggart //
// tptaggar@syr.edu //
// CSE687, Object Oriented Design, Spring 2013 //
/////////////////////////////////////////////////////////////////
/*
* Required files:
* - Receiver.h, Receiver.cpp, Sockets.h, Sockets.cpp,
* Threads.h, Threads.cpp, Locks.h, Locks.cpp
* BlockingQueue.h, BlockingQueue.cpp
*/
#include "../Repository/Repository.h"
#include <string>
using namespace std;
Sender* Repository::getSender()
{
return repoSender;
}
Receiver* Repository::getReceiver()
{
return repoReceiver;
}
int main()
{
int ret = 0;
try
{
Repository repos;
repos.getReceiver()->start(8080);
}
catch(std::exception& ex)
{
std::cout << "\n\n " << ex.what();
ret = 1;
}
catch(...)
{
sout << "\n something bad happened";
ret = 1;
}
sout << "\n\n";
return ret;
}
I have not, for the life of me, been able to figure out how to avoid this. The 'extern' solution doesn't seem to work, and #ifndef include guards are already in place.
Any advice is greatly appreciated.
Tom
You're breaking the ODR (One Definition Rule): Sender.h defines ToString(int) as a non-inline free function, so every translation unit that includes the header gets its own definition. Either make std::string ToString(int i) inline or move its definition to an implementation file. The definition int Sender::count = 0; in the header causes the second LNK2005 for the same reason and likewise belongs in a single .cpp file.
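A minimal sketch of the inline fix in Sender.h (the alternative is to keep only declarations in the header and define both once in Sender.cpp):

// Sender.h -- header-safe definitions
inline std::string ToString(int i) // inline: multiple translation units
{                                  // may each contain this definition
    std::ostringstream conv;
    conv << i;
    return conv.str();
}
// In exactly one .cpp file (e.g. Sender.cpp), not in the header:
// int Sender::count = 0;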