Imagine that you have a WebSocket client that downloads some data in a loop like this:
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include "nlohmann/json.hpp"
namespace beast = boost::beast;
namespace websocket = beast::websocket;
using tcp = boost::asio::ip::tcp;
class Client {
public:
Client(boost::asio::io_context &ctx) : ws_{ctx}, ctx_{ctx} {
ws_.set_option(websocket::stream_base::timeout::suggested(boost::beast::role_type::client));
#define HOST "127.0.0.1"
#define PORT "8000"
boost::asio::connect(ws_.next_layer(), tcp::resolver{ctx_}.resolve(HOST, PORT));
ws_.handshake(HOST ":" PORT, "/api/v1/music");
#undef HOST
#undef PORT
}
~Client() {
if (ws_.is_open()) {
ws_.close(websocket::close_code::normal);
}
}
nlohmann::json NextPacket(std::size_t offset) {
nlohmann::json request;
request["offset"] = offset;
ws_.write(boost::asio::buffer(request.dump()));
beast::flat_buffer buffer;
ws_.read(buffer);
return nlohmann::json::parse(std::string_view{reinterpret_cast<const char *>(buffer.data().data()), buffer.size()});
}
private:
boost::beast::websocket::stream<boost::asio::ip::tcp::socket> ws_;
boost::asio::io_context &ctx_;
};
// ... some function
int main() {
boost::asio::io_context context;
boost::asio::executor_work_guard<boost::asio::io_context::executor_type> guard{context.get_executor()};
std::thread{[&context]() { context.run(); }}.detach();
static constexpr std::size_t kSomeVeryBigConstant{1'000'000'000};
Client client{context};
std::size_t offset{};
while (offset < kSomeVeryBigConstant) {
offset += client.NextPacket(offset)["offset"].get<std::size_t>();
// UPDATE:
userDefinedLongPauseHere();
}
}
On the server side, ping requests are sent at some frequency. Where should I handle ping requests? As I understand it, control_callback controls calls to the ping, pong and close functions, not incoming requests. With the read or async_read functions, I also cannot catch the ping request.
Beast responds to pings with pongs automatically, as described here: https://github.com/boostorg/beast/issues/899#issuecomment-346333014
Whenever you call read(), it can process a ping and send a pong without you knowing about it.
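If you only want to observe incoming pings (for logging, say), you can register a control callback on the stream; Beast invokes it from inside read()/async_read() whenever a ping, pong or close frame arrives, and still sends the automatic pong afterwards. A minimal sketch, assuming it is added to the Client constructor from the question (and that <iostream> is available):
ws_.control_callback(
    [](websocket::frame_type kind, beast::string_view payload) {
        // Invoked for received ping, pong and close frames during a read;
        // the automatic pong reply is still sent by Beast.
        if (kind == websocket::frame_type::ping)
            std::cout << "ping received, payload: " << payload << '\n';
    });
Note that control frames are only processed while a read is in progress, so a client that stops calling read() will also stop seeing (and answering) pings.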
Using the following boost::asio code I run a loop of 1M sequential HTTP calls to a simple Dockerized node.js HTTP service that generates random numbers, but after a few thousand calls I start getting async_connect errors. The node.js part is not producing any errors and I believe it works OK.
To avoid resolving the host on every call and to try to speed things up, I am caching the endpoint; it makes no difference, I have tested both ways.
Can anyone see what is wrong with my code below?
Are there any best practices for a stress-test tool using asio that I am missing?
//------------------------------------------------------------------------------
// https://www.boost.org/doc/libs/1_70_0/libs/beast/doc/html/beast/using_io/timeouts.html
HttpResponse HttpClientAsyncBase::_http(HttpRequest&& req)
{
using namespace boost::beast;
namespace net = boost::asio;
using tcp = net::ip::tcp;
HttpResponse res;
req.prepare_payload();
boost::beast::error_code ec = {};
const HOST_INFO host = resolve(req.host(), req.port, req.resolve);
net::io_context m_io;
boost::asio::spawn(m_io, [&](boost::asio::yield_context yield)
{
size_t retries = 0;
tcp_stream stream(m_io);
if (req.timeout_seconds == 0) get_lowest_layer(stream).expires_never();
else get_lowest_layer(stream).expires_after(std::chrono::seconds(req.timeout_seconds));
get_lowest_layer(stream).async_connect(host, yield[ec]);
if (ec) return;
http::async_write(stream, req, yield[ec]);
if (ec)
{
stream.close();
return;
}
flat_buffer buffer;
http::async_read(stream, buffer, res, yield[ec]);
stream.close();
});
m_io.run();
if (ec)
throw boost::system::system_error(ec);
return std::move(res);
}
I have tried both sync/async implementations of a boost http client and I get the exact same problem.
The error I get is "You were not connected because a duplicate name exists on the network. If joining a domain, go to System in Control Panel to change the computer name and try again. If joining a workgroup, choose another workgroup name [system:52]"
So, I decided to... just try. I made your code into a self-contained example:
#include <boost/asio/spawn.hpp>
#include <boost/beast.hpp>
#include <fmt/ranges.h>
#include <iostream>
namespace http = boost::beast::http;
//------------------------------------------------------------------------------
// https://www.boost.org/doc/libs/1_70_0/libs/beast/doc/html/beast/using_io/timeouts.html
struct HttpRequest : http::request<http::string_body> { // SEHE: don't do this
using base_type = http::request<http::string_body>;
using base_type::base_type;
std::string host() const { return "127.0.0.1"; }
uint16_t port = 80;
bool resolve = true;
int timeout_seconds = 0;
};
using HttpResponse = http::response<http::vector_body<uint8_t> >; // Do this or aggregation instead
struct HttpClientAsyncBase {
HttpResponse _http(HttpRequest&& req);
using HOST_INFO = boost::asio::ip::tcp::endpoint;
static HOST_INFO resolve(std::string const& host, uint16_t port, bool resolve) {
namespace net = boost::asio;
using net::ip::tcp;
net::io_context ioc;
tcp::resolver r(ioc);
using flags = tcp::resolver::query::flags;
auto f = resolve ? flags::address_configured
: static_cast<flags>(flags::numeric_host | flags::numeric_service);
tcp::resolver::query q(tcp::v4(), host, std::to_string(port), f);
auto it = r.resolve(q);
assert(it.size());
return HOST_INFO{it->endpoint()};
}
};
HttpResponse HttpClientAsyncBase::_http(HttpRequest&& req) {
using namespace boost::beast;
namespace net = boost::asio;
using net::ip::tcp;
HttpResponse res;
req.prepare_payload();
boost::beast::error_code ec = {};
const HOST_INFO host = resolve(req.host(), req.port, req.resolve);
net::io_context m_io;
spawn(m_io, [&](net::yield_context yield) {
// size_t retries = 0;
tcp_stream stream(m_io);
if (req.timeout_seconds == 0)
get_lowest_layer(stream).expires_never();
else
get_lowest_layer(stream).expires_after(std::chrono::seconds(req.timeout_seconds));
get_lowest_layer(stream).async_connect(host, yield[ec]);
if (ec)
return;
http::async_write(stream, req, yield[ec]);
if (ec) {
stream.close();
return;
}
flat_buffer buffer;
http::async_read(stream, buffer, res, yield[ec]);
stream.close();
});
m_io.run();
if (ec)
throw boost::system::system_error(ec);
return res;
}
int main() {
for (int i = 0; i<100'000; ++i) {
HttpClientAsyncBase hcab;
HttpRequest r(http::verb::get, "/bytes/10", 11);
r.timeout_seconds = 0;
r.port = 80;
r.resolve = false;
auto res = hcab._http(std::move(r));
std::cout << res.base() << "\n";
fmt::print("Data: {::02x}\n", res.body());
}
}
(Side note, this is using docker run -p 80:80 kennethreitz/httpbin to run the server side)
While this is about 10x faster than running curl to do the equivalent requests in a bash loop, none of this is particularly stressing. There's nothing async about it, and resource usage seems mild and stable, e.g. according to a memory profile.
(for completeness I verified identical results with timeout_seconds = 1)
Since what you're doing is literally the opposite of async IO, I'd write it much simpler:
struct HttpClientAsyncBase {
net::io_context m_io;
HttpResponse _http(HttpRequest&& req);
static auto resolve(std::string const& host, uint16_t port, bool resolve);
};
HttpResponse HttpClientAsyncBase::_http(HttpRequest&& req) {
HttpResponse res;
req.requestObject.prepare_payload();
const auto host = resolve(req.host(), req.port, req.resolve);
beast::tcp_stream stream(m_io);
if (req.timeout_seconds == 0)
stream.expires_never();
else
stream.expires_after(std::chrono::seconds(req.timeout_seconds));
stream.connect(host);
write(stream, req.requestObject);
beast::flat_buffer buffer;
read(stream, buffer, res);
stream.close();
return res;
}
That's just simpler, runs faster and does the same, down to the exceptions.
But you're probably trying to cause stress; perhaps you instead need to reuse some connections and multi-thread?
You can see a very complete example of just that here:
How do I make this HTTPS connection persistent in Beast?
It includes reconnecting dropped connections, connections to different hosts, varied requests etc.
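If you just want the connection-reuse idea without the full machinery of that example, a minimal sketch could look like the following. This is plain HTTP against the httpbin container from above; the hard-coded host/port, the request count and the naive reconnect are my assumptions, not part of the linked answer. Reusing one stream avoids a fresh ephemeral port per request, which also side-steps the TIME_WAIT exhaustion discussed below.
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <iostream>
int main() {
    namespace net   = boost::asio;
    namespace beast = boost::beast;
    namespace http  = beast::http;
    using tcp = net::ip::tcp;
    net::io_context ioc;
    auto const endpoints = tcp::resolver(ioc).resolve("127.0.0.1", "80");
    beast::tcp_stream stream(ioc);
    stream.connect(endpoints);
    for (int i = 0; i < 1'000; ++i) {
        http::request<http::empty_body> req(http::verb::get, "/bytes/10", 11);
        req.set(http::field::host, "127.0.0.1");
        req.keep_alive(true); // ask the server to keep the connection open
        http::write(stream, req);
        beast::flat_buffer buffer;
        http::response<http::string_body> res;
        http::read(stream, buffer, res);
        std::cout << res.result_int() << ": " << res.body().size() << " bytes\n";
        if (res.need_eof()) { // the server signalled it will close; reconnect
            stream.close();
            stream.connect(endpoints);
        }
    }
}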
Alan's comments gave me the right pointers, and using netstat -a I soon found that it was a port leakage problem, with thousands of ports in the TIME_WAIT state after running the code for a brief time.
The root cause was both on the client and the server:
In the node.js server I had to make sure that responses close the connection by adding:
response.setHeader("connection", "close");
In the boost::asio C++ code I replaced stream.close() with:
stream.socket().shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);
That seems to make all the difference. I also made sure to use
req.set(boost::beast::http::field::connection, "close");
in my requests.
I verified with the tool running for over 5 hours with no problems at all, so I guess the problem is solved!
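Putting the client-side half of that together, a rough sketch (stream and req here are placeholders, assuming a connected beast::tcp_stream and a plain Beast request object, not the exact wrapper types used above):
// Ask the server to close the connection after this exchange
req.set(boost::beast::http::field::connection, "close");
boost::beast::http::write(stream, req);
boost::beast::flat_buffer buffer;
boost::beast::http::response<boost::beast::http::string_body> res;
boost::beast::http::read(stream, buffer, res);
// Shut the socket down instead of only calling stream.close();
// not_connected / end_of_stream errors here are expected and can be ignored
boost::system::error_code ec;
stream.socket().shutdown(boost::asio::ip::tcp::socket::shutdown_both, ec);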
Implementing 'Abortive TCP/IP Close' with boost::asio to treat EADDRNOTAVAIL and TIME_WAIT for HTTP client stress test tool
I am revisiting the issue to offer an alternative that actually worked much better. As a reminder, the objective was to develop a stress-test tool for hitting a server with 1M requests. Even though my previous solution worked on Windows, when I loaded the executable into Docker/Alpine it started crashing with SEGFAULT errors that I was unable to trace. The root cause seems to be related to boost::asio::spawn(m_io, [&](boost::asio::yield_context yield), but time pressure forced me to just solve the HTTP problem.
I decided to use synchronous HTTP and treat the EADDRNOTAVAIL and TIME_WAIT errors by following suggestions from Disable TIME_WAIT with boost sockets and TIME_WAIT with boost asio, and the template code from https://www.boost.org/doc/libs/1_80_0/libs/beast/example/http/client/sync/http_client_sync.cpp.
For anyone hitting EADDRNOTAVAIL and TIME_WAIT with boost::asio, the solution that worked for me, and is actually much faster than before on Windows, Linux and Docker, is the following:
HttpResponse HttpClientSyncBase::_http(HttpRequest&& req)
{
namespace beast = boost::beast;
namespace http = beast::http;
namespace net = boost::asio;
using tcp = net::ip::tcp;
HttpResponse res;
req.prepare_payload();
const auto host = req.host();
const auto port = req.port;
const auto target = req.target();
const bool abortive_close = boost::iequals(req.header("Connection"), "close");
const bool download_large_file = boost::iequals(req.header("X-LARGE-FILE-HINT"), "YES");
beast::error_code ec;
net::io_context ioc;
// Resolve host:port for IPv4
tcp::resolver resolver(ioc);
const auto endpoints = resolver.resolve(boost::asio::ip::tcp::v4(), host, port);
// Create stream and set timeouts
beast::tcp_stream stream(ioc);
if (req.timeout_seconds == 0) boost::beast::get_lowest_layer(stream).expires_never();
else boost::beast::get_lowest_layer(stream).expires_after(std::chrono::seconds(req.timeout_seconds));
// Caution: we can get address_not_available[EADDRNOTAVAIL] due to TIME_WAIT port exhaustion
stream.connect(endpoints, ec);
if (ec == boost::system::errc::address_not_available)
throw beast::system_error{ ec };
// Write HTTP request
http::write(stream, req);
// Read HTTP response (or download large file >8MB)
beast::flat_buffer buffer;
if (download_large_file)
{
_HttpResponse tmp;
boost::beast::http::response_parser<boost::beast::http::string_body> parser{ std::move(tmp) };
parser.body_limit(boost::none);
boost::beast::http::read(stream, buffer, parser);
res = HttpResponse(std::move(parser.release()));
}
else
{
http::read(stream, buffer, res);
}
// Try to shut down socket gracefully
stream.socket().shutdown(tcp::socket::shutdown_both, ec);
if (abortive_close)
{
// Read until no more data are in socket buffers
// https://stackoverflow.com/questions/58983527/disable-time-wait-with-boost-sockets
try
{
http::response<http::dynamic_body> res;
beast::flat_buffer buffer;
http::read(stream, buffer, res);
}
catch (...)
{
// should get end of stream here, ignore it
}
// Perform "Abortive TCP/IP Close" to minimize TIME_WAIT port exhaustion
// https://stackoverflow.com/questions/35006324/time-wait-with-boost-asio
try
{
// enable linger with timeout 0 to force abortive close
boost::asio::socket_base::linger option(true, 0);
stream.socket().set_option(option);
stream.close();
}
catch (...)
{
}
}
else
{
try { stream.close(); } catch (...) {}
}
// Ignore not_connected and end_of_stream errors, handle the rest
if (ec && ec != beast::errc::not_connected && ec != beast::http::error::end_of_stream)
throw beast::system_error{ ec };
return std::move(res);
}
In the sample above I should add error handling around write, but I guess anyone can do that. _HttpResponse is defined as follows and is the base for HttpResponse.
using _HttpRequest = boost::beast::http::message<true, boost::beast::http::string_body, boost::beast::http::fields>;
using _HttpResponse = boost::beast::http::message<false, boost::beast::http::string_body, boost::beast::http::fields>;
using HttpHeaders = boost::beast::http::header<1, boost::beast::http::basic_fields<std::allocator<char>>>;
For what it's worth, when I started, the estimate for the job was 5-7 days. Using connection=close in my previous solution got it down to 7-8 hours. Using the Abortive TCP/IP Close I got down to 1.5 hours.
The funny thing is, the server, also boost::asio, could handle the stress while the original stress tool couldn't. Now both the server and its stress-test tool work just fine! The code also demonstrates how to download a large file (over 8 MB), which was another side problem, as I needed to download the test results from the server.
I want to send 3-4 headers to my WebSocket server. The headers are action = subscribe, userID = <some email address>, and agentID = 831C5DFC-1643-40C4-A5A3-9C918556D3A1. I am unable to understand how to send these headers to the server; what is the typical method? This is my client code:
#include <boost/beast/http.hpp>
#include <boost/asio/connect.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/websocket/stream.hpp>
#include <cstdlib>
#include <iostream>
#include <string>
namespace http = boost::beast::http;
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Sends a WebSocket message and prints the response
#define SERVER_URL "127.0.0.1"
#define SERVER_PORT "80"
class set_subprotocols
{
std::string s_;
public:
explicit
set_subprotocols(std::string s)
: s_(s) {}
template<bool isRequest, class Body, class Headers>
void
operator()(boost::beast::http::message<isRequest, Body, Headers>& m) const
{
m.set("X-Custome-Id", s_);
}
};
int main(int argc, char** argv)
{
try
{
// Check command line arguments.
auto const host = SERVER_URL;
auto const port = SERVER_PORT;
auto const text = "hello world";
// The io_context is required for all I/O
net::io_context ioc;
// These objects perform our I/O
tcp::resolver resolver{ioc};
websocket::stream<tcp::socket> ws{ioc};
// Look up the domain name
auto const results = resolver.resolve(host, port,boost::asio::ip::resolver_query_base::numeric_service);
// Make the connection on the IP address we get from a lookup
net::connect(ws.next_layer(), results.begin(), results.end());
// Set a decorator to change the User-Agent of the handshake
ws.set_option(websocket::stream_base::decorator(
[](websocket::request_type& req)
{
req.set(http::field::user_agent,
std::string("agent"));
}));
ws.set_option(websocket::stream_base::decorator(set_subprotocols{"action = subscribe"}));
ws.set_option(websocket::stream_base::decorator(set_subprotocols{"userID = madhur#ayraa.io"}));
ws.set_option(websocket::stream_base::decorator(set_subprotocols{"agentID = 831C5DFC-1643-40C4-A5A3-9C918556D3A1"}));
// Perform the websocket handshake
ws.handshake(host, "/");
// Send the message
ws.write(net::buffer(std::string(text)));
// This buffer will hold the incoming message
beast::multi_buffer buffer;
// Read a message into our buffer
ws.read(buffer);
// The make_printable() function helps print a ConstBufferSequence
std::cout << beast::make_printable(buffer.data()) << std::endl;
// If we get here then the connection is closed gracefully
// Close the WebSocket connection
ws.close(websocket::close_code::normal);
}
catch(std::exception const& e)
{
std::cerr << "Error: " << e.what() << std::endl;
}
return EXIT_SUCCESS;
}
I currently don't have the server code but will provide it if I get it in the future.
You should do all that in a single decorator. In fact, you can use lambdas:
ws.set_option(websocket::stream_base::decorator([](auto& m) {
m.set("action", "subscribe");
m.set("userID", "madhur#ayraa.io");
m.set("agentID", "831C5DFC-1643-40C4-A5A3-9C918556D3A1");
}));
Now your request is:
GET / HTTP/1.1
Host: 127.0.0.1
Upgrade: websocket
Connection: upgrade
Sec-WebSocket-Key: xCj08Lz6IzEzH4s422aT5w==
Sec-WebSocket-Version: 13
action: subscribe
userID: madhur#ayraa.io
agentID: 831C5DFC-1643-40C4-A5A3-9C918556D3A1
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: pbg8RLbrPirBbYgmG0EptdCm2tQ=
...8.x.].......T..r{
"timestamp": "Wed Dec 29 2021 20:20:27 GMT+0000 (UTC)",
"url": "http://sockb.in/",
"reqData": "hello world"
}.. .H.
B....
I am developing simple test code for a WebSocket client using C++ Boost. The server I get responses from says I need to decompress messages using the inflate algorithm. I found out there is a deflate option in the Boost WebSocket library, but it did not work. Please let me know how to convert the data to a decompressed string.
#include <iostream>
#include <string>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/websocket/ssl.hpp>
#include <boost/asio/ssl.hpp>
#include <chrono>
using tcp = boost::asio::ip::tcp;
namespace websocket = boost::beast::websocket;
int main()
{
std::ostringstream stream;
std::string host = "real.okex.com";
auto const port = "8443";
auto const path = "/ws/v3";
boost::beast::multi_buffer buffer;
boost::asio::io_context ioc;
boost::asio::ssl::context ctx{boost::asio::ssl::context::sslv23};
tcp::resolver resolver{ioc};
websocket::stream<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>> wss{ioc, ctx};
ctx.set_verify_mode(boost::asio::ssl::verify_none);
tcp::resolver::results_type results = resolver.resolve(host, port);
boost::asio::connect(wss.next_layer().next_layer(), results.begin(), results.end());
// SSL handshake
wss.next_layer().handshake(boost::asio::ssl::stream_base::client);
// websocket handshake
wss.handshake(host, path);
std::cout << "connected" << std::endl;
// send request to the websocket
wss.write(boost::asio::buffer("{'op':'subscribe', 'args':['spot/ticker:ETH-USDT']}"));
// read message
wss.read(buffer);
std::cout << buffer.size() << std::endl;
buffer.consume(buffer.size());
/*
stream << boost::beast::buffers(buffer.data());
buffer.consume(buffer.size());
std::string incoming = stream.str();
std::cout << incoming << std::endl;
*/
}
Thanks !
I struggled for a long time, then I figured, what if I try with a different server?
That helped. I took echo_compressed/server.py from Autobahn:
wget 'https://github.com/crossbario/autobahn-python/raw/master/examples/twisted/websocket/echo_compressed/server.py'
virtualenv venv && . venv/bin/activate && pip install autobahn twisted
python server.py
That starts a WS server on port 9000. It's not using SSL though, so I disabled that in the code (see #ifdef SSL below).
Now the key is to set the permessage_deflate extension option before WS handshake:
websocket::permessage_deflate opt;
opt.client_enable = true; // for clients
opt.server_enable = true; // for servers
s.set_option(opt);
Also note that some servers require the port number to be present in the Host header when not running on standard ports:
s.handshake(host + ":" + port, path);
Now reading works just fine and decompresses as you'd expect, e.g. writing it to response.txt:
beast::multi_buffer buffer;
s.read(buffer);
{
std::ofstream ofs("response.txt", std::ios::binary);
std::copy(
net::buffers_begin(buffer.data()),
net::buffers_end(buffer.data()),
std::ostreambuf_iterator<char>(ofs));
}
Or, when replacing the multi_buffer with an Asio streambuf, it's easy to just stream it:
net::streambuf buffer;
s.read(buffer);
std::cout << &buffer;
Proof That It Was Deflating
Inspecting the traffic with tcpdump/Wireshark shows the compressed frames on the wire. Also, the Autobahn logging confirms it:
2020-06-22 02:12:05+0200 [-] Log opened.
2020-06-22 02:12:05+0200 [-] WebSocketServerFactory starting on 9000
2020-06-22 02:12:05+0200 [-] Starting factory <autobahn.twisted.websocket.WebSocketServerFactory object at 0x7f7af3fa5710>
2020-06-22 02:12:05+0200 [-] Site starting on 8080
2020-06-22 02:12:05+0200 [-] Starting factory <twisted.web.server.Site instance at 0x7f7af3850910>
2020-06-22 02:12:11+0200 [-] WebSocket connection request by tcp4:127.0.0.1:48658
2020-06-22 02:12:11+0200 [-] WebSocket extensions in use: [PerMessageDeflate(is_server = True, server_no_context_takeover = False, client_no_context_takeover = False, server_max_window_bits = 15, client_max_window_bits = 15, mem_level = 8)]
The Problem With That Server (real.okex.com)
I don't know what it is about it, really, but it seems that server is not sending standard responses. Perhaps someone else can tell. Writing the responses to a file did NOT result in a file that looks like it is zlib-compressed.
Other tools I tried ALSO fail to decode the data:
zlib-flate -uncompress < response.txt
Same with a python oneliner:
python -c 'import zlib; import sys; sys.stdout.write(zlib.decompress(sys.stdin.read()))' < response.txt
Full Listing
Here is the code as I tested it:
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/beast/websocket/ssl.hpp>
#include <iostream>
#include <string>
#include <fstream>
namespace net = boost::asio;
namespace ssl = net::ssl;
namespace beast = boost::beast;
namespace http = beast::http;
namespace websocket = beast::websocket;
using tcp = net::ip::tcp;
//#define SSL
#ifdef SSL
using stream_t = websocket::stream<ssl::stream<tcp::socket>>;
#else
using stream_t = websocket::stream<tcp::socket/*, true*/>;
#endif
int main(int argc, char** argv) {
if (argc<4) {
std::cerr << "Usage: " << argv[0] << " host port path\n";
return 1;
}
std::string host = argc>=2? argv[1] : "real.okex.com";
auto const port = argc>=3? argv[2] : "8443";
auto const path = argc>=3? argv[3] : "/ws/v3";
net::io_context ioc;
ssl::context ctx{ ssl::context::sslv23 };
tcp::resolver resolver{ ioc };
#ifdef SSL
stream_t s{ ioc, ctx };
#else
stream_t s{ ioc };
#endif
ctx.set_verify_mode(ssl::verify_none);
tcp::resolver::results_type results = resolver.resolve(host, port);
net::connect(
beast::get_lowest_layer(s),
//s.next_layer().next_layer(),
results.begin());
#ifdef SSL
// SSL handshake
s.next_layer().handshake(ssl::stream_base::client);
#endif
// websocket handshake
websocket::permessage_deflate opt;
opt.client_enable = true; // for clients
opt.server_enable = true; // for servers
s.set_option(opt);
s.handshake(host + ":" + port, path);
std::cout << "connected" << std::endl;
// send request to the websocket
s.write(net::buffer("{'op':'subscribe', 'args':['spot/ticker:ETH-USDT']}"));
{
net::streambuf buffer;
s.read(buffer);
std::cout << &buffer << std::endl;
}
}
Then I ran it, passing the host, port and path on the command line.
In the protocol upgrade response, the WebSocket server should have included a "Sec-WebSocket-Extensions" field, which tells the client to use Compression Extensions for WebSocket.
But lots of WebSocket servers of crypto exchanges like OKEx/Huobi don't do this. You have to inflate (decompress) the message in your application code.
You can think of this as moving the deflate/inflate from the protocol layer up to the application layer.
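For example, with zlib directly, the application-level inflate might look something like the sketch below. The assumptions are mine: the server sends raw DEFLATE data in permessage-deflate style with the trailing 0x00 0x00 0xFF 0xFF stripped (as RFC 7692 describes), and the helper name and buffer sizes are arbitrary.
#include <zlib.h>
#include <stdexcept>
#include <string>
#include <vector>
// Inflate a raw DEFLATE payload. Negative windowBits tells zlib there is
// no zlib/gzip header, which matches what permessage-deflate puts on the wire.
std::string inflate_raw(std::string msg) {
    // RFC 7692: the sender strips the trailing 0x00 0x00 0xFF 0xFF; append it
    // back so zlib sees a complete deflate block.
    msg.append("\x00\x00\xff\xff", 4);
    z_stream zs{};
    if (inflateInit2(&zs, -15) != Z_OK)
        throw std::runtime_error("inflateInit2 failed");
    zs.next_in  = reinterpret_cast<Bytef*>(&msg[0]);
    zs.avail_in = static_cast<uInt>(msg.size());
    std::string out;
    std::vector<char> chunk(16 * 1024);
    int ret;
    do {
        zs.next_out  = reinterpret_cast<Bytef*>(chunk.data());
        zs.avail_out = static_cast<uInt>(chunk.size());
        ret = inflate(&zs, Z_SYNC_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END && ret != Z_BUF_ERROR) {
            inflateEnd(&zs);
            throw std::runtime_error("inflate failed");
        }
        out.append(chunk.data(), chunk.size() - zs.avail_out);
    } while (zs.avail_out == 0);
    inflateEnd(&zs);
    return out;
}
You would feed it the bytes read from the WebSocket, e.g. beast::buffers_to_string(buffer.data()).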
I have to write a program that initializes an array of TCP sockets and uses async I/O to read data using a thread pool. I'm new to async I/O, thread pools, and shared_ptrs. What I have now is a working program with one socket. Here's the clipping:
boost::shared_ptr< asio::ip::tcp::socket > sock1(
new asio::ip::tcp::socket( *io_service )
);
boost::shared_ptr< asio::ip::tcp::acceptor > acceptor( new asio::ip::tcp::acceptor( *io_service ) );
asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), portNum);
acceptor->open( endpoint.protocol() );
acceptor->set_option( asio::ip::tcp::acceptor::reuse_address( false ) );
acceptor->bind( endpoint );
acceptor->listen();
I am stuck on getting similar code for an "array of sockets"; that is, I want to have acceptor[] that are bound to endpoint[]. I must pass around pointers to the acceptors and sockets, so shared_ptr comes in, and I am unable to get it right.
for (i=0; i<10; i++) {
// init socket[i] with *io_service
// init endpoint[i]
// init acceptor[i] with *io_service
acceptor[i]->listen()
}
(btw, do I really need a socket[] array for this purpose?) Can someone please help me?
Here is a full example for using Boost ASIO to implement a TCP echo server listening to multiple ports, with a thread pool to distribute work across multiple cores. It is based on this example from the Boost documentation (providing a single-threaded TCP echo server).
Session class
The session class represents a single active socket connection with a client. It reads from the socket and then writes the same data back into the socket to echo it to the client. The implementation uses the async_... functions provided by Boost ASIO: these functions register a callback with the I/O service that will be triggered when the I/O operation has finished.
session.h
#pragma once
#include <array>
#include <memory>
#include <boost/asio.hpp>
/**
* A TCP session opened on the server.
*/
class session : public std::enable_shared_from_this<session> {
using endpoint_t = boost::asio::ip::tcp::endpoint;
using socket_t = boost::asio::ip::tcp::socket;
public:
session(boost::asio::io_service &service);
/**
* Start reading from the socket.
*/
void start();
/**
* Callback for socket reads.
*/
void handle_read(const boost::system::error_code &ec,
size_t bytes_transferred);
/**
* Callback for socket writes.
*/
void handle_write(const boost::system::error_code &ec);
/**
* Get a reference to the session socket.
*/
socket_t &socket() { return socket_; }
private:
/**
* Session socket
*/
socket_t socket_;
/**
* Buffer to be used for r/w operations.
*/
std::array<uint8_t, 4096> buffer_;
};
session.cpp
#include "session.h"
#include <functional>
#include <iostream>
#include <thread>
using boost::asio::async_write;
using boost::asio::buffer;
using boost::asio::io_service;
using boost::asio::error::connection_reset;
using boost::asio::error::eof;
using boost::system::error_code;
using boost::system::system_error;
using std::placeholders::_1;
using std::placeholders::_2;
session::session(io_service &service) : socket_{service} {}
void session::start() {
auto handler = std::bind(&session::handle_read, shared_from_this(), _1, _2);
socket_.async_read_some(buffer(buffer_), handler);
}
void session::handle_read(const error_code &ec, size_t bytes_transferred) {
if (ec) {
if (ec == eof || ec == connection_reset) {
return;
}
throw system_error{ec};
}
std::cout << "Thread " << std::this_thread::get_id() << ": Received "
<< bytes_transferred << " bytes on " << socket_.local_endpoint()
<< " from " << socket_.remote_endpoint() << std::endl;
auto handler = std::bind(&session::handle_write, shared_from_this(), _1);
async_write(socket_, buffer(buffer_.data(), bytes_transferred), handler);
}
void session::handle_write(const error_code &ec) {
if (ec) {
throw system_error{ec};
}
auto handler = std::bind(&session::handle_read, shared_from_this(), _1, _2);
socket_.async_read_some(buffer(buffer_), handler);
}
Server class
The server class creates an acceptor for each given port. The acceptor will listen on the port and dispatch a socket for each incoming connection request. Waiting for an incoming connection is again implemented with an async_... function.
server.h
#pragma once
#include <vector>
#include <boost/asio.hpp>
#include "session.h"
/**
* Listens to a socket and dispatches sessions for each incoming request.
*/
class server {
using acceptor_t = boost::asio::ip::tcp::acceptor;
using endpoint_t = boost::asio::ip::tcp::endpoint;
using socket_t = boost::asio::ip::tcp::socket;
public:
server(boost::asio::io_service &service, const std::vector<uint16_t> &ports);
/**
* Start listening for incoming requests.
*/
void start_accept(size_t index);
/**
* Callback for when a request comes in.
*/
void handle_accept(size_t index, std::shared_ptr<session> new_session,
const boost::system::error_code &ec);
private:
/**
* Reference to the I/O service that will call our callbacks.
*/
boost::asio::io_service &service_;
/**
* List of acceptors each listening to (a different) socket.
*/
std::vector<acceptor_t> acceptors_;
};
server.cpp
#include "server.h"
#include <functional>
#include <boost/asio.hpp>
using std::placeholders::_1;
using std::placeholders::_2;
using boost::asio::io_service;
using boost::asio::error::eof;
using boost::system::error_code;
using boost::system::system_error;
server::server(boost::asio::io_service &service,
const std::vector<uint16_t> &ports)
: service_{service} {
auto create_acceptor = [&](uint16_t port) {
acceptor_t acceptor{service};
endpoint_t endpoint{boost::asio::ip::tcp::v4(), port};
acceptor.open(endpoint.protocol());
acceptor.set_option(acceptor_t::reuse_address(false));
acceptor.bind(endpoint);
acceptor.listen();
return acceptor;
};
std::transform(ports.begin(), ports.end(), std::back_inserter(acceptors_),
create_acceptor);
for (size_t i = 0; i < acceptors_.size(); i++) {
start_accept(i);
}
}
void server::start_accept(size_t index) {
auto new_session{std::make_shared<session>(service_)};
auto handler =
std::bind(&server::handle_accept, this, index, new_session, _1);
acceptors_[index].async_accept(new_session->socket(), handler);
}
void server::handle_accept(size_t index, std::shared_ptr<session> new_session,
const boost::system::error_code &ec) {
if (ec) {
throw system_error{ec};
}
new_session->start();
start_accept(index);
}
Main
The main function creates the server for a series of ports.
For this example, the ports are set to 5000,...,5009. It then spawns one thread per CPU core (capped at the number of ports), each of which calls the run function of the I/O service provided by Boost ASIO. The I/O service is capable of handling such a multi-threading scenario, dispatching work among the threads that have called its run function (reference):
Multiple threads may call the run() function to set up a pool of threads from which the io_context may execute handlers. All threads that are waiting in the pool are equivalent and the io_context may choose any one of them to invoke a handler.
server_main.cpp
#include "server.h"
#include <numeric>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
int main() {
std::vector<uint16_t> ports{};
// Fill ports with range [5000,5000+n)
ports.resize(10);
std::iota(ports.begin(), ports.end(), 5000);
boost::asio::io_service service{};
server s{service, ports};
// Spawn thread group for running the I/O service
size_t thread_count = std::min(
static_cast<size_t>(boost::thread::hardware_concurrency()), ports.size());
boost::thread_group tg{};
for (size_t i = 0; i < thread_count; ++i) {
tg.create_thread([&]() { service.run(); });
}
tg.join_all();
return 0;
}
You could compile the server for example with g++ -O2 {session,server,server_main}.cpp -o server -lboost_thread -lpthread. If you run the server with clients that send it random data, you would get output such as:
Thread 140043413878528: Received 4096 bytes on 127.0.0.1:5007 from 127.0.0.1:40856
Thread 140043405485824: Received 4096 bytes on 127.0.0.1:5000 from 127.0.0.1:42556
Thread 140043388700416: Received 4096 bytes on 127.0.0.1:5005 from 127.0.0.1:58582
Thread 140043388700416: Received 4096 bytes on 127.0.0.1:5001 from 127.0.0.1:40192
Thread 140043388700416: Received 4096 bytes on 127.0.0.1:5003 from 127.0.0.1:42508
Thread 140043397093120: Received 4096 bytes on 127.0.0.1:5008 from 127.0.0.1:37808
Thread 140043388700416: Received 4096 bytes on 127.0.0.1:5006 from 127.0.0.1:35440
Thread 140043397093120: Received 4096 bytes on 127.0.0.1:5009 from 127.0.0.1:58306
Thread 140043405485824: Received 4096 bytes on 127.0.0.1:5002 from 127.0.0.1:56300
You can see the server handling multiple ports, with work being distributed among the worker threads (not necessarily restricting each thread to a specific port).
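To generate that kind of traffic, a throwaway client along the following lines can be pointed at any of the ports. This client is not part of the answer above; 127.0.0.1:5000 and the 4096-byte payload are just assumptions matching the example.
#include <boost/asio.hpp>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <random>
int main() {
    namespace net = boost::asio;
    using tcp = net::ip::tcp;
    net::io_context ioc;
    tcp::socket socket(ioc);
    net::connect(socket, tcp::resolver(ioc).resolve("127.0.0.1", "5000"));
    // Fill a buffer with random bytes and send it
    std::array<std::uint8_t, 4096> data{};
    std::mt19937 rng{std::random_device{}()};
    std::uniform_int_distribution<int> dist(0, 255);
    for (auto &b : data) b = static_cast<std::uint8_t>(dist(rng));
    net::write(socket, net::buffer(data));
    // Wait until the full echo comes back
    std::array<std::uint8_t, 4096> reply{};
    std::size_t n = net::read(socket, net::buffer(reply));
    std::cout << "Echoed " << n << " bytes back\n";
}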
I use the excellent websocketpp library to provide a WebSocket (and HTTP) server in a C++ application. I also need an HTTP client in the same app to connect to REST APIs. I have been attempting this in websocketpp as well, but so far I have had little success. The following preliminary attempt gives me this log output:
[2015-03-06 18:01:18] [connect] Successful connection
[2015-03-06 18:01:18] [error] Server handshake response error: websocketpp.processor:20 (Invalid HTTP status.)
[2015-03-06 18:01:18] [disconnect] Failed: Invalid HTTP status.
This suggests my http_ handler method may need something more. Any advice would be appreciated. The websocketpp docs and examples don't seem to include a simple HTTP client.
#define _WEBSOCKETPP_CPP11_STL_
#include <websocketpp/config/asio_client.hpp>
#include <websocketpp/client.hpp>
#include <websocketpp/common/thread.hpp>
namespace {
using namespace websocketpp;
typedef client<websocketpp::config::asio_client> client;
class Client {
public:
Client(void){
client_.init_asio();
client_.set_http_handler(bind(&Client::http_,this,_1));
}
std::string get(const std::string& url) {
websocketpp::lib::error_code error;
client::connection_ptr con = client_.get_connection(url,error);
if(error) std::runtime_error("Unable to connnect.\n url: "+url+"\n message: "+error.message());
client_.connect(con);
websocketpp::lib::thread asio_thread(&client::run, &client_);
asio_thread.join();
return data_;
}
private:
void http_(connection_hdl hdl){
std::cout<<"Connected\n";
data_ = "http payload";
}
client client_;
std::string data_;
};
}
int main(void){
Client client;
client.get("http://google.com/");
}
WebSocket++'s HTTP handling features are a convenience feature designed to allow WebSocket servers to serve HTTP responses in a limited capacity. WebSocket++ is not intended for use as a generic HTTP library and does not contain the ability to play the role of a (non-WebSocket) HTTP client.
Using a separate library (such as cpp-netlib) for HTTP client functionality is a good solution.
If you're trying to do both WebSocket and HTTP in C++, there's a great library called Beast that has BOTH of these things! It's open source and builds on Boost.Asio:
https://github.com/vinniefalco/Beast/
Here's some example code:
Use HTTP to request the root page from a website and print the response:
#include <beast/http.hpp>
#include <boost/asio.hpp>
#include <iostream>
#include <string>
int main()
{
// Normal boost::asio setup
std::string const host = "boost.org";
boost::asio::io_service ios;
boost::asio::ip::tcp::resolver r(ios);
boost::asio::ip::tcp::socket sock(ios);
boost::asio::connect(sock,
r.resolve(boost::asio::ip::tcp::resolver::query{host, "http"}));
// Send HTTP request using beast
beast::http::request_v1<beast::http::empty_body> req;
req.method = "GET";
req.url = "/";
req.version = 11;
req.headers.replace("Host", host + ":" + std::to_string(sock.remote_endpoint().port()));
req.headers.replace("User-Agent", "Beast");
beast::http::prepare(req);
beast::http::write(sock, req);
// Receive and print HTTP response using beast
beast::streambuf sb;
beast::http::response_v1<beast::http::streambuf_body> resp;
beast::http::read(sock, sb, resp);
std::cout << resp;
}
Establish a WebSocket connection, send a message and receive the reply:
#include <beast/to_string.hpp>
#include <beast/websocket.hpp>
#include <boost/asio.hpp>
#include <iostream>
#include <string>
int main()
{
// Normal boost::asio setup
std::string const host = "echo.websocket.org";
boost::asio::io_service ios;
boost::asio::ip::tcp::resolver r(ios);
boost::asio::ip::tcp::socket sock(ios);
boost::asio::connect(sock,
r.resolve(boost::asio::ip::tcp::resolver::query{host, "80"}));
// WebSocket connect and send message using beast
beast::websocket::stream<boost::asio::ip::tcp::socket&> ws(sock);
ws.handshake(host, "/");
ws.write(boost::asio::buffer("Hello, world!"));
// Receive WebSocket message, print and close using beast
beast::streambuf sb;
beast::websocket::opcode op;
ws.read(op, sb);
ws.close(beast::websocket::close_code::normal);
std::cout << to_string(sb.data()) << "\n";
}
I did not know how to prevent the websocketpp client from asking for an Upgrade: connection, so I ended up using cpp-netlib for an HTTP client instead.