Convert struct sockaddr_in6* to boost::asio::ip::address_v6 - c++

I am implementing a host name / IP address parser for a TCP/IP client/server program.
I can successfully convert IPv4 addresses from sockaddr_in* to boost::asio::ip::address_v4, but I can't get the IPv6 conversion from struct sockaddr_in6* to boost::asio::ip::address_v6 right:
#include <iostream>
using std::cout;
#include <netdb.h>
#include <stdexcept>
using std::domain_error;
#include <sys/socket.h>
#include <string>
using std::string;
#include <vector>
using std::vector;
#include <boost/asio.hpp>
using boost::asio::ip::address;
using boost::asio::ip::address_v4;
using boost::asio::ip::address_v6;
vector<address> getAddresses(string const &hostname)
{
    struct addrinfo req = {.ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM};
    struct addrinfo *pai;
    int error = getaddrinfo(hostname.c_str(), nullptr, &req, &pai);
    if (error)
        throw domain_error("Could not resolve host name.");
    vector<address> addresses;
    for (struct addrinfo *info = pai; info != nullptr; info = info->ai_next) {
        if (info->ai_family == AF_INET) {
            auto ipv4socket = reinterpret_cast<struct sockaddr_in*>(info->ai_addr);
            auto ipv4addr = address_v4(htonl(ipv4socket->sin_addr.s_addr));
            addresses.emplace_back(ipv4addr);
        }
        /*
         * TODO: Implement IPv6 support.
        else {
            auto ipv6socket = reinterpret_cast<struct sockaddr_in6*>(info->ai_addr);
            auto ipv6base = reinterpret_cast<array<unsigned char, 16>>(ipv6socket->sin6_addr.__in6_u);
            auto ipv6addr = address_v6(ipv6base, ipv6socket->sin6_scope_id);
            addresses.emplace_back(ipv6addr);
        }
        */
    }
    return addresses;
}
int main()
{
    auto addresses = getAddresses("www.google.de");
    for (auto ipa : addresses)
        cout << "Address: " << ipa << "\n";
    return 0;
}

Got it working:
vector<address> getAddresses(string const &hostname)
{
    struct addrinfo req = {.ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM};
    struct addrinfo *pai;
    int error = getaddrinfo(hostname.c_str(), nullptr, &req, &pai);
    if (error)
        throw domain_error("Could not resolve host name.");
    vector<address> addresses;
    for (struct addrinfo *info = pai; info != nullptr; info = info->ai_next) {
        if (info->ai_family == AF_INET) {
            auto ipv4socket = reinterpret_cast<struct sockaddr_in*>(info->ai_addr);
            auto ipv4addr = address_v4(htonl(ipv4socket->sin_addr.s_addr));
            addresses.emplace_back(ipv4addr);
        } else {
            auto ipv6socket = reinterpret_cast<struct sockaddr_in6*>(info->ai_addr);
            // note: needs #include <array> and "using std::array;" added to the includes above
            array<unsigned char, 16> bytes = {
                ipv6socket->sin6_addr.s6_addr[0],
                ipv6socket->sin6_addr.s6_addr[1],
                ipv6socket->sin6_addr.s6_addr[2],
                ipv6socket->sin6_addr.s6_addr[3],
                ipv6socket->sin6_addr.s6_addr[4],
                ipv6socket->sin6_addr.s6_addr[5],
                ipv6socket->sin6_addr.s6_addr[6],
                ipv6socket->sin6_addr.s6_addr[7],
                ipv6socket->sin6_addr.s6_addr[8],
                ipv6socket->sin6_addr.s6_addr[9],
                ipv6socket->sin6_addr.s6_addr[10],
                ipv6socket->sin6_addr.s6_addr[11],
                ipv6socket->sin6_addr.s6_addr[12],
                ipv6socket->sin6_addr.s6_addr[13],
                ipv6socket->sin6_addr.s6_addr[14],
                ipv6socket->sin6_addr.s6_addr[15]
            };
            auto ipv6addr = address_v6(bytes, ipv6socket->sin6_scope_id);
            addresses.emplace_back(ipv6addr);
        }
    }
    return addresses;
}
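For what it's worth, a slightly tighter variant of the same conversion (an untested sketch; it additionally needs <cstring> for std::memcpy) copies the raw bytes into an address_v6::bytes_type, uses ntohl (the semantically correct direction for the network-order s_addr), and frees the addrinfo list when done:

vector<address> getAddresses(string const &hostname)
{
    struct addrinfo req = {.ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM};
    struct addrinfo *pai;
    if (getaddrinfo(hostname.c_str(), nullptr, &req, &pai))
        throw domain_error("Could not resolve host name.");
    vector<address> addresses;
    for (struct addrinfo *info = pai; info != nullptr; info = info->ai_next) {
        if (info->ai_family == AF_INET) {
            auto ipv4socket = reinterpret_cast<struct sockaddr_in*>(info->ai_addr);
            // s_addr is in network byte order; address_v4 expects host byte order.
            addresses.emplace_back(address_v4(ntohl(ipv4socket->sin_addr.s_addr)));
        } else if (info->ai_family == AF_INET6) {
            auto ipv6socket = reinterpret_cast<struct sockaddr_in6*>(info->ai_addr);
            // s6_addr is already a 16-byte array in network order; copy it verbatim.
            address_v6::bytes_type bytes;
            std::memcpy(bytes.data(), ipv6socket->sin6_addr.s6_addr, bytes.size());
            addresses.emplace_back(address_v6(bytes, ipv6socket->sin6_scope_id));
        }
    }
    freeaddrinfo(pai);   // the versions above leak the list returned by getaddrinfo()
    return addresses;
}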
Next station: codereview

Related

`getnameinfo()` failing on WSL

I'm writing some cross-platform networking code and have come across some inconsistent behavior in getnameinfo() on Windows and Linux (WSL).
The code below does the following:
Gets an address using getaddrinfo().
Calls getnameinfo() on that address with:
NI_NAMEREQD set and not set.
NI_NUMERICHOST set and not set.
// INCLUDES
#if defined(PLATFORM_WINDOWS)
#include <winsock2.h>
#include <ws2tcpip.h>
#else
#include <cerrno>
#include <netdb.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#endif
#include <algorithm>
#include <cstring>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
// DEBUG UTILS
namespace debug
{
#if defined(PLATFORM_WINDOWS)
void die()
{
__debugbreak();
}
#else
void die()
{
raise(SIGTRAP);
}
#endif
void die_if(bool condition)
{
if (condition)
die();
}
} // debug
// NET CODE
enum class error_code
{
no_error,
host_not_found,
try_again,
out_of_memory,
buffer_overflow,
unrecoverable_error,
system_error,
};
char const* get_error_string(error_code ec)
{
switch (ec)
{
case error_code::no_error: return "no_error";
case error_code::host_not_found: return "host_not_found";
case error_code::try_again: return "try_again";
case error_code::out_of_memory: return "out_of_memory";
case error_code::buffer_overflow: return "buffer_overflow";
case error_code::unrecoverable_error: return "unrecoverable_error";
case error_code::system_error: return "system_error";
}
debug::die();
return nullptr;
}
namespace ip
{
enum class address_family
{
v4, v6, unspecified,
};
enum class protocol
{
tcp, udp,
};
} // ip
class platform_context
{
public:
#if defined (PLATFORM_WINDOWS)
platform_context()
{
auto data = WSADATA();
auto const result = WSAStartup(MAKEWORD(2, 2), &data);
debug::die_if(result != 0);
debug::die_if(LOBYTE(data.wVersion) != 2 || HIBYTE(data.wVersion) != 2);
}
~platform_context()
{
auto const result = WSACleanup();
debug::die_if(result != 0);
}
#else
platform_context() { }
~platform_context() { }
#endif
platform_context(platform_context const&) = delete;
platform_context operator=(platform_context const&) = delete;
platform_context(platform_context&&) = delete;
platform_context operator=(platform_context&&) = delete;
};
using addrinfo_ptr = std::unique_ptr<addrinfo, std::function<void(addrinfo*)>>;
int get_ai_family(ip::address_family family)
{
switch (family)
{
case ip::address_family::v4: return AF_INET;
case ip::address_family::v6: return AF_INET6;
case ip::address_family::unspecified: return AF_UNSPEC;
}
debug::die();
return AF_UNSPEC;
}
int get_ai_socktype(ip::protocol protocol)
{
switch (protocol)
{
case ip::protocol::tcp: return SOCK_STREAM;
case ip::protocol::udp: return SOCK_DGRAM;
}
debug::die();
return SOCK_STREAM;
}
int get_ai_protocol(ip::protocol protocol)
{
switch (protocol)
{
case ip::protocol::tcp: return IPPROTO_TCP;
case ip::protocol::udp: return IPPROTO_UDP;
}
debug::die();
return IPPROTO_TCP;
}
ip::address_family get_ip_address_family(int ai_family)
{
switch (ai_family)
{
case AF_INET: return ip::address_family::v4;
case AF_INET6: return ip::address_family::v6;
case AF_UNSPEC: return ip::address_family::unspecified;
}
debug::die();
return ip::address_family::unspecified;
}
struct end_point
{
explicit end_point(addrinfo const& info):
address_length(0),
address{ 0 }
{
debug::die_if(info.ai_addrlen < 0);
debug::die_if(info.ai_addrlen > sizeof(sockaddr_storage));
address_length = static_cast<std::size_t>(info.ai_addrlen);
std::memcpy(&address, info.ai_addr, address_length);
}
ip::address_family get_address_family() const
{
return get_ip_address_family(address.ss_family);
}
std::size_t address_length;
sockaddr_storage address;
};
std::vector<end_point> get_end_points(addrinfo_ptr const& info)
{
if (!info)
return {};
auto result = std::vector<end_point>();
auto ptr = info.get();
while (ptr)
{
result.emplace_back(*ptr);
ptr = ptr->ai_next;
}
return result;
}
addrinfo_ptr get_address(error_code&, char const* node, char const* service, ip::address_family family, ip::protocol protocol, int flags)
{
debug::die_if(!node && !service);
auto hints = addrinfo();
std::memset(&hints, 0, sizeof(hints));
hints.ai_family = get_ai_family(family);
hints.ai_socktype = get_ai_socktype(protocol);
hints.ai_protocol = get_ai_protocol(protocol);
hints.ai_flags = flags;
auto out = (addrinfo*) nullptr;
auto const result = ::getaddrinfo(node, service, &hints, &out);
// error handling ignored for this example
// (make sure you have internet for testing remote end points)
debug::die_if(result != 0);
debug::die_if(out == nullptr);
return addrinfo_ptr(out, std::bind(::freeaddrinfo, std::placeholders::_1));
}
std::vector<end_point> get_wildcard_address(error_code& ec, ip::address_family family, ip::protocol protocol)
{
return get_end_points(get_address(ec, nullptr, "0", family, protocol, AI_PASSIVE));
}
std::vector<end_point> get_loopback_address(error_code& ec, ip::address_family family, ip::protocol protocol)
{
return get_end_points(get_address(ec, nullptr, "0", family, protocol, 0));
}
std::vector<end_point> get_address(error_code& ec, std::string const& node, std::string const& service, ip::address_family family, ip::protocol protocol)
{
return get_end_points(get_address(ec, node.c_str(), service.c_str(), family, protocol, 0));
};
enum class name_type
{
numeric,
name,
};
#if defined(PLATFORM_WINDOWS)
error_code get_getnameinfo_error(int result)
{
debug::die_if(result == 0);
auto const error = WSAGetLastError();
debug::die_if(error == WSANOTINITIALISED);
debug::die_if(error == WSAEAFNOSUPPORT);
debug::die_if(error == WSAEINVAL);
debug::die_if(error == WSAEFAULT);
switch (error)
{
case WSAHOST_NOT_FOUND: return error_code::host_not_found;
case WSATRY_AGAIN: return error_code::try_again;
case WSA_NOT_ENOUGH_MEMORY: return error_code::out_of_memory;
case WSANO_RECOVERY: return error_code::unrecoverable_error;
}
debug::die();
return error_code::no_error;
}
std::size_t get_cstr_len(char const* string, std::size_t max)
{
return strnlen_s(string, max);
}
#else
error_code get_getnameinfo_error(int result)
{
debug::die_if(result == 0);
auto const error = result;
debug::die_if(error == EAI_FAMILY);
debug::die_if(error == EAI_BADFLAGS);
switch (error)
{
case EAI_NONAME: return error_code::host_not_found;
case EAI_AGAIN: return error_code::try_again;
case EAI_MEMORY: return error_code::out_of_memory;
case EAI_OVERFLOW: return error_code::buffer_overflow;
case EAI_FAIL: return error_code::unrecoverable_error;
case EAI_SYSTEM: return error_code::system_error;
}
debug::die();
return error_code::no_error;
}
std::size_t get_cstr_len(char const* string, std::size_t max)
{
return strnlen(string, max);
}
#endif
//////////////
bool get_node_name(error_code& ec, std::string& node, name_type node_type, end_point const& end_point, bool require_name)
{
auto const numeric_flag = (node_type == name_type::numeric ? NI_NUMERICHOST : 0);
auto const require_flag = (require_name ? NI_NAMEREQD : 0);
char node_buffer[NI_MAXHOST] = { 0 };
auto const result = ::getnameinfo((sockaddr const*)&end_point.address, (socklen_t)end_point.address_length, node_buffer, NI_MAXHOST, nullptr, 0, numeric_flag | require_flag);
if (result != 0)
{
ec = get_getnameinfo_error(result);
return false;
}
node.resize(get_cstr_len(node_buffer, NI_MAXHOST));
std::copy_n(node_buffer, node.size(), node.begin());
return true;
}
//////////////
// TEST CODE
void test_get_node_name(end_point const& e, name_type node_type, bool name_required)
{
auto ec = error_code::no_error;
auto node = std::string();
auto result = get_node_name(ec, node, node_type, e, name_required);
std::cout << "\t"
<< (name_required ? "required - " : "not required - ")
<< (node_type == name_type::numeric ? "numeric - " : "");
if (result)
std::cout << "success (node name: '" << node << "')";
else
std::cout << "failed! (error: " << get_error_string(ec) << ")";
std::cout << "\n";
}
int main()
{
platform_context context;
std::cout << "wildcard address:" << std::endl;
{
auto ec = error_code::no_error;
auto end_points = get_wildcard_address(ec, ip::address_family::unspecified, ip::protocol::tcp);
debug::die_if(end_points.empty());
test_get_node_name(end_points.front(), name_type::name, true);
test_get_node_name(end_points.front(), name_type::name, false);
test_get_node_name(end_points.front(), name_type::numeric, true);
test_get_node_name(end_points.front(), name_type::numeric, false);
}
std::cout << "loopback address:" << std::endl;
{
auto ec = error_code::no_error;
auto end_points = get_loopback_address(ec, ip::address_family::unspecified, ip::protocol::tcp);
debug::die_if(end_points.empty());
test_get_node_name(end_points.front(), name_type::name, true);
test_get_node_name(end_points.front(), name_type::name, false);
test_get_node_name(end_points.front(), name_type::numeric, true);
test_get_node_name(end_points.front(), name_type::numeric, false);
}
std::cout << "remote address:" << std::endl;
{
auto ec = error_code::no_error;
auto end_points = get_address(ec, "www.google.com", "443", ip::address_family::unspecified, ip::protocol::tcp);
debug::die_if(end_points.empty());
test_get_node_name(end_points.front(), name_type::name, true);
test_get_node_name(end_points.front(), name_type::name, false);
test_get_node_name(end_points.front(), name_type::numeric, true);
test_get_node_name(end_points.front(), name_type::numeric, false);
}
}
This can be compiled with cl main.cpp /DPLATFORM_WINDOWS /nologo /EHsc /W4 /WX ws2_32.lib on Windows, and g++ -Wall -Werror -std=c++17 -o main main.cpp on WSL.
I get the following output on my system:
Windows:
wildcard address:
required - success (node name: 'ComputerName')
not required - success (node name: 'ComputerName')
required - numeric - success (node name: '::')
not required - numeric - success (node name: '::')
loopback address:
required - success (node name: 'ComputerName')
not required - success (node name: 'ComputerName')
required - numeric - success (node name: '::1')
not required - numeric - success (node name: '::1')
remote address:
required - success (node name: 'lhr25s12-in-f4.1e100.net')
not required - success (node name: 'lhr25s12-in-f4.1e100.net')
required - numeric - success (node name: '216.58.204.36')
not required - numeric - success (node name: '216.58.204.36')
WSL:
wildcard address:
required - failed! (error: host_not_found)
not required - success (node name: '0.0.0.0')
required - numeric - failed! (error: host_not_found)
not required - numeric - success (node name: '0.0.0.0')
loopback address:
required - success (node name: 'ip6-localhost')
not required - success (node name: 'ip6-localhost')
required - numeric - failed! (error: host_not_found)
not required - numeric - success (node name: '::1')
remote address:
required - success (node name: 'lhr25s12-in-x04.1e100.net')
not required - success (node name: 'lhr25s12-in-x04.1e100.net')
required - numeric - failed! (error: host_not_found)
not required - numeric - success (node name: '2a00:1450:4009:80d::2004')
So the getnameinfo() behavioral differences are:
Non-numeric wildcard addresses work on Windows, but fail on WSL.
Numeric address lookups fail on WSL when NI_NAMEREQD is set.
Are these differences simply alternative interpretations of the specs? Is it reasonable for the Windows version to return the ComputerName as the host name?
I'm not yet sure why the wildcard lookups fail, but after digging around in glibc, it seems that NI_NUMERICHOST and NI_NAMEREQD simply don't work together:
/* Convert AF_INET or AF_INET6 socket address, host part. */
static int
gni_host_inet (struct scratch_buffer *tmpbuf,
               const struct sockaddr *sa, socklen_t addrlen,
               char *host, socklen_t hostlen, int flags)
{
  if (!(flags & NI_NUMERICHOST))
    {
      int result = gni_host_inet_name
        (tmpbuf, sa, addrlen, host, hostlen, flags);
      if (result != EAI_NONAME)
        return result;
    }
  if (flags & NI_NAMEREQD)
    return EAI_NONAME;
  else
    return gni_host_inet_numeric
      (tmpbuf, sa, addrlen, host, hostlen, flags);
}
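Given that code path, one workaround (a hypothetical tweak to the get_node_name() above, untested) is simply not to combine the two flags: the numeric conversion always produces something, so NI_NAMEREQD only makes sense for the non-numeric, reverse-lookup case:

    // Hypothetical flag selection for get_node_name(): only request NI_NAMEREQD
    // when we are actually asking for a name.
    auto flags = 0;
    if (node_type == name_type::numeric)
        flags |= NI_NUMERICHOST;
    else if (require_name)
        flags |= NI_NAMEREQD;
    auto const result = ::getnameinfo((sockaddr const*)&end_point.address,
        (socklen_t)end_point.address_length, node_buffer, NI_MAXHOST,
        nullptr, 0, flags);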

How to use libuv to accept TCP connections with multiple threads?

I wrote a C++ demo of a TCP server with libuv. When I checked the CPU usage, I found that the demo runs on a single thread. How can I make it multi-threaded?
Currently the demo can handle 100,000+ TCP requests per second, but it only uses one CPU core.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>
using namespace std;
auto loop = uv_default_loop();
struct sockaddr_in addr;
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
typedef struct {
uv_stream_t* client;
uv_alloc_cb alloc_cb;
uv_read_cb read_cb;
} begin_read_req;
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
buf->base = (char*)malloc(suggested_size);
buf->len = suggested_size;
}
void free_write_req(uv_write_t *req) {
write_req_t *wr = (write_req_t*)req;
free(wr->buf.base);
free(wr);
}
void echo_write(uv_write_t *req, int status) {
if (status) {
fprintf(stderr, "Write error %s\n", uv_strerror(status));
}
free_write_req(req);
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (nread > 0) {
auto req = (write_req_t*)malloc(sizeof(write_req_t));
auto *aaa = (char*)malloc(5);
aaa[0] = '+';
aaa[1] = 'O';
aaa[2] = 'K';
aaa[3] = '\r';
aaa[4] = '\n';
req->buf = uv_buf_init(aaa, 5);
uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write);
}
if (nread < 0) {
if (nread != UV_EOF)
fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<unsigned int>(nread)));
uv_close((uv_handle_t*)client, nullptr);
}
free(buf->base);
}
void acceptClientRead(uv_work_t *req) {
begin_read_req *data = (begin_read_req *)req->data;
uv_read_start(data->client, data->alloc_cb, data->read_cb);
}
void on_new_connection(uv_stream_t *server, int status) {
if (status < 0) {
cout << "New connection error:" << uv_strerror(status);
return;
}
uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
uv_tcp_init(loop, client);
uv_work_t *req = (uv_work_t *)malloc(sizeof(uv_work_t));
begin_read_req *read_req = (begin_read_req *)malloc(sizeof(begin_read_req));
read_req->client = (uv_stream_t *)client;
read_req->read_cb = echo_read;
read_req->alloc_cb = alloc_buffer;
req->data = read_req;
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
// uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
}
else {
uv_close((uv_handle_t *)client, nullptr);
}
}
void timer_callback(uv_timer_t* handle) {
cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}
int main() {
uv_tcp_t server{};
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", 8790, &addr);
uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
uv_listen((uv_stream_t *)&server, 511, on_new_connection);
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
Of course, I could make the write step in echo_read asynchronous, but since nothing else happens before the write, is there another way to make the demo multi-threaded and improve throughput?
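One common pattern (sketched below, untested; it assumes a Linux kernel with SO_REUSEPORT, and the run_worker helper is my own naming) is to run one event loop per thread, each with its own listening socket bound to the same port; the kernel then load-balances incoming connections across the listeners. The callbacks from the question can be reused as long as on_new_connection takes its loop from the handle (server->loop) rather than from the global loop:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <thread>
    #include <vector>
    #include "uv.h"

    // alloc_buffer / echo_read / on_new_connection as in the question, except that
    // on_new_connection must call uv_tcp_init(server->loop, client) so each client
    // stays on the loop of the listener that accepted it.

    void run_worker(int port) {
        uv_loop_t loop;
        uv_loop_init(&loop);

        // Create the socket ourselves so SO_REUSEPORT can be set before binding.
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

        struct sockaddr_in addr;
        uv_ip4_addr("0.0.0.0", port, &addr);
        bind(fd, (const struct sockaddr *)&addr, sizeof(addr));

        uv_tcp_t server;
        uv_tcp_init(&loop, &server);
        uv_tcp_open(&server, fd);                 // hand the bound fd to libuv
        uv_listen((uv_stream_t *)&server, 511, on_new_connection);

        uv_run(&loop, UV_RUN_DEFAULT);
        uv_loop_close(&loop);
    }

    int main() {
        unsigned n = std::thread::hardware_concurrency();
        std::vector<std::thread> workers;
        for (unsigned i = 0; i < n; ++i)
            workers.emplace_back(run_worker, 8790);
        for (auto &t : workers)
            t.join();
        return 0;
    }

Alternatively, a single accept loop can hand accepted descriptors to per-thread worker loops (uv_fileno on the accepted handle, then uv_tcp_open on the worker's loop), but for a plain echo workload the SO_REUSEPORT variant is usually the simpler way to spread the accept/read/write work across cores.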

Attempting to reference a deleted function inside xmemory0 on threaded sockets class

I'm trying to finish this threaded sockets class. This is what I've got so far:
#pragma once
#include <winsock2.h>
#include <ws2tcpip.h>
#include <stdio.h>
#include <atomic>
#include <thread>
#include <vector>
#pragma comment(lib, "Ws2_32.lib")
//---------------------------------------------------CLASS
class AsyncSockets
{
public:
int StartSockets(int, void(*)(SOCKET, sockaddr*, int, std::atomic<bool>&));
static const int FAIL_SUCCESS, FAIL_INIT_WS, FAIL_BIND_SOCKET, FAIL_ASYNCSELECT, FAIL_LISTEN, FAIL_ACCEPT;
bool StopSockets();
private:
/*static*/ void(*Functiane)(SOCKET, sockaddr*, int, std::atomic<bool>&);
struct RunThread {
std::thread Thread;
std::atomic<bool> Running = true;
};
void AcceptLoop();
SOCKET Socket; std::atomic<bool> running = false;
std::vector<AsyncSockets::RunThread>ThreadList;
void StartFunc(SOCKET, sockaddr*, int, int);
std::thread loopaccepter;
std::thread deleteInvalid;
};
//-------------------------------------STARTFUNC---PRIVATE
void AsyncSockets::StartFunc(SOCKET socketa, sockaddr* sockaddra, int sockaddrinfolena, int BoolIndex) {
if (socketa != INVALID_SOCKET) {
(*Functiane)(socketa, sockaddra, sockaddrinfolena, ThreadList[BoolIndex].Running);
}else{
closesocket(socketa); ThreadList[BoolIndex].Running = false; return;
}
closesocket(socketa);
ThreadList[BoolIndex].Running = false;
return;
}
//------------------------------------ACCEPTLOOP---PRIVATE
void AsyncSockets::AcceptLoop() {
while (true) {
struct sockaddr *AddressInfo = NULL; int addrinfoLen;
if (running) {
int temp = ThreadList.size();
for (int i = 0; i < temp; i++){
if (i != temp - 1) {
if (!ThreadList[i].Running){
if (ThreadList[i].Thread.joinable()) ThreadList[i].Thread.join();
ThreadList[i].Running = true; ThreadList[i].Thread = std::thread(&AsyncSockets::StartFunc, this, accept(Socket, AddressInfo, &addrinfoLen), AddressInfo, addrinfoLen, i); }
}else{
if (!ThreadList[i].Running) {
if (ThreadList[i].Thread.joinable()) ThreadList[i].Thread.join();
ThreadList[i].Running = true; ThreadList[i].Thread = std::thread(&AsyncSockets::StartFunc, this, accept(Socket, AddressInfo, &addrinfoLen), AddressInfo, addrinfoLen, i);}
else { ThreadList.push_back({ std::thread(&AsyncSockets::StartFunc, this, accept(Socket, AddressInfo, &addrinfoLen), AddressInfo, addrinfoLen, i + 1), true });}
}
}
}else return;
}
}
//-----------------------------------STARTSOCKETS---PUBLIC
int AsyncSockets::StartSockets(int port, void(*functian)(SOCKET, sockaddr*, int, std::atomic<bool>&)) {
//DeclaringShiet
int recvbuflen = 512;
//InitWinsock
WSADATA WsaDat;
int nResult = WSAStartup(MAKEWORD(2, 2), &WsaDat);
if (nResult != 0)
{
return FAIL_INIT_WS;
}
//BuildAddr
SOCKADDR_IN SockAddr;
SockAddr.sin_port = htons(port);
SockAddr.sin_family = AF_INET;
SockAddr.sin_addr.s_addr = htonl(INADDR_ANY);
//BindSocket
if (bind(Socket, (LPSOCKADDR)&SockAddr, sizeof(SockAddr)) == SOCKET_ERROR)
{
return FAIL_BIND_SOCKET;
}
//StartListening
if (listen(Socket, 10) == SOCKET_ERROR)
{
return FAIL_LISTEN;
}
//StartAccepting
running = true;
Functiane = (*functian);
loopaccepter = std::thread(&AsyncSockets::AcceptLoop, this);
}
//------------------------------------STOPSOCKETS---PUBLIC
bool AsyncSockets::StopSockets() {
running = false;
loopaccepter.join();
closesocket(Socket);
for (unsigned int i = 0; i < ThreadList.size(); i++)ThreadList[i].Running = false;
for (unsigned int i = 0; i < ThreadList.size(); i++)if(ThreadList[i].Thread.joinable())ThreadList[i].Thread.join();
return true;
}
//------------------------------------ERRORCONSTS---PUBLIC
#pragma region DeclaringErrorConsts
const int
AsyncSockets::FAIL_SUCCESS = 0x0,
AsyncSockets::FAIL_INIT_WS = 0x1,
AsyncSockets::FAIL_BIND_SOCKET = 0x2,
AsyncSockets::FAIL_LISTEN = 0x3,
AsyncSockets::FAIL_ACCEPT = 0x4;
#pragma endregion
So, as you may have noticed, I'm trying to do this with std::thread, vectors, and Winsock. The point is to pass a function as an argument to StartSockets that will do all the sending, receiving, and processing.
Thread objects are stored in a vector of RunThread, a struct that contains the thread object plus an atomic bool indicating whether the thread is/should be running.
My problem is that I'm getting
'AsyncSockets::RunThread::RunThread(const AsyncSockets::RunThread &)': attempting to reference a deleted function
in xmemory0 (line 840)
(c:\program files (x86)\microsoft visual studio\2017\community\vc\tools\msvc\14.10.24728\include\xmemory0)
I have no idea why, and since RunThread is a struct and not a function, and RunThread::RunThread doesn't make sense to me, I don't understand at all what is going on. In case it helps, I'm on VS 2017 RC on Windows 10, building a Win32 console application.
What am I doing wrong?
Thank you.
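For context (this illustration is mine, not part of the original post): the diagnostic comes from std::vector needing to copy or move its elements, and RunThread can do neither, because std::thread is not copyable and std::atomic<bool> is neither copyable nor movable, so the compiler implicitly deletes RunThread's copy and move constructors; the "deleted function" MSVC names is that copy constructor, referenced from vector's element-construction machinery in xmemory0. A minimal reproduction:

    #include <atomic>
    #include <thread>
    #include <vector>

    struct RunThread {
        std::thread Thread;               // movable, but not copyable
        std::atomic<bool> Running = true; // neither copyable nor movable
    };

    int main() {
        std::vector<RunThread> list;
        // Error: RunThread's copy/move constructors are implicitly deleted,
        // so vector cannot insert or relocate elements of this type.
        list.push_back(RunThread{});
    }

A common way out (again, only a sketch) is to store std::unique_ptr<RunThread> in the vector, or to give RunThread user-defined move operations that explicitly load and store the atomic flag.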

SDL_Net Socket error on creation

I'm trying to create a socket for my SDL server.
The problem is that I get an access violation crash because my socket, server, cannot be opened properly.
My class:
#pragma once
#include <SDL_net.h>
#include <thread>
#include <vector>
#include <string>
#include <iostream>
using namespace std;
const Uint16 SERVER_PORT = 1234;
const int TICKS_PER_SECOND = 1000;
const int REQUIRED_PLAYERS = 1;
class ServerTCP {
private:
//Thread data
thread *threadListen;
bool threadExit;
//Server data
IPaddress serverIP;
TCPsocket server;
vector <string> feedback;
//Client data
vector <TCPsocket> clients;
vector <string> events;
static void threadLoop(ServerTCP *self);
public:
ServerTCP();
~ServerTCP();
};
Source:
#include "ServerTCP.h"
ServerTCP::ServerTCP() {
printf("Starting server...\n");
if (SDLNet_ResolveHost(&serverIP, NULL, SERVER_PORT) == -1) {
printf("SDLNet_ResolveHost: %s\n", SDLNet_GetError());
}
server = SDLNet_TCP_Open(&serverIP);
if (!server) {
printf("SDLNet_TCP_Open: %s\n", SDLNet_GetError());
}
threadExit = false;
threadListen = new thread(&ServerTCP::threadLoop, this);
}
ServerTCP::~ServerTCP() {
printf("Shutting down server...\n");
threadExit = true;
threadListen->join();
for (int i = 0; i < clients.size(); i++) {
string warning = "Server has shut down, you was disconnected!\n";
SDLNet_TCP_Send(clients[i], warning.c_str(), warning.size());
SDLNet_TCP_Close(clients[i]);
}
SDLNet_TCP_Close(server);
}
void ServerTCP::threadLoop(ServerTCP *self) {
printf("Waiting for players...\n");
TCPsocket newClient;
//Run thread until orderered to stop
while (!self->threadExit) {
//Look for new clients
newClient = SDLNet_TCP_Accept(self->server);
if (newClient) {
self->clients.push_back(newClient);
string warning = "You have connected to the server!\n";
SDLNet_TCP_Send(newClient, warning.c_str(), warning.size());
printf("Player %i has connected!\n", self->clients.size());
}
if (self->clients.size() >= REQUIRED_PLAYERS) {
for (int i = 0; i < REQUIRED_PLAYERS; i++) {
string warning = "You found an opponent!\n";
SDLNet_TCP_Send(self->clients[i], warning.c_str(), warning.size());
SDLNet_TCP_Close(self->clients[i]);
}
}
}
}
Output:
Starting server...
SDLNet_TCP_Open: Couldn't create socket
Never mind: I forgot that the SDLNet_Init() call was in a subclass that I had removed, so it was never being called in the server file.
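In other words, SDLNet_Init() has to be called (and succeed) before the constructor runs SDLNet_TCP_Open(). A minimal sketch of the intended order (my example, not the original code):

    #include <cstdio>
    #include <SDL_net.h>
    #include "ServerTCP.h"

    int main(int, char**) {
        // SDLNet_Init() must succeed before any other SDL_net call.
        if (SDLNet_Init() == -1) {
            printf("SDLNet_Init: %s\n", SDLNet_GetError());
            return 1;
        }
        {
            ServerTCP server;   // SDLNet_TCP_Open can now create the listening socket
            // ... run the server until done ...
        }                       // ~ServerTCP closes its sockets before SDLNet_Quit
        SDLNet_Quit();
        return 0;
    }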

boost::asio over SocketCAN

I was thinking of using Boost.Asio to read data from a SocketCAN socket. There's nothing fancy going on in linux/can.h: the device should behave like the loopback interface and be used with a raw socket.
Looking at the basic_raw_socket interface, it seems that I can use basic_raw_socket::assign to take over the native socket created with
socket( PF_CAN, SOCK_RAW, CAN_RAW );
This is what I have so far:
namespace can {
class CanSocket {
public:
typedef boost::asio::ip::basic_endpoint<CanSocket> endpoint;
typedef boost::asio::ip::basic_resolver_query<CanSocket> resolver_query;
typedef boost::asio::ip::basic_resolver_iterator<CanSocket> resolver_iterator;
typedef boost::asio::basic_raw_socket<CanSocket> socket;
typedef boost::asio::ip::basic_resolver<CanSocket> resolver;
CanSocket()
: _protocol( CAN_RAW )
, _family( PF_CAN )
{
}
static CanSocket v4()
{
return CanSocket();
}
static CanSocket v6();
int type() const;
int protocol() const;
int family() const;
friend bool operator==(const CanSocket& p1, const CanSocket& p2)
{
return p1._protocol == p2._protocol && p1._family == p2._family;
}
friend bool operator!=(const CanSocket& p1, const CanSocket& p2)
{
return p1._protocol != p2._protocol || p1._family != p2._family;
}
private:
int _protocol;
int _family;
};
}
And this is how I use it in my application
boost::asio::io_service ioserv;
CanSocket::socket s( ioserv );
int sock = socket( PF_CAN, SOCK_RAW, CAN_RAW );
s.assign(CanSocket::v4(), sock);
struct ifreq ifr;
strcpy(ifr.ifr_name, "vcan0");
ioctl(sock, SIOCGIFINDEX, &ifr); /* ifr.ifr_ifindex gets filled
* with that device's index */
/* Select that CAN interface, and bind the socket to it. */
/* this should be the endpoint */
struct sockaddr_can addr;
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
/* s.bind (....) */
bind( sock, (struct sockaddr*)&addr, sizeof(addr) );
What I don't quite get is how do I bind s to the local endpoint? There are no IPs or ports involved.
Is there anything else that should be implemented besides the endpoint to get it going?
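For the record, basic_raw_socket mainly needs a matching endpoint type rather than the ip::basic_endpoint typedefs above: something that wraps a sockaddr_can and exposes protocol(), data(), size(), capacity() and resize(). A rough, untested sketch of that shape (the CanEndpoint name and details are my own):

    #include <cstring>
    #include <sys/socket.h>
    #include <linux/can.h>

    class CanEndpoint {
    public:
        typedef can::CanSocket protocol_type;

        CanEndpoint() { std::memset(&_addr, 0, sizeof(_addr)); _addr.can_family = AF_CAN; }
        explicit CanEndpoint(int ifindex) : CanEndpoint() { _addr.can_ifindex = ifindex; }

        protocol_type protocol() const { return protocol_type::v4(); }

        // asio's Endpoint requirements: raw access to the underlying sockaddr.
        sockaddr* data() { return reinterpret_cast<sockaddr*>(&_addr); }
        const sockaddr* data() const { return reinterpret_cast<const sockaddr*>(&_addr); }
        std::size_t size() const { return sizeof(_addr); }
        std::size_t capacity() const { return sizeof(_addr); }
        void resize(std::size_t) { }

    private:
        sockaddr_can _addr;
    };

With something like that, s.bind(CanEndpoint(ifr.ifr_ifindex)) could replace the native bind() call; whether that is worth the effort compared to the stream_descriptor approach below is debatable.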
Here is a working example, assembled with the help of this thread:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <iostream>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
void data_send(void) {
std::cout << "omg sent" << std::endl;
}
void data_rec(struct can_frame &rec_frame,
boost::asio::posix::basic_stream_descriptor<> &stream) {
std::cout << std::hex << rec_frame.can_id << " ";
for (int i = 0; i < rec_frame.can_dlc; i++) {
std::cout << std::hex << int(rec_frame.data[i]) << " ";
}
std::cout << std::dec << std::endl;
stream.async_read_some(
boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
}
int main(void) {
struct sockaddr_can addr;
struct can_frame frame;
struct can_frame rec_frame;
struct ifreq ifr;
int natsock = socket(PF_CAN, SOCK_RAW, CAN_RAW);
strcpy(ifr.ifr_name, "vcan0");
ioctl(natsock, SIOCGIFINDEX, &ifr);
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
if (bind(natsock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
perror("Error in socket bind");
return -2;
}
frame.can_id = 0x123;
frame.can_dlc = 2;
frame.data[0] = 0x11;
frame.data[1] = 0x23;
boost::asio::io_service ios;
boost::asio::posix::basic_stream_descriptor<> stream(ios);
stream.assign(natsock);
stream.async_write_some(boost::asio::buffer(&frame, sizeof(frame)),
boost::bind(data_send));
stream.async_read_some(
boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
ios.run();
}
The solution is to use posix::stream_descriptor: just open the native socket, bind it, and then use posix::basic_stream_descriptor::assign.
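As a side note (my own addition, assuming Boost 1.66 or newer and <functional> included), the same pattern works with io_context and a lambda instead of io_service and boost::bind; posix::stream_descriptor is only available where BOOST_ASIO_HAS_POSIX_STREAM_DESCRIPTOR is defined, which it is on Linux:

    boost::asio::io_context io;
    boost::asio::posix::stream_descriptor stream(io);
    stream.assign(natsock);   // natsock: the bound PF_CAN / SOCK_RAW socket from above

    struct can_frame rx {};
    std::function<void(boost::system::error_code, std::size_t)> on_read =
        [&](boost::system::error_code ec, std::size_t /*n*/) {
            if (ec) return;
            std::cout << std::hex << rx.can_id << std::dec << "\n";
            stream.async_read_some(boost::asio::buffer(&rx, sizeof(rx)), on_read);
        };
    stream.async_read_some(boost::asio::buffer(&rx, sizeof(rx)), on_read);
    io.run();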