I'm creating a server/client setup using UDP, but the Rust server is not receiving the C++ client's messages. I have been able to successfully get Rust server / Rust client and C++ server / Rust client communication working.
This leads me to believe that there is an issue with my C++ code, or that there is some kind of miscommunication when sending C++ buffers to Rust, but I have used code that I believe works. Everything is sent from and to the same computer; this has not yet been expanded to computer-to-computer communication.
I am no expert with UDP/TCP, so I may be doing something incorrectly.
Rust server:
use std::net::UdpSocket;

fn main() {
    let udp: UdpSocket = UdpSocket::bind("0.0.0.0:12000")
        .expect("Failed to bind to address for sending/receiving messages");
    udp.connect("127.0.0.1:12683")
        .expect("Failed to connect address receiving our messages");

    // The below (recv_from) is set to blocking
    let mut buf = [0; 20];
    let (number_of_bytes, src_addr) = udp.recv_from(&mut buf).expect("Didn't receive data");
    let filled_buf = &mut buf[..number_of_bytes];
    println!("{:?}", filled_buf);
}
C++ client:
#include <iostream>
#include <boost/asio.hpp>

using namespace boost::asio;

int main() {
    boost::asio::io_service io_service;
    ip::udp::socket socket( io_service );
    ip::udp::endpoint remote_endpoint;

    std::cout << "sending reply..." << std::endl;

    socket.open( ip::udp::v4() );
    remote_endpoint = ip::udp::endpoint( ip::address::from_string( "127.0.0.1" ), 12000 );

    unsigned char buff[8]{ 5,5,5,5,5,5,5,5 };
    boost::system::error_code err;
    //auto sent = socket.send_to( buffer( "Jane Doe"), remote_endpoint, 0, err );
    auto sent = socket.send_to( buffer( buff ), remote_endpoint, 0, err );

    std::cout << err << std::endl;
    std::cout << "Sent: " << sent << std::endl;

    socket.close();
    return 0;
}
The C++ client reports that the data was sent (the sent variable) and that there is no error (the err variable). However, my Rust server never receives the data. The recv_from call is blocking, so it just sits there waiting to receive data (it's looking at port 12000 while the client is sending to port 12000).
When you connect a UDP socket, that causes the socket to only receive datagrams from the address it is connected to. Here the server is connected to 127.0.0.1:12683, but the client sends from an ephemeral port, so its datagrams are filtered out. Servers should not connect their UDP sockets: remove the udp.connect(...) call and simply bind and recv_from.
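For reference, here is a minimal sketch of a connectionless UDP receiver, written with Boost.Asio since the rest of the client code here is C++; the same principle applies to the Rust server (bind to the port and call recv_from, with no connect call). The port 12000 simply mirrors the question.

#include <array>
#include <iostream>
#include <boost/asio.hpp>

int main() {
    namespace ip = boost::asio::ip;
    boost::asio::io_service io_service;

    // Bind to port 12000 on all interfaces. The socket is NOT connected,
    // so datagrams from any client (any source address/port) are accepted.
    ip::udp::socket socket(io_service, ip::udp::endpoint(ip::udp::v4(), 12000));

    std::array<char, 20> buf;
    ip::udp::endpoint sender;
    std::size_t n = socket.receive_from(boost::asio::buffer(buf), sender);
    std::cout << "got " << n << " bytes from " << sender << std::endl;
    return 0;
}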
I need some help with a socket program with multiple clients and one server. To simplify, I create:
3 socket clients
1 socket server
For each client, it opens a new connection for sending a new message and closes the connection after a response is received.
The server does not need to deal with connections concurrently; it can deal with the messages one by one.
Here is my code (runnable); compile it with /usr/bin/g++ mycode.cpp -g -lpthread -lrt -Wall -o mycode
#include <iostream>
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <unordered_map>
#include <thread>

using namespace std;

void Warning(string msg) { std::cout << msg << std::endl; }

namespace mySocket {
    class Memcached {
    public:
        // start a server
        static void controller(int port=7111) { std::thread (server, port).detach(); }

        // open a new connection to send a message:
        // 1. open a connection
        // 2. send the message
        // 3. read the message
        // 4. close the connection
        std::string sendMessage(string msg, string host, int port=7111) {
            int sock = 0, client_fd;
            struct sockaddr_in serv_addr;
            char buffer[1024] = { 0 };
            if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
                std::cout << "Socket creation error, msg: " << msg << ", host: " << host << ", port: " << port << std::endl;
                exit(1);
            }
            serv_addr.sin_family = AF_INET;
            serv_addr.sin_port = htons(port);
            if (inet_pton(AF_INET, host.c_str(), &serv_addr.sin_addr) <= 0) {
                std::cout << "\nInvalid address/ Address not supported, kmsgey: " << msg << ", host: " << host << ", port: " << port << std::endl;
                exit(1);
            }
            while ((client_fd = connect(sock, (struct sockaddr*)&serv_addr, sizeof(serv_addr))) < 0) { sleep(10*1000); }
            std::cout << "client sends a message:" << msg << ", msg size:" << msg.size() << std::endl;
            send(sock, msg.c_str(), msg.size(), 0);
            read(sock, buffer, 1024);
            close(client_fd);
            return std::string(buffer, strlen(buffer));
        }

    private:
        // start a server
        // 1. open a file descriptor
        // 2. listen the fd with queue size 10
        // 3. accept one connection at a time
        // 4. deal with message in the connection
        // 5. accept the next connection
        // 6. repeat step 3
        static void server(int port) {
            int server_fd, new_socket;
            struct sockaddr_in address;
            int opt = 1;
            int addrlen = sizeof(address);
            char buffer[1024] = { 0 };
            unordered_map<string,string> data;
            if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0) {
                Warning("socket failed"); exit(1);
            }
            if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &opt, sizeof(opt))) {
                Warning("setsockopt failed"); exit(1);
            }
            address.sin_family = AF_INET;
            address.sin_addr.s_addr = INADDR_ANY;
            address.sin_port = htons(port);
            if (bind(server_fd, (struct sockaddr*)&address, sizeof(address)) < 0) {
                Warning("bind failed"); exit(1);
            }
            // the queue size is 10 > 3
            if (listen(server_fd, 10) < 0) {
                Warning("listen failed"); exit(1);
            }
            while (1)
            {
                if ((new_socket = accept(server_fd, (struct sockaddr*)&address, (socklen_t*)&addrlen)) < 0) {
                    std::cout << "accept failed"; exit(1);
                }
                memset(&buffer, 0, sizeof(buffer)); // clear the buffer
                read(new_socket, buffer, 1024);
                std::string msg = std::string(buffer, strlen(buffer));
                if (msg.size() == 0) {
                    std::cout << "I can't believe it" << std::endl;
                }
                std::cout << "received msg from the client:" << msg << ",msg size:" << msg.size() << std::endl;
                std::string results = "response from the server:[" + msg + "]";
                send(new_socket, results.c_str(), results.length(), 0);
                //usleep(10*1000);
            }
            if (close(new_socket) < 0) {
                std::cout << "close error" << std::endl;
            }
            shutdown(server_fd, SHUT_RDWR);
        }
    };
}

void operation(int client_id) {
    auto obj = new mySocket::Memcached();
    for (int i = 0; i < 10; i++) {
        int id = client_id*100 + i;
        std::cout << obj->sendMessage(std::to_string(id), "127.0.0.1", 7111) << std::endl << std::endl;
    }
}

int main(int argc, char const* argv[]) {
    // start a socket server
    mySocket::Memcached::controller();
    // start 3 socket clients
    std::thread t1(operation, 1);
    std::thread t2(operation, 2);
    std::thread t3(operation, 3);
    t1.join();
    t2.join();
    t3.join();
}
In the code above, the client always sends a message with a length of 3. However, the server can receive messages with a length of 0, which causes further errors.
I've been struggling with this for several days and can't figure out why it happens. I noticed that:
if I add a short sleep inside the server's while loop, the problem is solved (uncomment usleep(10*1000); in the code);
or if I only use one client, the problem is also solved.
Any thought helps.
You are using TCP sockets. You may want to use some application-level protocol like HTTP or WebSockets instead; that will be much easier, because you will not need to worry about how a message is sent/received and in which sequence. If you have to stick with TCP sockets, you first have to understand a few things:
There are two types of TCP socket IO you can use: non-blocking and blocking IO (input/output). You are currently using blocking IO. That IO will sometimes block, and you won't be able to do anything else with the socket. In blocking IO this can be worked around by using one thread per connection on the server side. It's not efficient, but it's relatively easy compared to non-blocking IO. Non-blocking IO doesn't wait for anything: while in blocking IO you wait for data, in non-blocking IO you register something like events or callbacks that run when data arrives. You probably have to read about these types of IO.
In your server function, it would be better to listen for incoming connections in one thread and, when there is an incoming connection, move that connection into another thread and function that handles everything else. This may solve your problem with multiple clients at the same time. A rough sketch of that structure follows.
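For illustration only, a minimal sketch of that split, assuming a server_fd that has already been created, bound, and put into listen() as in the question; handle_client and accept_loop are hypothetical helper names:

#include <cstring>
#include <iostream>
#include <string>
#include <thread>
#include <sys/socket.h>
#include <unistd.h>

// Hypothetical per-connection handler: read one request, send one reply, close.
static void handle_client(int client_socket) {
    char buffer[1024] = { 0 };
    ssize_t n = read(client_socket, buffer, sizeof(buffer));
    if (n > 0) {
        std::string msg(buffer, static_cast<size_t>(n));
        std::string reply = "response from the server:[" + msg + "]";
        send(client_socket, reply.c_str(), reply.size(), 0);
    }
    close(client_socket);
}

// The listening thread only accepts; each accepted connection is handled
// in its own detached thread.
static void accept_loop(int server_fd) {
    while (true) {
        int client_socket = accept(server_fd, nullptr, nullptr);
        if (client_socket < 0) {
            std::cout << "accept failed" << std::endl;
            continue;
        }
        std::thread(handle_client, client_socket).detach();
    }
}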
In the function operation, instead of allocating memory with a raw pointer, use automatic (stack) allocation or smart pointers to avoid memory leaks. If you don't want to, then at least do delete obj; at the end of the function. For example:
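A possible rewrite of the question's operation function using automatic storage (a unique_ptr would work just as well):

void operation(int client_id) {
    // Automatic storage: destroyed when it goes out of scope, no delete needed.
    // (Alternatively: auto obj = std::make_unique<mySocket::Memcached>(); from <memory>.)
    mySocket::Memcached obj;
    for (int i = 0; i < 10; i++) {
        int id = client_id * 100 + i;
        std::cout << obj.sendMessage(std::to_string(id), "127.0.0.1", 7111) << std::endl << std::endl;
    }
}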
And one last thing: you can use a TCP socket wrapper like sockpp to make things a lot easier. You will have everything TCP sockets have, but in C++ style and a little easier to understand and maintain. If you can't use an application-level protocol, I strongly suggest at least using some wrapper.
Update
As was stated by commenters, there are more things you need to know:
TCP sockets are streams. This means that if you send a message with a length of 1024 bytes, it can be divided into several TCP data packets, and you can't know whether it will be divided, how many packets the other side will receive, and so on. You have to read in a loop using recv() and wait for data. There are some tricks which can help you receive data properly (a sketch follows this list):
You can send the length of your message first, so the other side knows how many bytes it needs to receive.
You can place a terminating symbol, or a sequence of terminating symbols, at the end of your message and read until they are received. This can be a little risky, because there is a chance that you never receive these symbols at all and keep reading into the next message.
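As a sketch of the length-prefix idea, using the same POSIX calls as the code above (send_message, read_exact and recv_message are hypothetical helper names, not part of any library):

#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdint>
#include <string>

// Send a 4-byte big-endian length header followed by the payload,
// looping because send() may write fewer bytes than requested.
bool send_message(int fd, const std::string& msg) {
    uint32_t len = htonl(static_cast<uint32_t>(msg.size()));
    std::string framed(reinterpret_cast<const char*>(&len), sizeof(len));
    framed += msg;
    size_t sent = 0;
    while (sent < framed.size()) {
        ssize_t n = send(fd, framed.data() + sent, framed.size() - sent, 0);
        if (n <= 0) return false;
        sent += static_cast<size_t>(n);
    }
    return true;
}

// Read exactly `count` bytes, looping because TCP may deliver fewer per read().
bool read_exact(int fd, char* buf, size_t count) {
    size_t got = 0;
    while (got < count) {
        ssize_t n = read(fd, buf + got, count - got);
        if (n <= 0) return false;   // peer closed the connection or an error occurred
        got += static_cast<size_t>(n);
    }
    return true;
}

// Receive one length-prefixed message.
bool recv_message(int fd, std::string& out) {
    uint32_t len_be = 0;
    if (!read_exact(fd, reinterpret_cast<char*>(&len_be), sizeof(len_be))) return false;
    out.resize(ntohl(len_be));
    return out.empty() || read_exact(fd, &out[0], out.size());
}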
You should start (and then join) the client threads only when you know that the server has already started and is listening for incoming connections. You can use a variable as a flag for this purpose, but note that you have to pay a lot of attention when reading/writing a variable from two or more different threads. For that you can use mutexes, a mechanism that lets you safely access one variable from several threads. A small sketch follows.
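One way to express such a flag, sketched with a std::condition_variable; the function names mark_server_ready and wait_for_server are made up for the example (the server thread would call the first right after listen() succeeds, and main would call the second before spawning the clients):

#include <condition_variable>
#include <mutex>

std::mutex ready_mutex;
std::condition_variable ready_cv;
bool server_ready = false;

// Called by the server thread once listen() has succeeded.
void mark_server_ready() {
    {
        std::lock_guard<std::mutex> lock(ready_mutex);
        server_ready = true;
    }
    ready_cv.notify_all();
}

// Called before starting the client threads; blocks until the server is listening.
void wait_for_server() {
    std::unique_lock<std::mutex> lock(ready_mutex);
    ready_cv.wait(lock, [] { return server_ready; });
}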
I have one machine simultaneously running a C++ application and a Node.js server.
Use-case:
I want to be able to trigger my C++ application and make it pass some data (let's say a string) into a socket file. Then my Node.js server shall fetch that data from the socket and print it on some web page via a TCP port (code not included here/yet). The same should happen the other way around.
What I've done so far:
I was able to write some strings from my Node.js server into the socket file with the following code:
server.js
var net = require('net');
var fs = require('fs');

var socketPath = '/tmp/sock';

fs.stat(socketPath, function(err) {
    if (!err) fs.unlinkSync(socketPath);

    var unixServer = net.createServer(function(localSerialConnection) {
        localSerialConnection.on('data', function(data) {
            // data is a buffer from the socket
            console.log('Something happened!');
        });

        // write to socket with localSerialConnection.write()
        localSerialConnection.write('HELLO\n');
        localSerialConnection.write('I\'m\n');
        localSerialConnection.write('DOING something!\n');
        localSerialConnection.write('with the SOCKS\n');
    });

    unixServer.listen(socketPath);
});
I can read the content with nc -U /tmp/sock, with the following output: https://i.stack.imgur.com/ye2Dx.png
When I run my C++ code:
cpp_socket.cpp
#include <boost/asio.hpp>
#include <iostream>

int main() {
    using boost::asio::local::stream_protocol;
    boost::system::error_code ec;

    ::unlink("/tmp/sock"); // Remove previous binding.

    boost::asio::io_service service;
    stream_protocol::endpoint ep("/tmp/sock");
    stream_protocol::socket s(service);
    std::cout << "passed setup section" << std::endl;

    s.connect(ep);
    std::cout << "passed connection" << std::endl;

    std::string message = "Hello from C++!";
    std::cout << "before sending" << std::endl;
    boost::asio::write(s, boost::asio::buffer(message), boost::asio::transfer_all());
    /* s.write_some(boost::asio::buffer("hello world!"), ec); */
    std::cout << "after sending" << std::endl;
}
I get the following output:
/cpp_socket
passed setup section
terminate called after throwing an instance of 'boost::wrapexcept<boost::system::system_error>'
what(): connect: No such file or directory
Aborted (core dumped)
Even though the /tmp/sock file still exists.
When I comment out ::unlink("/tmp/sock"); // Remove previous binding. it runs through, but my Node.js server stops running and nc -U /tmp/sock loses its connection.
Neither the .write() nor the .write_some() function seems to work.
I assume that I miss something trivial or I'm not following basic concepts of unix socket communication.
Questions:
Is it even possible to listen with one Node.js server application to a TCP-port and a UNIX-socket at the same time?
Am I understanding the concept of unix socket communication correctly, judging from my input?
How can I read or write from C++ from/into a socket, preferably with C++ boost/asio library. But not necessarily necessary :-)
Am I asking the right questions?
As you might see, I'm not too experienced with these subjects. If I haven't addressed my issues accordingly and precisely enough, it's due to my lack of experience.
Thanks a lot in advance. Let's have a fruitful discussion.
Oh oops. The error was in plain sight:
::unlink("/tmp/sock"); // Remove previous binding.
Removes the socket. That's not good if you wanted to connect to it.
Removing that line made it work:
passed setup section
passed connection: Success
before sending
after sending
And on the listener side:
Which is, I guess, to be expected because the client isn't complete yet.
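For completeness, a minimal sketch of the corrected client without the unlink call; the read at the end is an addition of mine, just to show the server's greeting coming back over the same UNIX socket:

#include <array>
#include <iostream>
#include <string>
#include <boost/asio.hpp>

int main() {
    using boost::asio::local::stream_protocol;
    boost::asio::io_service service;

    // Connect to the socket file created by the Node.js server;
    // do not unlink it, the server owns it.
    stream_protocol::socket s(service);
    s.connect(stream_protocol::endpoint("/tmp/sock"));

    std::string message = "Hello from C++!";
    boost::asio::write(s, boost::asio::buffer(message));

    // Read whatever the server wrote (e.g. its greeting lines).
    std::array<char, 128> reply;
    std::size_t n = s.read_some(boost::asio::buffer(reply));
    std::cout.write(reply.data(), n) << std::endl;
    return 0;
}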
Disclaimer:
I made it work with TCP sockets, but I would like to see how it's possible with UNIX sockets. One more open port could lead to potential security threats (correct me if I'm wrong). So if you (sehe) or someone else knows how to achieve this, please feel free to share. Since I wasn't able to find this in my searches over the internet, it could be helpful for others, too.
What I did now:
Creating a NodeJS server which is listening to two ports. One port for the web-browser and one for the C++ application
Connect the C++ application with one port
Sending strings using telnet
server.js
const net = require('net');
const express = require('express');
const app = express();

const c_port = 6666;
const si_port = 8888;

//------------- From here Browser stream is handled -------------//
app.get('/', (req, res) => {
    res.send('Hello from Node!');
});

app.get('/index.html', (req, res) => {
    res.sendFile(__dirname + "/" + "index.html");
});

app.listen(si_port, (req, res) => {
    console.log(`Listening on http://localhost:${si_port}`);
});

//------------- From here C++ stream is handled -------------//
var server = net.createServer(function(c) { // 'connection' listener
    console.log('client connected');
    c.on('end', function() {
        console.log('client disconnected');
    });
    c.write('hello\r\n');
    c.on('data', function(data) {
        var read = data.toString();
        console.log(read);
        // var message = c.read();
        // console.log(message);
    })
    // c.pipe(c);
    c.write('Hello back to C++'); // But only if you shut down the server
});

server.listen(c_port, function() { // 'listening' listener
    console.log(`Listening for input from C++ application on port:${c_port}`);
});
client.cpp
#include <iostream>
#include <boost/asio.hpp>

int main(int argc, char* argv[])
{
    if (argc != 4) {
        std::cout << "Wrong parameter\n" << "Example usage ./client 127.0.0.1 1234 hello" << std::endl;
        return -1;
    }

    auto const address = boost::asio::ip::make_address(argv[1]);
    auto const port = std::atoi(argv[2]);
    std::string msg = argv[3];
    msg = msg + '\n';

    boost::asio::io_service io_service;

    // socket creation
    boost::asio::ip::tcp::socket socket(io_service);

    // connection
    boost::system::error_code ec;
    socket.connect(boost::asio::ip::tcp::endpoint(address, port), ec);
    if (ec) { std::cout << ec.message() << std::endl; return 1; }

    // request/message from client
    //const string msg = "Hello from Client!\n";
    boost::system::error_code error;
    boost::asio::write(socket, boost::asio::buffer(msg), error);
    if (error) {
        std::cout << "send failed: " << error.message() << std::endl;
    }

    // getting response from server
    boost::asio::streambuf receive_buffer;
    boost::asio::read(socket, receive_buffer, boost::asio::transfer_all(), error);
    if (error && error != boost::asio::error::eof) {
        std::cout << "receive failed: " << error.message() << std::endl;
    }
    else {
        const char* data = boost::asio::buffer_cast<const char*>(receive_buffer.data());
        std::cout << data << std::endl;
    }

    return 0;
}
With telnet localhost 6666 I can easily connect on that port and send random strings.
Executing my binary with additional arguments and a string, I was able to send some data from my C++ application: ./clientcpp 127.0.0.1 6666 "HELLO from C++". And here is the output:
Thanks a lot again.
I have one UDP server receiving messages from multiple remote clients. When it receives a message, I copy the endpoint and reply to the client at the same IP address on port 5000, where each client is listening.
I have tried multiple debugging strategies, and printing the endpoint right before I send the reply message gives me the correct IP address and port.
The sender:
std::cout << udp_remote_endpoint.address().to_string();
std::string str(packet.begin(), packet.end());
std::cout << str << std::endl;

io_service.post(
    [this, packet]()
    {
        udp_socket.async_send_to(
            boost::asio::buffer(packet),
            udp_remote_endpoint,
            boost::bind(
                &uds::handle_write,
                this,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred
            )
        );
    }
);
On the receiver, I get the udp_remote_endpoint and before sending, I set the socket endpoint:
new_addr.endpoint = socket.get_udp_remote_endpoint();
new_addr.endpoint.port(5000);
socket.set_udp_remote_endpoint(new_addr.endpoint);
For example, this output:
192.168.1.131K-131-1559147491761155
is actually being sent to the IP 192.168.1.130. The message contents ("K-131-1559147491761155") are correct.
Solved!
I removed the io_service.post and it worked!
std::cout << udp_remote_endpoint.address().to_string();
std::string str(packet.begin(), packet.end());
std::cout << str << std::endl;

//io_service.post(
//    [this, packet]()
//    {
        udp_socket.async_send_to(
            boost::asio::buffer(packet),
            udp_remote_endpoint,
            boost::bind(
                &uds::handle_write,
                this,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred
            )
        );
//    }
//);
I am learning multicast programming with socket.h and boost::asio. I am reviewing this link here, and they offer the following code using socket.h to implement a multicast server.
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>

struct in_addr localInterface;
struct sockaddr_in groupSock;
int sd;
char databuf[1024] = "Multicast test message lol!";
int datalen = sizeof(databuf);

int main (int argc, char *argv[ ])
{
    /* Create a datagram socket on which to send. */
    sd = socket(AF_INET, SOCK_DGRAM, 0);
    if(sd < 0)
    {
        perror("Opening datagram socket error");
        exit(1);
    }
    else
        printf("Opening the datagram socket...OK.\n");

    /* Initialize the group sockaddr structure with a */
    /* group address of 225.1.1.1 and port 5555. */
    memset((char *) &groupSock, 0, sizeof(groupSock));
    groupSock.sin_family = AF_INET;
    groupSock.sin_addr.s_addr = inet_addr("226.1.1.1");
    groupSock.sin_port = htons(4321);

    /* Disable loopback so you do not receive your own datagrams.
    {
        char loopch = 0;
        if(setsockopt(sd, IPPROTO_IP, IP_MULTICAST_LOOP, (char *)&loopch, sizeof(loopch)) < 0)
        {
            perror("Setting IP_MULTICAST_LOOP error");
            close(sd);
            exit(1);
        }
        else
            printf("Disabling the loopback...OK.\n");
    }
    */

    /* Set local interface for outbound multicast datagrams. */
    /* The IP address specified must be associated with a local, */
    /* multicast capable interface. */
    localInterface.s_addr = inet_addr("203.106.93.94");
    if(setsockopt(sd, IPPROTO_IP, IP_MULTICAST_IF, (char *)&localInterface, sizeof(localInterface)) < 0)
    {
        perror("Setting local interface error");
        exit(1);
    }
    else
        printf("Setting the local interface...OK\n");

    /* Send a message to the multicast group specified by the */
    /* groupSock sockaddr structure. */
    /*int datalen = 1024;*/
    if(sendto(sd, databuf, datalen, 0, (struct sockaddr*)&groupSock, sizeof(groupSock)) < 0)
        {perror("Sending datagram message error");}
    else
        printf("Sending datagram message...OK\n");

    /* Try the re-read from the socket if the loopback is not disable
    if(read(sd, databuf, datalen) < 0)
    {
        perror("Reading datagram message error\n");
        close(sd);
        exit(1);
    }
    else
    {
        printf("Reading datagram message from client...OK\n");
        printf("The message is: %s\n", databuf);
    }
    */

    return 0;
}
I am also reviewing an example of how to implement a multicast server using boost::asio here, and they present the following code.
//
// sender.cpp
// ~~~~~~~~~~
//
// Copyright (c) 2003-2010 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <iostream>
#include <sstream>
#include <string>
#include <boost/asio.hpp>
#include "boost/bind.hpp"
#include "boost/date_time/posix_time/posix_time_types.hpp"

const short multicast_port = 30001;
const int max_message_count = 10;

class sender
{
public:
    sender(boost::asio::io_service& io_service,
        const boost::asio::ip::address& multicast_address)
      : endpoint_(multicast_address, multicast_port),
        socket_(io_service, endpoint_.protocol()),
        timer_(io_service),
        message_count_(0)
    {
        std::ostringstream os;
        os << "Message " << message_count_++;
        message_ = os.str();

        socket_.async_send_to(
            boost::asio::buffer(message_), endpoint_,
            boost::bind(&sender::handle_send_to, this,
                boost::asio::placeholders::error));
    }

    void handle_send_to(const boost::system::error_code& error)
    {
        if (!error && message_count_ < max_message_count)
        {
            timer_.expires_from_now(boost::posix_time::seconds(1));
            timer_.async_wait(
                boost::bind(&sender::handle_timeout, this,
                    boost::asio::placeholders::error));
        }
    }

    void handle_timeout(const boost::system::error_code& error)
    {
        if (!error)
        {
            std::ostringstream os;
            os << "Message " << message_count_++;
            message_ = os.str();

            socket_.async_send_to(
                boost::asio::buffer(message_), endpoint_,
                boost::bind(&sender::handle_send_to, this,
                    boost::asio::placeholders::error));
        }
    }

private:
    boost::asio::ip::udp::endpoint endpoint_;
    boost::asio::ip::udp::socket socket_;
    boost::asio::deadline_timer timer_;
    int message_count_;
    std::string message_;
};

int main(int argc, char* argv[])
{
    try
    {
        if (argc != 2)
        {
            std::cerr << "Usage: sender <multicast_address>\n";
            std::cerr << "  For IPv4, try:\n";
            std::cerr << "    sender 239.255.0.1\n";
            std::cerr << "  For IPv6, try:\n";
            std::cerr << "    sender ff31::8000:1234\n";
            return 1;
        }

        boost::asio::io_service io_service;
        sender s(io_service, boost::asio::ip::address::from_string(argv[1]));
        io_service.run();
    }
    catch (std::exception& e)
    {
        std::cerr << "Exception: " << e.what() << "\n";
    }

    return 0;
}
I noticed the example using socket.h defines both a local interface address and a multicast address. However, the example using boost::asio only defines a multicast address. I will not include the code for the sake of brevity, but I noticed that the code to implement a multicast receiver with both socket.h and boost::asio defines both local interface and multicast addresses. But why do I not need to define a local interface address using boost::asio to implement a multicast server? Also, is boost::asio or socket.h faster if I want to send and receive multicast messages every few milliseconds?
When using multicast, one only needs to set the IP_MULTICAST_IF option when datagrams should egress a specific interface. Boost.Asio provides this option as ip::multicast::outbound_interface. When this option is not used, multicast transmissions are sent from the default interface, and the kernel may perform routing and forwarding through other interfaces. For instance, consider the case where a server has two NICs, connecting it to a LAN and a WAN. If the WAN is the default interface and multicast datagrams are to be sent to the LAN, then for a given socket one could use the socket option to specify the outbound interface as the LAN.
Oftentimes, the sender does not care about the exact endpoint (address and port) to which the socket binds. In both sender examples, the sender creates a socket and defers to the kernel to bind it to an endpoint. In the first example, the multicast messages sent from the local socket will egress the interface that has been assigned the 203.106.93.94 address.
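For illustration, a minimal sketch of a sender that pins the outbound interface with Boost.Asio; the interface address 192.168.1.10 and the group 239.255.0.1:30001 are placeholders, not values from the examples above:

#include <string>
#include <boost/asio.hpp>

int main() {
    namespace ip = boost::asio::ip;
    boost::asio::io_service io_service;

    ip::udp::endpoint multicast_endpoint(
        ip::address::from_string("239.255.0.1"), 30001);
    ip::udp::socket socket(io_service, multicast_endpoint.protocol());

    // Only needed when datagrams must egress a specific interface;
    // otherwise the default interface is used.
    socket.set_option(ip::multicast::outbound_interface(
        ip::address_v4::from_string("192.168.1.10")));

    std::string message = "Message 0";
    socket.send_to(boost::asio::buffer(message), multicast_endpoint);
    return 0;
}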
On the other hand, the receiver often cares about binding to a specific port. The receiver will bind the local socket to any appropriate address or defer to the kernel, and bind to the port matching the multicast endpoint's port. Once bound, a receiver will then have the socket join the multicast group, at which point the socket can begin receiving multicast datagrams. Note that for a given system, if multiple applications are interested in receiving the multicast datagram, then one should use the reuse_address socket option.
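A matching receiver sketch (again with 239.255.0.1:30001 as a placeholder group): it binds to the multicast port, enables reuse_address so several receivers can run on one machine, and joins the group:

#include <array>
#include <iostream>
#include <boost/asio.hpp>

int main() {
    namespace ip = boost::asio::ip;
    boost::asio::io_service io_service;

    // Bind to the multicast port on any address; reuse_address lets
    // multiple receivers on the same machine share the port.
    ip::udp::socket socket(io_service);
    ip::udp::endpoint listen_endpoint(ip::udp::v4(), 30001);
    socket.open(listen_endpoint.protocol());
    socket.set_option(ip::udp::socket::reuse_address(true));
    socket.bind(listen_endpoint);

    // Join the group; only after this do multicast datagrams arrive.
    socket.set_option(ip::multicast::join_group(
        ip::address::from_string("239.255.0.1")));

    std::array<char, 1024> buffer;
    ip::udp::endpoint sender_endpoint;
    std::size_t n = socket.receive_from(
        boost::asio::buffer(buffer), sender_endpoint);
    std::cout << "received " << n << " bytes from " << sender_endpoint << std::endl;
    return 0;
}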
Using the Boost.Asio examples as a reference, if one launches the sender with ./sender 239.255.0.1 and multiple receivers with ./receiver 0.0.0.0 239.255.0.1, then the following sockets and binds occur:
                                                                        .----------.
                                                                       .----------.|
.--------.   address: any                                 address: any .----------.||
|        |   port: any      /                       \     port: 30001  |          |||
| sender |-( ----------->  | address: 239.255.0.1  |  ----------> )-   | receiver ||'
|        |                 \ port: 30001           /                   |          |'
'--------'                                                             '----------'
The sender binds to any address and port. For instance, let's say the kernel binds it to port 24000.
The receiver binds to any address and port 30001 and joins the 239.255.0.1 multicast group.
The sender writes messages to 239.255.0.1:30001.
The receiver receives messages sent to 239.255.0.1:30001. The receiver's receive_from() operation's sender_endpoint argument will be populated with the sender's endpoint address and port 24000.
As far as performance goes, profiling the application would provide a definitive answer. The examples provided in the question are very different (synchronous vs. asynchronous), so directly comparing the two to determine which is faster may not be appropriate. In general, Boost.Asio will provide some overhead due to its abstractions. However, I have yet to work on an application where Boost.Asio's overhead was the problem, and its abstractions have saved me countless development and maintenance man-hours.
I apologize in advance if the question has been previously answered, but I've searched and found nothing that helps me. As indicated by the question's title, I'm trying to broadcast a packet from a server to a set of clients listening for any message.
The client will count the number of messages it receives during one second.
The server side of things goes like this:
class Server
{
public:
    Server(boost::asio::io_service& io)
        : socket(io, udp::endpoint(udp::v4(), 8888))
        , broadcastEndpoint(address_v4::broadcast(), 8888)
        , tickHandler(boost::bind(&Server::Tick, this, boost::asio::placeholders::error))
        , timer(io, boost::posix_time::milliseconds(20))
    {
        socket.set_option(boost::asio::socket_base::reuse_address(true));
        socket.set_option(boost::asio::socket_base::broadcast(true));

        timer.async_wait(tickHandler);
    }

private:
    void Tick(const boost::system::error_code&)
    {
        socket.send_to(boost::asio::buffer(buffer), broadcastEndpoint);

        timer.expires_at(timer.expires_at() + boost::posix_time::milliseconds(20));
        timer.async_wait(tickHandler);
    }

private:
    udp::socket socket;
    udp::endpoint broadcastEndpoint;
    boost::function<void(const boost::system::error_code&)> tickHandler;
    boost::asio::deadline_timer timer;
    boost::array<char, 100> buffer;
};
It is initialized and run in the following way:
int main()
{
    try
    {
        boost::asio::io_service io;
        Server server(io);
        io.run();
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << "\n";
    }

    return 0;
}
This (apparently) works fine. Now comes the client...
void HandleReceive(const boost::system::error_code&, std::size_t bytes)
{
    std::cout << "Got " << bytes << " bytes\n";
}

int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        std::cerr << "Usage: " << argv[0] << " <host>\n";
        return 1;
    }

    try
    {
        boost::asio::io_service io;

        udp::resolver resolver(io);
        udp::resolver::query query(udp::v4(), argv[1], "1666");
        udp::endpoint serverEndpoint = *resolver.resolve(query);
        //std::cout << serverEndpoint.address() << "\n";

        udp::socket socket(io);
        socket.open(udp::v4());
        socket.bind(serverEndpoint);

        udp::endpoint senderEndpoint;
        boost::array<char, 300> buffer;

        auto counter = 0;
        auto start = std::chrono::system_clock::now();

        while (true)
        {
            socket.receive_from(boost::asio::buffer(buffer), senderEndpoint);
            ++counter;

            auto current = std::chrono::system_clock::now();
            if (current - start >= std::chrono::seconds(1))
            {
                std::cout << counter << "\n";
                counter = 0;
                start = current;
            }
        }
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << "\n";
    }
}
This works when running both the server and client on the same machine, but doesn't when I run the server on a different machine from the one where I run the client.
First thing is, it seems odd to me that I have to resolve the server's address. Perhaps I don't know how broadcasting really works, but I thought the server would send a message using its socket with the broadcast option turned on, and it would arrive at all the sockets in the same network.
I read that you should bind the client's socket to the address_v4::any() address. I did; it doesn't work (it says something about a socket already using the address/port).
Thanks in advance.
PS: I'm under Windows 8.
I am a bit surprised this works on the same machine. I would not have expected the client, listening to port 1666, to receive data being sent to the broadcast address on port 8888.
bind() assigns a local endpoint (composed of a local address and port) to the socket. When a socket binds to an endpoint, it specifies that the socket will only receive data sent to the bound address and port. It is often advised to bind to address_v4::any(), as this will use all available interfaces for listening. In the case of a system with multiple interfaces (possibly multiple NIC cards), binding to a specific interface address will result in the socket only listening to data received from the specified interface[1]. Thus, one might find themselves obtaining an address through resolve() when the application wants to bind to a specific network interface and wants to support resolving it by providing the IP directly (127.0.0.1) or a name (localhost).
It is important to note that when binding to a socket, the endpoint is composed of both an address and port. This is the source of my surprise that it works on the same machine. If the server is writing to broadcast:8888, a socket bound to port 1666 should not receive the datagram. Nevertheless, here is a visual of the endpoints and networking:
                                                                      .--------.
                                                                     .--------.|
.--------.   address: any                              address: any .--------.||
|        |   port: any      /                     \     port: 8888  |        |||
| server |-( ----------->  | address: broadcast  |  ----------> )-  | client ||'
|        |                 \ port: 8888          /                  |        |'
'--------'                                                          '--------'
The server binds to any address and any port, enables the broadcast option, and sends data to the remote endpoint (broadcast:8888). Clients bound to the any address on port 8888 should receive the data.
A simple example is as follows.
The server:
#include <boost/array.hpp>
#include <boost/asio.hpp>

int main()
{
    namespace ip = boost::asio::ip;
    boost::asio::io_service io_service;

    // Server binds to any address and any port.
    ip::udp::socket socket(io_service,
        ip::udp::endpoint(ip::udp::v4(), 0));
    socket.set_option(boost::asio::socket_base::broadcast(true));

    // Broadcast will go to port 8888.
    ip::udp::endpoint broadcast_endpoint(ip::address_v4::broadcast(), 8888);

    // Broadcast data.
    boost::array<char, 4> buffer;
    socket.send_to(boost::asio::buffer(buffer), broadcast_endpoint);
}
The client:
#include <iostream>
#include <boost/array.hpp>
#include <boost/asio.hpp>

int main()
{
    namespace ip = boost::asio::ip;
    boost::asio::io_service io_service;

    // Client binds to any address on port 8888 (the same port on which
    // broadcast data is sent from server).
    ip::udp::socket socket(io_service,
        ip::udp::endpoint(ip::udp::v4(), 8888));

    ip::udp::endpoint sender_endpoint;

    // Receive data.
    boost::array<char, 4> buffer;
    std::size_t bytes_transferred =
        socket.receive_from(boost::asio::buffer(buffer), sender_endpoint);

    std::cout << "got " << bytes_transferred << " bytes." << std::endl;
}
When the client is not co-located with the server, then it could be a variety of network related issues:
Verify connectivity between the server and client.
Verify firewall exceptions.
Verify broadcast support/exceptions on the routing device.
Use a network analyzer tool, such as Wireshark, to verify that the time to live field in the packets is high enough that it will not be discarded during routing.
1. On Linux, broadcast datagrams received by an adapter will not be passed to a socket bound to a specific interface, as the datagram's destination is set to the broadcast address. On the other hand, Windows will pass broadcast datagrams received by an adapter to sockets bound to a specific interface.