So I'm trying to create a minimal TCP server using kqueue. Assume the earlier steps of socket(), bind(), and listen() are working correctly, since I've tested them with a thread-per-connection model. Why does it hang unexpectedly?
struct kevent events;
struct kevent change_list;
// create a kqueue
int kq = kqueue();
if (kq < 0) {
    throw KqueueException();
}
// initialize the events we want to monitor
EV_SET(&events, sock, EVFILT_READ, EV_ADD, 0, 0, NULL);
// monitor the events
if (kevent(kq, &events, 1, NULL, 0, NULL) < 0) {
    throw KqueueException();
}
int nev;
while (true) {
    // wait for events to happen
    nev = kevent(kq, NULL, 0, &change_list, 1, NULL);
    if (nev < 0) {
        throw KqueueException();
    }
    else if (nev > 0) {
        // if the event fd is the same as the socket fd we have a new connection
        if ((int)change_list.ident == sock) {
            std::cout << "new connection" << std::endl;
            // accept the connection
            int client_fd = accept(sock, NULL, NULL);
            if (client_fd < 0) {
                throw AcceptException();
            }
            // add the new connection to the events we want to monitor
            EV_SET(&events, client_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
            if (kevent(kq, &events, 1, NULL, 0, NULL) < 0) {
                throw KqueueException();
            }
        }
        else if (change_list.filter == EVFILT_READ) {
            char hello[] = "HTTP/1.1 200 OK\nContent-Type: text/plain; charset=UTF-8\nContent-Length: 15\n\nhello from meee\n";
            send(change_list.ident, hello, strlen(hello), 0);
        }
    }
}
Using a browser I visited localhost:8090, and the program entered
if ((int)change_list.ident == sock) { ...
the first time and accepted the connection; on the next iteration it entered the second branch, else if (change_list.filter == EVFILT_READ) { ..., and I received the hello message in the browser. But when I refreshed the page, the server just hung with no error (meaning no exceptions were thrown). I tried debugging, and the program seems to keep entering the else if (change_list.filter == EVFILT_READ) { ... branch over and over until it hangs. I (naively) tried adding nev = 0 at the end of each iteration to reset the event count, which of course didn't resolve the problem. Am I doing something wrong? Can you explain what's happening?
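One thing worth noting: EVFILT_READ registered with plain EV_ADD is level-triggered, so it keeps firing as long as unread data (or an unhandled EOF) remains on the socket, and the loop above never calls recv() on the client fd. As a point of comparison, here is a minimal sketch of a read branch that drains the socket and drops closed connections, reusing the names from the snippet above (the response text and buffer size are only illustrative):

else if (change_list.filter == EVFILT_READ) {
    int client_fd = (int)change_list.ident;

    // the kernel reports end-of-file from the peer via the EV_EOF flag
    if (change_list.flags & EV_EOF) {
        // closing the fd automatically removes its kqueue registration
        close(client_fd);
    } else {
        char buf[4096];
        // consume the request bytes so the level-triggered filter stops firing
        ssize_t n = recv(client_fd, buf, sizeof(buf), 0);
        if (n <= 0) {
            close(client_fd);
        } else {
            const char reply[] =
                "HTTP/1.1 200 OK\r\n"
                "Content-Type: text/plain; charset=UTF-8\r\n"
                "Content-Length: 5\r\n"
                "\r\n"
                "hello";
            send(client_fd, reply, sizeof(reply) - 1, 0);
        }
    }
}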
Related
I'm working on a server implementation on a Chromebook, using TCP connectivity between the Windows client and the ChromeOS server. When a connection is made, the server (Chromebook) side sends out 5 packets: the first is the header, the next 3 are the information being sent, and the last is the footer of the message.
We're using send and recv for sending and receiving the information, and after the header is sent, the rest of the packets are never received: the client gets error code 10054, "connection reset by peer", before the rest arrive, even though they are sent.
The sizes of the packets are as follows: the header is 4 bytes, the second packet is 2 bytes, the next one is 1 byte, the next one is 8 bytes, and the footer is 4 bytes. Our suspicion was that perhaps the 2 bytes are too small for the OS to send and it waits for more data before sending, unlike Windows, where it currently does send them immediately. So we tried using SO_LINGER on the socket, but it didn't help. We also tried TCP_NODELAY, but that didn't help either. When we don't write to the socket's fd with the timeout (using select), the connection is broken after the first header is sent.
We know all the packets are sent, because logging the sent packets on the sending machine shows all of them as sent, yet only the first one arrives.
The only socket flag used is this:
setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *) &n, sizeof(n));
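For reference, the TCP_NODELAY attempt mentioned above (disabling Nagle's algorithm) is set per socket; a minimal sketch for the POSIX/ChromeOS side, reusing the Logger and get_last_comm_error helpers from the question and assuming s is the connected socket:

#include <netinet/tcp.h>   // TCP_NODELAY on POSIX systems

int flag = 1;
// disable Nagle's algorithm so small writes are not coalesced before sending
if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY, (const char *)&flag, sizeof(flag)) < 0) {
    Logger::LogError(PROTOCOL_ERROR, "setsockopt(TCP_NODELAY) failed: %d\n", get_last_comm_error());
}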
Sending a message:
ret = write_timeout(fd, timeout);
if (ret != OK) {
    Logger::LogError(PROTOCOL_ERROR, "Write data to socket failed with error %d, while waiting timeout of %u\n", get_last_comm_error(), timeout);
    return PROTOCOL_ERROR;
}
while (size) {
    ret = send(fd, ptr, size, 0);
    ptr += ret;
    size -= ret;
    if (ret < 0) {
        Logger::LogError(PROTOCOL_ERROR, "Transport write failed: %d\n", get_last_comm_error());
        return PROTOCOL_ERROR;
    }
}
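As an aside, the loop above advances ptr and size before checking for an error, so a failed send() corrupts both. A send-all loop is usually written with the error check first; a sketch with the same names as above:

while (size > 0) {
    ssize_t ret = send(fd, ptr, size, 0);
    if (ret < 0) {
        // report the failure before touching ptr/size
        Logger::LogError(PROTOCOL_ERROR, "Transport write failed: %d\n", get_last_comm_error());
        return PROTOCOL_ERROR;
    }
    // only advance by the bytes actually accepted by the kernel
    ptr += ret;
    size -= ret;
}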
Write_timeout:
int write_timeout(int fd, unsigned int wait_useconds)
{
    Logger::LogInfo(__FUNCTION__);
    int ret = OK;
    if (wait_useconds > 0) {
        fd_set write_fdset;
        struct timeval timeout;
        FD_ZERO(&write_fdset);
        FD_SET(fd, &write_fdset);
        timeout.tv_sec = 0;
        timeout.tv_usec = wait_useconds;
        do {
            ret = select(fd + 1, NULL, &write_fdset, NULL, &timeout);
        } while (ret < 0 && errno == EINTR);
        if (ret == OK) {
            ret = -1;
            errno = ETIMEDOUT;
        } else if (ret == 1)
            return OK;
    }
The receiving end is similar:
ret = read_timeout(fd, timeout);
if (ret != OK) {
    Logger::LogError(PROTOCOL_ERROR, "Error while trying to receive data from the host - timeout\n");
    return TIMED_OUT;
}
while (size) {
    ret = recv(fd, ptr, size, 0);
    ptr += ret;
    size -= ret;
    if (ret == 0) {
        return FAILED_TRANSACTION;
    }
    if (ret < 0) {
        Logger::LogError(PROTOCOL_ERROR, "Transport read failed: %d\n", get_last_comm_error());
        return UNKNOWN_ERROR;
    }
}
return OK;
And timeout:
int read_timeout(int fd, unsigned int wait_useconds)
{
    Logger::LogInfo(__FUNCTION__);
    int ret = OK;
    if (wait_useconds > 0) {
        fd_set read_fdset;
        struct timeval timeout;
        FD_ZERO(&read_fdset);
        FD_SET(fd, &read_fdset);
        timeout.tv_sec = 0;
        timeout.tv_usec = wait_useconds;
        do {
            ret = select(fd + 1, &read_fdset, NULL, NULL, &timeout);
        } while (ret < 0 && errno == EINTR);
        if (ret == OK) {
            ret = -1;
            errno = ETIMEDOUT;
        } else if (ret == 1)
            return OK;
    }
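Both timeout helpers above are posted without their final return path. For reference, a complete sketch of the read-side helper, keeping the question's names and assuming OK is 0 (a select() return of 0 is what signals the timeout); splitting wait_useconds into tv_sec and tv_usec is a defensive change, since some implementations reject tv_usec values of a second or more:

int read_timeout(int fd, unsigned int wait_useconds)
{
    Logger::LogInfo(__FUNCTION__);
    int ret = OK;                                  // assumes OK == 0
    if (wait_useconds > 0) {
        fd_set read_fdset;
        struct timeval timeout;
        FD_ZERO(&read_fdset);
        FD_SET(fd, &read_fdset);
        timeout.tv_sec  = wait_useconds / 1000000; // whole seconds
        timeout.tv_usec = wait_useconds % 1000000; // remaining microseconds
        do {
            ret = select(fd + 1, &read_fdset, NULL, NULL, &timeout);
        } while (ret < 0 && errno == EINTR);
        if (ret == 0) {                            // select() returns 0 on timeout
            errno = ETIMEDOUT;
            return -1;
        }
        if (ret < 0)                               // real select() error
            return -1;
        ret = OK;                                  // fd is readable
    }
    return ret;
}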
Our code works on Windows, but after modifying it accordingly and using it on ChromeOS, it unfortunately does not seem to work.
We're running the server on a Chromebook with version 93 and building the code against that code base as well.
I did try making the second packet 4 bytes as well, but it still does not work, and the connection is reset by peer after the first packet is received correctly.
Does anyone know if maybe ChromeOS waits for bigger packets before sending? Or if something else works a little differently with TCP on that OS and needs to be handled differently than on Windows?
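One way to take packet sizes and coalescing out of the picture entirely is to assemble the whole message (header, the three payload fields, and footer, 4 + 2 + 1 + 8 + 4 = 19 bytes) in one buffer and hand it to send() in a single call; TCP is a byte stream, so the receiver sees the same bytes either way. A sketch, where header, field1, field2, field3 and footer are placeholder names for the question's five pieces:

// Placeholder field buffers; sizes taken from the question (4+2+1+8+4 = 19 bytes).
unsigned char msg[19];
size_t off = 0;

memcpy(msg + off, header, 4); off += 4;
memcpy(msg + off, field1, 2); off += 2;
memcpy(msg + off, field2, 1); off += 1;
memcpy(msg + off, field3, 8); off += 8;
memcpy(msg + off, footer, 4); off += 4;

// One send() call for the whole frame; a send-all loop is still needed for partial writes.
if (send(fd, (const char *)msg, sizeof(msg), 0) < 0) {
    Logger::LogError(PROTOCOL_ERROR, "Transport write failed: %d\n", get_last_comm_error());
    return PROTOCOL_ERROR;
}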
I'm trying to write a port-open test using sockets, and for some reason it reports "port open" for invalid IP addresses. I'm currently plugged into an access point that's not connected to the internet, so it's incorrectly reporting the port as open for external IP addresses.
First I set up the socket; since it's in non-blocking mode, the connect is usually still in progress by the first if statement. Then I poll the socket. However, for a socket on an external IP, I'm getting the POLLOUT event even though that doesn't seem possible...
What am I missing? Why does the poll revents field contain POLLOUT? I've tried resetting the pollfd struct before calling poll again, but that didn't change the result.
result = connect(sd, (struct sockaddr*)&serverAddr, sizeof(serverAddr));
if (result == 0) //woohoo! success
{
    //SUCCESS!
    return true;
}
else if (result < 0 && errno != EINPROGRESS) //real error
{
    //FAIL
    return false;
}
// poll the socket until it connects
struct pollfd fds[1];
fds[0].fd = sd;
fds[0].events = POLLOUT | POLLRDHUP | POLLERR | POLLNVAL;
fds[0].revents = 0;
while (1)
{
    result = poll(fds, 1, 1);
    if (result < 1)
    {
        //Poll failed, mark as FAIL
    }
    else
    {
        // see which event occurred
        if (fds[0].revents & POLLOUT || fds[0].revents & POLLRDHUP)
        {
            //SUCCESS
        }
        else if (fds[0].revents & POLLERR || fds[0].revents & POLLNVAL)
        {
            //FAIL
        }
    }
}
I needed to check SO_ERROR after receiving the POLLOUT event - POLLOUT by itself does not indicate success.
//now read the error code of the socket
int errorCode;
socklen_t len = sizeof(errorCode);
result = getsockopt(fds[0].fd, SOL_SOCKET, SO_ERROR,
                    &errorCode, &len);
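To complete the picture, a sketch of how the SO_ERROR result is typically interpreted after POLLOUT: an errorCode of 0 means the non-blocking connect succeeded, anything else (ECONNREFUSED, ETIMEDOUT, ...) means it failed:

if (result < 0)
{
    //getsockopt itself failed, treat as FAIL
    return false;
}
if (errorCode == 0)
{
    //the non-blocking connect completed successfully: port is open
    return true;
}
//connect failed (e.g. ECONNREFUSED, ETIMEDOUT): port is closed or filtered
return false;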
This code compiles and runs. When the client connects, WSAWaitForMultipleEvents returns with nReturnCode = 0 (which equals WSA_WAIT_EVENT_0). But the code always displays "EVENT: NOT ACCEPT", and when I look at NetworkEvents in debug mode, it is completely filled with zeroes.
I didn't expect this code to work, because I don't really know how to initialize the variable NetworkEvents. Don't I have to somehow "bind" it to the socket? How is the struct supposed to know that it should store the network events of ListenSocket?
WSAEVENT event = WSACreateEvent();
WSAEventSelect(ListenSocket, event, FD_CONNECT | FD_ACCEPT | FD_READ | FD_WRITE | FD_CLOSE);
//DWORD cEvents = MAKELONG(MAKEWORD(0, 0), MAKEWORD(0, 1));
WSAEVENT lphEvents[1] = {event};
//lphEvents[0] = event;
//DWORD dwTimeout = MAKELONG(MAKEWORD(0, 0), MAKEWORD(0, 0));
WSANETWORKEVENTS NetworkEvents = {0};
int nReturnCode = WSAWaitForMultipleEvents(1, &lphEvents[0], false, WSA_INFINITE, false);
if ((NetworkEvents.lNetworkEvents & FD_ACCEPT) == FD_ACCEPT) {
    std::cout << "EVENT: ACCEPT !" << std::endl;
} else {
    std::cout << "EVENT: NOT ACCEPT !" << std::endl;
}
You set everything to 0 at WSANETWORKEVENTS NetworkEvents = {0};, and there's no code elsewhere that writes to that variable, so it's still going to be all zeroes when you check the structure.
You're probably missing a call to WSAEnumNetworkEvents after WSAWaitForMultipleEvents:
if (WSAEnumNetworkEvents(ListenSocket, event, &NetworkEvents) == SOCKET_ERROR) {
    printf("WSAEnumNetworkEvents() failed with error %d\n", WSAGetLastError());
}
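Putting the pieces together, a sketch of the usual wait-then-enumerate loop for an accepting socket; WSAEnumNetworkEvents both fills in NetworkEvents and resets the event object, so the loop can wait again on the next pass (the accept() handling here is only illustrative):

while (true) {
    DWORD nReturnCode = WSAWaitForMultipleEvents(1, lphEvents, FALSE, WSA_INFINITE, FALSE);
    if (nReturnCode == WSA_WAIT_FAILED)
        break;

    // Fills NetworkEvents for ListenSocket and resets 'event' to non-signaled.
    if (WSAEnumNetworkEvents(ListenSocket, event, &NetworkEvents) == SOCKET_ERROR) {
        printf("WSAEnumNetworkEvents() failed with error %d\n", WSAGetLastError());
        break;
    }

    if (NetworkEvents.lNetworkEvents & FD_ACCEPT) {
        std::cout << "EVENT: ACCEPT !" << std::endl;
        SOCKET client = accept(ListenSocket, NULL, NULL);
        // ... handle the new connection, e.g. register it with WSAEventSelect as well
    }
}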
Trying to write a client which should receive data for up to 3 seconds. I have implemented the connect with a timeout using select, as in the code below.
//socket creation
m_hSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
m_stAddress.sin_family = AF_INET;
m_stAddress.sin_addr.S_un.S_addr = inet_addr(pchIP);
m_stAddress.sin_port = htons(iPort);
m_stTimeout.tv_sec = SOCK_TIMEOUT_SECONDS;
m_stTimeout.tv_usec = 0;

//connecting to server
long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
connect(m_hSocket, (struct sockaddr *)&m_stAddress, sizeof(m_stAddress));
iMode = 0;
iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);

fd_set stWrite;
FD_ZERO(&stWrite);
FD_SET(m_hSocket, &stWrite);
iResult = select(0, NULL, &stWrite, NULL, &m_stTimeout);
if ((iResult > 0) && (FD_ISSET(m_hSocket, &stWrite)))
    return true;
But I cannot figure out what I am missing in the receive timeout below. It doesn't wait if the server connection has been disconnected; it just returns instantly from select.
Also, how can I write a non-blocking socket call with a timeout for socket send?
long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
    while ((iBuffLen-1) > 0)
    {
        int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
        if (iRcvLen == SOCKET_ERROR)
        {
            return false;
        }
        else if (iRcvLen == 0)
        {
            break;
        }
        pchBuff += iRcvLen;
        iBuffLen -= iRcvLen;
    }
}
The first parameter to select should not be 0.
Correct usage of select can be found here :
http://developerweb.net/viewtopic.php?id=2933
The first parameter should be the maximum socket descriptor value + 1, and interrupted system calls need to be taken into account if the socket is non-blocking:
/* Call select() */
do {
    FD_ZERO(&readset);
    FD_SET(socket_fd, &readset);
    result = select(socket_fd + 1, &readset, NULL, NULL, NULL);
} while (result == -1 && errno == EINTR);
This is just example code; you probably need the timeout parameter as well.
If you can get EINTR, this complicates the required logic: when you get EINTR you have to make the same call again, but with the remaining time left to wait.
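A sketch of that retry logic, measuring elapsed time against a monotonic clock and restarting select() with the remaining time after each EINTR (the helper name wait_readable is just for illustration):

#include <time.h>
#include <errno.h>
#include <sys/select.h>

// Wait until socket_fd is readable or total_ms milliseconds have passed,
// restarting select() with the remaining time whenever it is interrupted.
int wait_readable(int socket_fd, long total_ms)
{
    struct timespec start;
    clock_gettime(CLOCK_MONOTONIC, &start);

    for (;;) {
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000
                        + (now.tv_nsec - start.tv_nsec) / 1000000;
        long remaining_ms = total_ms - elapsed_ms;
        if (remaining_ms <= 0)
            return 0;                     // timed out

        struct timeval tv;
        tv.tv_sec  = remaining_ms / 1000;
        tv.tv_usec = (remaining_ms % 1000) * 1000;

        fd_set readset;
        FD_ZERO(&readset);
        FD_SET(socket_fd, &readset);

        int result = select(socket_fd + 1, &readset, NULL, NULL, &tv);
        if (result >= 0)
            return result;                // 0 = timeout, 1 = readable
        if (errno != EINTR)
            return -1;                    // real error
        // interrupted: loop and retry with the recomputed remaining time
    }
}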
I think for non-blocking mode one needs to check for recv() failure along with a timeout value. That means select() first reports whether the socket is ready to receive data or not: if yes, control goes forward, otherwise it sleeps at the select() call until the timeout elapses. But if the receive fails for some uncertain reason inside the read loop, we need to manually check both the socket error and the maximum timeout value; if the socket error continues and the timeout elapses, we break out.
I'm done with my receive-timeout logic for non-blocking mode.
Please correct me if I am wrong.
bool bReturn = true;
SetNonBlockingMode(true);

//check whether the socket is ready to receive
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);

DWORD dwStartTime = GetTickCount();
DWORD dwCurrentTime = 0;

//if socket is not ready this line will be hit after 3 sec timeout and go to the end
//if it is ready control will go inside the read loop and reads data until data ends or
//socket error is getting triggered continuously for more than 3 secs.
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
    while ((iBuffLen-1) > 0)
    {
        int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
        dwCurrentTime = GetTickCount();
        if ((iRcvLen == SOCKET_ERROR) && ((dwCurrentTime - dwStartTime) >= SOCK_TIMEOUT_SECONDS * 1000))
        {
            bReturn = false;
            break;
        }
        else if (iRcvLen == 0)
        {
            break;
        }
        pchBuff += iRcvLen;
        iBuffLen -= iRcvLen;
    }
}
SetNonBlockingMode(false);
return bReturn;
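For comparison only (this is a different technique from the select-based logic above): Winsock can enforce a receive timeout directly with the SO_RCVTIMEO socket option on a blocking socket, which avoids the manual select-plus-GetTickCount bookkeeping. A minimal sketch, assuming m_hSocket is left in blocking mode:

// Ask Winsock to fail recv() with WSAETIMEDOUT after 3 seconds of inactivity.
DWORD dwRecvTimeoutMs = 3000;
if (setsockopt(m_hSocket, SOL_SOCKET, SO_RCVTIMEO,
               (const char *)&dwRecvTimeoutMs, sizeof(dwRecvTimeoutMs)) == SOCKET_ERROR)
{
    return false;
}

int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen - 1, 0);
if (iRcvLen == SOCKET_ERROR)
{
    // WSAGetLastError() == WSAETIMEDOUT when the 3-second limit was hit
    return false;
}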
I have a function that writes data to the serial port using a certain protocol. When the function writes one frame, it waits for an answer from the receiver. If no answer is received, it has to resend the data for up to 3 timeouts, and if all 3 timeouts pass with no success, close the communication...
I have this function:
int serial_write(int fd, unsigned char* send, size_t send_size) {
    ......
    int received_counter = 0;
    while (!RECEIVED) {
        Timeout.tv_usec = 0;       // microseconds
        Timeout.tv_sec = timeout;  // seconds
        FD_SET(fd, &readfs);
        //set testing for source 1
        res = select(fd + 1, &readfs, NULL, NULL, &Timeout);
        //timeout occurred.
        if (received_counter == 3) {
            printf("Connection maybe turned off! Number of resends exceeded!\n");
            exit(-1);
        }
        if (res == 0) {
            printf("Timeout occurred\n");
            write(fd, (&I[0]), I.size());
            numTimeOuts++;
            received_counter++;
        } else {
            RECEIVED = true;
            break;
        }
    }
    ......
}
I have verified that this function, when it goes into timeout, does not resend the data. Why?
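For comparison, a common shape for this kind of retry loop rebuilds the fd_set and the timeval before every select() call (select() modifies the set in place, and Linux also rewrites the remaining time into the timeval), counts the timeout before deciding whether to resend, and only gives up after the third one. A sketch using the question's names, with I assumed to be the frame buffer:

int received_counter = 0;
bool received = false;

while (!received && received_counter < 3) {
    fd_set readfs;
    FD_ZERO(&readfs);            // select() modifies the set, so rebuild it each pass
    FD_SET(fd, &readfs);

    struct timeval Timeout;
    Timeout.tv_sec = timeout;    // seconds
    Timeout.tv_usec = 0;         // microseconds (Linux select() overwrites this struct)

    int res = select(fd + 1, &readfs, NULL, NULL, &Timeout);
    if (res > 0) {
        received = true;         // the receiver answered; the caller reads the reply
    } else if (res == 0) {
        printf("Timeout occurred, resending frame\n");
        write(fd, &I[0], I.size());   // resend the frame (I is the frame buffer)
        received_counter++;
    } else if (errno != EINTR) {
        break;                   // real select() error
    }
}

if (!received) {
    printf("Connection maybe turned off! Number of resends exceeded!\n");
    // close the port / abort the communication here
}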