Here's my attempt to create a wrapper around the Winsock recv function in blocking mode, so that a Ctrl-C can interrupt the connection without closing the process.
I'm stuck on how to do a "select" that detects either a "Ctrl-C event" or a "socket recv event".
Any ideas on how to fix this code to get it working? The idea is that I first call the CtrlC_Init routine to install the Ctrl-C handler, then replace recv with CtrlC_Recv in my TCP/IP program, and finally press Ctrl-C while tailing a file on a remote computer, and it will magically stop without exiting my program.
#include <iostream>
#include <atomic>
#include <mutex>
#include <ws2tcpip.h>
using namespace std;
#pragma comment(lib, "ws2_32.lib")
atomic<HANDLE> g_event_ctrlc {0};
BOOL WINAPI CtrlHandler(DWORD fdwCtrlType);
void CtrlC_Init()
{
g_event_ctrlc = CreateEvent(NULL, FALSE, FALSE, NULL);
SetConsoleCtrlHandler(CtrlHandler, TRUE);
}
int CtrlC_Recv( SOCKET s, char* buf, int len, int flags)
{
WSAEVENT event_recv;
HANDLE handle_array[2];
int handle_count = 0;
if (g_event_ctrlc != 0) {
handle_array[handle_count] = g_event_ctrlc;
handle_count++;
}
// Create Event Receive
event_recv = WSACreateEvent();
if (event_recv == WSA_INVALID_EVENT) {
return SOCKET_ERROR;
}
WSAEventSelect(s, event_recv, FD_READ | FD_CLOSE);
handle_array[handle_count] = event_recv;
handle_count++;
DWORD rc = WaitForMultipleObjectsEx(
handle_count,
handle_array,
FALSE, INFINITE, FALSE);
// Close Event_Recv
WSAEventSelect(s, event_recv, 0);
CloseHandle(event_recv);
// Check for Error
if(rc == WAIT_FAILED) {
DWORD err = GetLastError();
cout << "ERROR" << err << "\n";
return SOCKET_ERROR;
}
// If Event Triggered
if(rc >= WAIT_OBJECT_0 && rc < WAIT_OBJECT_0 + handle_count) {
// Ctrl-C Interrupt Event
if (g_event_ctrlc != 0) {
if (handle_array[rc] == g_event_ctrlc) {
cout << "CTRL-C CloseSocket\n";
closesocket(s);
return SOCKET_ERROR;
}
}
// Received Socket
if (handle_array[rc] == event_recv) {
return recv(s, buf, len, flags);
}
}
return SOCKET_ERROR;
}
BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
//NOTE: make sure console app is run from powershell.exe
// instead of being spawned by C++ IDE...
// some C++ IDEs trap Ctrl-C before it gets to console
// preventing this handler from getting triggered.
if (fdwCtrlType == CTRL_C_EVENT) {
printf("Ctrl-c\n");
if (g_event_ctrlc != 0) {
SetEvent(g_event_ctrlc);
}
return TRUE;
}
return FALSE;
}
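For reference, here is a minimal sketch (not a tested fix, just an illustration, assuming CtrlC_Init() has already run so g_event_ctrlc is valid) of how the wait and cleanup could be arranged: map the WaitForMultipleObjects result back to a handle index via WAIT_OBJECT_0, release the event with WSACloseEvent rather than CloseHandle, and restore blocking mode with ioctlsocket, since WSAEventSelect switches the socket to non-blocking.

int CtrlC_Recv(SOCKET s, char* buf, int len, int flags)
{
    WSAEVENT event_recv = WSACreateEvent();
    if (event_recv == WSA_INVALID_EVENT)
        return SOCKET_ERROR;

    // Associate the socket with the event; this also makes the socket non-blocking.
    WSAEventSelect(s, event_recv, FD_READ | FD_CLOSE);

    HANDLE handle_array[2] = { g_event_ctrlc.load(), event_recv };
    DWORD rc = WaitForMultipleObjects(2, handle_array, FALSE, INFINITE);

    // Undo the association and restore blocking mode before calling recv again.
    WSAEventSelect(s, NULL, 0);
    u_long blocking_mode = 0;
    ioctlsocket(s, FIONBIO, &blocking_mode);
    WSACloseEvent(event_recv);

    if (rc == WAIT_OBJECT_0) {               // index 0: the Ctrl-C event fired
        closesocket(s);
        return SOCKET_ERROR;
    }
    if (rc == WAIT_OBJECT_0 + 1)             // index 1: data or close on the socket
        return recv(s, buf, len, flags);

    return SOCKET_ERROR;                     // WAIT_FAILED or anything unexpected
}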
I have a poll() loop with some small socket communication. I want to start another program via system() or exec(), and I need the return value of the system()/exec() call, but I don't want to stop the main loop while the child process is running. So I thought I'd start it in a thread, but I am not sure how to set up the pollfd to catch the thread when it is done. I am using C/C++.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <poll.h>
#include <iostream>
#include <string>
#include <thread>
#include <future>
#define SOCKET_NAME "/tmp/9Lq7BNBnBycd6nxy.socket"
int runProgram(const std::string &programName, const std::string &fileName) {
return system((programName + " " + fileName).c_str());
}
int main(int argc, char *argv[]) {
struct sockaddr_un server;
int sock;
char buf[1024];
unlink(SOCKET_NAME);
sock = socket(AF_UNIX, SOCK_STREAM, 0);
if (sock == -1){
perror("socket");
exit(EXIT_FAILURE);
}
memset(&server, 0, sizeof(struct sockaddr_un));
server.sun_family = AF_UNIX;
strncpy(server.sun_path, SOCKET_NAME, sizeof(server.sun_path) - 1);
if (bind(sock, (struct sockaddr *) &server, sizeof(struct sockaddr_un)) < 0) {
perror("bind");
exit(EXIT_FAILURE);
}
if (listen(sock, 3) < -1) {
perror("listen");
exit(EXIT_FAILURE);
}
struct pollfd fds[2];
fds[0].fd = sock;
fds[0].events = POLLIN;
std::future<int> ret = std::async(&runProgram, "cat", "test.txt");
while (true) {
poll(fds, 2, -1);
if(fds[0].revents & POLLIN) {
int new_sd = accept(fds[0].fd, NULL, NULL);
if (new_sd < 0) {
perror("accept");
}
fds[1].fd = new_sd;
}
if (fds[0].revents & POLLIN) {
int rv = recv(fds[1].fd, buf, 1024, 0);
if (rv < 0)
perror("recv");
else if (rv == 0) {
printf("disconnet\n");
close(fds[1].fd);
} else {
printf("%s\n", buf);
send(fds[1].fd, buf, 1024, 0);
}
memset(buf, 0, 1024);
}
}
close(sock);
return(EXIT_SUCCESS);
}
So I want to add one more entry to the pollfd array for the thread and get a POLLIN on fds[2] when it is done, so that I can fetch the return value with ret.get(). Here I used cat as an example command, but in my final code the command will need much more time, so I can't just wait for it to finish.
The simplest solution is to create an anonymous pipe (or, since you say that you are on Linux, an eventfd) and write data to one end of the pipe in the runProgram function once the call to system returns. You can then include the read end of the pipe in the set of file descriptors that you are polling.
int process_eventfd = eventfd(0, EFD_CLOEXEC); // needs #include <sys/eventfd.h>
if (process_eventfd == -1) exit(1); // change this to handle appropriately
struct pollfd fds[3];
fds[0].fd = sock;
fds[0].events = POLLIN;
fds[1].fd = process_eventfd;
fds[1].events = POLLIN;
// use fds[2] instead of fds[1] for your socket connection, etc.
You can add the eventfd number as an argument to runProgram. It should now look something like:
int runProgram(const std::string &programName, const std::string &fileName, int process_eventfd) {
    int result = system((programName + " " + fileName).c_str());
    uint64_t value = 1;
    write(process_eventfd, &value, 8); // wake up the poll loop now that the command has finished
    return result;
}
By the way, your current program has a bug: you always pass 2 as the number of file descriptors to poll, even before you have set up the second file descriptor in the array. You should only pass the number of valid descriptors actually present in the array.
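Putting it together, the polling side could look roughly like this (a sketch only, assuming the fds[] layout shown above and that the task is started with std::async(std::launch::async, &runProgram, "cat", "test.txt", process_eventfd); the accept/recv handling from your code is elided):

while (true) {
    poll(fds, 2, -1);   // 2 = listening socket + eventfd; raise the count once a client is accepted

    if (fds[1].revents & POLLIN) {
        uint64_t counter;
        read(process_eventfd, &counter, sizeof(counter)); // reset the eventfd
        int status = ret.get();                           // return value of system()
        printf("child command finished, status=%d\n", status);
    }

    // ... handle fds[0] (listening socket) and the connected client as before ...
}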
However, if you don't need to use system and can use exec, there is no need to create another thread; just perform the following steps:
Mask (but don't ignore) the SIGCHLD signal. (You may need to set up a signal handler, even if it does nothing; I can't remember if this is true for Linux or not).
Create your external process via fork/exec
Use ppoll rather than poll, and pass a signal mask that leaves SIGCHLD unblocked (enabled) for the duration of the call
If the ppoll call returns an EINTR error, use waitpid to obtain the child status
The child process will run in parallel to your program.
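A rough sketch of those steps, under the assumption that fds and nfds are the poll array and count from your program (on glibc, ppoll additionally needs _GNU_SOURCE defined before the includes; error handling is omitted):

#include <signal.h>
#include <sys/wait.h>
#include <errno.h>

static void chld_handler(int) { /* no-op: it only needs to exist so SIGCHLD is delivered */ }

// inside main(), before entering the loop:
sigset_t block_mask, orig_mask;
sigemptyset(&block_mask);
sigaddset(&block_mask, SIGCHLD);
sigprocmask(SIG_BLOCK, &block_mask, &orig_mask);  // SIGCHLD stays blocked outside ppoll

struct sigaction sa = {};
sa.sa_handler = chld_handler;
sigaction(SIGCHLD, &sa, NULL);

pid_t child = fork();
if (child == 0) {
    execlp("cat", "cat", "test.txt", (char *)NULL);
    _exit(127);                                   // exec failed
}

for (;;) {
    // orig_mask (with SIGCHLD unblocked) is installed only while ppoll sleeps
    int n = ppoll(fds, nfds, NULL, &orig_mask);
    if (n == -1 && errno == EINTR) {
        int status;
        if (waitpid(child, &status, WNOHANG) == child)
            printf("child exited with status %d\n", WEXITSTATUS(status));
        continue;
    }
    // ... handle socket readiness as before ...
}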
I'm trying to learn how to use epoll() for a TCP server application, because I'm expecting many connections.
I tried checking samples and tutorials, and they always recommend setting the sockets that are added to epoll() to be NON-BLOCKING sockets. Why?
For level-triggered epoll, nonblocking sockets can help minimize epoll_wait() calls; it's an optimization.
For edge-triggered epoll, you MUST use nonblocking sockets AND call read() or write() until they return EWOULDBLOCK. If you don't, you can miss kernel notifications.
You can find a detailed answer here: https://eklitzke.org/blocking-io-nonblocking-io-and-epoll
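As a sketch of the edge-triggered pattern (illustrative only: epfd is assumed to come from epoll_create1(0), conn_fd is an accepted socket already set to non-blocking, and <sys/epoll.h>, <unistd.h> and <errno.h> are included):

struct epoll_event ev;
ev.events = EPOLLIN | EPOLLET;            // edge-triggered read notification
ev.data.fd = conn_fd;
epoll_ctl(epfd, EPOLL_CTL_ADD, conn_fd, &ev);

struct epoll_event events[64];
int n = epoll_wait(epfd, events, 64, -1);
for (int i = 0; i < n; i++) {
    int fd = events[i].data.fd;
    char buf[4096];
    for (;;) {                            // drain until the kernel has nothing left
        ssize_t r = read(fd, buf, sizeof(buf));
        if (r > 0) { /* process r bytes */ continue; }
        if (r == 0) { close(fd); break; }                // peer closed the connection
        if (errno == EAGAIN || errno == EWOULDBLOCK)     // fully drained; wait for the next edge
            break;
        close(fd); break;                                // real error
    }
}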
It's a good question and not a duplicate. Recently I also found a tutorial using nonblocking sockets with select (select is level-triggered only), which got me thinking.
The question is:
Why use nonblocking I/O, or set the fd to nonblocking, with level-triggered epoll, select, or other similar interfaces?
There are in fact very solid reasons for this case.
Cite from the book The Linux Programming Interface :
63.1.2 Employing Nonblocking I/O with Alternative I/O Models
Nonblocking I/O (the O_NONBLOCK flag) is often used in conjunction with the I/O models described in this chapter. Some examples of why this can be useful are the following:
1. As explained in the previous section, nonblocking I/O is usually employed in conjunction with I/O models that provide edge-triggered notification of I/O events.
2. If multiple processes (or threads) are performing I/O on the same open file descriptions, then, from a particular process’s point of view, a descriptor’s readiness may change between the time the descriptor was notified as being ready and the time of the subsequent I/O call. Consequently, a blocking I/O call could block, thus preventing the process from monitoring other file descriptors. (This can occur for all of the I/O models that we describe in this chapter, regardless of whether they employ level-triggered or edge-triggered notification.)
3. Even after a level-triggered API such as select() or poll() informs us that a file descriptor for a stream socket is ready for writing, if we write a large enough block of data in a single write() or send(), then the call will nevertheless block.
4. In rare cases, level-triggered APIs such as select() and poll() can return spurious readiness notifications—they can falsely inform us that a file descriptor is ready. This could be caused by a kernel bug or be expected behavior in an uncommon scenario.
First, let's check case #2: "If multiple processes (or threads) are performing I/O on the same open file descriptions...".
Read this code from libevent introduction, http://www.wangafu.net/~nickm/libevent-book/01_intro.html .
/* For sockaddr_in */
#include <netinet/in.h>
/* For socket functions */
#include <sys/socket.h>
/* For fcntl */
#include <fcntl.h>
/* for select */
#include <sys/select.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#define MAX_LINE 16384
char
rot13_char(char c)
{
/* We don't want to use isalpha here; setting the locale would change
* which characters are considered alphabetical. */
if ((c >= 'a' && c <= 'm') || (c >= 'A' && c <= 'M'))
return c + 13;
else if ((c >= 'n' && c <= 'z') || (c >= 'N' && c <= 'Z'))
return c - 13;
else
return c;
}
struct fd_state {
char buffer[MAX_LINE];
size_t buffer_used;
int writing;
size_t n_written;
size_t write_upto;
};
struct fd_state *
alloc_fd_state(void)
{
struct fd_state *state = malloc(sizeof(struct fd_state));
if (!state)
return NULL;
state->buffer_used = state->n_written = state->writing =
state->write_upto = 0;
return state;
}
void
free_fd_state(struct fd_state *state)
{
free(state);
}
void
make_nonblocking(int fd)
{
fcntl(fd, F_SETFL, O_NONBLOCK);
}
int
do_read(int fd, struct fd_state *state)
{
char buf[1024];
int i;
ssize_t result;
while (1) {
result = recv(fd, buf, sizeof(buf), 0);
if (result <= 0)
break;
for (i=0; i < result; ++i) {
if (state->buffer_used < sizeof(state->buffer))
state->buffer[state->buffer_used++] = rot13_char(buf[i]);
if (buf[i] == '\n') {
state->writing = 1;
state->write_upto = state->buffer_used;
}
}
}
if (result == 0) {
return 1;
} else if (result < 0) {
if (errno == EAGAIN)
return 0;
return -1;
}
return 0;
}
int
do_write(int fd, struct fd_state *state)
{
while (state->n_written < state->write_upto) {
ssize_t result = send(fd, state->buffer + state->n_written,
state->write_upto - state->n_written, 0);
if (result < 0) {
if (errno == EAGAIN)
return 0;
return -1;
}
assert(result != 0);
state->n_written += result;
}
if (state->n_written == state->buffer_used)
state->n_written = state->write_upto = state->buffer_used = 0;
state->writing = 0;
return 0;
}
void
run(void)
{
int listener;
struct fd_state *state[FD_SETSIZE];
struct sockaddr_in sin;
int i, maxfd;
fd_set readset, writeset, exset;
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = 0;
sin.sin_port = htons(40713);
for (i = 0; i < FD_SETSIZE; ++i)
state[i] = NULL;
listener = socket(AF_INET, SOCK_STREAM, 0);
make_nonblocking(listener);
#ifndef WIN32
{
int one = 1;
setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
}
#endif
if (bind(listener, (struct sockaddr*)&sin, sizeof(sin)) < 0) {
perror("bind");
return;
}
if (listen(listener, 16)<0) {
perror("listen");
return;
}
FD_ZERO(&readset);
FD_ZERO(&writeset);
FD_ZERO(&exset);
while (1) {
maxfd = listener;
FD_ZERO(&readset);
FD_ZERO(&writeset);
FD_ZERO(&exset);
FD_SET(listener, &readset);
for (i=0; i < FD_SETSIZE; ++i) {
if (state[i]) {
if (i > maxfd)
maxfd = i;
FD_SET(i, &readset);
if (state[i]->writing) {
FD_SET(i, &writeset);
}
}
}
if (select(maxfd+1, &readset, &writeset, &exset, NULL) < 0) {
perror("select");
return;
}
if (FD_ISSET(listener, &readset)) {
struct sockaddr_storage ss;
socklen_t slen = sizeof(ss);
int fd = accept(listener, (struct sockaddr*)&ss, &slen);
if (fd < 0) {
perror("accept");
} else if (fd > FD_SETSIZE) {
close(fd);
} else {
make_nonblocking(fd);
state[fd] = alloc_fd_state();
assert(state[fd]);/*XXX*/
}
}
for (i=0; i < maxfd+1; ++i) {
int r = 0;
if (i == listener)
continue;
if (FD_ISSET(i, &readset)) {
r = do_read(i, state[i]);
}
if (r == 0 && FD_ISSET(i, &writeset)) {
r = do_write(i, state[i]);
}
if (r) {
free_fd_state(state[i]);
state[i] = NULL;
close(i);
}
}
}
}
int
main(int c, char **v)
{
setvbuf(stdout, NULL, _IONBF, 0);
run();
return 0;
}
This is not an example of multiple processes (or threads) performing I/O on the same open file description, but it demonstrates the same idea.
In the do_read function, recv is called inside a while(1) loop to read as many bytes as possible, 1024 bytes per recv call. I guess this is a typical pattern.
So you need a nonblocking socket here; otherwise recv will eventually block once there is no more data in the network input buffer.
For #3, if you write too much data to a blocking socket and there is not enough buffer space, send will block until all the data has been sent, and it can block for quite a long time if the send buffer does not drain. The do_write function above shows the nonblocking alternative: it stops at EAGAIN and resumes when select reports the socket writable again. For more details see https://stackoverflow.com/a/74172742/5983841 .
Building on a similar example located here on Stack Overflow,
I have three named pipes, pipe_a, pipe_b, and pipe_c that are being fed from external processes. I'd like to have a reader process that outputs to the console, whatever is written to any of these pipes.
The program below is an all-in-one c program that should read the three pipes in a non-blocking manner, and display output when any one of the pipes gets new data.
However, it isn't working - it is blocking! If pipe_a gets data, it will display it and then wait for new data to arrive in pipe_b, etc...
select() should allow the monitoring of multiple file descriptors until one is ready, at which time we should drop into the pipe's read function and get the data.
Can anyone help identify why the pipes are behaving like they are in blocking mode?
/*
* FIFO example using select.
*
* $ mkfifo /tmp/fifo
* $ clang -Wall -o test ./test.c
* $ ./test &
* $ echo 'hello' > /tmp/fifo
* $ echo 'hello world' > /tmp/fifo
* $ killall test
*/
#include <sys/types.h>
#include <sys/select.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
// globals
int fd_a, fd_b, fd_c;
int nfd_a, nfd_b, nfd_c;
fd_set set_a, set_b, set_c;
char buffer_a[100*1024];
char buffer_b[100*1024];
char buffer_c[100*1024];
int readPipeA()
{
ssize_t bytes;
size_t total_bytes;
if (FD_ISSET(fd_a, &set_a)) {
printf("\nDescriptor %d has new data to read.\n", fd_a);
total_bytes = 0;
for (;;) {
printf("\nDropped into read loop\n");
bytes = read(fd_a, buffer_a, sizeof(buffer_a));
if (bytes > 0) {
total_bytes += (size_t)bytes;
printf("%s", buffer_a);
} else {
if (errno == EWOULDBLOCK) {
printf("\ndone reading (%ul bytes)\n", total_bytes);
break;
} else {
perror("read");
return EXIT_FAILURE;
}
}
}
}
}
int readPipeB()
{
ssize_t bytes;
size_t total_bytes;
if (FD_ISSET(fd_b, &set_b)) {
printf("\nDescriptor %d has new data to read.\n", fd_b);
total_bytes = 0;
for (;;) {
printf("\nDropped into read loop\n");
bytes = read(fd_b, buffer_b, sizeof(buffer_b));
if (bytes > 0) {
total_bytes += (size_t)bytes;
printf("%s", buffer_b);
} else {
if (errno == EWOULDBLOCK) {
printf("\ndone reading (%ul bytes)\n", total_bytes);
break;
} else {
perror("read");
return EXIT_FAILURE;
}
}
}
}
}
int readPipeC()
{
ssize_t bytes;
size_t total_bytes;
if (FD_ISSET(fd_c, &set_c)) {
printf("\nDescriptor %d has new data to read.\n", fd_c);
total_bytes = 0;
for (;;) {
printf("\nDropped into read loop\n");
bytes = read(fd_c, buffer_c, sizeof(buffer_c));
if (bytes > 0) {
total_bytes += (size_t)bytes;
printf("%s", buffer_c);
} else {
if (errno == EWOULDBLOCK) {
printf("\ndone reading (%ul bytes)\n", total_bytes);
break;
} else {
perror("read");
return EXIT_FAILURE;
}
}
}
}
}
int main(int argc, char* argv[])
{
// create pipes to monitor (if they don't already exist)
system("mkfifo /tmp/PIPE_A");
system("mkfifo /tmp/PIPE_B");
system("mkfifo /tmp/PIPE_C");
// open file descriptors of named pipes to watch
fd_a = open("/tmp/PIPE_A", O_RDWR | O_NONBLOCK);
if (fd_a == -1) {
perror("open");
return EXIT_FAILURE;
}
FD_ZERO(&set_a);
FD_SET(fd_a, &set_a);
fd_b = open("/tmp/PIPE_B", O_RDWR | O_NONBLOCK);
if (fd_b == -1) {
perror("open");
return EXIT_FAILURE;
}
FD_ZERO(&set_b);
FD_SET(fd_b, &set_b);
fd_c = open("/tmp/PIPE_C", O_RDWR | O_NONBLOCK);
if (fd_c == -1) {
perror("open");
return EXIT_FAILURE;
}
FD_ZERO(&set_c);
FD_SET(fd_c, &set_c);
for(;;)
{
// check pipe A
nfd_a= select(fd_a+1, &set_a, NULL, NULL, NULL);
if (nfd_a) {
if (nfd_a == -1) {
perror("select");
return EXIT_FAILURE;
}
readPipeA();
}
// check pipe B
nfd_b= select(fd_b+1, &set_b, NULL, NULL, NULL);
if (nfd_b) {
if (nfd_b == -1) {
perror("select");
return EXIT_FAILURE;
}
readPipeB();
}
// check pipe C
nfd_c= select(fd_c+1, &set_c, NULL, NULL, NULL);
if (nfd_c) {
if (nfd_c == -1) {
perror("select");
return EXIT_FAILURE;
}
readPipeC();
}
}
return EXIT_SUCCESS;
}
--- Updated Code ---
Modified the application based on the feedback here, and some more reading:
/*
* FIFO example using select.
*
* $ mkfifo /tmp/fifo
* $ clang -Wall -o test ./test.c
* $ ./test &
* $ echo 'hello' > /tmp/fifo
* $ echo 'hello world' > /tmp/fifo
* $ killall test
*/
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/types.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
int readPipe(int fd)
{
ssize_t bytes;
size_t total_bytes = 0;
char buffer[100*1024];
printf("\nDropped into read pipe\n");
for(;;) {
bytes = read(fd, buffer, sizeof(buffer));
if (bytes > 0) {
total_bytes += (size_t)bytes;
printf("%s", buffer);
} else {
if (errno == EWOULDBLOCK) {
printf("\ndone reading (%d bytes)\n", (int)total_bytes);
break;
} else {
perror("read");
return EXIT_FAILURE;
}
}
}
return EXIT_SUCCESS;
}
int main(int argc, char* argv[])
{
int fd_a, fd_b, fd_c; // file descriptors for each pipe
int nfd; // select() return value
fd_set read_fds; // file descriptor read flags
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 0;
// create pipes to monitor (if they don't already exist)
system("mkfifo /tmp/PIPE_A");
system("mkfifo /tmp/PIPE_B");
system("mkfifo /tmp/PIPE_C");
// open file descriptors of named pipes to watch
fd_a = open("/tmp/PIPE_A", O_RDWR | O_NONBLOCK);
if (fd_a == -1) {
perror("open");
return EXIT_FAILURE;
}
fd_b = open("/tmp/PIPE_B", O_RDWR | O_NONBLOCK);
if (fd_b == -1) {
perror("open");
return EXIT_FAILURE;
}
fd_c = open("/tmp/PIPE_C", O_RDWR | O_NONBLOCK);
if (fd_c == -1) {
perror("open");
return EXIT_FAILURE;
}
FD_ZERO(&read_fds);
FD_SET(fd_a, &read_fds); // add pipe to the read descriptor watch list
FD_SET(fd_b, &read_fds);
FD_SET(fd_c, &read_fds);
for(;;)
{
// check if there is new data in any of the pipes
nfd = select(fd_a+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_a, &read_fds)) {
readPipe(fd_a);
}
}
nfd = select(fd_b+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_b, &read_fds)){
readPipe(fd_b);
}
}
nfd = select(fd_c+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_c, &read_fds)){
readPipe(fd_c);
}
}
usleep(10);
}
return EXIT_SUCCESS;
}
I'm still having an issue: select returns zero (0) even when there is data waiting in one of the watched pipes. I must not be using select() and FD_ISSET() correctly. Can you see what I'm doing wrong? Thanks.
The issue is that the select function is blocking. I understood select() to check flags to see whether a read "would" block if it were performed, so that one can decide whether to perform the read or not. The pipe is being opened in RDWR and NONBLOCK mode.
You say the problem is that the select function is blocking, but then acknowledge that the NONBLOCK flag only controls whether the read would block. select and read are two different things.
The O_NONBLOCK flag affects the file descriptor (and, consequently, your read calls); it does not change the behaviour of select, which has its own timeout/blocking semantics.
man select states that a timeout argument with both numeric members set to zero produces a non-blocking poll, whereas a timeout argument of NULL may lead to an indefinite block:
If the timeout parameter is a null pointer, then the call to pselect() or select() shall block indefinitely until at least one descriptor meets the specified criteria. To effect a poll, the timeout parameter should not be a null pointer, and should point to a zero-valued timeval structure.
(NB. text further up the page indicates that, though pselect() takes a timespec structure, select() takes a timeval structure; I've taken the liberty of applying this logic to the above quotation.)
So, before each select call construct a timeval, set its members to zero, and pass that to select.
A couple of notes, while we're here:
Ideally you'd have only one select call, checking all three file descriptors at once, then decide which pipes to read from by checking your FD set with FD_ISSET();
I also suggest putting a little usleep at the end of your loop body, otherwise your program is going to spin really, really quickly when starved of data.
Here is my working solution for reading the three named pipes. It could be optimized in a few ways, but as it's written, it should be very clear for anyone else who needs to do this:
#include <sys/types.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/types.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
int readPipe(int fd)
{
ssize_t bytes;
size_t total_bytes = 0;
char buffer[100*1024];
printf("\nReading pipe descriptor # %d\n",fd);
for(;;) {
bytes = read(fd, buffer, sizeof(buffer));
if (bytes > 0) {
total_bytes += (size_t)bytes;
printf("%s", buffer);
} else {
if (errno == EWOULDBLOCK) {
break;
} else {
perror("read error");
return EXIT_FAILURE;
}
}
}
return EXIT_SUCCESS;
}
int main(int argc, char* argv[])
{
int fd_a, fd_b, fd_c; // file descriptors for each pipe
int nfd; // select() return value
fd_set read_fds; // file descriptor read flags
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 0;
// create pipes to monitor (if they don't already exist)
system("mkfifo /tmp/PIPE_A");
system("mkfifo /tmp/PIPE_B");
system("mkfifo /tmp/PIPE_C");
// open file descriptors of named pipes to watch
fd_a = open("/tmp/PIPE_A", O_RDWR | O_NONBLOCK);
if (fd_a == -1) {
perror("open error");
return EXIT_FAILURE;
}
fd_b = open("/tmp/PIPE_B", O_RDWR | O_NONBLOCK);
if (fd_b == -1) {
perror("open error");
return EXIT_FAILURE;
}
fd_c = open("/tmp/PIPE_C", O_RDWR | O_NONBLOCK);
if (fd_c == -1) {
perror("open error");
return EXIT_FAILURE;
}
for(;;)
{
// clear fds read flags
FD_ZERO(&read_fds);
// check if there is new data in any of the pipes
// PIPE_A
FD_SET(fd_a, &read_fds);
nfd = select(fd_a+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select error");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_a, &read_fds)) {
readPipe(fd_a);
}
}
// PIPE_B
FD_SET(fd_b, &read_fds);
nfd = select(fd_b+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select error");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_b, &read_fds)){
readPipe(fd_b);
}
}
// PIPE_C
FD_SET(fd_c, &read_fds);
nfd = select(fd_c+1, &read_fds, NULL, NULL, &tv);
if (nfd != 0) {
if (nfd == -1) {
perror("select error");
return EXIT_FAILURE;
}
if (FD_ISSET(fd_c, &read_fds)){
readPipe(fd_c);
}
}
usleep(100000);
}
return EXIT_SUCCESS;
}
Just to make your code simpler: you don't need three selects. You can set all three file descriptors with three FD_SET() calls, call select once, and if nfd > 0 check each fd_x with FD_ISSET().
I took a snippet I used for socket programming, but it should work the same for named pipes. It should be simple and easy to follow.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cctype>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/select.h>
int main()
{
fd_set readSet, writeSet, exSet;
struct timeval tv;
int i;
int fifoFds[3];
//open files or named pipes and put them into fifoFds array
while(1)
{
FD_ZERO(&readSet);
FD_ZERO(&writeSet); //not used
FD_ZERO(&exSet); //not used
int maxfd = -1;
for(i = 0; i < 3; i++)
{
if(maxfd == -1 || fifoFds[i] > maxfd)
maxfd = fifoFds[i];
FD_SET(fifoFds[i], &readSet);
}
tv.tv_sec = 1; //wait 1 second in select, change these as needed
tv.tv_usec = 0; //this is microseconds
select(maxfd+1, &readSet, &writeSet, &exSet, &tv);
for(i = 0; i < 3; i++)
{
if(FD_ISSET(fifoFds[i], &readSet))
{
//Read from that fifo now!
}
}
}
return 0;
}
What would be your suggestion in order to create a single instance application, so that only one process is allowed to run at a time? File lock, mutex or what?
A good way is:
#include <sys/file.h>
#include <errno.h>
int pid_file = open("/var/run/whatever.pid", O_CREAT | O_RDWR, 0666);
int rc = flock(pid_file, LOCK_EX | LOCK_NB);
if(rc) {
if(EWOULDBLOCK == errno)
; // another instance is running
}
else {
// this is the first instance
}
Note that locking allows you to ignore stale pid files (i.e. you don't have to delete them). When the application terminates for any reason the OS releases the file lock for you.
Pid files are not terribly useful because they can be stale (the file exists but the process does not). Hence, the application executable itself can be locked instead of creating and locking a pid file.
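For example, something along these lines (a sketch only; /proc/self/exe is the Linux path to the running binary, and open() needs <fcntl.h> in addition to the headers above):

int exe_fd = open("/proc/self/exe", O_RDONLY);   // lock the binary itself instead of a pid file
int exe_rc = flock(exe_fd, LOCK_EX | LOCK_NB);
if (exe_rc) {
    if (EWOULDBLOCK == errno)
        ; // another instance of this executable is already running
}
else {
    // this is the first instance; keep exe_fd open so the lock stays held
}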
A more advanced method is to create and bind a unix domain socket using a predefined socket name. Bind succeeds for the first instance of your application. Again, the OS unbinds the socket when the application terminates for any reason. When bind() fails another instance of the application can connect() and use this socket to pass its command line arguments to the first instance.
Here is a solution in C++. It uses the socket recommendation of Maxim. I like this solution better than the file-based locking solution, because the file-based one fails if the process crashes without deleting the lock file. Another user will not be able to delete the file and lock it. The sockets are automatically deleted when the process exits.
Usage:
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
... rest of the app
}
Code:
#include <netinet/in.h>
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
For Windows, a named kernel object (e.g. CreateEvent, CreateMutex). For Unix, a pid file: create a file and write your process ID to it.
You can create an "abstract namespace" AF_UNIX socket. This is completely Linux-specific, but it has the advantage that nothing actually has to exist on a filesystem.
Read the man page for unix(7) for more info.
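A minimal sketch of that idea (the name "my-app-single-instance" is just an illustration; the leading '\0' in sun_path is what places the name in the abstract namespace, and the kernel releases it automatically when the process exits):

#include <sys/socket.h>
#include <sys/un.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>

bool acquire_single_instance_lock(const char *name /* e.g. "my-app-single-instance" */)
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == -1)
        return false;

    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    addr.sun_path[0] = '\0';                                   // abstract namespace marker
    strncpy(addr.sun_path + 1, name, sizeof(addr.sun_path) - 2);

    socklen_t len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name);
    if (bind(fd, (struct sockaddr *)&addr, len) == -1) {
        close(fd);               // the name is taken: another instance owns it
        return false;
    }
    return true;                 // keep fd open for the process lifetime to hold the name
}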
Avoid file-based locking
It is always good to avoid a file-based locking mechanism to implement a singleton instance of an application. The user can always rename the lock file to a different name and run the application again as follows:
mv lockfile.pid lockfile1.pid
Where lockfile.pid is the lock file whose existence is checked before running the application.
So it is always preferable to use a locking scheme on an object visible only to the kernel; anything that depends on the file system is not reliable.
So the best option would be to bind to an inet socket. Note that unix domain sockets reside in the filesystem and suffer from the same problem.
Alternatively, you can also do it using DBUS.
It seems not to have been mentioned yet: it is possible to create a mutex in shared memory, but it needs to be marked as process-shared via its attributes (not tested):
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
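// SHARED_MEMORY_ID is assumed to be an existing System V shared memory id obtained earlier with shmget(), large enough to hold a pthread_mutex_t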
pthread_mutex_t *mutex = shmat(SHARED_MEMORY_ID, NULL, 0);
pthread_mutex_init(mutex, &attr);
There are also System V semaphores (but I failed to find out how to lock one):
int sem_id = semget(SHARED_MEMORY_KEY, 1, 0);
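For completeness, locking one would look roughly like this (a sketch, assuming sem_id comes from the semget() call above and the semaphore has been initialized to 1 with semctl(sem_id, 0, SETVAL, 1)):

#include <sys/ipc.h>
#include <sys/sem.h>
#include <errno.h>

struct sembuf lock_op;
lock_op.sem_num = 0;                        // first (and only) semaphore in the set
lock_op.sem_op  = -1;                       // decrement = acquire
lock_op.sem_flg = SEM_UNDO | IPC_NOWAIT;    // released on exit; fail instead of blocking

if (semop(sem_id, &lock_op, 1) == -1 && errno == EAGAIN) {
    // another instance already holds the lock
}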
No one has mentioned it, but sem_open() creates a real named semaphore under modern POSIX-compliant OSes. If you give a semaphore an initial value of 1, it becomes a mutex (as long as it is strictly released only if a lock was successfully obtained).
With several sem_open()-based objects, you can create equivalents of all of the common named Windows objects: named mutexes, named semaphores, and named events. Named events with "manual reset" set to true are a bit more difficult to emulate (they require four semaphore objects to properly emulate CreateEvent(), SetEvent(), and ResetEvent()). Anyway, I digress.
Alternatively, there is named shared memory. You can initialize a pthread mutex with the "shared process" attribute in named shared memory and then all processes can safely access that mutex object after opening a handle to the shared memory with shm_open()/mmap(). sem_open() is easier if it is available for your platform (if it isn't, it should be for sanity's sake).
Regardless of the method you use, to test for a single instance of your application, use the trylock() variant of the wait function (e.g. sem_trywait()). If the process is the only one running, it will successfully lock the mutex. If it isn't, it will fail immediately.
Don't forget to unlock and close the mutex on application exit.
It will depend on which problem you want to avoid by forcing your application to have only one instance, and on the scope at which you consider instances.
For a daemon, the usual way is to have a /var/run/app.pid file.
For a user application, I've had more problems with applications that prevented me from running them twice than with being able to run twice an application that shouldn't have been. So the answer to "why, and at which scope" is very important, and will probably suggest an answer specific to that why and that intended scope.
Here is a solution based on sem_open
/*
*compile with :
*gcc single.c -o single -pthread
*/
/*
* run multiple instance on 'single', and check the behavior
*/
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <semaphore.h>
#include <unistd.h>
#include <errno.h>
#define SEM_NAME "/mysem_911"
int main()
{
sem_t *sem;
int rc;
sem = sem_open(SEM_NAME, O_CREAT, S_IRWXU, 1);
if(sem==SEM_FAILED){
printf("sem_open: failed errno:%d\n", errno);
}
rc=sem_trywait(sem);
if(rc == 0){
printf("Obtained lock !!!\n");
sleep(10);
//sem_post(sem);
sem_unlink(SEM_NAME);
}else{
printf("Lock not obtained\n");
}
}
One of the comments on a different answer says "I found sem_open() rather lacking". I am not sure about the specifics of what's lacking.
Based on the hints in Maxim's answer, here is my POSIX solution for a dual-role daemon (i.e. a single application that can act as the daemon and as a client communicating with that daemon). This scheme has the advantage of providing an elegant solution to the problem where the instance started first should be the daemon and all following executions should just offload their work to that daemon. It is a complete example, but it lacks a lot of things a real daemon should do (e.g. using syslog for logging, forking to put itself into the background correctly, dropping privileges, etc.); it is already quite long, but it is fully working as is. I have only tested this on Linux so far, but IIRC it should be all POSIX-compatible.
In the example, a client sends the integer passed to it as its first command line argument (parsed by atoi) over the socket to the daemon, which prints it to stdout. With this kind of socket it is also possible to transfer arrays, structs and even file descriptors (see man 7 unix).
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/un.h>
#define SOCKET_NAME "/tmp/exampled"
static int socket_fd = -1;
static bool isdaemon = false;
static bool run = true;
/* returns
* -1 on errors
* 0 on successful server bindings
* 1 on successful client connects
*/
int singleton_connect(const char *name) {
int len, tmpd;
struct sockaddr_un addr = {0};
if ((tmpd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) {
printf("Could not create socket: '%s'.\n", strerror(errno));
return -1;
}
/* fill in socket address structure */
addr.sun_family = AF_UNIX;
strcpy(addr.sun_path, name);
len = offsetof(struct sockaddr_un, sun_path) + strlen(name);
int ret;
unsigned int retries = 1;
do {
/* bind the name to the descriptor */
ret = bind(tmpd, (struct sockaddr *)&addr, len);
/* if this succeeds there was no daemon before */
if (ret == 0) {
socket_fd = tmpd;
isdaemon = true;
return 0;
} else {
if (errno == EADDRINUSE) {
ret = connect(tmpd, (struct sockaddr *) &addr, sizeof(struct sockaddr_un));
if (ret != 0) {
if (errno == ECONNREFUSED) {
printf("Could not connect to socket - assuming daemon died.\n");
unlink(name);
continue;
}
printf("Could not connect to socket: '%s'.\n", strerror(errno));
continue;
}
printf("Daemon is already running.\n");
socket_fd = tmpd;
return 1;
}
printf("Could not bind to socket: '%s'.\n", strerror(errno));
continue;
}
} while (retries-- > 0);
printf("Could neither connect to an existing daemon nor become one.\n");
close(tmpd);
return -1;
}
static void cleanup(void) {
if (socket_fd >= 0) {
if (isdaemon) {
if (unlink(SOCKET_NAME) < 0)
printf("Could not remove FIFO.\n");
} else
close(socket_fd);
}
}
static void handler(int sig) {
run = false;
}
int main(int argc, char **argv) {
switch (singleton_connect(SOCKET_NAME)) {
case 0: { /* Daemon */
struct sigaction sa;
sa.sa_handler = &handler;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGINT, &sa, NULL) != 0 || sigaction(SIGQUIT, &sa, NULL) != 0 || sigaction(SIGTERM, &sa, NULL) != 0) {
printf("Could not set up signal handlers!\n");
cleanup();
return EXIT_FAILURE;
}
struct msghdr msg = {0};
struct iovec iovec;
int client_arg;
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
while (run) {
int ret = recvmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret != sizeof(client_arg)) {
if (errno != EAGAIN && errno != EWOULDBLOCK) {
printf("Error while accessing socket: %s\n", strerror(errno));
exit(1);
}
printf("No further client_args in socket.\n");
} else {
printf("received client_arg=%d\n", client_arg);
}
/* do daemon stuff */
sleep(1);
}
printf("Dropped out of daemon loop. Shutting down.\n");
cleanup();
return EXIT_FAILURE;
}
case 1: { /* Client */
if (argc < 2) {
printf("Usage: %s <int>\n", argv[0]);
return EXIT_FAILURE;
}
struct iovec iovec;
struct msghdr msg = {0};
int client_arg = atoi(argv[1]);
iovec.iov_base = &client_arg;
iovec.iov_len = sizeof(client_arg);
msg.msg_iov = &iovec;
msg.msg_iovlen = 1;
int ret = sendmsg(socket_fd, &msg, 0);
if (ret != sizeof(client_arg)) {
if (ret < 0)
printf("Could not send device address to daemon: '%s'!\n", strerror(errno));
else
printf("Could not send device address to daemon completely!\n");
cleanup();
return EXIT_FAILURE;
}
printf("Sent client_arg (%d) to daemon.\n", client_arg);
break;
}
default:
cleanup();
return EXIT_FAILURE;
}
cleanup();
return EXIT_SUCCESS;
}
All credit goes to Mark Lakata. I merely did some very minor touch-up.
main.cpp
#include "singleton.hpp"
#include <iostream>
using namespace std;
int main()
{
SingletonProcess singleton(5555); // pick a port number to use that is specific to this app
if (!singleton())
{
cerr << "process running already. See " << singleton.GetLockFileName() << endl;
return 1;
}
// ... rest of the app
}
singleton.hpp
#include <netinet/in.h>
#include <unistd.h>
#include <cerrno>
#include <string>
#include <cstring>
#include <stdexcept>
using namespace std;
class SingletonProcess
{
public:
SingletonProcess(uint16_t port0)
: socket_fd(-1)
, rc(1)
, port(port0)
{
}
~SingletonProcess()
{
if (socket_fd != -1)
{
close(socket_fd);
}
}
bool operator()()
{
if (socket_fd == -1 || rc)
{
socket_fd = -1;
rc = 1;
if ((socket_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
throw std::runtime_error(std::string("Could not create socket: ") + strerror(errno));
}
else
{
struct sockaddr_in name;
name.sin_family = AF_INET;
name.sin_port = htons (port);
name.sin_addr.s_addr = htonl (INADDR_ANY);
rc = bind (socket_fd, (struct sockaddr *) &name, sizeof (name));
}
}
return (socket_fd != -1 && rc == 0);
}
std::string GetLockFileName()
{
return "port " + std::to_string(port);
}
private:
int socket_fd = -1;
int rc;
uint16_t port;
};
#include <windows.h>
int main(int argc, char *argv[])
{
// ensure only one running instance
HANDLE hMutexHandle = CreateMutex(NULL, TRUE, L"my.mutex.name");
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
return 0;
}
// rest of the program
ReleaseMutex(hMutexHandle);
CloseHandle(hMutexHandle);
return 0;
}
FROM: HERE
On Windows you could also create a shared data segment and use an interlocked function to test for the first occurrence, e.g.
#include <Windows.h>
#include <stdio.h>
#include <conio.h>
#pragma data_seg("Shared")
volatile LONG lock = 0;
#pragma data_seg()
#pragma comment(linker, "/SECTION:Shared,RWS")
void main()
{
if (InterlockedExchange(&lock, 1) == 0)
printf("first\n");
else
printf("other\n");
getch();
}
I have just written one, and tested it.
#define PID_FILE "/tmp/pidfile"
static void create_pidfile(void) {
int fd = open(PID_FILE, O_RDWR | O_CREAT | O_EXCL, 0);
close(fd);
}
int main(void) {
int fd = open(PID_FILE, O_RDONLY);
if (fd >= 0) { // the pid file already exists: another instance is running
close(fd);
return 0;
}
// make sure only one instance is running
create_pidfile();
}
Just run this code on a separate thread:
#include <fstream>
#include <string>
using namespace std;

void lock() {
while(1) {
ofstream closer("myapplock.locker", ios::trunc);
closer << "locked";
closer.close();
}
}
Run this as your main code:
int main() {
ifstream reader("myapplock.locker");
string s;
reader >> s;
if (s != "locked") {
//your code
}
return 0;
}