In this code, I am trying to do IPC with signals. The parent process sends a SIGUSR1 signal to each of its children and waits for a SIGUSR1 signal as confirmation that the signal arrived at the child end.
So I have this kind of communication between parent and child processes:
Parent --> SIGUSR1 --> CHILD[i]
Parent <-- SIGUSR1 <-- CHILD[i]
In normal execution I have never faced a deadlock. But when I run under valgrind, sometimes a child process cannot receive the SIGUSR1 signal from the parent, which is the start signal for the communication.
Does valgrind use some mechanism that interferes with signal delivery between processes?
Parent process:
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <fcntl.h>
#include <errno.h>

#define CHILD_SIZE 80

sigset_t sigusr_mask;
pid_t children[CHILD_SIZE];

void SIGUSR1_handler(int sig)
{
    write(1, "test.\n", 6);
}

void init_signal()
{
    sigset_t blck_msk;
    sigemptyset(&blck_msk);
    sigaddset(&blck_msk, SIGUSR1);

    sigfillset(&sigusr_mask);
    sigdelset(&sigusr_mask, SIGUSR1);

    struct sigaction usr1_act;
    memset(&usr1_act, 0, sizeof(usr1_act));
    usr1_act.sa_handler = &SIGUSR1_handler;
    sigaction(SIGUSR1, &usr1_act, NULL);

    if (-1 == sigprocmask(SIG_BLOCK, &blck_msk, NULL))
        exit(-1);
}

void create_children(const char* processPath, int round_)
{
    for (int i = 0; i < CHILD_SIZE; ++i)
    {
        pid_t pid_c = fork();
        if (pid_c > 0) {
            children[i] = pid_c;
        }
        else if (pid_c == 0)
        {
            char* arg[1024] = {"childProcess", (char*)0};
            execve(processPath, arg, NULL);
            _exit(127); /* only reached if execve fails */
        }
    }
}

// for the synchronization
void wait_childs()
{
    for (int i = 0; i < CHILD_SIZE; ++i)
    {
        // send SIGUSR1 to the children.
        kill(children[i], SIGUSR1);
        printf("%d : %s\n", children[i], strerror(errno));
        // wait until children reply by SIGUSR1
        sigsuspend(&sigusr_mask);
    }
}

int main(int argc, char const *argv[])
{
    init_signal();
    create_children("childProcess", 1);
    wait_childs();
    return (0);
}
Child process:
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include "utils.h"

sigset_t sigusr_mask;

void SIGUSR1_handler(int sig) {
    write(1, "usr1\n", 5);
}

void init_signal()
{
    sigfillset(&sigusr_mask);
    sigdelset(&sigusr_mask, SIGUSR1);

    struct sigaction usr1_act;
    memset(&usr1_act, 0, sizeof(usr1_act));
    usr1_act.sa_handler = &SIGUSR1_handler;
    sigaction(SIGUSR1, &usr1_act, NULL);
}

int main(int argc, char const *argv[])
{
    init_signal();
    printf("child %d waiting \n", getpid());
    // wait until parent starts the communication.
    sigsuspend(&sigusr_mask);
    // after parent sends SIGUSR1, send reply.
    kill(getppid(), SIGUSR1);
    return 0;
}
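Since the code above is split across two programs, here is a condensed, self-contained sketch of the same Parent --> SIGUSR1 --> child --> SIGUSR1 --> parent round trip in one file (fork without execve, a single child, names of my choosing), which can be used to reproduce the handshake in isolation:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void usr1_handler(int sig) { (void)sig; }

int main(void)
{
    sigset_t block_usr1, wait_mask;

    /* Block SIGUSR1 before forking; the mask is inherited, so neither side
       can lose a signal that arrives before it reaches sigsuspend(). */
    sigemptyset(&block_usr1);
    sigaddset(&block_usr1, SIGUSR1);
    sigprocmask(SIG_BLOCK, &block_usr1, &wait_mask); /* wait_mask = old mask, SIGUSR1 unblocked */

    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = usr1_handler;
    sigaction(SIGUSR1, &sa, NULL);

    pid_t child = fork();
    if (child == 0) {
        /* Child: wait for the parent's start signal, then reply. */
        sigsuspend(&wait_mask);          /* atomically unblock SIGUSR1 and wait */
        kill(getppid(), SIGUSR1);
        _exit(0);
    }

    kill(child, SIGUSR1);                /* Parent --> SIGUSR1 --> child */
    sigsuspend(&wait_mask);              /* Parent <-- SIGUSR1 <-- child */
    waitpid(child, NULL, 0);
    puts("round trip done");
    return 0;
}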
Related
Here is the code that I have so far:
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int value = 0;
void *runner(void *param);

int main(){
    int pid;
    pthread_t tid;
    pthread_attr_t attr;

    pid = fork();
    if (pid == 0) {
        pthread_attr_init(&attr);
        pthread_create(&tid, &attr, runner, NULL);
        pthread_join(tid, NULL);
        printf("A: value = %d\n", value);
    }
    else if (pid > 0) {
        wait(NULL);
        printf("B: value = %d\n", value);
    }
}

void *runner(void *param){
    value = 5;
    pthread_exit(0);
}
I have identified the child and parent processes, but I am not sure how to make A and B print interleaved.
How can I disable buffering in a pipe? I'm creating a simple recorder/player for I/O. To do this I need to record the output together with time delays.
To record the delays, I need something like this:
example text
"wait 1s"
example text
"wait 1s"
example text
...
but if I use
pipe2(in, O_DIRECT);
I see something like this:
"wait 100s"
"100 times" example text
"wait 100s"
"100 times" example text
...
The man7.org man page says:
O_DIRECT (since Linux 3.4)
Create a pipe that performs I/O in "packet" mode. Each write(2) to the pipe is dealt with as a separate packet, and read(2)s from the pipe will read one packet at a time.
I tried to disable buffering with:
fcntl(in[1], F_SETPIPE_SZ, 1);
but it still doesn't work.
read.cpp
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <signal.h>
#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <assert.h>

volatile sig_atomic_t keep = 1;   // written from the signal handler, so not a plain bool

void intHandler(int dummy) {
    keep = 0;
}

int main(void) {
    signal(SIGINT, intHandler);

    int in[2];
    int out[2];
    int pid;
    char buf[1024];

    pipe2(in, O_DIRECT);
    pipe2(out, O_DIRECT);

    pid = fork();
    if (pid == -1) {
        perror("fork");
        exit(1);
    }
    if (pid == 0) {
        close(in[0]);
        close(out[1]);
        close(0);
        close(1);
        dup2(in[1], 1);
        dup2(out[0], 0);
        close(in[1]);
        close(out[0]);
        sleep(1);
        char *newargv[] = {(char *)"/root/Pulpit/a1", NULL};
        char *newenviron[] = {NULL};
        int ret = execve("/root/Pulpit/a1", newargv, newenviron);
        printf("%d", ret);
        return 0;
    } else {
        close(out[0]);
        close(in[1]);
        while (keep) {
            int wyn = read(in[0], buf, 1024);
            if (wyn > 0) {
                write(1, buf, wyn);
                fsync(1);
            }
        }
    }
    return (0);
}
a1.cpp
#include <cstdlib>
#include <unistd.h>
#include "stdio.h"

using namespace std;

int main(int argc, char** argv) {
    while (true) {
        printf("example text\n");
        usleep(100000);
    }
    return 0;
}
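One thing worth checking (an assumption on my part, not something established in the question): stdio fully buffers stdout when it is redirected to a pipe, so printf in a1.cpp may be batching the lines regardless of the pipe's O_DIRECT or F_SETPIPE_SZ settings. A sketch of a1.cpp with stdio buffering disabled on the writer side:

// a1.cpp, with stdio buffering turned off before anything is printed
// (assumption: the batching comes from stdio, not from the pipe itself)
#include <cstdio>
#include <unistd.h>

int main() {
    setvbuf(stdout, NULL, _IONBF, 0);   // unbuffered: each printf reaches the pipe immediately
    while (true) {
        printf("example text\n");
        usleep(100000);                 // 100 ms between lines
    }
    return 0;
}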
Does using the C function daemon() have any security or stability disadvantages for a Linux daemon compared to using explicit functions like fork(), setsid(), umask(), etc. (besides being unable to set all daemon parameters)?
I was wondering why I should write
#include <sys/types.h>
#include <sys/stat.h>
#include <cstdio>
#include <cstdlib>
#include <fcntl.h>
#include <cerrno>
#include <unistd.h>
#include <syslog.h>
#include <string>

#define DAEMON_NAME "mydaemon"   // placeholder; the snippet assumes this is defined elsewhere

int main()
{
    // Set our logging mask and open the log
    setlogmask(LOG_UPTO(LOG_NOTICE));
    openlog(DAEMON_NAME, LOG_CONS | LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_USER);
    syslog(LOG_INFO, "Entering Daemon");

    pid_t pid, sid;

    // Fork the parent process
    pid = fork();
    if (pid < 0)
        exit(EXIT_FAILURE);
    // We got a good pid; exit the parent process
    if (pid > 0)
        exit(EXIT_SUCCESS);

    // Change the file mode mask
    umask(0);

    // Create a new session ID for our child
    sid = setsid();
    if (sid < 0)
        exit(EXIT_FAILURE);

    // Change directory; if we can't find the directory we exit with failure.
    if ((chdir("/")) < 0)
        exit(EXIT_FAILURE);

    // Close standard file descriptors
    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);

    while (true)
    {
        sleep(5);
        // Do something
    }

    closelog();
}
instead of
#include <unistd.h>

int main()
{
    daemon(0, 0);
    while (true)
    {
        // Do something
        sleep(5);
    }
}
Per the man page, it's not in POSIX, so you're always taking a risk regarding its existence.
Otherwise, no.
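If you do use daemon(), it is worth checking its return value, since it can fail (for example if the underlying fork() fails). A minimal sketch:

#include <cstdio>
#include <cstdlib>
#include <unistd.h>

int main()
{
    // daemon(nochdir, noclose): 0, 0 means chdir("/") and redirect stdio to /dev/null.
    if (daemon(0, 0) == -1) {
        perror("daemon");
        return EXIT_FAILURE;
    }
    while (true)
    {
        // Do something
        sleep(5);
    }
}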
I am currently trying to implement a server in C++ using sockets. I am trying to prevent race conditions by blocking the SIGINT signal except while the thread is stuck in the blocking pselect(). From there, it should exit pselect, change my loop variable, and then quit the thread. From my attempts at getting this working, it appears that the code reaches the pselect(), but it never gets interrupted. Any help is appreciated.
Listener.h:
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <pthread.h>

class CListener
{
public:
    CListener();
    void quitListener(void);

private:
    void* InitListener(void);
    static void* StartListenerThread(void* context);

    static bool mbListening;
    pthread_t mtThreadID;
};
Listener.cpp
#include <iostream>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/select.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include "Listener.h"

bool CListener::mbListening = true;

CListener::CListener()
{
    mbListening = true;
    mtThreadID = 0;
    pthread_create(&mtThreadID, NULL, &CListener::StartListenerThread, this);
}

void* CListener::StartListenerThread(void* context)
{
    return ((CListener*)context)->InitListener();
}

void* CListener::InitListener()
{
    sigset_t tSignalSet;
    sigset_t tOriginalSignalSet;
    // These were not declared in the snippet as posted; declared here so it compiles.
    fd_set tConnectionSet;
    fd_set tSelectSet;
    int nReadyConnections = 0;
    int nSelectSocket = 0;   // this stripped-down example watches fd 0

    sigemptyset(&tSignalSet);
    sigaddset(&tSignalSet, SIGINT);
    sigprocmask(SIG_BLOCK, &tSignalSet, &tOriginalSignalSet);

    FD_ZERO(&tConnectionSet);
    FD_SET(0, &tConnectionSet);

    while (mbListening)
    {
        tSelectSet = tConnectionSet;
        std::cout << "Reached pselect\n";
        nReadyConnections = pselect(nSelectSocket + 1, &tSelectSet,
                                    NULL, NULL, NULL, &tOriginalSignalSet);
        std::cout << "Broke out of pselect\n";
        if (nReadyConnections < 0 && errno == EINTR)
        {
            mbListening = false;
        }
    }
    pthread_exit(NULL);
    return NULL;
}

void CListener::quitListener()
{
    raise(SIGINT);
}
As long as I copied everything correctly (fingers crossed), you should just be able to run:
CListener tListener = CListener();
usleep(20000);
tListener.quitListener();
and the output should be displayed in the terminal. My end goal is to allow pselect to be interrupted without breaking any processing that may come after, and to let the thread close gracefully (block at pselect > receive SIGINT > interrupt pselect > return to loop > finish up and exit).
I've solved my own problem. Since I am spawning a thread, I needed to add functions that account for that, as well as add a signal handler, as shown below.
void CListener::quitListener()
{
    pthread_kill(mtThreadID, SIGINT);
}

void CListener::installSIGINTHandler()
{
    signal(SIGINT, CListener::SIGINTHandler);
}

void CListener::SIGINTHandler(int signo)
{
    mbListening = false;
}
And I needed to change the signal mask setup to this:
pthread_sigmask(SIG_BLOCK, &tSignalSet, &tOriginalSignalSet);
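To tie the pieces together (the following is my own sketch of how these calls could be used, not part of the original answer): SIGINTHandler has to be declared static in Listener.h, since a non-static member function cannot be passed to signal(), and the handler must be installed before quitListener() sends the signal.

// Hypothetical usage, assuming installSIGINTHandler()/SIGINTHandler() were added
// to CListener in Listener.h (SIGINTHandler as a static member).
int main()
{
    CListener tListener;                 // constructor starts the listener thread
    tListener.installSIGINTHandler();    // install the SIGINT handler before interrupting
    usleep(20000);                       // give the thread time to reach pselect()
    tListener.quitListener();            // pthread_kill(mtThreadID, SIGINT) wakes pselect
    return 0;                            // a real program would also join the thread here
}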
I am trying to synchronize a parent and its children; the following code is not working (apparently usr_interrupt++ is not atomic). Semaphores do not seem to help either.
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include <unistd.h>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cstring>
#include <string>
#include <semaphore.h>
#include <fcntl.h>

using namespace std;

/* When a SIGUSR1 signal arrives, set this variable. */
volatile sig_atomic_t usr_interrupt;

sem_t *mutex;
char* SEM_NAME;

void
synch_signal (int sig)
{
    // sem_wait(mutex);
    usr_interrupt++;
    // sem_post(mutex);
}

/* The child process executes this function. */
void
child_function (void)
{
    /* Perform initialization. */
    cerr << "I'm here!!! My pid is " << (int)getpid() << " my usr_int=" << usr_interrupt << endl;
    /* Let parent know you're done. */
    kill (getppid (), SIGUSR1);
    /* Continue with execution. */
    cerr << "Bye, now...." << endl;
    exit(0);
}

int
main (void)
{
    usr_interrupt = 0;

    string s_sem_name = "lir";
    SEM_NAME = new char[s_sem_name.size()+1];
    memcpy(SEM_NAME, s_sem_name.c_str(), s_sem_name.size());
    SEM_NAME[s_sem_name.size()] = '\0';

    mutex = sem_open (SEM_NAME, O_CREAT, 0644, 1);
    if (mutex == SEM_FAILED) {
        perror("unable to create semaphore");
        sem_unlink(SEM_NAME);
        exit(-1);
    }

    struct sigaction usr_action;
    sigset_t mask, oldmask;
    pid_t child_id, child_id2;

    /* Set up the mask of signals to temporarily block. */
    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);

    /* Establish the signal handler. */
    usr_action.sa_handler = synch_signal;
    sigemptyset (&usr_action.sa_mask);
    usr_action.sa_flags = 0;
    sigaction (SIGUSR1, &usr_action, NULL);

    /* Create the 2 children processes. */
    child_id = fork ();
    if (child_id == 0)
        child_function ();
    child_id2 = fork();
    if (child_id2 == 0)
        child_function ();

    /* Wait for a signal to arrive. */
    sigprocmask (SIG_BLOCK, &mask, &oldmask);
    while (usr_interrupt != 2) {
        sigsuspend (&oldmask);
    }
    sigprocmask (SIG_UNBLOCK, &mask, NULL);

    /* Now continue execution. */
    puts ("That's all, folks!");
    return 0;
}
Can anyone suggest a fix? (I cannot use threads)
Best,
-- Liron
You can't count signals. Two signals of the same type have the same semantic meaning as one signal of that type. You could use two different signal types, like SIGUSR1 and SIGUSR2. But honestly, you shouldn't use signals as a communication mechanism. Use something sensible, like a pipe.
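For illustration, here is a minimal sketch of the pipe approach (my own example, not taken from the question): each child writes one byte when it is done, and the parent reads until it has heard from both, so the notifications cannot be merged the way signals are.

#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

int main()
{
    int fds[2];
    if (pipe(fds) == -1) {
        perror("pipe");
        return 1;
    }

    for (int i = 0; i < 2; ++i) {
        if (fork() == 0) {
            /* Child: do its initialization, then report completion with one byte. */
            fprintf(stderr, "I'm here!!! My pid is %d\n", (int)getpid());
            char done = 'x';
            write(fds[1], &done, 1);
            _exit(0);
        }
    }

    /* Parent: wait for exactly two completion bytes; read() blocks until data arrives. */
    char buf;
    for (int received = 0; received < 2; ) {
        if (read(fds[0], &buf, 1) == 1)
            ++received;
    }

    while (wait(NULL) > 0)
        ; /* reap the children */
    puts("That's all, folks!");
    return 0;
}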