I am trying to run 3 commands as a pipeline:
ls -l / | grep a | sort -r
However, my code seems to hang. I know this usually indicates an unclosed pipe descriptor, but I can't find the problem; I have closed every file descriptor I could find.
The code may have some meaningless lines, like closing the same FD twice, which I added while trying to fix it.
#include <iostream>
#include<unistd.h>
#include <sys/wait.h>
using namespace std;
#define pipeWrite 1
#define pipeRead 0
// Main Function
int main() {
//Command to run: ls -l / | grep a | sort -r | wc > count.txt
// Total 4 commands to run, so 4 child processes.
pid_t c1,c2,c3,c4;
// File Descriptors
int pipeFD_1[2];
int pipeFD_2[2];
pipe(pipeFD_1);
// To execute ls -l
c1 = fork();
// Child
if (c1 == 0) {
// Closing input end
close(1);
dup(pipeFD_1[1]);
close(pipeFD_1[0]);
close(pipeFD_1[1]);
// Running command
execl("/bin/ls", "ls", "-l", "/",NULL);
}
pipe(pipeFD_2);
c2 = fork();
if (c2 == 0) {
// Closing output end
close(0);
dup(pipeFD_1[0]);
close(1);
dup(pipeFD_2[1]);
close(pipeFD_1[0]);
close(pipeFD_1[1]);
close(pipeFD_2[0]);
close(pipeFD_2[1]);
// Running command
execl("/bin/grep", "grep", "a", NULL);
}
//Closing pipe 1.
close(pipeFD_1[0]);
close(pipeFD_1[1]);
c3 = fork();
if (c3 == 0) {
// Closing out end
close(0);
dup(pipeFD_2[0]);
close(pipeFD_2[0]);
close(pipeFD_2[1]);
// Running command
execl("/bin/sort", "sort", "-r", NULL);
}
// Since exec auto exits the child process. No need to put else.
// Closing output end
waitpid(-1, NULL, 0);
waitpid(-1, NULL, 0);
waitpid(-1, NULL, 0);
exit(0);
}
EDIT: Fixed by adding 2 more lines after the third fork, closing the parent's FDs for pipe 2.
Thank you Barmar and ItsHoney for your valuable discussions. Posting this as an answer to help other community members.
Fixed by adding 2 more lines after the third fork, closing the parent's file descriptors for pipe 2. As long as the parent keeps the write end of pipe 2 open, sort never sees EOF on its stdin, so it never finishes and the pipeline hangs.
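Specifically, these are the two lines added in the parent, after the third fork and before the waitpid calls:
close(pipeFD_2[0]);
close(pipeFD_2[1]);
The full program with the fix applied: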
#include <iostream>
#include<unistd.h>
#include <sys/wait.h>
using namespace std;
#define pipeWrite 1
#define pipeRead 0
// Main Function
int main() {
//Command to run: ls -l / | grep a | sort -r | wc > count.txt
// Total 4 commands to run, so 4 child processes.
pid_t c1,c2,c3,c4;
// File Descriptors
int pipeFD_1[2];
int pipeFD_2[2];
pipe(pipeFD_1);
// To execute ls -l
c1 = fork();
// Child
if (c1 == 0) {
// Closing input end
close(1);
dup(pipeFD_1[1]);
close(pipeFD_1[0]);
close(pipeFD_1[1]);
// Running command
execl("/bin/ls", "ls", "-l", "/",NULL);
}
pipe(pipeFD_2);
c2 = fork();
if (c2 == 0) {
// Closing output end
close(0);
dup(pipeFD_1[0]);
close(1);
dup(pipeFD_2[1]);
close(pipeFD_1[0]);
close(pipeFD_1[1]);
close(pipeFD_2[0]);
close(pipeFD_2[1]);
// Running command
execl("/bin/grep", "grep", "a", NULL);
}
//Closing pipe 1.
close(pipeFD_1[0]);
close(pipeFD_1[1]);
c3 = fork();
if (c3 == 0) {
// Closing out end
close(0);
dup(pipeFD_2[0]);
close(pipeFD_2[0]);
close(pipeFD_2[1]);
// Running command
execl("/bin/sort", "sort", "-r", NULL);
}
// Since exec replaces the child's process image, there is no need for an else branch.
// Parent: close both ends of pipe 2 so sort sees EOF on its stdin (these two lines are the fix).
close(pipeFD_2[0]);
close(pipeFD_2[1]);
waitpid(-1, NULL, 0);
waitpid(-1, NULL, 0);
waitpid(-1, NULL, 0);
exit(0);
}
On a Windows machine, my code runs ShellExecute to launch abc.exe with SW_HIDE as the last argument to hide the launched command-line window. This runs fine, as desired.
std::string torun = "/c abc.exe >abc.log";
HINSTANCE retVal = ShellExecute(NULL, _T("open"), _T("cmd"), std::wstring(torun.begin(), torun.end()).c_str(), NULL, SW_HIDE);
My issue is that abc.log, which captures the output of abc.exe, is not accessible even though it is written successfully. The code below prints "file does not exist".
std::string filename = "abc.log";
if (_access(filename.c_str(), 0) == -1)
std::cout<<"file does not exist";
else
std::cout<<"file exists";
I need to read the content of this log file after checking that it exists. The code below also prints "file does not exist".
ifstream fin;
fin.open("abc.log");
if(fin)
std::cout<<"file exists";
else
std::cout<<"file does not exist";
Is there some permission issue? I have two needs: launch abc.exe in a command-line window that is hidden or runs behind all open windows, and also be able to read abc.log. I appreciate the help.
You need to wait for the child process to finish:
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
#include <tchar.h>
#include <shellapi.h>
...
SHELLEXECUTEINFO sei = { sizeof(sei), SEE_MASK_NOCLOSEPROCESS|SEE_MASK_FLAG_NO_UI };
sei.lpFile = TEXT("cmd.exe");
sei.lpParameters = TEXT("/C ping localhost > abc.log"); // Command that takes a while to complete
sei.nShow = SW_HIDE;
if (ShellExecuteEx(&sei))
{
if (sei.hProcess)
{
WaitForSingleObject(sei.hProcess, INFINITE);
CloseHandle(sei.hProcess);
FILE*file = fopen("abc.log", "r");
if (file)
{
char buf[1337];
size_t cb = fread(buf, 1, sizeof(buf), file);
if (cb)
{
buf[min(cb, sizeof(buf)-1)] = '\0';
MessageBoxA(NULL, buf, "stdout started with", 0);
}
fclose(file);
}
}
}
This is still a bit lazy; the optimal way to read stdout from a child process is to call CreateProcess with pipes.
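For reference, here is a minimal sketch of that CreateProcess-with-pipes approach. It is not code from the answer above: the command line is a placeholder and error handling is reduced to early returns.
#include <windows.h>
#include <stdio.h>

int main(void)
{
    // Inheritable pipe: the child writes into hWrite, we read from hRead.
    SECURITY_ATTRIBUTES sa = { sizeof(sa), NULL, TRUE };
    HANDLE hRead, hWrite;
    if (!CreatePipe(&hRead, &hWrite, &sa, 0)) return 1;
    // The read end stays private to this process.
    SetHandleInformation(hRead, HANDLE_FLAG_INHERIT, 0);

    STARTUPINFO si = { sizeof(si) };
    si.dwFlags = STARTF_USESTDHANDLES;
    si.hStdOutput = hWrite;                 // child's stdout goes into the pipe
    si.hStdError  = hWrite;
    si.hStdInput  = GetStdHandle(STD_INPUT_HANDLE);

    PROCESS_INFORMATION pi;
    TCHAR cmd[] = TEXT("cmd.exe /C ping localhost");   // placeholder command
    if (!CreateProcess(NULL, cmd, NULL, NULL, TRUE,
                       CREATE_NO_WINDOW,    // no visible console window
                       NULL, NULL, &si, &pi)) return 1;

    // Close our copy of the write end, otherwise ReadFile never sees EOF.
    CloseHandle(hWrite);

    char buf[1024];
    DWORD cb;
    while (ReadFile(hRead, buf, sizeof(buf) - 1, &cb, NULL) && cb > 0)
    {
        buf[cb] = '\0';
        fputs(buf, stdout);                 // or write it to abc.log yourself
    }

    WaitForSingleObject(pi.hProcess, INFINITE);
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
    CloseHandle(hRead);
    return 0;
}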
I fork a child process which in turn spawns a grand-child process and returns. If there are any errors in spawning that grand-child, the child writes to stderr using fprintf statements and exits.
I am trying to read stderr from the child process in the parent, but the parent hangs in the while loop reading from the child process. When I look at ps -ef, the child is <defunct>.
Following is the code I wrote to accomplish this. I am new to this, and searching the web does not give enough information about why the parent hangs when the child is <defunct>.
int pipe_out[2];
// Create the pipe
if (pipe(pipe_out) < 0) {
perror("Failed pipe");
exit(1);
}
// Create the child process
int status;
pid_t pid = fork();
switch(pid) {
case -1:
perror("Failed fork");
exit(1);
case 0: { // Child
close(pipe_out[0]); // close read end
// Make stderr go to pipe write end
dup2(pipe_out[1], STDERR_FILENO);
close(pipe_out[1]);
// start my process
execvp(inp_args[0], inp_args);
_exit(EXIT_FAILURE);
break;
}
default: { // Parent
close(pipe_out[1]); // close write end
// read from child
while( read(pipe_out[0], buffer, sizeof(buffer)) )
log(stdout, "%s\n",buffer);
}
// wait for end then close other end of pipe
waitpid(pid, &status, 0);
if (WIFSIGNALED(status))
log(stdout, "killed by signal %d\n", WTERMSIG(status));
close(pipe_out[0]);
}
}
I'm trying to make a program that forks once; while the parent waits for the child to terminate, the child forks again and the two resulting processes each call exec. There is a pipe in the program, and I've checked the return values of every dup2() and pipe() call in the program (I just omitted them here to keep it concise). The problem is that I only get the result of ls -a | sort -r AFTER the program finishes.
The code is:
#include <cstdio>
#include <cstring>
#include <sys/wait.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
int main(int argc, char *argv[]) {
printf("Shell> \n"); fflush(stdout);
pid_t pid1;
pid_t pid2;
int status = 0;
int fd[2];
if(pipe(fd) < 0) {
printf("FATAL ERROR.\n");
}
pid1 = fork();
if(pid1 > 0) { // Parent
waitpid(pid1, &status, 0);
printf("\t\t------PID1 Complete-------\n\n");
}
else { // Child
if(pid1 == 0) {
printf("ON CHILD\n");
pid2 = fork();
if(pid2 > 0) { // Child -> Parent
printf("ON CHILD-Parent\n");
close(fd[1]);
dup2(fd[0], STDIN_FILENO);
waitpid(pid2, &status, 0);
printf("ON CHILD-Parent after wait\n");
execlp("sort", "sort", "-r", NULL);
perror("Problem with execlp\n");
exit(1);
}
else { // Child -> Child
printf("ON CHILD->Child\n");
close(fd[0]);
dup2(fd[1], STDOUT_FILENO);
execlp("ls", "ls", "-a", NULL);
perror("Problem with execvp\n");
exit(1);
}
} // End of if(pid1 == 0)
} // End of Child
printf("\nEnd of program.\n");
return 0;
}
My current output is:
Shell>
ON CHILD
ON CHILD-Parent
ON CHILD->Child
ON CHILD-Parent after wait
I think the problem is with the waits, but I just can't figure out how to make this work. Any ideas? Thanks!
The problem is that you call pipe in the grandparent process. After the grandchild process (ls -a) exits, the parent process (sort -r) blocks indefinitely waiting to read more input from the pipe since some process - the grandparent - holds an open descriptor to the write end of the pipe.
If you close the pipe descriptors in the grandparent process, or better yet move the pipe call into the first forked process, then the sort process will terminate when the last process with an open descriptor for the write end of the pipe exits:
int main() {
// Turn off buffering of stdout, to help with debugging
setvbuf(stdout, NULL, _IONBF, 0);
printf("Shell> \n");
pid_t pid1 = fork();
if(pid1 < 0) {
perror("fork failed");
}
if(pid1 > 0) { // Parent
int status;
waitpid(pid1, &status, 0);
printf("\t\t------PID1 Complete (%d) -------\n\n", status);
} else { // Child
printf("ON CHILD\n");
int fd[2];
if(pipe(fd) < 0) {
perror("pipe failed");
return 1;
}
pid_t pid2 = fork();
if(pid2 < 0) {
perror("fork failed");
}
if(pid2 > 0) { // Child -> Parent
printf("ON CHILD-Parent\n");
close(fd[1]);
dup2(fd[0], STDIN_FILENO);
execlp("sort", "sort", "-r", NULL);
perror("Problem with execlp");
return 1;
} else { // Child -> Child
printf("ON CHILD->Child\n");
close(fd[0]);
dup2(fd[1], STDOUT_FILENO);
execlp("ls", "ls", "-a", NULL);
perror("Problem with execvp");
return 1;
}
}
printf("\nEnd of program.\n");
}
The other problem with the program is the one #nategoose commented on: the call to waitpid can deadlock if the output of "ls -a" is too large to fit in the pipe's buffer, because ls blocks writing to the full pipe while the process that will become sort is still stuck in waitpid and has not yet started reading. There's no reason to wait, so the waitpid call should simply be eliminated.
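For completeness, the first option mentioned above (leaving pipe() in the grandparent and closing both descriptors there before waiting) might look roughly like this sketch, with the inner waitpid removed as just discussed:
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int fd[2];
    if (pipe(fd) < 0) {            // pipe created in the grandparent, as in the question
        perror("pipe failed");
        return 1;
    }

    pid_t pid1 = fork();
    if (pid1 > 0) {                // grandparent
        close(fd[0]);              // close both pipe ends here, so only
        close(fd[1]);              // the two exec'ed processes hold them
        int status;
        waitpid(pid1, &status, 0);
        printf("\t\t------PID1 Complete-------\n\n");
    } else {                       // child
        pid_t pid2 = fork();
        if (pid2 > 0) {            // becomes sort -r (no waitpid here)
            close(fd[1]);
            dup2(fd[0], STDIN_FILENO);
            close(fd[0]);
            execlp("sort", "sort", "-r", NULL);
            perror("execlp sort");
            return 1;
        } else {                   // becomes ls -a
            close(fd[0]);
            dup2(fd[1], STDOUT_FILENO);
            close(fd[1]);
            execlp("ls", "ls", "-a", NULL);
            perror("execlp ls");
            return 1;
        }
    }
    return 0;
}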
This isn't a real answer, but I have some info that I'd like to share.
To make sure that your output comes out in the order it should, I'm flushing a lot more than you were. Remember that when you call functions like fork(), clone(), vfork(), dup(), dup2(), close(), or any of the exec() family, you are working BELOW the C runtime environment, which includes stdio. If you do:
printf("cat");
fork();
fflush(stdout);
You are very likely to get:
catcat
as your output, because fork() duplicated the stdout structure, including all buffered data; unless stdio happened to flush before the end of the printf call, "cat" is sitting in each process's stdout buffer.
There's also the fact that buffered data may not be flushed before an exec-family call replaces your program. When your program is replaced by ls or sort, any data still pending in stdout is lost forever.
Also, when you use dup or dup2 you have another issue: you are swapping the file descriptor out from under stdio, so data it has not yet flushed may end up being written to the new file after the dup.
Because of these things you should have a lot more calls to fflush, but I don't think that's your problem here.
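As a rough illustration of those points (a hypothetical snippet, not the asker's code), flushing stdout before fork() and again before redirecting fd 1 and calling exec avoids both the duplicated-buffer and the lost-buffer problems:
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int fd[2];
    if (pipe(fd) < 0) return 1;

    printf("cat");
    fflush(stdout);                 // flush BEFORE fork, so "cat" exists in only one buffer

    pid_t pid = fork();
    if (pid == 0)                   // child: becomes "ls -a" writing into the pipe
    {
        fflush(stdout);             // flush again before touching fd 1, so nothing
                                    // buffered is lost when exec replaces the program
        dup2(fd[1], STDOUT_FILENO);
        close(fd[0]);
        close(fd[1]);
        execlp("ls", "ls", "-a", (char *)NULL);
        _exit(1);
    }

    close(fd[1]);                   // parent: close the unused write end
    char buf[256];
    ssize_t n;
    while ((n = read(fd[0], buf, sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t)n, stdout);   // relay the child's output
    close(fd[0]);
    waitpid(pid, NULL, 0);
    return 0;
}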
I have created a daemon for Linux in C++; however, the child process does not seem to be doing anything. Once it reaches the if(pid > 0) statement, everything seems to stop.
The code for the Daemon.Start() is as follows:
//Process ID and Session ID
pid_t pid,sid;
//Fork off the Parent Process
pid = fork();
if(pid < 0)
exit(EXIT_FAILURE);
//If PID is good, then exit the Parent Process
if(pid > 0)
exit(EXIT_SUCCESS);
//Change the file mode mask
umask(0);
//Create a new SID for the Child Process
sid = setsid();
if(sid < 0)
{
exit(EXIT_FAILURE);
}
//Change the current working directory
if((chdir("/")) < 0)
{
//Log the failure
exit(EXIT_FAILURE);
}
//Close out the standard file descriptors
close(STDIN_FILENO);
close(STDOUT_FILENO);
close(STDERR_FILENO);
//The main loop.
Globals::LogError("Service started.");
while(true)
{
//The Service task
Globals::LogError("Service working.");
if(!SystemConfiguration::IsFirstRun() && !SystemConfiguration::GetMediaUpdateReady())
{
SyncServer();
}
sleep(SystemConfiguration::GetServerConnectionFrequency()); //Wait 30 seconds
}
exit(EXIT_SUCCESS);
Any help would be great! :)
I'm pretty sure your child process dies within either the sid < 0 or the chdir("/") < 0 if statement. Write to stderr in these cases before exit to reveal what the problem is:
//Create a new SID for the Child Process
sid = setsid();
if(sid < 0)
{
fprintf(stderr,"Failed to create SID: %s\n",strerror(errno));
exit(EXIT_FAILURE);
}
//Change the current working directory
int chdir_rv = chdir("/");
if(chdir_rv < 0)
{
fprintf(stderr,"Failed to chdir: %s\n",strerror(errno));
exit(EXIT_FAILURE);
}
You need to include <errno.h> and <string.h> in order to have errno and strerror defined (respectively).
Regards
I have something like this:
pipe
close(pipe[0]);
parent writes something to pipe
close(pipe[1]);
fork();
if(child)
{
close(pipe[1]);
child reads from pipe
close(pipe[0]);
child does some operations
child writes to pipe
close(pipe[1]);
}
else
{
back to parent
close(pipe[0]);
wait(&code);
parent tries to read what the terminated child just wrote but fails to do so
}
I'm not really sure what I can do to make the parent read from the terminated child. Do I need to make use of dup? I'm not very sure in what situations dup or dup2 is useful.
Writing and reading are done using the write() and read() functions.
I have to use pipes, not FIFOs or other means, to communicate between processes.
A sample from this article says:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
int main()
{
int fd[2];
pid_t childpid;
pipe(fd);
if((childpid = fork()) == -1)
{
perror("fork");
exit(1);
}
if(childpid == 0)
{
/* Child process closes up input side of pipe */
close(fd[0]);
}
else
{
/* Parent process closes up output side of pipe */
close(fd[1]);
}
.
.
}
IIRC that's the way to do it. The crucial thing is to close the unused fds in the parent and child processes.
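Applied to the exchange described in the question (parent writes, child reads and does some operation, parent reads the result after the child terminates), a minimal sketch might look like the following. Using two pipes, one per direction, is my own assumption, and the child's "operation" is just a placeholder echo.
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int to_child[2], to_parent[2];
    if (pipe(to_child) < 0 || pipe(to_parent) < 0) return 1;

    pid_t pid = fork();
    if (pid == 0)                               // child
    {
        close(to_child[1]);                     // unused write end
        close(to_parent[0]);                    // unused read end

        char buf[128];
        ssize_t n = read(to_child[0], buf, sizeof(buf));
        close(to_child[0]);

        if (n > 0)                              // placeholder operation: echo the data back
            write(to_parent[1], buf, (size_t)n);
        close(to_parent[1]);                    // parent's read() will see EOF after this
        _exit(0);
    }

    close(to_child[0]);                         // parent: close the unused ends
    close(to_parent[1]);

    const char *msg = "hello child\n";
    write(to_child[1], msg, strlen(msg));
    close(to_child[1]);                         // lets the child see EOF

    int code;
    wait(&code);                                // child has terminated...

    char buf[128];
    ssize_t n = read(to_parent[0], buf, sizeof(buf));   // ...but its data is still in the pipe
    if (n > 0)
        fwrite(buf, 1, (size_t)n, stdout);
    close(to_parent[0]);
    return 0;
}
The data a child wrote before exiting stays in the pipe, so the parent can still read it after wait(); what matters is that every unused descriptor, especially the parent's copy of each write end, gets closed.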
I think a FIFO suits your need, and I don't think you need to use dup either. Here is working code:
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/wait.h>
int main()
{
// The FIFO must exist before it is opened; create it here (or run "mkfifo fif" beforehand).
mkfifo("fif", 0666);
// Open the read end non-blocking so open() does not block waiting for a writer.
int e=open("fif",O_RDONLY|O_NONBLOCK);
if(fork()==0)
{
int d=open("fif",O_WRONLY);
write(d,"hi there\n",9);
close(d);
//sleep(5);
exit(0);
}
wait(NULL); /* wait() takes an int* argument; pass NULL if the status is not needed */
char buf[16];
int n=read(e,buf,sizeof(buf)-1);
if(n<0) n=0; /* nothing was read */
buf[n]=0;
printf("%s", buf);
close(e);
return 0;
}