I have the following C++ code, which writes "Line from #" to a file while managing a file lock. I am running this code on two different computers that share at least part of their file system, i.e. I can access my files by logging onto either computer.
On the first computer I run the program as ./test 1 (so it writes "Line from 1" 20,000 times) and on the second computer I run it as ./test 17. I start the programs close enough in time that the writes to file.txt should be interleaved and serialized by the file locks.
The problem is that I am losing output: the file has 22,770 newlines, but it should have exactly 40,000.
wc file.txt
22770 68310 276008 file.txt
Also,
cat -n file.txt | grep 18667
18667 ne from 17
My question is: why are my file locks not preventing the two processes from overwriting each other's output, and how can I fix my code so that multiple processes can write to the same file without losing data?
#include <unistd.h>
#include <fcntl.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <iostream>
using namespace std;
void inline Set_Lck(struct flock &flck, const int fd)
{
flck.l_type = F_WRLCK;
if (fcntl(fd, F_SETLKW, &flck) == -1) {
perror("fcntl");
exit(1);
}
}
void inline Release_Lck(struct flock &flck, const int fd)
{
flck.l_type = F_UNLCK;
if (fcntl(fd,F_SETLK,&flck) == -1) {
perror("fcntl");
exit(1);
}
}
void Print_Spec(fstream &fout, ostringstream &oss,struct flock &flck, const int fd)
{
Set_Lck(flck,fd);
fout.seekp(0,ios_base::end);
fout << oss.str() << endl;
flush(fout);
Release_Lck(flck,fd);
}
int main(int argc, char **argv)
{
int fd_cd;
struct flock flock_cd;
ostringstream oss;
fstream comp_data;
const string s_cd_lck = "file_lock.txt";
const string s_cd = "file.txt";
int my_id;
if (argc == 1) {
my_id = 0;
} else if (argc == 2) {
my_id = atoi(argv[1]);
} else {
fprintf(stderr,"error -- usage ./test [my_id]\n");
exit(1);
}
/* Open file.txt for appending; create it if it does not exist. */
comp_data.open(s_cd.c_str(),ios::app|ios::out);
if (comp_data.fail()) {
perror("comp_data.open");
exit(1);
}
/* Open file that we will be locking. */
fd_cd = open(s_cd_lck.c_str(),O_CREAT|O_WRONLY,0777);
if (fd_cd == -1) {
perror("fd_cd = open");
exit(1);
}
/* Set up the lock. */
flock_cd.l_type = F_WRLCK;
flock_cd.l_whence = SEEK_SET;
flock_cd.l_start = 0;
flock_cd.l_len = 0;
flock_cd.l_pid = getpid();
for (int i = 0; i < 20000; ++i) {
oss.str(""); /* Yes, this can be moved outside the loop. */
oss << "Line from " << my_id << endl;
Print_Spec(comp_data,oss,flock_cd,fd_cd);
}
return 0;
}
I am using C++ and this program is running on Red Hat Enterprise Linux Server release 7.2 (Maipo).
My Research
I am not sure if part of the answer comes from the following Stack Overflow post (https://stackoverflow.com/a/2059059/6417898), which states that "locks are bound to processes."
At this website (http://perl.plover.com/yak/flock/samples/slide005.html), the author advises against using LOCK_UN with flock and suggests closing the file each time and reopening it as needed, so as to flush the file buffer. I don't know whether this carries over to fcntl, or whether it is even necessary if I flush the file buffer manually.
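To make that idea concrete, here is a minimal sketch of what I understand the fcntl version to be: take the write lock on the data file itself and append through the same descriptor with write(2) before releasing the lock (the file name and the message are just placeholders):
#include <fcntl.h>
#include <unistd.h>
#include <cstring>
#include <cstdio>
#include <cstdlib>

// Append one line to the data file while holding an exclusive fcntl lock
// on the same descriptor that is written through.
void append_line(const char *line)
{
    int fd = open("file.txt", O_WRONLY | O_APPEND | O_CREAT, 0644);
    if (fd == -1) { perror("open"); exit(1); }

    struct flock lk = {};
    lk.l_type = F_WRLCK;    // exclusive lock on the whole file
    lk.l_whence = SEEK_SET;
    lk.l_start = 0;
    lk.l_len = 0;
    if (fcntl(fd, F_SETLKW, &lk) == -1) { perror("fcntl"); exit(1); }

    if (write(fd, line, strlen(line)) == -1) perror("write");

    lk.l_type = F_UNLCK;
    if (fcntl(fd, F_SETLK, &lk) == -1) { perror("fcntl"); exit(1); }
    close(fd);              // closing the descriptor also drops the lock
}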
I am trying to write a minimal program that uses pipe/fork/execlp.
So far so good. I am using execlp with bash -c, so if I run:
echo asd |./a.out cat
> asd
it works as expected.
But if I try to use anything that needs a TTY, it does not work.
For example, with ./a.out vim I get "Vim: Warning: Input is not from a terminal",
and the vim that opens does not work as expected.
I tried to find an example on the internet of how to open a TTY; the only one I found was:
http://www.danlj.org/lad/src/minopen.c
My code so far is:
#include <iostream>
#include <cstdio>
#include <string.h>
#include <cstdlib>
#include <unistd.h>
#include <sys/wait.h>
typedef struct pCon{
int fout[2];
int fin[2];
int fd[2];
int pid1, pid2;
} connectionManager;
std::string command = "";
/*
* Implementation
*/
void childFork(connectionManager *cm);
int main(int argc, char *argv[]) {
int size;
if(argc < 2) exit(1);
else command = argv[1];
connectionManager *cm = new connectionManager;
pipe(cm->fd);
if((cm->pid1 = fork()) == -1)exit(1);
if (cm->pid1 == 0)
{
const unsigned int RCVBUFSIZE = 2000;
char echoString[RCVBUFSIZE];
while((size = read(fileno(stdin),echoString,RCVBUFSIZE)) > 0)
write(cm->fd[1], echoString, size);
close(cm->fd[1]);
}
else
childFork(cm);
return 0;
}
void childFork(connectionManager *cm){
char *buffer = new char[2000];
int size;
close(cm->fd[1]);
dup2(cm->fd[0], 0);
close(cm->fd[0]);
pipe(cm->fout);
if((cm->pid2 = fork()) == -1)exit(1);
if (cm->pid2 == 0)
{
close(cm->fout[0]);
int returnCode = execlp("bash", "bash", "-c", command.c_str(), NULL);
if(returnCode!=0)
std::cerr << "Error starting the bash program" << std::endl;
}
else
{
close(cm->fout[1]);
while((size = read(cm->fout[0], buffer, 2000 )) > 0 )
write(fileno(stdout), buffer, size);
}
}
I tried to keep the code to the minimum necessary to make it work.
Is there any way to add TTY handling to this code? I know it does not seem to be a trivial task.
Can someone help me with that?
I also tried to open the tty and dup it, but no luck so far.
Try using a pseudo terminal. You can use openpty; for your purpose forkpty is convenient, since it combines openpty with fork. I've created a small example for you: about the same as your program, except that it works. I've kept it simple, so I don't handle the terminal control characters.
#include <pty.h>
#include <unistd.h>
#include <fcntl.h>
#include <termios.h>
#include <sys/select.h>
int main(int argc, char *argv[])
{
if (argc < 2) return 1; // need the program to run as an argument
int master;
pid_t pid = forkpty(&master, NULL, NULL, NULL); // openpty + login_tty + fork
if (pid < 0) {
return 1; // fork with pseudo terminal failed
}
else if (pid == 0) { // child
char *args[] = { argv[1], argv[2], NULL }; // prg + 1 argument
execvp(argv[1], args); // run the program given in first param
}
else { // parent
struct termios tios;
tcgetattr(master, &tios);
tios.c_lflag &= ~(ECHO | ECHONL);
tcsetattr(master, TCSAFLUSH, &tios);
while(1) {
fd_set read_fd, write_fd, err_fd;
FD_ZERO(&read_fd);
FD_ZERO(&write_fd);
FD_ZERO(&err_fd);
FD_SET(master, &read_fd);
FD_SET(STDIN_FILENO, &read_fd);
select(master+1, &read_fd, &write_fd, &err_fd, NULL);
if (FD_ISSET(master, &read_fd))
{
char ch;
ssize_t c = read(master, &ch, 1); // read from program
if (c > 0)
write(STDOUT_FILENO, &ch, c); // write to tty
else
break; // exit at end of the communication channel with the program
}
if (FD_ISSET(STDIN_FILENO, &read_fd))
{
char ch;
int c=read(STDIN_FILENO, &ch, 1); // read from tty
write(master, &ch, c); // write to program
}
}
}
return 0;
}
For compiling, link with -lutil.
While the program is running, a new tty device appears in /dev/pts.
vim accepts it as a terminal.
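If you ever need control characters to pass through cleanly (the example above deliberately does not handle them), one possible extension is to put the local stdin into raw mode while the child runs. This is only a sketch under that assumption; make_stdin_raw/restore_stdin are hypothetical helpers you would call around the select() loop:
#include <termios.h>
#include <unistd.h>

static struct termios saved_tios;            // original settings of the local terminal

void make_stdin_raw(void)
{
    struct termios raw;
    tcgetattr(STDIN_FILENO, &saved_tios);    // remember the current settings
    raw = saved_tios;
    cfmakeraw(&raw);                         // no canonical mode, no echo, no signal keys
    tcsetattr(STDIN_FILENO, TCSANOW, &raw);
}

void restore_stdin(void)
{
    tcsetattr(STDIN_FILENO, TCSANOW, &saved_tios);   // restore on exit
}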
For my school project I must write a C++ program. The program has to use applications that are already installed on the Linux OS. To get familiar with managing other processes from within a C++ application, I want to write a program that sets a wlan interface to monitor mode. The code I have written so far is pretty long and doesn't seem efficient. Are there any ways to make my code more compact and efficient? At the end of the program I want to execute iwconfig to check whether the wlan interface really is in monitor mode. What is the best way to do that?
#include <unistd.h>
#include <iostream>
#include <string>
#include <sys/types.h>
#include <sys/wait.h>
using namespace std;
int main(int argc, char *argv[])
{
string wlan = "wlan1";
pid_t ifconfigDown;
ifconfigDown = fork();
if(ifconfigDown == 0)//ifconfig
{
execl("/sbin/ifconfig", "ifconfig", wlan.c_str(), "down",(char*)0 );
}
else //parent
{
usleep(500000);
pid_t iwconfigMode;
iwconfigMode = fork();
if(iwconfigMode == 0)//iwconfig
{
execl("/sbin/iwconfig","iwconfig",wlan.c_str(),"mode","monitor",(char*)0 );
}
else//parent
{
usleep(500000);
pid_t ifconfigUp;
ifconfigUp = fork();
if(ifconfigUp == 0)//ifconfig
{
execl("/sbin/ifconfig", "ifconfig", wlan.c_str(), "up", (char*)0 );
}
else//parent
{
usleep(500000);
pid_t iwconfig;
iwconfig = fork();
if(iwconfig == 0)//iwconfig
{
execl("/sbin/iwconfig", "iwconfig", (char*)0 );
//check if wlan1 is in monitor mode
}
}
}
waitpid(-1, NULL, 0);
return 0;
}
}
#include <cstdio>   // popen, pclose, fgets
#include <string>
using std::string;

string rPopenEnd (string cmd)
{
FILE *fp = popen(cmd.c_str(), "r");
if (fp == NULL)
{
return "ERROR";
}
else
{
const size_t line_size = 20;
char line[line_size];
string result;
while (fgets(line, line_size, fp))
result += line;
pclose(fp);   // pclose, not wait(): it closes the stream and reaps the child
return result;
}
}
int main(int argc, char *argv[])
{
rPopenEnd("iwconfig");
}
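As a sketch of how the fork/exec ladder in the question could be flattened into a small helper (the command table, paths, and error handling here are only illustrative, not a drop-in replacement):
#include <unistd.h>
#include <sys/wait.h>
#include <cstdio>

// Run one command given as a NULL-terminated argv array and wait for it to finish.
static int run(const char *const argv[])
{
    pid_t pid = fork();
    if (pid == -1) { perror("fork"); return -1; }
    if (pid == 0) {
        execv(argv[0], const_cast<char *const *>(argv));
        perror("execv");                 // only reached if execv fails
        _exit(127);
    }
    int status = 0;
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main()
{
    const char *down[]    = {"/sbin/ifconfig", "wlan1", "down", nullptr};
    const char *monitor[] = {"/sbin/iwconfig", "wlan1", "mode", "monitor", nullptr};
    const char *up[]      = {"/sbin/ifconfig", "wlan1", "up", nullptr};

    if (run(down) != 0 || run(monitor) != 0 || run(up) != 0)
        fprintf(stderr, "one of the configuration steps failed\n");
    return 0;
}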
I have created the following test case to simulate the issue. I compiled the source code and was able to reproduce the problem.
1) When system() runs the command, it produces some console output (i.e. "your job submitted"), which is redirected with dup2 to a file named <pid>.stdout created by the child.
When I try to read this file (I need the job submission information), I do not get the data that appeared on the console; I only get the data that I wrote myself (which confirms the file operations work).
Can we not read the console output from the file created by the child process?
(Change rundir and cmd as appropriate.)
#include <string>
#include <vector>
#include <utility>
#include <sstream>
#include <fstream>
#include <iostream>
#include <iterator>
#include <algorithm>
#include <functional>
#include <map>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>   // fork, getpid, dup2
#include <cstring>    // strlen
#include <cstdlib>    // system
using namespace std;
int child();
int main()
{
string rundir = "/temp";
pid_t id =child();
string _xafile;
string _afile;
string _afilecmd;
string line;
stringstream ss(stringstream::in | stringstream::out);
ss<<int(id);
_xafile = rundir;
_xafile = _xafile + "/" + ss.str()+".stdout";
cout<<" _xafile is "<<_xafile<<endl;
string cmd ="cat "+ _xafile + ">" + rundir + "/" + "process.txt";
_afile = rundir;
_afile = _afile + "/" + "process.txt";
_afilecmd = "rm -rf "+ _afile;
system(cmd.c_str());
ifstream xafile(_afile.c_str());
while(xafile)
{
string word;
xafile >> word;
cout<<word<<" ";
/* try to read console output but did not read data */
}
system(_afilecmd.c_str());
return 0;
}
int child()
{
string rundir = "/tmp";
string cmd = " tool <input file> -o <outputfile>";
const char* std_out_file = "";   // empty: fall back to <pid>.stdout
pid_t pid = fork();
if (pid < 0) {
return -1;
}
if (pid == 0) {
pid_t mypid = getpid();
stringstream sm;
sm << rundir << "/";
if (strlen(std_out_file) == 0)
sm << mypid << ".stdout";
else
sm << std_out_file;
int fd = open(sm.str().c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
dup2(fd, 1);
dup2(fd, 2);
int fd2 = open("/dev/zero", O_RDONLY);
dup2(fd2, 0);
cout<<cmd <<endl<<endl;
// execl("/bin/sh", "/bin/sh", "-c", cmd, NULL);
system(cmd.c_str());
/* When system() runs, the tool prints console output such as "your job submitted", */
/* which is redirected to the file. */
exit(-1);
}
if (pid > 0)
return pid;
cout<<" child is done"<<endl;
return 0;
}
It's not entirely clear what your thinking is here - your code forks a child which does a bunch of fancy I/O work to redirect your application's stdout to a file, and then runs a command with system(). The command in your code has its own redirects, specifically via "-o", so it probably isn't writing to stdout at all.
Back in the parent process, you try to open for reading the same file your child process will open for writing. You've got no synchronization, so the opens could happen in either order. You also appear to be using "cat" to copy the file and then reading the stdout of that cat.
What I think you are actually trying to do is the C/C++ equivalent of Perl's
$foo = `ls -l /tmp`;
or something similar - to execute a command and capture the output.
A better way to do this would be to use pipes.
#include <iostream>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>   // system
#include <unistd.h>
enum { WritePipe = 1, ReadPipe = 0 };
int main(int argc, const char** argv)
{
int pipes[2]; // each process is going to have its own file descriptor.
if(pipe(pipes) != 0) {
perror("pipe failed");
return -1;
}
pid_t pid = fork();
if(pid == 0) {
// child
close(pipes[ReadPipe]); // close the parent pipe in our context.
// redirect stdout and stderr to the pipe.
dup2(pipes[WritePipe], STDOUT_FILENO);
dup2(pipes[WritePipe], STDERR_FILENO);
// close(STDERR_FILENO); // <- or just do this to close stderr.
int result = system("ls -ltr /etc");
close(pipes[WritePipe]);
return result;
}
if(pid < 0) {
perror("fork failed");
return -1;
}
// Parent process has launched the child.
close(pipes[WritePipe]);
char buffer[4097];
int bytesRead;
while((bytesRead = read(pipes[ReadPipe], buffer, sizeof(buffer) - 1)) > 0) {
buffer[bytesRead] = 0;
std::cout << buffer;
}
std::cout << std::endl;
close(pipes[ReadPipe]);
return 0;
}
I am looking for a way to get the output of a command when it is run from within a C++ program. I have looked at using the system() function, but that will just execute a command. Here's an example of what I'm looking for:
std::string result = system("./some_command");
I need to run an arbitrary command and get its output. I've looked at boost.org, but I have not found anything that will give me what I need.
#include <cstdio>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <array>
std::string exec(const char* cmd) {
std::array<char, 128> buffer;
std::string result;
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(cmd, "r"), pclose);
if (!pipe) {
throw std::runtime_error("popen() failed!");
}
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
result += buffer.data();
}
return result;
}
Pre-C++11 version:
#include <iostream>
#include <stdexcept>
#include <stdio.h>
#include <string>
std::string exec(const char* cmd) {
char buffer[128];
std::string result = "";
FILE* pipe = popen(cmd, "r");
if (!pipe) throw std::runtime_error("popen() failed!");
try {
while (fgets(buffer, sizeof buffer, pipe) != NULL) {
result += buffer;
}
} catch (...) {
pclose(pipe);
throw;
}
pclose(pipe);
return result;
}
Replace popen and pclose with _popen and _pclose for Windows.
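For illustration, the exec() helper above might be called like this (the command string is just an example):
#include <iostream>
#include <string>

int main()
{
    // exec() as defined above: run the command and print its captured stdout.
    std::string listing = exec("ls -l");
    std::cout << listing;
    return 0;
}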
Getting both stdout and stderr (and also writing to stdin, not shown here) is easy peasy with my pstreams header, which defines iostream classes that work like popen:
#include <pstream.h>
#include <string>
#include <iostream>
int main()
{
// run a process and create a streambuf that reads its stdout and stderr
redi::ipstream proc("./some_command", redi::pstreams::pstdout | redi::pstreams::pstderr);
std::string line;
// read child's stdout
while (std::getline(proc.out(), line))
std::cout << "stdout: " << line << '\n';
// if reading stdout stopped at EOF then reset the state:
if (proc.eof() && proc.fail())
proc.clear();
// read child's stderr
while (std::getline(proc.err(), line))
std::cout << "stderr: " << line << '\n';
}
For Windows, popen also works, but it opens up a console window - which quickly flashes over your UI application. If you want to be a professional, it's better to disable this "flashing" (especially if the end-user can cancel it).
So here is my own version for Windows:
(This code is partially recombined from ideas written in The Code Project and MSDN samples.)
#include <windows.h>
#include <atlstr.h>
//
// Execute a command and get the results. (Only standard output)
//
CStringA ExecCmd(
const wchar_t* cmd // [in] command to execute
)
{
CStringA strResult;
HANDLE hPipeRead, hPipeWrite;
SECURITY_ATTRIBUTES saAttr = {sizeof(SECURITY_ATTRIBUTES)};
saAttr.bInheritHandle = TRUE; // Pipe handles are inherited by child process.
saAttr.lpSecurityDescriptor = NULL;
// Create a pipe to get results from child's stdout.
if (!CreatePipe(&hPipeRead, &hPipeWrite, &saAttr, 0))
return strResult;
STARTUPINFOW si = {sizeof(STARTUPINFOW)};
si.dwFlags = STARTF_USESHOWWINDOW | STARTF_USESTDHANDLES;
si.hStdOutput = hPipeWrite;
si.hStdError = hPipeWrite;
si.wShowWindow = SW_HIDE; // Prevents cmd window from flashing.
// Requires STARTF_USESHOWWINDOW in dwFlags.
PROCESS_INFORMATION pi = { 0 };
BOOL fSuccess = CreateProcessW(NULL, (LPWSTR)cmd, NULL, NULL, TRUE, CREATE_NEW_CONSOLE, NULL, NULL, &si, &pi);
if (! fSuccess)
{
CloseHandle(hPipeWrite);
CloseHandle(hPipeRead);
return strResult;
}
bool bProcessEnded = false;
for (; !bProcessEnded ;)
{
// Give some timeslice (50 ms), so we won't waste 100% CPU.
bProcessEnded = WaitForSingleObject( pi.hProcess, 50) == WAIT_OBJECT_0;
// Even if process exited - we continue reading, if
// there is some data available over pipe.
for (;;)
{
char buf[1024];
DWORD dwRead = 0;
DWORD dwAvail = 0;
if (!::PeekNamedPipe(hPipeRead, NULL, 0, NULL, &dwAvail, NULL))
break;
if (!dwAvail) // No data available, return
break;
if (!::ReadFile(hPipeRead, buf, min(sizeof(buf) - 1, dwAvail), &dwRead, NULL) || !dwRead)
// Error, the child process might ended
break;
buf[dwRead] = 0;
strResult += buf;
}
} //for
CloseHandle(hPipeWrite);
CloseHandle(hPipeRead);
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
return strResult;
} //ExecCmd
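A hypothetical call site for ExecCmd might look like this (the command here is only an example):
#include <cstdio>

int main()
{
    // ExecCmd as defined above: returns whatever the command wrote to standard output.
    CStringA out = ExecCmd(L"ipconfig /all");
    printf("%s", (LPCSTR)out);
    return 0;
}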
I'd use popen() (++waqas).
But sometimes you need reading and writing...
It seems like nobody does things the hard way any more.
(Assuming a Unix/Linux/Mac environment, or perhaps Windows with a POSIX compatibility layer...)
// Note: the original snippet assumed some helper macros and headers; minimal
// definitions are added here so it compiles stand-alone.
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <string>
using namespace std;

#define ASSERT_IS(expected, actual)  do { if ((expected) != (actual)) { perror(#actual); exit(-1); } } while (0)
#define ASSERT_NOT(value, actual)    do { if ((value) == (actual))    { perror(#actual); exit(-1); } } while (0)
#define FAIL(msg)                    do { cerr << (msg) << endl; } while (0)

enum PIPE_FILE_DESCRIPTERS
{
READ_FD = 0,
WRITE_FD = 1
};
enum CONSTANTS
{
BUFFER_SIZE = 100
};
int
main()
{
int parentToChild[2];
int childToParent[2];
pid_t pid;
string dataReadFromChild;
char buffer[BUFFER_SIZE + 1];
ssize_t readResult;
int status;
ASSERT_IS(0, pipe(parentToChild));
ASSERT_IS(0, pipe(childToParent));
switch (pid = fork())
{
case -1:
FAIL("Fork failed");
exit(-1);
case 0: /* Child */
ASSERT_NOT(-1, dup2(parentToChild[READ_FD], STDIN_FILENO));
ASSERT_NOT(-1, dup2(childToParent[WRITE_FD], STDOUT_FILENO));
ASSERT_NOT(-1, dup2(childToParent[WRITE_FD], STDERR_FILENO));
ASSERT_IS(0, close(parentToChild [WRITE_FD]));
ASSERT_IS(0, close(childToParent [READ_FD]));
/* file, arg0, arg1, arg2, list terminator */
execlp("ls", "ls", "-al", "--color", (char*)NULL);
FAIL("This line should never be reached!!!");
exit(-1);
default: /* Parent */
cout << "Child " << pid << " process running..." << endl;
ASSERT_IS(0, close(parentToChild [READ_FD]));
ASSERT_IS(0, close(childToParent [WRITE_FD]));
while (true)
{
switch (readResult = read(childToParent[READ_FD],
buffer, BUFFER_SIZE))
{
case 0: /* End-of-File, or non-blocking read. */
cout << "End of file reached..." << endl
<< "Data received was ("
<< dataReadFromChild.size() << "): " << endl
<< dataReadFromChild << endl;
ASSERT_IS(pid, waitpid(pid, & status, 0));
cout << endl
<< "Child exit staus is: " << WEXITSTATUS(status) << endl
<< endl;
exit(0);
case -1:
if ((errno == EINTR) || (errno == EAGAIN))
{
errno = 0;
break;
}
else
{
FAIL("read() failed");
exit(-1);
}
default:
dataReadFromChild.append(buffer, readResult);
break;
}
} /* while (true) */
} /* switch (pid = fork())*/
}
You also might want to play around with select() and non-blocking reads.
fd_set readfds;
struct timeval timeout;
timeout.tv_sec = 0; /* Seconds */
timeout.tv_usec = 1000; /* Microseconds */
FD_ZERO(&readfds);
FD_SET(childToParent[READ_FD], &readfds);
switch (select (1 + childToParent[READ_FD], &readfds, (fd_set*)NULL, (fd_set*)NULL, & timeout))
{
case 0: /* Timeout expired */
break;
case -1:
if ((errno == EINTR) || (errno == EAGAIN))
{
errno = 0;
break;
}
else
{
FAIL("Select() Failed");
exit(-1);
}
case 1: /* We have input */
readResult = read(childToParent[READ_FD], buffer, BUFFER_SIZE);
// However you want to handle it...
break;
default:
FAIL("How did we see input on more than one file descriptor?");
exit(-1);
}
Two possible approaches:
I don't think popen() is part of the C++ standard (it's part of POSIX, from memory), but it's available on every UNIX I've worked with (and you seem to be targeting UNIX, since your command is ./some_command).
On the off-chance that popen() is not available, you can use system("./some_command >/tmp/some_command.out"); and then use the normal I/O functions to process the output file.
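A minimal sketch of that second approach, assuming the temporary path used above:
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Run the command, redirecting its output to a temporary file.
    std::system("./some_command > /tmp/some_command.out");

    // Read the whole file back into a string.
    std::ifstream in("/tmp/some_command.out");
    std::stringstream ss;
    ss << in.rdbuf();
    std::string output = ss.str();

    std::cout << output;
    return 0;
}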
The following might be a portable solution. It follows standards.
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <cstdio>   // std::tmpnam, std::remove
#include <sstream>
std::string ssystem (const char *command) {
char tmpname [L_tmpnam];
std::tmpnam ( tmpname );
std::string scommand = command;
std::string cmd = scommand + " >> " + tmpname;
std::system(cmd.c_str());
std::ifstream file(tmpname, std::ios::in | std::ios::binary );
std::string result;
if (file) {
char c;
while (file.get(c)) result.push_back(c);   // stop at end of file without appending EOF
file.close();
}
remove(tmpname);
return result;
}
// For Cygwin
int main(int argc, char *argv[])
{
std::string bash = "FILETWO=/cygdrive/c/*\nfor f in $FILETWO\ndo\necho \"$f\"\ndone ";
std::string in;
std::string s = ssystem(bash.c_str());
std::istringstream iss(s);
std::string line;
while (std::getline(iss, line))
{
std::cout << "LINE-> " + line + " length: " << line.length() << std::endl;
}
std::cin >> in;
return 0;
}
I couldn't figure out why popen/pclose is missing from Code::Blocks/MinGW. So I worked around the problem by using CreateProcess() and CreatePipe() instead.
Here's the solution that worked for me:
//C++11
#include <cstdio>
#include <iostream>
#include <windows.h>
#include <cstdint>
#include <deque>
#include <string>
#include <thread>
using namespace std;
int SystemCapture(
string CmdLine, //Command Line
string CmdRunDir, //set to '.' for current directory
string& ListStdOut, //Return List of StdOut
string& ListStdErr, //Return List of StdErr
uint32_t& RetCode) //Return Exit Code
{
int Success;
SECURITY_ATTRIBUTES security_attributes;
HANDLE stdout_rd = INVALID_HANDLE_VALUE;
HANDLE stdout_wr = INVALID_HANDLE_VALUE;
HANDLE stderr_rd = INVALID_HANDLE_VALUE;
HANDLE stderr_wr = INVALID_HANDLE_VALUE;
PROCESS_INFORMATION process_info;
STARTUPINFO startup_info;
thread stdout_thread;
thread stderr_thread;
security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
security_attributes.bInheritHandle = TRUE;
security_attributes.lpSecurityDescriptor = nullptr;
if (!CreatePipe(&stdout_rd, &stdout_wr, &security_attributes, 0) ||
!SetHandleInformation(stdout_rd, HANDLE_FLAG_INHERIT, 0)) {
return -1;
}
if (!CreatePipe(&stderr_rd, &stderr_wr, &security_attributes, 0) ||
!SetHandleInformation(stderr_rd, HANDLE_FLAG_INHERIT, 0)) {
if (stdout_rd != INVALID_HANDLE_VALUE) CloseHandle(stdout_rd);
if (stdout_wr != INVALID_HANDLE_VALUE) CloseHandle(stdout_wr);
return -2;
}
ZeroMemory(&process_info, sizeof(PROCESS_INFORMATION));
ZeroMemory(&startup_info, sizeof(STARTUPINFO));
startup_info.cb = sizeof(STARTUPINFO);
startup_info.hStdInput = 0;
startup_info.hStdOutput = stdout_wr;
startup_info.hStdError = stderr_wr;
if(stdout_rd || stderr_rd)
startup_info.dwFlags |= STARTF_USESTDHANDLES;
// Make a copy because CreateProcess needs to modify string buffer
char CmdLineStr[MAX_PATH];
strncpy(CmdLineStr, CmdLine.c_str(), MAX_PATH);
CmdLineStr[MAX_PATH-1] = 0;
Success = CreateProcess(
nullptr,
CmdLineStr,
nullptr,
nullptr,
TRUE,
0,
nullptr,
CmdRunDir.c_str(),
&startup_info,
&process_info
);
CloseHandle(stdout_wr);
CloseHandle(stderr_wr);
if(!Success) {
CloseHandle(process_info.hProcess);
CloseHandle(process_info.hThread);
CloseHandle(stdout_rd);
CloseHandle(stderr_rd);
return -4;
}
else {
CloseHandle(process_info.hThread);
}
if(stdout_rd) {
stdout_thread=thread([&]() {
DWORD n;
const size_t bufsize = 1000;
char buffer [bufsize];
for(;;) {
n = 0;
int Success = ReadFile(
stdout_rd,
buffer,
(DWORD)bufsize,
&n,
nullptr
);
printf("STDERR: Success:%d n:%d\n", Success, (int)n);
if(!Success || n == 0)
break;
string s(buffer, n);
printf("STDOUT:(%s)\n", s.c_str());
ListStdOut += s;
}
printf("STDOUT:BREAK!\n");
});
}
if(stderr_rd) {
stderr_thread=thread([&]() {
DWORD n;
const size_t bufsize = 1000;
char buffer [bufsize];
for(;;) {
n = 0;
int Success = ReadFile(
stderr_rd,
buffer,
(DWORD)bufsize,
&n,
nullptr
);
printf("STDERR: Success:%d n:%d\n", Success, (int)n);
if(!Success || n == 0)
break;
string s(buffer, n);
printf("STDERR:(%s)\n", s.c_str());
ListStdErr += s;
}
printf("STDERR:BREAK!\n");
});
}
WaitForSingleObject(process_info.hProcess, INFINITE);
if(!GetExitCodeProcess(process_info.hProcess, (DWORD*) &RetCode))
RetCode = -1;
CloseHandle(process_info.hProcess);
if(stdout_thread.joinable())
stdout_thread.join();
if(stderr_thread.joinable())
stderr_thread.join();
CloseHandle(stdout_rd);
CloseHandle(stderr_rd);
return 0;
}
int main()
{
int rc;
uint32_t RetCode;
string ListStdOut;
string ListStdErr;
cout << "STARTING.\n";
rc = SystemCapture(
"C:\\Windows\\System32\\ipconfig.exe", //Command Line
".", //CmdRunDir
ListStdOut, //Return List of StdOut
ListStdErr, //Return List of StdErr
RetCode //Return Exit Code
);
if (rc < 0) {
cout << "ERROR: SystemCapture\n";
}
cout << "STDOUT:\n";
cout << ListStdOut;
cout << "STDERR:\n";
cout << ListStdErr;
cout << "Finished.\n";
cout << "Press Enter to Continue";
cin.ignore();
return 0;
}
Take note that you can get the output by redirecting it to a file and then reading that file; this is shown in the documentation of std::system.
You can retrieve the exit code with the WEXITSTATUS macro.
int status = std::system("ls -l >test.txt"); // execute the UNIX command "ls -l >test.txt"
std::cout << std::ifstream("test.txt").rdbuf();
std::cout << "Exit code: " << WEXITSTATUS(status) << std::endl;
Assuming POSIX, simple code to capture stdout:
#include <sys/wait.h>
#include <unistd.h>
#include <cerrno>   // errno, EAGAIN, EINTR
#include <string>
#include <vector>
std::string qx(const std::vector<std::string>& args) {
int stdout_fds[2];
pipe(stdout_fds);
int stderr_fds[2];
pipe(stderr_fds);
const pid_t pid = fork();
if (!pid) {
close(stdout_fds[0]);
dup2(stdout_fds[1], 1);
close(stdout_fds[1]);
close(stderr_fds[0]);
dup2(stderr_fds[1], 2);
close(stderr_fds[1]);
std::vector<char*> vc(args.size() + 1, 0);
for (size_t i = 0; i < args.size(); ++i) {
vc[i] = const_cast<char*>(args[i].c_str());
}
execvp(vc[0], &vc[0]);
exit(0);
}
close(stdout_fds[1]);
std::string out;
const int buf_size = 4096;
char buffer[buf_size];
for (;;) {
const ssize_t r = read(stdout_fds[0], buffer, buf_size);
if (r > 0) {
out.append(buffer, r);
} else if (r == 0) {
break;   // EOF: the child closed its end of the pipe
} else if (errno != EAGAIN && errno != EINTR) {
break;   // real read error
}
}
close(stdout_fds[0]);
close(stderr_fds[1]);
close(stderr_fds[0]);
int r, status;
do {
r = waitpid(pid, &status, 0);
} while (r == -1 && errno == EINTR);
return out;
}
Code contributions are welcome for more functionality:
https://github.com/ericcurtin/execxx
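For illustration, calling the qx() helper above might look like this (the command and its arguments are only examples):
#include <iostream>
#include <string>

int main()
{
    // qx() as defined above: element 0 is the program, the rest are its arguments.
    std::string out = qx({"ls", "-l", "/tmp"});
    std::cout << out;
    return 0;
}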
You can get the output after running a script using a pipe. We use pipes when we want the output of the child process.
int my_func() {
int ch;   // int, so the EOF returned by fgetc() can be detected
FILE *fpipe;
FILE *copy_fp;
char *command = (char *)"/usr/bin/my_script my_arg";
copy_fp = fopen("/tmp/output_file_path", "w");
fpipe = (FILE *)popen(command, "r");
if (fpipe) {
while ((ch = fgetc(fpipe)) != EOF) {
fputc(ch, copy_fp);
}
}
else {
if (copy_fp) {
fprintf(copy_fp, "Sorry there was an error opening the file");
}
}
if (fpipe) pclose(fpipe);   // pclose() also reaps the child started by popen()
if (copy_fp) fclose(copy_fp);
return 0;
}
So here is how it works: the script you want to run goes in the command variable, together with the arguments your script takes (nothing if it takes no arguments), and the file where you want to capture the script's output is opened as copy_fp.
popen runs your script and exposes its output through fpipe, so you can then just copy everything from that stream to your output file.
In this way you can capture the output of child processes.
Another approach is to put the > redirection operator directly in the command. If everything is written to a file as the command runs, you won't have to copy anything.
In that case there isn't any need to use pipes; you can just use system(), and it will run the command and put its output in that file.
int my_func(){
char *command = (char *)"/usr/bin/my_script my_arg > /tmp/my_putput_file";
system(command);
printf("everything saved in my_output_file");
return 0;
}
You can read YoLinux Tutorial: Fork, Exec and Process control for more information.
The Command class below uses system("cmd > stdout 2> stderr") to provide the user with stdout and stderr, in addition to the exit code.
Test run:
./a.out 'ls .'
exit code: 0
stdout: HelloWorld
HelloWorld.c
HelloWorld.cpp
HelloWorld.dSYM
a.out
gcc_container.bash
linuxsys
macsys
test.sh
stderr:
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdio>    // std::remove
#include <cstdlib>   // mkdtemp (POSIX)
#include <unistd.h>
using namespace std;
class Command {
public:
Command() {
exit_code_ = -1;
}
int GetExitCode() { return exit_code_;}
string GetStdOutStr() {return stdout_str_;}
string GetStdErrStr() {return stderr_str_;}
int Run(const char* cmd) {
return Run(string(cmd));
}
/**
* @brief run a given command
*
* @param cmd: command string
* @return int: the exit code of running the command
*/
int Run(string cmd) {
// create temp files
char tmp_dir[] = "/tmp/stdir.XXXXXX";
mkdtemp(tmp_dir);
string stdout_file = string(tmp_dir) + "/stdout";
string stderr_file = string(tmp_dir) + "/stderr";
// execute the command "cmd > stdout_file 2> stderr_file"
string cli = cmd + " > " + stdout_file + " 2> " + stderr_file;
exit_code_ = system(cli.c_str());
exit_code_ = WEXITSTATUS(exit_code_);
stdout_str_ = File2Str(stdout_file);
stderr_str_ = File2Str(stderr_file);
// rid of the temp files
remove(stdout_file.c_str());
remove(stderr_file.c_str());
remove(tmp_dir);
return exit_code_;
}
private:
int exit_code_;
string stderr_str_;
string stdout_str_;
/**
* @brief read a file
*
* @param file_name: file path
* @return string: the contents of the file
*/
string File2Str(string file_name) {
ifstream file;
stringstream str_stream;
file.open(file_name);
if (file.is_open()) {
str_stream << file.rdbuf();
file.close();
}
return str_stream.str();
}
};
int main(int argc, const char* argv[]) {
Command command;
command.Run(argv[1]);
cout << "exit code: " << command.GetExitCode() << endl;
cout << "stdout: " << command.GetStdOutStr() << endl;
cout << "stderr: " << command.GetStdErrStr() << endl;
return command.GetExitCode();
}
C++ stream implementation of waqas's answer:
#include <istream>
#include <streambuf>
#include <array>
#include <cstdio>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <string>
class execbuf : public std::streambuf {
protected:
std::string output;
int_type underflow() override {
if (gptr() < egptr()) return traits_type::to_int_type(*gptr());
return traits_type::eof();
}
public:
execbuf(const char* command) {
std::array<char, 128> buffer;
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(command, "r"), pclose);
if (!pipe) {
throw std::runtime_error("popen() failed!");
}
while (fgets(buffer.data(), buffer.size(), pipe.get()) != nullptr) {
this->output += buffer.data();
}
setg((char*)this->output.data(), (char*)this->output.data(), (char*)(this->output.data() + this->output.size()));
}
};
class exec : public std::istream {
protected:
execbuf buffer;
public:
exec(const char* command) : std::istream(nullptr), buffer(command) {
this->rdbuf(&buffer);
}
};
This code captures all output sent to stdout. If you want to capture only stderr, then pass your command like this:
sh -c '<your-command>' 2>&1 > /dev/null
If you want to capture both stdout and stderr, then the command should be like this:
sh -c '<your-command>' 2>&1
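For illustration, the stream class above could then be used like this (the command, including the 2>&1 redirection just described, is only an example):
#include <iostream>
#include <string>

int main()
{
    // exec as defined above: read the merged stdout/stderr of the command line by line.
    exec cmd("sh -c 'ls -l /tmp' 2>&1");
    std::string line;
    while (std::getline(cmd, line))
        std::cout << line << '\n';
    return 0;
}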