I have an old C++ application running on OS X (10.10/Yosemite).
When I'm debugging the application I get an exception on the following lines of code:
// create pipe
int pipefd[2];
int piperet = pipe(pipefd);
if( piperet )
{
wcsncpy(errbuf, CEmpError::GetErrorText(CEmpError::ERR_SYSTEM, L"Can't create pipe for IPC.", errno).c_str(), errbuflen);
CEmpError::LogError(errbuf);
return CEmpError::ERR_SYSTEM; //= 115
}
So the application is running and executes these lines of code a few times. After a while piperet is -1. The errno error code is 25.
After some research, this means "Too many open files". Is there a workaround to close all these open files? Or is it possible to find out which files are left open?
When I type ulimit -a in Terminal I get:
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
file size (blocks, -f) unlimited
max locked memory (kbytes, -l) unlimited
max memory size (kbytes, -m) unlimited
open files (-n) 2560
pipe size (512 bytes, -p) 1
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 709
virtual memory (kbytes, -v) unlimited
I'm not a C++ pro, so here are the relevant lines of code. I would have guessed that all pipes or pipe FDs that are no longer needed get closed.
// create pipe
int pipefd[2];
int piperet = pipe(pipefd);
if( piperet )
{
wcsncpy(errbuf, CEmpError::GetErrorText(CEmpError::ERR_SYSTEM, L"Can't create pipe for IPC.", errno).c_str(), errbuflen);
CEmpError::LogError(errbuf);
return CEmpError::ERR_SYSTEM;
}
CEmpError *pError = 0;
// after transfer the execution bit could be reset, so set the rights back
chmod(args[0], S_IWUSR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH );
pid_t pid = fork();
if(pid == 0)
{ // child process
close(pipefd[0]); // close reading end
int fd = pipefd[1];
// redirect stdout and stderr to pipe
dup2(fd, STDOUT_FILENO);
dup2(fd, STDERR_FILENO);
close(fd); // not needed anymore
// execute setup.sh with the built argument list
execvp(args[0], (char**)args);
// if we ever reached this line the exec failed and we need to report error to parent process
// once we are in child process we will print the error into stdout of our child process
// and parent process will parse and return it to the caller.
char buf[128];
sprintf(buf, "setup.sh:ERROR:PI%03d",CEmpError::ERR_EXEC);
perror(buf);
// keep the process alive until the parent process got the error from the pipe and killed this child process
sleep(5);
return CEmpError::ERR_EXEC;
}
else if (pid > 0)
{ // parent process
delete[] args[0]; // release memory allocated to f.
delete[] args[3]; // release memory allocated to log f.
delete[] args[5]; // release memory allocated to pn
close(pipefd[1]);
pParser = new CPackageInstallerParser();
FILE* fp = fdopen(pipefd[0], "r");
/*int res = */setvbuf(fp, NULL, _IOLBF, 0);
try
{
pParser->ParseOutput(fp, statusCallback, statusContext, logFileName);
}
catch (CEmpError* pErr)
{
if (pErr->ErrorCode == CEmpError::ERR_EXEC)
kill(pid, SIGABRT); // the error is parsed kill the child process
pError = pErr;
}
catch (...)
{
// some exception from statusCallback
fclose(fp);
delete pParser;
pParser = NULL;
throw;
}
fclose(fp);
int stat;
// wait for the installation process to end.
waitpid(pid, &stat, 0);
if (WIFEXITED(stat) && WEXITSTATUS(stat) == 0 && pError == NULL)
{
// exited normally with code 0 (success)
// printf("Installed succesfully!\n");
// register succesful operation result
try
{
RegisterResult(operation);
}
catch (CEmpError* pErr)
{
pError = pErr;
}
}
else
{
if (pError == NULL) // no error was caught by parser
pError = new CEmpError(CEmpError::ERR_UNKNOWN);
//dumpError(stat);
}
}
else
pError = new CEmpError(CEmpError::ERR_FORK);
//clean up and exit
if (pParser != NULL)
delete pParser;
pParser = NULL;
int exitcode = 0;
if (pError != NULL)
{
exitcode = pError->ErrorCode;
wcsncpy(errbuf, pError->GetErrorText().c_str(), errbuflen);
pError->Log();
delete pError;
}
return exitcode;
You need to close the pipe FDs with close() when you no longer need them.
You're allowed to have 2560 open files per process, so you should close the other files and/or pipes when they are no longer needed.
It is always good advice to release resources when you're done with them.
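As an illustration, here is a minimal sketch (not your full routine; the return values are placeholders rather than the CEmpError codes). The important point is that every exit path closes both ends of the pipe; in particular, in the code above nothing closes either end if fork() fails, so repeated calls leak two descriptors each time. To see which descriptors a running process still holds on OS X, lsof -p <pid> lists them.
#include <unistd.h>

// Sketch only: both pipe ends are closed on every path, so repeated
// calls cannot accumulate descriptors.
int spawn_child()
{
    int pipefd[2];
    if (pipe(pipefd) == -1)
        return -1;                 // nothing is open yet, nothing to close

    pid_t pid = fork();
    if (pid < 0) {                 // fork failed: close both ends or they leak
        close(pipefd[0]);
        close(pipefd[1]);
        return -1;
    }
    if (pid == 0) {                // child: keep only the write end
        close(pipefd[0]);
        // ... dup2() to stdout/stderr and execvp() as in the original ...
        _exit(127);
    }
    close(pipefd[1]);              // parent: keep only the read end
    // ... read the child's output from pipefd[0] ...
    close(pipefd[0]);              // or fclose() the FILE* that wraps it
    return 0;
}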
To expand on this, by chain piping I am referring to when I have 3 separate processes:
process 1 writes to process 2,
process 2 reads from process 1 and writes to process 3,
process 3 reads from process 2 and then finishes.
I am specifically trying to handle complex commands in a bash-like shell written in C++. I would be using this to execute a set of commands that all communicate with each other, like this:
ls | sort | grep "exit"
where process 1 is executing ls and its stdout is written to process 2 through a pipe, etc.
I am already writing code to solve this for a project and was just wondering whether my approach is correct. Right now, with just a two-command call of ls | grep "exit", I am getting the error "grep: (standard input): Bad file descriptor"
//Block for when the userInput is a complex command
else{
if (debug)
printf("Complex command: %s\n", userInput.c_str());
vector<char*> commandsVect = splitCString(const_cast<char*>(userInput.c_str()), const_cast<char*>( delimVertPipe.c_str()));
if (debug)
printVect(commandsVect);
if (pipe(fileDescriptor) == -1){
fprintf(stderr, "Pipe failed for command %s\n", userInput.c_str());
return 1;
}
for (int i = 0; i < commandsVect.size(); ++i) {
vector<char*> tokens = splitCString(const_cast<char*>(commandsVect[i]), const_cast<char*>( delimSpace.c_str()));
printf("Commands vect size is %ld\n", commandsVect.size());
printf("Parsing command \'%s\'\n", commandsVect[i]);
if (debug) {
printVect(tokens);
}
procID = fork();
//Block for the first command
if (i == 0){
if (procID < 0){
fprintf(stderr, "Fork number %d in the complex command \'%s\' failed\n", i+1, userInput.c_str());
return 1;
}
//Child process
else if (procID == 0){
//close(fileDescriptor[READ_END]);
close(STDOUT_FILENO);
//Links the write end of the pipe to the STDOUT
dup2(fileDescriptor[WRITE_END], 1);
close(fileDescriptor[READ_END]);
close(fileDescriptor[WRITE_END]);
tokens.push_back(nullptr); //execvp() arg array needs a NULL pointer at the end
if ( execvp(tokens[0], tokens.data()) < 0 ) {
fprintf( stderr, "execvp() call failed for the command \'%s\' inside the input string \'%s\'\n", commandsVect[i], userInput.c_str() );
return 1;
}
exit(1);
}
//Parent process
else{
close(fileDescriptor[READ_END]);
close(fileDescriptor[WRITE_END]);
wait(NULL);
}
}
//Block for the very last command, which will pipe input from the previous
else if (i == commandsVect.size() - 1){
if (procID < 0){
fprintf(stderr, "Fork number %d in the complex command \'%s\' failed\n", i+1, userInput.c_str());
return 1;
}
//Child process
else if (procID == 0){
//close(fileDescriptor[WRITE_END]);
close(STDIN_FILENO);
//Links the read end of the pipe to the STDIN
dup2(fileDescriptor[READ_END], 0);
close(fileDescriptor[WRITE_END]);
close(fileDescriptor[READ_END]);
tokens.push_back(nullptr); //execvp() arg array needs a NULL pointer at the end
if ( execvp(tokens[0], tokens.data()) < 0 ) {
fprintf( stderr, "execvp() call failed for the command \'%s\' inside the input string \'%s\'\n", commandsVect[i], userInput.c_str() );
return 1;
}
exit(1);
}
//Parent process
else{
close(fileDescriptor[READ_END]);
close(fileDescriptor[WRITE_END]);
wait(NULL);
}
}
//To note for StackOverflow, this block of code is never executed since I am only ever calling a 2 chained command like ls|grep "exit"
//Block for the middle commands. (Will pipe input from previous, and output to the next)
else{
printf("GOING THROUGH BAD CODE");
continue;
if (procID < 0){
fprintf(stderr, "Fork number %d in the complex command \'%s\' failed\n", i+1, userInput.c_str());
return 1;
}
//Child process
else if (procID == 0){
exit(1);
}
//Parent process
else{
wait(NULL);
}
}
}
close(fileDescriptor[READ_END]);
close(fileDescriptor[WRITE_END]);
}
This might not be possible with your larger application, but you could simplify things by letting the shell manage the pipes. Write P1 (process one), P2, and P3 as three separate executables. Instead of doing I/O on pipes, each program could read from stdin and write to stdout. Simple. To execute, let bash or whatever shell you use glue the three together by calling them as...
$P1 | P2 | P3;
Under the hood, your shell is doing pretty much what you're doing in C++ (only successfully 😉). It creates a pipe for P1, which becomes P1's stdout when the shell forks and execs it. P2 gets an input pipe and an output pipe, bound to its stdin and stdout in the same way before the exec that launches it. P3 gets only a stdin pipe, and its stdout stream goes right to the console as normal. It's not quite as sexy as doing it all in C++, but it's very robust - pretty much guaranteed to work.
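If you do want to keep it inside your own C++ shell, here is a hedged sketch of that same under-the-hood scheme (the function and variable names are my own, not from the question): one pipe per adjacent pair of commands, every child closes all the pipe ends it does not use, and the parent closes everything and waits only after all stages have been forked.
#include <sys/wait.h>
#include <unistd.h>
#include <vector>

// Sketch: run cmds[0] | cmds[1] | ... | cmds[n-1].
// Each cmds[i] is a NULL-terminated argv array for execvp().
int run_pipeline(std::vector<std::vector<char*>> cmds)
{
    size_t n = cmds.size();
    std::vector<int> fds;                          // 2*(n-1) pipe descriptors
    for (size_t i = 0; i + 1 < n; ++i) {
        int p[2];
        if (pipe(p) == -1) return -1;
        fds.push_back(p[0]);                       // read end of pipe i
        fds.push_back(p[1]);                       // write end of pipe i
    }
    for (size_t i = 0; i < n; ++i) {
        pid_t pid = fork();
        if (pid < 0) return -1;
        if (pid == 0) {                            // child for stage i
            if (i > 0)     dup2(fds[2 * (i - 1)], STDIN_FILENO);   // read end of previous pipe
            if (i + 1 < n) dup2(fds[2 * i + 1], STDOUT_FILENO);    // write end of next pipe
            for (int fd : fds) close(fd);          // close every pipe FD in the child
            execvp(cmds[i][0], cmds[i].data());
            _exit(127);                            // exec failed
        }
        // parent: do NOT wait here; keep forking the next stage
    }
    for (int fd : fds) close(fd);                  // parent uses none of the pipes
    for (size_t i = 0; i < n; ++i) wait(nullptr);  // reap all stages at the end
    return 0;
}
The main differences from the question's code are that wait() is not called between the forks, and the single shared fileDescriptor pair is replaced by one pipe per adjacent pair of commands.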
In my program, when I was trying to close the master file descriptor, the program suddenly crashed and I haven't seen any core dump. Could someone help me with this? I am providing the code that I have used. This is code I copied from the internet (http://www.rkoucha.fr/tech_corner/pty_pdip.html); the only difference is that instead of fork I spawn a thread. I know I am missing some small detail. Could someone please shed some light?
Thanks in advance!!!
int ScalingCommandReceiver::execute_ptcoi_commands_sequence(const char * bc_name, std::vector<cmd_output_pair>& cmd_seq, std::string& output_str)
{
int fdm, fds;
int rc;
output_str.clear();
fdm = posix_openpt(O_RDWR);
if (fdm < 0)
{
output_str.append("Error on posix_openpt() \n");
return -1;
}
rc = grantpt(fdm);
if (rc != 0)
{
output_str.append("Error on grantpt() \n");
close(fdm);
return -1;
}
rc = unlockpt(fdm);
if (rc != 0)
{
output_str.append("Error on unlockpt() \n");
close(fdm);
return -1;
}
// Open the slave side of the PTY
fds = open(ptsname(fdm), O_RDWR);
if (fds < 0)
{
output_str.append("Error on posix_openpt() \n");
close(fdm);
return -1;
}
std::string cp_name ("bc3");
pt_session_struct *file_refs = NULL;
file_refs = (pt_session_struct*) ::malloc(sizeof(pt_session_struct));
if (file_refs == NULL) {
output_str.append("ERROR: Failed to create the struct info for the thread! \n");
close(fdm);
close(fds);
return -1;
}
file_refs->fds = fds;
file_refs->cp_name = (char*)bc_name;
//Spawn a thread
if (ACE_Thread::spawn(ptcoi_command_thread, file_refs, THR_DETACHED) < 0) {
output_str.append("ERROR: Failed to start ptcoi_command_thread thread! \n");
close(fdm);
close(fds);
::free(file_refs);
return -1;
}
int i = 0;
while (i <= cmd_seq_dim)
{
char buffer[4096] = {'\0'};
ssize_t bytes_read = 0;
int read_res = 0;
do
{
// get the output in buffer
if((read_res = read(fdm, (buffer + bytes_read), sizeof(buffer))) > 0)
{
// The number of bytes read is returned and the file position is advanced by this number.
// Let's advance also buffer position.
bytes_read += read_res;
}
}
while((read_res > 0) && !strchr(buffer, cpt_prompt) && (std::string(buffer).find(ptcoi_warning) == std::string::npos));
if (bytes_read > 0) // No error
{
// Send data on standard output or wherever you want
//Do some operations here
}
else
{
output_str.append("\nFailed to read from master PTY \n");
}
if(i < cmd_seq_dim)
{
// Send data on the master side of PTY
write(fdm, cmd_seq[i].first.c_str(), cmd_seq[i].first.length());
}
++i;
} // End while
if(/*have some internal condition*/)
{
close(fdm); //Here I observe the crash :-(
return 0; // OK
}
else
{
output_str.append ("\nCPT printouts not expected.\n");
close(fdm);
return -1; // Failure
}
close(fdm);
return 0; // OK
}
ACE_THR_FUNC_RETURN ScalingCommandReceiver::ptcoi_command_thread(void* ptrParam)
{
pt_session_struct* fd_list = (pt_session_struct*) ptrParam;
struct termios slave_orig_term_settings; // Saved terminal settings
struct termios new_term_settings; // Current terminal settings
int fds = fd_list->fds;
char* cp_name = fd_list->cp_name;
::free (fd_list);
// Save the defaults parameters of the slave side of the PTY
tcgetattr(fds, &slave_orig_term_settings);
// Set RAW mode on slave side of PTY
new_term_settings = slave_orig_term_settings;
cfmakeraw (&new_term_settings);
tcsetattr (fds, TCSANOW, &new_term_settings);
int stdinCopy, stdoutCopy, stdErr;
stdinCopy = dup (0);
stdoutCopy = dup (1);
stdErr = dup (2);
// The slave side of the PTY becomes the standard input and outputs of the child process
close(0); // Close standard input (current terminal)
close(1); // Close standard output (current terminal)
close(2); // Close standard error (current terminal)
dup(fds); // PTY becomes standard input (0)
dup(fds); // PTY becomes standard output (1)
dup(fds); // PTY becomes standard error (2)
// Now the original file descriptor is useless
close(fds);
// Make the current process a new session leader
//setsid();
// As the child is a session leader, set the controlling terminal to be the slave side of the PTY
// (Mandatory for programs like the shell to make them manage correctly their outputs)
ioctl(0, TIOCSCTTY, 1);
// Execution of the program
char PTCOI [64] = {0};
snprintf(PTCOI, sizeof(PTCOI), "/opt/ap/mas/bin/mas_cptaspmml PTCOI -cp %s -echo 7", cp_name);
system(PTCOI); //my command
close(0); // Close standard input (current terminal)
close(1); // Close standard output (current terminal)
close(2); // Close standard error (current terminal)
dup2 (stdinCopy, 0);
dup2 (stdoutCopy, 1);
dup2 (stdErr, 2);
close (stdinCopy);
close (stdoutCopy);
close (stdErr);
return 0;
}
The code you copied (now running in ptcoi_command_thread) contains steps necessary to daemonize your process:
// The slave side of the PTY becomes the standard input and outputs of the child process
close(0); // Close standard input (current terminal)
close(1); // Close standard output (current terminal)
close(2); // Close standard error (current terminal)
. . .
This means the fork and setsid were there to detach from the controlling terminal, so that your process can survive beyond your terminal session.
After you removed the fork, your process remains associated with the controlling terminal and probably terminates when the terminal sends a SIGHUP on close.
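For reference, a minimal sketch of the detach step the original example relied on (this is not your PTY logic, just the fork/setsid part the answer describes):
#include <stdlib.h>
#include <unistd.h>

// Sketch: the child leaves the terminal's session via setsid(), so a
// SIGHUP sent when the terminal closes no longer reaches it.
static void detach_from_terminal(void)
{
    pid_t pid = fork();
    if (pid < 0)
        exit(1);        // fork failed
    if (pid > 0)
        exit(0);        // parent exits; the child continues detached
    setsid();           // child becomes a new session leader with no controlling TTY
    // ... only now make the PTY slave the controlling terminal (TIOCSCTTY) ...
}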
I am developing an application that reads data from a named pipe on Windows 7 at around 800 Mbps. I have to develop it with several threads since the FIFO at the other side of the pipe overflows if I am not able to read at the given speed. The performance, though, is really pitiful and I cannot understand why. I have already read several things and tried to split the memory to avoid false sharing.
At the beginning I was thinking it could be a problem with contiguous memory positions, but the memory sections are queued in a list and the main thread does not use them any more after queueing them. The amounts of memory are huge, so I don't think they lie on the same pages or anything like that.
This is the threaded function:
void splitMessage(){
char* bufferMSEO;
char* bufferMDO;
std::list<struct msgBufferStr*> localBufferList;
while(1)
{
long bytesProcessed = 0;
{
std::unique_lock<std::mutex> lk(bufferMutex);
while(bufferList.empty())
{
// Wait until the map has data
listReady.wait(lk);
}
//Extract the data from the list and copy to the local list
localBufferList.splice(localBufferList.end(),bufferList);
//Unlock the mutex and notify
// Manual unlocking is done before notifying, to avoid waking up
// the waiting thread only to block again (see notify_one for details)
lk.unlock();
//listReady.notify_one();
}
for(auto nextBuffer = localBufferList.begin(); nextBuffer != localBufferList.end(); nextBuffer++)
{
//nextBuffer = it->second();
bufferMDO = (*nextBuffer)->MDO;
bufferMSEO = (*nextBuffer)->MSEO;
bytesProcessed += (*nextBuffer)->size;
//Process the data Stream
for(int k=0; k<(*nextBuffer)->size; k++)
{
}
//localBufferList.remove(*nextBuffer);
free(bufferMDO);
free(bufferMSEO);
free(*nextBuffer);
}
localBufferList.clear();
}
}
And here the thread that reads the data and queue them:
DWORD WINAPI InstanceThread(LPVOID lpvParam)
// This routine is a thread processing function to read from and reply to a client
// via the open pipe connection passed from the main loop. Note this allows
// the main loop to continue executing, potentially creating more threads of
// of this procedure to run concurrently, depending on the number of incoming
// client connections.
{
HANDLE hHeap = GetProcessHeap();
TCHAR* pchRequest = (TCHAR*)HeapAlloc(hHeap, 0, BUFSIZE*sizeof(TCHAR));
DWORD cbBytesRead = 0, cbReplyBytes = 0, cbWritten = 0;
BOOL fSuccess = FALSE;
HANDLE hPipe = NULL;
double totalRxData = 0;
char* bufferPnt;
char* bufferMDO;
char* bufferMSEO;
char* destPnt;
// Do some extra error checking since the app will keep running even if this
// thread fails.
if (lpvParam == NULL)
{
printf( "\nERROR - Pipe Server Failure:\n");
printf( " InstanceThread got an unexpected NULL value in lpvParam.\n");
printf( " InstanceThread exitting.\n");
if (pchRequest != NULL) HeapFree(hHeap, 0, pchRequest);
return (DWORD)-1;
}
if (pchRequest == NULL)
{
printf( "\nERROR - Pipe Server Failure:\n");
printf( " InstanceThread got an unexpected NULL heap allocation.\n");
printf( " InstanceThread exitting.\n");
return (DWORD)-1;
}
// Print verbose messages. In production code, this should be for debugging only.
printf("InstanceThread created, receiving and processing messages.\n");
// The thread's parameter is a handle to a pipe object instance.
hPipe = (HANDLE) lpvParam;
try
{
msgSplitter = std::thread(&splitMessage);
//msgSplitter.detach();
}
catch(...)
{
_tprintf(TEXT("CreateThread failed, GLE=%d.\n"), GetLastError());
return -1;
}
while (1)
{
struct msgBufferStr *newBuffer = (struct msgBufferStr* )malloc(sizeof(struct msgBufferStr));
// Read client requests from the pipe. This simplistic code only allows messages
// up to BUFSIZE characters in length.
fSuccess = ReadFile(
hPipe, // handle to pipe
pchRequest, // buffer to receive data
BUFSIZE*sizeof(TCHAR), // size of buffer
&cbBytesRead, // number of bytes read
NULL); // not overlapped I/O
if (!fSuccess || cbBytesRead == 0)
{
if (GetLastError() == ERROR_BROKEN_PIPE)
{
_tprintf(TEXT("InstanceThread: client disconnected.\n"), GetLastError());
break;
}
else if (GetLastError() == ERROR_MORE_DATA)
{
}
else
{
_tprintf(TEXT("InstanceThread ReadFile failed, GLE=%d.\n"), GetLastError());
}
}
//timeStart = omp_get_wtime();
bufferPnt = (char*)pchRequest;
totalRxData += ((double)cbBytesRead)/1000000;
bufferMDO = (char*) malloc(cbBytesRead);
bufferMSEO = (char*) malloc(cbBytesRead/3);
destPnt = bufferMDO;
//#pragma omp parallel for
for(int i = 0; i < cbBytesRead/12; i++)
{
msgCounter++;
if(*(bufferPnt + (i * 12)) == 0) continue;
if(*(bufferPnt + (i * 12)) == 8)
{
errorCounter++;
continue;
}
//Use 64 bits variables in order to make less operations
unsigned long long *sourceAddrLong = (unsigned long long*) (bufferPnt + (i * 12));
unsigned long long *destPntLong = (unsigned long long*) (destPnt + (i * 8));
//Copy the data bytes from source to destination
*destPntLong = *sourceAddrLong;
//Copy and prepare the MSEO lines for the data processing
bufferMSEO[i*4]=(bufferPnt[(i * 12) + 8] & 0x03);
bufferMSEO[i*4 + 1]=(bufferPnt[(i * 12) + 8] & 0x0C) >> 2;
bufferMSEO[i*4 + 2]=(bufferPnt[(i * 12) + 8] & 0x30) >> 4;
bufferMSEO[i*4 + 3]=(bufferPnt[(i * 12) + 8] & 0xC0) >> 6;
}
newBuffer->size = cbBytesRead/3;
newBuffer->MDO = bufferMDO;
newBuffer->MSEO = bufferMSEO;
{
//lock the mutex
std::lock_guard<std::mutex> lk(bufferMutex);
//add data to the list
bufferList.push_back(newBuffer);
} // bufferMutex is automatically released when lk goes out of scope
//Notify
listReady.notify_one();
}
// Flush the pipe to allow the client to read the pipe's contents
// before disconnecting. Then disconnect the pipe, and close the
// handle to this pipe instance.
FlushFileBuffers(hPipe);
DisconnectNamedPipe(hPipe);
CloseHandle(hPipe);
HeapFree(hHeap, 0, pchRequest);
//Show memory leak isues
_CrtDumpMemoryLeaks();
//TODO: Join thread
printf("InstanceThread exitting.\n");
return 1;
}
The thing that really blows my mind is that if I leave it like this, the splitMessage thread takes minutes to read the data even though the first thread finished reading the data long ago. I mean, the read thread reads about 1.5 GB of information in seconds and then waits for more data from the pipe, while this data is processed by the split thread (the only one really "doing" something) over almost one minute or more. Moreover, the CPU is at less than 20% usage. (It is an i7 laptop with 16 GB RAM and 8 cores!)
On the other hand, if I just comment out the for loop in the processing thread:
for(int k=0; k<(*nextBuffer)->size; k++)
Then the data is read slowly and the FIFO on the other side of the pipe overflows. With 8 processors at more than 2 GHz it should be fast enough to go through the buffers without many problems, shouldn't it? I think it has to be a memory-access issue, or the scheduler is somehow sending the thread to sleep, but I cannot figure out why! Another possibility is that iterating through the linked list with the iterator is not optimal.
Any help would be great, because I have been trying to understand this for a couple of days; I have made several changes to the code and tried to simplify it as much as possible, and I am going crazy :).
best regards,
Manuel
I'm a total newbie in the Unix environment and I faced some problems with a plain example from the Unix Systems Programming book by Robbins.
It's a plain chain of processes, and each process prints some info to a log file and to stderr.
#define BUFSIZE 1024
#define CREATE_FLAGS (O_WRONLY | O_CREAT | O_APPEND)
#define CREATE_PERMS (S_IRUSR | S_IWUSR| S_IRGRP | S_IROTH)
int main (int argc, char *argv[]) {
char buf[BUFSIZE];
pid_t childpid = 0;
int i, n;
if (argc != 3){ /* check for valid number of command-line arguments */
fprintf (stderr, "Usage: %s processes filename\n", argv[0]);
return 1;
}
/* open the log file before the fork */
n = atoi(argv[1]); /* create a process chain */
for (i = 1; i < n; i++)
if (childpid = fork())
break;
if (childpid == -1) {
fprintf(stderr, "Failed to fork");
return 1;
}
auto fd = open(argv[2], CREATE_FLAGS, CREATE_PERMS);
if (fd < 0) {
fprintf(stderr,"Failed to open file");
return 1;
}
sprintf(buf, "i:%d process:%ld parent:%ld child:%ld\n",
i, (long)getpid(), (long)getppid(), (long)childpid);
fprintf(stderr, buf);
write(fd, buf, strlen(buf));
return 0;
}
It's compiled on Netbeans 7.1 with g++ 4.7 and run command is "${OUTPUT_PATH}" 10 /home/maxim/testlog.log
So the problems are:
When I run or debug the project it prints out only 2 or 3 lines of info in both the console and the file. But if I step over childpid = fork() with "Step Over", it prints info about all 10 processes. Is that some compiler optimization or just my fault?
Even when it prints all lines, the output looks like
i:2 process:6571 parent:6566 child:6572
i:3 process:6572 parent:1 child:6573
i:4 process:6573 parent:6572 child:6574
...
i:9 process:6578 parent:1 child:6579
i:10 process:6579 parent:6578 child:0
Parent pid values for some processes are 1, which seems to be wrong
If the processes each open the same output file, there will be a race condition causing the processes to overwrite each other. That is why it only happens when you run at full speed.
When the parent process ends, any children that are still alive are either killed or given a new parent, depending on a setting in Linux. In your case they seem to get a new parent, and that new parent is process 1.
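To illustrate the second point, here is a hedged sketch (not the book's exact code) of the same chain where each process waits for its own child before exiting; the parents then stay alive, and getppid() never reports 1:
#include <sys/wait.h>
#include <unistd.h>

// Sketch: each process in the chain waits for its own child before exiting,
// so no child is reparented to init (PID 1) while it is still printing.
int main()
{
    pid_t childpid = 0;
    int i;
    for (i = 1; i < 10; i++)
        if ((childpid = fork()) != 0)
            break;                      // parent leaves the loop; child keeps forking
    // ... print i, getpid(), getppid(), childpid as in the original ...
    if (childpid > 0)
        waitpid(childpid, nullptr, 0);  // keep this parent alive until its child is done
    return 0;
}
Because process i only returns after process i+1 has finished, every child still has its real parent alive when it calls getppid().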
I wrote a UNIX daemon (targeting Debian, but it shouldn't matter) and I wanted to provide some way of creating a ".pid" file, (a file which contains the process identifier of the daemon).
I searched for a way of opening a file only if it doesn't exist, but couldn't find one.
Basically, I could do something like:
if (fileexists())
{
//fail...
}
else
{
//create it with fopen() or similar
}
But as it stands, this code does not perform the task in an atomic fashion, and that would be dangerous, because another process might create the file between my test and the file creation.
Do you guys have any idea on how to do that?
Thank you.
P.S: Bonus point for a solution which only involves std::streams.
man 2 open:
O_EXCL Ensure that this call creates the file: if this flag is specified in conjunction with O_CREAT, and pathname already exists, then open() will fail. The behavior of O_EXCL is undefined if O_CREAT is not specified.
So you could call fd = open(name, O_CREAT | O_EXCL, 0644); /* open() is atomic (for a reason). */
UPDATE: and you should of course OR one of the O_RDONLY, O_WRONLY, or O_RDWR flags into the flags argument.
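Put together for a pid file, a minimal sketch (the path and function name are placeholders, not from the question):
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// Sketch: create the pid file atomically. open() fails with EEXIST if the
// file already exists, so only one instance ever gets to write its pid.
bool write_pidfile(const char* path /* e.g. "/var/run/mydaemon.pid" */)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0644);
    if (fd == -1)
        return false;                          // already exists, or other error
    char buf[32];
    int len = snprintf(buf, sizeof(buf), "%ld\n", (long)getpid());
    bool ok = (write(fd, buf, len) == len);
    close(fd);
    return ok;
}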
I learned about proper daemonizing here (back in the day):
http://www.enderunix.org/docs/eng/daemon.php
It is a good read. I have since improved the locking code to eliminate race conditions on platforms that allow advisory file locking with specific regions specified.
Here is a relevant snippet from a project that I was involved in:
static int zfsfuse_do_locking(int in_child)
{
/* Ignores errors since the directory might already exist */
mkdir(LOCKDIR, 0700);
if (!in_child)
{
ASSERT(lock_fd == -1);
/*
* before the fork, we create the file, truncating it, and locking the
* first byte
*/
lock_fd = creat(LOCKFILE, S_IRUSR | S_IWUSR);
if(lock_fd == -1)
return -1;
/*
* only if we /could/ lock all of the file,
* we shall lock just the first byte; this way
* we can let the daemon child process lock the
* remainder of the file after forking
*/
if (0==lockf(lock_fd, F_TEST, 0))
return lockf(lock_fd, F_TLOCK, 1);
else
return -1;
} else
{
ASSERT(lock_fd != -1);
/*
* after the fork, we instead try to lock only the region /after/ the
* first byte; the file /must/ already exist. Only in this way can we
* prevent races with locking before or after the daemonization
*/
lock_fd = open(LOCKFILE, O_WRONLY);
if(lock_fd == -1)
return -1;
ASSERT(-1 == lockf(lock_fd, F_TEST, 0)); /* assert that parent still has the lock on the first byte */
if (-1 == lseek(lock_fd, 1, SEEK_SET))
{
perror("lseek");
return -1;
}
return lockf(lock_fd, F_TLOCK, 0);
}
}
void do_daemon(const char *pidfile)
{
chdir("/");
if (pidfile) {
struct stat dummy;
if (0 == stat(pidfile, &dummy)) {
cmn_err(CE_WARN, "%s already exists; aborting.", pidfile);
exit(1);
}
}
/*
* info gleaned from the web, notably
* http://www.enderunix.org/docs/eng/daemon.php
*
* and
*
* http://sourceware.org/git/?p=glibc.git;a=blob;f=misc/daemon.c;h=7597ce9996d5fde1c4ba622e7881cf6e821a12b4;hb=HEAD
*/
{
int forkres, devnull;
if(getppid()==1)
return; /* already a daemon */
forkres=fork();
if (forkres<0)
{ /* fork error */
cmn_err(CE_WARN, "Cannot fork (%s)", strerror(errno));
exit(1);
}
if (forkres>0)
{
int i;
/* parent */
for (i=getdtablesize();i>=0;--i)
if ((lock_fd!=i) && (ioctl_fd!=i)) /* except for the lockfile and the comm socket */
close(i); /* close all descriptors */
/* allow for airtight lockfile semantics... */
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 200000; /* 0.2 seconds */
select(0, NULL, NULL, NULL, &tv);
VERIFY(0 == close(lock_fd));
lock_fd = -1;
exit(0);
}
/* child (daemon) continues */
setsid(); /* obtain a new process group */
VERIFY(0 == chdir("/")); /* change working directory */
umask(027); /* set newly created file permissions */
devnull=open("/dev/null",O_RDWR); /* handle standard I/O */
ASSERT(-1 != devnull);
dup2(devnull, 0); /* stdin */
dup2(devnull, 1); /* stdout */
dup2(devnull, 2); /* stderr */
if (devnull>2)
close(devnull);
/*
* contrary to recommendation, do _not_ ignore SIGCHLD:
* it will break exec-ing subprocesses, e.g. for kstat mount and
* (presumably) nfs sharing!
*
* this will lead to really bad performance too
*/
signal(SIGTSTP,SIG_IGN); /* ignore tty signals */
signal(SIGTTOU,SIG_IGN);
signal(SIGTTIN,SIG_IGN);
}
if (0 != zfsfuse_do_locking(1))
{
cmn_err(CE_WARN, "Unexpected locking conflict (%s: %s)", strerror(errno), LOCKFILE);
exit(1);
}
if (pidfile) {
FILE *f = fopen(pidfile, "w");
if (!f) {
cmn_err(CE_WARN, "Error opening %s.", pidfile);
exit(1);
}
if (fprintf(f, "%d\n", getpid()) < 0) {
unlink(pidfile);
exit(1);
}
if (fclose(f) != 0) {
unlink(pidfile);
exit(1);
}
}
}
See also http://gitweb.zfs-fuse.net/?p=sehe;a=blob;f=src/zfs-fuse/util.c;h=7c9816cc895db4f65b94592eebf96d05cd2c369a;hb=refs/heads/maint
The only way I can think of is to use system-level locks. See this: C++ how to check if file is in use - multi-threaded multi-process system
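As a rough illustration of that idea (names are placeholders and error handling is trimmed): open or create the file, then try a non-blocking exclusive lock; if another process already holds it, flock() fails immediately. Unlike a pure existence check, the lock is released automatically when the owning process dies, so a stale pid file does not block a restart.
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

// Sketch: take an advisory whole-file lock on the pid file.
// Returns the open fd on success (keep it open for the daemon's lifetime),
// or -1 if another instance holds the lock or open() failed.
int try_lock_pidfile(const char* path)
{
    int fd = open(path, O_RDWR | O_CREAT, 0644);
    if (fd == -1)
        return -1;
    if (flock(fd, LOCK_EX | LOCK_NB) == -1) {  // already locked by another process
        close(fd);
        return -1;
    }
    return fd;
}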
One way to approach this problem is to open the file for appending. If the call succeeds and the end of the file is at position 0, you can be fairly certain this is a new file. It could still be a pre-existing empty file, but that scenario may not be important.
FILE* pFile = fopen(theFilePath, "a+");
if (pFile && fseek(pFile, 0, SEEK_END) == 0 && ftell(pFile) == 0) {
// Either file didn't previously exist or it did and was empty
} else if (pFile) {
fclose(pFile);
}
It would appear that there's no way to do it strictly using streams.
You can, instead, use open (as mentioned above by wildplasser) and if that succeeds, proceed to open the same file as a stream. Of course, if all you're writing to the file is a PID, it is unclear why you wouldn't just write it using C-style write().
O_EXCL only excludes other processes that are attempting to open the same file using O_EXCL. This, of course, means that you never have a perfect guarantee, but if the file name/location is somewhere nobody else is likely to be opening (other than folks you know are using O_EXCL) you should be OK.