Segmentation Fault C++, Linked List

I am going through a bunch of DNA sequences and comparing them to find whether any sequence is a subset of another. I remove those that are subsets of another sequence.
I'm using a linked list, and I keep getting a segmentation fault somewhere around writing the data back to the output file.
I'd also greatly appreciate feedback on the overall code structure. I know it's rather messy, so I figured someone could point out some things that should be improved.
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
using namespace std;
/*
* Step 1. Load all sequences and their metadata into structures.
*
* Step 2. Start n^2 operation to compare sequences.
*
* Step 3. Output file back to a different fasta file.
*/
typedef struct sequence_structure sequence_structure;
struct sequence_structure
{
char *sequence;
char *id;
char *header;
sequence_structure *next_sequence_structure;
sequence_structure *previous_sequence_structure;
int length;
};
int main(int argc, char *argv[])
{
FILE *input_file;
ofstream output_file;
/* this is the TAIL of the linked list. This is a reversed linked list. */
sequence_structure *sequences;
int first_sequence = 0;
char *line = (char*) malloc( sizeof( char ) * 1024 );
if( argc != 3 )
{
printf("This program requires a input file and output file as its argument!\n");
return 0;
}
else
{
/* let's read the input file. */
input_file = fopen( argv[1], "r" );
}
while( !feof(input_file) )
{
string string_line;
fgets( line, 2048, input_file );
string_line = line;
if( string_line.length() <= 2 )
break;
if( string_line.at( 0 ) == '>' )
{
sequence_structure *new_sequence = (sequence_structure *) malloc( sizeof( sequence_structure ) );
new_sequence->id = (char *) malloc( sizeof( char ) * ( 14 + 1 ) );
string_line.copy( new_sequence->id, 14, 1 );
(new_sequence->id)[14] = '\0';
stringstream ss ( string_line.substr( 23, 4 ) );
ss >> new_sequence->length;
new_sequence->header = (char *) malloc( sizeof(char) * ( string_line.length() + 1 ) );
string_line.copy( new_sequence->header, string_line.length(), 0 );
(new_sequence->header)[string_line.length()] = '\0';
fgets( line, 2048, input_file );
string_line = line;
new_sequence->sequence = (char *) malloc( sizeof(char) * ( string_line.length() + 1 ) );
string_line.copy( new_sequence->sequence, string_line.length(), 0 );
(new_sequence->sequence)[string_line.length()] = '\0';
if( first_sequence == 0 )
{
sequences = new_sequence;
sequences->previous_sequence_structure = NULL;
first_sequence = 1;
}
else
{
sequences->next_sequence_structure = new_sequence;
new_sequence->previous_sequence_structure = sequences;
sequences = new_sequence;
}
}
else
{
cout << "Error: input file reading error." << endl;
}
}
fclose( input_file );
free( line );
sequence_structure *outer_sequence_node = sequences;
while( outer_sequence_node != NULL )
{
sequence_structure *inner_sequence_node = sequences;
string outer_sequence ( outer_sequence_node->sequence );
while( inner_sequence_node != NULL )
{
string inner_sequence ( inner_sequence_node->sequence );
if( outer_sequence_node->length > inner_sequence_node->length )
{
if( outer_sequence.find( inner_sequence ) != std::string::npos )
{
cout << "Deleting the sequence with id: " << inner_sequence_node->id << endl;
cout << inner_sequence_node->sequence << endl;
cout << "Found within the sequence with id: " << outer_sequence_node->id << endl;
cout << outer_sequence_node->sequence << endl;
sequence_structure *previous_sequence = inner_sequence_node->previous_sequence_structure;
sequence_structure *next_sequence = inner_sequence_node->next_sequence_structure;
free( inner_sequence_node->id );
free( inner_sequence_node->sequence );
free( inner_sequence_node->header );
if( next_sequence != NULL )
next_sequence->previous_sequence_structure = previous_sequence;
if( previous_sequence != NULL )
{
inner_sequence_node = previous_sequence;
free( previous_sequence->next_sequence_structure );
previous_sequence->next_sequence_structure = next_sequence;
}
}
}
inner_sequence_node = inner_sequence_node->previous_sequence_structure;
}
outer_sequence_node = outer_sequence_node->previous_sequence_structure;
}
output_file.open( argv[2], ios::out );
while( sequences->previous_sequence_structure != NULL )
{
sequences = sequences->previous_sequence_structure;
}
sequence_structure *current_sequence = sequences;
while( current_sequence->next_sequence_structure != NULL )
{
output_file << current_sequence->header;
output_file << current_sequence->sequence;
current_sequence = current_sequence->next_sequence_structure;
}
output_file << current_sequence->header;
output_file << current_sequence->sequence;
output_file.close();
while( sequences != NULL )
{
cout << "Freeing sequence with this id: " << sequences->id << endl;
free( sequences->id );
free( sequences->header );
free( sequences->sequence );
if( sequences->next_sequence_structure != NULL )
{
sequences = sequences->next_sequence_structure;
free( sequences->previous_sequence_structure );
}
else
{
sequences = NULL;
}
}
return 0;
}

Related

How to remove the � UTF-8 character from a char* string? [duplicate]

In Python, there is the errors='ignore' option for the open function:
open( '/filepath.txt', 'r', encoding='UTF-8', errors='ignore' )
With this, reading a file with invalid UTF-8 characters will replace them with nothing, i.e., they are ignored. For example, a file with the characters Føö»BÃ¥r is going to be read as FøöBår.
If a line such as Føö»BÃ¥r is read with getline() from stdio.h, it will be read as Føö�Bår:
FILE* cfilestream = fopen( "/filepath.txt", "r" );
size_t linebuffersize = 131072;
char* readline = (char*) malloc( linebuffersize );
while( true )
{
if( getline( &readline, &linebuffersize, cfilestream ) != -1 ) {
std::cerr << "readline=" readline << std::endl;
}
else {
break;
}
}
How can I make stdio.h getline() read it as FøöBår instead of Føö�Bår, i.e., ignoring invalid UTF-8 characters?
One overwhelming solution I can think of is to iterate through all characters on each line read and build a new readline without any of these characters. For example:
FILE* cfilestream = fopen( "/filepath.txt", "r" );
size_t linebuffersize = 131072;
char* readline = (char*) malloc( linebuffersize );
char* fixedreadline = (char*) malloc( linebuffersize );
int index;
int charsread;
int invalidcharsoffset;
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
invalidcharsoffset = 0;
for( index = 0; index < charsread; ++index )
{
if( readline[index] != '�' ) {
fixedreadline[index-invalidcharsoffset] = readline[index];
}
else {
++invalidcharsoffset;
}
}
std::cerr << "fixedreadline=" << fixedreadline << std::endl;
}
else {
break;
}
}
Related questions:
Fixing invalid UTF8 characters
Replacing non UTF8 characters
python replace unicode characters
Python unicode: how to replace character that cannot be decoded using utf8 with whitespace?
You are confusing what you see with what is really going on. The getline function does not do any replacement of characters. [Note 1]
You are seeing a replacement character (U+FFFD) because your console outputs that character when it is asked to render an invalid UTF-8 code. Most consoles will do that if they are in UTF-8 mode; that is, the current locale is UTF-8.
Also, saying that a file contains the "characters Føö»BÃ¥r" is at best imprecise. A file does not really contain characters. It contains byte sequences which may be interpreted as characters -- for example, by a console or other user presentation software which renders them into glyphs -- according to some encoding. Different encodings produce different results; in this particular case, you have a file which was created by software using the Windows-1252 encoding (or, roughly equivalently, ISO 8859-15), and you are rendering it on a console using UTF-8.
What that means is that the data read by getline contains an invalid UTF-8 sequence, but it (probably) does not contain the replacement character code. Based on the character string you present, it contains the byte \xbb, which is a right guillemet (») in Windows code page 1252.
Finding all the invalid UTF-8 sequences in a string read by getline (or any other C library function which reads files) requires scanning the string, but not for a particular code sequence. Rather, you need to decode UTF-8 sequences one at a time, looking for the ones which are not valid. That's not a simple task, but the mbtowc function can help (if you have enabled a UTF-8 locale). As you'll see in the linked manpage, mbtowc returns the number of bytes contained in a valid "multibyte sequence" (which is UTF-8 in a UTF-8 locale), or -1 to indicate an invalid or incomplete sequence. In the scan, you should pass through the bytes in a valid sequence, or remove/ignore the single byte starting an invalid sequence, and then continue the scan until you reach the end of the string.
Here's some lightly-tested example code (in C):
#include <stdlib.h>
#include <string.h>
/* Removes in place any invalid UTF-8 sequences from at most 'len' characters of the
* string pointed to by 's'. (If a NUL byte is encountered, conversion stops.)
* If the length of the converted string is less than 'len', a NUL byte is
* inserted.
* Returns the length of the possibly modified string (with a maximum of 'len'),
* not including the NUL terminator (if any).
* Requires that a UTF-8 locale be active; since there is no way to test for
* this condition, no attempt is made to do so. If the current locale is not UTF-8,
* behaviour is undefined.
*/
size_t remove_bad_utf8(char* s, size_t len) {
char* in = s;
/* Skip over the initial correct sequence. Avoid relying on mbtowc returning
* zero if n is 0, since Posix is not clear whether mbtowc returns 0 or -1.
*/
int seqlen;
while (len && (seqlen = mbtowc(NULL, in, len)) > 0) { len -= seqlen; in += seqlen; }
char* out = in;
if (len && seqlen < 0) {
++in;
--len;
/* If we find an invalid sequence, we need to start shifting correct sequences. */
for (; len; in += seqlen, len -= seqlen) {
seqlen = mbtowc(NULL, in, len);
if (seqlen > 0) {
/* Shift the valid sequence (if one was found) */
memmove(out, in, seqlen);
out += seqlen;
}
else if (seqlen < 0) seqlen = 1;
else /* (seqlen == 0) */ break;
}
*out++ = 0;
}
return out - s;
}
Notes
Aside from the possible line-end transformation of the underlying I/O library, which will replace CR-LF with a single \n on systems like Windows where the two character CR-LF sequence is used as a line-end indication.
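Here is a minimal usage sketch (added for illustration, not part of the original answer): it assumes the remove_bad_utf8 function above is in the same translation unit and that a UTF-8 locale such as en_US.UTF-8 is available on the system.
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
/* Assumes remove_bad_utf8 from above is defined earlier in this file. */
int main( void )
{
    if( setlocale( LC_ALL, "en_US.UTF-8" ) == NULL )   /* example locale name */
        perror( "setlocale" );
    FILE* f = fopen( "/filepath.txt", "r" );
    if( f == NULL ) { perror( "fopen" ); return 1; }
    char* line = NULL;
    size_t cap = 0;
    ssize_t n;
    while( ( n = getline( &line, &cap, f ) ) != -1 )
    {
        remove_bad_utf8( line, (size_t) n );   /* strips invalid sequences in place */
        fputs( line, stdout );                 /* buffer stays NUL-terminated */
    }
    free( line );
    fclose( f );
    return 0;
}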
As @rici well explains in his answer, there can be several invalid UTF-8 sequences in a byte sequence.
Possibly iconv(3) could be worth a look, e.g. see https://linux.die.net/man/3/iconv_open.
When the string "//IGNORE" is appended to tocode, characters that cannot be represented in the target character set will be silently discarded.
Example
This byte sequence, if interpreted as UTF-8, contains some invalid UTF-8:
"some invalid\xFE\xFE\xFF\xFF stuff"
If you display this you would see something like
some invalid���� stuff
When this string passes through the remove_invalid_utf8 function in the following C program, the invalid UTF-8 bytes are removed using the iconv function mentioned above.
So the result is then:
some invalid stuff
C Program
#include <stdio.h>
#include <iconv.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
char *remove_invalid_utf8(char *utf8, size_t len) {
size_t inbytes_len = len;
char *inbuf = utf8;
size_t outbytes_len = len;
char *result = calloc(outbytes_len + 1, sizeof(char));
char *outbuf = result;
iconv_t cd = iconv_open("UTF-8//IGNORE", "UTF-8");
if(cd == (iconv_t)-1) {
perror("iconv_open");
}
if(iconv(cd, &inbuf, &inbytes_len, &outbuf, &outbytes_len)) {
perror("iconv");
}
iconv_close(cd);
return result;
}
int main() {
char *utf8 = "some invalid\xFE\xFE\xFF\xFF stuff";
char *converted = remove_invalid_utf8(utf8, strlen(utf8));
printf("converted: %s to %s\n", utf8, converted);
free(converted);
return 0;
}
I also managed to fix it by trimming/cutting out all non-ASCII characters.
This one takes about 2.6 seconds to parse 319MB:
#include <stdio.h>
#include <stdlib.h>
#include <clocale>
#include <iostream>
int main(int argc, char const *argv[])
{
FILE* cfilestream = fopen( "./test.txt", "r" );
size_t linebuffersize = 131072;
if( cfilestream == NULL ) {
perror( "fopen cfilestream" );
return -1;
}
char* readline = (char*) malloc( linebuffersize );
char* fixedreadline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
if( fixedreadline == NULL ) {
perror( "malloc fixedreadline" );
return -1;
}
char* source;
if( ( source = std::setlocale( LC_ALL, "en_US.utf8" ) ) == NULL ) {
perror( "setlocale" );
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
int index;
int charsread;
int invalidcharsoffset;
unsigned int fixedchar;
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
invalidcharsoffset = 0;
for( index = 0; index < charsread; ++index )
{
fixedchar = static_cast<unsigned int>( readline[index] );
// std::cerr << "index " << std::setw(3) << index
// << " readline " << std::setw(10) << fixedchar
// << " -> '" << readline[index] << "'" << std::endl;
if( 31 < fixedchar && fixedchar < 128 ) {
fixedreadline[index-invalidcharsoffset] = readline[index];
}
else {
++invalidcharsoffset;
}
}
fixedreadline[index-invalidcharsoffset] = '\0';
// std::cerr << "fixedreadline=" << fixedreadline << std::endl;
}
else {
break;
}
}
std::cerr << "fixedreadline=" << fixedreadline << std::endl;
free( readline );
free( fixedreadline );
fclose( cfilestream );
return 0;
}
Alternative and slower version using memcpy
Using memmove does not improve speed much, so you could use either one.
This one takes about 3.1 seconds to parse 319MB:
#include <stdio.h>
#include <stdlib.h>
#include <clocale>
#include <iostream>
#include <cstring>
#include <iomanip>
int main(int argc, char const *argv[])
{
FILE* cfilestream = fopen( "./test.txt", "r" );
size_t linebuffersize = 131072;
if( cfilestream == NULL ) {
perror( "fopen cfilestream" );
return -1;
}
char* readline = (char*) malloc( linebuffersize );
char* fixedreadline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
if( fixedreadline == NULL ) {
perror( "malloc fixedreadline" );
return -1;
}
char* source;
char* destination;
char* finalresult;
int index;
int lastcopy;
int charsread;
int charstocopy;
int invalidcharsoffset;
bool hasignoredbytes;
unsigned int fixedchar;
if( ( source = std::setlocale( LC_ALL, "en_US.utf8" ) ) == NULL ) {
perror( "setlocale" );
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
hasignoredbytes = false;
source = readline;
destination = fixedreadline;
lastcopy = 0;
invalidcharsoffset = 0;
for( index = 0; index < charsread; ++index )
{
fixedchar = static_cast<unsigned int>( readline[index] );
// std::cerr << "fixedchar " << std::setw(10)
// << fixedchar << " -> '"
// << readline[index] << "'" << std::endl;
if( 31 < fixedchar && fixedchar < 128 ) {
if( hasignoredbytes ) {
charstocopy = index - lastcopy - invalidcharsoffset;
memcpy( destination, source, charstocopy );
source += index - lastcopy;
lastcopy = index;
destination += charstocopy;
invalidcharsoffset = 0;
hasignoredbytes = false;
}
}
else {
++invalidcharsoffset;
hasignoredbytes = true;
}
}
if( destination != fixedreadline ) {
charstocopy = charsread - static_cast<int>( source - readline )
- invalidcharsoffset;
memcpy( destination, source, charstocopy );
destination += charstocopy - 1;
if( *destination == '\n' ) {
*destination = '\0';
}
else {
*++destination = '\0';
}
finalresult = fixedreadline;
}
else {
finalresult = readline;
}
// std::cerr << "finalresult=" << finalresult << std::endl;
}
else {
break;
}
}
std::cerr << "finalresult=" << finalresult << std::endl;
free( readline );
free( fixedreadline );
fclose( cfilestream );
return 0;
}
Optimized solution using iconv
This takes about 4.6 seconds to parse 319MB of text.
#include <iconv.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <clocale>
#include <iostream>
// Compile it with:
// g++ -o main test.cpp -O3 -liconv
int main(int argc, char const *argv[])
{
FILE* cfilestream = fopen( "./test.txt", "r" );
size_t linebuffersize = 131072;
if( cfilestream == NULL ) {
perror( "fopen cfilestream" );
return -1;
}
char* readline = (char*) malloc( linebuffersize );
char* fixedreadline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
if( fixedreadline == NULL ) {
perror( "malloc fixedreadline" );
return -1;
}
char* source;
char* destination;
int charsread;
size_t inchars;
size_t outchars;
if( ( source = std::setlocale( LC_ALL, "en_US.utf8" ) ) == NULL ) {
perror( "setlocale" );
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
iconv_t conversiondescriptor = iconv_open("UTF-8//IGNORE", "UTF-8");
if( conversiondescriptor == (iconv_t)-1 ) {
perror( "iconv_open conversiondescriptor" );
}
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
source = readline;
inchars = charsread;
destination = fixedreadline;
outchars = charsread;
if( iconv( conversiondescriptor, &source, &inchars, &destination, &outchars ) )
{
perror( "iconv" );
}
// Trim out the new line character
if( *--destination == '\n' ) {
*--destination = '\0';
}
else {
*destination = '\0';
}
// std::cerr << "fixedreadline='" << fixedreadline << "'" << std::endl;
}
else {
break;
}
}
std::cerr << "fixedreadline='" << fixedreadline << "'" << std::endl;
free( readline );
free( fixedreadline );
if( fclose( cfilestream ) ) {
perror( "fclose cfilestream" );
}
if( iconv_close( conversiondescriptor ) ) {
perror( "iconv_close conversiondescriptor" );
}
return 0;
}
Slowest solution ever using mbtowc
This takes about 24.2 seconds to parse 319MB of text.
If you comment out the line fixedchar = mbtowc(NULL, source, charsread); and uncomment the line fixedchar = 1; (breaking the invalid characters removal), this will take 1.9 seconds instead of 24.2 seconds (also compiled with the -O3 optimization level).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <clocale>
#include <iostream>
#include <cstring>
#include <iomanip>
int main(int argc, char const *argv[])
{
FILE* cfilestream = fopen( "./test.txt", "r" );
size_t linebuffersize = 131072;
if( cfilestream == NULL ) {
perror( "fopen cfilestream" );
return -1;
}
char* readline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
char* source;
char* lineend;
char* destination;
int charsread;
int fixedchar;
if( ( source = std::setlocale( LC_ALL, "en_US.utf8" ) ) == NULL ) {
perror( "setlocale" );
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
lineend = readline + charsread;
destination = readline;
for( source = readline; source != lineend; )
{
// fixedchar = 1;
fixedchar = mbtowc(NULL, source, charsread);
charsread -= fixedchar;
// std::ostringstream contents;
// for( int index = 0; index < fixedchar; ++index )
// contents << source[index];
// std::cerr << "fixedchar=" << std::setw(10)
// << fixedchar << " -> '"
// << contents.str().c_str() << "'" << std::endl;
if( fixedchar > 0 ) {
memmove( destination, source, fixedchar );
source += fixedchar;
destination += fixedchar;
}
else if( fixedchar < 0 ) {
source += 1;
// std::cerr << "errno=" << strerror( errno ) << std::endl;
}
else {
break;
}
}
// Trim out the new line character
if( *--destination == '\n' ) {
*--destination = '\0';
}
else {
*destination = '\0';
}
// std::cerr << "readline='" << readline << "'" << std::endl;
}
else {
break;
}
}
std::cerr << "readline='" << readline << "'" << std::endl;
if( fclose( cfilestream ) ) {
perror( "fclose cfilestream" );
}
free( readline );
return 0;
}
Fastest version from all my others above using memmove
You cannot use memcpy here because the memory regions overlap!
This takes about 2.4 seconds to parse 319MB.
If you comment out the lines *destination = *source and memmove( destination, source, 1 ) (breaking the invalid characters removal), the performance stays almost the same as when memmove is being called. Here, calling memmove( destination, source, 1 ) is a little slower than directly doing *destination = *source;
#include <stdio.h>
#include <stdlib.h>
#include <clocale>
#include <iostream>
#include <cstring>
#include <iomanip>
int main(int argc, char const *argv[])
{
FILE* cfilestream = fopen( "./test.txt", "r" );
size_t linebuffersize = 131072;
if( cfilestream == NULL ) {
perror( "fopen cfilestream" );
return -1;
}
char* readline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
char* source;
char* lineend;
char* destination;
int charsread;
unsigned int fixedchar;
if( ( source = std::setlocale( LC_ALL, "en_US.utf8" ) ) == NULL ) {
perror( "setlocale" );
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
while( true )
{
if( ( charsread = getline( &readline, &linebuffersize, cfilestream ) ) != -1 )
{
lineend = readline + charsread;
destination = readline;
for( source = readline; source != lineend; ++source )
{
fixedchar = static_cast<unsigned int>( *source );
// std::cerr << "fixedchar=" << std::setw(10)
// << fixedchar << " -> '" << *source << "'" << std::endl;
if( 31 < fixedchar && fixedchar < 128 ) {
*destination = *source;
++destination;
}
}
// Trim out the new line character
if( *source == '\n' ) {
*--destination = '\0';
}
else {
*destination = '\0';
}
// std::cerr << "readline='" << readline << "'" << std::endl;
}
else {
break;
}
}
std::cerr << "readline='" << readline << "'" << std::endl;
if( fclose( cfilestream ) ) {
perror( "fclose cfilestream" );
}
free( readline );
return 0;
}
Bonus
You can also use the Python C Extensions (API).
It takes about 2.3 seconds to parse 319MB without converting the lines to a cached UTF-8 char*.
It takes about 3.2 seconds to parse 319MB converting them to a UTF-8 char*.
And it also takes about 3.2 seconds to parse 319MB converting them to a cached ASCII char*.
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <iostream>
typedef struct
{
PyObject_HEAD
}
PyFastFile;
static PyModuleDef fastfilepackagemodule =
{
// https://docs.python.org/3/c-api/module.html#c.PyModuleDef
PyModuleDef_HEAD_INIT,
"fastfilepackage", /* name of module */
"Example module that wrapped a C++ object", /* module documentation, may be NULL */
-1, /* size of per-interpreter state of the module, or
-1 if the module keeps state in global variables. */
NULL, /* PyMethodDef* m_methods */
NULL, /* inquiry m_reload */
NULL, /* traverseproc m_traverse */
NULL, /* inquiry m_clear */
NULL, /* freefunc m_free */
};
// initialize PyFastFile Object
static int PyFastFile_init(PyFastFile* self, PyObject* args, PyObject* kwargs) {
char* filepath;
if( !PyArg_ParseTuple( args, "s", &filepath ) ) {
return -1;
}
int linecount = 0;
PyObject* iomodule;
PyObject* openfile;
PyObject* fileiterator;
iomodule = PyImport_ImportModule( "builtins" );
if( iomodule == NULL ) {
std::cerr << "ERROR: FastFile failed to import the io module '"
"(and open the file " << filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
PyObject* openfunction = PyObject_GetAttrString( iomodule, "open" );
if( openfunction == NULL ) {
std::cerr << "ERROR: FastFile failed get the io module open "
<< "function (and open the file '" << filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
openfile = PyObject_CallFunction(
openfunction, "ssiss", filepath, "r", -1, "ASCII", "ignore" );
if( openfile == NULL ) {
std::cerr << "ERROR: FastFile failed to open the file'"
<< filepath << "'!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
PyObject* iterfunction = PyObject_GetAttrString( openfile, "__iter__" );
Py_DECREF( openfunction );
if( iterfunction == NULL ) {
std::cerr << "ERROR: FastFile failed get the io module iterator"
<< "function (and open the file '" << filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
PyObject* openiteratorobject = PyObject_CallObject( iterfunction, NULL );
Py_DECREF( iterfunction );
if( openiteratorobject == NULL ) {
std::cerr << "ERROR: FastFile failed get the io module iterator object"
<< " (and open the file '" << filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
fileiterator = PyObject_GetAttrString( openfile, "__next__" );
Py_DECREF( openiteratorobject );
if( fileiterator == NULL ) {
std::cerr << "ERROR: FastFile failed get the io module iterator "
<< "object (and open the file '" << filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
PyObject* readline;
while( ( readline = PyObject_CallObject( fileiterator, NULL ) ) != NULL ) {
linecount += 1;
PyUnicode_AsUTF8( readline );
Py_DECREF( readline );
// std::cerr << "linecount " << linecount << " readline '" << readline
// << "' '" << PyUnicode_AsUTF8( readline ) << "'" << std::endl;
}
std::cerr << "linecount " << linecount << std::endl;
// PyErr_PrintEx(100);
PyErr_Clear();
PyObject* closefunction = PyObject_GetAttrString( openfile, "close" );
if( closefunction == NULL ) {
std::cerr << "ERROR: FastFile failed get the close file function for '"
<< filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
PyObject* closefileresult = PyObject_CallObject( closefunction, NULL );
Py_DECREF( closefunction );
if( closefileresult == NULL ) {
std::cerr << "ERROR: FastFile failed close open file '"
<< filepath << "')!" << std::endl;
PyErr_PrintEx(100);
return -1;
}
Py_DECREF( closefileresult );
Py_XDECREF( iomodule );
Py_XDECREF( openfile );
Py_XDECREF( fileiterator );
return 0;
}
// destruct the object
static void PyFastFile_dealloc(PyFastFile* self) {
Py_TYPE(self)->tp_free( (PyObject*) self );
}
static PyTypeObject PyFastFileType =
{
PyVarObject_HEAD_INIT( NULL, 0 )
"fastfilepackage.FastFile" /* tp_name */
};
// create the module
PyMODINIT_FUNC PyInit_fastfilepackage(void)
{
PyObject* thismodule;
// https://docs.python.org/3/c-api/typeobj.html
PyFastFileType.tp_new = PyType_GenericNew;
PyFastFileType.tp_basicsize = sizeof(PyFastFile);
PyFastFileType.tp_dealloc = (destructor) PyFastFile_dealloc;
PyFastFileType.tp_flags = Py_TPFLAGS_DEFAULT;
PyFastFileType.tp_doc = "FastFile objects";
PyFastFileType.tp_init = (initproc) PyFastFile_init;
if( PyType_Ready( &PyFastFileType) < 0 ) {
return NULL;
}
thismodule = PyModule_Create(&fastfilepackagemodule);
if( thismodule == NULL ) {
return NULL;
}
// Add FastFile class to thismodule allowing the use to create objects
Py_INCREF( &PyFastFileType );
PyModule_AddObject( thismodule, "FastFile", (PyObject*) &PyFastFileType );
return thismodule;
}
To build it, create the file source/fastfilewrapper.cpp with the contents of the above file and a setup.py with the following contents:
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, Extension
myextension = Extension(
language = "c++",
extra_link_args = ["-std=c++11"],
extra_compile_args = ["-std=c++11"],
name = 'fastfilepackage',
sources = [
'source/fastfilewrapper.cpp'
],
include_dirs = [ 'source' ],
)
setup(
name = 'fastfilepackage',
ext_modules= [ myextension ],
)
To run the example, use the following Python script:
import time
import datetime
import fastfilepackage
testfile = './test.txt'
timenow = time.time()
iterable = fastfilepackage.FastFile( testfile )
fastfile_time = time.time() - timenow
timedifference = datetime.timedelta( seconds=fastfile_time )
print( 'FastFile timedifference', timedifference, flush=True )
Example:
user@user-pc$ /usr/bin/pip3.6 install .
Processing /fastfilepackage
Building wheels for collected packages: fastfilepackage
Building wheel for fastfilepackage (setup.py) ... done
Stored in directory: /pip-ephem-wheel-cache-j313cpzc/wheels/e5/5f/bc/52c820
Successfully built fastfilepackage
Installing collected packages: fastfilepackage
Found existing installation: fastfilepackage 0.0.0
Uninstalling fastfilepackage-0.0.0:
Successfully uninstalled fastfilepackage-0.0.0
Successfully installed fastfilepackage-0.0.0
user@user-pc$ /usr/bin/python3.6 fastfileperformance.py
linecount 820800
FastFile timedifference 0:00:03.204614
Using std::getline
This takes about 4.7 seconds to parse 319MB.
If you remove the UTF-8 removal algorithm borrowed from the fastest benchmark using stdlib.h getline(), it takes 1.7 seconds to run.
#include <stdio.h>
#include <stdlib.h>
#include <locale.h>
#include <iostream>
#include <locale>
#include <fstream>
#include <iomanip>
int main(int argc, char const *argv[])
{
unsigned int fixedchar;
int linecount = -1;
char* source;
char* lineend;
char* destination;
if( ( source = setlocale( LC_ALL, "en_US.ascii" ) ) == NULL ) {
perror( "setlocale" );
return -1;
}
else {
std::cerr << "locale='" << source << "'" << std::endl;
}
std::ifstream fileifstream{ "./test.txt" };
if( fileifstream.fail() ) {
std::cerr << "ERROR: FastFile failed to open the file!" << std::endl;
return -1;
}
size_t linebuffersize = 131072;
char* readline = (char*) malloc( linebuffersize );
if( readline == NULL ) {
perror( "malloc readline" );
return -1;
}
while( true )
{
if( !fileifstream.eof() )
{
linecount += 1;
fileifstream.getline( readline, linebuffersize );
lineend = readline + fileifstream.gcount();
destination = readline;
for( source = readline; source != lineend; ++source )
{
fixedchar = static_cast<unsigned int>( *source );
// std::cerr << "fixedchar=" << std::setw(10)
// << fixedchar << " -> '" << *source << "'" << std::endl;
if( 31 < fixedchar && fixedchar < 128 ) {
*destination = *source;
++destination;
}
}
// Trim out the new line character
if( *source == '\n' ) {
*--destination = '\0';
}
else {
*destination = '\0';
}
// std::cerr << "readline='" << readline << "'" << std::endl;
}
else {
break;
}
}
std::cerr << "linecount='" << linecount << "'" << std::endl;
if( fileifstream.is_open() ) {
fileifstream.close();
}
free( readline );
return 0;
}
Summary
2.6 seconds trimming UTF-8 using two buffers with indexing
3.1 seconds trimming UTF-8 using two buffers with memcpy
4.6 seconds removing invalid UTF-8 with iconv
24.2 seconds removing invalid UTF-8 with mbtowc
2.4 seconds trimming UTF-8 using one buffer with pointer direct assigning
Bonus
2.3 seconds removing invalid UTF-8 without converting them to a cached UTF-8 char*
3.2 seconds removing invalid UTF-8 converting them to a cached UTF-8 char*
3.2 seconds trimming UTF-8 and caching as ASCII char*
4.7 seconds trimming UTF-8 with std::getline() using one buffer with pointer direct assigning
The file ./test.txt used had 820,800 lines, where each line was equal to:
id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char&id-é-char\r\n
And all versions were compiled with
g++ (GCC) 7.4.0
iconv (GNU libiconv 1.14)
g++ -o main test.cpp -O3 -liconv && time ./main

Streaming a file from AWS S3 through a Boost socket

I'm trying to connect to AWS S3 via a Boost SSL socket.
It mostly works, but when I read I run into several problems.
Corrupted downloads, always for the same files but not for others.
Files that are not corrupted (MD5Filter received all the data) but where the data delivered to the caller's buffer is wrong, meaning there is a problem somewhere between the different layers of read, though I can't figure out where.
Sometimes the program gets stuck in the S3_client::read function and loops thousands of times in the do-while loop calling read, but it never reaches MD5Filter::read.
It gets stuck between filterStream.read() and md5Filter.read(), which is never called. I don't know whether it is gzip or filterStream, but it only happens when the lower layers of read have not been called for a while.
Can you help me spot the problem in my code?
#ifndef BTLOOP_AWSCLIENT_H
#define BTLOOP_AWSCLIENT_H
#include "boost/iostreams/filter/gzip.hpp"
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filtering_streambuf.hpp>
#include <boost/iostreams/categories.hpp>
#include <boost/iostreams/stream.hpp>
#include <string>
#include <set>
#include <map>
#include <openssl/md5.h>
#include <sstream>
#include <fstream>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include "Logger.h"
namespace io = boost::iostreams;
namespace asio = boost::asio;
namespace ssl = boost::asio::ssl;
typedef ssl::stream<asio::ip::tcp::socket> ssl_socket;
namespace S3Reader
{
class MD5Filter
{
public:
typedef char char_type;
struct category :
io::multichar_input_filter_tag{};
MD5Filter( std::streamsize n );
~MD5Filter();
template<typename Source>
std::streamsize read( Source& src, char* s, std::streamsize n );
void setBigFileMode() { _bigFileMode = true; }
std::string close();
void setFileName( std::string fileName ) { _fileName = fileName; };
inline std::streamsize writtenBytes() {std::streamsize res = _writtenBytes; _writtenBytes= 0; return res;};
inline bool eof(){return _eof;};
private:
void computeMd5( char* buffer, size_t size, bool force = false );
private:
bool _bigFileMode;
int _blockCount;
std::vector<unsigned char> _bufferMD5;
MD5_CTX _mdContext;
unsigned char _hashMd5[MD5_DIGEST_LENGTH];
std::string _fileName;
std::streamsize _writtenBytes;
int _totalSize;
bool _eof;
};
class Ssl_wrapper : public io::device<io::bidirectional>
{
public:
Ssl_wrapper( ssl_socket* sock, std::streamsize n ) :
_sock( sock ),_totalSize(0) { };
std::streamsize read( char_type* s, std::streamsize n )
{
boost::system::error_code ec;
size_t rval = _sock->read_some( asio::buffer( s, n ), ec );
_totalSize +=rval;
LOG_AUDIT( " wrapperR: " << rval << " " << _totalSize << " "<<ec.message());
if ( !ec )
{
return rval;
}
else if ( ec == asio::error::eof )
return -1;
else
throw boost::system::system_error( ec, "Wrapper read_some" );
}
std::streamsize write( const char* s, std::streamsize n )
{
boost::system::error_code ec;
size_t rval = _sock->write_some( asio::buffer( s, n ), ec );
if ( !ec )
{
return rval;
}
else if ( ec == asio::error::eof )
return -1;
else
throw boost::system::system_error( ec, " Wrapper read_some" );
}
private:
ssl_socket* _sock;
int _totalSize;
};
class S3_client
{
public:
S3_client( const std::string& key_id, const std::string& key_secret, const std::string& bucket );
virtual ~S3_client();
bool open( const std::string& fileName );
int read( char* buffer, size_t size );
int readLine( char* buffer, size_t size );
void close();
bool eof() { return _filterStream.eof(); }
std::string authorize( const std::string request );
bool connectSocket( std::string url, std::string port, std::string auth );
private :
std::string _key_id;
std::string _key_secret;
std::string _bucket;
std::string _fileName;
io::gzip_decompressor _gzip;
MD5Filter _md5Filter;
boost::posix_time::seconds _timeout;
ssl_socket* _sock;
Ssl_wrapper* _wrapper;
io::stream<Ssl_wrapper>* _sockstream;
std::map<std::string, std::string> _headerMap;
io::filtering_istream _filterStream;
int _totalSize;
};
}
#endif //BTLOOP_AWSCLIENT_H
S3Client.cpp
#include "S3_client.h"
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/iostreams/copy.hpp>
#include <boost/iostreams/filter/counter.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <system/ArmError.h>
namespace io = boost::iostreams;
namespace asio = boost::asio;
namespace ssl = boost::asio::ssl;
namespace S3Reader
{
static const size_t s3_block_size = 8 * 1024 * 1024;
static const std::string base64_chars =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
static inline bool is_base64( unsigned char c )
{
return (isalnum( c ) || (c == '+') || (c == '/'));
}
std::string url_encode( const std::string& value )
{
std::ostringstream escaped;
escaped.fill( '0' );
escaped << std::hex;
for ( auto i = value.begin(), n = value.end(); i != n; ++i )
{
auto c = *i;
if ( isalnum( c ) || c == '-' || c == '_' || c == '.' || c == '~' )
{
escaped << c;
continue;
}
escaped << std::uppercase;
escaped << '%' << std::setw( 2 ) << int((unsigned char) c );
escaped << std::nouppercase;
}
return escaped.str();
}
std::string base64_encode( unsigned char const* bytes_to_encode, unsigned int in_len )
{
std::string ret;
int i = 0;
int j = 0;
unsigned char char_array_3[3];
unsigned char char_array_4[4];
while ( in_len-- )
{
char_array_3[i++] = *(bytes_to_encode++);
if ( i == 3 )
{
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for ( i = 0; (i < 4); i++ )
ret += base64_chars[char_array_4[i]];
i = 0;
}
}
if ( i )
{
for ( j = i; j < 3; j++ )
char_array_3[j] = '\0';
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for ( j = 0; (j < i + 1); j++ )
ret += base64_chars[char_array_4[j]];
while ((i++ < 3))
ret += '=';
}
return ret;
}
std::string to_hex( const uint8_t* buffer, size_t buffer_size )
{
std::stringstream sst;
for ( uint i = 0; i < buffer_size; i++ )
{
sst << std::setw( 2 ) << std::setfill( '0' ) << std::hex << int( buffer[i] );
}
return sst.str();
}
std::string getDateForHeader( bool amzFormat )
{
time_t lt;
time( &lt );
struct tm* tmTmp;
tmTmp = gmtime( &lt );
char buf[50];
if ( amzFormat )
{
strftime( buf, 50, "Date: %a, %d %b %Y %X +0000", tmTmp );
return std::string( buf );
}
else
{
tmTmp->tm_hour++;
//strftime( buf, 50, "%a, %d %b %Y %X +0000", tmTmp );
std::stringstream ss;
ss << mktime( tmTmp );
return ss.str();
}
}
MD5Filter::MD5Filter( std::streamsize n ) :
_bigFileMode( false ), _blockCount( 0 ), _writtenBytes(0), _totalSize( 0 )
{
MD5_Init( &_mdContext );
memset( _hashMd5, 0, MD5_DIGEST_LENGTH );
}
MD5Filter::~MD5Filter()
{
close();
}
template<typename Source>
std::streamsize MD5Filter::read( Source& src, char* s, std::streamsize n )
{
int result =0;
try
{
if ((result = io::read( src, s, n )) == -1 )
{
_eof=true;
LOG_AUDIT( _fileName << " md5R: " << result << " " << _totalSize );
return -1;
}
}
catch ( boost::exception& ex)
{
LOG_ERROR( _fileName <<" "<< boost::diagnostic_information(ex)<< " " << result );
}
computeMd5( s, (size_t) result );
_totalSize += result;
_writtenBytes = result;
LOG_AUDIT( _fileName << " md5R: " << result << " " << _totalSize );
return result;
}
void MD5Filter::computeMd5( char* buffer, size_t size, bool force )
{
size_t realSize = s3_block_size;
uint8_t blockMd5[MD5_DIGEST_LENGTH];
if ( !_bigFileMode )
{
MD5_Update( &_mdContext, buffer, size );
return;
}
if ( size > 0 )
{
_bufferMD5.insert( _bufferMD5.end(), &buffer[0], &buffer[size] );
}
if ((_bufferMD5.size() < s3_block_size) && !force )
return;
if ( force )
realSize = _bufferMD5.size();
MD5( &_bufferMD5[0], realSize, blockMd5 );
MD5_Update( &_mdContext, blockMd5, MD5_DIGEST_LENGTH );
_blockCount++;
if ( _bufferMD5.size() == s3_block_size )
{
_bufferMD5.clear();
return;
}
if ( force )
return;
memcpy( &_bufferMD5[0], &_bufferMD5[s3_block_size], _bufferMD5.size() - s3_block_size );
_bufferMD5.erase( _bufferMD5.begin() + s3_block_size, _bufferMD5.end());
}
std::string MD5Filter::close()
{
std::string mdOutput;
computeMd5( NULL, 0, true );
MD5_Final( _hashMd5, &_mdContext );
mdOutput = to_hex( _hashMd5, MD5_DIGEST_LENGTH );
if ( _bigFileMode )
{
mdOutput += "-" + boost::lexical_cast<std::string>( _blockCount );
}
return mdOutput;
}
std::string S3_client::authorize( const std::string request )
{
unsigned char* digest;
digest = HMAC( EVP_sha1(), _key_secret.c_str(), (int) _key_secret.size(), (unsigned char*) request.c_str(), (int) request.size(), NULL, NULL );
std::string signature( url_encode( base64_encode( digest, 20 )));
return "?AWSAccessKeyId=" + _key_id + "&Expires=" + getDateForHeader( false ) + "&Signature=" + signature;
}
S3_client::S3_client( const std::string& key_id, const std::string& key_secret, const std::string& bucket ) :
_key_id( key_id ), _key_secret( key_secret ), _bucket( bucket ), _gzip( io::gzip::default_window_bits, 1024 * 1024 )
, _md5Filter( s3_block_size ), _timeout( boost::posix_time::seconds( 1 )), _totalSize( 0 ) { }
S3_client::~S3_client()
{
close();
}
bool S3_client::connectSocket( std::string url, std::string port, std::string auth )
{
std::string amzDate = getDateForHeader( true );
std::string host = "url";
boost::asio::io_service io_service;
boost::asio::ip::tcp::resolver resolver( io_service );
boost::asio::ip::tcp::resolver::query query( url, "https" );
auto endpoint = resolver.resolve( query );
// Context with default path
ssl::context ctx( ssl::context::sslv23 );
ctx.set_default_verify_paths();
_sock = new ssl_socket( io_service, ctx );
boost::asio::socket_base::keep_alive option( true );
_wrapper = new Ssl_wrapper( _sock, s3_block_size );
_sockstream = new io::stream<Ssl_wrapper>( boost::ref( *_wrapper ));
asio::connect( _sock->lowest_layer(), endpoint );
_sock->set_verify_mode( ssl::verify_peer );
_sock->set_verify_callback( ssl::rfc2818_verification( url ));
_sock->handshake( ssl_socket::client );
_sock->lowest_layer().set_option( option );
std::stringstream ss;
ss << "GET " << _fileName << auth << " HTTP/1.1\r\n" << "Host: " << host << "\r\nAccept: */*\r\n\r\n";
_sockstream->write( ss.str().c_str(), ss.str().size());
_sockstream->flush();
std::string http_version;
int status_code = 0;
(*_sockstream) >> http_version;
(*_sockstream) >> status_code;
if ( !_sockstream || http_version.substr( 0, 5 ) != "HTTP/" )
{
std::cout << "Invalid response: " << http_version << " " << status_code << std::endl;
return false;
}
if ( status_code != 200 )
{
std::cout << "Response returned with status code " << http_version << " " << status_code << std::endl;
return false;
}
return true;
}
bool S3_client::open( const std::string& fileName )
{
std::string port = "443";
std::string url = "bucket";
std::stringstream authRequest;
std::string date = getDateForHeader( false );
_fileName = fileName;
authRequest << "GET\n\n\n" << date << "\n/" << _bucket << "" << fileName;
std::string auth = authorize( authRequest.str());
if ( !connectSocket( url, port, auth ))
THROW( "Failed to open socket" );
std::string header;
while ( std::getline( *_sockstream, header ) && header != "\r" )
{
std::vector<std::string> vectLine;
boost::split( vectLine, header, boost::is_any_of( ":" ));
if ( vectLine.size() < 2 )
continue;
boost::erase_all( vectLine[1], "\"" );
boost::erase_all( vectLine[1], "\r" );
boost::erase_all( vectLine[1], " " );
_headerMap[vectLine[0]] = vectLine[1];
}
if ( _headerMap.find( "Content-Length" ) == _headerMap.end())
return false;
if ( _headerMap.find( "Content-Type" ) == _headerMap.end())
return false;
if ((uint) std::atoi( _headerMap.at( "Content-Length" ).c_str()) > s3_block_size )
_md5Filter.setBigFileMode();
_md5Filter.setFileName( _fileName );
if ( _headerMap["Content-Type"] == "binary/octet-stream" )
_filterStream.push( _gzip, s3_block_size );
_filterStream.push( boost::ref( _md5Filter ), s3_block_size );
_filterStream.push( boost::ref( *_sockstream ), s3_block_size );
return true;
}
void S3_client::close()
{
std::string localMD5 = _md5Filter.close();
std::string headerMD5 = _headerMap["ETag"];
if ( localMD5 != headerMD5 )
THROW ( "Corrupted file " << _fileName << " " << localMD5 << " " << headerMD5 << "." );
else
LOG_AUDIT( "Close S3: " << _fileName << " " << localMD5 << " " << headerMD5 << "." );
}
int S3_client::readLine( char* buffer, size_t size )
{
_filterStream.getline( buffer, size );
return _filterStream.gcount();
}
int S3_client::read( char* buffer, size_t size )
{
std::streamsize sizeRead = 0;
do
{
_filterStream.read( buffer, size );
sizeRead = _md5Filter.writtenBytes();
_totalSize += sizeRead;
LOG_AUDIT( _fileName << " s3R: " << sizeRead << " " << _totalSize );
}
while( sizeRead ==0 && !_md5Filter.eof() && !_sockstream->eof() && _filterStream.good() && _sock->next_layer().is_open());
return sizeRead;
}
}
int main( int argc, char** argv )
{
S3Reader::S3_client client( key_id, key_secret, s3_bucket );
client.open("MyFile");
while (client.read(buffer, bufferSize) >0 ) {}
}

C++ program opens file correctly on Linux but not on Windows

I compiled a Linux program on Windows via MinGW but the output is wrong.
Error description:
The output of the program looks different on Windows than on Linux. This is how it looks on Windows:
>tig_2
CAATCTTCAGAGTCCAGAGTGGGAGGCACAGACTACAGAAAATGAGCAGCGGGGCTGGTA
>cluster_1001_conTTGGTGAAGAGAATTTGGACATGGATGAAGGCTTGGGCTTGACCATGCGAAGG
Expected output:
>cluster_1001_contig2
CAATCTTCAGAGTCCAGAGTGGGAGGCACAGACTACAGAAAATGAGCAGCGGGGCTGGTA
>cluster_1001_contig1
TTGGTGAAGAGAATTTGGACATGGATGAAGGCTTGGGCTTGACCATGCGAAGG
(Note: the real output is too large to paste here, so the examples above are pseudo-real.)
Possible cause:
I have observed that if I convert the line endings in the input file from Linux (LF) to Windows (CRLF) it almost works: only the first character (>) in the file goes missing. The same code works perfectly on Linux without any line-ending conversion. So the problem must be in the function that parses the input, not in the one that writes the output:
seq_db.Read( db_in.c_str(), options );
Source code:
This is the piece that parses the input file. Anyway, I might be wrong; the fault might be somewhere else. In case it is needed, the FULL source code is here :)
void SequenceDB::Read( const char *file, const Options & options )
{
Sequence one;
Sequence dummy;
Sequence des;
Sequence *last = NULL;
FILE *swap = NULL;
FILE *fin = fopen( file, "r" );
char *buffer = NULL;
char *res = NULL;
size_t swap_size = 0;
int option_l = options.min_length;
if( fin == NULL ) bomb_error( "Failed to open the database file" );
if( options.store_disk ) swap = OpenTempFile( temp_dir );
Clear();
dummy.swap = swap;
buffer = new char[ MAX_LINE_SIZE+1 ];
while (not feof( fin ) || one.size) { /* do not break when the last sequence is not handled */
buffer[0] = '>';
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL && one.size == 0) break;
if( buffer[0] == '+' ){
int len = strlen( buffer );
int len2 = len;
while( len2 && buffer[len2-1] != '\n' ){
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL ) break;
len2 = strlen( buffer );
len += len2;
}
one.des_length2 = len;
dummy.des_length2 = len;
fseek( fin, one.size, SEEK_CUR );
}else if (buffer[0] == '>' || buffer[0] == '#' || (res==NULL && one.size)) {
if ( one.size ) { // write previous record
one.dat_length = dummy.dat_length = one.size;
if( one.identifier == NULL || one.Format() ){
printf( "Warning: from file \"%s\",\n", file );
printf( "Discarding invalid sequence or sequence without identifier and description!\n\n" );
if( one.identifier ) printf( "%s\n", one.identifier );
printf( "%s\n", one.data );
one.size = 0;
}
one.index = dummy.index = sequences.size();
if( one.size > option_l ) {
if ( swap ) {
swap_size += one.size;
// so that size of file < MAX_BIN_SWAP about 2GB
if ( swap_size >= MAX_BIN_SWAP) {
dummy.swap = swap = OpenTempFile( temp_dir );
swap_size = one.size;
}
dummy.size = one.size;
dummy.offset = ftell( swap );
dummy.des_length = one.des_length;
sequences.Append( new Sequence( dummy ) );
one.ConvertBases();
fwrite( one.data, 1, one.size, swap );
}else{
//printf( "==================\n" );
sequences.Append( new Sequence( one ) );
//printf( "------------------\n" );
//if( sequences.size() > 10 ) break;
}
//if( sequences.size() >= 10000 ) break;
}
}
one.size = 0;
one.des_length2 = 0;
int len = strlen( buffer );
int len2 = len;
des.size = 0;
des += buffer;
while( len2 && buffer[len2-1] != '\n' ){
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL ) break;
des += buffer;
len2 = strlen( buffer );
len += len2;
}
size_t offset = ftell( fin );
one.des_begin = dummy.des_begin = offset - len;
one.des_length = dummy.des_length = len;
int i = 0;
if( des.data[i] == '>' || des.data[i] == '#' || des.data[i] == '+' ) i += 1;
if( des.data[i] == ' ' or des.data[i] == '\t' ) i += 1;
if( options.des_len and options.des_len < des.size ) des.size = options.des_len;
while( i < des.size and ( des.data[i] != '\n') ) i += 1;
des.data[i] = 0;
one.identifier = dummy.identifier = des.data;
} else {
one += buffer;
}
}
#if 0
int i, n = 0;
for(i=0; i<sequences.size(); i++) n += sequences[i].bufsize + 4;
cout<<n<<"\t"<<sequences.capacity() * sizeof(Sequence)<<endl;
int i;
scanf( "%i", & i );
#endif
one.identifier = dummy.identifier = NULL;
delete[] buffer;
fclose( fin );
}
The format of the input file is like this:
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
etc
The issue is more than likely that you need to open the file using the "rb" switch in the call to fopen. "rb" opens the file in binary mode, as opposed to "r", which opens the file in text mode.
Since you're going back and forth between Linux and Windows, the end-of-line characters will be different. If you open the file as text on Windows, but the file was formatted for Linux, you're lying to Windows that it is a (Windows) text file, so the runtime will do the CR/LF conversion all wrong.
Therefore you should open the file as binary, "rb", so that the CR/LF translation isn't done.
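Concretely, the change is a single line in SequenceDB::Read (a sketch of the suggested fix; everything else stays as in the question):
/* Open in binary mode so the Windows runtime performs no CR/LF text-mode translation. */
FILE *fin = fopen( file, "rb" );
if( fin == NULL ) bomb_error( "Failed to open the database file" );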

Programmatically modify/create vcproj files

I have a Visual C++ project file (vcproj) and I want to programmatically modify it in order to add additional include directories or linked libraries.
One solution could be to parse the vcproj as an XML file and modify it. Is there any other API to make this easier?
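For the XML route, here is a minimal sketch added for illustration (not from the question or the answer below). It uses the tinyxml2 library and assumes the classic .vcproj schema, where compiler settings live in Tool elements named VCCLCompilerTool; the file name and include path are placeholders.
#include <tinyxml2.h>
#include <cstdio>
#include <string>
int main()
{
    tinyxml2::XMLDocument doc;
    if( doc.LoadFile( "myproject.vcproj" ) != tinyxml2::XML_SUCCESS )   // placeholder file name
    {
        std::printf( "Could not load the project file\n" );
        return 1;
    }
    tinyxml2::XMLElement* root = doc.FirstChildElement( "VisualStudioProject" );
    tinyxml2::XMLElement* configs = root ? root->FirstChildElement( "Configurations" ) : NULL;
    for( tinyxml2::XMLElement* cfg = configs ? configs->FirstChildElement( "Configuration" ) : NULL;
         cfg != NULL; cfg = cfg->NextSiblingElement( "Configuration" ) )
    {
        for( tinyxml2::XMLElement* tool = cfg->FirstChildElement( "Tool" );
             tool != NULL; tool = tool->NextSiblingElement( "Tool" ) )
        {
            const char* name = tool->Attribute( "Name" );
            if( name != NULL && std::string( name ) == "VCCLCompilerTool" )
            {
                // Append a new directory to the existing AdditionalIncludeDirectories attribute.
                const char* dirs = tool->Attribute( "AdditionalIncludeDirectories" );
                std::string updated = dirs ? std::string( dirs ) + ";..\\mylib\\include"
                                           : std::string( "..\\mylib\\include" );
                tool->SetAttribute( "AdditionalIncludeDirectories", updated.c_str() );
            }
        }
    }
    doc.SaveFile( "myproject.vcproj" );
    return 0;
}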
Here is some code I wrote for fetching, from a Visual Studio solution (.sln), all the projects it contains, and from each project, all the files (with full paths) it contains.
// Assumed context for this snippet (not shown in the original answer):
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
typedef std::vector<std::string> strList;   // assumed definition of strList
static strList parseSolution( const char * solName )
{
strList result;
static char drive[_MAX_DRIVE];
static char somepath[_MAX_PATH];
static char buffer[_MAX_PATH];
static char path[_MAX_PATH];
static char ext[_MAX_EXT];
_splitpath( solName, drive, somepath, buffer, ext );
FILE * f = fopen( solName, "r" );
if( NULL == f )
{
printf("ERROR: Solution %s is missing or unavailable.\n", solName );
exit(1);
}
while( !feof(f) )
{
char * res = fgets( buffer, sizeof(buffer), f );
if( NULL == res )
continue;
if( NULL != strstr(buffer, "Project(") )
{
char * ptrName = strchr( buffer, '=' );
char * ptrFile = strchr( ptrName, ',' );
*ptrFile++ = 0;
char * ptrEnd = strchr( ptrFile, ',' );
*ptrEnd++ = 0;
while( ('=' == *ptrName)
||(' ' == *ptrName)
||('"' == *ptrName) ) ptrName++;
if( '"' == ptrName[strlen(ptrName)-1] )
ptrName[strlen(ptrName)-1] = 0;
while( (' ' == *ptrFile)
||('"' == *ptrFile) ) ptrFile++;
if( '"' == ptrFile[strlen(ptrFile)-1] )
ptrFile[strlen(ptrFile)-1] = 0;
_makepath( path, drive, somepath, ptrFile, NULL );
result.push_back( std::string(path) );
}
}
fclose(f);
return result;
}
/**
* Parse project and extract fullpath source filename from project.
*/
static strList parseProject( const char * projName )
{
strList result;
static char drive[_MAX_DRIVE];
static char somepath[_MAX_PATH];
static char buffer[_MAX_PATH];
static char path[_MAX_PATH];
static char ext[_MAX_EXT];
_splitpath( projName, drive, somepath, buffer, ext );
FILE * f = fopen( projName, "r" );
if( NULL == f )
{
printf("ERROR: Project %s is missing or unavailable.\n", projName );
exit(1);
}
while( !feof(f) )
{
char * res = fgets( buffer, sizeof(buffer), f );
if( NULL == res )
continue;
if( (NULL != strstr(buffer, "<ClInclude Include="))
||(NULL != strstr(buffer, "<ClCompile Include=")) )
{
char * ptrName = strchr( buffer, '=' );
char * ptrName1 = strstr( buffer, "/>" );
if( NULL != ptrName1 ) *ptrName1 = 0;
while( ('=' == *ptrName)
||(' ' == *ptrName)
||('"' == *ptrName) ) ptrName++;
while( ('"' == ptrName[strlen(ptrName)-1])
||(' ' == ptrName[strlen(ptrName)-1])
||('\n' == ptrName[strlen(ptrName)-1]))
ptrName[strlen(ptrName)-1] = 0;
_makepath( path, drive, somepath, ptrName, NULL );
result.push_back( std::string(path) );
}
}
fclose(f);
return result;
}
/**
* Recoding source file.
*/
Using these functions you can work on each file, or work on the projects themselves.
strList projectList = parseSolution( argv[1] );
strList::iterator itProj = projectList.begin();
while( itProj != projectList.end() )
{
printf("Project: %s\n", itProj->c_str());
strList fileName = parseProject( itProj->c_str() );
strList::iterator itFile = fileName.begin();
while( itFile != fileName.end() )
{
printf(" File %s\n", itFile->c_str());
// do something with the project file
itFile++;
}
fileName.clear();
itProj++;
}

Stack overflow in recursive function

I am writing a simple app that outputs all files in some directory to the console. To achieve this I dynamically allocate memory in the function PathCreator() and return a pointer to this memory. I don't know how to correctly free this memory in GetAllFiles(). When I use the code below I get a stack overflow exception. How can I fix this? Please don't suggest something that doesn't need dynamically allocated memory; I just want to fix my code.
#include "stdafx.h"
#include <windows.h>
#include <iostream>
wchar_t *PathCreator(wchar_t *dir, wchar_t *fileName);
int is_directory(wchar_t *p)
{
wchar_t *t = PathCreator(p,L"\\");
WIN32_FIND_DATA file;
HANDLE search_hendle = FindFirstFile(t, &file);
long error = GetLastError();
if(error == 267)
{
return 0;
}
else
{
return 1;
}
}
wchar_t *PathCreator(wchar_t *dir, wchar_t *fileName)
{
wchar_t* path = 0;
int size = 0;
wchar_t *d = dir;
wchar_t *f = fileName;
while(*d != '\0')
{
d++;
size++;
}
while(*f != '\0')
{
f++;
size++;
}
path = new wchar_t[(size+=3) * sizeof(wchar_t)];
int j = 0;
while(j < size)
{
path[j] = '\0';
j++;
}
int i;
i = 0;
while(*dir != '\0')
{
path[i] = *dir;
i++;
dir++;
}
path[i++] = '\\';
wchar_t *t = fileName;
while(*t != '\0')
{
path[i] = *t;
i++;
t++;
}
path[i] = '\0';
return path;
}
void GetAllFiles(wchar_t* dir)
{
wchar_t *p = 0;
int i = 0;
WIN32_FIND_DATA file;
wchar_t *t = PathCreator(dir, L"*");
HANDLE search_hendle = FindFirstFile(t, &file);
if(search_hendle)
{
do
{
p = PathCreator(dir,file.cFileName);
if(!is_directory(p))
{
std::wcout << p << std::endl;
}
else
{
GetAllFiles(p);
}
delete [] p;
}
while(FindNextFile(search_hendle, &file));
}
delete [] t;
FindClose(search_hendle);
}
int _tmain(int argc, _TCHAR* argv[])
{
GetAllFiles(L"C:\\Users");
}
So, you have "." and ".." in your directory search.
The first entry is ".", so:
p = PathCreator(dir, file.cFileName)
yields:
"C:\Users\."
Then the next line:
if (!is_directory(p))
Is ALWAYS false, so it just keeps recursing into:
GetAllFiles(p)
forever ... or until your stack blows up, whichever comes first ;-)
I would recommend explicitly checking for "." and ".." and skipping those entries (also MFC and Qt, etc. have nice directory handling classes, but I think you want to do it this way).
My modification:
do
{
// I added this - guess I can't embolden code text
if (wcscmp(file.cFileName,L".") == 0 || wcscmp(file.cFileName,L"..")==0)
continue;
p = PathCreator(dir,file.cFileName);
if(!is_directory(p))
{
std::wcout << p << std::endl;
}
else
{
GetAllFiles(p);
}
delete [] p;
}
while(FindNextFile(search_hendle, &file));
Again you try to use C in place of C++, and yet you are still using wcout?! No problem, you are a programmer and I'm sure you have a reason for this! But memory management in C is much, much harder than in C++, and you need some skill to use it. Here is fully working code, but as you can see it is really harder to manage, use and understand than its C++ version using standard containers and strings. So if you are allowed to use C++ (as you use wcout), then use the C++ version for ease:
#include <Windows.h>
#include <stdio.h>
#include <stdlib.h>
#include <wchar.h>
/*! \brief Merge \a folder and \a filename into a newly allocate memory and
* return it to the caller. Use free to free returned memory!
*/
wchar_t* PathCreator( wchar_t const* folder, wchar_t const* filename )
{
wchar_t* res;
size_t i, len, folderLen = wcslen( folder ), filenameLen = wcslen( filename );
len = folderLen + filenameLen;
if( folder[folderLen - 1] != '\\' ) ++len;
++len; // for \0
res = (wchar_t*) malloc( sizeof(wchar_t) * len );
if( !res ) return NULL;
wcscpy_s( res, len, folder );
/* Remove possible wide card at end of folder */
for( i = folderLen; i--; ) {
if( res[i] == '*' || res[i] == '?' ) {
res[i] = 0;
--folderLen;
} else {
break;
}
}
if( res[folderLen - 1] != '\\' ) wcscat_s( res, len, L"\\" );
wcscat_s( res, len, filename );
return res;
}
/*! \brief Free memory that returned by \ref GetAllFiles
*/
void FreeAllFilesMemory( wchar_t** p )
{
wchar_t** tmp = p;
if( !p ) return ;
while( *tmp ) free( *tmp++ );
free( p );
}
wchar_t** AddToArray( wchar_t** p, size_t* pAllocated, size_t* pUsed, wchar_t* s )
{
if( *pUsed >= *pAllocated ) {
size_t newAlloc = *pAllocated * 3 / 2; // Grow by 1.5
if( newAlloc < 16 ) newAlloc = 16;
p = (wchar_t**) realloc( p, newAlloc * sizeof(wchar_t*) );
if( !p ) return NULL;
*pAllocated = newAlloc;
}
p[*pUsed] = s;
++*pUsed;
return p;
}
wchar_t** GetAllFilesImpl( wchar_t const* folder, wchar_t** res, size_t* pAllocated, size_t* pUsed )
{
HANDLE hSearch;
WIN32_FIND_DATAW fileinfo;
size_t allocatedMemory = 0;
hSearch = FindFirstFileW( folder, &fileinfo );
if( hSearch != INVALID_HANDLE_VALUE ) {
do {
wchar_t* sFileName, ** tmp, sTmp[ 1024 ];
/* ignore ., .. */
if( !wcscmp(fileinfo.cFileName, L".") ||
!wcscmp(fileinfo.cFileName, L"..") )
continue;
sFileName = PathCreator( folder, fileinfo.cFileName );
wprintf( L"%s\n", sFileName ); /* Print result */
tmp = AddToArray( res, pAllocated, pUsed, sFileName );
if( !tmp ) return FreeAllFilesMemory(res), NULL;
res = tmp;
if( fileinfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY ) {
wcscpy_s( sTmp, sFileName );
wcscat_s( sTmp, L"\\*" );
tmp = GetAllFilesImpl( sTmp, res, pAllocated, pUsed );
if( !tmp ) return NULL;
res = tmp;
}
} while( FindNextFileW(hSearch, &fileinfo) );
FindClose( hSearch );
}
return res;
}
/*! \brief List all files that match a pattern and return it as an array of
* wide strings, free result using \ref FreeAllFilesMemory
*/
wchar_t** GetAllFiles( wchar_t const* folder )
{
size_t nAllocated = 0, nUsed = 0;
wchar_t** res = GetAllFilesImpl( folder, NULL, &nAllocated, &nUsed );
if( res ) {
/* to indicate end of result add a NULL string */
wchar_t** tmp = AddToArray( res, &nAllocated, &nUsed, NULL );
if( !tmp ) return FreeAllFilesMemory(res), NULL;
res = tmp;
}
return res;
}
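A minimal usage sketch (added for illustration, not part of the original answer): pass GetAllFiles a folder pattern ending in \* and release the returned array with FreeAllFilesMemory.
#include <stdio.h>
/* Assumes the GetAllFiles / FreeAllFilesMemory code above is in the same file. */
int wmain( void )
{
    wchar_t** files = GetAllFiles( L"C:\\Users\\*" );   /* example pattern */
    if( files ) {
        size_t count = 0;
        for( wchar_t** p = files; *p; ++p )   /* the array ends with a NULL entry */
            ++count;
        wprintf( L"%u entries found\n", (unsigned) count );
        FreeAllFilesMemory( files );
    }
    return 0;
}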