I was viewing the file agents.h on my Windows machine and wanted to see the C++ code without comments, so I stripped them out with my old program. I was surprised that it took about 2 seconds to finish; the file is only 605 KB, so that isn't very big. Why is it so slow? I suspect the culprit is ftell(), but I can't really tell. Is the branching slowing it down, or ftell()? If it's ftell(), what is a better way to move the FILE pointer back?
EDIT
#include <stdio.h>
#include <time.h>
#define NOT_COMMENT (!DOUBLESLASH_Comment && !ASTERISK_SLASH_Comment)
int main(int argc,char *argv[])
{
    // Strips //-comments and /* ... */ comments from the C/C++ source file
    // named by argv[1], writing the result to argv[2], then reports elapsed
    // time.  Quote tracking is approximate (see NOTE below).
    clock_t t1 = clock();
    FILE *input , *output;
    if( fopen_s(&input,argv[1],"r") )
    {
        printf("error opening file %s\n",argv[1]);
        return 0;
    }
    if( fopen_s(&output,argv[2],"w") )
    {
        printf("error opening file %s\n",argv[2]);
        fclose(input);      // bug fix: don't leak the input handle
        return 0;
    }
    // Bug fix: c and d must be int, not char.  getc() returns int and EOF is
    // an int sentinel; with char, byte 0xFF is misdetected as EOF (signed
    // char) or the loop never terminates (unsigned char).
    int c , d;
    // flag = a pending backslash escape; the two comment flags record which
    // kind of comment (if any) we are currently inside.
    bool DOUBLESLASH_Comment = 0 , ASTERISK_SLASH_Comment = 0 , flag = 0;
    /* single quotes / double quotes -- counted, reset when a pair closes */
    int s_QUOTED = 0 , d_QUOTED = 0;
    while( (c=getc(input)) != EOF )
    {
        switch(c)
        {
        case '\\':
        {
            if( NOT_COMMENT )
                flag = !flag;       // toggle pending-escape state
        }break;
        case '\'':
        {
            if( NOT_COMMENT && !d_QUOTED && !flag )
                s_QUOTED++;
        }break;
        case '"':
        {
            if( NOT_COMMENT && !flag && !s_QUOTED )
                d_QUOTED++;
        }break;
        case '/':
        {
            // Possible start of a comment; peek one character ahead.
            if( NOT_COMMENT && !d_QUOTED )
            {
                if( (d=getc(input)) == '*' )
                    ASTERISK_SLASH_Comment = 1;
                else if( d == '/' )
                    DOUBLESLASH_Comment = 1;
                else if( d != EOF )
                    ungetc(d,input);    // not a comment: push the char back
            }
        }break;
        case '*':
        {
            // Possible end of a /* ... */ comment.
            if( ASTERISK_SLASH_Comment )
            {
                if( (d=getc(input)) == '/' )
                {
                    ASTERISK_SLASH_Comment = 0;
                }
                else if( d != EOF )
                {
                    ungetc(d,input);
                }
                // Bug fix: go straight to the next character.  The original
                // read one extra character after "*/" and emitted it without
                // processing it (so "*//*..." broke), and returned early on
                // EOF, leaking both FILE handles.
                continue;
            }
        }break;
        case '\n':
        {
            if( DOUBLESLASH_Comment )
                DOUBLESLASH_Comment = 0;    // a // comment ends at end of line
        }break;
        }
        if( NOT_COMMENT && c != '\\' ) flag = 0;    // an escape covers one char only
        // NOTE(review): quotes are counted, not stated; adjacent string
        // literals or an unmatched quote confuse this -- verify on real input.
        if( d_QUOTED == 2 ) d_QUOTED = 0;
        if( s_QUOTED == 2 ) s_QUOTED = 0;
        if( NOT_COMMENT )
        {
            fputc(c,output);    // fputc avoids per-character format parsing
        }
    }
    fclose(input);
    fclose(output);
    clock_t t2 = clock();
    double elapsed = (double)(t2 - t1) / CLOCKS_PER_SEC;
    printf("time elapsed : %f\n",elapsed);
    return 0;
}
Without actually measuring the speed of your code in a profiler (and with the file you use as input, since one I use may have a different set of comments, etc that trigger a different behaviour), it's hard to say for sure. But it looks like you use fseek( ... ) simply to move back one character. In which case writing your own function for a one character lookahead would be a much better choice.
Something like this:
// One-character lookahead buffer shared by getNextChar/peekChar.
char lookahead = ' ';
bool havelookahead = false;

// Returns the next character from the stream, consuming any buffered
// lookahead character first.
char getNextChar(FILE *input)
{
    if (!havelookahead)
        return getc(input);
    havelookahead = false;
    return lookahead;
}

// Returns the next character without consuming it; repeated calls keep
// returning the same character until getNextChar() consumes it.
char peekChar(FILE *input)
{
    if (!havelookahead)
    {
        lookahead = getc(input);
        havelookahead = true;
    }
    return lookahead;
}
Then replace your getc with getNextChar in the beginning of the loop, and where you check the next character with peekChar (followed by a dummy getNextChar() to consume it).
This is a useful pattern in general for parsing - both at character level and at token level, so it's good learning to understand how this works.
You can also use the standard ungetc to "put back" your character that you looked at.
Whether this makes your code run significantly faster or not is hard to say, as I said in the beginning.
I cannot compile your code, so I cannot make tests. But I suspect that the bottleneck is fseek rather than ftell. Rejecting a character is a common task in parsing files... and should be implemented by the library or some intermediate layer with some buffering. In this case (rejection of a single character) you can use ungetc to achieve that.
So you should replace
fseek( file , ( ftell(file) - 1 ) , SEEK_SET );
with
ungetc('*', file); // ungetc('/', file); the second time.
Related
I have a project which needs to read the path of a SysData file. I want to use a SysData path that contains characters like "ç", "ş", "ğ", but the program cannot read these characters. I need to read the file with a Unicode encoding (such as UTF-8).
There is code;
// Reads one line from hFile into *str, with the line terminator stripped.
// Returns true when at least one character was read before EOF; false only
// when the stream was already exhausted.
// NOTE(review): on the non-Linux branch a CR/LF pair terminates the read at
// the CR, leaving the LF in the stream for the next call -- presumably
// harmless for files opened in text mode, but verify for LF-only files.
bool TSimTextFileStream::ReadLine ( mstring * str )
{
*str = "";
char c = ' ';
bool first = true;   // stays true only if EOF is hit before any character
// while ( read ( hFile, &c, 1 ) )
while ( fread ( &c, 1, 1, hFile ) )
{
if (first) first = false;
#ifdef __linux__
// Linux build: skip CR (13) so CRLF files work; LF (10) ends the line.
if ( c == 13 )
continue;
else
if ( c == 10 )
break;
else
*str += c;
#else
// Other platforms: either CR or LF terminates the line.
if( c == 13 || c == 10)
break;
else
*str += c;
#endif
}
return !first;
}
And there is code, calling this method;
// Resolves the SysData directory, trying in order: the SYSDATAPATH
// environment variable, the SysDataPath.dat file, and finally
// <current dir>/SysData.  The result is cached in a function-local static
// for the lifetime of the process.
mstring GetSysDataDirectory ( )
{
    static mstring sysDataDir = "";
    if ( sysDataDir == "" )
    {
        if ( mIsEnvironmentVarExist( "SYSDATAPATH" ) )
        {
            mstring folder = mGetEnvVar( "SYSDATAPATH" );
            // An empty variable falls back to the default location.
            sysDataDir = ( folder.size() == 0 )
                ? mGetCurrentDir ( ) + "/SysData"
                : folder;
        }
        else if ( mIsFileExist ( "SysDataPath.dat" ) )
        {
            TSimTextFileStream txtfile;
            txtfile.OpenFileForRead( "SysDataPath.dat" );
            mstring folder;
            // An unreadable/empty file falls back to the default location.
            if ( ! txtfile.ReadLine( &folder ) )
                folder = mGetCurrentDir ( ) + "/SysData";
            sysDataDir = folder;
        }
        else
        {
            sysDataDir = mGetCurrentDir ( ) + "/SysData";
        }
    }
    return sysDataDir;
}
I search and find some solution but not work, like that;
// Opens fname for reading (with the ccs=UNICODE translation flag).
// Any handle already held by this stream is closed first.
// Returns true when the file was opened successfully.
bool TSimTextFileStream::OpenFileForRead(mstring fname)
{
    if (hFile != NULL)
        CloseFile();
    hFile = fopen(fname.c_str(), "r,ccs=UNICODE");
    return hFile != NULL;
}
and tried this;
hFile = fopen(fname.c_str(), "r,ccs=UTF-8");
But not work again. Can you help me please?
enter image description here
This situation is my problem :((
Windows does not support UTF-8 encoded path names for fopen:
The fopen function opens the file that is specified by filename. By
default, a narrow filename string is interpreted using the ANSI
codepage (CP_ACP).
Source.
Instead, a second function, called _wfopen is provided, which accepts a wide-character string as path argument.
Similar restrictions apply when using the C++ fstreams for File I/O.
So the only way for you to solve this is by converting your UTF-8 encoded pathname either to the system codepage or to a wide character string.
fopen by default reads narrow (ANSI) characters, not Unicode. Try changing the file's encoding.
I cannot find a way to start cuFFT processing after a previous unsuccessful launch.
Here is a minimal example. The main idea is as follows: we create a simple cuFTT processor which can manage its resources ( device memory and cuFFT plans). We check that this processor does make FFT. Then we ask to create too many plans, thus we enforce cuFFT error. Then we release all resources and try to repeat the successful launch. However, the processor can do nothing after the failure.
Firstly, here is a rather long preamble:
#include <iostream>
using std::cout;
using std::cerr;
using std::endl;
#include <vector>
using std::vector;
#include "cuda_runtime.h"
#include "cufft.h"
// cuFFT API errors
static char* _cufftGetErrorEnum( cufftResult_t error )
{
switch ( error )
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "cuFFT was passed an invalid plan handle";
case CUFFT_ALLOC_FAILED:
return "cuFFT failed to allocate GPU or CPU memory";
// No longer used
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "User specified an invalid pointer or parameter";
case CUFFT_INTERNAL_ERROR:
return "Driver or internal cuFFT library error";
case CUFFT_EXEC_FAILED:
return "Failed to execute an FFT on the GPU";
case CUFFT_SETUP_FAILED:
return "The cuFFT library failed to initialize";
case CUFFT_INVALID_SIZE:
return "User specified an invalid transform size";
// No longer used
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
case CUFFT_INCOMPLETE_PARAMETER_LIST:
return "Missing parameters in call";
case CUFFT_INVALID_DEVICE:
return "Execution of a plan was on different GPU than plan creation";
case CUFFT_PARSE_ERROR:
return "Internal plan database error";
case CUFFT_NO_WORKSPACE:
return "No workspace has been provided prior to plan execution";
case CUFFT_NOT_IMPLEMENTED:
return "CUFFT_NOT_IMPLEMENTED";
case CUFFT_LICENSE_ERROR:
return "CUFFT_LICENSE_ERROR";
}
return "<unknown>";
}
// check cuda runtime calls
// Checks a CUDA runtime return code.  On failure, synchronizes the device
// and prints the error text to stderr.  Returns true on cudaSuccess.
bool cudaCheck( cudaError_t err )
{
    if ( err == cudaSuccess )
        return true;
    cudaDeviceSynchronize();
    cerr << cudaGetErrorString( cudaGetLastError() ) << endl;
    return false;
}
// check cuFFT calls
// Checks a cuFFT return code, printing a readable message to stderr on
// failure.  Returns true on CUFFT_SUCCESS.
bool cufftCheck( cufftResult_t err )
{
    bool const ok = ( err == CUFFT_SUCCESS );
    if ( !ok )
        cerr << _cufftGetErrorEnum( err ) << endl;
    return ok;
}
Next, we define a simple cuFFT processor which can manage its resources (device memory and cuFFT plans)
// Owns the device buffer and the set of cuFFT plans used by Run().
// All CUDA resources are released via ReleaseAll() in the destructor.
class CCuFFT_Processor
{
// One plan per power-of-two batch size, filled by Alloc().
vector<cufftHandle> _plans;
// Device buffer used as both input and output of the transform.
cufftComplex *_data;
// Size of _data in bytes (0 when no buffer is held).
size_t _data_bytes;
// Release resources
bool ReleaseAll();
bool ReleaseMemory();
bool ReleasePlans();
public:
CCuFFT_Processor() :
_data( NULL ),
_data_bytes( 0 )
{
_plans.reserve( 32 );
_plans.clear();
}
~CCuFFT_Processor()
{
ReleaseAll();
}
// Executes the most recently created plan on _data (in-place C2C forward).
bool Run();
// (Re)allocates the device buffer and builds plans for batch sizes
// 1, 2, 4, ... up to batch_len.
bool Alloc( size_t data_len, size_t batch_len );
};
Here is how we are going to release resources:
// Frees the device buffer (if any) and resets the bookkeeping fields.
// Returns false only when cudaFree reported an error.
bool CCuFFT_Processor::ReleaseMemory()
{
    if ( _data == NULL )
        return true;        // nothing held -- trivially successful
    bool const ok = cudaCheck( cudaFree( _data ) );
    _data = NULL;
    _data_bytes = 0;
    return ok;
}
// Destroys every cuFFT plan held by this processor and clears the list.
// Returns false if any destroy failed.
bool CCuFFT_Processor::ReleasePlans()
{
    bool chk = true;
    // Bug fix: do not short-circuit with `chk = chk && ...` -- the original
    // stopped destroying plans after the first failure, leaking every
    // remaining handle.  Attempt all destroys and fold the results.
    for ( auto & p : _plans )
    {
        if ( !cufftCheck( cufftDestroy( p ) ) )
            chk = false;
    }
    _plans.clear();
    return chk;
}
// Full teardown: synchronize, free the buffer, destroy plans, then reset
// the device.  NOTE(review): the && chain short-circuits, so if an early
// step fails the later steps (including cudaDeviceReset) never run --
// presumably intended to preserve the first error, but verify, since the
// error-recovery path depends on the reset actually happening.
bool CCuFFT_Processor::ReleaseAll()
{
bool chk = true;
chk = chk && cudaCheck( cudaDeviceSynchronize() );
chk = chk && ReleaseMemory();
chk = chk && ReleasePlans();
chk = chk && cudaCheck( cudaDeviceReset() );
return chk;
}
Here is the implementation of the main functionality:
// Ensures the device buffer holds data_len * batch_len complex samples and
// (re)builds C2C plans for batch sizes 1, 2, 4, ... up to batch_len.
// On any failure all resources are released and false is returned.
bool CCuFFT_Processor::Alloc( size_t data_len, size_t batch_len )
{
    bool chk = true;
    size_t bytes = sizeof( cufftComplex ) * data_len * batch_len;
    // CUDA resources: keep the existing buffer when it is already big enough.
    if ( _data_bytes < bytes )
        chk = chk && ReleaseMemory();
    if ( chk && _data == NULL )
    {
        // Bug fix: the original set _data_bytes = bytes even when cudaMalloc
        // failed (or was skipped because chk was already false), leaving the
        // object claiming a buffer it does not own.  Record the size only on
        // a successful allocation.
        if ( cudaCheck( cudaMalloc( (void **)&_data, bytes ) ) )
            _data_bytes = bytes;
        else
        {
            _data = NULL;   // cudaMalloc may leave the pointer unspecified
            chk = false;
        }
    }
    // cuFFT resources: one plan per power-of-two batch size.
    chk = chk && ReleasePlans();
    for ( size_t b = 1; chk && ( b <= batch_len ); b *= 2 )
    {
        cufftHandle new_plan;
        chk = cufftCheck(
            cufftPlan1d( &new_plan, int(data_len), CUFFT_C2C, int(b) ) );
        if ( chk )
            _plans.push_back( new_plan );
    }
    if ( !chk )
        ReleaseAll();
    return chk;
}
bool CCuFFT_Processor::Run()
{
bool chk = true;
chk = cufftCheck(
cufftExecC2C( *_plans.rbegin(), _data, _data, CUFFT_FORWARD ) );
if ( !chk )
ReleaseAll();
chk = chk && cudaCheck( cudaDeviceSynchronize() );
return chk;
}
Finally, the program
// Exercises the processor three times: a normal run, a deliberately
// oversized run that is expected to fail, and a repeat of the normal run
// to check that the processor recovers after the failure.
int main()
{
    size_t const batch = 1 << 5;
    size_t const length = 1 << 21;
    CCuFFT_Processor proc;
    // Allocate-and-run helper; Run() is skipped whenever Alloc() fails.
    auto attempt = [&]( size_t len )
    {
        if ( proc.Alloc( len, batch ) )
            proc.Run();
    };
    attempt( length );      // normal run
    attempt( length * 4 );  // run with (intentional) allocation error
    attempt( length );      // normal run : check recovery
    return EXIT_SUCCESS;
}
If I use a small length = 1 << 18, then no errors occur. However, for the large length = 1 << 21 two errors appear:
cuFFT failed to allocate GPU or CPU memory
Failed to execute an FFT on the GPU
The first error is an expected one, we have done this intentionally. But the second one is not. Although the device was reset and new resources were successfully allocated, cuFFT failed to execute an FFT.
I use GTX 970. I tried all combinations of: cuda 6.5, cuda 7.5, 32-bit platform, 64-bit platform etc, but unsuccessfully.
This was apparently a problem limited to the out-of-memory error recovery behaviour of older versions of cuFFT, and was rectified during the CUDA 8 release cycle. If (6 years later) you are still using a pre-CUDA 8 version of cuFFT, please update to something more modern and this issue will be resolved.
[answer assembled from comments and added as a community wiki entry to get the question off the unanswered list for the CUDA and cuFFT tags]
I compiled a Linux program on Windows via Mingw but the output is wrong.
Error description:
The output of the program looks different on Windows than on Linux. This is how it looks on Windows:
>tig_2
CAATCTTCAGAGTCCAGAGTGGGAGGCACAGACTACAGAAAATGAGCAGCGGGGCTGGTA
>cluster_1001_conTTGGTGAAGAGAATTTGGACATGGATGAAGGCTTGGGCTTGACCATGCGAAGG
Expected output:
>cluster_1001_contig2
CAATCTTCAGAGTCCAGAGTGGGAGGCACAGACTACAGAAAATGAGCAGCGGGGCTGGTA
>cluster_1001_contig1
TTGGTGAAGAGAATTTGGACATGGATGAAGGCTTGGGCTTGACCATGCGAAGG
(Note: the output is very large to paste it here so the examples above are pseudo-real).
Possible cause:
I have observed that if I convert the line endings in the input file from Linux (LF) to Windows (CRLF), it almost works: only the first character ('>') in the file goes missing. The same code works perfectly on Linux without any line-ending conversion. So the problem must be in the function that parses the input, not in the one that writes the output:
seq_db.Read( db_in.c_str(), options );
Source code:
This is the piece that is parsing the input file. Anyway, I might be wrong — the fault might be somewhere else. In case it is needed, the FULL source code is here :)
// Reads a FASTA/FASTQ-style sequence database from `file` into this
// SequenceDB.  Records start at '>' or '#' headers; a '+' line marks a
// FASTQ quality section, which is skipped.  Sequences not longer than
// options.min_length are dropped; with options.store_disk the sequence
// data is spooled to temp files instead of kept in memory.
void SequenceDB::Read( const char *file, const Options & options )
{
Sequence one;
Sequence dummy;
Sequence des;
Sequence *last = NULL;
FILE *swap = NULL;
// Bug fix: open in binary mode ("rb").  This parser mixes fgets with
// ftell/fseek byte arithmetic; in Windows text mode the CR/LF translation
// makes those byte offsets wrong (and the leading '>' was lost), so the
// file must be read untranslated.  On Linux "rb" behaves exactly like "r".
FILE *fin = fopen( file, "rb" );
char *buffer = NULL;
char *res = NULL;
size_t swap_size = 0;
int option_l = options.min_length;
if( fin == NULL ) bomb_error( "Failed to open the database file" );
if( options.store_disk ) swap = OpenTempFile( temp_dir );
Clear();
dummy.swap = swap;
buffer = new char[ MAX_LINE_SIZE+1 ];
while (not feof( fin ) || one.size) { /* do not break when the last sequence is not handled */
buffer[0] = '>';
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL && one.size == 0) break;
if( buffer[0] == '+' ){
// FASTQ '+' line: consume the rest of the (possibly long) header line...
int len = strlen( buffer );
int len2 = len;
while( len2 && buffer[len2-1] != '\n' ){
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL ) break;
len2 = strlen( buffer );
len += len2;
}
one.des_length2 = len;
dummy.des_length2 = len;
// ...then skip the quality data, which is as long as the sequence itself.
fseek( fin, one.size, SEEK_CUR );
}else if (buffer[0] == '>' || buffer[0] == '#' || (res==NULL && one.size)) {
if ( one.size ) { // write previous record
one.dat_length = dummy.dat_length = one.size;
if( one.identifier == NULL || one.Format() ){
printf( "Warning: from file \"%s\",\n", file );
printf( "Discarding invalid sequence or sequence without identifier and description!\n\n" );
if( one.identifier ) printf( "%s\n", one.identifier );
printf( "%s\n", one.data );
one.size = 0;
}
one.index = dummy.index = sequences.size();
if( one.size > option_l ) {
if ( swap ) {
swap_size += one.size;
// so that size of file < MAX_BIN_SWAP about 2GB
if ( swap_size >= MAX_BIN_SWAP) {
dummy.swap = swap = OpenTempFile( temp_dir );
swap_size = one.size;
}
dummy.size = one.size;
dummy.offset = ftell( swap );
dummy.des_length = one.des_length;
sequences.Append( new Sequence( dummy ) );
one.ConvertBases();
fwrite( one.data, 1, one.size, swap );
}else{
//printf( "==================\n" );
sequences.Append( new Sequence( one ) );
//printf( "------------------\n" );
//if( sequences.size() > 10 ) break;
}
//if( sequences.size() >= 10000 ) break;
}
}
// Start a new record: collect the (possibly multi-buffer) header line.
one.size = 0;
one.des_length2 = 0;
int len = strlen( buffer );
int len2 = len;
des.size = 0;
des += buffer;
while( len2 && buffer[len2-1] != '\n' ){
if ( (res=fgets( buffer, MAX_LINE_SIZE, fin )) == NULL ) break;
des += buffer;
len2 = strlen( buffer );
len += len2;
}
// Record where the description started (in bytes) and how long it is.
size_t offset = ftell( fin );
one.des_begin = dummy.des_begin = offset - len;
one.des_length = dummy.des_length = len;
int i = 0;
// Trim the marker character and one leading space/tab, then cut the
// identifier at options.des_len (if set) or at the end of the line.
if( des.data[i] == '>' || des.data[i] == '#' || des.data[i] == '+' ) i += 1;
if( des.data[i] == ' ' or des.data[i] == '\t' ) i += 1;
if( options.des_len and options.des_len < des.size ) des.size = options.des_len;
while( i < des.size and ( des.data[i] != '\n') ) i += 1;
des.data[i] = 0;
one.identifier = dummy.identifier = des.data;
} else {
// Plain sequence data: append to the record under construction.
one += buffer;
}
}
#if 0
int i, n = 0;
for(i=0; i<sequences.size(); i++) n += sequences[i].bufsize + 4;
cout<<n<<"\t"<<sequences.capacity() * sizeof(Sequence)<<endl;
int i;
scanf( "%i", & i );
#endif
one.identifier = dummy.identifier = NULL;
delete[] buffer;
fclose( fin );
}
The format of the input file is like this:
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
> comment
ACGTACGTACGTACGTACGTACGTACGTACGT
etc
The issue is more than likely you need to open the file using the "rb" switch in the call to fopen. The "rb" opens the file in binary mode, as opposed to "r", which opens a file in "text" mode.
Since you're going back and forth between Linux and Windows, the end-of-line characters will be different. If you open the file as "text" in Windows, but the file was formatted for Linux, you're lying to Windows that it is a text file. So the runtime will do CR/LF conversion all wrong.
Therefore you should open the file as binary, "rb" so that the CR/LF translation isn't done.
i have this code:
BOOLEAN Recurse = FALSE;
DWORD NumPasses = 1;

// Parses the command line: /s or -s enables recursion, /p or -p N sets the
// number of passes, and exactly one remaining argument names the file.
// Returns Usage(argv[0]) on any malformed command line.
int _tmain( int argc, TCHAR *argv[] )
{
    BOOL foundFileArg = FALSE;
    int i;
    if( argc < 2 ) {
        return Usage( argv[0] );
    }
    for( i = 1; i < argc; i++ ) {
        if( !_tcsicmp( argv[i], TEXT("/s") ) ||
            !_tcsicmp( argv[i], TEXT("-s") )) {
            Recurse = TRUE;
        } else if( !_tcsicmp( argv[i], TEXT("/p") ) ||
                   !_tcsicmp( argv[i], TEXT("-p") )) {
            // Bug fix: the pass count lives in argv[i+1], so the bound must
            // be argc > i+1.  The original tested argc > i (always true
            // inside this loop), so "/p" as the final argument passed
            // argv[argc] -- one past the end of the array -- to _ttoi,
            // which triggered the reported assertion failure.
            NumPasses = argc > i + 1 ? _ttoi( argv[i+1] ) : 1;
            if( !NumPasses ) return Usage( argv[0] );
            i++;
        } else {
            if( foundFileArg ) return Usage( argv[0] );
            foundFileArg = TRUE;
        }
    }
    return 0;
}
I get an assertion failure.
Please suggest where the problem might be and where to look. Is it a problem with the _ttoi function I'm using when it fails?
Do I have to allocate a buffer,
and how can I resolve it?
Thanks
this line
NumPasses = argc > i ? _ttoi( argv[i+1] ) : 1;
should be
NumPasses = argc > 1+i ? _ttoi( argv[i+1] ) : 1;
Nick is right; don't forget that arrays start at zero in C/C++. If there are 5 elements, it means argv[0] to argv[4] are valid - not argv[5].
I'm looking for a portable way to a) convert a string to a 64-bit signed integer (int64_t), and b) determine if it won't fit (overflows). Is there any way to do this?
strtoll is pretty portable these days. And if it isn't in your case, you could always borrow the implementation from the GNU C runtime library and add that to your project...
errno = 0;
long long val = strtoll (string, NULL, 0);
if (errno == ERANGE)
// there was an overflow conversion error
Run through the characters of the string one at a time and make your integer. if the character you're parsing will cause an overflow, then you know you're about to overflow. this code is the basic idea- doesn't handle errors or negative numbers, but should give you the idea...
bool ConvertToInt( const char* inString, int64_t& outInt )
{
int64_t kInt64Max = 0x7fffffffffffffff;
const char* c = inString;
outInt = 0;
while( *c != '\0' )
{
int charValue = *c - '0';
//outInt will be assigned outInt * 10 + charValue, so to check if that will overflow
//use algebra and move stuff around so that you can do the math without overflowing
if( outInt > ( kInt64Max - charValue ) / 10 )
{
//overflow
return false;
}
outInt = outInt * 10 + charValue;
++c;
}
return true;
}
If you want full credit on your homework, make sure to handle negative numbers and non-numeric characters. [Edited to increment the c pointer — thanks for the tip :)]
So a 'long long'? A signed int64_t can hold from −9,223,372,036,854,775,808 to 9,223,372,036,854,775,807, and you can just see that from the string. For example, with std::string:
int stringLength;
string myString("123456789");
stringLength = myString.length();
That code gets the length of your string. To determine whether it overflows just check the number of digits, and if there might be an overflow, check the first digit. To convert to int64_, use casting:
http://www.learncpp.com/cpp-tutorial/44-type-conversion-and-casting/
That link should answer your question. (However it's for C-style strings.) And one last clarification, is your string a std::string or not?
To cater for Visual C++ 10.0 (as I write this 11.0 is in beta), which apparently does not have strtoll or any equivalent,
#include <assert.h> // assert
#include <errno.h> // errno
#include <stdint.h> // int64_t
#include <string> // std::string
#include <stdexcept> // std::runtime_error, std::range_error
#include <stdlib.h> // EXIT_FAILURE, EXIT_SUCCESS, strtoll
#include <iostream>
using namespace std;
#if defined( _MSC_VER )
# if _MSC_VER <= 1600
# include <ole2.h>
// Fallback strtoll for Visual C++ 10 and earlier (which lack it), built on
// the OLE Automation VarI8FromStr conversion.  Only base 10 is supported.
// Mimics the real strtoll: on overflow sets errno = ERANGE and returns
// LLONG_MIN/LLONG_MAX; on any other conversion failure sets errno = EILSEQ
// and returns 0, leaving *str_end at the start of the string.
// NOTE(review): LLONG_MIN/LLONG_MAX need <limits.h> and strlen needs
// <string.h>; neither is included directly here -- presumably pulled in
// transitively by the headers above, but verify under MSVC.
inline long long strtoll( char const *str, char **str_end, int base )
{
assert(( "Only base 10 for Visual C++ 10 and earlier", base == 10 ));
// VarI8FromStr takes a wide string, so widen the input first.
std::wstring const ws( str, str + strlen( str ) );
LONG64 result;
HRESULT const hr = VarI8FromStr(
ws.c_str(), 0, LOCALE_NOUSEROVERRIDE, &result
);
switch( hr )
{
case S_OK:
if( str_end != 0 )
{
// The whole string was consumed on success.
*str_end = const_cast<char*>( str + strlen( str ) );
}
return result;
case DISP_E_OVERFLOW:
errno = ERANGE;
if( str_end != 0 )
{
*str_end = const_cast<char*>( str );
}
return (*str == '-'? LLONG_MIN : LLONG_MAX);
default:
errno = EILSEQ;
if( str_end != 0 )
{
*str_end = const_cast<char*>( str );
}
return 0;
}
}
# endif
#endif
// Truthiness helper used in the `hopefully( x ) || throwX( "..." )` idiom.
template< class Type >
bool hopefully( Type const& v )
{
    return v ? true : false;
}

// Always throws; declared as returning bool so it can sit in boolean chains.
bool throwX( std::string const& s )
{
    throw std::runtime_error( s );
}

// Range-error variant of throwX, for out-of-range parse results.
bool throwRangeX( std::string const& s )
{
    throw std::range_error( s );
}
// Converts a decimal string to int64_t via strtoll.
// Throws range_error when the value does not fit in 64 bits, and
// runtime_error for any other failure that strtoll reports via errno.
// NOTE(review): a purely non-numeric string typically yields 0 with errno
// untouched, so it is returned as 0 rather than throwing -- verify that
// callers accept this.
int64_t int64From( string const& s )
{
    errno = 0;      // strtoll only ever sets errno, never clears it
    int64_t const result = strtoll( s.c_str(), nullptr, 10 );
    if( errno == ERANGE )
        throwRangeX( "int64From: specified nr too large" );     // typo fix: was "specificed"
    else if( errno != 0 )
        throwX( "int64From: parsing failed" );
    return result;
}
// Parses the last command-line argument (or the program name when none is
// given) as a decimal int64 and echoes it; reports conversion errors on
// stderr (range_error derives from runtime_error, so both are caught).
int main( int argc, char** argv )
{
    try
    {
        int64_t const x = int64From( argv[argc - 1] );
        wcout << x << endl;
        return EXIT_SUCCESS;
    }
    catch( runtime_error const& x )
    {
        cerr << "!" << x.what() << endl;
        return EXIT_FAILURE;
    }
}
Then for Visual C++ 10 and earlier, link with [oleaut32.lib].
I tested this with MinGW g++ and Visual C++.
PS: Alternatively you can just use an istringstream, but it does not reliably tell you why it failed when it fails — and it seems to be a requirement to detect overflow as such.
Based on a helpful response from Joshua Glazer, I came up with the following solution which does error checking and also works for negative integers:
#define __STDC_LIMIT_MACROS
#include <stdint.h>
// convert a string to an integer, return whether successful
// convert a string to an integer, return whether successful
// Accepts an optional leading '+' or '-'.  Returns false on an empty
// string, a bare sign, any non-digit character, or overflow of int64_t
// (out may be partially written in those cases).  The negative branch
// accumulates downward so INT64_MIN itself round-trips.
bool string_to_int(std::string in, int64_t &out) {
    size_t pos = 0;
    out = 0;
    if (in.size() == 0)
        return false;
    if (in[pos] == '+')
        pos++;
    if (pos < in.size() && in[pos] == '-') {
        pos++;
        // Bug fix: a bare sign ("-" or "+") used to be accepted and
        // reported as 0; require at least one digit after the sign.
        if (pos == in.size())
            return false;
        while (pos < in.size()) {
            if (in[pos] < '0' || in[pos] > '9')
                return false;
            int c = in[pos]-'0';
            // overflow check rearranged so the comparison cannot overflow
            if (out < (INT64_MIN+c)/10)
                return false;
            out = out*10-c;
            pos++;
        }
    } else {
        if (pos == in.size())
            return false;   // bug fix: bare "+" is not a number either
        while (pos < in.size()) {
            if (in[pos] < '0' || in[pos] > '9')
                return false;
            int c = in[pos]-'0';
            if (out > (INT64_MAX-c)/10)
                return false;
            out = out*10+c;
            pos++;
        }
    }
    return true;
}