In my project, there is a library that has code to load an fbx using the FBX SDK 2017.1 from Autodesk.
Loading the fbx crashes in debug & release. The crash occurs in 2 different ways and what seems to be at random:
the crash is either simply "Segmentation fault" (most of the time)
the crash is a dump of all the libraries that may be involved in the crash, and the allusion of a problem with a realloc() call. (every once in a while) From the context of the message, I haven't been able to make out which realloc that may be (the message is followed by a dump of all the libs that are linked).
The code does contain realloc() calls, specifically in the allocation of buffers used in a custom implementation of an FbxStream
Most of the code path is entirely identical for windows, only a number of platform specific sections have been re-implemented. On windows, it runs as expected.
What strikes me is that if I run the program in either gdb or valgrind, the crash disappears! So I set out to find uninitialized members/values, but so far I could not find anything suspicious. I used CppDepend/CppCheck and VS2012 code analysis, but both came up empty on un-initialized variables/members
To give some background on FBX loading; the FBX SDK has a number of ways to deal with different types of resources (obj, 3ds, fbx,..). They can be loaded from file or from stream. To support large files, the stream option is the more relevant option. The code below is far from perfect, but what interests me mostly at present is the reason why valgrind/gdb would not crash. I've left the SDK documentation on top of ReadString, since it's the most complex one.
class MyFbxStream : public FbxStream{
uint32 m_FormatID;
uint32 m_Error;
EState m_State;
size_t m_Pos;
size_t m_Size;
const Engine::Buffer* const m_Buffer;
MyFbxStream& operator = (const MyFbxStream& other) const;
public:
MyFbxStream(const Engine::Buffer* const buffer)
: m_FormatID(0)
, m_Error(0)
, m_State(eClosed)
, m_Pos(0)
, m_Size(0)
, m_Buffer(buffer) {};
virtual ~MyFbxStream() {};
virtual bool Open(void* pStreamData) {
m_FormatID = *(uint32*)pStreamData;
m_Pos = 0;
m_State = eOpen;
m_Size = m_Buffer->GetSize();
return true;
}
virtual bool Close() {
m_Pos = m_Size = 0;
m_State = eClosed;
return true;
}
virtual int Read(void* pData, int pSize) const {
const unsigned char* data = (m_Buffer->GetBase(m_Pos));
const size_t bytesRead = m_Pos + pSize > m_Buffer->GetSize() ? (m_Buffer->GetSize() - m_Pos) : pSize;
const_cast<MyFbxStream*>(this)->m_Pos += bytesRead;
memcpy(pData, data, bytesRead);
return (int)bytesRead;
}
/** Read a string from the stream.
* The default implementation is written in terms of Read() but does not cope with DOS line endings.
* Subclasses may need to override this if DOS line endings are to be supported.
* \param pBuffer Pointer to the memory block where the read bytes are stored.
* \param pMaxSize Maximum number of bytes to be read from the stream.
* \param pStopAtFirstWhiteSpace Stop reading when any whitespace is encountered. Otherwise read to end of line (like fgets()).
* \return pBuffer, if successful, else NULL.
* \remark The default implementation terminates the \e pBuffer with a null character and assumes there is enough room for it.
* For example, a call with \e pMaxSize = 1 will fill \e pBuffer with the null character only. */
virtual char* ReadString(char* pBuffer, int pMaxSize, bool pStopAtFirstWhiteSpace = false) {
assert(!pStopAtFirstWhiteSpace); // "Not supported"
const size_t pSize = pMaxSize - 1;
if (pSize) {
const char* const base = (const char* const)m_Buffer->GetBase();
char* cBuffer = pBuffer;
const size_t totalSize = std::min(m_Buffer->GetSize(), (m_Pos + pSize));
const char* const maxSize = base + totalSize;
const char* sum = base + m_Pos;
bool done = false;
// first align the copy on alignment boundary (4byte)
while ((((size_t)sum & 0x3) != 0) && (sum < maxSize)) {
const unsigned char c = *sum++;
*cBuffer++ = c;
if ((c == '\n') || (c == '\r')) {
done = true;
break;
} }
// copy from alignment boundary to boundary (4byte)
if (!done) {
int64 newBytesRead = 0;
uint32* dBuffer = (uint32*)cBuffer;
const uint32* dBase = (uint32*)sum;
const uint32* const dmaxSize = ((uint32*)maxSize) - 1;
while (dBase < dmaxSize) {
const uint32 data = *(const uint32*const)dBase++;
*dBuffer++ = data;
if (((data & 0xff) == 0x0a) || ((data & 0xff) == 0x0d)) { // third bytes, 4 bytes read..
newBytesRead -= 3;
done = true;
break;
} else {
const uint32 shiftedData8 = data & 0xff00;
if ((shiftedData8 == 0x0a00) || (shiftedData8 == 0x0d00)) { // third bytes, 3 bytes read..
newBytesRead -= 2;
done = true;
break;
} else {
const uint32 shiftedData16 = data & 0xff0000;
if ((shiftedData16 == 0x0a0000) || (shiftedData16 == 0x0d0000)) { // second byte, 2 bytes read..
newBytesRead -= 1;
done = true;
break;
} else {
const uint32 shiftedData24 = data & 0xff000000;
if ((shiftedData24 == 0x0a000000) || (shiftedData24 == 0x0d000000)) { // first byte, 1 bytes read..
done = true;
break;
} } } } }
newBytesRead += (int64)dBuffer - (int64)cBuffer;
if (newBytesRead) {
sum += newBytesRead;
cBuffer += newBytesRead;
} }
// copy anything beyond the last alignment boundary (4byte)
if (!done) {
while (sum < maxSize) {
const unsigned char c = *sum++;
*cBuffer++ = c;
if ((c == '\n') || (c == '\r')) {
done = true;
break;
} } }
const size_t bytesRead = cBuffer - pBuffer;
if (bytesRead) {
const_cast<MyFbxStream*>(this)->m_Pos += bytesRead;
pBuffer[bytesRead] = 0;
return pBuffer;
} }
pBuffer = NULL;
return NULL;
}
virtual void Seek(const FbxInt64& pOffset, const FbxFile::ESeekPos& pSeekPos) {
switch (pSeekPos) {
case FbxFile::ESeekPos::eBegin: m_Pos = pOffset; break;
case FbxFile::ESeekPos::eCurrent: m_Pos += pOffset; break;
case FbxFile::ESeekPos::eEnd: m_Pos = m_Size - pOffset; break;
}
}
virtual long GetPosition() const { return (long)m_Pos; }
virtual void SetPosition(long position) { m_Pos = position; }
virtual void ClearError() { m_Error = 0; }
virtual int GetError() const { return m_Error; }
virtual EState GetState() { return m_State; }
virtual int GetReaderID() const { return m_FormatID; }
virtual int GetWriterID() const { return -1; } // readonly stream
virtual bool Flush() { return true; } // readonly stream
virtual int Write(const void* /*d*/, int /*s*/) { assert(false); return 0; } // readonly stream
};
I assume that there may be undefined behavior related to malloc/free/realloc operations that somehow do not occur in gdb. But if this is the case, I also expect the Windows binaries to have problems.
Also, I don't know if this is relevant, but when I trace into the Open() function and print the "m_Buffer" pointer's value (or "this"), I get a pointer value starting with 0xfffffff.., which to a Windows programmer looks like a problem. However, can I draw the same conclusion on Linux, given that I also saw this happening in static function calls etc.?
if I run the program in either gdb or valgrind, the crash disappears!
There are two possible explanations:
There are multiple threads, the code exhibits a data race, and both GDB and Valgrind significantly affect execution timing.
GDB disables address randomization; Valgrind significantly affects program layout, and the crash is sensitive to the exact layout.
The steps I would take:
Set ulimit -c unlimited, run the program and get it to dump core, then use post-mortem analysis in GDB.
Run the program under GDB, use set disable-randomization off and see if you can get to crash point that way.
Run the program with Helgrind or DRD, Valgrind's thread error detectors.
Related
I've tried to write an automatic indenter; however, it skips characters when it adds new characters to the stream. I've tried debugging it and verified that from_next and to_next, as well as from and to, are working correctly.
Surely I've missed something in the specs, but here is my code — maybe you can help me:
// Converts the internal sequence to the external one, expanding each '\n'
// into the newline plus tabSize*indentLevel pending spaces that are emitted
// before the next non-newline character.
virtual result_t do_out(state_type& state, const intern_type* from, const intern_type* from_end, const intern_type*& from_next,
extern_type* to, extern_type* to_end, extern_type*& to_next) const override
{
    auto result = std::codecvt_base::noconv;
    while (from < from_end && to < to_end)
    {
        const bool mustIndent = getState(state).missingWhitespaces > 0u && *from != '\n';
        if (!mustIndent)
        {
            // Plain copy; a newline schedules the indentation for what follows.
            *to = *from;
            if (*from == '\n')
            {
                getState(state).missingWhitespaces = tabSize * indentLevel;
            }
            ++to;
            ++from;
            continue;
        }
        // Flush as many pending indentation spaces as the output allows.
        while (getState(state).missingWhitespaces > 0u && to < to_end)
        {
            *to = ' ';
            ++to;
            getState(state).missingWhitespaces--;
        }
        result = std::codecvt_base::partial;
        if (to == to_end)
        {
            break; // output exhausted mid-indent; state remembers the rest
        }
    }
    from_next = from;
    to_next = to;
    return result;
}
The state object is also working properly. The problem only occurs in between function calls.
Edit: Changing the result after if (to < to_end) to std::codecvt_base::ok doesn't solve the problem either.
After some more digging I found the solution to my problem. I got a detailed explanation of std::codecvt from this website: http://stdcxx.apache.org/doc/stdlibref/codecvt.html
It turned out, that I forgot to override these two methods:
virtual int do_length(state_type& state, const extern_type *from, const extern_type *end, size_t max) const;
Determines and returns n, where n is the number of elements of extern_type in the source range [from,end) that can be converted to max or fewer characters of intern_type, as if by a call to in(state, from, from_end, from_next, to, to_end, to_next) where to_end == to + max.
Sets the value of state to correspond to the shift state of the
sequence starting at from + n.
Function do_length must be called under the following preconditions:
state is either initialized to the beginning of a sequence or equal to
the result of the previous conversion on the sequence.
from <= end is well-defined and true.
Note that this function does not behave similarly to the C Standard
Library function mbsrtowcs(). See the mbsrtowcs.cpp example program
for an implementation of this function using the codecvt facet.
virtual int do_max_length() const throw();
Returns the maximum value that do_length() can return for any valid combination of its first three arguments, with the fourth argument max set to 1.
I implemented them this way and it worked:
// Returns how many of the 'max' output slots remain after accounting for the
// pending indentation in 'state' and for the indentation each newline run in
// [from, end) will inject.
virtual int do_length(state_type& state, const extern_type* from, const extern_type* end, size_t max) const override
{
    auto budget = max;
    // Spaces still owed from a previous conversion consume budget first.
    budget -= std::min(static_cast<unsigned int>(budget), getState(state).missingWhitespaces);
    auto cursor = from + getState(state).missingWhitespaces;
    bool pendingNewLine = false;
    while (cursor < end && budget > 0u)
    {
        if (*cursor == '\n')
        {
            // Remember that a newline run started; charge for it when it ends.
            if (!pendingNewLine)
            {
                pendingNewLine = true;
            }
        }
        else if (pendingNewLine)
        {
            budget -= std::min(tabSize * indentLevel, budget);
            if (budget == 0u)
            {
                break;
            }
            pendingNewLine = false;
        }
        ++cursor;
    }
    return budget;
}
// Worst case for a single internal character: one '\n' expands into a full
// indentation run of tabSize*indentLevel spaces.
virtual int do_max_length() const throw() override
{
    return indentLevel * tabSize;
}
A function called on object `output` (of the same class as object `input`) seems to cause unexpected behaviour and overwrites the data behind `input`'s malloc'ed private member (the pointer addresses stay the same)
For object output both *fileStr and *p_file are NULL and for input both points at data
Both CASE1/CASE2 or combination of each #ifdef will cause input.fileStr data to be changed
input.fileStr data itself is malloc'ated by class1::open if called (called only by input) - else it's a NULL pointer by default
Header
// Owns a C file handle plus a malloc'ed, NUL-terminated in-memory copy of the
// file's contents; both are released by the destructor.
// NOTE(review): the class is copyable but owns raw pointers — copying an
// instance would double-free in ~class1(). Consider deleting the copy
// operations if no caller relies on them.
class class1
{
private:
    FILE *p_file = NULL;   // opened by open()/create(); closed by the destructor
    char *fileStr = NULL;  // file contents from open(); freed by the destructor
    bool encrypt_step1();
public:
    bool open(char *pathto);
    bool create(char *pathto);
    bool encrypt();
    // Non-owning view of the loaded contents (NULL until open() succeeds).
    // Added: sub_function() calls get_fileStr(), but it was never declared.
    char *get_fileStr() { return fileStr; }
    ~class1();
};
bool sub_function(char *pathIN);
Source Code
// Opens the file at 'pathto' and slurps its entire contents into a freshly
// malloc'ed, NUL-terminated buffer (this->fileStr). Returns true on success.
// On failure after fopen(), the handle is left for the destructor to close.
bool class1::open(char *pathto)
{
    if (PathFileExistsA(pathto))
        this->p_file = fopen(pathto, "rb+");
    else
        return 0;
    if (!(this->p_file))
    {
        printf("Can't open\n");
        return 0;
    }
    // Determine the file size by seeking to the end.
    fseek(p_file, 0, SEEK_END);
    long filesize = ftell(p_file);
    fseek(p_file, 0, SEEK_SET);
    if (filesize < 0)               // ftell() failed — previously unchecked
        return 0;
    this->fileStr = (char*)malloc(filesize + 1);
    if (!this->fileStr)             // allocation failure — previously unchecked
        return 0;
    // fread() may return fewer bytes than requested; terminate at the count
    // actually read.
    this->fileStr[(fread(this->fileStr, 1, filesize, this->p_file))] = '\0';
    return 1;
}
// Creates/overwrites the file at 'pathto' (CASE2), first backing up any
// existing file to "<pathto>.bak" or "<pathto>.bakN" (CASE1).
// Returns true when a writable handle is held afterwards.
bool class1::create(char *pathto)
{
#ifdef CASE1
    if (PathFileExistsA(pathto))
    {
        // Room for the path, ".bak" and up to 10 digits of a numeric suffix;
        // the original MAX_PATH-sized buffer could overflow on long paths.
        char pathtobak[MAX_PATH + 16];
        snprintf(pathtobak, sizeof(pathtobak), "%s.bak", pathto);
        if (PathFileExistsA(pathtobak))
        {
            // Probe ".bak1", ".bak2", ... until an unused name is found.
            // snprintf replaces the nonstandard itoa() and cannot overflow.
            const size_t baseLen = strlen(pathtobak);
            int i = 0;
            do
            {
                ++i;
                snprintf(pathtobak + baseLen, sizeof(pathtobak) - baseLen, "%d", i);
            } while (PathFileExistsA(pathtobak));
        }
        std::experimental::filesystem::copy_file(pathto, pathtobak);
    }
#endif
#ifdef CASE2
    this->p_file = fopen(pathto, "wb");
#endif
    // Always return a value: the original only returned when NOERRORS was
    // undefined, so a NOERRORS build fell off the end of the function (UB).
    return this->p_file != NULL;
}
// Releases the content buffer and closes the file handle, if present.
class1::~class1()
{
    free(this->fileStr);       // free(NULL) is a well-defined no-op
    this->fileStr = NULL;
    if (this->p_file != NULL)  // fclose(NULL) is NOT safe, so keep the check
    {
        fclose(this->p_file);
        this->p_file = NULL;
    }
}
// Loads and encrypts the file at pathIN, then prepares an output file whose
// name swaps the last three characters of the extension for "ext".
// NOTE(review): the return values of open()/create() are ignored, and the
// path surgery assumes pathIN ends in a 3-character extension — TODO confirm.
bool sub_function(char *pathIN)
{
class1 input;
input.open(pathIN);
input.encrypt();//omitted since this should be irrelevant
// Build pathOUT = pathIN with its last 3 characters replaced by "ext".
char pathOUT[MAX_PATH];
strcpy(pathOUT, pathIN);
char *OUText = pathOUT;
OUText += (strlen(pathOUT)-3);
*OUText = '\0';
strcat(pathOUT, "ext");
class1 output;
output.create(pathOUT);//bug here
// Borrowed pointer into input's buffer; invalidated when input is destroyed
// (or when encrypt_step1() realloc's fileStr — see the discussion below).
char *next = input.get_fileStr();
...
}
It seems like memory access violation, but even CASE1 that's simple look up for duplicate files with only use of local variable still causes unexpected behaviour, so I have issues pinpointing the cause
Memory seems to be already released
Most plausible cause would be memory being marked as free, but I don't deallocate it outside of destructor, but when running program further once destructor of input has been called free(input.fileStr) will crash with is_block_type_valid(header->_block_use) exception
Actual issue with working example
Source Code starting from sub_function::input.encrypt()
// First encryption pass: expands fileStr into three bool arrays, encrypts,
// and shrinks fileStr back via realloc. The ISSUE/CORRECT blocks demonstrate
// the realloc pitfall this question is about.
bool class1::encrypt_step1()
{
// One bool per bit: (len+1) * 8 slots. NOTE(review): sizeof(bool)*8 assumes
// 8 bits per char and multiplies by sizeof(bool) redundantly — TODO confirm
// intended sizing. malloc results are also unchecked.
bool *strBOOL_11_00 = (bool*)malloc(((strlen(this->fileStr)) + 1) * ((sizeof(bool)) * 8));
bool *strBOOL_10_01 = (bool*)malloc(((strlen(this->fileStr)) + 1) * ((sizeof(bool)) * 8));
bool *strBOOL_10_11 = (bool*)malloc(((strlen(this->fileStr)) + 1) * ((sizeof(bool)) * 8));
char *fileStrIt = this->fileStr;
char *fileStrIt2 = ((this->fileStr) + 1);
// Write cursors into the three bit arrays; their final positions determine
// the reallocated size below.
bool *next1100 = strBOOL_11_00;
bool *next1001 = strBOOL_10_01;
bool *next1011 = strBOOL_10_11;
//translating to binary array iterating through pointers above one by one happens here
//char->bin/encrypt/bin->char
//ommited
//reallocation to fit new encrypted and translated back to char that caused issues
#ifdef ISSUE
// BUG (deliberately kept to illustrate the problem): realloc may move the
// block; this->fileStr is never updated, so it dangles on success, and the
// destructor later frees the stale pointer.
char *fileStr_temp = (char *)realloc(this->fileStr, ((next1011 - strBOOL_10_11) + (next1001 - strBOOL_10_01) + (next1100 - strBOOL_11_00) + 1));
if (!fileStr_temp)
return 0;
//original fileStr points at freed memory
#endif
#ifdef CORRECT
// Correct pattern: only adopt the new pointer after realloc succeeds.
char *fileStr_temp = (char *)realloc(this->fileStr, ((next1011 - strBOOL_10_11) + (next1001 - strBOOL_10_01) + (next1100 - strBOOL_11_00) + 1));
if (!fileStr_temp)
return 0;
else
this->fileStr = fileStr_temp;//original fileStr points at new adress with reallocated data
#endif
free(strBOOL_11_00);
strBOOL_11_00 = NULL;
free(strBOOL_10_01);
strBOOL_10_01 = NULL;
free(strBOOL_10_11);
strBOOL_10_11 = NULL;
return 1;
}
// Drives the encryption pipeline over fileStr.
// NOTE(review): the return value of encrypt_step1() is ignored, so a failed
// realloc/allocation goes unnoticed — consider propagating it.
bool class1::encrypt()
{
encrypt_step1();
...//other steps (irrelevant)
return 1;
}
How would I go about returning a string built from a buffer within a function without dynamically allocating memory?
Currently I have this function to consider:
// Reads null-terminated string from buffer in instance of buffer class.
// uint16 :: unsigned short
// ubyte :: unsigned char
// Reads a NUL-terminated string starting at ByteIndex from Buffer.
// The caller owns the returned array and must delete[] it.
// uint16 :: unsigned short
// ubyte :: unsigned char
ubyte* Readstr( void ) {
    // Fix: the original `new ubyte[]()` is ill-formed — an array-new needs a
    // size. Allocate ByteSize+1 zero-initialised bytes so the result is
    // always NUL-terminated, even when no terminator occurs in range.
    ubyte* Result = new ubyte[ ByteSize + 1 ]();
    for( uint16 i = 0; i < ByteSize; i ++ ) {
        Result[ i ] = Buffer[ ByteIndex ];
        ByteIndex ++;
        if ( Buffer[ ByteIndex - 1 ] == ubyte( 0 ) ) {
            // NOTE(review): this second increment skips the byte AFTER the
            // terminator (kept from the original; all variants in this file
            // do the same) — confirm it is intended.
            ByteIndex ++;
            break;
        };
    };
    return Result;
};
While I can return the built string, I can't do this without dynamic allocation. This becomes a problem if you consider the following usage:
// Instance of buffer class "Buffer" calling Readstr():
cout << Buffer.Readstr() << endl;
// or...
ubyte String[] = Buffer.String();
Usages similar to this call result in the same memory leak as the data is not being deleted via delete. I don't think there is a way around this, but I am not entirely sure if it's possible.
Personally, I'd recommend just return std::string or std::vector<T>: this neatly avoids memory leaks and the string won't allocate memory for small strings (well, most implementations are going that way but not all are quite there).
The alternative is to create a class which can hold a big enough array and return an object that type:
// Fixed-capacity byte holder returned by value, avoiding heap allocation.
struct buffer {
    enum { maxsize = 16 };
    // Fix: the member was named `buffer`, but a data member may not share its
    // class's name — that declaration is ill-formed.
    ubyte data[maxsize];
};
If you want get more fancy and support bigger strings which would then just allocate memory you'll need to deal a bit more with constructors, destructors, etc. (or just use std::vector<ubyte> and get over it).
There are at least three ways you could reimplement the method to avoid a direct allocation with new.
The Good:
Use a std::vector (This will allocate heap memory):
std::vector<ubyte> Readstr()
{
std::vector<ubyte> Result;
for (uint16 i = 0; i < ByteSize; i++)
{
Result.push_back(Buffer[ByteIndex]);
ByteIndex++;
if (Buffer[ByteIndex - 1] == ubyte(0))
{
ByteIndex++;
break;
}
}
return Result;
}
The Bad:
Force the caller to provide an output buffer and possibly a size do avoid overflows (Does not directly allocate memory):
// Copies bytes (terminator included) into caller-supplied storage, writing
// at most maxCount bytes. NOTE: if maxCount is reached before a terminator,
// the output is NOT NUL-terminated — the caller must account for that.
ubyte* Readstr(ubyte* outputBuffer, size_t maxCount)
{
    for (uint16 n = 0; n < ByteSize; ++n)
    {
        if (n == maxCount)
            break; // destination full
        const ubyte b = Buffer[ByteIndex];
        ByteIndex++;
        outputBuffer[n] = b;
        if (b == ubyte(0))
        {
            ByteIndex++; // skip the byte after the terminator
            break;
        }
    }
    return outputBuffer;
}
The Ugly:
Use an internal static array and return a reference to it:
// Returns a pointer into a function-local static buffer: no allocation, but
// not thread-safe, not reentrant, and overwritten by the next call.
ubyte* Readstr()
{
    enum { MAX_SIZE = 2048 }; // Up to you to decide the max size...
    static ubyte outputBuffer[MAX_SIZE];
    for (uint16 n = 0; n < ByteSize; ++n)
    {
        if (n == MAX_SIZE)
            break; // static buffer full
        const ubyte b = Buffer[ByteIndex];
        ByteIndex++;
        outputBuffer[n] = b;
        if (b == ubyte(0))
        {
            ByteIndex++; // skip the byte after the terminator
            break;
        }
    }
    return outputBuffer;
}
Be aware that this last option has several limitations, including possibility of data races in multithreaded application and inability to call it inside a recursive function, among other subtle issues. But otherwise, is probably the closest to what you are looking for and can be used safely if you take some precautions and make some assumptions about the calling code.
This is a follow-up to Critique my heap debugger from yesterday. As suggested by bitc, I now keep metadata about the allocated blocks in a separate handwritten hashtable.
The heap debugger now detects the following kinds of errors:
memory leaks (now with more verbose debugging output)
illegal pointers passed to delete (that also takes care of double deletes)
wrong form of delete (array vs. non-array)
buffer overflows
buffer underflows
Feel free to discuss and thanks in advance!
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <new>
namespace
{
// I don't want to #include <algorithm> for a single function template :)
// Minimal std::swap stand-in, kept local to avoid pulling in <algorithm>.
template <typename T>
void my_swap(T& x, T& y)
{
    T tmp(y);
    y = x;
    x = tmp;
}
typedef unsigned char byte;

// 16-byte guard pattern written immediately before and after every payload.
const byte CANARY[] = {0x5A, 0xFE, 0x6A, 0x8D,
                       0x5A, 0xFE, 0x6A, 0x8D,
                       0x5A, 0xFE, 0x6A, 0x8D,
                       0x5A, 0xFE, 0x6A, 0x8D};

// Returns true when the guard block at 'cage' no longer matches CANARY, and
// dumps the bytes (intact ones shown as "__") so the damage is visible.
bool canary_dead(const byte* cage)
{
    const bool dead = memcmp(cage, CANARY, sizeof CANARY) != 0;
    if (!dead)
        return false;
    for (size_t i = 0; i < sizeof CANARY; ++i)
    {
        const byte b = cage[i];
        printf(b == CANARY[i] ? "__ " : "%2X ", b);
    }
    putchar('\n');
    return true;
}
enum kind_of_memory {AVAILABLE, TOMBSTONE, NON_ARRAY_MEMORY, ARRAY_MEMORY};

// Indexed by kind_of_memory; the leading space in " array memory" keeps the
// two messages column-aligned in the log.
const char* kind_string[] = {0, 0, "non-array memory", " array memory"};

// Bookkeeping record for a single allocation, stored in the hashtable.
struct metadata
{
    byte* address;      // payload address handed to the user (past the front canary)
    size_t size;        // payload size in bytes (excludes the canaries)
    kind_of_memory kind;

    // NON_ARRAY_MEMORY (2) and ARRAY_MEMORY (3) both have bit 1 set;
    // AVAILABLE (0) and TOMBSTONE (1) do not.
    bool in_use() const
    {
        return kind & 2;
    }

    void print() const
    {
        // Fix: 'size' is a size_t, so %zu (the original %d is undefined
        // behavior where size_t != int, e.g. LP64); %p requires void*.
        printf("%s at %p (%zu bytes)\n", kind_string[kind], static_cast<void*>(address), size);
    }

    // Linear probing must continue over tombstones and over live entries
    // recorded for a different address.
    bool must_keep_searching_for(void* address)
    {
        return kind == TOMBSTONE || (in_use() && address != this->address);
    }

    // Verifies the guard blocks on both sides of the payload and reports
    // any underflow/overflow found.
    bool canaries_alive() const
    {
        bool alive = true;
        if (canary_dead(address - sizeof CANARY))
        {
            printf("ERROR: buffer underflow at %p\n", static_cast<void*>(address));
            alive = false;
        }
        if (canary_dead(address + size))
        {
            printf("ERROR: buffer overflow at %p\n", static_cast<void*>(address));
            alive = false;
        }
        return alive;
    }
};
// Smallest table size; odd, so address % capacity spreads entries reasonably.
const size_t MINIMUM_CAPACITY = 11;
// Open-addressing hash table (linear probing) keyed by payload address.
// Erased slots become TOMBSTONEs so existing probe chains stay intact.
class hashtable
{
metadata* data;
size_t used;        // live entries + tombstones
size_t capacity;    // total slots in 'data'
size_t tombstones;  // erased slots still occupying probe chains
public:
// Number of live (in-use) entries.
size_t size() const
{
return used - tombstones;
}
// Reports every block still registered — i.e. leaks at shutdown.
void print() const
{
for (size_t i = 0; i < capacity; ++i)
{
if (data[i].in_use())
{
printf(":( leaked ");
data[i].print();
}
}
}
// calloc zeroes the slots, so every entry starts out AVAILABLE (0).
hashtable()
{
used = 0;
capacity = MINIMUM_CAPACITY;
data = static_cast<metadata*>(calloc(capacity, sizeof(metadata)));
tombstones = 0;
}
~hashtable()
{
free(data);
}
// Copying doubles as rehashing: capacity is sized from live entries only
// (tombstones are dropped), forced odd via "| 1", floored at the minimum.
hashtable(const hashtable& that)
{
used = 0;
capacity = 3 * that.size() | 1;
if (capacity < MINIMUM_CAPACITY) capacity = MINIMUM_CAPACITY;
data = static_cast<metadata*>(calloc(capacity, sizeof(metadata)));
tombstones = 0;
for (size_t i = 0; i < that.capacity; ++i)
{
if (that.data[i].in_use())
{
insert_unsafe(that.data[i]);
}
}
}
// Copy-and-swap assignment.
hashtable& operator=(hashtable copy)
{
swap(copy);
return *this;
}
void swap(hashtable& that)
{
my_swap(data, that.data);
my_swap(used, that.used);
my_swap(capacity, that.capacity);
my_swap(tombstones, that.tombstones);
}
// Insert without growth check; assumes find() lands on a writable slot.
void insert_unsafe(const metadata& x)
{
*find(x.address) = x;
++used;
}
// Grows (via copy-rehash) once the load factor reaches 1/2, then inserts.
void insert(const metadata& x)
{
if (2 * used >= capacity)
{
hashtable copy(*this);
swap(copy);
}
insert_unsafe(x);
}
// Linear probe from address % capacity; stops at the matching live entry
// or the first AVAILABLE slot (where the key provably is not present).
metadata* find(void* address)
{
size_t index = reinterpret_cast<size_t>(address) % capacity;
while (data[index].must_keep_searching_for(address))
{
++index;
if (index == capacity) index = 0;
}
return &data[index];
}
// Marks the slot dead but keeps it as a TOMBSTONE so probing still works.
// 'used' is intentionally not decremented; size() subtracts tombstones.
void erase(metadata* it)
{
it->kind = TOMBSTONE;
++tombstones;
}
} the_hashset;
// RAII sentinel: constructed during static initialization (after the_hashset,
// which is defined earlier in this translation unit) and destroyed after
// main() returns, at which point anything still in the table is a leak.
struct heap_debugger
{
heap_debugger()
{
puts("heap debugger started");
}
~heap_debugger()
{
// Every entry still in use was allocated but never released.
the_hashset.print();
puts("heap debugger shutting down");
}
} the_heap_debugger;
// Allocates 'size' payload bytes flanked by two canary blocks, records the
// allocation in the_hashset, logs it, and returns the payload address.
// Throws std::bad_alloc when the underlying malloc fails.
// Fix: the dynamic exception specification `throw (std::bad_alloc)` was
// removed — it is ill-formed since C++17.
void* allocate(size_t size, kind_of_memory kind)
{
    byte* raw = static_cast<byte*>(malloc(size + 2 * sizeof CANARY));
    if (raw == 0) throw std::bad_alloc();
    memcpy(raw, CANARY, sizeof CANARY);              // front guard
    byte* payload = raw + sizeof CANARY;
    memcpy(payload + size, CANARY, sizeof CANARY);   // rear guard
    metadata md = {payload, size, kind};
    the_hashset.insert(md);
    printf("allocated ");
    md.print();
    return payload;
}
// Validates and frees a payload previously returned by allocate().
// Reports — and refuses to free — unknown pointers (including double
// deletes), the wrong delete form, and canary damage.
// Modernized: `throw ()` (deprecated in C++17) replaced by noexcept.
void release(void* payload, kind_of_memory kind) noexcept
{
    if (payload == 0) return;
    metadata* p = the_hashset.find(payload);
    if (!p->in_use())
    {
        printf("ERROR: no dynamic memory at %p\n", payload);
    }
    else if (p->kind != kind)
    {
        printf("ERROR:wrong form of delete at %p\n", payload);
    }
    else if (p->canaries_alive())
    {
        printf("releasing ");
        p->print();
        // Free from the start of the front canary, not the payload.
        free(static_cast<byte*>(payload) - sizeof CANARY);
        the_hashset.erase(p);
    }
}
}
// Global replacements routing every new/delete through the debugger.
// Fix: dynamic exception specifications (`throw(std::bad_alloc)`) are
// ill-formed since C++17; deallocation functions are noexcept.
void* operator new(size_t size)
{
    return allocate(size, NON_ARRAY_MEMORY);
}

void* operator new[](size_t size)
{
    return allocate(size, ARRAY_MEMORY);
}

void operator delete(void* payload) noexcept
{
    release(payload, NON_ARRAY_MEMORY);
}

void operator delete[](void* payload) noexcept
{
    release(payload, ARRAY_MEMORY);
}
// Smoke test: deliberately triggers each error class the debugger detects —
// wrong delete form, double delete, underflow/overflow, and a leak.
int main()
{
int* p = new int[1];
delete p; // wrong form of delete
delete[] p; // ok
delete p; // no dynamic memory (double delete)
p = new int[1];
p[-1] = 0xcafebabe;
p[+1] = 0x12345678;
delete[] p; // underflow and overflow prevent release
// p is not released, hence leak
}
Very nice, indeed. Your canaries could actually reveal some real cases of overflow/underflow (though not all of them as Matthieu pointed out).
What more. You might run into some problems with a multi-threaded application. Perhaps protect the hashtable from concurrent access?
Now that you log every allocation and deallocation, you can (if you like) provide more information about the program being tested. It might be interesting to know the total and average number of allocations at any given time? The total, max, min and average bytes allocated, and the average lifespan of allocations.
If you want to compare different threads, at least with Pthreads you can identify them with pthread_self(). This heap debugger could become a quite useful analysis tool.
Are you using a very weak malloc that doesn't already have this sort of stuff built into it? Because if it's there, you are doubling the overhead for little gain. Also, this kind of system really hurts when doing small object allocation or is ineffective with them as people do 1 alloc and manage the memory themselves.
As far as the code is concerned, it looks like it will do what you say it will do and it looks well designed and is easy to read. But, if you are going to go through the trouble of doing this though, why not catch your buffer over/under flows at the source by using managed containers/pointers/operator[] thingies. That way, you can debug on the spot of the failure instead of finding out at free that something evil has occured.
There are efficiencies to be had that I'm sure others will find, but these are just some thoughts off the top of my head after looking over your code for a few minutes.
I wonder about the detection of underflows / overflows.
I mean, if I have a 10 elements arrays, then it seems you'll detect if I write at -1 and 10, but what if I write at 20 ? Underflow or Overflow are not necessarily done as part of a buffer overrun (contiguous).
Furthermore, what's the point of preventing release of the block ? This block is (relatively) fine, it's the neighbors you've (unfortunately) corrupted.
Anyway, it seems pretty fine to me, though I would probably have more than one return per function because there's no point in Single Exit. You seem more of a C programmer than a C++ one :)
I stumbled about a method which seems to be present in all data objects like QList, QQueue, QHash...
I even investigated so far I can see the source code of it, which is
// Quoted from Qt's qlist.h: turning sharing off first detaches (deep-copies
// if the data block is shared), then marks the block so future copies of the
// container must deep-copy instead of attaching to it.
inline void setSharable(bool sharable) {
if (!sharable) detach(); d->sharable = sharable;
}
in qlist.h (lines 117).
But what effect does it have on the QList, QQueue, QHash... ? And is it in any way related to threading (which sounds reasonable)?
Thanks for any answer, and please only answer if you got actual knowledge.
No one could say it more clearly:
http://qt.nokia.com/doc/4.6/implicit-sharing.html
It is common practice to realize containers this way.
The sharable state you're asking about has nothing to do with mutlithreading. It is instead an implementation detail of copy-on-write data classes (even single-threaded ones) that hand out references to internal state.
Consider a class String that is implemented using CoW (for illustration purposes, this class isn't usable in threaded contexts, because accesses to d->refcount aren't synchronised, it also doesn't ensure that the internal char arrary ends in '\0', and might as well eat your grandmother; you have been warned):
// Shared representation for String: one reference-counted block of chars.
struct StringRep {
    size_t capacity;  // allocated characters
    size_t size;      // characters in use
    size_t refcount;  // number of String objects sharing this rep
    bool sharable;    // later...
    char * data;      // owned buffer, delete[]'d on destruction

    StringRep()
        : capacity(0), size(0), refcount(0), sharable(true), data(0) {}
    ~StringRep() { delete[] data; }
};
// Copy-on-write string: all copies share one StringRep until a mutator
// detaches. Per the surrounding text: intentionally not thread-safe, and the
// internal array is deliberately not NUL-terminated.
class String {
StringRep * d; // shared representation; never null
public:
String() : d(new StringRep) { ++d->refcount; }
// The last owner destroys the rep.
~String() { if (--d->refcount <= 0) delete d; }
explicit String(const char * s)
: d(new StringRep)
{
++d->refcount;
d->size = d->capacity = strlen(s);
d->data = new char[d->size];
memcpy(d->data, s, d->size);
}
// Shallow copy: share the rep, bump the count.
// NOTE(review): this ignores d->sharable, so copies stay shallow even after
// a reference to the internals was handed out — confirm against the later
// discussion of the sharable flag.
String(const String &other)
: d(other.d)
{
++d->refcount;
}
void swap(String &other) { std::swap(d, other.d); }
// Copy-swap: self-assignment safe, strong exception guarantee.
String &operator=(const String &other) {
String(other).swap(*this); // copy-swap trick
return *this;
}
And a sample function each for mutating and const methods:
// Ensures this String is the sole owner of its rep (the copy-on-write
// "detach"); mutators call it before writing so other copies are unaffected.
void detach() {
    if (d->refcount == 1)
        return;
    StringRep * newRep = new StringRep(*d);
    // Fix: the memberwise copy above duplicated the OLD refcount; this
    // String is the only owner of newRep, so the count must be exactly 1.
    // The original `++newRep->refcount` left an inflated count, so newRep
    // could never reach zero and leaked.
    newRep->refcount = 1;
    newRep->data = new char[d->size];
    memcpy(newRep->data, d->data, d->size);
    --d->refcount;
    d = newRep;
}
// Grows or shrinks to exactly newSize characters, preserving the common
// prefix. Detaches first, as every mutator must.
void resize(size_t newSize) {
    if (newSize == d->size)
        return;
    detach(); // mutator methods need to detach
    if (newSize < d->size) {
        d->size = newSize;
    } else if (newSize > d->size) {
        char * newData = new char[newSize];
        memcpy(newData, d->data, d->size);
        delete[] d->data;
        d->data = newData;
        // Fix: record the new extent — the original never updated d->size
        // (or capacity) after growing, so the added characters were
        // invisible to every subsequent operation.
        d->size = d->capacity = newSize;
    }
}
// Read-only access: returns by value, hands out no reference, so neither a
// detach nor a sharable change is needed.
char operator[](size_t idx) const {
// no detach() here, we're in a const method
return d->data[idx];
}
};
So far so good. But what if we want to provide a mutable operator[]?
// Naive mutable access: it detaches, but (as the text below explains) fails
// to mark the rep non-sharable, so a later shallow copy still aliases the
// storage the returned reference points into.
char & operator[](size_t idx) {
detach(); // make sure we're not changing all the copies
// in case the returned reference is written to
return d->data[idx];
}
This naïve implementation has a flaw. Consider the following scenario:
String s1("Hello World!");
char & W = s1[7]; // hold reference to the W
assert( W == 'W' );
// Fixed: the original read `const String s1(s2);`, which redeclares s1 and
// references an undeclared s2 — clearly s2 is meant to be a copy of s1.
const String s2(s1); // Shallow copy, but s1, s2 should now
// act independently
W = 'w'; // modify s1 _only_ (or so we think)
assert( W == 'w' ); // ok
assert( s1[7] == 'w' ); // ok
assert( s2[7] == 'W' ); // boom! s2[7] == 'w' instead!
To prevent this, String has to mark itself non-sharable when it hands out a reference to internal data, so that any copy that is taken from it is always deep. So, we need to adjust detach() and char & operator[] like this:
// Adjusted detach: also deep-copies when the rep is marked non-sharable,
// even if this String is the only owner (a reference to the internals may
// still be live). Pedagogical fragment — body elided by the author.
void detach() {
if (d->refcount == 1 && /*new*/ d->sharable)
return;
// rest as above
}
// Mutable access that hands out a reference to internal data: after this,
// every copy taken from this String must be deep.
char & operator[](size_t idx) {
    detach();
    // Fix: the member is spelled 'sharable' (see StringRep); the original
    // `d->shareable` does not compile.
    d->sharable = false; // new
    return d->data[idx];
}
When to reset the shareable state back to true again? A common technique is to say that references to internal state are invalidated when calling a non-const method, so that's where shareable is reset back to true. Since every non-const function calls detach(), we can reset shareable there, so that detach() finally becomes:
// Final detach: deep-copies when the rep is shared or non-sharable, and
// resets the sharable flag — any non-const call invalidates previously
// handed-out references, and every non-const call lands here.
void detach() {
    if (d->refcount == 1 && d->sharable) {
        d->sharable = true; // new (already true in this branch; kept for symmetry)
        return;
    }
    d->sharable = true; // new
    StringRep * newRep = new StringRep(*d);
    // Fix: the memberwise copy duplicated the OLD refcount; this String is
    // the sole owner of newRep, so the count must be exactly 1. The original
    // `++newRep->refcount` left an inflated count and leaked newRep.
    newRep->refcount = 1;
    newRep->data = new char[d->size+1];
    memcpy(newRep->data, d->data, d->size+1);
    --d->refcount;
    d = newRep;
}