I want to compress/decompress an unsigned char buffer using Fast LZMA2 (conor42's faster implementation of 7-Zip's LZMA2): https://github.com/conor42/fast-lzma2
In the sample there are two functions:
static int compress_file(FL2_CStream *fcs)
{
    unsigned char in_buffer[8 * 1024];
    unsigned char out_buffer[4 * 1024];
    FL2_inBuffer in_buf = { in_buffer, sizeof(in_buffer), sizeof(in_buffer) };
    FL2_outBuffer out_buf = { out_buffer, sizeof(out_buffer), 0 };
    size_t res = 0;
    size_t in_size = 0;
    size_t out_size = 0;
    do {
        if (in_buf.pos == in_buf.size) {
            in_buf.size = fread(in_buffer, 1, sizeof(in_buffer), fin);
            in_size += in_buf.size;
            in_buf.pos = 0;
        }
        res = FL2_compressStream(fcs, &out_buf, &in_buf);
        if (FL2_isError(res))
            goto error_out;
        fwrite(out_buf.dst, 1, out_buf.pos, fout);
        out_size += out_buf.pos;
        out_buf.pos = 0;
    } while (in_buf.size == sizeof(in_buffer));
    do {
        res = FL2_endStream(fcs, &out_buf);
        if (FL2_isError(res))
            goto error_out;
        fwrite(out_buf.dst, 1, out_buf.pos, fout);
        out_size += out_buf.pos;
        out_buf.pos = 0;
    } while (res);
    fprintf(stdout, "\t%zu -> %zu\n", in_size, out_size);
    return 0;

error_out:
    fprintf(stderr, "Error: %s\n", FL2_getErrorName(res));
    return 1;
}
static int decompress_file(FL2_DStream *fds)
{
    unsigned char in_buffer[4 * 1024];
    unsigned char out_buffer[8 * 1024];
    FL2_inBuffer in_buf = { in_buffer, sizeof(in_buffer), sizeof(in_buffer) };
    FL2_outBuffer out_buf = { out_buffer, sizeof(out_buffer), 0 };
    size_t res;
    size_t in_size = 0;
    size_t out_size = 0;
    do {
        if (in_buf.pos == in_buf.size) {
            /* NB: the sample reads back the compressed file that compress_file just wrote */
            in_buf.size = fread(in_buffer, 1, sizeof(in_buffer), fout);
            in_size += in_buf.size;
            in_buf.pos = 0;
        }
        res = FL2_decompressStream(fds, &out_buf, &in_buf);
        if (FL2_isError(res))
            goto error_out;
        /* Discard the output. XXhash will verify the integrity. */
        out_size += out_buf.pos;
        out_buf.pos = 0;
    } while (res && in_buf.size);
    fprintf(stdout, "\t%zu -> %zu\n", in_size, out_size);
    return 0;

error_out:
    fprintf(stderr, "Error: %s\n", FL2_getErrorName(res));
    return 1;
}
But I have no idea how to make it work on an in-memory buffer, without a fixed chunk size like 8 * 1024, the way zlib's deflate API does.
I want something like
LZMA2_Compress(void* buffer,size_t bufferSize);
and LZMA2_Decompress(void* buffer,size_t bufferSize);
I want to use this algorithm on some heavy files, and Fast LZMA2 is the fastest high-ratio compression I have found, so please don't suggest other methods.
Here's my test code; it works, but it needs the correct usage information:
https://gist.github.com/Bit00009/3241bb66301f8aaba16074537d094e61
Check the header file for all of the functions available. This one looks like the one you need. You will need to cast your buffers to (void *).
High-level functions
fast-lzma2.h
...
/*! FL2_compress() :
* Compresses `src` content as a single LZMA2 compressed stream into already allocated `dst`.
* Call FL2_compressMt() to use > 1 thread. Specify nbThreads = 0 to use all cores.
 * #return : compressed size written into `dst` (<= `dstCapacity`),
* or an error code if it fails (which can be tested using FL2_isError()). */
FL2LIB_API size_t FL2LIB_CALL FL2_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
...
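For the buffer-to-buffer calls you asked about, thin wrappers over this one-shot API could look like the sketch below. It assumes FL2_compressBound() and FL2_decompress() are declared in fast-lzma2.h alongside FL2_compress() (check the header for the exact prototypes); the wrapper names and compression level are just examples:

#include <cstdio>
#include <vector>
#include "fast-lzma2.h"

// Hypothetical one-shot wrappers; an empty vector signals failure.
std::vector<unsigned char> LZMA2_Compress(const void* buffer, size_t bufferSize)
{
    std::vector<unsigned char> dst(FL2_compressBound(bufferSize)); // worst-case size
    size_t res = FL2_compress(dst.data(), dst.size(), buffer, bufferSize, 6 /* level */);
    if (FL2_isError(res)) {
        std::fprintf(stderr, "Error: %s\n", FL2_getErrorName(res));
        return {};
    }
    dst.resize(res); // shrink to the actual compressed size
    return dst;
}

std::vector<unsigned char> LZMA2_Decompress(const void* buffer, size_t bufferSize,
                                            size_t originalSize)
{
    std::vector<unsigned char> dst(originalSize);
    size_t res = FL2_decompress(dst.data(), dst.size(), buffer, bufferSize);
    if (FL2_isError(res)) {
        std::fprintf(stderr, "Error: %s\n", FL2_getErrorName(res));
        return {};
    }
    dst.resize(res);
    return dst;
}

Note the decompressor needs the original size: either store it next to the compressed data yourself, or use a helper such as FL2_findDecompressedSize() if your version of fast-lzma2.h provides one.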
Management of memory and options
To do explicit memory management (set dictionary size, buffer size, etc.) you need to create a context:
fast-lzma2.h
/*= Compression context
* When compressing many times, it is recommended to allocate a context just once,
* and re-use it for each successive compression operation. This will make workload
* friendlier for system's memory. The context may not use the number of threads requested
* if the library is compiled for single-threaded compression or nbThreads > FL2_MAXTHREADS.
* Call FL2_getCCtxThreadCount to obtain the actual number allocated. */
typedef struct FL2_CCtx_s FL2_CCtx;
FL2LIB_API FL2_CCtx* FL2LIB_CALL FL2_createCCtx(void);
Then you can use FL2_CCtx_setParameter() to set the parameters on the context. The possible values for the parameters are listed in FL2_cParameter, and the value FL2_p_dictionarySize will allow you to set the dictionary size.
/*! FL2_CCtx_setParameter() :
* Set one compression parameter, selected by enum FL2_cParameter.
* #result : informational value (typically, the one being set, possibly corrected),
* or an error code (which can be tested with FL2_isError()). */
FL2LIB_API size_t FL2LIB_CALL FL2_CCtx_setParameter(FL2_CCtx* cctx, FL2_cParameter param, size_t value);
Finally you can compress the buffer by calling FL2_compressCCtx()
/*! FL2_compressCCtx() :
* Same as FL2_compress(), but requires an allocated FL2_CCtx (see FL2_createCCtx()). */
FL2LIB_API size_t FL2LIB_CALL FL2_compressCCtx(FL2_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
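Putting the three calls together, a context-based helper might look like this sketch. FL2_freeCCtx() is assumed to be the cleanup counterpart of FL2_createCCtx(), and the 64 MiB dictionary and level 9 are example values only:

#include <cstdio>
#include <vector>
#include "fast-lzma2.h"

// Hypothetical helper: compress src into dst with an explicit dictionary size.
// Returns the compressed size, or 0 on failure.
size_t CompressWithContext(std::vector<unsigned char>& dst,
                           const std::vector<unsigned char>& src)
{
    FL2_CCtx* cctx = FL2_createCCtx();
    if (!cctx)
        return 0;
    FL2_CCtx_setParameter(cctx, FL2_p_dictionarySize, 1u << 26); // 64 MiB
    dst.resize(FL2_compressBound(src.size()));
    size_t res = FL2_compressCCtx(cctx, dst.data(), dst.size(),
                                  src.data(), src.size(), 9 /* level */);
    FL2_freeCCtx(cctx);
    if (FL2_isError(res)) {
        std::fprintf(stderr, "Error: %s\n", FL2_getErrorName(res));
        return 0;
    }
    dst.resize(res);
    return res;
}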
I have a raw video file, and I am making an app in Qt that reads it frame by frame. With large raw files, when I press the button that goes to the next frame, there is a big delay of almost one second.
Here is my code that returns a frame from the raw file:
void RawVideoReader::getFrame(int offset)
{
    std::cout << "getFrame" << std::endl;
    file.seek((unsigned long long)width * (unsigned long long)height * (unsigned long long)offset);
    QByteArray array = file.read(width * height);
    const std::size_t count = array.size();
    // note: unique_ptr<unsigned char> calls delete, not delete[]; this array
    // allocation would need unique_ptr<unsigned char[]>
    hex = std::unique_ptr<unsigned char>(new unsigned char[count]);
    std::memcpy(hex.get(), array.constData(), count);
}
You can read directly into the buffer you desire - the question is: why do you want to manage this memory buffer using unique_ptr? QByteArray already does that job. Furthermore, you probably want to keep the same buffer, and not reallocate it over and over.
class RawVideoReader : ... {
    QByteArray frame;
    const uint8_t *frameData() const {
        // constData() returns const char*, so a reinterpret_cast to a const
        // byte pointer is needed here
        return frame.isEmpty() ? nullptr : reinterpret_cast<const uint8_t*>(frame.constData());
    }
    size_t frameSize() const { return static_cast<size_t>(frame.size()); }
    ...
};

bool RawVideoReader::getFrame(int frameNo) {
    qDebug() << __FUNCTION__;
    frame.resize(width * height * 1);
    file.seek(qint64(frame.size()) * qint64(frameNo));
    auto const hadRead = file.read(frame.data(), frame.size());
    return hadRead == frame.size();
}
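Calling code then stays trivial; something like this (hypothetical usage, assuming the reader's file, width, and height are already set up):

RawVideoReader reader;
// ... open reader's file, set width/height ...
if (reader.getFrame(42)) {
    const uint8_t* pixels = reader.frameData(); // owned by the QByteArray
    size_t n = reader.frameSize();
    // hand pixels/n to the display widget; no copy, no manual delete
}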
I recently switched my project to using a linear memory allocator that I wrote myself (for learning). When I initialize the allocator, I pass it a pointer to a block of memory that was VirtualAlloc-ed beforehand. Before writing the allocator, I was using this block directly just fine.
In my test case, I am using the allocator to allocate memory for a Player* in that initial big block of memory. To make sure everything was working, I tried accessing the block of memory directly, as I had before, to make sure the values were changing according to my expectations. That's when I hit a memory access error. Using the VS debugger/watch window, I have a reasonable idea of what is happening and when, but I am hoping to get some help with the question of why. I'll lay out the relevant pieces of code below.
The VirtualAlloc call, later referred to as memory->transientStorage:
win32_State.gameMemoryBlock = VirtualAlloc(baseAddress, (size_t)win32_State.totalSize,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
Allocator definition
struct LinearAllocator {
    void* currentPos;
    size_t totalSize;
    void* startPos;
    size_t usedMemory;
    size_t numAllocations;

    LinearAllocator();
    LinearAllocator(size_t size, void* start);
    LinearAllocator(LinearAllocator&) = delete;
    ~LinearAllocator();

    void* allocate(size_t size, uint8 alignment);
    void clear();
};
Player and Vec2f definitions
struct Player {
    Vec2f pos;
    bool32 isFiring;
    real32 timeLastFiredMS;
};

union Vec2f {
    struct {
        real32 x, y;
    };
    real32 v[2];
};
Relevant Allocator Implementation Details
void* LinearAllocator::allocate(size_t size, uint8_t alignment) {
    if (size == 0 || !isPowerOfTwo(alignment)) {
        return nullptr;
    }
    uint8_t adjustment = alignForwardAdjustment(currentPos, alignment);
    if (usedMemory + adjustment + size > totalSize) {
        return nullptr;
    }
    uint8_t* alignedAddress = (uint8*)currentPos + adjustment;
    currentPos = (void*)(alignedAddress + size);
    usedMemory += size + adjustment;
    numAllocations++;
    return (void*)alignedAddress;
}

inline uint8_t alignForwardAdjustment(void* address, uint8_t alignment) {
    uint8_t adjustment = alignment - ((size_t)address & (size_t)(alignment - 1));
    if (adjustment == alignment) {
        return 0; // already aligned
    }
    return adjustment;
}

inline int32_t isPowerOfTwo(size_t value) {
    return value != 0 && (value & (value - 1)) == 0;
}
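A quick sanity check of the adjustment math (hypothetical test values): 0x1003 & 0x7 is 3, so the adjustment is 8 - 3 = 5, and 0x1003 + 5 lands on 0x1008.

#include <cassert>

void testAlignForwardAdjustment() {
    // 3 bytes past an 8-byte boundary needs 5 bytes of padding
    assert(alignForwardAdjustment(reinterpret_cast<void*>(0x1003), 8) == 5);
    // an already-aligned address needs no adjustment
    assert(alignForwardAdjustment(reinterpret_cast<void*>(0x1008), 8) == 0);
}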
Initialization code where I attempt to use the allocator:
// **Can write to memory fine here**
((float*)memory->transientStorage)[0] = 4.f;

size_t simulationAllocationSize = memory->transientStorageSize / 2 / sizeof(real32);
simulationMemory = LinearAllocator(simulationAllocationSize, &memory->transientStorage + (uint8_t)0);

for (int i = 0; i < MAX_PLAYERS; i++) {
    Player* p = (Player*)simulationMemory.allocate(sizeof(Player), 4);
    // **also works here**
    ((real32*)memory->transientStorage)[0] = 3.f;
    p->pos.x = 0.f; // **after this line, I got the unable to read memory error**
    p->pos.y = 0.f;
    p->isFiring = false;
    p->timeLastFiredMS = 0.f;
    // **can't write**
    ((real32*)memory->transientStorage)[0] = 1.f;
}
// **also can't write**
((real32*)memory->transientStorage)[0] = 2.f;
real32 test = ((real32*)memory->transientStorage)[0];
My running assumption is that I'm missing something obvious. But the only clue I have to go off of is that it changed after setting a value in the Player struct. Any help here would be greatly appreciated!
Looks like this is your problem:
simulationMemory = LinearAllocator(simulationAllocationSize,
&memory->transientStorage + (uint8_t)0);
There's a stray & operator, causing you to allocate memory not from the allocated memory block that memory->transientStorage points to, but from wherever memory itself lives.
This in turn causes the write to p->pos.x to overwrite the value of transientStorage.
The call to LinearAllocator should be just
simulationMemory = LinearAllocator(simulationAllocationSize,
memory->transientStorage + (uint8_t)0);
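To make the difference concrete, here is a minimal illustration (GameMemory is a hypothetical stand-in for the question's actual struct):

#include <cstddef>

struct GameMemory { void* transientStorage; size_t transientStorageSize; };

void illustrate(GameMemory* memory) {
    void*  block      = memory->transientStorage;  // start of the VirtualAlloc'd block
    void** memberAddr = &memory->transientStorage; // address of the pointer member, inside *memory
    // An allocator seeded with memberAddr hands out bytes overlapping the
    // GameMemory struct itself, so the first Player written through it
    // tramples the transientStorage pointer.
    (void)block; (void)memberAddr;
}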
I'm using the SoundTouch library to pitch-shift some audio files. Everything works well, except the last few hundred milliseconds of the new audio file are not like the original file. Here is the original file:
And here's what I get after pitch-shifting:
As you can see the ending is not right. It's like there was nothing there in the original file, when there certainly is.
Here's the code I'm using:
int generateFile(WavInFile *file, SoundTouch *st, string fileName, int semitones)
{
    const bool speech = true;
    SAMPLETYPE samples[BUFF_SIZE];
    WavOutFile *out = new WavOutFile(fileName.c_str(), (int)file->getSampleRate(),
                                     (int)file->getNumBits(), (int)file->getNumChannels());
    int nChannels = (int)file->getNumChannels();
    assert(nChannels > 0);
    int num, nSamples;
    int buffSizeSamples = BUFF_SIZE / nChannels;

    st->setSampleRate((int)file->getSampleRate());
    st->setChannels(nChannels);
    st->setPitchSemiTones(semitones);

    if (!speech)
    {
        st->setSetting(SETTING_USE_QUICKSEEK, 0);
        st->setSetting(SETTING_USE_AA_FILTER, 0);
    }
    else
    {
        st->setSetting(SETTING_USE_QUICKSEEK, 0);
        st->setSetting(SETTING_SEQUENCE_MS, 40);
        st->setSetting(SETTING_SEEKWINDOW_MS, 15);
        st->setSetting(SETTING_OVERLAP_MS, 8);
    }

    while (file->eof() == 0)
    {
        num = file->read(samples, BUFF_SIZE);
        nSamples = num / (int)file->getNumChannels();
        st->putSamples(samples, nSamples);
        do
        {
            nSamples = st->receiveSamples(samples, buffSizeSamples);
            out->write(samples, nSamples * nChannels);
        } while (nSamples != 0);
    }

    st->flush();
    do
    {
        nSamples = st->receiveSamples(samples, buffSizeSamples);
        out->write(samples, nSamples * nChannels);
    } while (nSamples != 0);

    delete out;
    return 0;
}
And yes, I delete the WavInFile *file later in my code. So my question is: why is SoundTouch doing this, and how can I fix it?
Also, I cannot simply cut off the wrong part of the new audio file, because I'm generating hundreds of files this way, so cutting every single one of them would be...
I'm working on a function that can decompress the deflate compression, so I can read/draw PNG files in my C++ program. However, the deflate specification isn't very clear on some things.
So my main question is:
Paragraph 3.2.7, "Compression with dynamic Huffman codes (BTYPE=10)", of the specification states that
the distance code follows the literal/length
But it does not state how many bits the distance code occupies. Is it an entire byte?
And how does the distance code relate? What is its use, really?
Does anyone have a general explanation? The specification is kind of lacking in clarity.
I found the specification here:
http://www.ietf.org/rfc/rfc1951.txt
Edit: here is my code that uses the puff inflate code.
First the header (ConceptApp.h)
#include "resource.h"
#ifdef _WIN64
typedef unsigned long long SIZE_PTR;
#else
typedef unsigned long SIZE_PTR;
#endif
typedef struct _IMAGE {
DWORD Width; //Width in pixels.
DWORD Height; //Height in pixels.
DWORD BitsPerPixel; //24 (RGB), 32 (RGBA).
DWORD Planes; //Count of color planes
PBYTE Pixels; //Pointer to the first pixel of the image.
} IMAGE, *PIMAGE;
typedef DWORD LodePNGColorType;
typedef struct _LodePNGColorMode {
DWORD colortype;
DWORD bitdepth;
} LodePNGColorMode;
typedef struct LodePNGInfo
{
/*header (IHDR), palette (PLTE) and transparency (tRNS) chunks*/
unsigned compression_method;/*compression method of the original file. Always 0.*/
unsigned filter_method; /*filter method of the original file*/
unsigned interlace_method; /*interlace method of the original file*/
LodePNGColorMode color; /*color type and bits, palette and transparency of the PNG file*/
} LodePNGInfo;
typedef struct _ZLIB {
BYTE CMF;
BYTE FLG;
//DWORD DICTID; //if FLG.FDICT (Bit 5) is set, this variable follows.
//Compressed data here...
} ZLIB, *PZLIB;
typedef struct _PNG_IHDR {
DWORD Width;
DWORD Height;
BYTE BitDepth;
BYTE ColourType;
BYTE CompressionMethod;
BYTE FilterMethod;
BYTE InterlaceMethod;
} PNG_IHDR, *PPNG_IHDR;
typedef struct _PNG_CHUNK {
DWORD Length;
CHAR ChuckType[4];
} PNG_CHUNK, *PPNG_CHUNK;
typedef struct _PNG {
BYTE Signature[8];
PNG_CHUNK FirstChunk;
} PNG, *PPNG;
And the .cpp code file:
The main function (LoadPng) can be found at the bottom of the file.
BYTE LoadPng(PPNG PngFile, PIMAGE ImageData)
{
    PDWORD Pixel = 0;
    DWORD ChunkSize = 0;
    PPNG_IHDR PngIhdr = (PPNG_IHDR) ((SIZE_PTR) &PngFile->FirstChunk + sizeof(PNG_CHUNK));
    DWORD Png_Width = Png_ReadDword((PBYTE)&PngIhdr->Width);
    DWORD Png_Height = Png_ReadDword((PBYTE)&PngIhdr->Height);
    DWORD BufferSize = (Png_Width * Png_Height) * 8; //This is just a guess right now, haven't done the math yet. !!!
    ChunkSize = Png_ReadDword((PBYTE)&PngFile->FirstChunk.Length);
    PPNG_CHUNK ThisChunk = (PPNG_CHUNK) ((SIZE_PTR)&PngFile->FirstChunk + ChunkSize + 12); //12 is the length var itself, chunk type and CRC.
    PPNG_CHUNK NextChunk;
    PBYTE UncompressedData = (PBYTE) malloc(BufferSize);
    INT RetValue = 0;

    do
    {
        ChunkSize = Png_ReadDword((PBYTE)&ThisChunk->Length);
        NextChunk = (PPNG_CHUNK) ((SIZE_PTR)ThisChunk + ChunkSize + 12); //12 is the length var itself, chunk type and CRC.
        if (Png_IsChunk(ThisChunk->ChuckType, "IDAT")) //Is IDAT?
        {
            PZLIB iData = (PZLIB) ((SIZE_PTR)ThisChunk + 8); //8 is the length and chunk type.
            PBYTE FirstBlock; //pointer to the first 3 bits of the deflate data.
            if ((iData->CMF & 8) == 8) //deflate compression method (strictly, the method is the low nibble: (CMF & 0x0F) == 8).
            {
                if ((iData->FLG & 0x20) == 0x20)
                {
                    FirstBlock = (PBYTE) ((SIZE_PTR)iData + 6); //DICTID present.
                }
                else FirstBlock = (PBYTE) ((SIZE_PTR)iData + 2); //DICTID not present.
                RetValue = puff(UncompressedData, &BufferSize, FirstBlock, &ChunkSize); //I believe chunk size should be fine.
                if (RetValue != 0)
                {
                    WCHAR ErrorText[100];
                    swprintf_s(ErrorText, 100, L"%u", RetValue); //Convert the code into a string.
                    MessageBox(NULL, ErrorText, NULL, MB_OK);
                }
            }
        }
        ThisChunk = NextChunk;
    } while (!Png_IsChunk(ThisChunk->ChuckType, "IEND"));

    //LodePNGInfo ImageInfo;
    //PBYTE Png_Real_Image = (PBYTE) malloc(BufferSize);
    //ImageInfo.compression_method = PngIhdr->CompressionMethod;
    //ImageInfo.filter_method = PngIhdr->FilterMethod;
    //ImageInfo.interlace_method = PngIhdr->InterlaceMethod;
    //ImageInfo.color.bitdepth = PngIhdr->BitDepth;
    //ImageInfo.color.colortype = PngIhdr->ColourType;

    //Remove Filter/crap blah blah.
    //postProcessScanlines(Png_Real_Image, UncompressedData, Png_Width, Png_Height, &ImageInfo);

    ImageData->Width = Png_Width;
    ImageData->Height = Png_Height;
    ImageData->Planes = 0;        //Will need changed later.
    ImageData->BitsPerPixel = 32; //Will need changed later.
    ImageData->Pixels = 0;
    //ImageData->Pixels = Png_Real_Image; //image not uncompressed yet.

    return TRUE; //ret true for now. fix later.
}
I just hope to make clearer what was stated before: Huffman coding is a method for encoding values using a variable number of bits. In, say, ASCII coding, every letter gets the same number of bits no matter how frequently it is used. In Huffman coding, you could make "e" take fewer bits than an "X".
The trick in Huffman coding is that the codes are prefix-free: after reading each bit, the decoder knows unambiguously whether it has a complete value or needs to read another bit.
To comprehend the deflate process you need to understand the LZ algorithm and Huffman coding.
On their own, both techniques are simple. The complexity comes from how they are put together.
LZ compresses by finding previous occurrences of a string. When a string has occurred previously, it is compressed by referencing that previous occurrence. The distance is the offset back to the previous occurrence; distance and length together specify it.
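In other words, a (length, distance) pair means "copy length bytes starting distance bytes back in the output produced so far". A minimal sketch of how a decompressor resolves such a pair (the function is mine, for illustration; it is not from puff or zlib):

#include <cstddef>

// Resolve one LZ77 back-reference into `out`, which already holds `pos`
// decoded bytes. The byte-at-a-time copy is deliberate: when length exceeds
// distance, the copy re-reads bytes it has just written, producing repeats.
void copyMatch(unsigned char* out, size_t& pos, size_t distance, size_t length)
{
    size_t from = pos - distance; // distance bytes back into the output
    for (size_t i = 0; i < length; ++i)
        out[pos++] = out[from++];
}
// e.g. with "abc" already decoded, the pair (length=3, distance=3)
// appends "abc" again, yielding "abcabc".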
The problem is not with puff.
All the IDAT chunks in the PNG file need to be put together before calling puff.
It should look something like this:
BYTE LoadPng(PPNG PngFile, PIMAGE ImageData)
{
    PDWORD Pixel = 0;
    DWORD ChunkSize = 0;
    PPNG_IHDR PngIhdr = (PPNG_IHDR) ((SIZE_PTR) &PngFile->FirstChunk + sizeof(PNG_CHUNK));
    DWORD Png_Width = Png_ReadDword((PBYTE)&PngIhdr->Width);
    DWORD Png_Height = Png_ReadDword((PBYTE)&PngIhdr->Height);
    DWORD BufferSize = (Png_Width * Png_Height) * 8; //This is just a guess right now, haven't done the math yet. !!!
    ChunkSize = Png_ReadDword((PBYTE)&PngFile->FirstChunk.Length);
    PPNG_CHUNK ThisChunk = (PPNG_CHUNK) ((SIZE_PTR)&PngFile->FirstChunk + ChunkSize + 12); //12 is the length var itself, chunk type and CRC.
    PPNG_CHUNK NextChunk;
    PBYTE UncompressedData = (PBYTE) malloc(BufferSize);
    PBYTE TempBuffer = (PBYTE) malloc(BufferSize); //Put all IDAT chunks together before uncompressing.
    DWORD DeflateSize = 0; //All IDAT chunks added.
    PZLIB iData = NULL;
    PBYTE FirstBlock = NULL; //pointer to the first 3 bits of the deflate data.
    INT RetValue = 0;

    do
    {
        ChunkSize = Png_ReadDword((PBYTE)&ThisChunk->Length);
        NextChunk = (PPNG_CHUNK) ((SIZE_PTR)ThisChunk + ChunkSize + 12); //12 is the length var itself, chunk type and CRC.
        if (Png_IsChunk(ThisChunk->ChuckType, "IDAT")) //Is IDAT?
        {
            CopyMemory(&TempBuffer[DeflateSize], (PBYTE) ((SIZE_PTR)ThisChunk + 8), ChunkSize); //8 is the length and chunk type.
            DeflateSize += ChunkSize;
        }
        ThisChunk = NextChunk;
    } while (!Png_IsChunk(ThisChunk->ChuckType, "IEND"));

    iData = (PZLIB) TempBuffer;
    if ((iData->CMF & 8) == 8) //deflate compression method (strictly, the method is the low nibble: (CMF & 0x0F) == 8).
    {
        if ((iData->FLG & 0x20) == 0x20)
        {
            FirstBlock = (PBYTE) ((SIZE_PTR)iData + 6); //DICTID present.
        }
        else FirstBlock = (PBYTE) ((SIZE_PTR)iData + 2); //DICTID not present.
    }

    RetValue = puff(UncompressedData, &BufferSize, FirstBlock, &DeflateSize); //Pass the combined deflate stream, not a single chunk.
    if (RetValue != 0)
    {
        WCHAR ErrorText[100];
        swprintf_s(ErrorText, 100, L"%u", RetValue);
        MessageBox(NULL, ErrorText, NULL, MB_OK);
    }

    //LodePNGInfo ImageInfo;
    //PBYTE Png_Real_Image = (PBYTE) malloc(BufferSize);
    //ImageInfo.compression_method = PngIhdr->CompressionMethod;
    //ImageInfo.filter_method = PngIhdr->FilterMethod;
    //ImageInfo.interlace_method = PngIhdr->InterlaceMethod;
    //ImageInfo.color.bitdepth = PngIhdr->BitDepth;
    //ImageInfo.color.colortype = PngIhdr->ColourType;

    //Remove Filter/crap blah blah.
    //postProcessScanlines(Png_Real_Image, UncompressedData, Png_Width, Png_Height, &ImageInfo);

    ImageData->Width = Png_Width;
    ImageData->Height = Png_Height;
    ImageData->Planes = 0;        //Will need changed later.
    ImageData->BitsPerPixel = 32; //Will need changed later.
    ImageData->Pixels = 0;
    //ImageData->Pixels = Png_Real_Image; //image not uncompressed yet.

    return TRUE; //ret true for now. fix later.
}
You need to first read up on compression, since there is a lot of basic stuff that you're not getting. E.g. The Data Compression Book, by Nelson and Gailly.
Since it's a code, specifically a Huffman code, by definition the number of bits is variable.
If you don't know what the distance is for, then you need to first understand the LZ77 compression approach.
Lastly, aside from curiosity and self-education, there is no need for you to understand the deflate specification or to write your own inflate code. That's what zlib is for.
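For comparison, once the IDAT chunks are concatenated as in the answer above, zlib can inflate them in one call. A minimal sketch; note that unlike puff(), uncompress() expects the whole zlib stream, CMF/FLG header included, so no manual offset past the header is needed:

#include <zlib.h>

// dst/dstSize: output buffer; zlibStream/streamSize: concatenated IDAT payload.
bool InflateIdat(unsigned char* dst, unsigned long dstSize,
                 const unsigned char* zlibStream, unsigned long streamSize)
{
    uLongf outLen = dstSize;
    int rc = uncompress(dst, &outLen, zlibStream, streamSize);
    return rc == Z_OK; // Z_BUF_ERROR means dstSize was too small
}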
I am reusing some old code (originally developed in C, not C++) with some functions to open/read/manipulate text files. The path to a text file is passed to the functions as a string (char*), then the file is opened using FileToUse = fopen(filename, "rb"); then multiple calls to fread() and fseek() are used. This code is known to work for external text files, but now I would like to include the text files as resources in my project (MFC C++ in Visual Studio).
I found some examples on the web on how to use resources, resulting in this code:
HINSTANCE hInst = AfxGetResourceHandle();
HRSRC hResource = FindResource(hInst, MAKEINTRESOURCE(IDR_TEXTFILE1), "TEXTFILE");
if (hResource) {
    HGLOBAL hLoadedResource = LoadResource(hInst, hResource);
    if (hLoadedResource) {
        const char* pResource = (const char*)LockResource(hLoadedResource); // LockResource returns LPVOID
        if (pResource) {
            DWORD dwResourceSize = SizeofResource(hInst, hResource);
            if (0 != dwResourceSize) {                         // if (FileToUse) {
                memcpy(&Buffer, pResource, (15 * 2));          // fread(Buffer, 15, 2, FileToUse);
                pTemp = pResource + 200;                       // fseek(FileToUse, 200, SEEK_SET);
                pTemp = pTemp + 100;                           // fseek(FileToUse, 100, SEEK_CUR);
                pTemp = pResource + (dwResourceSize - 1) - 40; // fseek(FileToUse, -40, SEEK_END);
            }
        }
    }
}
I replaced the fread call with memcpy() as shown, but I'm missing the return value of fread (the actual number of items read), and in the original code the file pointer was moved by fseek; I wonder whether my approach using a temporary pointer is correct.
My ultimate goal is to simulate the fread and fseek calls for resources with similar function prototypes:
size_t resread( void* buffer, size_t size, size_t count, char* resource );
int resseek( char* resource, long offset, int origin );
Any suggestions are much appreciated.
Thanks for your help; based on Agent_L's suggestion, this is what I came up with:
Text-resource type:

struct _resource {
    const char * content; // File content
    size_t size;          // File size
    size_t ptrloc;        // 'Pointer' location
};
typedef struct _resource RES_TXT;
resread, based on fread:

size_t resread(void* buffer, size_t size, size_t count, RES_TXT* resource)
{
    size_t actualCount = (resource->size - resource->ptrloc) / size;
    actualCount = min(count, actualCount);
    if (actualCount == 0) return 0; // size_t is unsigned, so test against 0, not <= 0
    memcpy(buffer, (resource->content + resource->ptrloc), (actualCount * size)); // was resource->_ptr, which doesn't exist in the struct
    resource->ptrloc += (actualCount * size);
    return actualCount;
}
and, to complete it, resseek based on fseek:

int resseek(RES_TXT* resource, long offset, int origin) {
    long nextloc; // signed, so negative offsets (e.g. with SEEK_END) work
    switch (origin) {
        case SEEK_SET: nextloc = 0;
            break;
        case SEEK_CUR: nextloc = (long)resource->ptrloc;
            break;
        case SEEK_END: nextloc = (long)resource->size;
            break;
        default: return -1;
    }
    nextloc += offset;
    if (nextloc >= 0 && (size_t)nextloc <= resource->size) // seeking exactly to the end is legal, as with fseek
        resource->ptrloc = (size_t)nextloc;
    else
        return -1;
    return 0;
}
Any call to fseek and fread can now be replaced to use a resource instead of an external file.
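For completeness, gluing this to the resource-loading code from the question might look like the following (hypothetical glue code; error handling elided):

// Wrap the locked resource from the question in a RES_TXT, then use the
// resread/resseek replacements exactly where fread/fseek used to be.
RES_TXT res;
res.content = (const char*)LockResource(hLoadedResource);
res.size = SizeofResource(hInst, hResource);
res.ptrloc = 0;

char Buffer[15 * 2];
size_t items = resread(Buffer, 15, 2, &res); // was: fread(Buffer, 15, 2, FileToUse)
resseek(&res, 200, SEEK_SET);                // was: fseek(FileToUse, 200, SEEK_SET)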
The file handle contains not only the data but also its length and current position. You have to duplicate that.
(handwritten code, untested):
struct resFile
{
    char* pData;
    int iLength;
    int iCurrPosition;
};

size_t resread(void* buffer, size_t size, size_t count, resFile* resource)
{
    int ActualRead = min(size * count, (size_t)(resource->iLength - resource->iCurrPosition));
    memcpy(buffer, resource->pData + resource->iCurrPosition, ActualRead);
    resource->iCurrPosition += ActualRead;
    return ActualRead; // note: a byte count, whereas fread returns the number of items
}
Note that fread advances the current file position, so you don't need to call fseek each time. From this perspective, your code may be able to avoid implementing resseek at all, by simply advancing the buffer pointer.