I'm trying to program a video buffer in a std::deque in my OFX video plug-in. I would like to access previously processed images in order to process the current image. My idea is to push processed images to the front of the deque and pop them from the back if the buffer exceeds the maximum size.
The plug-in crashes when I try to free the memory of an image using delete before removing it from the buffer. I found out that I can add one or several images to the buffer and delete and remove them immediately afterwards with no problem. However, if I try to delete an image which has been added in an earlier cycle, it crashes.
The plug-in consists of the main class OFXPlugin and the processor class myplugin. The OFXPlugin instance persists over time, but for every image to be processed it creates an instance of myplugin and destroys it once that frame has been processed.
I'm not sure if I'm doing something wrong in the way I use the deque, if I'm not allowed to free memory which has been allocated by another instance of myplugin or if I'm doing something illegal related to the OFX API.
The code below shows the parts of the plug-in related to the problem. It's based on the OFX Support examples. It crashes at delete videoBuffer_.back().img; in the function OFXPlugin::addToVBuff(OFX::Image *img, double t). I cannot catch an exception; apparently it is handled (ignored) inside the OFX API.
Thanks a lot for your help!
myplugin.h
#include "ofxsImageEffect.h"
#include "ofxsMultiThread.h"
#include "../Support/Plugins/include/ofxsProcessing.H"
#include <deque>
// Video Buffer Element
typedef struct vBuffEl
{
OFX::Image* img;
double time;
} vBuffEl;
inline
bool operator==(const vBuffEl &a, const double b)
{
return a.time == b;
}
class OFXPlugin; // forward declaration: myplugin stores a pointer to the effect instance
class myplugin : public OFX::ImageProcessor {
protected :
OFX::Image *_srcImg;
double _time;
OFXPlugin *_opInstance;
public :
// ctor
myplugin(OFX::ImageEffect &instance)
: OFX::ImageProcessor(instance)
, _srcImg(0)
, _time(0)
, _opInstance(0)
{}
void multiThreadProcessImages(OfxRectI procWindow);
void setOFXPlugin(OFXPlugin* opInstance) {_opInstance = opInstance;}
OFXPlugin* getOFXPlugin() {return _opInstance;}
void setTime(double argsTime) {_time = argsTime;}
double getTime() {return _time;}
void setSrcImg(OFX::Image *v) {_srcImg = v;}
OFX::Image* getSrcImg() {return _srcImg;}
};
class OFXPlugin : public OFX::ImageEffect {
protected :
OFX::Clip *dstClip_;
OFX::Clip *srcClip_;
double time_;
std::deque<vBuffEl> videoBuffer_;
public :
/** #brief ctor */
OFXPlugin(OfxImageEffectHandle handle);
/** #brief dtor */
~OFXPlugin();
/* Override the render */
virtual void render(const OFX::RenderArguments &args);
/* get the source Clip */
OFX::Clip* getSrcClip();
/* get the current time */
double getTime();
/* set up and run a processor */
void setupAndProcess(myplugin &, const OFX::RenderArguments &args);
/* add to video buffer */
void addToVBuff(OFX::Image *img, double t);
/* fetch a dst image from buffer */
void fetchDstImageBuff(double t, OFX::Image* &img, bool &buff);
};
myplugin.cpp
#include "myplugin.h"
#include <algorithm>
void myplugin::multiThreadProcessImages(OfxRectI procWindow)
{
// Do some filtering of the source image and store result in destination image
myfiltering(_dstImg, _srcImg, procWindow);
// add to buffer
_opInstance->addToVBuff(_dstImg, _time);
}
/* set up and run a processor */
void
OFXPlugin::setupAndProcess(myplugin &processor, const OFX::RenderArguments &args)
{
// get a dst image
std::auto_ptr<OFX::Image> dst(dstClip_->fetchImage(args.time));
OFX::BitDepthEnum dstBitDepth = dst->getPixelDepth();
OFX::PixelComponentEnum dstComponents = dst->getPixelComponents();
// fetch main input image
std::auto_ptr<OFX::Image> src(srcClip_->fetchImage(args.time));
// set the images
processor.setDstImg(dst.get());
processor.setSrcImg(src.get());
// set the render window
processor.setRenderWindow(args.renderWindow);
// set time
processor.setTime(args.time);
time_ = args.time;
// set OFXPlugin instance
processor.setOFXPlugin(this);
// Call the base class process member, this will call the derived templated process code
processor.process();
}
OFX::Clip* OFXPlugin::getSrcClip()
{
return srcClip_;
}
/* get the current time */
double
OFXPlugin::getTime()
{
return time_;
}
// the overridden render function
void
OFXPlugin::render(const OFX::RenderArguments &args)
{
try {
myplugin fred(*this);
setupAndProcess(fred, args);
} catch (...) {
outputMessage("ERROR: An unknown error happened!");
}
}
/* add to video buffer */
void
OFXPlugin::addToVBuff(OFX::Image *img, double t)
{
try {
// if frame already exists in buffer, remove
std::deque<vBuffEl>::iterator it;
it = find(videoBuffer_.begin(), videoBuffer_.end(), t);
if(it != videoBuffer_.end())
{
delete it->img;
videoBuffer_.erase(it);
}
// add new frame to the front
vBuffEl e;
e.time = t;
e.img = new OFX::Image(img->getPropertySet().propSetHandle());
memcpy(e.img, img, sizeof(img));
videoBuffer_.push_front(e);
// remove elements at the end, if the buffer exceeds the max size
int LASTIMG_ARRAY_SIZE = 10;
while(videoBuffer_.size() > LASTIMG_ARRAY_SIZE)
{
delete videoBuffer_.back().img;
videoBuffer_.erase(--(videoBuffer_.end()));
}
} catch (...) {
outputMessage("ERROR: An unknown error happened!");
}
}
/* fetch a dst image from buffer */
void
OFXPlugin::fetchDstImageBuff(double t, OFX::Image* &img, bool &buff)
{
try {
std::deque<vBuffEl>::iterator it;
it = find(videoBuffer_.begin(), videoBuffer_.end(), t);
if(it != videoBuffer_.end())
{
img = it->img; // return buffered dst image
buff = true;
}
else
{
img = getSrcClip()->fetchImage(t); // fetch and return src image
buff = false;
}
} catch (...) {
outputMessage("ERROR: An unknown error happened!");
}
}
The statement
memcpy(e.img, img, sizeof(img));
doesn't do what you expect it to.
The sizeof operator applied to a pointer returns the size of the pointer itself, not of what it points to. That means you are copying only 4 or 8 bytes here (depending on whether you are on a 32- or 64-bit platform).
However, there is a worse problem hidden in that memcpy call. If OFX::Image has pointer data members, copying the raw bytes copies the pointers, not the data they point to: a shallow copy rather than a deep copy. This is one of the reasons C++ has copy constructors and copy assignment operators.
What you should do instead is a plain assignment, and hope that OFX::Image follows the rule of three:
*e.img = *img;
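To illustrate what that assignment relies on, here is a minimal rule-of-three sketch with a made-up PixelBuffer type (purely hypothetical, not part of the OFX API): copying by value duplicates the owned data, whereas a raw memcpy would only duplicate the pointer.
#include <algorithm> // std::copy
#include <cstddef>   // std::size_t

// Hypothetical type for illustration only -- not part of the OFX API.
struct PixelBuffer {
    float *data;
    std::size_t size;

    explicit PixelBuffer(std::size_t n) : data(new float[n]), size(n) {}

    // Copy constructor: deep-copies the pixel data.
    PixelBuffer(const PixelBuffer &other)
        : data(new float[other.size]), size(other.size)
    {
        std::copy(other.data, other.data + size, data);
    }

    // Copy assignment: deep-copies too, so "*e.img = *img;" style assignments are safe.
    PixelBuffer &operator=(const PixelBuffer &other)
    {
        if (this != &other) {
            float *tmp = new float[other.size];
            std::copy(other.data, other.data + other.size, tmp);
            delete[] data;
            data = tmp;
            size = other.size;
        }
        return *this;
    }

    // Destructor completes the rule of three.
    ~PixelBuffer() { delete[] data; }
};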
I'm refactoring and rewriting the code from the VkGuide tutorial to fit my idea of an engine. I'm using VMA to handle my memory needs.
I've stumbled upon a strange (?) issue while refactoring my memory-mapping code into something more easily readable:
Original code:
Renderer fields
//-------------------------------------------------
struct ObjectData { matrix4 model_matrix; };
inline Frame& frame(); // Get current frame in flight
std::vector<RenderableObject> renderables;
//--------------------------------------------------
Drawing code
void* object_data;
vmaMapMemory(allocator, frame().object_buffer.allocation, &object_data);
auto* ssbo = static_cast<ObjectData*>(object_data);
for (int i = 0; i < renderables.size(); i++) {
auto& object = renderables[i];
ssbo[i].model_matrix = object.transform;
}
vmaUnmapMemory(allocator, frame().object_buffer.allocation);
and the refactored and templated namespace-local function:
struct AllocatedBuffer { VkBuffer buffer; VmaAllocation allocation; };
template <typename T, typename Func>
void effect_mmap(VmaAllocator& allocator, AllocatedBuffer& buffer, Func&& effect)
{
void* object_data;
vmaMapMemory(allocator, buffer.allocation, &object_data);
auto* t_pointer = static_cast<T*>(object_data);
effect(*t_pointer);
vmaUnmapMemory(allocator, buffer.allocation);
}
With the refactored version, my camera position and everything else seems to shift and render incorrectly. The following shows the usage that should now replace the original code:
MemoryMapper::effect_mmap<ObjectData>(allocator, frame().object_buffer, [&](ObjectData& data) {
for (auto& object : renderables) {
data.model_matrix = object.transform;
}
});
Here are the before and after screenshots: Correct, and Incorrect (the textures are still invalid, and I don't know why, but that's a problem for another day).
Your usage example only ever writes the first ObjectData in the buffer, because data is bound to *t_pointer and never advances.
Change effect_mmap to pass t_pointer directly, change the callback's parameter type to a pointer, and then you can use it as:
MemoryMapper::effect_mmap<ObjectData>(allocator, frame().object_buffer, [&](ObjectData* data) {
for (auto& object : renderables) {
data->model_matrix = object.transform;
data++;
}
});
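For completeness, a minimal sketch of the adjusted template under that suggestion, assuming the same AllocatedBuffer struct and VMA setup shown above:
#include <vk_mem_alloc.h> // VMA, as already used above

template <typename T, typename Func>
void effect_mmap(VmaAllocator& allocator, AllocatedBuffer& buffer, Func&& effect)
{
    void* object_data = nullptr;
    vmaMapMemory(allocator, buffer.allocation, &object_data);
    // Hand the typed pointer to the callback so it can index into or advance it itself.
    effect(static_cast<T*>(object_data));
    vmaUnmapMemory(allocator, buffer.allocation);
}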
I'm working on an assignment to do with image manipulation (blending and zooming) and I've run into a problem that I'm having a hard time overcoming.
Structure of my application
Class Image
rgb struct: contains floats (they're flattened pixels) and overloaded operators.
pixels: a 1d array of pixels which is initialized via constructor to be h * w
Class destructor.
Destructor looks a little like this...
virtual ~Image()
{
if (pixels != NULL)
delete[] pixels;
}
Now I'm using another class called Filter which inherits from Image
class Filter : public Image
std::vector<Image> imageStack: a container for the images that I'm going to blend.
std::vector<Rgb*> pixelBuffer: a container holding one pixel from each image. This is not dynamically allocated by me, so I'm not worried about deleting it.
Destructor for the derived class.
~Blend()
{
delete &imageStack;
}
Here's what this part of my main.cpp looks like...
while (userInput != 5)
{
switch(userInput)
case 1:
{
Blend *meanImage = new Blend(3263, 2505, 13);
meanImage->writePPM("Mean_" + std::to_string(count) + ".ppm");//every iteration, the file name is unique
meanImage->~Blend();
}
}
In my main.cpp I'm basically feeding 13 images into the Blend object, which stores them in the vector container so I can run all my functionality on them. At run time the program uses around 1.3 GB. Since the object is created inside a loop (I have a menu with multiple options), it doesn't usually leave scope, so the destructor isn't called automatically; I call it manually like this: meanImage->~Blend(). All the error says is that my application "has triggered a breakpoint", and that's it. Note: no breakpoint is set anywhere. I'm aware that it's generally bad to use raw dynamic arrays because they cause all sorts of memory problems (when managed by me), but I want to fix this so I know how to solve these problems in the future.
If you have any questions about the code, I can post more snippets.
Edit: here's my Blend class.
#pragma once
#include "stdafx.h"
#include "Image.h"
#include <vector>
#include <string>
class Blend : public Image
{
private:
std::vector<Image> imageStack;
std::vector<Rgb*> pixelBuffer;//only works with pointers (no copying)
int m_noOfImages;
Rgb* getMedianFromSet();
Rgb getMeanFromSet();
Rgb getStdDev(Rgb meanPix);
public:
Blend(int width = 0, int height = 0, int noOfImages = 0):Image(width, height), m_noOfImages(noOfImages), imageStack(noOfImages), pixelBuffer(noOfImages)
{}
public:
void stack(bool push = true);
void meanBlend();
void medianBlend();
void sigmaClipping(float sigma = 0.5f);
~Blend()
{
delete &imageStack;
}
};
#pragma once
#include "stdafx.h"
#include "Image.h"
#include <vector>
#include <string>
#include <memory>
class Blend : public Image
{
private:
std::vector<Image> imageStack;
// Changed to shared_ptr<T> could use unique_ptr<T> depending on need.
std::vector<std::shared_ptr<Rgb>> pixelBuffer;//only works with pointers (no copying)
int m_noOfImages;
Rgb* getMedianFromSet();
Rgb getMeanFromSet();
Rgb getStdDev(Rgb meanPix);
public:
Blend(int width = 0, int height = 0, int noOfImages = 0):Image(width, height), m_noOfImages(noOfImages), imageStack(noOfImages), pixelBuffer(noOfImages)
{}
public:
void stack(bool push = true);
void meanBlend();
void medianBlend();
void sigmaClipping(float sigma = 0.5f);
// Clear Entire Buffer
void cleanup() {
// When the vector holds smart pointers, release each element, then clear the vector.
for ( auto& item : pixelBuffer ) {
item.reset();
}
pixelBuffer.clear();
}
// Remove Single Element
void remove( unsigned idx ) {
// Write function to remove a single element from vector
}
~Blend()
{
// This is definitely wrong here: You are trying to delete a reference
// to a template container that is storing `Image` objects that
// are on the stack.
// delete &imageStack;
}
};
When using dynamic memory, it is better to write explicit functions that clean up and that remove specific elements from containers than to rely only on the class's destructor.
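As a side note on the usage shown in main.cpp above (my sketch, not part of the answer): an object created with new needs a matching delete rather than an explicit destructor call, or, simpler still, automatic storage so the destructor runs by itself at the end of the block. Using the same writePPM call and count variable as in the question:
while (userInput != 5)
{
    switch (userInput)
    {
    case 1:
    {
        // Automatic storage: ~Blend() runs when this block ends, no manual call needed.
        Blend meanImage(3263, 2505, 13);
        meanImage.writePPM("Mean_" + std::to_string(count) + ".ppm");
        break;
    }
    }
}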
So I'm making a graphical application (a game) in which I want to use an asset manager. For this class I decided on a singleton design. So in my main.cpp I would load something like...
ASSET_MANAGER.LoadImage("res/graphics/background.png", "background");
Here is the implementation of the macros/methods used in the line above. This is sort of a mashup of the code to make things simpler to look at instead of pasting a few hundred lines of code in here.
assetmanager.h
#define ASSET_MANAGER AssetManager::GetAssetManager()
#define DEBUG
class AssetManager {
public:
static AssetManager &GetAssetManager();
//-----------------------------------------------------------------------------
// Purpose: Load a new image for the game to use. This function will store an
// instance of the asset in memory (in a hash map corresponding with
// the data type provided.
//
// param file: The location on disk of the asset
// param key: The string you use to receive this asset (defaults to the path str)
//-----------------------------------------------------------------------------
bool LoadImage(const char *file, const char *key = "");
//-----------------------------------------------------------------------------
// Purpose: Returns the image
//
// param key: The string used to store the asset in memory
//-----------------------------------------------------------------------------
ALLEGRO_BITMAP *GetImage(const char *key);
//-----------------------------------------------------------------------------
// Purpose: Destroys an asset that is presumably no longer needed by the game.
// This function is good for performance so that you don't use more
// RAM than you need to.
//
// param key: The string you use to receive this asset (defaults to the path str)
//-----------------------------------------------------------------------------
void DiscardImage(const char *key);
private:
AssetManager();
~AssetManager();
std::map<const char *, std::shared_ptr<ALLEGRO_BITMAP>> _ImageMap;
};
assetmanager.cpp
AssetManager &AssetManager::GetAssetManager() {
static AssetManager instance;
return instance;
}
bool AssetManager::LoadImage(const char *file, const char *key) {
key = key == "" ? file : key;
std::shared_ptr<ALLEGRO_BITMAP> x(al_load_bitmap(file), al_destroy_bitmap);
if (!x) {
fprintf(stderr, "Failed to load %s\n", file);
return false;
}
#ifdef DEBUG
printf("DEBUG: Loaded %s\n", key); //debug
#endif // DEBUG
_ImageMap.insert(std::pair<const char *, std::shared_ptr<ALLEGRO_BITMAP>>(key, x));
return true;
}
ALLEGRO_BITMAP *AssetManager::GetImage(const char *key) {
return _ImageMap.find(key) != _ImageMap.end() ? _ImageMap.at(key).get() : nullptr;
}
void AssetManager::DiscardImage(const char *key) {
_ImageMap.erase(key);
#ifdef DEBUG
printf("DEBUG: Discarded %s\n", key); //debug
#endif // DEBUG
}
This class only works from the class I initialized the asset manager in, while I expected it to work anywhere I call ASSET_MANAGER. It compiles fine; it only crashes when I try to use the manager from a different class and pass the result into an Allegro function, because GetImage returns null instead of the proper Allegro data type. What don't I understand about this?
The const char * keys in the map store pointer addresses, not string contents. std::map then compares those pointer values, so looking an asset up with a different pointer to identical text finds nothing, which is why GetImage returns null.
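A sketch of the usual fix, one option among several: key the map on std::string, which compares by contents rather than by pointer identity. The snippet below reuses the names from the question; the Allegro calls are unchanged.
#include <cstdio>
#include <map>
#include <memory>
#include <string>

// assetmanager.h: store under content-comparing string keys
std::map<std::string, std::shared_ptr<ALLEGRO_BITMAP>> _ImageMap;

// assetmanager.cpp
bool AssetManager::LoadImage(const char *file, const char *key) {
    std::string k = (key && *key) ? key : file; // fall back to the path, comparing contents
    std::shared_ptr<ALLEGRO_BITMAP> x(al_load_bitmap(file), al_destroy_bitmap);
    if (!x) {
        fprintf(stderr, "Failed to load %s\n", file);
        return false;
    }
    _ImageMap[k] = x;
    return true;
}

ALLEGRO_BITMAP *AssetManager::GetImage(const char *key) {
    auto it = _ImageMap.find(key); // const char* converts to std::string for the lookup
    return it != _ImageMap.end() ? it->second.get() : nullptr;
}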
A while ago I asked a question on why the following code did not work:
std::vector<std::vector<std::vector<Tile_Base*>>> map_tile; // This is located in Map object. See below.
int t_x, t_y;
t_x = t_y = 200;
map_tiles.begin(); // clear(), resize() and every other function still causes problems
The thing is, it should have worked, yet Visual Studio 2012 throws an exception when the resize function is called. The exception pointed to this piece of code:
*_Pnext != 0; *_Pnext = (*_Pnext)->_Mynextiter)
located in xutility. It said there was an access violation reading memory. I thought maybe I had somehow lost access to the member along the way? (Using the Visual Studio watch window I saw the memory was not corrupted.)
So I fiddled around with the code and tried to figure out what could possibly be going wrong, and after a while I moved the map_tiles member down to the bottom of the class, and it worked:
// WORKS
class Map {
std::vector<Tile_Base*> spawn_tiles;
// map tile specific
bool Is_Valid(int,int);
std::string name;
std::vector<std::vector<std::vector<Tile_Base*> > > map_tiles;
public:
// ...
}
// DOESN'T WORK
class Map {
std::vector<std::vector<std::vector<Tile_Base*> > > map_tiles;
std::vector<Tile_Base*> spawn_tiles;
// map tile specific
bool Is_Valid(int,int);
std::string name;
public:
// ...
}
Any help pointing out what went wrong? I can't come up with any reasonable explanation.
A vector<T> comprises two discrete sets of data: the internal state and the array of Ts. The internal state - capacity, size, pointer - is separate from the array. The issue you're describing is normally caused by something overwriting the vector object, i.e. the internal state. To track this down easily you could use a container class:
#include <cassert> // for assert()
#include <vector>

typedef std::vector<std::vector<std::vector<Tile_Base*> > > maptiles_t;
class CMapTiles
{
unsigned int m_guard;
maptiles_t m_tiles;
enum { Guard = 0xdeadbeef };
public:
CMapTiles() : m_guard(Guard), m_tiles() {}
~CMapTiles() { assert(m_guard == Guard); }
void Check() const // const, so it can be called from the const accessor below
{
#if defined(DEBUG)
if (m_guard != Guard)
DebugBreak();
#endif
}
void Resize(size_t x, size_t y)
{
Check();
auto init = std::vector<std::vector<Tile_Base*> >(y / 32);
m_tiles.resize(x / 32, init);
Check();
}
const maptiles_t& tiles() const { Check(); return m_tiles; }
maptiles_t& tiles() { Check(); return m_tiles; }
};
And instead of using std::vector<...> map_tiles have CMapTiles map_tiles, and then when you want to get at the vector, map_tiles.tiles().
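For example (a sketch reusing CMapTiles and Tile_Base from above; Build() is just a placeholder for wherever you resize the grid):
class Map {
    CMapTiles map_tiles;                  // guarded wrapper instead of the raw vector
    std::vector<Tile_Base*> spawn_tiles;
    std::string name;
public:
    void Build()
    {
        map_tiles.Resize(200, 200);               // resizes through the guarded wrapper
        map_tiles.tiles()[0][0].push_back(NULL);  // element access via tiles()
    }
};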
Hope this helps.
I would like to overload a RenderScript kernel:
/* donothing.rs */
uchar4 __attribute__((kernel, overloadable)) root (uchar4 in) {
return in;
}
float4 __attribute__((kernel, overloadable)) root (float4 in) {
return in;
}
However, this generates identically-named Java methods:
// ScriptC_donothing.java:95
public void forEach_root(Allocation ain, Allocation aout, Script.LaunchOptions sc) {
// check ain
if (!ain.getType().getElement().isCompatible(__U8_4)) {
throw new RSRuntimeException("Type mismatch with U8_4!");
}
...
// ScriptC_donothing.java:225
public void forEach_root(Allocation ain, Allocation aout, Script.LaunchOptions sc) {
// check ain
if (!ain.getType().getElement().isCompatible(__F32_4)) {
throw new RSRuntimeException("Type mismatch with F32_4!");
}
...
Is there a way to write the kernels so that overloading works? The usage I expected was:
// DoNothingActivity.java
mInAllocation = Allocation.createFromBitmap(mRS, ...
mOutAllocation = Allocation.createTyped(mRS, mInAllocation.getType());
mScript = new ScriptC_donothing(mRS);
mScript.forEach_root(mInAllocation, mOutAllocation);
// calls uchar4 kernel
There's no way to overload kernel names right now. We're investigating some ways to associate more type information with an allocation in the future, though; we'll keep this use case in mind.
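Until then, one workaround (my sketch, not something suggested in the answer above) is to give each kernel a distinct name, which yields a separately reflected forEach_ method per kernel:
/* donothing.rs */
uchar4 __attribute__((kernel)) root_u4(uchar4 in) {
    return in;
}

float4 __attribute__((kernel)) root_f4(float4 in) {
    return in;
}
The Java side would then call mScript.forEach_root_u4(mInAllocation, mOutAllocation) or forEach_root_f4, depending on the allocation's element type.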