I am trying to hide the marker in ARToolKit. In my research I found this code for ALVAR, a free AR toolkit made by a technical research centre in Finland.
This code hides the marker, but it is written against ALVAR and OpenCV; I want to do the same thing in the simpleVRML example.
Any help on how I can adapt this code for the ARToolKit example?
#include "CvTestbed.h"
#include "MarkerDetector.h"
#include "GlutViewer.h"
#include "Shared.h"
using namespace alvar;
using namespace std;
#define GLUT_DISABLE_ATEXIT_HACK // Needed to compile with Mingw?
#include <GL/gl.h>
const double margin = 1.0;
std::stringstream calibrationFilename;
// Own drawable for showing hide-texture in OpenGL
struct OwnDrawable : public Drawable {
unsigned char hidingtex[64*64*4];
virtual void Draw() {
glPushMatrix();
glMultMatrixd(gl_mat);
glPushAttrib(GL_ALL_ATTRIB_BITS);
glEnable(GL_TEXTURE_2D);
int tex=0;
glBindTexture(GL_TEXTURE_2D, tex);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,64,64,0,GL_RGBA,GL_UNSIGNED_BYTE,hidingtex);
glDisable(GL_CULL_FACE);
glDisable(GL_LIGHTING);
glDisable(GL_DEPTH_TEST);
glEnable(GL_ALPHA_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBegin(GL_QUADS);
glTexCoord2d(0.0,0.0);
glVertex3d(-margin,-margin,0);
glTexCoord2d(0.0,1.0);
glVertex3d(-margin,margin,0);
glTexCoord2d(1.0,1.0);
glVertex3d(margin,margin,0);
glTexCoord2d(1.0,0.0);
glVertex3d(margin,-margin,0);
glEnd();
glPopAttrib();
glPopMatrix();
}
};
void videocallback(IplImage *image)
{
static bool init=true;
static const int marker_size=15;
static Camera cam;
static OwnDrawable d[32];
static IplImage *hide_texture;
bool flip_image = (image->origin?true:false);
if (flip_image) {
cvFlip(image);
image->origin = !image->origin;
}
static IplImage* bg_image = 0;
if(!bg_image) bg_image = cvCreateImage(cvSize(512, 512), 8, 3);
if(image->nChannels == 3)
{
bg_image->origin = 0;
cvResize(image, bg_image);
GlutViewer::SetVideo(bg_image);
}
if (init) {
init = false;
cout<<"Loading calibration: "<<calibrationFilename.str();
if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
cout<<" [Ok]"<<endl;
} else {
cam.SetRes(image->width, image->height);
cout<<" [Fail]"<<endl;
}
double p[16];
cam.GetOpenglProjectionMatrix(p,image->width,image->height);
GlutViewer::SetGlProjectionMatrix(p);
hide_texture = CvTestbed::Instance().CreateImage("hide_texture", cvSize(64, 64), 8, 4);
}
static MarkerDetector<MarkerData> marker_detector;
marker_detector.Detect(image, &cam, false, false);
GlutViewer::DrawableClear();
for (size_t i=0; i<marker_detector.markers->size(); i++) {
if (i >= 32) break;
GlutViewer::DrawableAdd(&(d[i]));
}
for (size_t i=0; i<marker_detector.markers->size(); i++) {
if (i >= 32) break;
// Note that we need to mirror both the y- and z-axis because:
// - In OpenCV we have coordinates: x-right, y-down, z-ahead
// - In OpenGL we have coordinates: x-right, y-up, z-backwards
// TODO: Better option might be to use OpenGL projection matrix that matches our OpenCV-approach
Pose p = (*(marker_detector.markers))[i].pose;
BuildHideTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-margin, -margin), PointDouble(margin, margin));
//DrawTexture(image, hide_texture, &cam, d[i].gl_mat, PointDouble(-0.7, -0.7), PointDouble(0.7, 0.7));
p.GetMatrixGL(d[i].gl_mat);
for (int ii=0; ii<64*64; ii++) {
d[i].hidingtex[ii*4+0] = hide_texture->imageData[ii*4+2];
d[i].hidingtex[ii*4+1] = hide_texture->imageData[ii*4+1];
d[i].hidingtex[ii*4+2] = hide_texture->imageData[ii*4+0];
d[i].hidingtex[ii*4+3] = hide_texture->imageData[ii*4+3];
}
}
if (flip_image) {
cvFlip(image);
image->origin = !image->origin;
}
}
int main(int argc, char *argv[])
{
try {
// Output usage message
std::string filename(argv[0]);
filename = filename.substr(filename.find_last_of('\\') + 1);
std::cout << "SampleMarkerHide" << std::endl;
std::cout << "================" << std::endl;
std::cout << std::endl;
std::cout << "Description:" << std::endl;
std::cout << " This is an example of how to detect 'MarkerData' markers, similarly" << std::endl;
std::cout << " to 'SampleMarkerDetector', and hide them using the 'BuildHideTexture'" << std::endl;
std::cout << " and 'DrawTexture' classes." << std::endl;
std::cout << std::endl;
std::cout << "Usage:" << std::endl;
std::cout << " " << filename << " [device]" << std::endl;
std::cout << std::endl;
std::cout << " device integer selecting device from enumeration list (default 0)" << std::endl;
std::cout << " highgui capture devices are prefered" << std::endl;
std::cout << std::endl;
std::cout << "Keyboard Shortcuts:" << std::endl;
std::cout << " q: quit" << std::endl;
std::cout << std::endl;
// Initialise GlutViewer and CvTestbed
GlutViewer::Start(argc, argv, 640, 480, 15);
CvTestbed::Instance().SetVideoCallback(videocallback);
// Enumerate possible capture plugins
CaptureFactory::CapturePluginVector plugins = CaptureFactory::instance()->enumeratePlugins();
if (plugins.size() < 1) {
std::cout << "Could not find any capture plugins." << std::endl;
return 0;
}
// Display capture plugins
std::cout << "Available Plugins: ";
outputEnumeratedPlugins(plugins);
std::cout << std::endl;
// Enumerate possible capture devices
CaptureFactory::CaptureDeviceVector devices = CaptureFactory::instance()->enumerateDevices();
if (devices.size() < 1) {
std::cout << "Could not find any capture devices." << std::endl;
return 0;
}
// Check command line argument for which device to use
int selectedDevice = defaultDevice(devices);
if (argc > 1) {
selectedDevice = atoi(argv[1]);
}
if (selectedDevice >= (int)devices.size()) {
selectedDevice = defaultDevice(devices);
}
// Display capture devices
std::cout << "Enumerated Capture Devices:" << std::endl;
outputEnumeratedDevices(devices, selectedDevice);
std::cout << std::endl;
// Create capture object from camera
Capture *cap = CaptureFactory::instance()->createCapture(devices[selectedDevice]);
std::string uniqueName = devices[selectedDevice].uniqueName();
// Handle capture lifecycle and start video capture
// Note that loadSettings/saveSettings are not supported by all plugins
if (cap) {
std::stringstream settingsFilename;
settingsFilename << "camera_settings_" << uniqueName << ".xml";
calibrationFilename << "camera_calibration_" << uniqueName << ".xml";
cap->start();
if (cap->loadSettings(settingsFilename.str())) {
std::cout << "Loading settings: " << settingsFilename.str() << std::endl;
}
std::stringstream title;
title << "SampleMarkerHide (" << cap->captureDevice().captureType() << ")";
CvTestbed::Instance().StartVideo(cap, title.str().c_str());
if (cap->saveSettings(settingsFilename.str())) {
std::cout << "Saving settings: " << settingsFilename.str() << std::endl;
}
cap->stop();
delete cap;
}
else if (CvTestbed::Instance().StartVideo(0, argv[0])) {
}
else {
std::cout << "Could not initialize the selected capture backend." << std::endl;
}
return 0;
}
catch (const std::exception &e) {
std::cout << "Exception: " << e.what() << endl;
}
catch (...) {
std::cout << "Exception: unknown" << std::endl;
}
}
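For orientation, here is a rough sketch of how the same hiding quad could be drawn on the ARToolKit side. This is only a sketch under assumptions: it uses the classic gsub call argConvGlpara() to turn the marker transform from arGetTransMat() into an OpenGL modelview matrix (simpleVRML loads the marker matrix in its own way right before drawing the VRML model, and this quad would be drawn at that same point), and drawHideQuad() plus the hidingtex buffer are hypothetical names; filling hidingtex from the current video frame is the part you would still have to port from ALVAR's BuildHideTexture.
#include <AR/gsub.h>   /* argConvGlpara() */
#include <GL/gl.h>

/* 64x64 RGBA patch; must be filled each frame by a helper ported from
   ALVAR's BuildHideTexture (not shown here). */
static unsigned char hidingtex[64*64*4];

/* Draw a textured quad over the marker plane (marker coordinates, z = 0). */
static void drawHideQuad(double patt_trans[3][4], double patt_width)
{
    double gl_para[16];
    double m = patt_width * 0.5;            /* half the marker size, in marker units */

    argConvGlpara(patt_trans, gl_para);     /* marker pose -> OpenGL modelview */
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glLoadMatrixd(gl_para);

    glPushAttrib(GL_ALL_ATTRIB_BITS);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 64, 64, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, hidingtex);

    glBegin(GL_QUADS);                      /* quad exactly covering the marker */
    glTexCoord2d(0.0, 0.0); glVertex3d(-m, -m, 0.0);
    glTexCoord2d(0.0, 1.0); glVertex3d(-m,  m, 0.0);
    glTexCoord2d(1.0, 1.0); glVertex3d( m,  m, 0.0);
    glTexCoord2d(1.0, 0.0); glVertex3d( m, -m, 0.0);
    glEnd();

    glPopAttrib();
    glPopMatrix();
}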
Related
I am trying to get joystick input using GLFW, with a Nintendo Pro Controller.
The code seems pretty simple and straightforward, but I can't seem to get accurate joystick data.
I used
int axesCount;
const float *axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &axesCount);
and tried to access the joystick data via
axes[0], axes[1], axes[2], and axes[3].
glfwGetJoystickAxes gave me an accurate number for axesCount, which is 4.
But even when I moved the joystick, the axes array always returned 1.52590219e-05.
Below is the whole code for my application:
#include "pch.h"
#include <iostream>
#include <gl/glew.h>
#include <GLFW/glfw3.h>
int main()
{
GLFWwindow *window;
// initialize
if (!glfwInit()) {
return -1;
}
// create window and its opengl context
window = glfwCreateWindow(640, 480, "OpenGL Project Tutorial", NULL, NULL);
if (!window) {
glfwTerminate();
return -1;
}
// make the window's context current
glfwMakeContextCurrent(window);
// loop until the user closes the window
while (!glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT);
// render OpenGL here
int present = glfwJoystickPresent(GLFW_JOYSTICK_1);
std::cout << "Joystick/Gamepad status: " << present << std::endl;
int axesCount;
const float *axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &axesCount);
if (1 == present) {
//std::cout << "Number of axes available: " << axesCount << std::endl;
std::cout << "Axis 1: " << axes[0] << std::endl;
std::cout << "Axis 2: " << axes[1] << std::endl;
std::cout << "Axis 3: " << axes[2] << std::endl;
std::cout << "Axis 4: " << axes[3] << std::endl;
std::cout << std::endl;
std::cout << std::endl;
std::cout << std::endl;
}
// swap front and back buffers
glfwSwapBuffers(window);
// poll for and process events
glfwPollEvents();
}
glfwTerminate();
return 0;
}
It didn't matter whether I put
int axesCount;
const float *axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &axesCount);
inside the while loop or not.
Any help would be appreciated, thanks.
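A minimal sketch that may help narrow this down (assuming GLFW 3.x): it isolates the joystick query from all rendering and just prints every axis and button each iteration, so you can see whether the Pro Controller delivers changing values at all. The loop structure and console output are illustrative only.
#include <GLFW/glfw3.h>
#include <iostream>

int main()
{
    if (!glfwInit())
        return -1;

    while (true) {                      // endless polling loop; Ctrl+C to stop
        glfwPollEvents();               // refresh input state

        if (glfwJoystickPresent(GLFW_JOYSTICK_1)) {
            int axesCount = 0, buttonCount = 0;
            const float *axes = glfwGetJoystickAxes(GLFW_JOYSTICK_1, &axesCount);
            const unsigned char *buttons = glfwGetJoystickButtons(GLFW_JOYSTICK_1, &buttonCount);

            for (int i = 0; axes && i < axesCount; ++i)
                std::cout << "axis " << i << ": " << axes[i] << "  ";
            for (int i = 0; buttons && i < buttonCount; ++i)
                std::cout << (int)buttons[i];
            std::cout << std::endl;
        }
    }

    glfwTerminate();                    // not reached in this sketch
    return 0;
}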
I recently started working with Qt and found a bug in my code, and I can't understand where it comes from. Maybe you can see it and explain why it happens.
Here is the part of main.cpp that changes the shutter from 3000 to its maximum:
if (camera.test_cam->liveFrameReady())
{
camera.visualizeFrame(camera.test_cam->liveFrame());
camera.set_shutter(0, 1, 3000 + i * 200);
controlWidget->update(camera.test_cam->liveFrame());
i++;
}
The code runs slowly (about 1 fps) because of the camera.visualizeFrame() method:
void OsCam::visualizeFrame(Common::FrameHandle hFrame)
{
void* buffer_ptr = this->getFrameData(hFrame);
int width = hFrame->dataType()->width();
int height = hFrame->dataType()->height();
cv::Mat m(height, width, CV_8UC3, (int*)buffer_ptr);
cv::imshow("test image", m);
cv::waitKey(1);
}
The GUI shows the camera frame in real time, with the test image from visualizeFrame in the background.
Actually, I don't need to call this method (I used it just to be sure that I can read the memory, and I used OpenCV because I am more familiar with it).
But if I get rid of this camera.visualizeFrame() call, my GUI turns white and does not respond.
Even if I use cv::waitKey or Sleep, nothing happens to the GUI.
void ControlWidget::update(Common::FrameHandle hFrame)
{
try
{
QImage img((uchar*)hFrame->buffer()->data(), hFrame->dataType()->width(), hFrame->dataType()->height(), QImage::Format::Format_RGB888);
QSize standart_size = QSize(hFrame->dataType()->width() / 3, hFrame->dataType()->height() / 3);
QPixmap rectPxmp = QPixmap::fromImage(img).scaled(standart_size);
this->camera_1->setPixmap(rectPxmp);
this->camera_2->setPixmap(rectPxmp);
cv::waitKey(300);
}
catch (GeneralException& e)
{
std::cout << e.what() << std::endl;
}
}
Should I move to QThread, with one thread reading the buffer and another one driving the GUI?
Thank you!
Some additional code:
void* OsCam::getFrameData(Common::FrameHandle hFrame)
{
bool displayFrameData = false;
void* pFrameData = NULL;
try
{
if (hFrame)
{
if (displayFrameData)
{
// display information about the frame
cout << "Frame index: " << hFrame->frameIndex();
cout << "Frame timestamp: " << hFrame->timestamp();
// display information about the frame "data type"
DataTypeHandle hDataType = hFrame->dataType();
cout << "Frame size in bytes: " << hDataType->frameSizeInBytes() << endl;
cout << "Width in pixels: " << DataType::width(hDataType) << endl;
cout << "Height in rows: " << DataType::height(hDataType) << endl;
cout << "Bit depth: " << DataType::bitDepth(hDataType) << endl;
cout << "Bytes/line (stride): " << DataType::stride(hDataType) << endl;
// display the frame video format
VideoDataType::Format videoFormat = VideoDataType::format(hDataType);
cout << "Video format: " << VideoDataType::formatString(videoFormat).c_str() << endl;
// get a pointer to the frame data
}
pFrameData = hFrame->buffer()->data();
}
}
catch (GeneralException& e)
{
cout << e.what() << endl;
}
return pFrameData;
}
And main (I changed it a little bit):
int main(int argc, char **argv)
{
QApplication app(argc, argv);
ControlWidget *controlWidget = new ControlWidget;
//controlWidget->show();
try
{
OsCam camera;
int i = 0;
for (int j = 0; j<10000 ; j++)
{
if ((camera.test_cam->liveFrameReady()))
{
Common::FrameHandle loaded = camera.test_cam->liveFrame()->clone();
camera.visualizeFrame(loaded);
controlWidget->update(loaded);
camera.set_shutter(0, 1, 3000 + i * 200);
loaded->~Frame();
}
i++;
}
}
catch (GeneralException& e)
{
std::cout << e.what() << std::endl;
int a;
std::cin >> a;
return 1;
}
}
After cv::named_window
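The GUI turning white when camera.visualizeFrame() is removed is consistent with the Qt event loop never running: the loop in main() blocks before app.exec() is ever reached, and cv::waitKey() happens to pump the native message queue, which is probably why the widgets repaint at all. Below is a minimal sketch of one alternative that does not need QThread: let app.exec() run and drive the frame grabbing from a QTimer. It assumes Qt 5 (lambda connect) and reuses the OsCam/ControlWidget classes from the question; the 30 ms interval is arbitrary.
#include <QApplication>
#include <QTimer>
// plus the headers declaring OsCam and ControlWidget

int main(int argc, char **argv)
{
    QApplication app(argc, argv);

    ControlWidget controlWidget;
    controlWidget.show();

    OsCam camera;
    int i = 0;

    QTimer timer;
    QObject::connect(&timer, &QTimer::timeout, [&]() {
        if (camera.test_cam->liveFrameReady()) {
            controlWidget.update(camera.test_cam->liveFrame());
            camera.set_shutter(0, 1, 3000 + i * 200);
            ++i;
        }
    });
    timer.start(30);    // poll roughly every 30 ms; tune to the camera rate

    return app.exec();  // the event loop now keeps the widgets responsive
}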
I am trying to compute a parameterized surface on a surface mesh (read from an STL file). I read the parameterization examples provided in the CGAL examples directory and learned that a seam line should be provided in order to parameterize an arbitrary surface, but I still don't understand how to build that seam line. My code so far is below. In summary, what I want to know is:
1) When CGAL::Parameterization_mesh_feature_extractor is used, how can I get the vertices on the feature curves and build a seam line from them?
2) Does CGAL provide a way to get the intersection curve of a given surface and a cutting plane, so that I can parameterize only a part of the given surface?
#include <cstdio>
#include <ctime>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <fstream>
#include <CGAL/IO/io.h>
#include <CGAL/IO/STL_reader.h>
#include <CGAL/Polyhedron_3.h>
#include <CGAL/Simple_cartesian.h>
#include <CGAL/polygon_soup_to_polyhedron_3.h>
#include <CGAL/Parameterization_polyhedron_adaptor_3.h>
#include <CGAL/Parameterization_mesh_patch_3.h>
#include <CGAL/parameterize.h>
int main(int argc, char* argv[]) {
clock_t time1, time2;
double read_time, write_time, build_time;
if(argc == 1) {
std::cout << "Please, give me a filename" << std::endl;
return 0;
}
std::ifstream infile(argv[1]);
if(infile.bad()) {
std::cout << "Infile not found or file corrupt" << std::endl;
return 1;
}
std::vector<CGAL::cpp11::array<double, 3> > points;
std::vector<CGAL::cpp11::array<int, 3> > triangles;
time1 = clock();
if (!CGAL::read_STL(infile, points, triangles)) {
std::cerr << "Error: invalid STL file" << std::endl;
return 0;
}
time2 = clock();
read_time = float(time2 - time1) / CLOCKS_PER_SEC;
fprintf(stdout, "Read time : %5.2f sec\n", read_time);
// Write polyhedron in Tecplot format
std::ofstream ofs("mesh.dat");
CGAL::set_ascii_mode(ofs);
time1 = clock();
ofs << "TITLE=\"\"" << std::endl;
ofs << "VARIABLES=\"X\" \"Y\" \"Z\"" << std::endl;
ofs << "ZONE T=\"None\" N=" << points.size() << " E=" << triangles.size() << " F=FEPOINT ET=TRIANGLE" << std::endl;
ofs.setf(std::ios::fixed);
ofs.precision(6);
for(std::vector<CGAL::cpp11::array<double, 3> >::iterator i = points.begin(); i != points.end(); ++i) {
ofs << (*i)[0] << " " << (*i)[1] << " " << (*i)[2] << std::endl;
}
for(std::vector<CGAL::cpp11::array<int, 3> >::iterator i = triangles.begin(); i != triangles.end(); ++i) {
ofs << (*i)[0]+1 << " " << (*i)[1]+1 << " " << (*i)[2]+1 << std::endl;
}
time2 = clock();
write_time = float(time2 - time1) / CLOCKS_PER_SEC;
fprintf(stdout, "Write time : %5.2f sec\n", write_time);
// build mesh
typedef CGAL::Simple_cartesian<double> Kernel;
typedef CGAL::Polyhedron_3<Kernel> Polyhedron;
Polyhedron mesh;
time1 = clock();
try{
// Try building a polyhedron
CGAL::polygon_soup_to_polyhedron_3(mesh, points, triangles);
if(! mesh.is_valid() || mesh.empty()){
std::cerr << "Error: Invalid polyhedron" << std::endl;
}
}
catch(...){}
time2 = clock();
build_time= float(time2 - time1) / CLOCKS_PER_SEC;
fprintf(stdout, "Build time : %5.2f sec\n", build_time);
// parameterization
typedef CGAL::Parameterization_polyhedron_adaptor_3<Polyhedron> Parameterization_polyhedron_adaptor;
// Type describing a border or seam as a vertex list
typedef std::list<Parameterization_polyhedron_adaptor::Vertex_handle> Seam;
//Create a second adaptor that virtually "cuts" the mesh following the 'seam' path
typedef CGAL::Parameterization_mesh_patch_3<Parameterization_polyhedron_adaptor> Mesh_patch_polyhedron;
Parameterization_polyhedron_adaptor mesh_adaptor(mesh);
////////////////////// cut graph ////////////////////////////////
typedef CGAL::Parameterization_mesh_feature_extractor<Parameterization_polyhedron_adaptor>
Mesh_feature_extractor;
Seam seam;
// Get reference to Polyhedron_3 mesh
Polyhedron& mesh_ref = mesh_adaptor.get_adapted_mesh();
// Extract mesh borders and compute genus
Mesh_feature_extractor feature_extractor(mesh_adaptor);
int nb_borders = feature_extractor.get_nb_borders();
int genus = feature_extractor.get_genus(); // genus = number of handles in the surface
std::cout << "# borders: " << nb_borders << " # holes: " << genus << std::endl;
std::cout << feature_extractor.get_borders()[0] << std::endl;
///////////////////// end of cut graph //////////////////////////
/*
Mesh_patch_polyhedron mesh_patch(mesh_adaptor, seam.begin(), seam.end());
if (!mesh_patch.is_valid())
{
std::cerr << "Input mesh not supported: non manifold shape or invalid cutting" << std::endl;
return EXIT_FAILURE;
}
typedef CGAL::Parameterizer_traits_3<Mesh_patch_polyhedron> Parameterizer; // Type that defines the error codes
Parameterizer::Error_code err = CGAL::parameterize(mesh_patch);
switch(err) {
case Parameterizer::OK: // Success
break;
case Parameterizer::ERROR_EMPTY_MESH: // Input mesh not supported
case Parameterizer::ERROR_NON_TRIANGULAR_MESH:
case Parameterizer::ERROR_NO_TOPOLOGICAL_DISC:
case Parameterizer::ERROR_BORDER_TOO_SHORT:
std::cerr << "Input mesh not supported: " << Parameterizer::get_error_message(err) << std::endl;
return EXIT_FAILURE;
break;
default: // Error
std::cerr << "Error: " << Parameterizer::get_error_message(err) << std::endl;
return EXIT_FAILURE;
break;
};
// Raw output: dump (u,v) pairs
Polyhedron::Vertex_const_iterator pVertex;
for (pVertex = mesh.vertices_begin(); pVertex != mesh.vertices_end(); pVertex++)
{
// (u,v) pair is stored in any halfedge
double u = mesh_adaptor.info(pVertex->halfedge())->uv().x();
double v = mesh_adaptor.info(pVertex->halfedge())->uv().y();
std::cout << "(u,v) = (" << u << "," << v << ")" << std::endl;
}
*/
return 0;
}
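Regarding question 1), here is a heavily hedged sketch of the missing step, which would go between the feature extraction and the commented-out mesh_patch block: in the old CGAL parameterization package the extractor exposes its borders as lists of vertex handles, and for a mesh that is a topological disc the longest border itself can serve as the seam. The accessor get_longest_border() follows the old package's cutting example; verify the exact name and return type in the Parameterization_mesh_feature_extractor reference for your CGAL version.
// Sketch only (assumed API): use the longest extracted border as the seam.
if (genus == 0 && nb_borders > 0) {
    seam = feature_extractor.get_longest_border();
} else {
    // Closed mesh or mesh with handles: a cut graph has to be built manually,
    // e.g. along feature curves or along a plane/surface intersection (question 2).
    std::cerr << "No usable border: build a cut graph manually" << std::endl;
}
// With a non-empty seam, the commented-out Mesh_patch_polyhedron /
// CGAL::parameterize() section can then be enabled.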
If I try to load a texture I get this error:
Access violation reading location.
Unhandled exception at 0x651B5A17 (nvcuda.dll) in nsighttest.exe: 0xC0000005: Access violation reading location 0x00000010.
image:
http://img5.fotos-hochladen.net/uploads/error1wlbsuf0iv.png
Debug image:
http://img5.fotos-hochladen.net/uploads/debugatc1xerob5.png
unsigned int loadTexture(const char* filename, texProperties properties)
{
GLint numberofcolors = 0;
GLenum format;
SDL_Surface * img = IMG_Load(filename);
cout << "Image height: " << img->h << endl;
cout << "Image width: " << img->w << endl;
cout << "Images Pixels: " << img->pixels << endl;
cout << "Images BitsPerPixel: " << img->format->BitsPerPixel << endl;
cout << "Images Rmask: " << img->format->Rmask << endl;
cout << "Images Surface: " << img << endl;
if(!(&img)) { std::cout << "Fehler beim laden des bildes: " << filename; std::cout << std::endl; }
if(img->format->BitsPerPixel == 4) {
if(img->format->Rmask == 0x000000ff) { format = GL_RGBA; }
else { format = GL_BGRA; }
numberofcolors = 4;
} else {
if(img->format->Rmask == 0x000000ff) { format = GL_RGB; }
else { format = GL_BGR; }
numberofcolors = 3;
}
unsigned int id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, properties.getMagFilter());
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, properties.getMinFilter());
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, properties.getTextureWrap());
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, properties.getTextureWrap());
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, properties.getAnisotropy());
glTexImage2D(GL_TEXTURE_2D, 0 , numberofcolors=4?GL_RGBA:GL_RGB, img->w, img->w, 0, format, GL_UNSIGNED_BYTE, img->pixels);
SDL_FreeSurface(img);
return id;
}
The error comes from "glTexImage2D(...)".
Looking at the glTexImage2D() function, it looks like you are using
img->w twice, rather than using img->h for the second one.
Try calling
glTexImage2D(GL_TEXTURE_2D, 0 , numberofcolors=4?GL_RGBA:GL_RGB, img->w, /*Use height as second parameter, rather than width!*/img->h, 0, format, GL_UNSIGNED_BYTE, img->pixels);
Instead.
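Two further details in the posted function are worth checking, independent of the width/height mix-up (these are observations about the code as shown, not about the crash screenshots): if(!(&img)) takes the address of the local pointer, which is never null, so the load-failure check can never fire, and it runs only after img has already been dereferenced; numberofcolors=4?GL_RGBA:GL_RGB is an assignment rather than a comparison; and SDL_PixelFormat::BitsPerPixel is counted in bits, so the RGBA branch presumably wants 32 bits (or BytesPerPixel == 4). A small corrected sketch of those spots:
SDL_Surface *img = IMG_Load(filename);
if (!img) {                                   // test the pointer itself, before any use
    std::cout << "Error loading image: " << filename << std::endl;
    return 0;
}
if (img->format->BytesPerPixel == 4) { /* RGBA path */ }    // bytes, not bits

glTexImage2D(GL_TEXTURE_2D, 0, numberofcolors == 4 ? GL_RGBA : GL_RGB,   // == not =
             img->w, img->h, 0, format, GL_UNSIGNED_BYTE, img->pixels);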
Every time I run this program I get this error:
Segmentation fault
testing.cpp
#include <iostream>
#include <cstdint>
#include <SDL.h>
#include <SDL_image.h>
#include "Surface.h"
#include "Point2D.h"
int main() {
if (SDL_Init(SDL_INIT_EVERYTHING) != 0) {
std::cerr << "Can not init SDL" << std::endl;
return -1;
}
SDL_Surface *scr = SDL_SetVideoMode(800, 600, 0, SDL_HWSURFACE | SDL_DOUBLEBUF);
if ( scr == NULL ) {
std::cerr << "Can not open window" << std::endl;
return -1;
}
SDL_WM_SetCaption("Test Surface class", NULL);
Surface screen;
Surface image;
screen.set(SDL_GetVideoSurface());
if (image.load("./data/images/logo.png") == false) {
std::cerr << "Can not open file" << std::endl;
return -1;
}
bool run = true;
uint32_t sticks = SDL_GetTicks();
while(run) {
if (screen.draw(image, Rectangle(0, 0, image.getWidth(), image.getHeight()), Point2D(0, 0)) == false) {
std::cerr << "Can not draw to window" << std::endl;
return -1;
}
if ((SDL_GetTicks() - sticks)/1000 > 5) {
std::cerr << "End of time" << std::endl;
run = false;
}
SDL_Flip(screen.sdlSurface());
}
std::cerr << "Done" << std::endl;
SDL_Quit();
return 0;
}
Here is the source code of the Surface class:
#include "Surface.h"
#include "Color.h"
Surface::Surface() {
this->surface = NULL;
}
Surface::~Surface() {
SDL_FreeSurface(this->surface);
this->surface = NULL;
}
bool Surface::draw(Surface source, Rectangle source_area, Point2D position) {
SDL_Rect sr = source_area.sdlRect();
SDL_Rect tr = {position.getX(), position.getY(), 0, 0};
std::cout << sr.x << "," << sr.y << "," << sr.w << "," << sr.h << std::endl;
std::cout << tr.x << "," << tr.y << "," << tr.w << "," << tr.h << std::endl;
std::cout << source.sdlSurface() << std::endl;
std::cout << this->surface << std::endl;
// TR and SR are currently unused. Just for case of testing
if (SDL_BlitSurface(source.sdlSurface(), NULL, this->surface, NULL) != 0) {
return false;
}
return true;
}
bool Surface::fill(Color color, Rectangle area) {
SDL_Rect tr = area.sdlRect();
std::cout << tr.x << "," << tr.y << "," << tr.w << "," << tr.h << std::endl;
std::cout << (int)color.sdlColor() << std::endl;
if ( SDL_FillRect(this->surface, &tr, color.sdlColor()) != 0) {
return false;
}
return true;
}
Rectangle Surface::getSize() {
return Rectangle(0, 0, this->getWidth(), this->getHeight());
}
bool Surface::load(std::string file) {
SDL_Surface *src = IMG_Load(file.c_str());
if (src == NULL) {
std::cout << "cyh" << std::endl;
std::cout << IMG_GetError() << std::endl;
return false;
}
this->surface = SDL_DisplayFormatAlpha(src);
if (this->surface == NULL) {
std::cout << "cyh1" << std::endl;
std::cout << SDL_GetError() << std::endl;
return false;
}
SDL_FreeSurface(src);
src = NULL;
return true;
}
void Surface::set(SDL_Surface *surface) {
this->surface = surface;
}
void Surface::set(Surface surface) {
this->surface = surface.sdlSurface();
}
The problem is probably in the draw function. If I change this line:
if (SDL_BlitSurface(source.sdlSurface(), NULL, this->surface, NULL) != 0) {
to:
if (SDL_BlitSurface(IMG_Load("./data/images/logo.png"), NULL, this->surface, NULL) != 0) {
everything seems to be OK. Could you give me a suggestion as to where the problem might be?
You're violating The Rule of 3.
You need to define a copy constructor and a copy assignment operator for this to work properly.
When you're passing the Surface in to the draw method, you're making a copy of it. Since you're managing a resource inside the Surface class (the SDL_Surface*), you'll have multiple instances of the class pointing at the same surface, so when the destructors are called, you'd be freeing the same SDL_Surface* more than once.
Edit: I would recommend passing your arguments by const reference. You don't need to make copies of the Surface objects when you want to draw them.
This:
bool Surface::draw(Surface source, Rectangle source_area, Point2D position);
Would become this:
bool Surface::draw(const Surface &source, const Rectangle &source_area, const Point2D &position);
And this can be applied to your other functions too.
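For reference, a minimal sketch of what the Rule of Three additions could look like for this Surface class (assuming the private member is SDL_Surface* surface, as in the code above, and using SDL_ConvertSurface to deep-copy the pixels). Passing by const reference as suggested avoids the copies during drawing, but the class still needs these two members (or explicitly disabled copying) to be safe:
// Copy constructor: duplicate the wrapped SDL_Surface instead of sharing it.
Surface::Surface(const Surface &other) {
    this->surface = NULL;
    if (other.surface != NULL) {
        this->surface = SDL_ConvertSurface(other.surface, other.surface->format,
                                           other.surface->flags);
    }
}

// Copy assignment: release the old surface, then duplicate the other one.
Surface &Surface::operator=(const Surface &other) {
    if (this != &other) {
        if (this->surface != NULL) {
            SDL_FreeSurface(this->surface);
        }
        this->surface = NULL;
        if (other.surface != NULL) {
            this->surface = SDL_ConvertSurface(other.surface, other.surface->format,
                                               other.surface->flags);
        }
    }
    return *this;
}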