intel real sense programming about librealsense2 - c++

I want to get 1280x720 depth image and 1280x720 color image.
So I wrote the following code:
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include "librealsense2/rs.hpp" // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
#include "opencv2/opencv.hpp"
#include <iostream>
#include "stb-master\stb_image_write.h"
using namespace std;
using namespace cv;
// Capture Example demonstrates how to
// capture depth and color video streams and render them to the screen
int main(int argc, char * argv[]) try
{
int width = 1280;
int height = 720;
rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
// Create a simple OpenGL window for rendering:
window app(width, height, "RealSense Capture Example");
// Declare two textures on the GPU, one for color and one for depth
texture depth_image, color_image;
// Declare depth colorizer for pretty visualization of depth data
rs2::colorizer color_map;
color_map.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED,1.f);
color_map.set_option(RS2_OPTION_COLOR_SCHEME, 2.f);
// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
// Start streaming with default recommended configuration
pipe.start();
while (app) // Application still alive?
{
rs2::frameset data = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::frame depth = color_map(data.get_depth_frame()); // Find and colorize the depth data
rs2::frame color = data.get_color_frame(); // Find the color data
// For cameras that don't have RGB sensor, we'll render infrared frames instead of color
if (!color)
color = data.get_infrared_frame();
// Render depth on to the first half of the screen and color on to the second
depth_image.render(depth, { 0, 0, app.width() / 2, app.height() });
color_image.render(color, { app.width() / 2, 0, app.width() / 2, app.height() });
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
I want to do this:
1. Press the "c" key
2. Save the color image and depth image in PNG format
I can get the code about 2.
but, i don't know how to call the action when I press the "c"
I guess I have to use this example.hpp.
// NOTE(review): legacy librealsense 1.x snippet, quoted for reference only.
// It stashes the rs::device pointer in the GLFW window user-pointer so the
// key callback (which must be capture-less to convert to a C function
// pointer) can reach the device.
GLFWwindow * win = glfwCreateWindow(tile_w*cols, tile_h*rows, ss.str().c_str(), 0, 0);
glfwSetWindowUserPointer(win, &dev);
glfwSetKeyCallback(win, [](GLFWwindow * win, int key, int scancode, int action, int mods)
{
// Recover the device pointer stored above.
auto dev = reinterpret_cast<rs::device *>(glfwGetWindowUserPointer(win));
// React on key press/repeat; ignore key releases.
if (action != GLFW_RELEASE) switch (key)
{
case GLFW_KEY_R: color_rectification_enabled = !color_rectification_enabled; break;
case GLFW_KEY_C: align_color_to_depth = !align_color_to_depth; break;
case GLFW_KEY_D: align_depth_to_color = !align_depth_to_color; break;
case GLFW_KEY_E:
// Toggle the IR emitter if this device supports the option.
if (dev->supports_option(rs::option::r200_emitter_enabled))
{
int value = !dev->get_option(rs::option::r200_emitter_enabled);
std::cout << "Setting emitter to " << value << std::endl;
dev->set_option(rs::option::r200_emitter_enabled, value);
}
break;
case GLFW_KEY_A:
// Toggle left/right auto exposure if supported.
if (dev->supports_option(rs::option::r200_lr_auto_exposure_enabled))
{
int value = !dev->get_option(rs::option::r200_lr_auto_exposure_enabled);
std::cout << "Setting auto exposure to " << value << std::endl;
dev->set_option(rs::option::r200_lr_auto_exposure_enabled, value);
}
break;
}
});
This code is used in librealsense 1.X version. I would like to change this to librealsense 2.0 version code. But I do not know what to do.
How should I change this code?
Thanks for reading!

Useful samples to get you on your way with RealSense SDK 2.0 and OpenCV are available in the repo at /wrappers/opencv
Keep in mind that the Supported Devices by SDK 2.0 are:
Intel® RealSense™ Camera D400-Series
Intel® RealSense™ Developer Kit SR300

Related

Copy Freetype Bitmap into Magick::Image at specific offset

In my game engine, I have a texture loading API which wraps low level libraries like OpenGL, DirectX, etc. This API uses Magick++ because I found it to be a convenient cross-platform solution and allows me to create procedural textures fairly easily.
I'm now adding a text rendering system using freetype where I want to use this texture API to dynamically generate a texture atlas for any given font where all the glyphs are stored horizontally adjacent.
I have been able to get this to work in the past by buffering the bitmaps directly into OpenGL. But now I want to accomplish this in a platform independent way, using this API.
I've looked around for a few examples but I can't find anything quite like what I'm after so if there are any magick++ experts around, I'd really appreciate some pointers.
So in simple terms: I've got a freetype bitmap and I want to be able to copy its pixel buffer to a specific offset inside a Magick::Image.
This code might help to clarify:
// Builds a font texture atlas: renders ASCII glyphs 32..127 with FreeType
// and packs them horizontally into a single grayscale Magick::Image.
auto texture = e6::textures->create(e6::texture::specification{}, [name, totalWidth, maxHeight](){
// Initialises Freetype
FT_Face face;
FT_Library ft;
if (FT_Init_FreeType(&ft)) {
// NOTE(review): the init failure is only logged; execution continues with
// an uninitialised FT_Library -- consider throwing here as well.
std::cout << "ERROR::FREETYPE: Could not init FreeType Library" << std::endl;
}
if (int error = FT_New_Face(ft, path(name.c_str()).c_str(), 0, &face)) {
std::cout << "Failed to initialise fonts: " << name << std::endl;
throw std::exception();
}
// Sets the size of the font
FT_Set_Pixel_Sizes(face, 0, 100);
unsigned int cursor = 0; // Keeps track of the horizontal offset.
// Preallocate an image buffer
// totalWidth and maxHeight is the size of the entire atlas
Magick::Image image(Magick::Geometry(totalWidth, maxHeight), "BLACK");
image.type(Magick::GrayscaleType);
image.magick("BMP");
image.depth(8);
image.modifyImage();
Magick::Pixels view(image);
// Loops through a subset of the ASCII codes
for (uint8_t c = 32; c < 128; c++) {
// FT_LOAD_RENDER rasterises the glyph into face->glyph->bitmap.
if (FT_Load_Char(face, c, FT_LOAD_RENDER)) {
std::cout << "Failed to load glyph: " << c << std::endl;
continue;
}
// Just for clarification...
unsigned int width = face->glyph->bitmap.width;
unsigned int height = face->glyph->bitmap.rows;
unsigned char* image_data = face->glyph->bitmap.buffer;
// This is the problem part.
// How can I copy the image_data into `image` at the cursor position?
cursor += width; // Advance the cursor
}
image.write(std::string(TEXTURES) + "font-test.bmp"); // Write to filesystem
// Clean up freetype
FT_Done_Face(face);
FT_Done_FreeType(ft);
return image;
}, "font-" + name);
I tried using a pixel cache which the documentation demonstrates:
// NOTE(review): '*pixels = *image_data' dereferences each pointer once, so
// it copies only a single Quantum (one pixel component), not the whole glyph
// buffer -- which is why the result stays black.
Magick::Quantum *pixels = view.get(cursor, 0, width, height);
*pixels = *image_data;
view.sync();
But this leaves me with a completely black image, I think because the image_data goes out of scope.
I was hoping there'd be a way to modify the image data directly but after a lot of trial and error, I ended up just creating an image for each glyph and compositing them together:
...
// Workaround: render each glyph into its own one-channel image and composite
// it into the atlas at the current cursor offset.
Magick::Image glyph (Magick::Geometry(), "BLACK");
glyph.type(MagickCore::GrayscaleType);
glyph.magick("BMP");
glyph.depth(8);
// read() sizes the image from (width, height) and interprets image_data as
// one 8-bit "R" sample per pixel.
glyph.read(width, height, "R", Magick::StorageType::CharPixel, image_data);
image.composite(glyph, cursor, 0);
cursor += width;
At the very least, I hope this helps to prevent someone else going down the same rabbit hole I did.

Intel RealSense Depth Camera D435i noises and mounds

Intel RealSense Depth Camera D435i.
I try to capture an image and save it as stl format.
I use this project provided by Intel to achieve this task.
https://github.com/IntelRealSense/librealsense/releases/download/v2.29.0/Intel.RealSense.SDK.exe
In the solution there is an application named PointCloud.
I modified a little the application to have a clear image.
But even with the basic code, the result is not very satisfying.
I capture a smooth surface but there are many little bumps on result.
I don't know if the problem comes from the SDK or from the camera.
I check the result in MeshLab which is a great 3D tool.
Any idea ?
The result (a smooth table surface) :
Here is my code C++ (I added some filters only but without filters I have the same problem) :
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
#include <algorithm> // std::min, std::max
#include <iostream>
#include <Windows.h>
#include <imgui.h>
#include "imgui_impl_glfw.h"
#include <stdio.h>
#include <windows.h>
#include <conio.h>
#include "tchar.h"
// Helper functions
void register_glfw_callbacks(window& app, glfw_state& app_state);
// Renders a live RealSense point cloud with an ImGui "Capturer" button and,
// once pressed, exports the current cloud to a PLY file and exits.
// Returns EXIT_SUCCESS on normal exit, EXIT_FAILURE on error.
int main(int argc, char * argv[]) try
{
    ::ShowWindow(::GetConsoleWindow(), SW_HIDE);
    // Create a simple OpenGL window for rendering:
    window app(1280, 720, "Capron 3D");
    ImGui_ImplGlfw_Init(app, false);
    bool capture = false;
    HWND hWnd;
    hWnd = FindWindow(NULL, _T("Capron 3D"));
    ShowWindow(hWnd, SW_MAXIMIZE);
    // Construct an object to manage view state
    glfw_state app_state;
    // register callbacks to allow manipulation of the pointcloud
    register_glfw_callbacks(app, app_state);
    app_state.yaw = 3.29;
    app_state.pitch = 0;
    // Declare pointcloud object, for calculating pointclouds and texture mappings
    rs2::pointcloud pc;
    // We want the points object to be persistent so we can display the last cloud when a frame drops
    rs2::points points;
    // Declare RealSense pipeline, encapsulating the actual device and sensors
    rs2::pipeline pipe;
    // Start streaming with default recommended configuration
    pipe.start();
    // Post-processing filters (dec_filter/temp_filter are currently unused/disabled).
    rs2::decimation_filter dec_filter;
    rs2::spatial_filter spat_filter;
    rs2::threshold_filter thres_filter;
    rs2::temporal_filter temp_filter;
    float w = static_cast<float>(app.width());
    float h = static_cast<float>(app.height());
    while (app) // Application still alive?
    {
        static const int flags = ImGuiWindowFlags_NoCollapse
        | ImGuiWindowFlags_NoScrollbar
        | ImGuiWindowFlags_NoSavedSettings
        | ImGuiWindowFlags_NoTitleBar
        | ImGuiWindowFlags_NoResize
        | ImGuiWindowFlags_NoMove;
        ImGui_ImplGlfw_NewFrame(1);
        ImGui::SetNextWindowSize({ app.width(), app.height() });
        ImGui::Begin("app", nullptr, flags);
        // Set options for the ImGui buttons
        ImGui::PushStyleColor(ImGuiCol_TextSelectedBg, { 1, 1, 1, 1 });
        ImGui::PushStyleColor(ImGuiCol_Button, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
        ImGui::PushStyleColor(ImGuiCol_ButtonHovered, { 40 / 255.f, 170 / 255.f, 90 / 255.f, 1 });
        ImGui::PushStyleColor(ImGuiCol_ButtonActive, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
        ImGui::PushStyleVar(ImGuiStyleVar_FrameRounding, 12);
        ImGui::SetCursorPos({ 10, 10 });
        if (ImGui::Button("Capturer", { 100, 50 }))
        {
            capture = true;
        }
        // Wait for the next set of frames from the camera
        auto frames = pipe.wait_for_frames();
        auto color = frames.get_color_frame();
        // For cameras that don't have RGB sensor, we'll map the pointcloud to infrared instead of color
        if (!color)
            color = frames.get_infrared_frame();
        // Tell pointcloud object to map to this color frame
        pc.map_to(color);
        auto depth = frames.get_depth_frame();
        /*spat_filter.set_option(RS2_OPTION_FILTER_SMOOTH_DELTA, 50);
        depth = spat_filter.process(depth);*/
        spat_filter.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 1);
        depth = spat_filter.process(depth);
        spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
        depth = spat_filter.process(depth);
        //temp_filter.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 1);
        //depth = temp_filter.process(depth);
        // FIX: apply the threshold filter BEFORE generating the point cloud.
        // In the original code it ran after pc.calculate(), so it had no
        // effect on the points that were drawn and exported.
        thres_filter.set_option(RS2_OPTION_MIN_DISTANCE, 0);
        depth = thres_filter.process(depth);
        // Generate the pointcloud and texture mappings
        points = pc.calculate(depth);
        // Upload the color frame to OpenGL
        app_state.tex.upload(color);
        // Draw the pointcloud
        draw_pointcloud(int(w) / 2, int(h) / 2, app_state, points);
        if (capture)
        {
            points.export_to_ply("My3DFolder\\new.ply", depth);
            return EXIT_SUCCESS;
        }
        ImGui::PopStyleColor(4);
        ImGui::PopStyleVar();
        ImGui::End();
        ImGui::Render();
    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    // rs2::error carries the failing API call and its arguments -- print both.
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
    MessageBox(0, "Erreur connexion RealSense. Veuillez vérifier votre caméra 3D.", "Capron Podologie", 0);
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}
I found the answer,
I was using HolesFill filter.
//These lines
spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
depth = spat_filter.process(depth);
And the hole-filling algorithm is predictive: it creates points, but the coordinates of those points are not exactly correct. The second parameter of spat_filter.set_option is between 1 and 5. The more I increase this parameter, the noisier the result.
If I remove these lines, I have a clearer result.
But this time I have many holes on the result.

How to capture camera's video in real time?

I'm using Usb 3.0 Basler camera acA640-750uc to capture the video and here is the program to use 2 cameras and grab the frame:
The problem is that when I ran this program, my computer captured the video from the 2 cameras, but the video lagged behind my actual movement by about 2 seconds. That means
my video is slower than real time, and I want to capture the video in real time.
How can I solve this problem?
I've tried to change the condition of for (size_t i = 0; i < cameras.GetSize(); ++i) from ++i to i++, but it doesn't work.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#include <pylon/PylonGUI.h>
#endif
// Namespace for using pylon objects.
using namespace Pylon;
// Namespace for using cout.
using namespace std;
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 1000;
// Limits the amount of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with
// multiple cameras.
// This applies, for instance, if two GigE cameras are connected to the
// same network adapter via a switch.
// To manage the bandwidth, the GevSCPD interpacket delay parameter and
// the GevSCFTD transmission delay
// parameter can be set for each GigE camera device.
// The "Controlling Packet Transmission Timing with the Interpacket and
// Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Notes (AW000649xx000)
// provide more information about this topic.
// The bandwidth used by a FireWire camera device can be limited by
// adjusting the packet size.
static const size_t c_maxCamerasToUse = 2;
int main(int argc, char* argv[])
{
// The exit code of the sample application.
int exitCode = 0;
// Before using any pylon methods, the pylon runtime must be initialized.
PylonInitialize();
try
{
// Get the transport layer factory.
CTlFactory& tlFactory = CTlFactory::GetInstance();
// Get all attached devices and exit application if no device is found.
DeviceInfoList_t devices;
if (tlFactory.EnumerateDevices(devices) == 0)
{
throw RUNTIME_EXCEPTION("No camera present.");
}
// Create an array of instant cameras for the found devices and avoid
// exceeding a maximum number of devices.
CInstantCameraArray cameras(min(devices.size(), c_maxCamerasToUse));
// Create and attach all Pylon Devices.
for (size_t i = 0; i < cameras.GetSize(); ++i)
{
cameras[i].Attach(tlFactory.CreateDevice(devices[i]));
// Print the model name of the camera.
cout << "Using device " << cameras[i].GetDeviceInfo().GetModelName() <<
endl;
}
// Starts grabbing for all cameras starting with index 0. The grabbing
// is started for one camera after the other. That's why the images of
// all
// cameras are not taken at the same time.
// However, a hardware trigger setup can be used to cause all cameras to
// grab images synchronously.
// According to their default configuration, the cameras are
// set up for free-running continuous acquisition.
cameras.StartGrabbing();
// This smart pointer will receive the grab result data.
CGrabResultPtr ptrGrabResult;
// Grab c_countOfImagesToGrab from the cameras.
for (uint32_t i = 0; i < c_countOfImagesToGrab && cameras.IsGrabbing();
++i)
{
cameras.RetrieveResult(5000, ptrGrabResult,
TimeoutHandling_ThrowException);
// When the cameras in the array are created the camera context value
// is set to the index of the camera in the array.
// The camera context is a user settable value.
// This value is attached to each grab result and can be used
// to determine the camera that produced the grab result.
intptr_t cameraContextValue = ptrGrabResult->GetCameraContext();
#ifdef PYLON_WIN_BUILD
// Show the image acquired by each camera in the window related to each
// camera.
Pylon::DisplayImage(cameraContextValue, ptrGrabResult);
#endif
// Print the index and the model name of the camera.
cout << "Camera " << cameraContextValue << ": " <<
cameras[cameraContextValue].GetDeviceInfo().GetModelName() << endl;
// Now, the image data can be processed.
cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
const uint8_t* pImageBuffer = (uint8_t*)ptrGrabResult->GetBuffer();
cout << "Gray value of first pixel: " << (uint32_t)pImageBuffer[0] <<
endl <<
endl;
}
}
catch (const GenericException& e)
{
// Error handling
cerr << "An exception occurred." << endl
<< e.GetDescription() << endl;
exitCode = 1;
}
// Comment the following two lines to disable waiting on exit.
cerr << endl << "Press Enter to exit." << endl;
while (cin.get() != '\n');
// Releases all pylon resources.
PylonTerminate();
return exitCode;
}
I am not experienced in this field but changing ++i to i++ obviously does not solve your problem as they are equivalent in this for definition (for (size_t i = 0; i < cameras.GetSize(); ++i)).
I am not sure, but according to the comments in the code you may need to configure the cameras manually (the cameras may be configured differently):
// According to their ***default configuration***, the cameras are
// set up for free-running continuous acquisition.
cameras.StartGrabbing();
Also, please read these comments from the code carefully and see if you correctly configure your network and parameters. I suggest you try with one camera first:
// Limits the amount of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with
// multiple cameras.
// This applies, for instance, if two GigE cameras are connected to the
// same network adapter via a switch.
// To manage the bandwidth, the GevSCPD interpacket delay parameter and
// the GevSCFTD transmission delay
// parameter can be set for each GigE camera device.
// The "Controlling Packet Transmission Timing with the Interpacket and
// Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Notes (AW000649xx000)
// provide more information about this topic.
// The bandwidth used by a FireWire camera device can be limited by
// adjusting the packet size.

Sequential off-screen rendering / screen capture without windowing system using OpenSceneGraph

I am working currently on an off-screen renderer so that I can do Mutual Information Registration for real-world scenes. I use OpenSceneGraph to cope with the large data and automatic loading. I am having trouble getting a framebuffer capture within a sequential, single-threaded program.
Well, I have this class (header):
#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>
#include "ImagingPrimitives.h"
// Off-screen OSG renderer: owns a viewer bound to a pbuffer/FBO and exposes
// the last rendered frame as an osg::Image. Frame access is serialised
// through an internal mutex so a sequential pipeline can sync with rendering.
class BoundRenderScene {
public:
BoundRenderScene();
virtual ~BoundRenderScene();
// Renders one frame using the current camera matrix.
void NextFrame(void);
// Exposes the internal mutex so external code can sync with the renderer.
inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }
// Returns the FBO-attached frame image (no copy; caller must not outlive us).
inline osg::Image* GetFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
return _frame.get();
}
// Reads the GL back buffer into the caller-supplied image.
// NOTE(review): glReadBuffer/readPixels run on the caller's thread and only
// work if the GL context is current here -- confirm; this is likely why this
// path returns a blank image.
inline void GetFrame(osg::Image* img)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && (img!=NULL) && img->valid())
{
glReadBuffer(GL_BACK);
img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
/*
* bare testing write op
* osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
*/
}
}
// Setters for the render configuration; take effect on the next frame/init.
inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }
inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }
inline void SetScene(osg::Node* scene) { _scene = scene; }
// First call performs full setup; subsequent calls reconfigure.
inline void Initialize(void) {
if(!_initialized)
_init();
else
_re_init();
}
protected:
osgViewer::Viewer _viewer;
osg::Matrixd _camera_matrix; // camera pose to render from
osg::ref_ptr<osg::Texture2D> _tex; // FBO color attachment texture
osg::ref_ptr<osg::FrameBufferObject> _fbo;
mutable osg::ref_ptr<osg::Image> _frame; // image attached to the camera's color buffer
osg::ref_ptr<osg::Node> _scene;
osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
osg::ref_ptr<osg::GraphicsContext> _gc;
CameraConfiguration* _camera_configuration; // not owned
SnapshotCallback* cb;
std::string _filepath;
private:
void _init(void);
void _re_init(void);
bool _initialized;
mutable OpenThreads::Mutex _mutex; // guards _frame and rendering
osg::Matrixd pre_transform;
osg::Matrixd transformation;
};
Also, because many examples within offscreen-rendering and for screen capture work with Post/FinalDrawCallaback's, I copied the callback structure from the "osgdistortion" example, but added the mutex for synchronisation:
// Camera draw callback that copies the current read buffer into _image and
// optionally writes it to disk. A caller-supplied mutex serialises access
// with the rest of the (single-threaded) pipeline.
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
// mtx_obj: externally owned mutex; filepath: "" disables file output.
inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
{
_image = new osg::Image();
_image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
if(filepath!="")
_output_to_file = true;
}
// RenderInfo variant: resolves the viewport from the current camera.
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
// Camera-reference variant: same logic for the other callback signature.
inline virtual void operator() (const osg::Camera& camera) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Viewport* viewport = camera.getViewport();
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
std::string _filepath;
bool _output_to_file;
mutable OpenThreads::Mutex* _mutex; // not owned
mutable osg::ref_ptr<osg::Image> _image;
};
I initialize and render the scene as follows:
#include "BoundRenderScene.h"
// One-time setup: creates an off-screen (pbuffer) graphics context, binds the
// viewer's camera to it, and attaches _frame/_tex as FBO render targets.
// Statement order matters here (context before realize, attachments after).
void BoundRenderScene::_init(void)
{
// NOTE(review): _camera is not among the members declared in the class
// definition shown above -- confirm this member exists.
if(_camera!=NULL)
_viewer.setDone(true);
// Off-screen context traits: pbuffer, no window decoration, RGB888 + 24-bit depth.
_traits->x = 0;
_traits->y = 0;
_traits->width = _camera_configuration->GetSX();
_traits->height = _camera_configuration->GetSY();
_traits->red = 8;
_traits->green = 8;
_traits->blue = 8;
_traits->alpha = 0;
_traits->depth = 24;
_traits->windowDecoration = false;
_traits->pbuffer = true;
_traits->doubleBuffer = true;
_traits->sharedContext = 0x0;
// Release any previously created context before making a new one.
if(_gc.get()!=NULL)
{
bool release_success = _gc->releaseContext();
if(!release_success)
std::cerr << "Error releasing Graphics Context.";
}
_gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
_viewer.getCamera()->setGraphicsContext(_gc.get());
_viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
_viewer.setUpThreading();
_viewer.realize();
// Allocate the CPU-side image the camera's color buffer is attached to.
_frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);
_viewer.getCamera()->getOrCreateStateSet();
// NOTE(review): the render-target implementation is set twice (PIXEL_BUFFER
// here, FRAME_BUFFER_OBJECT below) -- confirm the first call is needed.
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);
cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());
//_viewer.getCamera()->setPostDrawCallback( cb );
//Clear colour "black" for representing "no information" => background elimination in natural image, pls.
_viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
_viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
_viewer.getCamera()->setDrawBuffer(GL_BACK);
_viewer.getCamera()->setReadBuffer(GL_BACK);
_viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
_viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));
//looking in geo-coord system
_viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));
// Attach _frame to the color buffer so each rendered frame lands in it.
_viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get());
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
// Configure the FBO color texture backed by _frame.
_tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
_tex->setInternalFormat(GL_RGB);
_tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
_tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
_tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
_tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
_tex->setResizeNonPowerOfTwoHint(false);
_tex->setImage(0,_frame.get());
_fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));
_viewer.setDone(false);
_viewer.setSceneData(_scene.get());
_viewer.setCameraManipulator(0x0);
}
void BoundRenderScene::NextFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && !_viewer.done())
{
osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
transformation = inverse_cam * pre_transform;
_viewer.getCamera()->setViewMatrix(transformation);
_viewer.updateTraversal();
_viewer.frame();
}
else
std::cout << "Viewer or Camera invalid." << std::endl;
}
The main workflow looks like this (simplified):
// Simplified driver loop: renders the scene once per camera pose.
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for registration
*/
renderer._init(); // NOTE(review): _init() is declared private; presumably the real code calls Initialize() -- confirm
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
renderer.NextImage(); // NOTE(review): the class declares NextFrame(), not NextImage() -- confirm
sleep(0.04); // to get the 25fps frame limit
// NOTE(review): POSIX sleep() takes whole seconds, so 0.04 truncates to 0 -- confirm intent (usleep/nanosleep)
osg::Image* reg_image = renderer.GetImage(); // NOTE(review): the class declares GetFrame() -- confirm
/*
* Do further processing
*/
}
Now comes the crux: the OpenSceneGraph example "osgprenderer" (included in OSG) does off-screen rendering using an osg::Camera::DrawCallback, as my SnapshotCallback. Unfortunately, the operator()-function in my case never get's called in my scenegraph, so that way of screen capture doesn't work for me. It's also rather inconvenient as the rest of the Mutual Information procedure is a rather sequential pipeline.
Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use methods similar to my "void GetFrame(osg::Image* img)" method, where the image is actively read using "readPixels". That is very convenient for my workflow, but the method always returns a blank image. It doesn't crash, but it doesn't do it's job either.
The method that does work is "osg::Image* GetFrame(void)", which returns the bound/attached FBO image. It is similar to the "osgdistortion" example. It does work for rendering one to two images, but after some time, rendering and processing get out of sync and the application crashes as follows:
[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[ SECOND FRAME ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0
[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]
[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)
So, the question is:
I had a look into the source files from osg for the Viewer-related classes, but I was not able to determine where the error
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
comes from. Any idea where to start looking for it ?
For sequential rendering and screen capture, which method is the best to use within OSG ?
How can I obtain the mutex of the normal osg::Viewer, so as to sync the renderer with the rest of my pipeline ? (Renderer is single-threaded)
Any other suggestions from experienced OpenSceneGraph off-screen
renderers and screen-capture implementers ?
As deeper research turned out, releasing the graphics context in the class destructor freed the OpenGL pipeline, BUT: it also disallocated stateset-bound textures of the loaded scene/model, although the model itself was not suspended (as given in the question: it is re-used in the following passes). So, in further render passes, the render pipeline wanted to access OSG assets which have been released via releasing the GL context.
in code it changed from:
// Original destructor (problem version): the explicit releaseContext() here
// also freed stateset-bound textures of the still-loaded scene, so later
// render passes accessed GL assets that had already been released.
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
_gc->releaseContext();
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
to:
// Fixed destructor: the explicit _gc->releaseContext() call is dropped; the
// release-at-end-of-frame hint lets OSG release the context without tearing
// down scene assets that are still in use.
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
This resolved the OpenSceneGraph-internal error messages. Now, in order to solve the frame capture problem itself, I implemented the callback from osgprenderer:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
{
if(filepath!="")
_output_to_file = true;
_image = new osg::Image();
}
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport)
{
glReadBuffer(camera->getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline virtual void operator() (const osg::Camera& camera) const
{
osg::Viewport* viewport = camera.getViewport();
if(viewport)
{
glReadBuffer(camera.getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline osg::Image* GetImage(void)
{
return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
}
protected:
std::string _filepath;
bool _output_to_file;
mutable osg::ref_ptr<osg::Image> _image;
};
Now, with the cloned buffer instead of the actual image buffer (idea taken over from osgscreencapture example), I do get the real image without memory errors.
For double-buffered rendering, though, I have to somehow render the scene twice for the correct buffer to contain the objects' images; but for my use case this is currently less of an issue (I/O-bound rendering, not operation-bound).
so, the main function looks like follows:
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for registration
*/
renderer._init();
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
renderer.NextImage();
renderer.NextImage();
osg::Image* reg_image = renderer.GetImage();
/*
* Do further processing
*/
}

SIGSEGV error while using c++ on linux with openGL and SDL

Myself and a few other guys are taking a crack at building a simple side scroller type game. However, I can not get a hold of them to help answer my question so I put it to you, the following code leaves me with a SIGSEGV error in the notated place... if anyone can tell me why, I would really appreciate it. If you need anymore info I will be watching this closely.
Main.cpp
Vector2 dudeDim(60,60);
Vector2 dudePos(300, 300);
Entity *test = new Entity("img/images.jpg", dudeDim, dudePos, false);
leads to:
Entity.cpp
Entity::Entity(std::string filename, Vector2 size, Vector2 position, bool passable):
mTexture(filename)
{
mTexture.load(false);
mDimension2D = size;
mPosition2D = position;
mPassable = passable;
}
leads to:
Textures.cpp
void Texture::load(bool generateMipmaps)
{
FREE_IMAGE_FORMAT imgFormat = FIF_UNKNOWN;
FIBITMAP *dib(0);
imgFormat = FreeImage_GetFileType(mFilename.c_str(), 0);
//std::cout << "File format: " << imgFormat << std::endl;
if (FreeImage_FIFSupportsReading(imgFormat)) // Check if the plugin has reading capabilities and load the file
dib = FreeImage_Load(imgFormat, mFilename.c_str());
if (!dib)
std::cout << "Error loading texture files!" << std::endl;
BYTE* bDataPointer = FreeImage_GetBits(dib); // Retrieve the image data
mWidth = FreeImage_GetWidth(dib); // Get the image width and height
mHeight = FreeImage_GetHeight(dib);
mBitsPerPixel = FreeImage_GetBPP(dib);
if (!bDataPointer || !mWidth || !mHeight)
std::cout << "Error loading texture files!" << std::endl;
// Generate and bind ID for this texture
vvvvvvvvvv!!!ERROR HERE!!!vvvvvvvvvvv
glGenTextures(1, &mId);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
glBindTexture(GL_TEXTURE_2D, mId);
int format = mBitsPerPixel == 24 ? GL_BGR_EXT : mBitsPerPixel == 8 ? GL_LUMINANCE : 0;
int iInternalFormat = mBitsPerPixel == 24 ? GL_RGB : GL_DEPTH_COMPONENT;
if(generateMipmaps)
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, mWidth, mHeight, 0, format, GL_UNSIGNED_BYTE, bDataPointer);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
//std::cout << "texture generated " << mId << std::endl;
FreeImage_Unload(dib);
}
after reading Peter's suggestion I have changed my main.cpp file to:
#include <iostream>
#include <vector>
#include "Game.h"
using namespace std;
int main(int argc, char** argv)
{
Game theGame;
/* Initialize game control objects and resources */
if (theGame.onInit() != false)
{
return theGame.onExecute();
}
else
{
return -1;
}
}
and it would seem the SIGSEGV error is gone and I'm now left with something not initializing. So thank you, Peter — you were correct; now I'm off to solve this issue.
ok so this is obviously a small amount of the code but in order to save time and a bit of sanity: all the code is available at:
GitHub Repo
So after looking at your code I can say that it's probably that you have not initialized your OpenGL context before executing that code.
You need to call your Game::onInit() which also calls RenderEngine::initGraphics() before making any calls to OpenGL. Which you currently don't do. You currently do main()->Game ctor (calls rendering engine ctor but that ctor doesn't init SDL and OpenGL)->Entity ctor->load texture
For details look at the OpenGL Wiki FAQ