Intel RealSense Depth Camera D435i.
I am trying to capture an image and save it in STL format.
I am using this project provided by Intel to achieve this:
https://github.com/IntelRealSense/librealsense/releases/download/v2.29.0/Intel.RealSense.SDK.exe
In the solution there is an application named PointCloud.
I modified the application a little to get a cleaner image.
But even with the unmodified code, the result is not very satisfying.
I capture a smooth surface, but there are many little bumps in the result.
I don't know whether the problem comes from the SDK or from the camera.
I checked the result in MeshLab, which is a great 3D tool.
Any idea?
The result (a smooth table surface):
Here is my C++ code (I only added some filters, but I have the same problem without them):
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
#include <algorithm> // std::min, std::max
#include <iostream>
#include <Windows.h>
#include <imgui.h>
#include "imgui_impl_glfw.h"
#include <stdio.h>
#include <windows.h>
#include <conio.h>
#include "tchar.h"
// Helper functions
void register_glfw_callbacks(window& app, glfw_state& app_state);
int main(int argc, char * argv[]) try
{
::ShowWindow(::GetConsoleWindow(), SW_HIDE);
// Create a simple OpenGL window for rendering:
window app(1280, 720, "Capron 3D");
ImGui_ImplGlfw_Init(app, false);
bool capture = false;
HWND hWnd;
hWnd = FindWindow(NULL, _T("Capron 3D"));
ShowWindow(hWnd, SW_MAXIMIZE);
// Construct an object to manage view state
glfw_state app_state;
// register callbacks to allow manipulation of the pointcloud
register_glfw_callbacks(app, app_state);
app_state.yaw = 3.29;
app_state.pitch = 0;
// Declare pointcloud object, for calculating pointclouds and texture mappings
rs2::pointcloud pc;
// We want the points object to be persistent so we can display the last cloud when a frame drops
rs2::points points;
// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
// Start streaming with default recommended configuration
pipe.start();
rs2::decimation_filter dec_filter;
rs2::spatial_filter spat_filter;
rs2::threshold_filter thres_filter;
rs2::temporal_filter temp_filter;
float w = static_cast<float>(app.width());
float h = static_cast<float>(app.height());
while (app) // Application still alive?
{
static const int flags = ImGuiWindowFlags_NoCollapse
| ImGuiWindowFlags_NoScrollbar
| ImGuiWindowFlags_NoSavedSettings
| ImGuiWindowFlags_NoTitleBar
| ImGuiWindowFlags_NoResize
| ImGuiWindowFlags_NoMove;
ImGui_ImplGlfw_NewFrame(1);
ImGui::SetNextWindowSize({ app.width(), app.height() });
ImGui::Begin("app", nullptr, flags);
// Set options for the ImGui buttons
ImGui::PushStyleColor(ImGuiCol_TextSelectedBg, { 1, 1, 1, 1 });
ImGui::PushStyleColor(ImGuiCol_Button, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
ImGui::PushStyleColor(ImGuiCol_ButtonHovered, { 40 / 255.f, 170 / 255.f, 90 / 255.f, 1 });
ImGui::PushStyleColor(ImGuiCol_ButtonActive, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
ImGui::PushStyleVar(ImGuiStyleVar_FrameRounding, 12);
ImGui::SetCursorPos({ 10, 10 });
if (ImGui::Button("Capturer", { 100, 50 }))
{
capture = true;
}
// Wait for the next set of frames from the camera
auto frames = pipe.wait_for_frames();
auto color = frames.get_color_frame();
// For cameras that don't have RGB sensor, we'll map the pointcloud to infrared instead of color
if (!color)
color = frames.get_infrared_frame();
// Tell pointcloud object to map to this color frame
pc.map_to(color);
auto depth = frames.get_depth_frame();
/*spat_filter.set_option(RS2_OPTION_FILTER_SMOOTH_DELTA, 50);
depth = spat_filter.process(depth);*/
spat_filter.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 1);
depth = spat_filter.process(depth);
spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
depth = spat_filter.process(depth);
//temp_filter.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 1);
//depth = temp_filter.process(depth);
// Generate the pointcloud and texture mappings
points = pc.calculate(depth);
// Upload the color frame to OpenGL
app_state.tex.upload(color);
thres_filter.set_option(RS2_OPTION_MIN_DISTANCE, 0);
depth = thres_filter.process(depth);
// Draw the pointcloud
draw_pointcloud(int(w) / 2, int(h) / 2, app_state, points);
if (capture)
{
points.export_to_ply("My3DFolder\\new.ply", depth);
return EXIT_SUCCESS;
}
ImGui::PopStyleColor(4);
ImGui::PopStyleVar();
ImGui::End();
ImGui::Render();
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
MessageBox(0, "Erreur connexion RealSense. Veuillez vérifier votre caméra 3D.", "Capron Podologie", 0);
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
I found the answer.
I was using the spatial filter's holes-fill option.
//These lines
spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
depth = spat_filter.process(depth);
The holes-fill algorithm is predictive: it creates points, but their coordinates are not exactly correct. The second parameter of spat_filter.set_option is between 1 and 5, and the higher I set it, the noisier the result becomes.
If I remove these lines, I get a cleaner result.
But this time I have many holes in the result.
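In case it helps, here is a minimal sketch of an alternative post-processing chain in the order Intel recommends (decimation, spatial, temporal, then the dedicated hole-filling filter instead of the spatial filter's built-in holes-fill). The option values are only illustrative assumptions, not tuned settings:
#include <librealsense2/rs.hpp>
// Sketch: filters applied to each depth frame before pc.calculate(depth).
// The option values are illustrative, not tuned settings.
rs2::frame filter_depth(rs2::frame depth)
{
    static rs2::decimation_filter   dec;    // reduces resolution and noise
    static rs2::spatial_filter      spat;   // edge-preserving smoothing, no holes-fill here
    static rs2::temporal_filter     temp;   // smooths across frames (keeps state, hence static)
    static rs2::hole_filling_filter holes;  // dedicated hole filling, applied last
    spat.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 0.5f);
    spat.set_option(RS2_OPTION_FILTER_SMOOTH_DELTA, 20);
    holes.set_option(RS2_OPTION_HOLES_FILL, 1);  // 0..2: fill from left / farthest / nearest
    rs2::frame f = dec.process(depth);
    f = spat.process(f);
    f = temp.process(f);
    f = holes.process(f);
    return f;
}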
I run the point cloud visualizer simpleVis from the code given in the official documentation (see here).
Below is a shortened version of the code.
#include <iostream>
#include <thread>
#include <pcl/common/common_headers.h>
#include <pcl/features/normal_3d.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/console/parse.h>
using namespace std::chrono_literals;
pcl::visualization::PCLVisualizer::Ptr simpleVis (pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud)
{
// --------------------------------------------
// -----Open 3D viewer and add point cloud-----
// --------------------------------------------
pcl::visualization::PCLVisualizer::Ptr viewer (new pcl::visualization::PCLVisualizer ("3D Viewer"));
viewer->setBackgroundColor (0, 0, 0);
viewer->addPointCloud<pcl::PointXYZ> (cloud, "sample cloud");
viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");
viewer->addCoordinateSystem (1.0);
viewer->initCameraParameters ();
return (viewer);
}
// --------------
// -----Main-----
// --------------
int
main (int argc, char** argv)
{
// ------------------------------------
// -----Create example point cloud-----
// ------------------------------------
pcl::PointCloud<pcl::PointXYZ>::Ptr basic_cloud_ptr (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr (new pcl::PointCloud<pcl::PointXYZRGB>);
std::cout << "Generating example point clouds.\n\n";
// We're going to make an ellipse extruded along the z-axis. The colour for
// the XYZRGB cloud will gradually go from red to green to blue.
std::uint8_t r(255), g(15), b(15);
for (float z(-1.0); z <= 1.0; z += 0.05)
{
for (float angle(0.0); angle <= 360.0; angle += 5.0)
{
pcl::PointXYZ basic_point;
basic_point.x = 0.5 * std::cos (pcl::deg2rad(angle));
basic_point.y = sinf (pcl::deg2rad(angle));
basic_point.z = z;
basic_cloud_ptr->points.push_back(basic_point);
pcl::PointXYZRGB point;
point.x = basic_point.x;
point.y = basic_point.y;
point.z = basic_point.z;
std::uint32_t rgb = (static_cast<std::uint32_t>(r) << 16 |
static_cast<std::uint32_t>(g) << 8 | static_cast<std::uint32_t>(b));
point.rgb = *reinterpret_cast<float*>(&rgb);
point_cloud_ptr->points.push_back (point);
}
if (z < 0.0)
{
r -= 12;
g += 12;
}
else
{
g -= 12;
b += 12;
}
}
basic_cloud_ptr->width = basic_cloud_ptr->size ();
basic_cloud_ptr->height = 1;
point_cloud_ptr->width = point_cloud_ptr->size ();
point_cloud_ptr->height = 1;
pcl::visualization::PCLVisualizer::Ptr viewer;
viewer = simpleVis(basic_cloud_ptr);
while (!viewer->wasStopped ())
{
viewer->resetCamera();
viewer->spinOnce (100);
std::this_thread::sleep_for(100ms);
}
}
Consequently, a window pops up which should visualize the point cloud.
However, I only see green, red and black areas as in the image below.
Then I press r and the following appears:
As far as I understand it, the appearing object is the point cloud.
Now, if I try to move it around, the point cloud "smears" as shown in the image below.
I figured out the following: if I run the simpler cloud viewer as described here, the visualization works properly.
In the last few months, I also tried a Python library for point cloud visualization that uses PCL under the hood; the problem appears there as well.
Do you have any idea what the problem is?
What you are seeing is a deep zoom-in on the point where the axes meet at (0,0,0).
To see more of the point cloud, you need to zoom out (roll the mouse wheel).
How far you can zoom out may be limited, depending on how the focal point and viewpoint are defined.
Pressing the 'f' key when the mouse cursor hovers over the center will usually take you far enough away to zoom out.
Once you have a reasonable view, you can press 'c' to get (in the stdout) the camera parameters and set them in the code.
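For example, a minimal sketch of baking such camera parameters into the code (the coordinates are placeholders; substitute the values printed when you press 'c'):
// Sketch: set an explicit camera once instead of calling resetCamera() every frame.
// The coordinates below are placeholders, not known-good values for this scene.
pcl::visualization::PCLVisualizer::Ptr viewer = simpleVis(basic_cloud_ptr);
viewer->setCameraPosition(
    0.0, 0.0, -5.0,   // camera position
    0.0, 0.0,  0.0,   // focal point (look-at)
    0.0, 1.0,  0.0);  // up vector
while (!viewer->wasStopped())
{
    viewer->spinOnce(100);
    std::this_thread::sleep_for(100ms);
}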
I want to get a 1280x720 depth image and a 1280x720 color image.
I started from the following example code:
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include "librealsense2/rs.hpp" // Include RealSense Cross Platform API
#include "example.hpp" // Include short list of convenience functions for rendering
#include "opencv2/opencv.hpp"
#include <iostream>
#include "stb-master\stb_image_write.h"
using namespace std;
using namespace cv;
// Capture Example demonstrates how to
// capture depth and color video streams and render them to the screen
int main(int argc, char * argv[]) try
{
int width = 1280;
int height = 720;
rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
// Create a simple OpenGL window for rendering:
window app(width, height, "RealSense Capture Example");
// Declare two textures on the GPU, one for color and one for depth
texture depth_image, color_image;
// Declare depth colorizer for pretty visualization of depth data
rs2::colorizer color_map;
color_map.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED,1.f);
color_map.set_option(RS2_OPTION_COLOR_SCHEME, 2.f);
// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
// Start streaming with default recommended configuration
pipe.start();
while (app) // Application still alive?
{
rs2::frameset data = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::frame depth = color_map(data.get_depth_frame()); // Find and colorize the depth data
rs2::frame color = data.get_color_frame(); // Find the color data
// For cameras that don't have RGB sensor, we'll render infrared frames instead of color
if (!color)
color = data.get_infrared_frame();
// Render depth on to the first half of the screen and color on to the second
depth_image.render(depth, { 0, 0, app.width() / 2, app.height() });
color_image.render(color, { app.width() / 2, 0, app.width() / 2, app.height() });
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
I want to do this:
1. Press the "c" key.
2. Save the color image and the depth image in PNG format.
I can work out the code for step 2, but I don't know how to trigger the action when I press "c".
I guess I have to use this code from example.hpp:
GLFWwindow * win = glfwCreateWindow(tile_w*cols, tile_h*rows, ss.str().c_str(), 0, 0);
glfwSetWindowUserPointer(win, &dev);
glfwSetKeyCallback(win, [](GLFWwindow * win, int key, int scancode, int action, int mods)
{
auto dev = reinterpret_cast<rs::device *>(glfwGetWindowUserPointer(win));
if (action != GLFW_RELEASE) switch (key)
{
case GLFW_KEY_R: color_rectification_enabled = !color_rectification_enabled; break;
case GLFW_KEY_C: align_color_to_depth = !align_color_to_depth; break;
case GLFW_KEY_D: align_depth_to_color = !align_depth_to_color; break;
case GLFW_KEY_E:
if (dev->supports_option(rs::option::r200_emitter_enabled))
{
int value = !dev->get_option(rs::option::r200_emitter_enabled);
std::cout << "Setting emitter to " << value << std::endl;
dev->set_option(rs::option::r200_emitter_enabled, value);
}
break;
case GLFW_KEY_A:
if (dev->supports_option(rs::option::r200_lr_auto_exposure_enabled))
{
int value = !dev->get_option(rs::option::r200_lr_auto_exposure_enabled);
std::cout << "Setting auto exposure to " << value << std::endl;
dev->set_option(rs::option::r200_lr_auto_exposure_enabled, value);
}
break;
}
});
This code is from librealsense version 1.x. I would like to port it to librealsense 2.0, but I do not know how. How do I change this code?
Thanks for reading!
Useful samples to get you on your way with RealSense SDK 2.0 and OpenCV are available in the repo under /wrappers/opencv.
Keep in mind that the devices supported by SDK 2.0 are:
Intel® RealSense™ Camera D400-Series
Intel® RealSense™ Developer Kit SR300
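On the key-press part: here is a minimal sketch of how this could look with SDK 2.0, using glfwSetKeyCallback (the example.hpp window class converts to GLFWwindow*, which is what ImGui_ImplGlfw_Init relies on in the earlier example) and stb_image_write to save PNGs. The stream settings and file names are assumptions, and installing this callback replaces any key handling the window class sets up itself:
// Sketch only: key-triggered PNG capture with librealsense 2.0 + stb_image_write.
#include <librealsense2/rs.hpp>
#include "example.hpp"
#include <iostream>
#define STB_IMAGE_WRITE_IMPLEMENTATION   // in exactly one .cpp file
#include "stb-master\stb_image_write.h"
static bool save_requested = false;      // set by the key callback, read in the loop
int main(int argc, char * argv[]) try
{
    window app(1280, 720, "Capture on 'c'");
    glfwSetKeyCallback(app, [](GLFWwindow*, int key, int, int action, int) {
        if (key == GLFW_KEY_C && action == GLFW_RELEASE)
            save_requested = true;
    });
    texture depth_image, color_image;
    rs2::colorizer color_map;
    // Request 1280x720 streams explicitly instead of the default profile.
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Z16, 30);
    cfg.enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 30);
    rs2::pipeline pipe;
    pipe.start(cfg);
    while (app)
    {
        rs2::frameset data = pipe.wait_for_frames();
        rs2::video_frame depth = color_map(data.get_depth_frame()).as<rs2::video_frame>(); // colorized depth (RGB8)
        rs2::video_frame color = data.get_color_frame();                                   // RGB8 color frame
        if (save_requested)
        {
            auto save_png = [](const char* name, const rs2::video_frame& f) {
                stbi_write_png(name, f.get_width(), f.get_height(),
                               f.get_bytes_per_pixel(), f.get_data(),
                               f.get_stride_in_bytes());
            };
            save_png("color.png", color);
            save_png("depth.png", depth);
            save_requested = false;
        }
        depth_image.render(depth, { 0, 0, app.width() / 2, app.height() });
        color_image.render(color, { app.width() / 2, 0, app.width() / 2, app.height() });
    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << ": " << e.what() << std::endl;
    return EXIT_FAILURE;
}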
I am currently working on an off-screen renderer so that I can do mutual information registration for real-world scenes. I use OpenSceneGraph to cope with the large data and automatic loading. I am having trouble capturing the framebuffer within a sequential, single-threaded program.
Well, I have this class (header):
#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>
#include "ImagingPrimitives.h"
class BoundRenderScene {
public:
BoundRenderScene();
virtual ~BoundRenderScene();
void NextFrame(void);
inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }
inline osg::Image* GetFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
return _frame.get();
}
inline void GetFrame(osg::Image* img)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && (img!=NULL) && img->valid())
{
glReadBuffer(GL_BACK);
img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
/*
* bare testing write op
* osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
*/
}
}
inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }
inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }
inline void SetScene(osg::Node* scene) { _scene = scene; }
inline void Initialize(void) {
if(!_initialized)
_init();
else
_re_init();
}
protected:
osgViewer::Viewer _viewer;
osg::Matrixd _camera_matrix;
osg::ref_ptr<osg::Texture2D> _tex;
osg::ref_ptr<osg::FrameBufferObject> _fbo;
mutable osg::ref_ptr<osg::Image> _frame;
osg::ref_ptr<osg::Node> _scene;
osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
osg::ref_ptr<osg::GraphicsContext> _gc;
CameraConfiguration* _camera_configuration;
SnapshotCallback* cb;
std::string _filepath;
private:
void _init(void);
void _re_init(void);
bool _initialized;
mutable OpenThreads::Mutex _mutex;
osg::Matrixd pre_transform;
osg::Matrixd transformation;
};
Also, because many off-screen rendering and screen capture examples work with post/final draw callbacks, I copied the callback structure from the "osgdistortion" example, but added a mutex for synchronisation:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
{
_image = new osg::Image();
_image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
if(filepath!="")
_output_to_file = true;
}
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
inline virtual void operator() (const osg::Camera& camera) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Viewport* viewport = camera.getViewport();
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
std::string _filepath;
bool _output_to_file;
mutable OpenThreads::Mutex* _mutex;
mutable osg::ref_ptr<osg::Image> _image;
};
I initialize and render the scene as follows:
#include "BoundRenderScene.h"
void BoundRenderScene::_init(void)
{
if(_camera!=NULL)
_viewer.setDone(true);
_traits->x = 0;
_traits->y = 0;
_traits->width = _camera_configuration->GetSX();
_traits->height = _camera_configuration->GetSY();
_traits->red = 8;
_traits->green = 8;
_traits->blue = 8;
_traits->alpha = 0;
_traits->depth = 24;
_traits->windowDecoration = false;
_traits->pbuffer = true;
_traits->doubleBuffer = true;
_traits->sharedContext = 0x0;
if(_gc.get()!=NULL)
{
bool release_success = _gc->releaseContext();
if(!release_success)
std::cerr << "Error releasing Graphics Context.";
}
_gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
_viewer.getCamera()->setGraphicsContext(_gc.get());
_viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
_viewer.setUpThreading();
_viewer.realize();
_frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);
_viewer.getCamera()->getOrCreateStateSet();
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);
cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());
//_viewer.getCamera()->setPostDrawCallback( cb );
//Clear colour "black" for representing "no information" => background elimination in natural image, pls.
_viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
_viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
_viewer.getCamera()->setDrawBuffer(GL_BACK);
_viewer.getCamera()->setReadBuffer(GL_BACK);
_viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
_viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));
//looking in geo-coord system
_viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));
_viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get());
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
_tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
_tex->setInternalFormat(GL_RGB);
_tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
_tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
_tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
_tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
_tex->setResizeNonPowerOfTwoHint(false);
_tex->setImage(0,_frame.get());
_fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));
_viewer.setDone(false);
_viewer.setSceneData(_scene.get());
_viewer.setCameraManipulator(0x0);
}
void BoundRenderScene::NextFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && !_viewer.done())
{
osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
transformation = inverse_cam * pre_transform;
_viewer.getCamera()->setViewMatrix(transformation);
_viewer.updateTraversal();
_viewer.frame();
}
else
std::cout << "Viewer or Camera invalid." << std::endl;
}
The main workflow looks like this (simplified):
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for regsitration
*/
renderer._init();
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
renderer.NextImage();
sleep(0.04); // to get the 25fps frame limit
osg::Image* reg_image = renderer.GetImage();
/*
* Do further processing
*/
}
Now comes the crux: the OpenSceneGraph example "osgprenderer" (included in OSG) does off-screen rendering using an osg::Camera::DrawCallback, like my SnapshotCallback. Unfortunately, the operator() function in my case never gets called in my scene graph, so that way of screen capture doesn't work for me. It is also rather inconvenient, as the rest of the mutual information procedure is a fairly sequential pipeline.
Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use methods similar to my "void GetFrame(osg::Image* img)" method, where the image is actively read using "readPixels". That is very convenient for my workflow, but the method always returns a blank image. It doesn't crash, but it doesn't do its job either.
The method that does work is "osg::Image* GetFrame(void)", which returns the bound/attached FBO image. It is similar to the "osgdistortion" example. It works for rendering one or two images, but after some time, rendering and processing get out of sync and the application crashes as follows:
[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[ SECOND FRAME ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0
[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]
[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)
So, the questions are:
1. I had a look into the OSG source files for the Viewer-related classes, but I was not able to determine where the error
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
comes from. Any idea where to start looking for it?
2. For sequential rendering and screen capture, which method is the best to use within OSG?
3. How can I obtain the mutex of the normal osg::Viewer, so as to sync the renderer with the rest of my pipeline? (The renderer is single-threaded.)
4. Any other suggestions from experienced OpenSceneGraph users on off-screen rendering and screen capture?
As deeper research turned out, releasing the graphics context in the class destructor freed the OpenGL pipeline, BUT it also deallocated the stateset-bound textures of the loaded scene/model, even though the model itself was not discarded (as stated in the question, it is re-used in the following passes). So, in further render passes, the render pipeline tried to access OSG assets that had been released along with the GL context.
In code, it changed from:
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
_gc->releaseContext();
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
to:
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
This resolved the OpenSceneGraph-internal error messages. Now, in order to solve the frame capture problem itself, I implemented the callback from osgprenderer:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
{
if(filepath!="")
_output_to_file = true;
_image = new osg::Image();
}
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport)
{
glReadBuffer(camera->getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline virtual void operator() (const osg::Camera& camera) const
{
osg::Viewport* viewport = camera.getViewport();
if(viewport)
{
glReadBuffer(camera.getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline osg::Image* GetImage(void)
{
return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
}
protected:
std::string _filepath;
bool _output_to_file;
mutable osg::ref_ptr<osg::Image> _image;
};
Now, with the cloned buffer instead of the actual image buffer (an idea taken from the osgscreencapture example), I do get the real image without memory errors.
For double-buffered rendering, though, I have to render the scene twice so that the right buffer contains the objects' image, but for my use case this is currently less of an issue (the rendering is I/O-bound, not operation-bound).
So the main function now looks as follows:
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for registration
*/
renderer._init();
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
renderer.NextImage();
renderer.NextImage();
osg::Image* reg_image = renderer.GetImage();
/*
* Do further processing
*/
}
We are working with the Kinect to track faces for a school project. We have set up Visual Studio 2012, and all the test programs are working correctly. However, when we try to run the code below, it gives us an error. After many attempts to fix the code, it shows the following message:
"The application was unable to start correctly (0xc000007b). Click OK to close the application."
The good thing is that it finally runs; the bad thing is that the compiler doesn't throw any errors other than this vague one.
We are completely lost, and we hope that someone can help us or point us in the right direction. Thanks in advance for helping us.
The code:
#include "stdafx.h"
#include <iostream>
#include <Windows.h>
#include <NuiApi.h>
#include <FaceTrackLib.h>
#include <NuiSensor.h>
using namespace std;
HANDLE rgbStream;
HANDLE depthStream;
INuiSensor* sensor;
#define width 640
#define height 480
bool initKinect() {
// Get a working kinect sensor
int numSensors;
if (NuiGetSensorCount(&numSensors) < 0 || numSensors < 1) return false;
if (NuiCreateSensorByIndex(0, &sensor) < 0) return false;
// Initialize sensor
sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH | NUI_INITIALIZE_FLAG_USES_COLOR);
sensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_COLOR, // Depth camera or rgb camera?
NUI_IMAGE_RESOLUTION_640x480, // Image resolution
0, // Image stream flags, e.g. near mode
2, // Number of frames to buffer
NULL, // Event handle
&rgbStream);
// --------------- END CHANGED CODE -----------------
return true;
}
BYTE* dataEnd;
USHORT* dataEndD;
void getKinectDataD(){
NUI_IMAGE_FRAME imageFrame;
NUI_LOCKED_RECT LockedRect;
if (sensor->NuiImageStreamGetNextFrame(rgbStream, 0, &imageFrame) < 0) return;
INuiFrameTexture* texture = imageFrame.pFrameTexture;
texture->LockRect(0, &LockedRect, NULL, 0);
const USHORT* curr = (const USHORT*)LockedRect.pBits;
const USHORT* dataEnding = curr + (width*height);
if (LockedRect.Pitch != 0)
{
const BYTE* curr = (const BYTE*)LockedRect.pBits;
dataEnd = (BYTE*)(curr + (width*height) * 4);
}
while (curr < dataEnding) {
// Get depth in millimeters
USHORT depth = NuiDepthPixelToDepth(*curr++);
dataEndD = (USHORT*)depth;
// Draw a grayscale image of the depth:
// B,G,R are all set to depth%256, alpha set to 1.
}
texture->UnlockRect(0);
sensor->NuiImageStreamReleaseFrame(rgbStream, &imageFrame);
}
// This example assumes that the application provides
// void* cameraFrameBuffer, a buffer for an image, and that there is a method
// to fill the buffer with data from a camera, for example
// cameraObj.ProcessIO(cameraFrameBuffer)
int main(){
initKinect();
// Create an instance of a face tracker
IFTFaceTracker* pFT = FTCreateFaceTracker();
if (!pFT)
{
// Handle errors
}
// Initialize cameras configuration structures.
// IMPORTANT NOTE: resolutions and focal lengths must be accurate, since it affects tracking precision!
// It is better to use enums defined in NuiAPI.h
// Video camera config with width, height, focal length in pixels
// NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 640x480 resolution
// If you use different resolutions, multiply this focal length by the scaling factor
FT_CAMERA_CONFIG videoCameraConfig = { 640, 480, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS };
// Depth camera config with width, height, focal length in pixels
// NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 320x240 resolution
// If you use different resolutions, multiply this focal length by the scaling factor
FT_CAMERA_CONFIG depthCameraConfig = { 320, 240, NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS };
// Initialize the face tracker
HRESULT hr = pFT->Initialize(&videoCameraConfig, &depthCameraConfig, NULL, NULL);
if (FAILED(hr))
{
// Handle errors
}
// Create a face tracking result interface
IFTResult* pFTResult = NULL;
hr = pFT->CreateFTResult(&pFTResult);
if (FAILED(hr))
{
// Handle errors
}
// Prepare image interfaces that hold RGB and depth data
IFTImage* pColorFrame = FTCreateImage();
IFTImage* pDepthFrame = FTCreateImage();
if (!pColorFrame || !pDepthFrame)
{
// Handle errors
}
// Attach created interfaces to the RGB and depth buffers that are filled with
// corresponding RGB and depth frame data from Kinect cameras
pColorFrame->Attach(640, 480, dataEnd, FTIMAGEFORMAT_UINT8_R8G8B8, 640 * 3);
pDepthFrame->Attach(320, 240, dataEndD, FTIMAGEFORMAT_UINT16_D13P3, 320 * 2);
// You can also use Allocate() method in which case IFTImage interfaces own their memory.
// In this case use CopyTo() method to copy buffers
FT_SENSOR_DATA sensorData;
sensorData.ZoomFactor = 1.0f; // Not used must be 1.0
bool isFaceTracked = false;
// Track a face
while (true)
{
// Call Kinect API to fill videoCameraFrameBuffer and depthFrameBuffer with RGB and depth data
getKinectDataD();
// Check if we are already tracking a face
if (!isFaceTracked)
{
// Initiate face tracking.
// This call is more expensive and searches the input frame for a face.
hr = pFT->StartTracking(&sensorData, NULL, NULL, pFTResult);
if (SUCCEEDED(hr))
{
isFaceTracked = true;
}
else
{
// No faces found
isFaceTracked = false;
}
}
else
{
// Continue tracking. It uses a previously known face position.
// This call is less expensive than StartTracking()
hr = pFT->ContinueTracking(&sensorData, NULL, pFTResult);
if (FAILED(hr))
{
// Lost the face
isFaceTracked = false;
}
}
// Do something with pFTResult like visualize the mask, drive your 3D avatar,
// recognize facial expressions
}
// Clean up
pFTResult->Release();
pColorFrame->Release();
pDepthFrame->Release();
pFT->Release();
return 0;
}
We figured it out: we were indeed using the wrong DLL, and it runs without errors now. But we ran into another problem: we have no clue how to use pFTResult to retrieve the face rectangle and angles (e.g. with "GetFaceRect"). Does somebody know how?
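For what it's worth, here is a rough sketch of reading the result object, based on the FaceTrackLib interface as I remember it (verify the exact signatures against FaceTrackLib.h). The face rectangle comes from GetFaceRect, while the head angles come from Get3DPose:
// Sketch: query pFTResult after a successful StartTracking/ContinueTracking call.
// Verify these signatures against FaceTrackLib.h before relying on them.
if (SUCCEEDED(hr) && SUCCEEDED(pFTResult->GetStatus()))
{
    RECT faceRect;
    pFTResult->GetFaceRect(&faceRect);        // face bounding box in the color image
    FLOAT scale;
    FLOAT rotationXYZ[3];                     // pitch, yaw, roll (degrees)
    FLOAT translationXYZ[3];                  // head position in camera space
    pFTResult->Get3DPose(&scale, rotationXYZ, translationXYZ);
}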
A few others and I are taking a crack at building a simple side-scroller game. However, I cannot get hold of them to help answer my question, so I put it to you: the following code leaves me with a SIGSEGV error at the annotated place. If anyone can tell me why, I would really appreciate it. If you need any more info, I will be watching this closely.
Main.cpp
Vector2 dudeDim(60,60);
Vector2 dudePos(300, 300);
Entity *test = new Entity("img/images.jpg", dudeDim, dudePos, false);
leads to:
Entity.cpp
Entity::Entity(std::string filename, Vector2 size, Vector2 position, bool passable):
mTexture(filename)
{
mTexture.load(false);
mDimension2D = size;
mPosition2D = position;
mPassable = passable;
}
leads to:
Textures.cpp
void Texture::load(bool generateMipmaps)
{
FREE_IMAGE_FORMAT imgFormat = FIF_UNKNOWN;
FIBITMAP *dib(0);
imgFormat = FreeImage_GetFileType(mFilename.c_str(), 0);
//std::cout << "File format: " << imgFormat << std::endl;
if (FreeImage_FIFSupportsReading(imgFormat)) // Check if the plugin has reading capabilities and load the file
dib = FreeImage_Load(imgFormat, mFilename.c_str());
if (!dib)
std::cout << "Error loading texture files!" << std::endl;
BYTE* bDataPointer = FreeImage_GetBits(dib); // Retrieve the image data
mWidth = FreeImage_GetWidth(dib); // Get the image width and height
mHeight = FreeImage_GetHeight(dib);
mBitsPerPixel = FreeImage_GetBPP(dib);
if (!bDataPointer || !mWidth || !mHeight)
std::cout << "Error loading texture files!" << std::endl;
// Generate and bind ID for this texture
// vvvvvvvvvv !!! SIGSEGV HERE !!! vvvvvvvvvv
glGenTextures(1, &mId);
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
glBindTexture(GL_TEXTURE_2D, mId);
int format = mBitsPerPixel == 24 ? GL_BGR_EXT : mBitsPerPixel == 8 ? GL_LUMINANCE : 0;
int iInternalFormat = mBitsPerPixel == 24 ? GL_RGB : GL_DEPTH_COMPONENT;
if(generateMipmaps)
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, mWidth, mHeight, 0, format, GL_UNSIGNED_BYTE, bDataPointer);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
//std::cout << "texture generated " << mId << std::endl;
FreeImage_Unload(dib);
}
After reading Peter's suggestion, I changed my main.cpp file to:
#include <iostream>
#include <vector>
#include "Game.h"
using namespace std;
int main(int argc, char** argv)
{
Game theGame;
/* Initialize game control objects and resources */
if (theGame.onInit() != false)
{
return theGame.onExecute();
}
else
{
return -1;
}
}
It would seem the SIGSEGV error is gone, and I'm now left with something not initializing. So thank you, Peter, you were correct; now I'm off to solve this issue.
OK, so this is obviously only a small part of the code, but to save time and a bit of sanity, all the code is available at:
GitHub Repo
After looking at your code, I can say that the problem is most likely that you have not initialized your OpenGL context before executing that code.
You need to call your Game::onInit(), which also calls RenderEngine::initGraphics(), before making any calls to OpenGL, and you currently don't do that. Your current call chain is main() -> Game constructor (which calls the rendering engine constructor, but that constructor doesn't initialize SDL and OpenGL) -> Entity constructor -> load texture.
For details, look at the OpenGL Wiki FAQ.
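For illustration, here is a minimal sketch of the ordering described above, using the names from your repo (Game, Entity, Vector2); the header names and entity parameters are placeholders. The point is simply that the window and OpenGL context must exist before anything calls glGenTextures():
#include "Game.h"
#include "Entity.h"   // assumed header name for the Entity class
int main(int argc, char** argv)
{
    Game theGame;
    if (theGame.onInit() == false)      // creates the window and OpenGL context via RenderEngine::initGraphics()
        return -1;
    // Only now is it safe to construct entities, because Entity's constructor
    // ends up calling glGenTextures() via Texture::load().
    Vector2 dudeDim(60, 60);
    Vector2 dudePos(300, 300);
    Entity* test = new Entity("img/images.jpg", dudeDim, dudePos, false);
    return theGame.onExecute();
}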