VTK Visualizer in a C++ Class Does Not Render the Scene

I am trying to put the example provided by VTK here into a C++ class. This example reads an STL file and visualizes it in a window.
Below is my code for which I get no compiler or run-time error. My input STL file is read correctly but my code exits without producing any visualization of the STL object.
In my C++ class, I organized the VTK example code by separating the reader from the visualizer and putting them into two functions in the class. The visualizer uses a mapper, actor, and a renderer to visualize the STL object. However, I am not able to get the code to visualize my STL object.
The only way I am able to get my C++ class to work is to copy the code of the visualize function into my readSTL function; then I can see the window with my STL object rendered in it.
Could someone kindly give me some hints on how I can perform the visualization in a separate function of the class?
#include <iostream>
#include <string>
#include <vtkPolyData.h>
#include <vtkSTLReader.h>
#include <vtkSmartPointer.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkRenderWindowInteractor.h>
class triangulation
{
public:
vtkIdType numberOfFaces;
// Constructor
triangulation(void);
triangulation(std::string filename);
void setFilename(std::string filename);
std::string getFilename(void);
// Getters
vtkSmartPointer<vtkPolyData> getMesh(void);
vtkSmartPointer<vtkCellArray> getPolys(void);
vtkSmartPointer<vtkPoints> getPoints(void);
vtkSmartPointer<vtkDataArray> getDataArray(void);
vtkIdType getNumberofFaces(void);
// Visualizer
void visualize(void);
// Reader
void readSTL(void);
private:
std::string stlFilename;
vtkSmartPointer<vtkPolyData> mesh;
vtkSmartPointer<vtkSTLReader> reader;
vtkSmartPointer<vtkCellArray> polys;
vtkSmartPointer<vtkPoints> points;
vtkSmartPointer<vtkDataArray> dataArray;
};
triangulation::triangulation()
{
//
}
triangulation::triangulation(std::string filename)
{
setFilename(filename);
}
void triangulation::readSTL(void)
{
vtkSmartPointer<vtkSTLReader> reader = vtkSmartPointer<vtkSTLReader>::New();
reader->SetFileName(stlFilename.c_str());
reader->Update();
mesh = reader->GetOutput();
polys = mesh->GetPolys();
points = mesh->GetPoints();
numberOfFaces = mesh->GetNumberOfCells();
}
void triangulation::setFilename(std::string filename)
{
stlFilename = filename;
}
std::string triangulation::getFilename(void)
{
return stlFilename;
}
vtkSmartPointer<vtkPolyData> triangulation::getMesh(void)
{
return mesh;
}
vtkSmartPointer<vtkCellArray> triangulation::getPolys(void)
{
return polys;
}
vtkIdType triangulation::getNumberofFaces(void)
{
return numberOfFaces;
}
vtkSmartPointer<vtkPoints> triangulation::getPoints(void)
{
return points;
}
vtkSmartPointer<vtkDataArray> triangulation::getDataArray(void)
{
return dataArray;
}
void triangulation::visualize(void)
{
// Visualize
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(reader->GetOutputPort());
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
renderer->AddActor(actor);
renderer->SetBackground(.3, .6, .3); // Background color green
renderWindow->Render();
renderWindowInteractor->Start();
}
int main(int argc, char *argv[])
{
if (argc != 2)
{
cout << "Required parameters: Filename" << endl;
return EXIT_FAILURE;
}
std::string inputFilename = argv[1];
std::cout << inputFilename << std::endl;
triangulation *tr = new triangulation(inputFilename);
tr->readSTL();
cout << "Number of Faces = " << tr->getNumberofFaces() << endl;
vtkSmartPointer<vtkPolyData> mesh = tr->getMesh();
vtkSmartPointer<vtkCellArray> polys = tr->getPolys();
vtkSmartPointer<vtkPoints> points = tr->getPoints();
vtkSmartPointer<vtkDataArray> data = tr->getDataArray();
//mesh->Print(cout);
// polys->Print(cout);
// points->Print(cout);
// data->Print(cout);
tr->visualize();
delete tr;
return EXIT_SUCCESS;
}
Here is my STL object that I used to test my code:
"myobject.stl":
solid STL generated by MeshLab
facet normal -4.919344e-01 2.986337e-01 8.178133e-01
outer loop
vertex -1.265660e+00 4.756133e+00 1.702858e-01
vertex -1.649185e+00 4.810246e+00 -8.017353e-02
vertex -1.602208e+00 4.484959e+00 6.686619e-02
endloop
endfacet
facet normal -5.898571e-01 2.603427e-01 7.643889e-01
outer loop
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
vertex -1.602208e+00 4.484959e+00 6.686619e-02
vertex -1.649185e+00 4.810246e+00 -8.017353e-02
endloop
endfacet
facet normal -6.398674e-01 2.007942e-01 7.417893e-01
outer loop
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
vertex -1.845646e+00 4.479523e+00 -1.416520e-01
vertex -1.602208e+00 4.484959e+00 6.686619e-02
endloop
endfacet
facet normal -6.901410e-01 1.854371e-01 6.995131e-01
outer loop
vertex -2.043903e+00 4.838252e+00 -4.323493e-01
vertex -1.845646e+00 4.479523e+00 -1.416520e-01
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
endloop
endfacet
endsolid vcg

Your code will work if you simply replace the following line in triangulation::readSTL():
// Problem: local variable 'reader' shadows class member 'reader'.
vtkSmartPointer<vtkSTLReader> reader = vtkSmartPointer<vtkSTLReader>::New();
// Solution:
reader = vtkSmartPointer<vtkSTLReader>::New();
Because the class member reader remains uninitialized, the application crashes with a segfault when it is accessed in triangulation::visualize().
Hint: you probably would not have run into this problem if you consistently used this-> or a naming prefix (m_ or _) to refer to class members. See this SO post for some explanations.
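For illustration, here is a minimal sketch of readSTL() with the shadowing removed, using this-> to make the member access explicit (the rest of the class stays as in the question):
void triangulation::readSTL(void)
{
// Assign to the existing class member; do not declare a new local 'reader'.
this->reader = vtkSmartPointer<vtkSTLReader>::New();
this->reader->SetFileName(stlFilename.c_str());
this->reader->Update();
mesh = this->reader->GetOutput();
polys = mesh->GetPolys();
points = mesh->GetPoints();
numberOfFaces = mesh->GetNumberOfCells();
}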

Related

How to correctly use VTK ConstrainedDelaunay2D?

I've started from the VTK ConstrainedDelaunay2D example and added my own points:
#include <vtkSmartPointer.h>
#include <vtkDelaunay2D.h>
#include <vtkCellArray.h>
#include <vtkProperty.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPolygon.h>
#include <vtkMath.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkNamedColors.h>
#include <vtkVersionMacros.h> // For version macros
int main(int, char *[])
{
vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();
int ptsHeight = 400;
std::vector<std::vector<int>> pts{ {166, 127},{103, 220},{166, 190},{174, 291},{189, 226},{227, 282},{213, 187},{242, 105},{196, 131},{182, 83} };
for (size_t i = 0; i < pts.size(); i++)
{
// !important: flip y
int x = pts[i][0];
int y = ptsHeight - pts[i][1];
points->InsertNextPoint(x, y, 0);
}
vtkSmartPointer<vtkPolyData> aPolyData = vtkSmartPointer<vtkPolyData>::New();
aPolyData->SetPoints(points);
// Create a cell array to store the polygon in
vtkSmartPointer<vtkCellArray> aCellArray = vtkSmartPointer<vtkCellArray>::New();
// Define a polygonal hole with a clockwise polygon
vtkSmartPointer<vtkPolygon> aPolygon = vtkSmartPointer<vtkPolygon>::New();
for (unsigned int i = 0; i < pts.size(); i++)
{
aPolygon->GetPointIds()->InsertNextId(i);
}
aCellArray->InsertNextCell(aPolygon);
// Create a polydata to store the boundary. The points must be the
// same as the points we will triangulate.
vtkSmartPointer<vtkPolyData> boundary =
vtkSmartPointer<vtkPolyData>::New();
boundary->SetPoints(aPolyData->GetPoints());
boundary->SetPolys(aCellArray);
// Triangulate the grid points
vtkSmartPointer<vtkDelaunay2D> delaunay =
vtkSmartPointer<vtkDelaunay2D>::New();
delaunay->SetInputData(aPolyData);
delaunay->SetSourceData(boundary);
// Visualize
vtkSmartPointer<vtkPolyDataMapper> meshMapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
meshMapper->SetInputConnection(delaunay->GetOutputPort());
vtkSmartPointer<vtkNamedColors> colors =
vtkSmartPointer<vtkNamedColors>::New();
vtkSmartPointer<vtkActor> meshActor =
vtkSmartPointer<vtkActor>::New();
meshActor->SetMapper(meshMapper);
meshActor->GetProperty()->EdgeVisibilityOn();
meshActor->GetProperty()->SetEdgeColor(colors->GetColor3d("Peacock").GetData());
meshActor->GetProperty()->SetInterpolationToFlat();
meshActor->GetProperty()->SetBackfaceCulling(true);
// Create a renderer, render window, and interactor
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
// Add the actor to the scene
renderer->AddActor(meshActor);
//renderer->AddActor(boundaryActor);
renderer->SetBackground(colors->GetColor3d("Mint").GetData());
// Render and interact
renderWindow->SetSize(640, 480);
renderWindow->Render();
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
I'm experiencing two issues:
I get different results if I flip the Y coordinates: why is that?
Why are there faces pointing in the wrong direction (flipped normal / wrong winding)?
Here's what I mean by the 1st issue:
If I don't flip the Y coordinates I get this:
I get the same effect if I don't flip the Y axis but insert the boundary polygon in reverse order:
for (unsigned int i = 0; i < pts.size(); i++)
{
aPolygon->GetPointIds()->InsertNextId(pts.size() - 1 - i);
}
I don't think I fully understand how the boundary/constraint works.
I thought that the same points should produce the same triangulation whether the vertices are flipped vertically or not. (I suspect the order of the indices changes then?)
Regarding the second issue (unpredictably flipped faces), I'm not sure what the best way forward is. I had a look at the vtkDelaunay2D class and couldn't find anything related.
(I've tried setting projection plane mode to VTK_DELAUNAY_XY_PLANE, but it didn't seem to affect the output)
I've also tried to use vtkPolyDataNormals but got no output:
vtkSmartPointer<vtkPolyDataNormals> normalGenerator = vtkSmartPointer<vtkPolyDataNormals>::New();
normalGenerator->SetInputData(delaunay->GetOutput());
normalGenerator->ComputePointNormalsOff();
normalGenerator->ComputeCellNormalsOn();
normalGenerator->FlipNormalsOn();
normalGenerator->Update();
(normalGenerator's output has 0 cells and points)
Is there a way to compute a constrained Delaunay triangulation for a list of 2D points and ensure all the faces point the same way? (If so, how? Is it possible to do this with the vtkDelaunay2D class alone, or is it necessary to use other filters?)
Any hints/tips are more than welcome :)
I'm using VTK 8.2 by the way.
The flip in Y effectively reverses the face orientation (what was clockwise becomes anti-clockwise, as in a mirror).
I'm not sure I can reproduce your example above. A quick test in Python seems to give the expected behavior; maybe you can start from this and map it to your C++ version:
import vedo
pts = [
[166, 127],
[103, 220],
[166, 190],
[174, 291],
[189, 226],
[227, 282],
[213, 187],
[242, 105],
[196, 131],
[182, 83],
]
ids = [[2,4,6], [0,2,8]] # faces to erase by pt-index (clockwise)
dly = vedo.delaunay2D(pts, mode='xy', boundaries=ids)
dly.c('grey5').lc('red4').lw(2)
labels = vedo.Points(pts).labels('id').z(1)
vedo.show(labels, dly, axes=1)
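As for the vtkPolyDataNormals attempt in the question returning 0 cells and points: my assumption (not part of the answer above) is that SetInputData(delaunay->GetOutput()) hands the filter an output object that has never been updated, so nothing propagates. A minimal C++ sketch that connects through the pipeline and asks for consistent cell orientation instead:
#include <vtkPolyDataNormals.h>
// Sketch: connect via the pipeline so Update() actually pulls the Delaunay output.
vtkSmartPointer<vtkPolyDataNormals> normalGenerator =
vtkSmartPointer<vtkPolyDataNormals>::New();
normalGenerator->SetInputConnection(delaunay->GetOutputPort());
normalGenerator->ComputePointNormalsOff();
normalGenerator->ComputeCellNormalsOn();
normalGenerator->ConsistencyOn(); // reorder cells so neighbouring faces agree
normalGenerator->Update();
// Feed the mapper from the normals filter instead of the raw Delaunay output.
meshMapper->SetInputConnection(normalGenerator->GetOutputPort());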

Qt3D SceneLoader entity is not rendered

I'm trying to create a Qt3D prototype which is able to render an OBJ that contains transparent objects. Therefore I need to somehow incorporate QSortPolicy with a custom frame graph. I put together a few examples which I found on the internet (unfortunately there aren't many Qt3D C++ examples). The source of the prototype is shown below. The problem is that I add a QTorusMesh and the content of the QSceneLoader to the root entity, but when rendered only the torus is shown; the OBJ (in this case the monkey) is not rendered, and I don't know why nor how to debug it. I dumped all the trees (scene graph and frame graph) and couldn't find any inconsistency. The only thing I can think of is that the loaded object somehow needs a different renderer (QGeometryRenderer) which would need to be added to the frame graph? Does someone know what I'm doing wrong?
Examples: Custom frame graph, Scene walker
#include <QGuiApplication>
#include <Qt3DCore/QEntity>
#include <Qt3DCore/QTransform>
#include <Qt3DCore/QAspectEngine>
#include <Qt3DInput/QInputAspect>
#include <Qt3DExtras/Qt3DWindow>
#include <Qt3DExtras/QPhongMaterial>
#include <Qt3DExtras/QOrbitCameraController>
#include <Qt3DExtras/QTorusMesh>
#include <Qt3DRender/QCamera>
#include <Qt3DRender/QRenderAspect>
#include <Qt3DRender/QSceneLoader>
#include <Qt3DRender/QRenderSurfaceSelector>
#include <Qt3DRender/QClearBuffers>
#include <Qt3DRender/QLayerFilter>
#include <Qt3DRender/QViewport>
#include <Qt3DRender/QCameraSelector>
#include <Qt3DRender/QLayer>
#include <Qt3DRender/QRenderSettings>
class SceneWalker : public QObject
{
public:
SceneWalker(Qt3DRender::QSceneLoader* loader):
m_loader(loader)
{}
void onStatusChanged();
private:
void walkEntity(Qt3DCore::QEntity* e, int depth = 0);
Qt3DRender::QSceneLoader* m_loader;
};
void SceneWalker::onStatusChanged()
{
qDebug() << "Status changed:" << m_loader->status();
if (m_loader->status() != Qt3DRender::QSceneLoader::Ready)
return;
// The QSceneLoader instance is a component of an entity. The loaded scene
// tree is added under this entity.
QVector<Qt3DCore::QEntity*> entities = m_loader->entities();
// Technically there could be multiple entities referencing the scene loader
// but sharing is discouraged, and in our case there will be one anyhow.
if (entities.isEmpty())
return;
Qt3DCore::QEntity* root = entities[0];
// Print the tree.
walkEntity(root);
// To access a given node (like a named mesh in the scene), use QObject::findChild().
// The scene structure and names always depend on the asset.
Qt3DCore::QEntity* e = root->findChild<Qt3DCore::QEntity*>(QStringLiteral("PlanePropeller_mesh")); // toyplane.obj
if (e)
qDebug() << "Found propeller node" << e << "with components" << e->components();
}
void SceneWalker::walkEntity(Qt3DCore::QEntity* e, int depth)
{
Qt3DCore::QNodeVector nodes = e->childNodes();
for (int i = 0; i < nodes.count(); ++i)
{
Qt3DCore::QNode* node = nodes[i];
Qt3DCore::QEntity* entity = qobject_cast<Qt3DCore::QEntity*>(node);
if (entity)
{
QString indent;
indent.fill(' ', depth * 2);
qDebug().noquote() << indent << "Entity:" << entity << "Components:" << entity->components();
walkEntity(entity, depth + 1);
}
}
}
int main(int argc, char* argv[])
{
QGuiApplication app(argc, argv);
Qt3DExtras::Qt3DWindow* window = new Qt3DExtras::Qt3DWindow();
// Root
Qt3DCore::QEntity* rootEntity = new Qt3DCore::QEntity();
window->setRootEntity(rootEntity);
Qt3DRender::QRenderSurfaceSelector *renderSurfaceSelector = new Qt3DRender::QRenderSurfaceSelector();
renderSurfaceSelector->setSurface(window);
// clearing the buffers
Qt3DRender::QClearBuffers* clearBuffers = new Qt3DRender::QClearBuffers(renderSurfaceSelector);
clearBuffers->setBuffers(Qt3DRender::QClearBuffers::ColorDepthBuffer);
// Framegraph for objects
Qt3DRender::QLayerFilter* objectsLayerFilter = new Qt3DRender::QLayerFilter(renderSurfaceSelector);
Qt3DRender::QLayer* objectsLayer = new Qt3DRender::QLayer(objectsLayerFilter);
objectsLayerFilter->addLayer(objectsLayer);
Qt3DRender::QViewport* viewport = new Qt3DRender::QViewport(objectsLayer);
Qt3DRender::QCameraSelector* objectsCameraSelector = new Qt3DRender::QCameraSelector(viewport);
Qt3DRender::QCamera* objectsCamera = new Qt3DRender::QCamera(objectsCameraSelector);
objectsCamera->lens()->setPerspectiveProjection(45.f, 16.0f/9.0f, 0.01f, 1000.f);
objectsCamera->setPosition(QVector3D(0, 0, -10));
objectsCamera->setViewCenter(QVector3D(0, 0, 0));
objectsCamera->setUpVector(QVector3D(0, 1, 0));
objectsCameraSelector->setCamera(objectsCamera);
// Set the new framegraph
window->setActiveFrameGraph(renderSurfaceSelector);
window->renderSettings()->setRenderPolicy(Qt3DRender::QRenderSettings::Always);
// camera controls
Qt3DExtras::QOrbitCameraController* camController = new Qt3DExtras::QOrbitCameraController(rootEntity);
camController->setLinearSpeed(50.0f);
camController->setLookSpeed(180.0f);
camController->setCamera(objectsCamera);
// Torus
Qt3DCore::QEntity* torusEntity = new Qt3DCore::QEntity(rootEntity);
Qt3DExtras::QTorusMesh* torusMesh = new Qt3DExtras::QTorusMesh(torusEntity);
torusMesh->setSlices(50.0f);
torusMesh->setRings(50.0f);
torusMesh->setRadius(2.0f);
Qt3DExtras::QPhongMaterial* torusMaterial = new Qt3DExtras::QPhongMaterial(torusEntity);
torusMaterial->setAmbient(Qt::gray);
Qt3DCore::QTransform* torusTransform = new Qt3DCore::QTransform(torusEntity);
torusTransform->setTranslation(QVector3D(0.0f, 0.0f, 10.0f));
torusTransform->setRotationY(50.0f);
torusTransform->setScale(2.0f);
torusEntity->addComponent(torusTransform);
torusEntity->addComponent(torusMesh);
torusEntity->addComponent(torusMaterial);
torusEntity->addComponent(objectsLayer);
// Scene loader
Qt3DCore::QEntity* sceneLoaderEntity = new Qt3DCore::QEntity(rootEntity);
Qt3DRender::QSceneLoader* sceneLoader = new Qt3DRender::QSceneLoader(sceneLoaderEntity);
sceneLoader->setSource(QUrl::fromLocalFile("monkey.obj"));
// Transform
Qt3DCore::QTransform* sceneLoaderTransform = new Qt3DCore::QTransform(sceneLoaderEntity);
sceneLoaderTransform->setScale(2.0f);
sceneLoaderTransform->setTranslation(QVector3D(0.0f, 0.0f, 10.0f));
SceneWalker sceneWalker(sceneLoader);
QObject::connect(sceneLoader, &Qt3DRender::QSceneLoader::statusChanged, &sceneWalker, &SceneWalker::onStatusChanged);
sceneLoaderEntity->addComponent(sceneLoader);
sceneLoaderEntity->addComponent(sceneLoaderTransform);
sceneLoaderEntity->addComponent(objectsLayer);
window->show();
return app.exec();
}
The problem is that you are not actually adding the monkey to the objects layer. QSceneLoader creates its own scene subtree, even when the OBJ file contains a single object. So you are just adding the root entity of the subtree to the objects layer.
The SceneWalker object traverses the QSceneLoader subtree and provides information about its structure. You can just look at the console output, find the names of the entities you want to render, and attach them to the desired layer.
This has to be done asynchronously, using signals and slots. Do this right after creating the QSceneLoader object (assuming your mesh is named "monkey"):
QObject::connect(sceneLoader, &Qt3DRender::QSceneLoader::statusChanged, &app,
[sceneLoader, objectsLayer](Qt3DRender::QSceneLoader::Status s) {
if (s == Qt3DRender::QSceneLoader::Status::Ready)
sceneLoader->entity("monkey")->addComponent(objectsLayer);
});
Since you are using an OBJ file, you can even ditch the SceneWalker entirely. OBJ files are text-based, so you can find the name of the mesh by just opening it with a text editor.
If you succeed with layer filters, you don't need to use QSortPolicy (check out this answer).
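If you don't want to depend on the mesh name at all, a sketch of an alternative (my own variant, not from the answer above) is to walk the loaded subtree once it is ready and attach the layer to every entity in it:
// Hypothetical helper: recursively tag every entity in the loaded subtree
// with the layer, so the layer filter lets the whole model through.
static void addLayerRecursively(Qt3DCore::QEntity* entity, Qt3DRender::QLayer* layer)
{
entity->addComponent(layer);
for (Qt3DCore::QNode* node : entity->childNodes())
{
if (auto* child = qobject_cast<Qt3DCore::QEntity*>(node))
addLayerRecursively(child, layer);
}
}
// Connected the same way as above, once the loader reports Ready:
QObject::connect(sceneLoader, &Qt3DRender::QSceneLoader::statusChanged, &app,
[sceneLoader, objectsLayer](Qt3DRender::QSceneLoader::Status s) {
if (s != Qt3DRender::QSceneLoader::Status::Ready)
return;
for (Qt3DCore::QEntity* root : sceneLoader->entities())
addLayerRecursively(root, objectsLayer);
});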

VTK Iso Slider not working or not showing slider widget

I have some crazy issues in my code:
1) As you see in the code below, if I execute it I get a 3D preview model without the slider widget. (Fixed)
2) If I change the line mapper->SetInputConnection(surface->GetOutputPort()); to mapper->SetInputConnection(reader->GetOutputPort());, I get a slider widget but no 3D model. (Fixed)
3) If I try to set a custom key event, it works and changes the value, but after every change I have to wait until the 3D DICOM surface is rebuilt (it behaves as if the 3D model is created from scratch every time, not like a slider).
#include <iostream>
#include <vtkSmartPointer.h>
#include <vtkCommand.h>
#include <vtkImageData.h>
#include <vtkDICOMImageReader.h>
#include <vtkMarchingCubes.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkProperty.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkSliderWidget.h>
#include <vtkSliderRepresentation.h>
#include <vtkSliderRepresentation3D.h>
class vtkSliderCallback : public vtkCommand
{
public:
int counter = 1;
static vtkSliderCallback *New()
{
return new vtkSliderCallback;
}
virtual void Execute(vtkObject *caller, unsigned long, void*)
{
vtkSliderWidget *sliderWidget =
reinterpret_cast<vtkSliderWidget*>(caller);
std::cout << this->counter << std::endl;
this->SphereSource->SetValue(this->counter++, static_cast<vtkSliderRepresentation *>(sliderWidget->GetRepresentation())->GetValue());
}
vtkSliderCallback():SphereSource(0) {}
vtkMarchingCubes *SphereSource;
};
int main(int argc, char* argv[])
{
// Verify input arguments
if ( argc < 4 )
{
std::cout << "Usage: " << argv[0]
<< " DicomSiresFolder" << " isoValueStep" << " OutputDirectory" << std::endl;
return EXIT_FAILURE;
}
std::string folder = argv[1];
// A sphere
vtkSmartPointer<vtkImageData> volume =
vtkSmartPointer<vtkImageData>::New();
vtkSmartPointer<vtkDICOMImageReader> reader =
vtkSmartPointer<vtkDICOMImageReader>::New();
reader->SetDirectoryName(folder.c_str());
reader->Update();
volume->DeepCopy(reader->GetOutput());
vtkSmartPointer<vtkMarchingCubes> surface =
vtkSmartPointer<vtkMarchingCubes>::New();
surface->SetInputData(volume);
surface->ComputeNormalsOn();
surface->SetValue(0, 400);
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(surface->GetOutputPort());
mapper->ScalarVisibilityOff();
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->GetProperty()->SetInterpolationToFlat();
// A renderer and render window
vtkSmartPointer<vtkRenderer> renderer =
vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow =
vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
// An interactor
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor =
vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
// Add the actors to the scene
renderer->AddActor(actor);
// Render an image (lights and cameras are created automatically)
renderWindow->Render();
vtkSmartPointer<vtkSliderRepresentation3D> sliderRep =
vtkSmartPointer<vtkSliderRepresentation3D>::New();
sliderRep->SetMinimumValue(-800.0);
sliderRep->SetMaximumValue(800.0);
sliderRep->SetTitleText("Iso Resolution");
sliderRep->GetPoint1Coordinate()->SetCoordinateSystemToWorld();
sliderRep->GetPoint1Coordinate()->SetValue(-4,6,0);
sliderRep->GetPoint2Coordinate()->SetCoordinateSystemToWorld();
sliderRep->GetPoint2Coordinate()->SetValue(4,6,0);
sliderRep->SetSliderLength(0.075);
sliderRep->SetSliderWidth(0.05);
sliderRep->SetEndCapLength(0.05);
vtkSmartPointer<vtkSliderWidget> sliderWidget =
vtkSmartPointer<vtkSliderWidget>::New();
sliderWidget->SetInteractor(renderWindowInteractor);
sliderWidget->SetRepresentation(sliderRep);
sliderWidget->SetAnimationModeToAnimate();
sliderWidget->EnabledOn();
vtkSmartPointer<vtkSliderCallback> callback =
vtkSmartPointer<vtkSliderCallback>::New();
callback->SphereSource = surface;
sliderWidget->AddObserver(vtkCommand::InteractionEvent,callback);
renderWindowInteractor->Initialize();
renderWindow->Render();
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
UPDATE:
I fixed issues 1 and 2, but issue 3 still needs a fix.
Your callback changes the contour value of vtkMarchingCubes, so you are running marching cubes, and therefore rebuilding the mesh, on every change. As far as I understand what you need to do, one solution could be to precompute the vtkMarchingCubes output for all possible values: not elegant, but it could work if you have enough memory.
In any case, use vtkFlyingEdges3D instead of vtkMarchingCubes; it's much faster, and it could be fast enough to solve your problem without any other modification (see the sketch below).
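A minimal sketch of that swap against the code above (assuming the callback's SphereSource member is retyped to vtkFlyingEdges3D* as well; the contouring API is the same):
#include <vtkFlyingEdges3D.h>
// Drop-in replacement for vtkMarchingCubes in the code above.
vtkSmartPointer<vtkFlyingEdges3D> surface =
vtkSmartPointer<vtkFlyingEdges3D>::New();
surface->SetInputData(volume);
surface->ComputeNormalsOn();
surface->SetValue(0, 400);
mapper->SetInputConnection(surface->GetOutputPort());
// The slider callback stays the same, e.g.:
//   surface->SetValue(0, sliderRepresentation->GetValue());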

Sequential off-screen rendering / screen capture without windowing system using OpenSceneGraph

I am currently working on an off-screen renderer so that I can do Mutual Information Registration for real-world scenes. I use OpenSceneGraph to cope with the large data and automatic loading. I am having trouble getting a framebuffer capture within a sequential, single-threaded program.
Well, I have this class (header):
#include <osg/ref_ptr>
#include <osg/Array>
#include <osg/ImageUtils>
#include <osgGA/StateSetManipulator>
#include <osgViewer/Viewer>
#include <osg/GraphicsContext>
#include <osg/Texture2D>
#include <osg/FrameBufferObject>
#include <osgDB/WriteFile>
#include <osg/Referenced>
#include <osg/Vec3>
#include <osg/Image>
#include <osg/State>
#include <string>
#include <chrono>
#include <thread>
#include <assert.h>
#include "ImagingPrimitives.h"
class BoundRenderScene {
public:
BoundRenderScene();
virtual ~BoundRenderScene();
void NextFrame(void);
inline OpenThreads::Mutex* GetMutexObject(void) { return &_mutex; }
inline osg::Image* GetFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
return _frame.get();
}
inline void GetFrame(osg::Image* img)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && (img!=NULL) && img->valid())
{
glReadBuffer(GL_BACK);
img->readPixels(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY(), GL_RGB,GL_UNSIGNED_BYTE);
uint w = img->s(), h = img->t(), d = img->r(), c = uint(img->getPixelSizeInBits()/8);
/*
* bare testing write op
* osgDB::writeImageFile(const_cast<const osg::Image&>(*img), "/tmp/testimg.png");
*/
}
}
inline void SetCameraConfiguration(CameraConfiguration* configuration) { _camera_configuration = configuration; }
inline void SetCameraMatrix(osg::Matrixd camera_matrix) { _camera_matrix = camera_matrix; }
inline void SetScene(osg::Node* scene) { _scene = scene; }
inline void Initialize(void) {
if(!_initialized)
_init();
else
_re_init();
}
protected:
osgViewer::Viewer _viewer;
osg::Matrixd _camera_matrix;
osg::ref_ptr<osg::Texture2D> _tex;
osg::ref_ptr<osg::FrameBufferObject> _fbo;
mutable osg::ref_ptr<osg::Image> _frame;
osg::ref_ptr<osg::Node> _scene;
osg::ref_ptr<osg::GraphicsContext::Traits> _traits;
osg::ref_ptr<osg::GraphicsContext> _gc;
CameraConfiguration* _camera_configuration;
SnapshotCallback* cb;
std::string _filepath;
private:
void _init(void);
void _re_init(void);
bool _initialized;
mutable OpenThreads::Mutex _mutex;
osg::Matrixd pre_transform;
osg::Matrixd transformation;
};
Also, because many off-screen rendering and screen capture examples work with post/final draw callbacks, I copied the callback structure from the "osgdistortion" example, but added a mutex for synchronisation:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
inline SnapshotCallback(OpenThreads::Mutex* mtx_obj, std::string filepath, int width, int height) : _filepath(filepath), _output_to_file(false), _mutex(mtx_obj)
{
_image = new osg::Image();
_image->allocateImage(width, height, 1, GL_RGB, GL_UNSIGNED_BYTE);
if(filepath!="")
_output_to_file = true;
}
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
inline virtual void operator() (const osg::Camera& camera) const
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(*_mutex);
osg::Viewport* viewport = camera.getViewport();
if(viewport && _image.valid())
{
glReadBuffer(GL_BACK);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*_image, _filepath);
}
}
}
std::string _filepath;
bool _output_to_file;
mutable OpenThreads::Mutex* _mutex;
mutable osg::ref_ptr<osg::Image> _image;
};
I initialize and render the scene as follows:
#include "BoundRenderScene.h"
void BoundRenderScene::_init(void)
{
if(_camera!=NULL)
_viewer.setDone(true);
_traits->x = 0;
_traits->y = 0;
_traits->width = _camera_configuration->GetSX();
_traits->height = _camera_configuration->GetSY();
_traits->red = 8;
_traits->green = 8;
_traits->blue = 8;
_traits->alpha = 0;
_traits->depth = 24;
_traits->windowDecoration = false;
_traits->pbuffer = true;
_traits->doubleBuffer = true;
_traits->sharedContext = 0x0;
if(_gc.get()!=NULL)
{
bool release_success = _gc->releaseContext();
if(!release_success)
std::cerr << "Error releasing Graphics Context.";
}
_gc = osg::GraphicsContext::createGraphicsContext(_traits.get());
_viewer.getCamera()->setGraphicsContext(_gc.get());
_viewer.setThreadingModel(osgViewer::Viewer::SingleThreaded);
_viewer.setUpThreading();
_viewer.realize();
_frame->allocateImage(_camera_configuration->GetSX(), _camera_configuration->GetSY(), 1, GL_RGB, GL_UNSIGNED_BYTE);
_viewer.getCamera()->getOrCreateStateSet();
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::PIXEL_BUFFER);
cb = new SnapshotCallback(&_mutex,_filepath, _camera_configuration->GetSX(), _camera_configuration->GetSY());
//_viewer.getCamera()->setPostDrawCallback( cb );
//Clear colour "black" for representing "no information" => background elimination in natural image, pls.
_viewer.getCamera()->setClearColor(osg::Vec4f(0.25f, 0.25f, 0.25f, 1.0f));
_viewer.getCamera()->setClearMask(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
_viewer.getCamera()->setDrawBuffer(GL_BACK);
_viewer.getCamera()->setReadBuffer(GL_BACK);
_viewer.getCamera()->setViewport(0,0,_camera_configuration->GetSX(),_camera_configuration->GetSY());
_viewer.getCamera()->setProjectionMatrix(osg::Matrixd::perspective(osg::RadiansToDegrees(_camera_configuration->GetFoV()), _camera_configuration->GetAspectRatio(), 0.1, 150.0));
//looking in geo-coord system
_viewer.getCamera()->setViewMatrix(osg::Matrixd::lookAt(osg::Vec3d(0.0, 0.0, -1.0), osg::Vec3d(0.0, 0.0, 1.0), osg::Vec3d(0.0, 1.0, 0.0)));
_viewer.getCamera()->attach(osg::Camera::COLOR_BUFFER, _frame.get());
_viewer.getCamera()->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
_tex->setTextureSize(_camera_configuration->GetSX(), _camera_configuration->GetSY());
_tex->setInternalFormat(GL_RGB);
_tex->setFilter(osg::Texture::MIN_FILTER, osg::Texture::LINEAR);
_tex->setFilter(osg::Texture::MAG_FILTER, osg::Texture::LINEAR);
_tex->setWrap(osg::Texture::WRAP_S, osg::Texture::CLAMP_TO_EDGE);
_tex->setWrap(osg::Texture::WRAP_T, osg::Texture::CLAMP_TO_EDGE);
_tex->setResizeNonPowerOfTwoHint(false);
_tex->setImage(0,_frame.get());
_fbo->setAttachment(osg::Camera::COLOR_BUFFER, osg::FrameBufferAttachment(_tex.get()));
_viewer.setDone(false);
_viewer.setSceneData(_scene.get());
_viewer.setCameraManipulator(0x0);
}
void BoundRenderScene::NextFrame(void)
{
OpenThreads::ScopedLock<OpenThreads::Mutex> lock(_mutex);
if(_frame.valid() && !_viewer.done())
{
osg::Matrixd inverse_cam = osg::Matrixd::inverse(_camera_matrix);
transformation = inverse_cam * pre_transform;
_viewer.getCamera()->setViewMatrix(transformation);
_viewer.updateTraversal();
_viewer.frame();
}
else
std::cout << "Viewer or Camera invalid." << std::endl;
}
The main workflow looks like this (simplified):
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for regsitration
*/
renderer._init();
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
renderer.NextImage();
sleep(0.04); // to get the 25fps frame limit
osg::Image* reg_image = renderer.GetImage();
/*
* Do further processing
*/
}
Now comes the crux: the OpenSceneGraph example "osgprerender" (included in OSG) does off-screen rendering using an osg::Camera::DrawCallback, like my SnapshotCallback. Unfortunately, the operator() function in my case never gets called in my scene graph, so that way of screen capture doesn't work for me. It's also rather inconvenient, as the rest of the Mutual Information procedure is a rather sequential pipeline.
Other wrappers (https://github.com/xarray/osgRecipes/blob/master/integrations/osgberkelium/osgberkelium.cpp) use methods similar to my "void GetFrame(osg::Image* img)" method, where the image is actively read using "readPixels". That is very convenient for my workflow, but the method always returns a blank image. It doesn't crash, but it doesn't do its job either.
The method that does work is "osg::Image* GetFrame(void)", which returns the bound/attached FBO image. It is similar to the "osgdistortion" example. It does work for rendering one or two images, but after some time rendering and processing get out of sync and the application crashes as follows:
[---FIRST FRAME---]
GraphicsCostEstimator::calibrate(..)
cull_draw() 0x1998ca0
ShaderComposer::~ShaderComposer() 0x35a4d40
Renderer::compile()
OpenGL extension 'GL_ARB_vertex_buffer_object' is supported.
OpenGL extension 'GL_EXT_secondary_color' is supported.
OpenGL extension 'GL_EXT_fog_coord' is supported.
OpenGL extension '' is not supported.
OpenGL extension 'GL_EXT_packed_depth_stencil' is supported.
Setting up osg::Camera::FRAME_BUFFER_OBJECT
end cull_draw() 0x1998ca0
[processing]
[ SECOND FRAME ]
cull_draw() 0x1998ca0
OpenGL extension 'GL_ARB_fragment_program' is supported.
OpenGL extension 'GL_ARB_vertex_program' is supported.
OpenGL extension 'GL_ARB_shader_objects' is supported.
OpenGL extension 'GL_ARB_vertex_shader' is supported.
OpenGL extension 'GL_ARB_fragment_shader' is supported.
OpenGL extension 'GL_ARB_shading_language_100' is supported.
OpenGL extension 'GL_EXT_geometry_shader4' is supported.
OpenGL extension 'GL_EXT_gpu_shader4' is supported.
OpenGL extension 'GL_ARB_tessellation_shader' is supported.
OpenGL extension 'GL_ARB_uniform_buffer_object' is supported.
OpenGL extension 'GL_ARB_get_program_binary' is supported.
OpenGL extension 'GL_ARB_gpu_shader_fp64' is supported.
OpenGL extension 'GL_ARB_shader_atomic_counters' is supported.
glVersion=4.5, isGlslSupported=YES, glslLanguageVersion=4.5
Warning: detected OpenGL error 'invalid operation' at end of SceneView::draw()
end cull_draw() 0x1998ca0
[-FROM 3rd FRAME ONWARDS-]
[workload, matrix setup]
[_viewer.frame()]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[next frame]
[BREAKING]
cull_draw() 0x1998ca0
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
end cull_draw() 0x1998ca0
[more work]
Segmentation fault (core dumped)
So, the question is:
I had a look into the source files from osg for the Viewer-related classes, but I was not able to determine where the error
Warning: detected OpenGL error 'invalid operation' at start of State::apply()
comes from. Any idea where to start looking for it?
For sequential rendering and screen capture, which method is the best to use within OSG?
How can I obtain the mutex of the normal osg::Viewer, so as to sync the renderer with the rest of my pipeline? (The renderer is single-threaded.)
Any other suggestions from experienced OpenSceneGraph off-screen renderers and screen capturers?
As deeper research turned out, releasing the graphics context in the class destructor freed the OpenGL pipeline, BUT it also deallocated the stateset-bound textures of the loaded scene/model, even though the model itself was not discarded (as given in the question, it is re-used in the following passes). So, in further render passes, the render pipeline tried to access OSG assets that had been released along with the GL context.
In code it changed from:
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
_gc->releaseContext();
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
to:
BoundRenderScene::~BoundRenderScene() {
// TODO Auto-generated destructor stub
_viewer.setDone(true);
_viewer.setReleaseContextAtEndOfFrameHint(true);
#ifdef DEBUG
std::cout << "BoundRenderScene deleted." << std::endl;
#endif
}
This resolved the OpenSceneGraph-internal error messages. Now, in order to solve the frame capture problem itself, I implemented the callback from osgprerender:
struct SnapshotCallback : public osg::Camera::DrawCallback
{
public:
inline SnapshotCallback(std::string filepath) : _filepath(filepath), _output_to_file(false), _image(NULL)
{
if(filepath!="")
_output_to_file = true;
_image = new osg::Image();
}
inline virtual void operator() (osg::RenderInfo& renderInfo) const
{
osg::Camera* camera = renderInfo.getCurrentCamera();
osg::Viewport* viewport = camera ? camera->getViewport() : 0;
if(viewport)
{
glReadBuffer(camera->getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline virtual void operator() (const osg::Camera& camera) const
{
osg::Viewport* viewport = camera.getViewport();
if(viewport)
{
glReadBuffer(camera.getDrawBuffer());
_image->allocateImage(int(viewport->width()), int(viewport->height()), 1, GL_RGB, GL_UNSIGNED_BYTE);
_image->readPixels(int(viewport->x()),int(viewport->y()),int(viewport->width()),int(viewport->height()), GL_RGB, GL_UNSIGNED_BYTE);
if(_output_to_file)
{
osgDB::writeImageFile(*reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL)), _filepath);
}
}
}
inline osg::Image* GetImage(void)
{
return reinterpret_cast<osg::Image*>(_image->clone(osg::CopyOp::DEEP_COPY_ALL));
}
protected:
std::string _filepath;
bool _output_to_file;
mutable osg::ref_ptr<osg::Image> _image;
};
Now, with the cloned buffer instead of the actual image buffer (an idea taken from the osgscreencapture example), I do get the real image without memory errors.
For double-buffered rendering, though, I have to somehow render the scene twice for the right buffer to contain the objects' image, but for my use case this is currently less of an issue (I/O-bound rendering, not operation-bound).
So, the main function looks as follows:
BoundRenderScene renderer;
std::vector<osg::Matrixd> poses;
/*
* setting initial parameters
* fill poses with camera positions to render, for registration
*/
renderer._init();
for(uint i = 0; i < poses.size(); i++)
{
renderer.SetCameraMatrix(poses.at(i));
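// Render twice so that, with double buffering, the finished frame ends up in the buffer that is read back (see the note above).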
renderer.NextImage();
renderer.NextImage();
osg::Image* reg_image = renderer.GetImage();
/*
* Do further processing
*/
}

C++ Meshes getting animated when they are not supposed to

I have two meshes. One of them is animated and the other one is not. When I render them, the inanimate mesh gets animated the same way the animated mesh does.
So, say the animated mesh moves to the left and then comes back; my inanimate mesh does the same thing!
This is my class for the inanimate meshes:
class StaticMesh
{
public:
StaticMesh(LPDIRECT3DDEVICE9* device);
~StaticMesh(void);
void Render(void);
void Load(LPCWSTR fileName);
void CleanUp(void);
private:
LPDIRECT3DDEVICE9* d3ddev; // the pointer to the device class
LPD3DXMESH mesh; // define the mesh pointer
D3DMATERIAL9* material; // define the material object
DWORD numMaterials; // stores the number of materials in the mesh
LPDIRECT3DTEXTURE9* texture; // a pointer to a texture
LPD3DXBUFFER bufMeshMaterial;
};
#include "StdAfx.h"
#include "StaticMesh.h"
StaticMesh::StaticMesh(LPDIRECT3DDEVICE9* device)
{
d3ddev=device;
}
StaticMesh::~StaticMesh(void)
{
}
void StaticMesh::Render(void)
{
LPDIRECT3DDEVICE9 device=*d3ddev;
for(DWORD i = 0; i < numMaterials; i++) // loop through each subset
{
device->SetMaterial(&material[i]); // set the material for the subset
device->SetTexture(0, texture[i]); // ...then set the texture
mesh->DrawSubset(i); // draw the subset
}
}
void StaticMesh::Load(LPCWSTR fileName)
{
LPDIRECT3DDEVICE9 device=*d3ddev;
D3DXLoadMeshFromX(fileName, // load this file
D3DXMESH_SYSTEMMEM, // load the mesh into system memory
device, // the Direct3D Device
NULL, // we aren't using adjacency
&bufMeshMaterial, // put the materials here
NULL, // we aren't using effect instances
&numMaterials, // the number of materials in this model
&mesh); // put the mesh here
// retrieve the pointer to the buffer containing the material information
D3DXMATERIAL* tempMaterials = (D3DXMATERIAL*)bufMeshMaterial->GetBufferPointer();
// create a new material buffer and texture for each material in the mesh
material = new D3DMATERIAL9[numMaterials];
texture = new LPDIRECT3DTEXTURE9[numMaterials];
for(DWORD i = 0; i < numMaterials; i++) // for each material...
{
// Copy the material
material[i] = tempMaterials[i].MatD3D;
// Set the ambient color for the material (D3DX does not do this)
material[i].Ambient = material[i].Diffuse;
// Create the texture if it exists - it may not
texture[i] = NULL;
if (tempMaterials[i].pTextureFilename)
{
D3DXCreateTextureFromFileA(device, tempMaterials[i].pTextureFilename,&texture[i]);
}
}
}
void StaticMesh::CleanUp(void)
{
mesh->Release();
}
The animation comes from a transformation matrix.
You either pass that directly to the device (if using the fixed-function pipeline) or to a shader/effect.
All meshes you draw after the matrix has been set will undergo the same transform.
So, if you want one mesh to stay inanimate, you have to change the transform before drawing that mesh.
Pseudo-code:
SetTransform( world* view * projection );
DrawMesh();
SetTransform( identity * view * projection );
DrawMesh();
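With the fixed-function pipeline used in this code, that amounts to setting a separate world matrix before each draw call, roughly like the sketch below (device is your LPDIRECT3DDEVICE9; animatedWorld, animatedMesh and staticMesh are hypothetical names standing in for your own objects):
D3DXMATRIX animatedWorld;  // updated each frame by your animation code (assumed)
D3DXMATRIX identityWorld;
D3DXMatrixIdentity(&identityWorld);
// (View and projection are set separately via D3DTS_VIEW / D3DTS_PROJECTION.)
// Animated mesh: draw with its own, animated world transform.
device->SetTransform(D3DTS_WORLD, &animatedWorld);
animatedMesh->Render();
// Static mesh: reset the world transform so it is not dragged along by the animation.
device->SetTransform(D3DTS_WORLD, &identityWorld);
staticMesh->Render();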