I'm working with eye images, and what I want to do is take one of these images and apply it as a texture to a sphere. However, I'm having a bit of trouble with the scaling; here's how it looks as of now:
And here's the image I'm working with:
So far I've tried various methods but I still haven't got the desired result.
Here's the code I've been working on:
#include <vtkSmartPointer.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkActor.h>
#include <vtkPolyDataMapper.h>
#include <vtkTransformTextureCoords.h>
#include <vtkTexture.h>
#include <vtkTextureMapToSphere.h>
#include <vtkImageReader2Factory.h>
#include <vtkImageReader.h>
#include <vtkTexturedSphereSource.h>
#include <vtkSphereSource.h>
int main(int argc, char *argv[])
{
std::string fileName = "C:\\VTK\\eyeTextureNew2.jpg";
//Create sphere
vtkSmartPointer<vtkTexturedSphereSource> source =
vtkSmartPointer<vtkTexturedSphereSource>::New();
source->SetPhiResolution(100);
source->SetThetaResolution(100);
// Read texture file
vtkSmartPointer<vtkImageReader2Factory> readerFactory =
vtkSmartPointer<vtkImageReader2Factory>::New();
vtkImageReader2 *imageReader = readerFactory->CreateImageReader2(fileName.c_str());
imageReader->SetFileName(fileName.c_str());
// Create texture
vtkSmartPointer<vtkTexture> texture =
vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(imageReader->GetOutputPort());
vtkSmartPointer<vtkTransformTextureCoords> transformTexture =
vtkSmartPointer<vtkTransformTextureCoords>::New();
transformTexture->SetInputConnection(source->GetOutputPort());
vtkSmartPointer<vtkPolyDataMapper> mapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(transformTexture->GetOutputPort());
vtkSmartPointer<vtkActor> actor =
vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->SetTexture(texture);
vtkSmartPointer<vtkRenderer> renderer =
vtkSmartPointer<vtkRenderer>::New();
renderer->AddActor(actor);
renderer->SetBackground(.1, .2, .3);
vtkSmartPointer<vtkRenderWindow> renderWindow =
vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renWinInteractor =
vtkSmartPointer<vtkRenderWindowInteractor>::New();
renWinInteractor->SetRenderWindow(renderWindow);
renderWindow->Render();
renWinInteractor->Start();
imageReader->Delete();
return EXIT_SUCCESS;
}
Any ideas on what I could be doing wrong or missing? Thanks in advance!
with vedo I get this (but symmetric on the backside):
from vedo import Sphere
s = Sphere().texture('QeXKq.jpg')
s.show()
the texture function actually does this:
def texture(self, tname):
"""Assign a texture to actor from image file or predefined texture tname."""
import os
tmapper = vtk.vtkTextureMapToPlane()
tmapper.AutomaticPlaneGenerationOn()
tmapper.SetInputData(self.polydata())
tmapper.Update()
tc = tmapper.GetOutput().GetPointData().GetTCoords()
self.polydata().GetPointData().SetTCoords(tc)
self.polydata().GetPointData().AddArray(tc)
fn = settings.textures_path + tname + ".jpg"
if os.path.exists(tname):
fn = tname
elif not os.path.exists(fn):
colors.printc("~sad Texture", tname, "not found in", settings.textures_path, c="r")
colors.printc("~pin Available textures:", c="m", end=" ")
for ff in os.listdir(settings.textures_path):
colors.printc(ff.split(".")[0], end=" ", c="m")
print()
return self
if ".png" in fn.lower():
reader = vtk.vtkPNGReader()
elif ".jp" in fn.lower():
reader = vtk.vtkJPEGReader()
else:
colors.printc("~times Supported texture files: PNG or JPG", c="r")
return self
reader.SetFileName(fn)
reader.Update()
img = reader.GetOutput()
atext = vtk.vtkTexture()
atext.SetInputData(img)
self.GetProperty().SetColor(1, 1, 1)
self.SetTexture(atext)
self.Modified()
return self
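If it helps to map this back to C++: below is a minimal, untested sketch of the same idea, using a plain vtkSphereSource and letting vtkTextureMapToPlane generate the texture coordinates (which is what the texture() function above does internally). The image path is just a placeholder.
#include <vtkSmartPointer.h>
#include <vtkSphereSource.h>
#include <vtkTextureMapToPlane.h>
#include <vtkJPEGReader.h>
#include <vtkTexture.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
int main(int, char *[])
{
// Sphere geometry
auto sphere = vtkSmartPointer<vtkSphereSource>::New();
sphere->SetPhiResolution(100);
sphere->SetThetaResolution(100);
// Generate texture coordinates by projecting onto an automatically fitted plane
auto tmapper = vtkSmartPointer<vtkTextureMapToPlane>::New();
tmapper->SetInputConnection(sphere->GetOutputPort());
tmapper->AutomaticPlaneGenerationOn();
// Load the eye image (placeholder path) and wrap it in a texture
auto reader = vtkSmartPointer<vtkJPEGReader>::New();
reader->SetFileName("eyeTexture.jpg");
auto texture = vtkSmartPointer<vtkTexture>::New();
texture->SetInputConnection(reader->GetOutputPort());
auto mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(tmapper->GetOutputPort());
auto actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
actor->SetTexture(texture);
auto renderer = vtkSmartPointer<vtkRenderer>::New();
renderer->AddActor(actor);
auto renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
auto interactor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
interactor->SetRenderWindow(renderWindow);
renderWindow->Render();
interactor->Start();
return EXIT_SUCCESS;
}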
I've started from the VTK ConstrainedDelaunay2D example and added my own points:
#include <vtkSmartPointer.h>
#include <vtkDelaunay2D.h>
#include <vtkCellArray.h>
#include <vtkProperty.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkPoints.h>
#include <vtkPolyData.h>
#include <vtkPolygon.h>
#include <vtkMath.h>
#include <vtkRenderer.h>
#include <vtkRenderWindow.h>
#include <vtkRenderWindowInteractor.h>
#include <vtkNamedColors.h>
#include <vtkVersionMacros.h> // For version macros
int main(int, char *[])
{
vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();
int ptsHeight = 400;
std::vector<std::vector<int>> pts{ {166, 127},{103, 220},{166, 190},{174, 291},{189, 226},{227, 282},{213, 187},{242, 105},{196, 131},{182, 83} };
for (size_t i = 0; i < pts.size(); i++)
{
// !important: flip y
int x = pts[i][0];
int y = ptsHeight - pts[i][1];
points->InsertNextPoint(x, y, 0);
}
vtkSmartPointer<vtkPolyData> aPolyData = vtkSmartPointer<vtkPolyData>::New();
aPolyData->SetPoints(points);
// Create a cell array to store the polygon in
vtkSmartPointer<vtkCellArray> aCellArray = vtkSmartPointer<vtkCellArray>::New();
// Define a polygonal hole with a clockwise polygon
vtkSmartPointer<vtkPolygon> aPolygon = vtkSmartPointer<vtkPolygon>::New();
for (unsigned int i = 0; i < pts.size(); i++)
{
aPolygon->GetPointIds()->InsertNextId(i);
}
aCellArray->InsertNextCell(aPolygon);
// Create a polydata to store the boundary. The points must be the
// same as the points we will triangulate.
vtkSmartPointer<vtkPolyData> boundary =
vtkSmartPointer<vtkPolyData>::New();
boundary->SetPoints(aPolyData->GetPoints());
boundary->SetPolys(aCellArray);
// Triangulate the grid points
vtkSmartPointer<vtkDelaunay2D> delaunay =
vtkSmartPointer<vtkDelaunay2D>::New();
delaunay->SetInputData(aPolyData);
delaunay->SetSourceData(boundary);
// Visualize
vtkSmartPointer<vtkPolyDataMapper> meshMapper =
vtkSmartPointer<vtkPolyDataMapper>::New();
meshMapper->SetInputConnection(delaunay->GetOutputPort());
vtkSmartPointer<vtkNamedColors> colors =
vtkSmartPointer<vtkNamedColors>::New();
vtkSmartPointer<vtkActor> meshActor =
vtkSmartPointer<vtkActor>::New();
meshActor->SetMapper(meshMapper);
meshActor->GetProperty()->EdgeVisibilityOn();
meshActor->GetProperty()->SetEdgeColor(colors->GetColor3d("Peacock").GetData());
meshActor->GetProperty()->SetInterpolationToFlat();
meshActor->GetProperty()->SetBackfaceCulling(true);
// Create a renderer, render window, and interactor
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
// Add the actor to the scene
renderer->AddActor(meshActor);
//renderer->AddActor(boundaryActor);
renderer->SetBackground(colors->GetColor3d("Mint").GetData());
// Render and interact
renderWindow->SetSize(640, 480);
renderWindow->Render();
renderWindowInteractor->Start();
return EXIT_SUCCESS;
}
I'm experiencing two issues:
I get different results if I flip the Y coordinates: why is that?
Why are there faces pointing in the wrong direction (flipped normals / wrong winding)?
Here's what I mean by the 1st issue:
If I don't flip the Y coordinates I get this:
I get the same effect if I don't flip the Y axis but insert the boundary polygon in reverse order:
for (unsigned int i = 0; i < pts.size(); i++)
{
aPolygon->GetPointIds()->InsertNextId(pts.size() - 1 - i);
}
I don't think I fully understand how the boundary/constraint works.
I thought that the same points should produce the same triangulation whether the vertices are flipped vertically or not. (I suspect the order of the indices changes then?)
Regarding the second issue (unpredictably flipped faces), I'm not sure what the best way forward is. I had a look at the vtkDelaunay2D class and couldn't find anything related.
(I've tried setting projection plane mode to VTK_DELAUNAY_XY_PLANE, but it didn't seem to affect the output)
I've also tried to use vtkPolyDataNormals but got no output:
vtkSmartPointer<vtkPolyDataNormals> normalGenerator = vtkSmartPointer<vtkPolyDataNormals>::New();
normalGenerator->SetInputData(delaunay->GetOutput());
normalGenerator->ComputePointNormalsOff();
normalGenerator->ComputeCellNormalsOn();
normalGenerator->FlipNormalsOn();
normalGenerator->Update();
(normalGenerator's output has 0 cells and points)
Is there a way to compute a constrained Delaunay triangulation for a list of 2D points and ensure all the faces point the same way? (If so, how? Would it be possible to do this with the vtkDelaunay2D class alone, or is it necessary to use other filters?)
Any hints/tips are more than welcome :)
I'm using VTK 8.2 by the way.
Flipping in y effectively reverses the face orientation (what is clockwise becomes anti-clockwise, as in a mirror).
I'm not sure I can reproduce your example above. A quick test in Python seems to give the expected behavior; maybe you can start from this and map it to your C++ version:
import vedo
pts = [
[166, 127],
[103, 220],
[166, 190],
[174, 291],
[189, 226],
[227, 282],
[213, 187],
[242, 105],
[196, 131],
[182, 83],
]
ids = [[2,4,6], [0,2,8]] # faces to erase by pt-index (clockwise)
dly = vedo.delaunay2D(pts, mode='xy', boundaries=ids)
dly.c('grey5').lc('red4').lw(2)
labels = vedo.Points(pts).labels('id').z(1)
vedo.show(labels, dly, axes=1)
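If the above maps back to your C++ code: the empty vtkPolyDataNormals output is most likely just a pipeline issue, because delaunay->GetOutput() is still empty until the filter has executed. A small untested sketch (assuming the delaunay and meshMapper objects from your listing, plus #include <vtkPolyDataNormals.h>):
// Connect vtkPolyDataNormals through the pipeline instead of passing
// delaunay->GetOutput() directly; before delaunay->Update() that output is empty,
// which would explain the 0 cells / 0 points you are seeing.
vtkSmartPointer<vtkPolyDataNormals> normalGenerator =
vtkSmartPointer<vtkPolyDataNormals>::New();
normalGenerator->SetInputConnection(delaunay->GetOutputPort());
normalGenerator->ConsistencyOn(); // reorder cells so the winding is consistent
normalGenerator->ComputeCellNormalsOn();
meshMapper->SetInputConnection(normalGenerator->GetOutputPort());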
We are aiming to replace our previous 3D engine with Qt3D. As a last obstacle we need to implement pixel-correct transparency, and we are currently trying depth peeling as a possible approach. This algorithm requires a deferred (multi-pass) rendering, which can be achieved with QRenderPassFilter and QFilterKey inside an effect.
However, I already have big problems getting the combination of QRenderPassFilter and QFilterKey to work correctly together with the material QDiffuseSpecularMaterial, even if there is just one pass.
This is my source code:
#include <QApplication>
#include <QWidget>
#include <QVBoxLayout>
#include <QFrame>
#include <Qt3DCore/QTransform>
#include <Qt3DRender/QSortPolicy>
#include <Qt3DRender/QRenderSettings>
#include <Qt3DRender/QRenderSurfaceSelector>
#include <Qt3DRender/QViewport>
#include <Qt3DRender/QCamera>
#include <Qt3DRender/QCameraSelector>
#include <Qt3DRender/QClearBuffers>
#include <Qt3DRender/QDirectionalLight>
#include <Qt3DRender/QTexture>
#include <Qt3DExtras/QPlaneMesh>
#include <Qt3DExtras/QDiffuseSpecularMaterial>
#include <Qt3DExtras/Qt3DWindow>
#include <Qt3DRender/QFilterKey>
#include <Qt3DRender/QParameter>
#include <Qt3DRender/QRenderPass>
#include <Qt3DRender/QRenderPassFilter>
#include <Qt3DRender/QTechnique>
#include <QDebug>
int main(int argc, char* argv[])
{
QApplication a(argc, argv);
auto view = new Qt3DExtras::Qt3DWindow();
auto mClearBuffers = new Qt3DRender::QClearBuffers;
auto mMainCameraSelector = new Qt3DRender::QCameraSelector;
mMainCameraSelector->setCamera(view->camera());
auto mRenderSurfaceSelector = new Qt3DRender::QRenderSurfaceSelector;
auto mMainViewport = new Qt3DRender::QViewport;
auto renderPassFilter = new Qt3DRender::QRenderPassFilter;
{
auto filterKey = new Qt3DRender::QFilterKey(renderPassFilter);
filterKey->setName(QStringLiteral("renderingStyle"));
filterKey->setValue(QStringLiteral("forward"));
// Adding the filterKey to the renderPassFilter hides the plane
// Name and Value of filterKey matches the FilterKey inside the QDiffuseSpecularMaterial
renderPassFilter->addMatch(filterKey); // Removing this line shows the plane mesh
mClearBuffers->setClearColor(Qt::lightGray);
mClearBuffers->setBuffers(Qt3DRender::QClearBuffers::BufferType::ColorDepthBuffer);
mMainCameraSelector->setParent(mClearBuffers);
mClearBuffers->setParent(renderPassFilter);
}
renderPassFilter->setParent(mRenderSurfaceSelector);
mRenderSurfaceSelector->setParent(mMainViewport);
view->setActiveFrameGraph(mMainViewport);
view->activeFrameGraph()->dumpObjectTree();
auto rootEntity = new Qt3DCore::QEntity();
view->setRootEntity(rootEntity);
view->camera()->lens()->setPerspectiveProjection(45.0f, 1., 0.1f, 10000.0f);
view->camera()->setPosition(QVector3D(0, 2, 0));
view->camera()->setUpVector(QVector3D(0, 1, 0));
view->camera()->setViewCenter(QVector3D(0, 0, 0));
auto planeEntity = new Qt3DCore::QEntity(rootEntity);
auto meshMaterial = new Qt3DExtras::QDiffuseSpecularMaterial;
meshMaterial->setDiffuse(QColor("#ff00ff"));
planeEntity->addComponent(meshMaterial);
auto mesh = new Qt3DExtras::QPlaneMesh;
mesh->setWidth(0.3);
mesh->setHeight(0.3);
planeEntity->addComponent(mesh);
auto container = QWidget::createWindowContainer(view);
QFrame frame;
frame.setLayout(new QVBoxLayout);
frame.layout()->addWidget(container);
frame.resize(QSize(400, 300));
frame.show();
return a.exec();
}
The console outputs my framegraph as:
Qt3DRender::QViewport::
Qt3DRender::QRenderSurfaceSelector::
Qt3DRender::QRenderPassFilter::
Qt3DRender::QFilterKey::
Qt3DRender::QClearBuffers::
Qt3DRender::QCameraSelector::
Now, if I remove the line
renderPassFilter->addMatch(filterKey);
everything works as expected and I see my simple plane mesh.
However, with this line added (which should not filter out anything), the plane mesh is no longer displayed.
I'm really running out of ideas as to what I might be doing wrong here. How can I make my small program work with the renderPassFilter, and where is my error?
I also don't really understand the purpose of the name and value settings of the QFilterKey: which of the two is needed to filter out certain effects?
After carefully studying my application, and particularly the QDiffuseSpecularMaterial, I figured out that the QFilterKey inside the QDiffuseSpecularMaterial is not added to the QRenderPass object but rather to the QTechnique, which I found rather obscure.
Using a QTechniqueFilter instead of a QRenderPassFilter therefore makes the program work as expected. Changing the string forward to something different, e.g. xxx, hides the plane as expected.
Adding the line
meshMaterial->dumpObjectTree();
indeed gave me the clue:
Qt3DExtras::QDiffuseSpecularMaterial::
Qt3DRender::QShaderProgramBuilder::
Qt3DRender::QShaderProgram::
Qt3DRender::QShaderProgramBuilder::
Qt3DRender::QShaderProgram::
Qt3DRender::QFilterKey::
Qt3DRender::QEffect::
Qt3DRender::QTechnique::
Qt3DRender::QRenderPass::
Qt3DRender::QNoDepthMask::
Qt3DRender::QBlendEquationArguments::
Qt3DRender::QBlendEquation::
Qt3DRender::QTechnique::
Qt3DRender::QRenderPass::
Qt3DRender::QTechnique::
Qt3DRender::QRenderPass::
Qt3DRender::QParameter::
Qt3DRender::QParameter::
Qt3DRender::QParameter::
Qt3DRender::QParameter::
Qt3DRender::QParameter::
So dumpObjectTree() seems to be a good off-the-shelf debugging tool when dealing with Qt3D, and Qt in general. Here is the full working example:
#include <QApplication>
#include <QWidget>
#include <QVBoxLayout>
#include <QFrame>
#include <Qt3DCore/QTransform>
#include <Qt3DRender/QSortPolicy>
#include <Qt3DRender/QRenderSettings>
#include <Qt3DRender/QRenderSurfaceSelector>
#include <Qt3DRender/QViewport>
#include <Qt3DRender/QCamera>
#include <Qt3DRender/QCameraSelector>
#include <Qt3DRender/QClearBuffers>
#include <Qt3DRender/QTechniqueFilter>
#include <Qt3DRender/QDirectionalLight>
#include <Qt3DRender/QTexture>
#include <Qt3DExtras/QPlaneMesh>
#include <Qt3DExtras/QDiffuseSpecularMaterial>
#include <Qt3DExtras/Qt3DWindow>
#include <Qt3DRender/QFilterKey>
#include <Qt3DRender/QParameter>
#include <Qt3DRender/QRenderPass>
#include <Qt3DRender/QRenderPassFilter>
#include <Qt3DRender/QTechnique>
#include <QDebug>
int main(int argc, char* argv[])
{
QApplication a(argc, argv);
auto view = new Qt3DExtras::Qt3DWindow();
auto mClearBuffers = new Qt3DRender::QClearBuffers;
auto mMainCameraSelector = new Qt3DRender::QCameraSelector;
mMainCameraSelector->setCamera(view->camera());
auto mRenderSurfaceSelector = new Qt3DRender::QRenderSurfaceSelector;
auto mMainViewport = new Qt3DRender::QViewport;
auto renderPassFilter = new Qt3DRender::QTechniqueFilter;
{
auto filterKey = new Qt3DRender::QFilterKey(renderPassFilter);
filterKey->setName(QStringLiteral("renderingStyle"));
filterKey->setValue(QStringLiteral("forward"));
// Name and value of the filterKey match the QFilterKey inside the
// QDiffuseSpecularMaterial (attached to its QTechnique), so with a
// QTechniqueFilter the plane is rendered as expected
renderPassFilter->addMatch(filterKey);
mClearBuffers->setClearColor(Qt::lightGray);
mClearBuffers->setBuffers(Qt3DRender::QClearBuffers::BufferType::ColorDepthBuffer);
mMainCameraSelector->setParent(mClearBuffers);
mClearBuffers->setParent(renderPassFilter);
}
renderPassFilter->setParent(mRenderSurfaceSelector);
mRenderSurfaceSelector->setParent(mMainViewport);
view->setActiveFrameGraph(mMainViewport);
view->activeFrameGraph()->dumpObjectTree();
auto rootEntity = new Qt3DCore::QEntity();
view->setRootEntity(rootEntity);
view->camera()->lens()->setPerspectiveProjection(45.0f, 1., 0.1f, 10000.0f);
view->camera()->setPosition(QVector3D(0, 2, 0));
view->camera()->setUpVector(QVector3D(0, 1, 0));
view->camera()->setViewCenter(QVector3D(0, 0, 0));
auto planeEntity = new Qt3DCore::QEntity(rootEntity);
auto meshMaterial = new Qt3DExtras::QDiffuseSpecularMaterial;
meshMaterial->setDiffuse(QColor("#ff00ff"));
planeEntity->addComponent(meshMaterial);
auto mesh = new Qt3DExtras::QPlaneMesh;
mesh->setWidth(0.3);
mesh->setHeight(0.3);
planeEntity->addComponent(mesh);
auto container = QWidget::createWindowContainer(view);
QFrame frame;
frame.setLayout(new QVBoxLayout);
frame.layout()->addWidget(container);
frame.resize(QSize(400, 300));
frame.show();
return a.exec();
}
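For completeness: if per-pass filtering is eventually needed (e.g. to select the individual depth-peeling passes), my understanding is that the QFilterKey then has to be attached to a QRenderPass of a material you control, so that a QRenderPassFilter in the framegraph can match it. A rough, untested sketch; the name/value strings are arbitrary and only have to agree on both sides:
// Key attached to a render pass of a custom material
auto passKey = new Qt3DRender::QFilterKey;
passKey->setName(QStringLiteral("pass"));
passKey->setValue(QStringLiteral("peel"));
auto peelPass = new Qt3DRender::QRenderPass;
peelPass->addFilterKey(passKey);
// peelPass->setShaderProgram(...); // shader for this pass
// Matching node in the framegraph
auto peelFilter = new Qt3DRender::QRenderPassFilter;
auto matchKey = new Qt3DRender::QFilterKey(peelFilter);
matchKey->setName(QStringLiteral("pass"));
matchKey->setValue(QStringLiteral("peel"));
peelFilter->addMatch(matchKey);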
I am working on some Qt3D code. I create a Qt3DWindow (named view), try to add a root entity (named rootEntity) to it, and put a cube entity in it like this:
m_cubeEntity = new Qt3DCore::QEntity;
...
Qt3DExtras::QCuboidMesh *cubeMesh = new Qt3DExtras::QCuboidMesh;
Qt3DRender::QMaterial *cubeMaterial = new Qt3DRender::QMaterial;
Qt3DCore::QTransform *cubeTransform = new Qt3DCore::QTransform;
Qt3DRender::QObjectPicker *objectPicker = new Qt3DRender::QObjectPicker;
...
m_cubeEntity->addComponent(cubeMesh);
m_cubeEntity->addComponent(cubeMaterial);
m_cubeEntity->addComponent(cubeTransform);
m_cubeEntity->addComponent(objectPicker);
m_cubeEntity->setParent(m_rootEntity);
Everything works fine.
And then I found that using PickingSettings.BoundingVolumePicking to pick my cube entity is inaccurate. I want PickingSettings.TrianglePicking instead.
So then I do it like this:
m_renderSettings = new Qt3DRender::QRenderSettings();
m_renderSettings->pickingSettings()->setPickMethod(Qt3DRender::QPickingSettings::TrianglePicking);
m_renderSettings->pickingSettings()->setPickResultMode(Qt3DRender::QPickingSettings::AllPicks);
m_renderer = new Qt3DExtras::QForwardRenderer();
m_renderer->setClearColor(Qt::lightGray);
m_renderSettings->setActiveFrameGraph(m_renderer);
m_rootEntity->addComponent(m_renderSettings);
But now nothing is rendered. If I remove m_renderSettings from the rootEntity, everything renders correctly again.
How do I set the render settings correctly for the root entity?
Most likely you just set a bad framegraph. You can easily display the standard framegraph using dumpObjectTree(), which tends to be a very useful function for debugging the framegraph of Qt3D.
For your simple use case it suffices to use the QRenderSettings that the Qt3DWindow already provides (via view->renderSettings()), whose active framegraph is the default forward renderer.
Just try the following simple app, which contains your desired QObjectPicker.
#include <QApplication>
#include <Qt3DCore/QTransform>
#include <Qt3DRender/QPickEvent>
#include <Qt3DRender/QCamera>
#include <Qt3DRender/QFrameGraphNode>
#include <Qt3DRender/QObjectPicker>
#include <Qt3DExtras/QDiffuseSpecularMaterial>
#include <Qt3DExtras/QCuboidMesh>
#include <Qt3DExtras/Qt3DWindow>
#include <Qt3DRender/QRenderSettings>
#include <QDebug>
int main(int argc, char* argv[])
{
QApplication a(argc, argv);
auto view = new Qt3DExtras::Qt3DWindow();
auto rootEntity = new Qt3DCore::QEntity();
view->setRootEntity(rootEntity);
// Shows your framegraph! Simple forward renderer!
view->activeFrameGraph()->dumpObjectTree();
auto rendersettings=view->renderSettings();
rendersettings->pickingSettings()->setPickMethod(Qt3DRender::QPickingSettings::TrianglePicking);
rendersettings->pickingSettings()->setPickResultMode(Qt3DRender::QPickingSettings::AllPicks);
auto cameraEntity = view->camera();
cameraEntity->lens()->setPerspectiveProjection(45.0f, 1., 0.1f, 10000.0f);
cameraEntity->setPosition(QVector3D(0, 2, 0));
cameraEntity->setUpVector(QVector3D(0, 1, 0));
cameraEntity->setViewCenter(QVector3D(0, 0, 0));
auto cubeEntity = new Qt3DCore::QEntity(rootEntity);
auto cubeMesh = new Qt3DExtras::QCuboidMesh;
cubeMesh->setXExtent(1.);
cubeMesh->setYExtent(1.);
auto cubeMaterial = new Qt3DExtras::QDiffuseSpecularMaterial;
auto objectPicker = new Qt3DRender::QObjectPicker;
QObject::connect(objectPicker, &Qt3DRender::QObjectPicker::clicked, [](Qt3DRender::QPickEvent* pick) {
qDebug() << pick;
});
cubeEntity->addComponent(cubeMesh);
cubeEntity->addComponent(cubeMaterial);
cubeEntity->addComponent(objectPicker);
view->show();
return a.exec();
}
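As a small follow-up: with TrianglePicking active, the event passed to the slot should be castable to Qt3DRender::QPickTriangleEvent if you need the picked triangle, for example:
#include <Qt3DRender/QPickTriangleEvent>
QObject::connect(objectPicker, &Qt3DRender::QObjectPicker::clicked,
[](Qt3DRender::QPickEvent* pick) {
// With triangle picking the event also carries the picked triangle index
if (auto tri = qobject_cast<Qt3DRender::QPickTriangleEvent*>(pick))
qDebug() << "triangle" << tri->triangleIndex() << "at" << tri->worldIntersection();
});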
My company uses Qt3D to display its CAD models. We tried to use the function QCamera::viewEntity(Qt3DCore::QEntity *entity) in order to compute the bounding sphere of a given entity and also to fit the entity to the screen.
Now we have stumbled across a seemingly unsolvable problem in the case of empty QEntity nodes. I'll call a node empty if it does not contain any vertices/points at all. In this case I expected that it would be neglected in the bounding-volume computation. Instead, it seems that it is treated as if it had a bounding sphere with center (0, 0, 0) and radius 0.
The following code illustrates the issue:
main.cpp
#include <QApplication>
#include <QWidget>
#include <QVBoxLayout>
#include <QFrame>
#include <Qt3DRender/QRenderSettings>
#include <Qt3DCore/QTransform>
#include <Qt3DRender/QCamera>
#include <Qt3DExtras/QSphereMesh>
#include <Qt3DExtras/QDiffuseSpecularMaterial>
#include <Qt3DExtras/QForwardRenderer>
#include <Qt3DExtras/Qt3DWindow>
#include <Qt3DRender/QCamera>
#include <Qt3DRender/QCameraLens>
#include <QPushButton>
Qt3DCore::QEntity* createSphereMesh()
{
auto sphereMat = new Qt3DExtras::QDiffuseSpecularMaterial;
sphereMat->setDiffuse(QColor(Qt::blue));
auto mesh = new Qt3DExtras::QSphereMesh();
mesh->setRadius(0.5);
auto meshEntity = new Qt3DCore::QEntity;
meshEntity->addComponent(mesh);
meshEntity->addComponent(sphereMat);
return meshEntity;
}
int main(int argc, char* argv[])
{
QApplication a(argc, argv);
auto view = new Qt3DExtras::Qt3DWindow();
view->defaultFrameGraph()->setClearColor(QColor(127, 127, 127));
auto settings = view->renderSettings();
settings->setActiveFrameGraph(view->activeFrameGraph());
auto rootEntity = new Qt3DCore::QEntity();
view->setRootEntity(rootEntity);
auto sphere1 = createSphereMesh();
sphere1->setParent(rootEntity);
auto trafo1 = new Qt3DCore::QTransform;
trafo1->setTranslation(QVector3D(20, 10, 0));
sphere1->addComponent(trafo1);
auto sphere2 = createSphereMesh();
sphere2->setParent(rootEntity);
auto trafo2 = new Qt3DCore::QTransform;
trafo2->setTranslation(QVector3D(20, -10, 0));
sphere2->addComponent(trafo2);
QObject::connect(view->camera()->lens(), &Qt3DRender::QCameraLens::viewSphere, [&](const QVector3D& center, float radius) {
qDebug() << "Bounding Sphere:" << center << radius;
auto boundingSphereEntity = new Qt3DCore::QEntity;
auto sphereMat = new Qt3DExtras::QDiffuseSpecularMaterial;
sphereMat->setAlphaBlendingEnabled(true);
sphereMat->setDiffuse(QColor(255,255,255,80));
auto mesh = new Qt3DExtras::QSphereMesh();
mesh->setRadius(radius);
boundingSphereEntity->addComponent(sphereMat);
boundingSphereEntity->addComponent(mesh);
auto trafoAll = new Qt3DCore::QTransform;
trafoAll->setTranslation(center);
boundingSphereEntity->addComponent(trafoAll);
boundingSphereEntity->setParent(rootEntity);
});
auto container = QWidget::createWindowContainer(view);
auto viewAllBtn = new QPushButton("View All");
QObject::connect(viewAllBtn, &QPushButton::clicked, [&]() {
view->camera()->viewAll();
});
QFrame frame;
frame.setFixedSize(500, 500);
frame.setLayout(new QVBoxLayout);
frame.layout()->addWidget(container);
frame.layout()->addWidget(viewAllBtn);
frame.show();
return a.exec();
}
I have two blue spheres with radius 0.5 at a distance of 20 from each other. I'm expecting a bounding sphere with center (20, 0, 0) and radius 10.5.
Instead the program prints:
Bounding Sphere: QVector3D(12.1432, 2.14455, -1.32699e-08) 14.9644
It seems that the value 2.14455 really comes from nowhere and that the computed bounding sphere is somewhat unpredictable.
If I replace the translation QVector3D(20, 10, 0) by QVector3D(0, 10, 0), the result is as expected.
How can I exclude the root entity from my bounding-volume computation?
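For reference, here is a small sketch of the bounding-sphere arithmetic I am expecting (the helper names are mine, just for illustration); as a workaround I could compute the union of the per-entity spheres myself and pass it to QCamera::viewSphere:
#include <QVector3D>
// Minimal enclosing sphere of two spheres; for the two spheres above this
// yields center (20, 0, 0) and radius 10.5.
struct BoundingSphere { QVector3D center; float radius; };
BoundingSphere merge(const BoundingSphere& a, const BoundingSphere& b)
{
const QVector3D d = b.center - a.center;
const float dist = d.length();
if (dist + b.radius <= a.radius) return a; // b is inside a
if (dist + a.radius <= b.radius) return b; // a is inside b
const float r = 0.5f * (dist + a.radius + b.radius);
return { a.center + d * ((r - a.radius) / dist), r };
}
// Usage (fit the camera to the merged sphere):
// view->camera()->viewSphere(total.center, total.radius);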
I am trying to put the example provided by VTK here into a C++ class. This example reads an STL file and visualizes it in a window.
Below is my code for which I get no compiler or run-time error. My input STL file is read correctly but my code exits without producing any visualization of the STL object.
In my C++ class, I organized the VTK example code by separating the reader from the visualizer and putting them into two functions in the class. The visualizer uses a mapper, actor, and a renderer to visualize the STL object. However, I am not able to get the code to visualize my STL object.
The only way I am able to get my C++ class to work is to copy the code of the visualize function into my readSTL function; then I can see the window with my STL object rendered in it.
Could someone kindly give me some hints how I can perform the visualization in a separate function in the class?
#include <vtkPolyData.h>
#include <vtkSTLReader.h>
#include <vtkSmartPointer.h>
#include <vtkPolyDataMapper.h>
#include <vtkActor.h>
#include <vtkRenderWindow.h>
#include <vtkRenderer.h>
#include <vtkRenderWindowInteractor.h>
class triangulation
{
public:
vtkIdType numberOfFaces;
// Constructor
triangulation(void);
triangulation(std::string filename);
void setFilename(std::string filename);
std::string getFilename(void);
// Getters
vtkSmartPointer<vtkPolyData> getMesh(void);
vtkSmartPointer<vtkCellArray> getPolys(void);
vtkSmartPointer<vtkPoints> getPoints(void);
vtkSmartPointer<vtkDataArray> getDataArray(void);
vtkIdType getNumberofFaces(void);
// Visualizer
void visualize(void);
// Reader
void readSTL(void);
private:
std::string stlFilename;
vtkSmartPointer<vtkPolyData> mesh;
vtkSmartPointer<vtkSTLReader> reader;
vtkSmartPointer<vtkCellArray> polys;
vtkSmartPointer<vtkPoints> points;
vtkSmartPointer<vtkDataArray> dataArray;
};
triangulation::triangulation()
{
//
}
triangulation::triangulation(std::string filename)
{
setFilename(filename);
}
void triangulation::readSTL(void)
{
vtkSmartPointer<vtkSTLReader> reader = vtkSmartPointer<vtkSTLReader>::New();
reader->SetFileName(stlFilename.c_str());
reader->Update();
mesh = reader->GetOutput();
polys = mesh->GetPolys();
points = mesh->GetPoints();
numberOfFaces = mesh->GetNumberOfCells();
}
void triangulation::setFilename(std::string filename)
{
stlFilename = filename;
}
std::string triangulation::getFilename(void)
{
return stlFilename;
}
vtkSmartPointer<vtkPolyData> triangulation::getMesh(void)
{
return mesh;
}
vtkSmartPointer<vtkCellArray> triangulation::getPolys(void)
{
return polys;
}
vtkIdType triangulation::getNumberofFaces(void)
{
return numberOfFaces;
}
vtkSmartPointer<vtkPoints> triangulation::getPoints(void)
{
return points;
}
vtkSmartPointer<vtkDataArray> triangulation::getDataArray(void)
{
return dataArray;
}
void triangulation::visualize(void)
{
// Visualize
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(reader->GetOutputPort());
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->SetMapper(mapper);
vtkSmartPointer<vtkRenderer> renderer = vtkSmartPointer<vtkRenderer>::New();
vtkSmartPointer<vtkRenderWindow> renderWindow = vtkSmartPointer<vtkRenderWindow>::New();
renderWindow->AddRenderer(renderer);
vtkSmartPointer<vtkRenderWindowInteractor> renderWindowInteractor = vtkSmartPointer<vtkRenderWindowInteractor>::New();
renderWindowInteractor->SetRenderWindow(renderWindow);
renderer->AddActor(actor);
renderer->SetBackground(.3, .6, .3); // Background color green
renderWindow->Render();
renderWindowInteractor->Start();
}
int main(int argc, char *argv[])
{
if (argc != 2)
{
cout << "Required parameters: Filename" << endl;
return EXIT_FAILURE;
}
std::string inputFilename = argv[1];
cout << inputFilename << endl;
triangulation *tr = new triangulation(inputFilename);
tr->readSTL();
cout << "Number of Faces = " << tr->getNumberofFaces() << endl;
vtkSmartPointer<vtkPolyData> mesh = tr->getMesh();
vtkSmartPointer<vtkCellArray> polys = tr->getPolys();
vtkSmartPointer<vtkPoints> points = tr->getPoints();
vtkSmartPointer<vtkDataArray> data = tr->getDataArray();
//mesh->Print(cout);
// polys->Print(cout);
// points->Print(cout);
// data->Print(cout);
tr->visualize();
delete tr;
return EXIT_SUCCESS;
}
Here is my STL object that I used to test my code:
"myobject.stl":
solid STL generated by MeshLab
facet normal -4.919344e-01 2.986337e-01 8.178133e-01
outer loop
vertex -1.265660e+00 4.756133e+00 1.702858e-01
vertex -1.649185e+00 4.810246e+00 -8.017353e-02
vertex -1.602208e+00 4.484959e+00 6.686619e-02
endloop
endfacet
facet normal -5.898571e-01 2.603427e-01 7.643889e-01
outer loop
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
vertex -1.602208e+00 4.484959e+00 6.686619e-02
vertex -1.649185e+00 4.810246e+00 -8.017353e-02
endloop
endfacet
facet normal -6.398674e-01 2.007942e-01 7.417893e-01
outer loop
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
vertex -1.845646e+00 4.479523e+00 -1.416520e-01
vertex -1.602208e+00 4.484959e+00 6.686619e-02
endloop
endfacet
facet normal -6.901410e-01 1.854371e-01 6.995131e-01
outer loop
vertex -2.043903e+00 4.838252e+00 -4.323493e-01
vertex -1.845646e+00 4.479523e+00 -1.416520e-01
vertex -1.861084e+00 4.821712e+00 -2.475956e-01
endloop
endfacet
endsolid vcg
Your code will work if you simply replace the following line in triangulation::readSTL():
// Problem: local variable 'reader' shadows class member 'reader'.
vtkSmartPointer<vtkSTLReader> reader = vtkSmartPointer<vtkSTLReader>::New();
// Solution:
reader = vtkSmartPointer<vtkSTLReader>::New();
Because the class member reader remains uninitialized (a null vtkSmartPointer), the application crashes with a segfault when it is accessed in triangulation::visualize().
Hint: you probably would not have encountered this problem if you consistently used this-> or a naming prefix (m_ or _) to refer to class members. See this SO post for some explanations.
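Alternatively, since readSTL() already stores the reader output in the mesh member, triangulation::visualize() could be made independent of the reader by mapping the stored polydata directly (a small variation, not the only possible fix):
// In triangulation::visualize(), instead of mapper->SetInputConnection(reader->GetOutputPort()):
mapper->SetInputData(mesh);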