Why is the model not displayed in the center of the screen? — C++

I want to show a point cloud with VTK. I inherit a class named VTKWidget from QVTKOpenGLNativeWidget, its constructor is that:
VTKWidget::VTKWidget(QVTKOpenGLNativeWidget* parent)
    : QVTKOpenGLNativeWidget(parent)
{
    // NOTE(review): setDefaultFormat() only affects surfaces created AFTER it
    // runs; it is normally called once in main() before the QApplication is
    // constructed — TODO confirm it is not needed there instead of here.
    QSurfaceFormat::setDefaultFormat(QVTKOpenGLNativeWidget::defaultFormat());

    _renderWindow = vtkSmartPointer<vtkGenericOpenGLRenderWindow>::New();
    _render = vtkSmartPointer<vtkRenderer>::New();
    _actor = vtkSmartPointer<vtkActor>::New();
    _points = vtkSmartPointer<vtkPoints>::New();
    _mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
    _polyData = vtkSmartPointer<vtkPolyData>::New();
    _verts = vtkSmartPointer<vtkCellArray>::New();

    // Wire up the pipeline: points/verts -> polydata -> mapper -> actor.
    _polyData->SetPoints(_points);
    _polyData->SetVerts(_verts);
    _mapper->SetInputData(_polyData);
    _actor->SetMapper(_mapper);
    _actor->GetProperty()->SetColor(1.0, 0.0, 0.0); // red points
    _actor->GetProperty()->SetPointSize(2.0f);

    _render->AddActor(_actor);
    double color[] = {0.5, 0.5, 0.5};
    _render->SetBackground(color);
    _renderWindow->AddRenderer(_render);

    // Fix: attach the render window once, here. Moving this call into
    // updateData() only appeared to fix the centering as a side effect;
    // the real framing fix is _render->ResetCamera() after the data changes.
    setRenderWindow(_renderWindow);
}
Points will be modified in the function updateData, the updateData function is that:
void VTKWidget::updateData(const QVector<QVector<QVector3D>>& data)
{
setRenderWindow(_renderWindow);
_points->Reset();
long long w = data.size();
long long h = data[0].size();
_points->SetNumberOfPoints(w * h);
_polyData->Reset();
_verts->Reset();
for (int i = 0; i < data.size(); ++i)
{
for (int j = 0; j < data[0].size(); ++j)
{
vtkIdType pid;
pid = _points->InsertNextPoint(data[i][j].x(), data[i][j].y(), data[i][j].z());
_verts->InsertNextCell(1, &pid);
}
}
_polyData->Modified();
_mapper->Update();
}
The question: if setRenderWindow(_renderWindow); is called in the constructor, the point cloud is not shown in the center of this widget, but it is centered if the call is made in the function updateData. Why?

If you want to see the whole point cloud at the end of updateData, you should call _render->ResetCamera()
(https://vtk.org/doc/nightly/html/classvtkRenderer.html#ae8055043e676defbbacff6f1ea65ad1e).
The fact that the location of the setRenderWindow() call impacts the rendering is just a side effect.

Related

gluPartialDisk in osg/osgEarth

I've been trying to create an openGL gluDisk like object in osgEarth. So far I've attempted to do the following (Edited, this is the correct answer):
void ViewDriver::drawCircZone(double lat, double lon, double innerRadius, double outerRadius, QColor color, double beginAngle, double endAngle){
GeometryFactory g;
osg::ref_ptr<osgEarth::Geometry> outerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(outerRadius), beginAngle, endAngle);
osg::ref_ptr<osgEarth::Geometry> innerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(innerRadius), beginAngle, endAngle);
osg::Vec3dArray* outerCircArray = outerCircleGeom->createVec3dArray();
osg::Vec3dArray* innerCircArray = innerCircleGeom->createVec3dArray();
Vec3dVector* diskVec = new Vec3dVector;
for(int i = 0; i < outerCircArray->size() - 1; i++){
diskVec->push_back((*outerCircArray)[i]);
}
//This is important for closing the shape and not giving it a Pac-Man-like mouth
diskVec->push_back((*outerCircArray)[0]);
//This is how you make a "hole", by iterating backwards
for(int i = innerCircArray->size() - 1; i >= 0; i--){
diskVec->push_back((*innerCircArray)[i]);
}
osg::ref_ptr<osgEarth::Symbology::Ring> diskRing = new Ring(diskVec);
diskRing->close();
osg::ref_ptr<Feature> circFeature = new Feature(diskRing, view->getMapViewer()->geoSRS);
Style circStyle;
circStyle.getOrCreate<PolygonSymbol>()->outline() = true;
circStyle.getOrCreate<PolygonSymbol>()->fill()->color() = Color(color.red()/255.0, color.green()/255.0, color.blue()/255.0, 1.0);
circStyle.getOrCreate<AltitudeSymbol>()->clamping() = AltitudeSymbol::CLAMP_RELATIVE_TO_TERRAIN;
circStyle.getOrCreate<AltitudeSymbol>()->technique() = AltitudeSymbol::TECHNIQUE_DRAPE;
osg::ref_ptr<FeatureNode> circNode = new FeatureNode(circFeature, circStyle);
circNode->setDynamic(true);
view->getMapNode()->addChild(circNode);
}
What stumped me originally was that I don't have a lot of graphics knowledge. Somewhere I read that when drawing outlines, do it in clockwise direction. When drawing outlines in counter clockwise direction they will "cut-out" or create a "hole" when combined with clockwise-drawn points. I was filling the "hole" outline with the smaller circle's point in a clockwise direction originally when testing that method which was why it didn't work.
void ViewDriver::drawCircZone(double lat, double lon, double innerRadius, double outerRadius, QColor color, double beginAngle, double endAngle){
GeometryFactory g;
osg::ref_ptr<osgEarth::Geometry> outerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(outerRadius), beginAngle, endAngle);
osg::ref_ptr<osgEarth::Geometry> innerCircleGeom = g.createArc(osg::Vec3d(lat, lon, 0), convertFromMetersToMercDeg(innerRadius), beginAngle, endAngle);
osg::Vec3dArray* outerCircArray = outerCircleGeom->createVec3dArray();
osg::Vec3dArray* innerCircArray = innerCircleGeom->createVec3dArray();
Vec3dVector* diskVec = new Vec3dVector;
for(int i = 0; i < outerCircArray->size() - 1; i++){
diskVec->push_back((*outerCircArray)[i]);
}
//This is important for closing the shape and not giving it a Pac-Man-like mouth
diskVec->push_back((*outerCircArray)[0]);
for(int i = innerCircArray->size() - 1; i >= 0; i--){
diskVec->push_back((*innerCircArray)[i]);
}
osg::ref_ptr<osgEarth::Symbology::Ring> diskRing = new Ring(diskVec);
diskRing->close();
osg::ref_ptr<Feature> circFeature = new Feature(diskRing, view->getMapViewer()->geoSRS);
Style circStyle;
circStyle.getOrCreate<PolygonSymbol>()->outline() = true;
circStyle.getOrCreate<PolygonSymbol>()->fill()->color() = Color(color.red()/255.0, color.green()/255.0, color.blue()/255.0, 1.0);
circStyle.getOrCreate<AltitudeSymbol>()->clamping() = AltitudeSymbol::CLAMP_RELATIVE_TO_TERRAIN;
circStyle.getOrCreate<AltitudeSymbol>()->technique() = AltitudeSymbol::TECHNIQUE_DRAPE;
osg::ref_ptr<FeatureNode> circNode = new FeatureNode(circFeature, circStyle);
circNode->setDynamic(true);
view->getMapNode()->addChild(circNode);
}

OpenCasCade BrepFeat_MakeCylindricalHole(); function

Currently I'm working on a project to drill some cylindrical holes on to a given shape.
So for do that I did use BRepFeat_MakeCylindricalHole(); function and used Standard_EXPORT void PerformThruNext (const Standard_Real Radius, const Standard_Boolean WithControl = Standard_True);
Because in this scenario I don't have the depth of the cylinder; using PerformThruNext, the hole is drilled from the top face through to the bottom face.
The user provides the start and end points, and the holes are drilled using them.
1. When the start and end points have decimal fractions (e.g. gp_Pnt StartPnt(328.547, -4.325, 97.412); gp_Pnt EndPnt(291.841, -7.586, 101.651);), the face is not drilled correctly.
2. The same thing happens when the diameter is increased to a larger value.
The following is my code; any help would be highly appreciated. Thanks!
void COCCAPPDoc::OnBox2()
{
ofstream out;
out.open("C:/Users/Dell/Desktop/Blade/areaInit.txt");
CString str;
TopoDS_Shape bladeWithCore,DrilledShape,DrilledShape2;
/* IMPORT bladeWithCore.brep FILE */
Standard_CString strFile = "C:/Users/Dell/Desktop/Blade/bladeWithCore.brep";
BRep_Builder brepS;
BRepTools::Read(bladeWithCore,strFile,brepS);
TopoDS_Face Face_52;
Standard_CString strFace = "C:/Users/Dell/Desktop/Blade/Face_52.brep";
BRep_Builder brepF;
BRepTools::Read(Face_52,strFace,brepF);
BRepClass3d_SolidClassifier classify;
classify.Load(bladeWithCore);
classify.PerformInfinitePoint(1.0e-6);
TopAbs_State state = classify.State();
if(state==TopAbs_IN) bladeWithCore.Reverse();
DrilledShape = bladeWithCore;
gp_Vec v1(gp_Pnt( 330,11,108),gp_Pnt(291,4,109)); //FINDING VECTOR FROM START TO THE END POINT
gp_Pnt point(330,11,108);
gp_Pnt startpoint (330,11,108);
TopoDS_Vertex aV1 = BRepBuilderAPI_MakeVertex (startpoint);
BRepTools::Write(aV1,"C:/Users/Dell/Desktop/Blade/startpoint.brep");
/* gp_Vec v1(gp_Pnt( 330.208114,11.203318,108.480255),gp_Pnt(291.103558,4.265285,109.432663));
gp_Pnt point(330.208114,11.203318,108.480255);*/
gp_Dir d1(v1); //GETTING DIRECTION USING FOUND VECTOR
gp_Vec UnitVector(d1); //FINDING UNITVECTOR
gp_Dir dir_line(0,0,1);
gp_Vec UnitVector_line(dir_line);
gp_Pnt minPoint;
Standard_Real diameter = 0.1;
int noOfInstances =3;
int gap = -10;
for(int i=0; i {
point.Translate(UnitVector);
gp_Pnt newPoint = point;
newPoint.Translate(UnitVector_line*10);
BRepBuilderAPI_MakeEdge LINE(newPoint, point);
BRepBuilderAPI_MakeWire lineWire(LINE);
TopoDS_Wire wire = TopoDS::Wire(lineWire);
TopoDS_Shape lineShape = TopoDS_Shape(wire);
BRepTools::Write(lineShape,"C:/Users/Dell/Desktop/Blade/linlineShapee.brep");
intersection( bladeWithCore,lineShape, point, minPoint);
TopoDS_Vertex checkPoint = BRepBuilderAPI_MakeVertex (minPoint);
BRepTools::Write(checkPoint,"C:/Users/Dell/Desktop/Blade/checkPoint.brep");
gp_Ax1 location =gp_Ax1(minPoint,gp_Dir(0,0,1));
BRepFeat_MakeCylindricalHole drilled;
drilled.Init(DrilledShape,location);
drilled.PerformThruNext(diameter/2.0);
BRepFeat_Status status = drilled.Status();
if(status==BRepFeat_NoError)
{
DrilledShape = drilled.Shape();
BRepTools::Write(DrilledShape,"C:/Users/Dell/Desktop/Blade/bladeWithCoreDrilled.brep");
}
}
Handle(AIS_InteractiveObject) obj3 = (new AIS_Shape(DrilledShape));
myAISContext->Display(obj3);
COCCAPPDoc:: Fit();
}
void COCCAPPDoc::intersection(TopoDS_Shape bladeWithCore, TopoDS_Shape lineShape, gp_Pnt point, gp_Pnt& minPoint)
{
    // Finds, among the closest-point solutions between the blade and the
    // probe line, the one nearest to 'point', and writes it to minPoint.
    BRepExtrema_DistShapeShape interSection(bladeWithCore, lineShape, Extrema_ExtFlag_MIN, Extrema_ExtAlgo_Grad);
    interSection.Perform();

    const Standard_Integer solutions = interSection.NbSolution();
    if (solutions < 1)
        return; // no solution: leave minPoint untouched

    // NOTE(review): both loop headers were garbled in the original paste
    // ("for(int i = 1; i {"), and the distance array was indexed off by one
    // (solution i was stored at slot i-1 but compared at slot i). Rewritten
    // as a single min-scan over the 1-based solution indices, which also
    // removes the CArray entirely.
    Standard_Integer minIndex = 1;
    Standard_Real minDistance = point.Distance(interSection.PointOnShape1(1));
    for (Standard_Integer i = 2; i <= solutions; ++i)
    {
        const Standard_Real distance = point.Distance(interSection.PointOnShape1(i));
        if (distance < minDistance)
        {
            minDistance = distance;
            minIndex = i;
        }
    }

    minPoint = interSection.PointOnShape1(minIndex);
    TopoDS_Vertex aV = BRepBuilderAPI_MakeVertex(minPoint);
    BRepTools::Write(aV, "C:/Users/Dell/Desktop/Blade/minPoint.brep");
}

Vtk Qt scene with >50 (moving) actors

I am trying to implement (fairly) simple scene where I have ~50 cubes which are moving in certain directions. The position of the cubes changes 20 times per second.
My first attempt was adding and removing actors from the scene. This approach just doesn't scale: the whole scene lags and the user is unable to move the camera.
void draw(vtkRenderer *renderer)
{
    // NOTE(review): destroying and recreating every actor per frame is the
    // scalability problem itself — a single vtkGlyph3D actor (as in the
    // glyph-based answer on this page) scales far better. The immediate
    // defect fixed here: Update() was called on the cube source BEFORE its
    // dimensions were configured.
    renderer->RemoveAllViewProps();
    for (const Cube& cube : cubes_)
    {
        vtkSmartPointer<vtkCubeSource> cube_source = vtkSmartPointer<vtkCubeSource>::New();
        cube_source->SetXLength(cube.lengt());
        cube_source->SetYLength(cube.width());
        cube_source->SetZLength(cube.height());
        cube_source->Update(); // fix: update only after the lengths are set

        vtkSmartPointer<vtkPolyDataMapper> poly_mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
        poly_mapper->SetInputConnection(cube_source->GetOutputPort());

        vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
        actor->SetMapper(poly_mapper);
        actor->SetPosition(cube.x(), cube.y(), cube.z());
        renderer->AddActor(actor);
    }
}
My second attempt is a bit better: I created an "actor pool" where I reuse actors and hide the ones that are not needed.
Still, moving the camera is laggy, and the rest of my UI (I have some additional widgets inside the VTK widget) seems laggy as well.
I couldn't find any relevant VTK resource where the scene is "dynamic" — all the examples preload the scene elements and then work with them. Can anyone tell me what I am doing wrong here?
This is my visualization VTK example:
.....
// Scene plumbing shared by the visualization widget.
vtkSmartPointer<vtkRenderer> m_vtkRenderer;   // single renderer for the 3-D view
vtkSmartPointer<MouseInteractor> m_mouseInteractor; // custom interactor style used for picking
// Actor groups, added/removed per data category by the setters below.
QVector<vtkActor* > m_parcellationActors;
QVector<vtkActor* > m_electrodesActors;
QVector<vtkFollower* > m_textActors;  // labels; vtkFollower so they face the camera
QVector<vtkActor*> m_vectorFieldsActors;
QVector<vtkActor*> m_streamlinesActors;
......
// Builds the stacked 3-D view area: a loading page plus the VTK widget,
// with the renderer and the custom mouse interactor wired into the
// widget's render window. Call order matters: the renderer must be added
// to the render window before the interactor style is installed.
void VisualizationWidget::create3DViewArea()
{
m_3DViewArea = new QStackedWidget(this);
m_vtkWidget = new QVTKWidget(m_3DViewArea);
m_vtkRenderer = vtkSmartPointer<vtkRenderer>::New();
m_vtkRenderer->SetBackground(0.4, 0.4, 0.4);
this->m_vtkWidget->GetRenderWindow()->AddRenderer(m_vtkRenderer);
// Custom interactor style: emits onActorSelected when the user picks an actor.
m_mouseInteractor = vtkSmartPointer<MouseInteractor>::New();
m_mouseInteractor ->SetDefaultRenderer(m_vtkRenderer);
this->m_vtkWidget->GetRenderWindow()->GetInteractor()->SetInteractorStyle( m_mouseInteractor);
connect(m_mouseInteractor, &MouseInteractor::onActorSelected, this, &VisualizationWidget::onActorSelectedSlot);
// Loading page shown while data is being prepared; the VTK page is index 1.
m_vtkLoadingWidget = new LoadingWidget(m_3DViewArea);
m_vtkLoadingWidget->setIconPath(Icon::s_getTDCSLoadingGif);
m_3DViewArea->addWidget(m_vtkLoadingWidget);
m_3DViewArea->addWidget(m_vtkWidget);
}
void VisualizationWidget::setParcellationActors(QVector<vtkActor*> actors)
{
    // Store the parcellation actors and hand each one to the renderer.
    m_parcellationActors = actors;
    for (vtkActor* actor : m_parcellationActors)
        m_vtkRenderer->AddActor(actor);
    m_vtkWidget->update();
}
void VisualizationWidget::setElectrodesActors(QVector<vtkActor*> actors)
{
    // Keep the electrode actors and add them all to the scene.
    m_electrodesActors = actors;
    for (vtkActor* electrode : m_electrodesActors)
        m_vtkRenderer->AddActor(electrode);
    m_vtkWidget->update();
}
void VisualizationWidget::setElectrodesLabelsActors(QVector<vtkFollower*> actors)
{
    // Label followers must track the active camera so they always face it.
    m_textActors = actors;
    for (vtkFollower* label : m_textActors)
    {
        label->SetCamera(m_vtkRenderer->GetActiveCamera());
        m_vtkRenderer->AddActor(label);
    }
    m_vtkRenderer->ResetCamera();
    m_vtkWidget->update();
}
void VisualizationWidget::setVectorFieldsActors(QVector<vtkActor*> actors)
{
    // Swap the displayed vector-field actors: remove the old set first.
    for (vtkActor* old : m_vectorFieldsActors)
        m_vtkRenderer->RemoveActor(old);

    m_vectorFieldsActors = actors;
    // Each actor's opacity comes from its matching post-processing slider.
    for (int idx = 0; idx < m_vectorFieldsActors.size(); ++idx)
    {
        const auto slider = m_postProcResOpacSliders.at(idx);
        changeActorOpacity(m_vectorFieldsActors[idx], double(slider->value()) / slider->maximum());
        m_vtkRenderer->AddActor(m_vectorFieldsActors.at(idx));
    }
    m_vtkRenderer->ResetCamera();
    m_vtkWidget->update();
}
void VisualizationWidget::setStreamlinesActors(QVector<vtkActor*> actors)
{
    // Replace the currently shown streamlines with the new set.
    for (vtkActor* old : m_streamlinesActors)
        m_vtkRenderer->RemoveActor(old);

    m_streamlinesActors = actors;
    // All streamline actors share one opacity, driven by the single slider.
    for (int idx = 0; idx < m_streamlinesActors.size(); ++idx)
    {
        changeActorOpacity(m_streamlinesActors[idx], double(m_streamLinesSlider->value()) / m_streamLinesSlider->maximum());
        m_vtkRenderer->AddActor(m_streamlinesActors.at(idx));
    }
    m_vtkRenderer->ResetCamera();
    m_vtkWidget->update();
}
void VisualizationWidget::changeActorOpacity(vtkActor* actor, double opac)
{
    // Effectively-transparent actors are hidden outright instead of rendered.
    const bool visible = opac > 0.05;
    actor->SetVisibility(visible);
    actor->GetMapper()->Modified(); // force the mapper to pick up the change
    actor->GetProperty()->SetOpacity(opac);
}
So after some days of research I managed to put together a working solution:
// Render many cubes with ONE actor: a vtkGlyph3D stamps the cube source at
// every input point, scaled per point by a 3-component "vector" array.
vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();
// as many points as you like
points->InsertNextPoint(-25, 0, 0);
points->InsertNextPoint(-35, 0, 0);
vtkSmartPointer<vtkFloatArray> scales = vtkSmartPointer<vtkFloatArray>::New();
scales->SetNumberOfComponents(3);
// same as number of points
scales->InsertNextTuple3(1, 1., 8.);
scales->InsertNextTuple3(1., 1., 10.);
vtkSmartPointer<vtkPolyData> polydata = vtkSmartPointer<vtkPolyData>::New();
polydata->SetPoints(points);
// The per-point scale tuples ride along as the point data's vectors.
polydata->GetPointData()->SetVectors(scales);
vtkSmartPointer<vtkCubeSource> cubeSource = vtkSmartPointer<vtkCubeSource>::New();
vtkSmartPointer<vtkGlyph3D> glyph3D = vtkSmartPointer<vtkGlyph3D>::New();
glyph3D->OrientOff(); // disable orientation
glyph3D->SetScaleModeToScaleByVectorComponents(); // scale along each axis
glyph3D->SetSourceConnection(cubeSource->GetOutputPort());
glyph3D->SetInputData(polydata);
glyph3D->Update();
vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
mapper->SetInputConnection(glyph3D->GetOutputPort());
mapper->ScalarVisibilityOff(); // use color from actor
vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
actor->GetProperty()->SetColor(0, 1, 0); // this will change color for whole glyph
actor->SetMapper(mapper);
mapper->Update();
vtk_renderer->AddActor(actor);
The code above will add as many cubes as you want using a single actor (which was an amazing performance boost in my case).
Further, if you want to update the positions of the cubes, you just need to do the following:
points->Reset();
points->InsertNextPoint(-25, 0, 0);
points->InsertNextPoint(-35, 0, 0);
scales->Reset();
scales->InsertNextTuple3(1, 1., 8.);
scales->InsertNextTuple3(1., 1., 10.);
polydata_->Modified();
// call render
(notice that I am not removing/adding actors to the scene which is another boost)

vtk 6.x, Qt: 3D (line, surface, scatter) plotting

I am working on a Qt (4.7.4) project and need to plot data in 2D and 3D coordinate systems. I've been looking into vtk 6.1 because it seems very powerful overall and I will also need to visualize image data at a later point. I basically got 2D plots working but am stuck plotting data in 3D.
Here's what I tried: I'm using the following piece of code that I took from one of vtk's tests ( Charts / Core / Testing / Cxx / TestSurfacePlot.cxx ). The only thing I added is the QVTKWidget that I use in my GUI and its interactor:
QVTKWidget vtkWidget;
vtkNew<vtkChartXYZ> chart;
vtkNew<vtkPlotSurface> plot;
vtkNew<vtkContextView> view;
view->GetRenderWindow()->SetSize(400, 300);
vtkWidget.SetRenderWindow(view->GetRenderWindow());
view->GetScene()->AddItem(chart.GetPointer());
chart->SetGeometry(vtkRectf(75.0, 20.0, 250, 260));
// Create a surface
vtkNew<vtkTable> table;
float numPoints = 70;
float inc = 9.424778 / (numPoints - 1);
for (float i = 0; i < numPoints; ++i)
{
vtkNew<vtkFloatArray> arr;
table->AddColumn(arr.GetPointer());
}
table->SetNumberOfRows(numPoints);
for (float i = 0; i < numPoints; ++i)
{
float x = i * inc;
for (float j = 0; j < numPoints; ++j)
{
float y = j * inc;
table->SetValue(i, j, sin(sqrt(x*x + y*y)));
}
}
// Set up the surface plot we wish to visualize and add it to the chart.
plot->SetXRange(0, 9.424778);
plot->SetYRange(0, 9.424778);
plot->SetInputData(table.GetPointer());
chart->AddPlot(plot.GetPointer());
view->GetRenderWindow()->SetMultiSamples(0);
view->SetInteractor(vtkWidget.GetInteractor());
view->GetInteractor()->Initialize();
view->GetRenderWindow()->Render();
Now, this produces a plot but I can neither interact with it not does it look 3D. I would like to do some basic stuff like zoom, pan, or rotate about a pivot. A few questions that come to my mind about this are:
Is it correct to assign the QVTKWidget interactor to the view in the third line from the bottom?
In the test, a vtkChartXYZ is added to the vtkContextView. According to the documentation, the vtkContextView is used to display a 2D scene but here is used with a 3D chart (XYZ). How does this fit together?
The following piece of code worked for me. No need to explicitly assign an interactor because that's already been taken care of by QVTKWidget.
// Working variant: let QVTKWidget supply the interactor implicitly by giving
// the context view the widget's render window (SetRenderWindow below) — no
// explicit SetInteractor call is needed.
QVTKWidget vtkWidget;
vtkSmartPointer<vtkContextView> view = vtkSmartPointer<vtkContextView>::New();
vtkSmartPointer<vtkChartXYZ> chart = vtkSmartPointer<vtkChartXYZ>::New();
// Create a surface
vtkSmartPointer<vtkTable> table = vtkSmartPointer<vtkTable>::New();
float numPoints = 70;
float inc = 9.424778 / (numPoints - 1);
// One float column per sample row of the surface grid.
for (float i = 0; i < numPoints; ++i)
{
vtkSmartPointer<vtkFloatArray> arr = vtkSmartPointer<vtkFloatArray>::New();
table->AddColumn(arr.GetPointer());
}
table->SetNumberOfRows(numPoints);
// Fill the grid with z = sin(sqrt(x^2 + y^2)).
for (float i = 0; i < numPoints; ++i)
{
float x = i * inc;
for (float j = 0; j < numPoints; ++j)
{
float y = j * inc;
table->SetValue(i, j, sin(sqrt(x*x + y*y)));
}
}
// Key difference from the question: hand the WIDGET's render window to the
// view, rather than pushing the view's window into the widget.
view->SetRenderWindow(vtkWidget.GetRenderWindow());
chart->SetGeometry(vtkRectf(200.0, 200.0, 300, 300));
view->GetScene()->AddItem(chart.GetPointer());
vtkSmartPointer<vtkPlotSurface> plot = vtkSmartPointer<vtkPlotSurface>::New();
// Set up the surface plot we wish to visualize and add it to the chart.
plot->SetXRange(0, 10.0);
plot->SetYRange(0, 10.0);
plot->SetInputData(table.GetPointer());
chart->AddPlot(plot.GetPointer());
view->GetRenderWindow()->SetMultiSamples(0);
view->GetRenderWindow()->Render();
You might want read the detailed description in vtkRenderViewBase
// Minimal wiring of a vtkContextView into a QVTKWidget: share the widget's
// interactor and hand the view's render window to the widget.
QVTKWidget *widget = new QVTKWidget;
vtkContextView *view = vtkContextView::New();
view->SetInteractor(widget->GetInteractor());
widget->SetRenderWindow(view->GetRenderWindow());

LWJGL - OpenGL - Camera rotation

I tried to create a first-person camera. I took a piece of code from my XNA project and tried to convert it into Java (LWJGL).
// Build a first-person look-at from mouse-driven pitch/yaw: rotate the
// forward vector (0, 0, -1) and aim gluLookAt at position + forward.
Matrix4f mx = Matrix4f.rotate(pitch, new Vector3f(1, 0, 0),
new Matrix4f(), null);
Matrix4f my = Matrix4f.rotate(yaw, new Vector3f(0, 1, 0),
new Matrix4f(), null);
// NOTE(review): LWJGL's rotate produces the transpose of XNA's
// CreateRotationX for the same angle (see the EDIT section below), so this
// mul order does not reproduce the XNA behavior exactly.
Matrix4f rotationMatrix = Matrix4f.mul(mx, my, null);
Vector4f tmp = Matrix4f.transform(rotationMatrix, new Vector4f(0, 0,
-1, 0), null);
target = new Vector3f(position.x + tmp.x, position.y + tmp.y,
position.z + tmp.z);
GLU.gluLookAt(this.position.x, this.position.y, this.position.z,
this.target.x, this.target.y, this.target.z, 0, 1.0f, 0);
Pitch and yaw increase/decrease when I move the mouse. At the start of the game it works fine, but after some movement I can't look down or up any more. Could this happen if my position is < 0?
The XNA code, which works perfectly:
// XNA reference implementation: pitch then yaw applied to Vector3.Forward.
Matrix rotationMatrix = Matrix.CreateRotationX(pitch) * Matrix.CreateRotationY(yaw);
camera.Target = Vector3.Transform(Vector3.Forward, rotationMatrix) + camera.Position;
// NOTE(review): this line reads this.position/this.target while the line
// above writes camera.Target — presumably the same object; verify.
this.viewMatrix = Matrix.CreateLookAt(this.position, this.target, Vector3.Up);
EDIT
So I figured out that there is something different between LWJGL and XNA for rotating a matrix.
Java:
Matrix4f mx = Matrix4f.rotate(1.2345f, new Vector3f(1,0,0),new Matrix4f(),null);
Result:
{1.0 0.0 0.0 0.0} {0.0 0.3299931 -0.9439833 0.0}{0.0 0.9439833 0.3299931 0.0}{0.0 0.0 0.0 1.0}
XNA
Matrix.CreateRotationX(1.2345f)
Result:
{1,0,0,0} {0,0.3299931,0.9439833,0} {0,-0.9439833,0.3299931,0} {0,0,0,1}
The difference is -0.9439833 vs. +0.9439833 … can someone explain why the results are different? Isn't it the same function?
Thank you!
Oh, I see — OK, that explains the difference. I finally solved my problem: the culprit was the "transform" method in LWJGL. Below is my final working code. Maybe it could be optimized further?
public static Matrix4f createRotationX(float pitch) {
    // Rotation about the X axis in LWJGL's Matrix4f layout, matching XNA's
    // Matrix.CreateRotationX as used by the camera code above.
    // Fixes: the original assigned m03 and m23 twice each (duplicates
    // removed) and computed cos/sin twice (hoisted into locals). m31/m32
    // are left at 0 by the Matrix4f identity constructor.
    Matrix4f mX = new Matrix4f();
    float c = (float) Math.cos(pitch);
    float s = (float) Math.sin(pitch);
    mX.m00 = 1;
    mX.m10 = 0;
    mX.m20 = 0;
    mX.m30 = 0;
    mX.m01 = 0;
    mX.m11 = c;
    mX.m21 = s;
    mX.m02 = 0;
    mX.m12 = -s;
    mX.m22 = c;
    mX.m03 = 0;
    mX.m13 = 0;
    mX.m23 = 0;
    mX.m33 = 1;
    return mX;
}
public static Matrix4f createRotationY(float yaw) {
    // Rotation about the Y axis (XNA Matrix.CreateRotationY semantics) in
    // LWJGL's Matrix4f layout.
    // Fixes: the original assigned m23 three times (duplicates removed) and
    // computed cos/sin twice (hoisted). NOTE(review): m32 was never set
    // explicitly — one of the m23 writes may have been intended as m32, but
    // both are 0 for a Y rotation (the identity constructor zeroes them),
    // so behavior is unchanged.
    Matrix4f mY = new Matrix4f();
    float c = (float) Math.cos(yaw);
    float s = (float) Math.sin(yaw);
    mY.m00 = c;
    mY.m10 = 0;
    mY.m20 = -s;
    mY.m30 = 0;
    mY.m01 = 0;
    mY.m11 = 1;
    mY.m21 = 0;
    mY.m31 = 0;
    mY.m02 = s;
    mY.m12 = 0;
    mY.m22 = c;
    mY.m23 = 0;
    mY.m03 = 0;
    mY.m13 = 0;
    mY.m33 = 1;
    return mY;
}
// Computes the camera target point from pitch/yaw: the forward vector
// (0, 0, -1) is packed into a matrix, multiplied by the combined rotation,
// and the rotated components are read back out and added to the position.
public static Vector3f getNewTarget(Vector3f position,float pitch, float yaw){
Matrix4f mx = MathUtil.createRotationX(pitch);
Matrix4f my = MathUtil.createRotationY(yaw);
// Combined rotation: pitch first, then yaw (same order as the XNA code).
Matrix4f rotationMatrix = Matrix4f.mul(mx, my, null);
// 'def' encodes the forward vector: m20 = -1 with the identity diagonal
// zeroed out. NOTE(review): which of m00/m10/m20 is row vs column follows
// LWJGL's mXY field convention — the author reports this works, but verify
// the convention before modifying.
Matrix4f def = new Matrix4f();
def.m20 = -1;
def.m00 = 0;
def.m11 = 0;
def.m22 = 0;
def.m33 = 0;
// Apply the rotation and read the rotated vector back from m00/m10/m20.
Matrix4f v4 = Matrix4f.mul(def,rotationMatrix,null);
return new Vector3f(v4.m00+position.x, v4.m10+position.y, v4.m20+position.z);
}