Qt3D: How to render a mesh in wireframe mode using Qt C++? - c++

The Qt3D documentation is growing but still lacks some information, especially on how to do things without QML/QtQuick. After searching the web extensively for how to render a mesh in wireframe mode, I found a lot of helpful hints and examples that together resulted in an example viewer, which I present here as a contribution back to the authors of those articles and to anyone searching for similar examples.
The most helpful links were these:
Qt basic shapes example
Qt wireframe example
Qt material documentation
Qt MyCylinder example
Stackoverflow question and answer about using an event filter in Qt3DWindow: Mouse controls over Qt 3D Window
The mesh can be rotated and zoomed with the mouse.
A screenshot of the viewer
Any comments on how to improve this are welcome. In particular, I'm interested in how to write a shader program that can render front and back faces in different colors, or render the colors per vertex.
And here's the code:
// ######### Opening the viewer #########
void MainWindow::import3dMeshInMeshViewer(QString name)
{
if (!m_viewer3D)
{
m_viewer3D = new Viewer3D(this);
}
m_viewer3D->sceneModifier()->addTriangleMeshCustomMaterial(name, m_meshVector);
m_viewer3D->show();
}
// ######### Viewer class h #########
class Viewer3D : public QDialog
{
Q_OBJECT
public:
Viewer3D(QWidget *parent = 0);
SceneModifier* sceneModifier() {return m_sceneModifier;}
protected:
bool eventFilter(QObject *obj, QEvent *ev);
void mouseMoveEvent(QMouseEvent *ev);
void mousePressEvent(QMouseEvent *ev);
void mouseReleaseEvent(QMouseEvent *ev);
void wheelEvent(QWheelEvent *we);
private:
QPointer<Qt3DCore::QEntity> m_rootEntity;
QPointer<SceneModifier> m_sceneModifier;
Qt3DExtras::Qt3DWindow *m_view;
QPoint m_moveStartPoint;
QMatrix4x4 m_cameraMatrix;
};
// ######### Viewer class cpp #########
Viewer3D::Viewer3D(QWidget *parent) :
QDialog(parent)
{
setAttribute(Qt::WA_DeleteOnClose);
m_moveStartPoint.setX(-1);
m_view = new Qt3DExtras::Qt3DWindow();
m_view->installEventFilter(this);
m_view->defaultFrameGraph()->setClearColor(QColor(QRgb(0x4d4d4f)));
QWidget *container = QWidget::createWindowContainer(m_view);
QSize screenSize = m_view->screen()->size();
container->setMinimumSize(QSize(200, 100));
container->setMaximumSize(screenSize);
QHBoxLayout *hLayout = new QHBoxLayout(this);
QVBoxLayout *vLayout = new QVBoxLayout();
hLayout->addWidget(container, 1);
setWindowTitle(QStringLiteral("Mesh Viewer"));
// Root entity
m_rootEntity = new Qt3DCore::QEntity();
// Scene modifier
m_sceneModifier = new SceneModifier(m_rootEntity);
// Window geometry
resize(parent->geometry().width() * 0.8, parent->geometry().height() * 0.8);
move(parent->geometry().center() - QPoint(width() / 2, height() / 2));
// Camera
Qt3DRender::QCamera *cameraEntity = m_view->camera();
//cameraEntity->lens()->setPerspectiveProjection(22.5f, m_view->width()/m_view->height(), 0.01f, 1000.0f);
cameraEntity->setPosition(QVector3D(0, 0, 500.0f));
cameraEntity->setUpVector(QVector3D(0, 1, 0));
cameraEntity->setViewCenter(QVector3D(0, 0, 0));
cameraEntity->transform()->setScale(1.f);
// Set root object of the scene
m_view->setRootEntity(m_rootEntity);
}
bool Viewer3D::eventFilter(QObject *obj, QEvent *ev)
{
if (ev->type() == QEvent::Wheel)
{
wheelEvent(dynamic_cast<QWheelEvent*>(ev));
return true;
}
else if (ev->type() == QEvent::MouseButtonPress)
{
mousePressEvent(dynamic_cast<QMouseEvent*>(ev));
return true;
}
else if (ev->type() == QEvent::MouseMove)
{
mouseMoveEvent(dynamic_cast<QMouseEvent*>(ev));
return true;
}
else if (ev->type() == QEvent::MouseButtonRelease)
{
mouseReleaseEvent(dynamic_cast<QMouseEvent*>(ev));
return true;
}
return QObject::eventFilter(obj, ev);
}
void Viewer3D::wheelEvent(QWheelEvent *we)
{
Qt3DCore::QTransform* transform = m_view->camera()->transform();
float scale = transform->scale();
QPoint delta = we->angleDelta();
float zoom_distance = scale * static_cast<float>(delta.y()) / 500.f;
scale -= zoom_distance;
scale = std::min(10.0000f, scale);
scale = std::max(0.001f, scale);
transform->setScale(scale);
}
void Viewer3D::mousePressEvent(QMouseEvent *ev)
{
if (ev->button() == Qt::LeftButton)
{
m_moveStartPoint = ev->pos();
m_cameraMatrix = m_view->camera()->transform()->matrix();
}
}
void Viewer3D::mouseMoveEvent(QMouseEvent *ev)
{
if (m_moveStartPoint.x() > -1)
{
QPoint delta = ev->pos() - m_moveStartPoint;
float angle = static_cast<float>(QPoint::dotProduct(delta, delta)) / 100.f;
QVector3D axis = QVector3D(delta.y(), delta.x(), 0);
QMatrix4x4 rotationMatrix = Qt3DCore::QTransform::rotateAround(-m_view->camera()->position(), angle, axis);
QMatrix4x4 matrix = rotationMatrix * m_cameraMatrix;
m_view->camera()->transform()->setMatrix(matrix);
}
}
void Viewer3D::mouseReleaseEvent(QMouseEvent *ev)
{
if (m_moveStartPoint.x() > -1)
{
m_moveStartPoint.setX(-1);
m_cameraMatrix = m_view->camera()->transform()->matrix();
}
}
// ######### Scene modifier class h #########
class SceneModifier : public QObject
{
Q_OBJECT
public:
SceneModifier(Qt3DCore::QEntity* rootEntity);
void addTriangleMeshCustomMaterial(QString name, const std::vector<Import3d::Triangle>& meshVector);
private:
Qt3DCore::QEntity* m_rootEntity;
};
// ######### Scene modifier class cpp #########
#include "SceneModifier.h"
#include "TriangleMeshRenderer.h"
#include "MaterialWireFrame.h"
SceneModifier::SceneModifier(Qt3DCore::QEntity* rootEntity) :
QObject(rootEntity),
m_rootEntity(rootEntity)
{
}
void SceneModifier::addTriangleMeshCustomMaterial(QString name, const std::vector<Import3d::Triangle>& meshVector)
{
if (!m_rootEntity)
{
return;
}
// Mesh entity
Qt3DCore::QEntity *triangleMeshEntity = new Qt3DCore::QEntity(m_rootEntity);
triangleMeshEntity->setObjectName(QStringLiteral("customMeshEntity"));
TriangleMeshRenderer *triangleMeshRenderer = new TriangleMeshRenderer(meshVector);
MaterialWireFrame* materialWireFrame = new MaterialWireFrame();
Qt3DCore::QTransform *transform = new Qt3DCore::QTransform;
transform->setScale(1.f);
triangleMeshEntity->addComponent(triangleMeshRenderer);
triangleMeshEntity->addComponent(transform);
triangleMeshEntity->addComponent(materialWireFrame);
//emit meshAdded(name, triangleMeshEntity);
}
// ######### Point and Triangle structs #########
struct Point
{
QVector3D p; //point x, y, z
QVector3D c; //color red, green, blue
Point() {}
Point(float xp, float yp, float zp)
{
p = QVector3D(xp, yp, zp);
c = QVector3D(0, 0, 0);
}
Point(QVector3D pos, unsigned char r, unsigned char g, unsigned char b)
{
p = pos;
c = QVector3D(static_cast<float>(r) / 255.f,
static_cast<float>(g) / 255.f,
static_cast<float>(b) / 255.f);
}
};
struct Triangle
{
Point vertices[3];
Triangle()
{
}
Triangle(Point p1, Point p2, Point p3)
{
vertices[0] = p1;
vertices[1] = p2;
vertices[2] = p3;
}
};
// ######### TriangleMeshRenderer class h #########
class TriangleMeshRenderer : public Qt3DRender::QGeometryRenderer
{
Q_OBJECT
public:
explicit TriangleMeshRenderer(const std::vector<Import3d::Triangle>& meshVector, Qt3DCore::QNode *parent = 0);
~TriangleMeshRenderer();
};
class TriangleMeshGeometry : public Qt3DRender::QGeometry
{
Q_OBJECT
public:
TriangleMeshGeometry(const std::vector<Import3d::Triangle>& meshVector, TriangleMeshRenderer *parent);
};
// ######### TriangleMeshRenderer class cpp #########
TriangleMeshRenderer::TriangleMeshRenderer(const std::vector<Import3d::Triangle>& meshVector, QNode *parent)
: Qt3DRender::QGeometryRenderer(parent)
{
setPrimitiveType(Qt3DRender::QGeometryRenderer::Triangles);
setGeometry(new TriangleMeshGeometry(meshVector, this));
}
TriangleMeshRenderer::~TriangleMeshRenderer()
{
}
TriangleMeshGeometry::TriangleMeshGeometry(const std::vector<Import3d::Triangle>& meshVector, TriangleMeshRenderer *parent)
: Qt3DRender::QGeometry(parent)
{
Qt3DRender::QBuffer *vertexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::VertexBuffer, this);
Qt3DRender::QBuffer *indexDataBuffer = new Qt3DRender::QBuffer(Qt3DRender::QBuffer::IndexBuffer, this);
// Vertexbuffer
QByteArray vertexBufferData;
// Buffer size = triangle count * 3 vertices * (3 + 3 + 3) floats: 3 floats for the vertex position x,y,z, 3 floats for the normal and 3 floats for the color
int bytesPerVertex = 9 * sizeof(float);
int bytesPerTriangle = 3 * bytesPerVertex;
vertexBufferData.resize(static_cast<int>(meshVector.size()) * bytesPerTriangle);
char* pByte = vertexBufferData.data();
int i = 0;
// Indexbuffer
QByteArray indexBufferData;
indexBufferData.resize(static_cast<int>(meshVector.size()) * 3 * sizeof(uint));
uint* rawIndexArray = reinterpret_cast<uint*>(indexBufferData.data());
int idx = 0;
for (int n = 0; n < meshVector.size(); ++n)
{
QVector3D nt = QVector3D::normal(meshVector[n].vertices[0].p, meshVector[n].vertices[1].p, meshVector[n].vertices[2].p);
for (int v = 0; v < 3; ++v)
{
// Vertex
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].p.x(); pByte += 4;
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].p.y(); pByte += 4;
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].p.z(); pByte += 4;
// Normal
*reinterpret_cast<float*>(pByte) = nt.x(); pByte += 4;
*reinterpret_cast<float*>(pByte) = nt.y(); pByte += 4;
*reinterpret_cast<float*>(pByte) = nt.z(); pByte += 4;
// Color
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].c.x(); pByte += 4;
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].c.y(); pByte += 4;
*reinterpret_cast<float*>(pByte) = meshVector[n].vertices[v].c.z(); pByte += 4;
// Index
rawIndexArray[idx] = static_cast<uint>(idx);
++idx;
}
}
vertexDataBuffer->setData(vertexBufferData);
indexDataBuffer->setData(indexBufferData);
// Attributes
Qt3DRender::QAttribute *positionAttribute = new Qt3DRender::QAttribute();
positionAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
positionAttribute->setBuffer(vertexDataBuffer);
positionAttribute->setDataType(Qt3DRender::QAttribute::Float);
positionAttribute->setDataSize(3);
positionAttribute->setByteOffset(0);
positionAttribute->setByteStride(bytesPerVertex);
positionAttribute->setCount(3 * static_cast<int>(meshVector.size()));
positionAttribute->setName(Qt3DRender::QAttribute::defaultPositionAttributeName());
Qt3DRender::QAttribute *normalAttribute = new Qt3DRender::QAttribute();
normalAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
normalAttribute->setBuffer(vertexDataBuffer);
normalAttribute->setDataType(Qt3DRender::QAttribute::Float);
normalAttribute->setDataSize(3);
normalAttribute->setByteOffset(3 * sizeof(float));
normalAttribute->setByteStride(bytesPerVertex);
normalAttribute->setCount(3 * static_cast<int>(meshVector.size()));
normalAttribute->setName(Qt3DRender::QAttribute::defaultNormalAttributeName());
Qt3DRender::QAttribute *colorAttribute = new Qt3DRender::QAttribute();
colorAttribute->setAttributeType(Qt3DRender::QAttribute::VertexAttribute);
colorAttribute->setBuffer(vertexDataBuffer);
colorAttribute->setDataType(Qt3DRender::QAttribute::Float);
colorAttribute->setDataSize(3);
colorAttribute->setByteOffset(6 * sizeof(float));
colorAttribute->setByteStride(bytesPerVertex);
colorAttribute->setCount(3 * static_cast<int>(meshVector.size()));
colorAttribute->setName(Qt3DRender::QAttribute::defaultColorAttributeName());
Qt3DRender::QAttribute *indexAttribute = new Qt3DRender::QAttribute();
indexAttribute->setAttributeType(Qt3DRender::QAttribute::IndexAttribute);
indexAttribute->setBuffer(indexDataBuffer);
indexAttribute->setDataType(Qt3DRender::QAttribute::UnsignedInt);
indexAttribute->setDataSize(1);
indexAttribute->setByteOffset(0);
indexAttribute->setByteStride(0);
indexAttribute->setCount(3 * static_cast<int>(meshVector.size()));
addAttribute(positionAttribute);
addAttribute(normalAttribute);
addAttribute(colorAttribute);
addAttribute(indexAttribute);
parent->setGeometry(this);
}
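One piece not shown above is the MaterialWireFrame class that the scene modifier attaches to the mesh entity. Below is a rough skeleton of what such a material can look like in C++, modelled on the Qt wireframe example linked above; the shader URLs and parameter names are placeholders taken from that example, not necessarily what the original viewer uses.
#include <Qt3DCore/QNode>
#include <Qt3DRender/QMaterial>
#include <Qt3DRender/QEffect>
#include <Qt3DRender/QTechnique>
#include <Qt3DRender/QRenderPass>
#include <Qt3DRender/QShaderProgram>
#include <Qt3DRender/QGraphicsApiFilter>
#include <Qt3DRender/QFilterKey>
#include <Qt3DRender/QParameter>
#include <QColor>
#include <QUrl>
// Rough skeleton (assumption): a QMaterial that loads the wireframe shaders from the
// Qt wireframe example. Shader URLs and parameter names are placeholders.
class MaterialWireFrame : public Qt3DRender::QMaterial
{
public:
    explicit MaterialWireFrame(Qt3DCore::QNode *parent = nullptr)
        : Qt3DRender::QMaterial(parent)
    {
        auto *shader = new Qt3DRender::QShaderProgram();
        shader->setVertexShaderCode(Qt3DRender::QShaderProgram::loadSource(QUrl(QStringLiteral("qrc:/shaders/robustwireframe.vert"))));
        shader->setGeometryShaderCode(Qt3DRender::QShaderProgram::loadSource(QUrl(QStringLiteral("qrc:/shaders/robustwireframe.geom"))));
        shader->setFragmentShaderCode(Qt3DRender::QShaderProgram::loadSource(QUrl(QStringLiteral("qrc:/shaders/robustwireframe.frag"))));
        auto *renderPass = new Qt3DRender::QRenderPass();
        renderPass->setShaderProgram(shader);
        auto *technique = new Qt3DRender::QTechnique();
        technique->graphicsApiFilter()->setApi(Qt3DRender::QGraphicsApiFilter::OpenGL);
        technique->graphicsApiFilter()->setMajorVersion(3);
        technique->graphicsApiFilter()->setMinorVersion(1);
        technique->graphicsApiFilter()->setProfile(Qt3DRender::QGraphicsApiFilter::CoreProfile);
        // the default forward framegraph only selects techniques carrying this filter key
        auto *filterKey = new Qt3DRender::QFilterKey();
        filterKey->setName(QStringLiteral("renderingStyle"));
        filterKey->setValue(QStringLiteral("forward"));
        technique->addFilterKey(filterKey);
        technique->addRenderPass(renderPass);
        auto *effect = new Qt3DRender::QEffect();
        effect->addTechnique(technique);
        setEffect(effect);
        // uniforms the example's wireframe shaders expect (line width and color)
        addParameter(new Qt3DRender::QParameter(QStringLiteral("line.width"), 1.0f));
        addParameter(new Qt3DRender::QParameter(QStringLiteral("line.color"), QColor(Qt::white)));
    }
};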

The OP is interested in writing a shader program, so it's necessary to use OpenGL in Qt, right? Like in https://doc.qt.io/qt-5/qtgui-openglwindow-example.html and https://doc.qt.io/qt-5/qtopengl-hellogl2-example.htm.
There is a simple shader example at https://doc.qt.io/qt-5/qopenglshaderprogram.html:
program.addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute highp vec4 vertex;\n"
"uniform highp mat4 matrix;\n"
"void main(void)\n"
"{\n"
" gl_Position = matrix * vertex;\n"
"}");
program.addShaderFromSourceCode(QOpenGLShader::Fragment,
"uniform mediump vec4 color;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = color;\n"
"}");
From https://learnopengl.com/Getting-started/Hello-Triangle
To draw your triangles in wireframe mode, you can configure how OpenGL
draws its primitives via glPolygonMode(GL_FRONT_AND_BACK, GL_LINE).
The first argument says we want to apply it to the front and back of
all triangles and the second argument tells us to draw them as lines. Any
subsequent drawing calls will render the triangles in wireframe mode
until we set it back to its default using
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL).
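For illustration, here is what that looks like in a plain QOpenGLWidget on a desktop GL context; this is just a sketch, not part of the Qt3D viewer above, and MyGLWidget, m_wireframeEnabled and drawScene() are hypothetical names:
// Sketch: toggling wireframe rendering with glPolygonMode.
// MyGLWidget is a hypothetical QOpenGLWidget subclass; drawScene() stands for the draw calls.
void MyGLWidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    if (m_wireframeEnabled)
        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);  // draw only the triangle edges
    drawScene();
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);      // restore the default fill mode
}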
And from https://learnopengl.com/Advanced-OpenGL/Advanced-GLSL
The gl_FrontFacing variable tells us if the current fragment is part
of a front-facing or a back-facing face. We could, for example, decide
to output different colors for all back faces.
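Putting the two quotes together, a fragment shader along these lines (a sketch in the style of the QOpenGLShaderProgram example above, not tested against the viewer code) would give the two-color front/back effect asked about in the question:
// Sketch: color front faces green and back faces red via the built-in gl_FrontFacing.
program.addShaderFromSourceCode(QOpenGLShader::Fragment,
    "void main(void)\n"
    "{\n"
    "    if (gl_FrontFacing)\n"
    "        gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);\n"   // front faces green
    "    else\n"
    "        gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"   // back faces red
    "}");
Per-vertex colors would instead be done by forwarding the color attribute (the defaultColorAttributeName attribute filled in TriangleMeshGeometry above) from the vertex shader to the fragment shader as a varying and outputting it there.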

Related

How can I change the texture position in DirectX 12

I need to change the position of the texture in the window: it is drawn in the upper left corner and I want to place it in the center. In DirectX 11 we could do this with
m_deviceResources->GetD2DDeviceContext()->SetTransform(...), but I haven't found any similar code for DirectX 12. I don't understand how I can change the texture position; maybe I need to change something in the ConstantBuffer or change the code for the shader.
Texture Code
void CreateTexture(int width, int height, const void* pData)
{
D3D12_RESOURCE_DESC resourceDesc = {};
resourceDesc.MipLevels = 1;
resourceDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
resourceDesc.Width = width;
resourceDesc.Height = height;
resourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
resourceDesc.DepthOrArraySize = 1;
resourceDesc.SampleDesc.Count = 1;
resourceDesc.SampleDesc.Quality = 0;
resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
D3D12MA::ALLOCATION_DESC allocationDesc = {};
allocationDesc.HeapType = D3D12_HEAP_TYPE_DEFAULT;
D3D12MA::Allocation* alloc = nullptr;
allocator->CreateResource(
&allocationDesc,
&resourceDesc,
D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
nullptr,
&alloc,
IID_PPV_ARGS(&OffscreenTexture));
UpdateTextureResource(width, height, pData);
// Describe and create a SRV for the texture.
D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
srvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
srvDesc.Format = resourceDesc.Format;
srvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MipLevels = 1;
Device->CreateShaderResourceView(OffscreenTexture, &srvDesc, OffscreenSrvHeap->GetCPUDescriptorHandleForHeapStart());
alloc->Release();
}
void UpdateTextureResource(int width, int height, const void* pData)
{
if (pData == nullptr)
return;
if (upload_texture)
upload_texture->Release();
auto Barrier = CD3DX12_RESOURCE_BARRIER::Transition(OffscreenTexture, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, D3D12_RESOURCE_STATE_COPY_DEST);
CommandList->ResourceBarrier(1, &Barrier);
const UINT64 uploadBufferSize = GetRequiredIntermediateSize(OffscreenTexture, 0, 1);
D3D12_RESOURCE_DESC desc = CD3DX12_RESOURCE_DESC::Buffer(uploadBufferSize);
D3D12MA::ALLOCATION_DESC allocationDesc = {};
allocationDesc.HeapType = D3D12_HEAP_TYPE_UPLOAD;
allocator->CreateResource(
&allocationDesc,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
NULL,
&upload_texture,
__uuidof(ID3D12Resource),
nullptr);
if (upload_texture == nullptr)
return;
D3D12_SUBRESOURCE_DATA textureData = {};
textureData.pData = pData;
textureData.RowPitch = width * 4; // width * 4 bytes per pixel (B8G8R8A8)
textureData.SlicePitch = textureData.RowPitch * height;
UpdateSubresources(CommandList, OffscreenTexture, upload_texture->GetResource(), 0, 0, 1, &textureData);
auto barrier = CD3DX12_RESOURCE_BARRIER::Transition(OffscreenTexture, D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
CommandList->ResourceBarrier(1, &barrier);
}
Shader Code
struct PSInput
{
float4 position : SV_POSITION;
float2 uv : TEXCOORD;
};
Texture2D g_texture : register(t0);
SamplerState g_sampler : register(s0);
PSInput VSMain(float4 position : POSITION, float4 uv : TEXCOORD)
{
PSInput result;
result.position = position;
result.uv = uv;
return result;
}
float4 PSMain(PSInput input) : SV_TARGET
{
return g_texture.Sample(g_sampler, input.uv);
}
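No complete answer is recorded here, but one common direction (an assumption, not something verified against this code) is to add a transform matrix to a constant buffer, multiply the incoming vertex position by it in VSMain (result.position = mul(transform, position);) and fill the buffer from the C++ side, for example with DirectXMath. The names below are hypothetical:
#include <DirectXMath.h>
using namespace DirectX;

// Hypothetical layout; it must match a cbuffer added to the HLSL above.
struct TransformConstantBuffer
{
    XMFLOAT4X4 transform;
};

// Builds a translation that shifts the quad towards the screen center in normalized
// device coordinates (x right, y up); the exact offsets depend on how the quad's
// vertices are defined. The result would be copied into the upload heap backing the
// constant buffer that the modified VSMain reads.
TransformConstantBuffer MakeCenteringTransform()
{
    TransformConstantBuffer cb = {};
    XMStoreFloat4x4(&cb.transform, XMMatrixTranspose(XMMatrixTranslation(0.5f, -0.5f, 0.0f)));
    return cb;
}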

Using a Bullet physics car and a heightfield, the car gets stuck; how can I fix it?

I've been working on implementing a car in my game using bullet physics. The physics of the car uses btRaycastVehicle and the code is mostly based on the ForkLift Demo.
At this point, the vehicle seems to work properly on flat ground. I then started working on non-flat terrain and found the class btHeightfieldTerrainShape, which takes an array of heights to construct a shape.
I've managed to use this class and the vehicle can drive on it, but it sometimes gets stuck in the terrain's hollows even when the height to climb is really small.
I'm using the MLCP constraint solver and also tested the PGS solver, but that doesn't help.
Here is the relevant code:
Vehicle.hpp
#define USE_MLCP_SOLVER
// I removed other #define because all were just floats
class Vehicle {
public:
// these bools are set to true when the corresponding key is pressed
bool m_foward = false, m_backward = false, m_leftward = false, m_rightward = false;
bool m_reset = false;
Vehicle(Vao *chassisVao, Vao *wheelVao, const float *heights, uint32_t gridsize, float amplitude);
~Vehicle();
// this function runs the logic of the physics simulation, it gets executed each frame
void stepSimulation(uint32_t frameTime);
// this function instantiates the objects to be rendered, it gets executed each frame
// not including definition, not relevant
void prepareRendering(std::vector<const Entity *> &entities);
private:
// members are declared here ---> <---
// create physics world and vehicle
void initPhysics(const float *heights, uint32_t gridsize, float amplitude);
// cleanup things
// not including definition, not relevant
void exitPhysics(void);
// reset vehicle position, rotation, momentum, etc..
// not including definition, not relevant
void resetVehicle(void);
// helper function to create rigid body
// not including definition, not relevant
btRigidBody* localCreateRigidBody(btScalar mass, const btTransform& startTransform, btCollisionShape* shape);
};
Vehicle.cpp
#include "Vehicle.hpp"
Vehicle::Vehicle(Vao *chassisVao, Vao *wheelVao, const float *heights, uint32_t gridsize, float amplitude) {
initPhysics(heights, gridsize, amplitude);
if (chassisVao) {
m_chassisEntity = new Entity(chassisVao);
}
for (int i = 0; i < 4; ++i) {
m_wheelEntities.push_back(Entity(wheelVao));
}
}
Vehicle::~Vehicle() {
exitPhysics();
if (m_chassisEntity) {
delete m_chassisEntity;
}
m_wheelEntities.clear();
}
void Vehicle::initPhysics(const float *heights, uint32_t gridsize, float amplitude) {
// setup dynamics world
m_collisionConfiguration = new btDefaultCollisionConfiguration();
m_dispatcher = new btCollisionDispatcher(m_collisionConfiguration);
btVector3 worldMin(-1000, -1000, -1000);
btVector3 worldMax(1000, 1000, 1000);
m_overlappingPairCache = new btAxisSweep3(worldMin, worldMax);
#ifdef USE_MLCP_SOLVER
btDantzigSolver* mlcp = new btDantzigSolver();
// btSolveProjectedGaussSeidel* mlcp = new btSolveProjectedGaussSeidel();
btMLCPSolver* sol = new btMLCPSolver(mlcp);
m_solver = sol;
#else
m_solver = new btSequentialImpulseConstraintSolver();
#endif
m_world = new btDiscreteDynamicsWorld(m_dispatcher, m_overlappingPairCache, m_solver, m_collisionConfiguration);
#ifdef USE_MLCP_SOLVER
m_world->getSolverInfo().m_minimumSolverBatchSize = 1;
#else
m_world->getSolverInfo().m_minimumSolverBatchSize = 128;
#endif
m_world->getSolverInfo().m_globalCfm = 0.00001;
// create ground object
// btVector3 groundExtents(100, 3, 100);
// btCollisionShape* groundShape = new btBoxShape(groundExtents);
btCollisionShape* groundShape = new btHeightfieldTerrainShape(gridsize + 1, gridsize + 1, heights, 0.0f, amplitude, 1, false);
m_collisionShapes.push_back(groundShape);
btTransform tr;
tr.setIdentity();
tr.setOrigin(btVector3(gridsize * 0.5f, WHEEL_RADIUS, gridsize * 0.5f));
localCreateRigidBody(0, tr, groundShape);
// create vehicle
// BEGIN - create chassis shape
btVector3 vehicleExtents(1.76f, 1.1f, 4.0f);
btCollisionShape* chassisShape = new btBoxShape(vehicleExtents);
m_collisionShapes.push_back(chassisShape);
btCompoundShape* compound = new btCompoundShape();
m_collisionShapes.push_back(compound);
btTransform localTrans;
localTrans.setIdentity();
//localTrans effectively shifts the center of mass with respect to the chassis
localTrans.setOrigin(btVector3(0, 1, 0));
compound->addChildShape(localTrans, chassisShape);
tr.setOrigin(btVector3(0, 0, 0));
m_carChassis = localCreateRigidBody(800, tr, compound);
// END - create chassis shape
// BEGIN - create vehicle
m_vehicleRayCaster = new btDefaultVehicleRaycaster(m_world);
m_vehicle = new btRaycastVehicle(m_tuning, m_carChassis, m_vehicleRayCaster);
m_carChassis->setActivationState(DISABLE_DEACTIVATION); // never deactivate the vehicle
m_world->addVehicle(m_vehicle);
// choose coordinate system
m_vehicle->setCoordinateSystem(0, 1, 2);
btVector3 wheelDirection(0, -1, 0);
btVector3 wheelAxis(-1, 0, 0);
btVector3 connectionPoint(0.5f * vehicleExtents.x(), WHEEL_RADIUS, 0.5f * vehicleExtents.z() - WHEEL_RADIUS);
m_vehicle->addWheel(connectionPoint, wheelDirection, wheelAxis, SUSPENSION_REST_LENGTH, WHEEL_RADIUS, m_tuning, true);
connectionPoint = btVector3(-0.5f * vehicleExtents.x(), WHEEL_RADIUS, 0.5f * vehicleExtents.z() - WHEEL_RADIUS);
m_vehicle->addWheel(connectionPoint, wheelDirection, wheelAxis, SUSPENSION_REST_LENGTH, WHEEL_RADIUS, m_tuning, true);
connectionPoint = btVector3(0.5f * vehicleExtents.x(), WHEEL_RADIUS, -0.5f * vehicleExtents.z() + WHEEL_RADIUS);
m_vehicle->addWheel(connectionPoint, wheelDirection, wheelAxis, SUSPENSION_REST_LENGTH, WHEEL_RADIUS, m_tuning, false);
connectionPoint = btVector3(-0.5f * vehicleExtents.x(), WHEEL_RADIUS, -0.5f * vehicleExtents.z() + WHEEL_RADIUS);
m_vehicle->addWheel(connectionPoint, wheelDirection, wheelAxis, SUSPENSION_REST_LENGTH, WHEEL_RADIUS, m_tuning, false);
for (int i = 0; i < m_vehicle->getNumWheels(); i++) {
btWheelInfo& wheel = m_vehicle->getWheelInfo(i);
wheel.m_suspensionStiffness = SUSPENSION_STIFFNESS;
wheel.m_wheelsDampingRelaxation = SUSPENSION_DAMPING;
wheel.m_wheelsDampingCompression = SUSPENSION_COMPRESSION;
wheel.m_frictionSlip = WHEEL_FRICTION;
wheel.m_rollInfluence = ROLL_IN_INFLUENCE;
}
resetVehicle();
}
void Vehicle::stepSimulation(uint32_t frameTime) {
float speed = m_vehicle->getCurrentSpeedKmHour();
m_vehicleEngineForce = 0.0f;
m_vehicleBreakingForce = 0.0f;
/* --->
Processing input sets m_vehicleEngineForce, m_vehicleBreakingForce, m_vehicleSteering
<--- */
m_vehicle->applyEngineForce(m_vehicleEngineForce, 2);
m_vehicle->setBrake(m_vehicleBreakingForce, 2);
m_vehicle->applyEngineForce(m_vehicleEngineForce, 3);
m_vehicle->setBrake(m_vehicleBreakingForce, 3);
m_vehicle->setSteeringValue(m_vehicleSteering, 0);
m_vehicle->setSteeringValue(m_vehicleSteering, 1);
m_world->stepSimulation(frameTime * 0.001f, 2);
btMLCPSolver *solver = (btMLCPSolver *) m_world->getConstraintSolver();
int numFallbacks = solver->getNumFallbacks();
if (numFallbacks) {
std::cerr << "MLCP solver failed " << numFallbacks << " times, falling back to btSequentialImpulseSolver" << std::endl;
}
solver->setNumFallbacks(0);
}
And here is a video to illustrate: link
Thank you
I finally solved this issue. I used the Bullet physics debug drawer to view the bounding boxes. The problem was the chassis shape colliding with the terrain, because btBoxShape takes half extents, so I multiplied everything by 0.5 and it works well now.
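Concretely, the fix amounts to passing half extents when building the box shapes; for the chassis roughly like this (the values from the code above, halved):
// btBoxShape expects half extents, i.e. half of the real chassis dimensions.
// The full size above was (1.76, 1.1, 4.0), so the half extents become (0.88, 0.55, 2.0).
btVector3 vehicleHalfExtents(0.5f * 1.76f, 0.5f * 1.1f, 0.5f * 4.0f);
btCollisionShape* chassisShape = new btBoxShape(vehicleHalfExtents);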
Here is the debug drawer code, written in C++ for modern OpenGL, based on this forum thread:
BulletDebugDrawer.hpp
#ifndef BULLET_DEBUG_DRAWER_H
#define BULLET_DEBUG_DRAWER_H
#include <bullet/LinearMath/btIDebugDraw.h>
#include <vector>
class BulletDebugDrawer : public btIDebugDraw {
private:
int m_debugMode;
std::vector<float> m_lines;
public:
BulletDebugDrawer();
virtual void drawLine(const btVector3& from,const btVector3& to,const btVector3& color);
virtual void reportErrorWarning(const char* warningString);
virtual void setDebugMode(int debugMode);
virtual int getDebugMode(void) const;
virtual void drawContactPoint(const btVector3& PointOnB, const btVector3& normalOnB, btScalar distance, int lifeTime, const btVector3& color) {
}
virtual void draw3dText(const btVector3& location, const char* textString) {
}
void glfw3_device_create(void);
void glfw3_device_render(const float *matrix);
void glfw3_device_destroy(void);
};
#endif
BulletDebugDrawer.cpp
#include "BulletDebugDrawer.hpp"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <glad/gl.h>
#define MAX_LINES_DRAWCALL 1000
GLuint dev_program;
GLint dev_uniform_proj;
GLint dev_uniform_col;
GLint dev_attrib_pos;
GLuint dev_vao;
GLuint dev_vbo;
BulletDebugDrawer::BulletDebugDrawer() : m_debugMode(0) {
}
void BulletDebugDrawer::drawLine(const btVector3& from,const btVector3& to, const btVector3& color) {
m_lines.push_back(from.getX());
m_lines.push_back(from.getY());
m_lines.push_back(from.getZ());
m_lines.push_back(to.getX());
m_lines.push_back(to.getY());
m_lines.push_back(to.getZ());
}
void BulletDebugDrawer::setDebugMode(int debugMode) {
m_debugMode = debugMode;
}
int BulletDebugDrawer::getDebugMode() const {
return m_debugMode;
}
void BulletDebugDrawer::reportErrorWarning(const char* warningString) {
std::cout << warningString << std::endl;
}
void BulletDebugDrawer::glfw3_device_create(void) {
GLint status;
static const GLchar *vertex_shader =
"#version 150\n"
"uniform mat4 ProjMtx;\n"
"in vec3 Position;\n"
"void main() {\n"
" gl_Position = ProjMtx * vec4(Position, 1);\n"
"}\n";
static const GLchar *fragment_shader =
"#version 150\n"
"uniform vec3 Color;\n"
"out vec4 Out_Color;\n"
"void main(){\n"
" Out_Color = vec4(Color, 1);\n"
"}\n";
dev_program = glCreateProgram();
GLuint vert_shdr = glCreateShader(GL_VERTEX_SHADER);
GLuint frag_shdr = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(vert_shdr, 1, &vertex_shader, 0);
glShaderSource(frag_shdr, 1, &fragment_shader, 0);
glCompileShader(vert_shdr);
glCompileShader(frag_shdr);
glGetShaderiv(vert_shdr, GL_COMPILE_STATUS, &status);
assert(status == GL_TRUE);
glGetShaderiv(frag_shdr, GL_COMPILE_STATUS, &status);
assert(status == GL_TRUE);
glAttachShader(dev_program, vert_shdr);
glAttachShader(dev_program, frag_shdr);
glLinkProgram(dev_program);
glGetProgramiv(dev_program, GL_LINK_STATUS, &status);
assert(status == GL_TRUE);
glDetachShader(dev_program, vert_shdr);
glDetachShader(dev_program, frag_shdr);
glDeleteShader(vert_shdr);
glDeleteShader(frag_shdr);
dev_uniform_proj = glGetUniformLocation(dev_program, "ProjMtx");
dev_uniform_col = glGetUniformLocation(dev_program, "Color");
dev_attrib_pos = glGetAttribLocation(dev_program, "Position");
{
/* buffer setup */
glGenBuffers(1, &dev_vbo);
glGenVertexArrays(1, &dev_vao);
glBindVertexArray(dev_vao);
glBindBuffer(GL_ARRAY_BUFFER, dev_vbo);
glBufferData(GL_ARRAY_BUFFER, MAX_LINES_DRAWCALL * 24, nullptr, GL_STREAM_DRAW);
glEnableVertexAttribArray(dev_attrib_pos);
glVertexAttribPointer(dev_attrib_pos, 3, GL_FLOAT, GL_FALSE, 12, 0);
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void BulletDebugDrawer::glfw3_device_render(const float *matrix) {
glUseProgram(dev_program);
glUniformMatrix4fv(dev_uniform_proj, 1, GL_FALSE, matrix);
glUniform3f(dev_uniform_col, 1.0f, 0.0f, 0.0f);
glBindVertexArray(dev_vao);
glBindBuffer(GL_ARRAY_BUFFER, dev_vbo);
for (size_t i = 0; i < m_lines.size(); i += 6 * MAX_LINES_DRAWCALL) {
// m_lines holds raw floats, 6 per line (2 vertices * xyz); the VBO holds MAX_LINES_DRAWCALL lines
size_t batchFloatCount = std::min<size_t>(m_lines.size() - i, 6 * MAX_LINES_DRAWCALL);
glBufferSubData(GL_ARRAY_BUFFER, 0, batchFloatCount * sizeof(float), m_lines.data() + i);
glDrawArrays(GL_LINES, 0, static_cast<GLsizei>(batchFloatCount / 3));
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glUseProgram(0);
m_lines.clear();
}
void BulletDebugDrawer::glfw3_device_destroy(void) {
glDeleteProgram(dev_program);
glDeleteBuffers(1, &dev_vbo);
glDeleteVertexArrays(1, &dev_vao);
}

How do I rotate my camera correctly in my 3D world?

I'm trying to learn 3D programming and I'm currently working on an FPS-style camera for a hobby project.
I've created the matrices I believe I should use, but I'm having trouble seeing how to connect everything to the camera rotation.
So, I have a camera class with:
get_World_To_View matrix
mat4f rotMatrix = mat4f::rotation(-rotation.z, -rotation.x, -rotation.y);
rotMatrix.transpose();
return rotMatrix * mat4f::translation(-position);
get_ViewToWorld matrix
return mat4f::translation(position) * mat4f::rotation(-rotation.z, -rotation.x, -rotation.y);
get_ProjectionMatrix:
return mat4f::projection(vfov, aspect, zNear, zFar);
vector3 for get_forward
mat4f ViewToWorld = mat4f::translation(position) * mat4f::rotation(-rotation.z, -rotation.x, -rotation.y);
vec4f forward = ViewToWorld * vec4f(0, 0, -1, 0);
return forward.xyz();
and get_rightwards:
mat4f ViewToWorld = mat4f::translation(position) * mat4f::rotation(-rotation.z, -rotation.x, -rotation.y);
vec4f rightways = ViewToWorld * vec4f(-1, 0, 0, 0);
return rightways.xyz();
From here on I'm thinking that I need a function that actually rotates my camera, but I've tried several things and I can't really understand how it should be puzzled together.
I render with my two matrices, get_WorldToView and get_ProjectionMatrix, and I'm able to move around with the WASD keys.
Does anyone have a tip for how I should think about my RotateCamera() function? Am I missing something very important?
I'm quite new to programming and I'm still having a hard time "seeing" the logic before me.
So to be as clear as I can:
I have a function in Main.cpp (Update) for input that works like this:
If(mousedeltaX != 0.0f || mousedeltaY != 0.0f)
{
// Call a function that rotate the camera.
}
It's that function I want some help thinking about.
When I move with the WASD keys I just call a function Move() that adds the vector3 with the correct x,y,z direction multiplied by camera_velocity to the position, so that of course has nothing to do with the rotation itself.
I want to demonstrate how camera motion can be achieved simply by applying continuous changes to the camera's 4×4 matrix.
Here the camera matrix is the inverse of the view matrix. While the camera matrix represents the coordinates (position, orientation) of the camera relative to the world origin, the view matrix represents the opposite: the position of the world relative to the camera origin. The latter is the transformation needed for rendering, when the 3D content is mapped to the screen. However, humans (without egocentric disturbances) are used to seeing themselves in relation to the world. Hence, I consider manipulating the camera matrix more intuitive.
The left 3D view shows the first-person camera, the right one a view from the top where the position/orientation of the first-person camera is marked by the red triangle.
The camera matrix is initially set to the identity matrix with a small elevation in the y direction, so that it appears above the ground (the x-z plane).
The x-axis points to the right.
The y-axis points up.
The z-axis points out of the screen.
So, the line-of-sight vector is the negative z-axis.
Hence, moving forward can be achieved by adding negative z-values to the translation.
The camera-up vector is the y-axis.
Hence, turning left can be achieved with a positive rotation about the y-axis, turning right with a negative one.
Now, if the camera has been turned, how can moving forward take that turned line of sight into account?
The trick is to apply the translation to the z-axis but in the local coordinate system of the camera.
Doing this with matrices, you just need the correct order for multiplications.
void moveObs(
QMatrix4x4 &matCam, // camera matrix
double v, // speed (forwards, backwards)
double rot) // rotate (left, right)
{
QMatrix4x4 matFwd; matFwd.translate(0, 0, -v); // moving forwards / backwards: -z is line-of-sight
QMatrix4x4 matRot; matRot.rotate(rot, 0, 1, 0); // turning left / right: y is camera-up-vector
matCam *= matRot * matFwd;
}
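The same function can be driven from the questioner's Update() using the mouse deltas instead of buttons; as a small sketch (sensitivity is an assumed tuning constant, yaw only):
// Sketch: feeding mouse movement into moveObs(). Only yaw (turning left/right) is shown;
// pitch would be a second rotation about the local x-axis, added the same way.
// matCam is the application's camera matrix (matCamObs in the full sample below).
if (mousedeltaX != 0.0f || mousedeltaY != 0.0f)
{
    const double sensitivity = 0.2;                    // degrees per pixel, tune to taste
    moveObs(matCam, 0.0, -mousedeltaX * sensitivity);  // no forward motion, just turning
}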
I used QMatrix4x4 as this was what I had at hand. It shouldn't be that different in other APIs like glm or DirectXMath as all of them are based on the same mathematical basics.
(Though, you have always to check whether the specific API exposes the matrix row-major or column major: Matrix array order of OpenGL Vs DirectX.)
I must admit that I'm a fellow of the OpenGL community and mostly ignore Direct3D. Hence, I didn't feel able to prepare an MCVE in Direct3D but made one in OpenGL. I used the Qt framework, which provides a lot of things out of the box, to keep the sample as compact as possible. (That's not easy for 3D programming, nor for GUI programming, and especially not for the combination of both.)
The (complete) source code testQOpenGLWidgetNav.cc:
#include <QtWidgets>
/* This function is periodically called to move the observer
* (aka player, aka first person camera).
*/
void moveObs(
QMatrix4x4 &matCam, // camera matrix
double v, // speed (forwards, backwards)
double rot) // rotate (left, right)
{
QMatrix4x4 matFwd; matFwd.translate(0, 0, -v); // moving forwards / backwards: -z is line-of-sight
QMatrix4x4 matRot; matRot.rotate(rot, 0, 1, 0); // turning left / right: y is camera-up-vector
matCam *= matRot * matFwd;
}
class OpenGLWidget: public QOpenGLWidget, public QOpenGLFunctions {
private:
QMatrix4x4 &_matCam, _matProj, _matView, *_pMatObs;
QOpenGLShaderProgram *_pGLPrg;
GLuint _coordAttr;
public:
OpenGLWidget(QMatrix4x4 &matCam, QMatrix4x4 *pMatObs = nullptr):
QOpenGLWidget(),
_matCam(matCam), _pMatObs(pMatObs), _pGLPrg(nullptr)
{ }
QSize sizeHint() const override { return QSize(256, 256); }
protected:
virtual void initializeGL() override
{
initializeOpenGLFunctions();
glClearColor(0.525f, 0.733f, 0.851f, 1.0f);
}
virtual void resizeGL(int w, int h) override
{
_matProj.setToIdentity();
_matProj.perspective(45.0f, GLfloat(w) / h, 0.01f, 100.0f);
}
virtual void paintGL() override;
private:
void drawTriStrip(const GLfloat *coords, size_t nCoords, const QMatrix4x4 &mat, const QColor &color);
};
static const char *vertexShaderSource =
"# version 330\n"
"layout (location = 0) in vec3 coord;\n"
"uniform mat4 mat;\n"
"void main() {\n"
" gl_Position = mat * vec4(coord, 1.0);\n"
"}\n";
static const char *fragmentShaderSource =
"#version 330\n"
"uniform vec4 color;\n"
"out vec4 colorFrag;\n"
"void main() {\n"
" colorFrag = color;\n"
"}\n";
const GLfloat u = 0.5; // base unit
const GLfloat coordsGround[] = {
-15 * u, 0, +15 * u,
+15 * u, 0, +15 * u,
-15 * u, 0, -15 * u,
+15 * u, 0, -15 * u,
};
const size_t sizeCoordsGround = sizeof coordsGround / sizeof *coordsGround;
const GLfloat coordsCube[] = {
-u, +u, +u,
-u, -u, -u,
-u, -u, +u,
+u, -u, +u,
-u, +u, +u,
+u, +u, +u,
+u, +u, -u,
+u, -u, +u,
+u, -u, -u,
-u, -u, -u,
+u, +u, -u,
-u, +u, -u,
-u, +u, +u,
-u, -u, -u
};
const size_t sizeCoordsCube = sizeof coordsCube / sizeof *coordsCube;
const GLfloat coordsObs[] = {
-u, 0, +u,
+u, 0, +u,
0, 0, -u
};
const size_t sizeCoordsObs = sizeof coordsObs / sizeof *coordsObs;
void OpenGLWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
_matView = _matCam.inverted();
// create shader program if not yet done
if (!_pGLPrg) {
_pGLPrg = new QOpenGLShaderProgram(this);
_pGLPrg->addShaderFromSourceCode(QOpenGLShader::Vertex,
vertexShaderSource);
_pGLPrg->addShaderFromSourceCode(QOpenGLShader::Fragment,
fragmentShaderSource);
_pGLPrg->link();
_coordAttr = _pGLPrg->attributeLocation("coord");
}
_pGLPrg->bind();
// render scene
const QColor colors[] = {
Qt::white, Qt::green, Qt::blue,
Qt::black, Qt::darkRed, Qt::darkGreen, Qt::darkBlue,
Qt::cyan, Qt::magenta, Qt::yellow, Qt::gray,
Qt::darkCyan, Qt::darkMagenta, Qt::darkYellow, Qt::darkGray
};
QMatrix4x4 matModel;
drawTriStrip(coordsGround, sizeCoordsGround, matModel, Qt::lightGray);
const size_t nColors = sizeof colors / sizeof *colors;
for (int x = -2, i = 0; x <= 2; ++x) {
for (int z = -2; z <= 2; ++z, ++i) {
if (!x && !z) continue;
matModel.setToIdentity();
matModel.translate(x * 5 * u, u, z * 5 * u);
drawTriStrip(coordsCube, sizeCoordsCube, matModel, colors[i++ % nColors]);
}
}
// draw cam
if (_pMatObs) drawTriStrip(coordsObs, sizeCoordsObs, *_pMatObs, Qt::red);
// done
_pGLPrg->release();
}
void OpenGLWidget::drawTriStrip(const GLfloat *coords, size_t sizeCoords, const QMatrix4x4 &matModel, const QColor &color)
{
_pGLPrg->setUniformValue("mat", _matProj * _matView * matModel);
_pGLPrg->setUniformValue("color",
QVector4D(color.redF(), color.greenF(), color.blueF(), 1.0));
const size_t nVtcs = sizeCoords / 3;
glVertexAttribPointer(_coordAttr, 3, GL_FLOAT, GL_FALSE, 0, coords);
glEnableVertexAttribArray(0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, nVtcs);
glDisableVertexAttribArray(0);
}
struct ToolButton: QToolButton {
ToolButton(const char *text): QToolButton()
{
setText(QString::fromUtf8(text));
setCheckable(true);
QFont qFont = font();
qFont.setPointSize(2 * qFont.pointSize());
setFont(qFont);
}
};
struct MatrixView: QGridLayout {
QLabel qLbls[4][4];
MatrixView();
void setText(const QMatrix4x4 &mat);
};
MatrixView::MatrixView()
{
QColor colors[4] = { Qt::red, Qt::darkGreen, Qt::blue, Qt::black };
for (int j = 0; j < 4; ++j) {
for (int i = 0; i < 4; ++i) {
QLabel &qLbl = qLbls[i][j];
qLbl.setAlignment(Qt::AlignCenter);
if (i < 3) {
QPalette qPalette = qLbl.palette();
qPalette.setColor(QPalette::WindowText, colors[j]);
qLbl.setPalette(qPalette);
}
addWidget(&qLbl, i, j, Qt::AlignCenter);
}
}
}
void MatrixView::setText(const QMatrix4x4 &mat)
{
for (int j = 0; j < 4; ++j) {
for (int i = 0; i < 4; ++i) {
qLbls[i][j].setText(QString().number(mat.row(i)[j], 'f', 3));
}
}
}
const char *const Up = "\342\206\221", *const Down = "\342\206\223";
const char *const Left = "\342\206\266", *const Right = "\342\206\267";
int main(int argc, char **argv)
{
qDebug() << "Qt Version:" << QT_VERSION_STR;
QApplication app(argc, argv);
// setup GUI
QWidget qWinMain;
QHBoxLayout qHBox;
QMatrix4x4 matCamObs; // position/orientation of observer
matCamObs.setToIdentity();
matCamObs.translate(0, 0.7, 0);
OpenGLWidget qGLViewObs(matCamObs); // observer view
qHBox.addWidget(&qGLViewObs, 1);
QVBoxLayout qVBox;
QGridLayout qGrid;
ToolButton qBtnUp(Up), qBtnLeft(Left), qBtnDown(Down), qBtnRight(Right);
qGrid.addWidget(&qBtnUp, 0, 1);
qGrid.addWidget(&qBtnLeft, 1, 0);
qGrid.addWidget(&qBtnDown, 1, 1);
qGrid.addWidget(&qBtnRight, 1, 2);
qVBox.addLayout(&qGrid);
qVBox.addWidget(new QLabel(), 1); // spacer
qVBox.addWidget(new QLabel("<b>Camera Matrix:</b>"));
MatrixView qMatView;
qMatView.setText(matCamObs);
qVBox.addLayout(&qMatView);
QMatrix4x4 matCamMap; // position/orientation of "god" cam.
matCamMap.setToIdentity();
matCamMap.translate(0, 15, 0);
matCamMap.rotate(-90, 1, 0, 0);
OpenGLWidget qGLViewMap(matCamMap, &matCamObs); // overview
qVBox.addWidget(&qGLViewMap);
qHBox.addLayout(&qVBox);
qWinMain.setLayout(&qHBox);
qWinMain.show();
qWinMain.resize(720, 400);
// setup animation
const double v = 0.5, rot = 15.0; // linear speed, rot. speed
const double dt = 0.05; // target 20 fps
QTimer qTimer;
qTimer.setInterval(dt * 1000 /* ms */);
QObject::connect(&qTimer, &QTimer::timeout,
[&]() {
// fwd and turn are "tristate" vars. with value 0, -1, or +1
const int fwd = (int)qBtnUp.isChecked() - (int)qBtnDown.isChecked();
const int turn = (int)qBtnLeft.isChecked() - (int)qBtnRight.isChecked();
moveObs(matCamObs, v * dt * fwd, rot * dt * turn);
qGLViewObs.update(); qGLViewMap.update(); qMatView.setText(matCamObs);
});
qTimer.start();
// runtime loop
return app.exec();
}
and the CMakeLists.txt from which I prepared my VisualStudio solution:
project(QOpenGLWidgetNav)
cmake_minimum_required(VERSION 3.10.0)
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
#set(CMAKE_CXX_STANDARD 17)
#set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
find_package(Qt5Widgets CONFIG REQUIRED)
include_directories("${CMAKE_SOURCE_DIR}")
add_executable(testQOpenGLWidgetNav
testQOpenGLWidgetNav.cc)
target_link_libraries(testQOpenGLWidgetNav
Qt5::Widgets)
Demo Output:

Billboarding C++

I got code from my teacher that currently shows a 3D globe and a 2D particle system. The camera moves around in circles. The particle system is supposed to face the camera.
According to my lecture notes, I have to multiply the billboard with the inverse of the camera's view matrix. I would love to try that, but I have trouble using the variable for the view matrix.
#include "pch.h"
#include <Kore/Application.h>
#include <Kore/IO/FileReader.h>
#include <Kore/Math/Core.h>
#include <Kore/Math/Random.h>
#include <Kore/System.h>
#include <Kore/Input/Keyboard.h>
#include <Kore/Input/Mouse.h>
#include <Kore/Audio/Mixer.h>
#include <Kore/Graphics/Image.h>
#include <Kore/Graphics/Graphics.h>
#include <Kore/Log.h>
#include "ObjLoader.h"
#include "Collision.h"
#include "PhysicsWorld.h"
#include "PhysicsObject.h"
using namespace Kore;
// A simple particle implementation
class Particle {
public:
VertexBuffer* vb;
IndexBuffer* ib;
mat4 M;
// The current position
vec3 position;
// The current velocity
vec3 velocity;
// The remaining time to live
float timeToLive;
// The total time to live
float totalTimeToLive;
// Is the particle dead (= ready to be re-spawned?)
bool dead;
void init(const VertexStructure& structure) {
vb = new VertexBuffer(4, structure,0);
float* vertices = vb->lock();
SetVertex(vertices, 0, -1, -1, 0, 0, 0);
SetVertex(vertices, 1, -1, 1, 0, 0, 1);
SetVertex(vertices, 2, 1, 1, 0, 1, 1);
SetVertex(vertices, 3, 1, -1, 0, 1, 0);
vb->unlock();
// Set index buffer
ib = new IndexBuffer(6);
int* indices = ib->lock();
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 0;
indices[4] = 2;
indices[5] = 3;
ib->unlock();
dead = true;
}
void Emit(vec3 pos, vec3 velocity, float timeToLive) {
position = pos;
this->velocity = velocity;
dead = false;
this->timeToLive = timeToLive;
totalTimeToLive = timeToLive;
}
Particle() {
}
void SetVertex(float* vertices, int index, float x, float y, float z, float u, float v) {
vertices[index* 8 + 0] = x;
vertices[index*8 + 1] = y;
vertices[index*8 + 2] = z;
vertices[index*8 + 3] = u;
vertices[index*8 + 4] = v;
vertices[index*8 + 5] = 0.0f;
vertices[index*8 + 6] = 0.0f;
vertices[index*8 + 7] = -1.0f;
}
void render(TextureUnit tex, Texture* image) {
Graphics::setTexture(tex, image);
Graphics::setVertexBuffer(*vb);
Graphics::setIndexBuffer(*ib);
Graphics::drawIndexedVertices();
}
void Integrate(float deltaTime) {
timeToLive -= deltaTime;
if (timeToLive < 0.0f) {
dead = true;
}
// Note: We are using no forces or gravity at the moment.
position += velocity * deltaTime;
// Build the matrix
M = mat4::Translation(position.x(), position.y(), position.z()) * mat4::Scale(0.2f, 0.2f, 0.2f);
}
};
class ParticleSystem {
public:
// The center of the particle system
vec3 position;
// The minimum coordinates of the emitter box
vec3 emitMin;
// The maximal coordinates of the emitter box
vec3 emitMax;
// The list of particles
Particle* particles;
// The number of particles
int numParticles;
// The spawn rate
float spawnRate;
// When should the next particle be spawned?
float nextSpawn;
ParticleSystem(int maxParticles, const VertexStructure& structure ) {
particles = new Particle[maxParticles];
numParticles = maxParticles;
for (int i = 0; i < maxParticles; i++) {
particles[i].init(structure);
}
spawnRate = 0.05f;
nextSpawn = spawnRate;
position = vec3(0.5f, 1.3f, 0.5f);
float b = 0.1f;
emitMin = position + vec3(-b, -b, -b);
emitMax = position + vec3(b, b, b);
}
void update(float deltaTime) {
// Do we need to spawn a particle?
nextSpawn -= deltaTime;
bool spawnParticle = false;
if (nextSpawn < 0) {
spawnParticle = true;
nextSpawn = spawnRate;
}
for (int i = 0; i < numParticles; i++) {
if (particles[i].dead) {
if (spawnParticle) {
EmitParticle(i);
spawnParticle = false;
}
}
particles[i].Integrate(deltaTime);
}
}
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V) {
Graphics::setBlendingMode(BlendingOperation::SourceAlpha, BlendingOperation::InverseSourceAlpha);
Graphics::setRenderState(RenderState::DepthWrite, false);
/************************************************************************/
/* Exercise 7 1.1 */
/************************************************************************/
/* Change the matrix V in such a way that the billboards are oriented towards the camera */
/************************************************************************/
/* Exercise 7 1.2 */
/************************************************************************/
/* Animate using at least one new control parameter */
for (int i = 0; i < numParticles; i++) {
// Skip dead particles
if (particles[i].dead) continue;
Graphics::setMatrix(mLocation, particles[i].M * V);
particles[i].render(tex, image);
}
Graphics::setRenderState(RenderState::DepthWrite, true);
}
float getRandom(float minValue, float maxValue) {
int randMax = 1000000;
int randInt = Random::get(0, randMax);
float r = (float) randInt / (float) randMax;
return minValue + r * (maxValue - minValue);
}
void EmitParticle(int index) {
// Calculate a random position inside the box
float x = getRandom(emitMin.x(), emitMax.x());
float y = getRandom(emitMin.y(), emitMax.y());
float z = getRandom(emitMin.z(), emitMax.z());
vec3 pos;
pos.set(x, y, z);
vec3 velocity(0, 0.3f, 0);
particles[index].Emit(pos, velocity, 3.0f);
}
};
namespace {
const int width = 1024;
const int height = 768;
double startTime;
Shader* vertexShader;
Shader* fragmentShader;
Program* program;
float angle = 0.0f;
// null terminated array of MeshObject pointers
MeshObject* objects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// null terminated array of PhysicsObject pointers
PhysicsObject* physicsObjects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// The view projection matrix aka the camera
mat4 P;
mat4 View;
mat4 PV;
vec3 cameraPosition;
MeshObject* sphere;
PhysicsObject* po;
PhysicsWorld physics;
// uniform locations - add more as you see fit
TextureUnit tex;
ConstantLocation pvLocation;
ConstantLocation mLocation;
ConstantLocation tintLocation;
Texture* particleImage;
ParticleSystem* particleSystem;
double lastTime;
void update() {
double t = System::time() - startTime;
double deltaT = t - lastTime;
//Kore::log(Info, "%f\n", deltaT);
lastTime = t;
Kore::Audio::update();
Graphics::begin();
Graphics::clear(Graphics::ClearColorFlag | Graphics::ClearDepthFlag, 0xff9999FF, 1000.0f);
Graphics::setFloat4(tintLocation, vec4(1, 1, 1, 1));
program->set();
angle += 0.3f * deltaT;
float x = 0 + 3 * Kore::cos(angle);
float z = 0 + 3 * Kore::sin(angle);
cameraPosition.set(x, 2, z);
//PV = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100) * mat4::lookAt(vec3(0, 2, -3), vec3(0, 2, 0), vec3(0, 1, 0));
P = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100);
View = mat4::lookAt(vec3(x, 2, z), vec3(0, 2, 0), vec3(0, 1, 0));
PV = P * View;
Graphics::setMatrix(pvLocation, PV);
// iterate the MeshObjects
MeshObject** current = &objects[0];
while (*current != nullptr) {
// set the model matrix
Graphics::setMatrix(mLocation, (*current)->M);
(*current)->render(tex);
++current;
}
// Update the physics
physics.Update(deltaT);
PhysicsObject** currentP = &physics.physicsObjects[0];
while (*currentP != nullptr) {
(*currentP)->UpdateMatrix();
Graphics::setMatrix(mLocation, (*currentP)->Mesh->M);
(*currentP)->Mesh->render(tex);
++currentP;
}
particleSystem->update(deltaT);
particleSystem->render(tex, particleImage, mLocation, View);
Graphics::end();
Graphics::swapBuffers();
}
void SpawnSphere(vec3 Position, vec3 Velocity) {
PhysicsObject* po = new PhysicsObject();
po->SetPosition(Position);
po->Velocity = Velocity;
po->Collider.radius = 0.2f;
po->Mass = 5;
po->Mesh = sphere;
// The impulse should carry the object forward
// Use the inverse of the view matrix
po->ApplyImpulse(Velocity);
physics.AddObject(po);
}
void keyDown(KeyCode code, wchar_t character) {
if (code == Key_Space) {
// The impulse should carry the object forward
// Use the inverse of the view matrix
vec4 impulse(0, 0.4, 2, 0);
mat4 viewI = View;
viewI.Invert();
impulse = viewI * impulse;
vec3 impulse3(impulse.x(), impulse.y(), impulse.z());
SpawnSphere(cameraPosition + impulse3 *0.2f, impulse3);
}
}
void keyUp(KeyCode code, wchar_t character) {
if (code == Key_Left) {
// ...
}
}
void mouseMove(int x, int y, int movementX, int movementY) {
}
void mousePress(int button, int x, int y) {
}
void mouseRelease(int button, int x, int y) {
}
void init() {
FileReader vs("shader.vert");
FileReader fs("shader.frag");
vertexShader = new Shader(vs.readAll(), vs.size(), VertexShader);
fragmentShader = new Shader(fs.readAll(), fs.size(), FragmentShader);
// This defines the structure of your Vertex Buffer
VertexStructure structure;
structure.add("pos", Float3VertexData);
structure.add("tex", Float2VertexData);
structure.add("nor", Float3VertexData);
program = new Program;
program->setVertexShader(vertexShader);
program->setFragmentShader(fragmentShader);
program->link(structure);
tex = program->getTextureUnit("tex");
pvLocation = program->getConstantLocation("PV");
mLocation = program->getConstantLocation("M");
tintLocation = program->getConstantLocation("tint");
objects[0] = new MeshObject("Base.obj", "Level/basicTiles6x6.png", structure);
objects[0]->M = mat4::Translation(0.0f, 1.0f, 0.0f);
sphere = new MeshObject("ball_at_origin.obj", "Level/unshaded.png", structure);
SpawnSphere(vec3(0, 2, 0), vec3(0, 0, 0));
Graphics::setRenderState(DepthTest, true);
Graphics::setRenderState(DepthTestCompare, ZCompareLess);
Graphics::setTextureAddressing(tex, U, Repeat);
Graphics::setTextureAddressing(tex, V, Repeat);
particleImage = new Texture("SuperParticle.png", true);
particleSystem = new ParticleSystem(100, structure);
}
}
int kore(int argc, char** argv) {
Application* app = new Application(argc, argv, width, height, 0, false, "Exercise7");
init();
app->setCallback(update);
startTime = System::time();
lastTime = 0.0f;
Kore::Mixer::init();
Kore::Audio::init();
Keyboard::the()->KeyDown = keyDown;
Keyboard::the()->KeyUp = keyUp;
Mouse::the()->Move = mouseMove;
Mouse::the()->Press = mousePress;
Mouse::the()->Release = mouseRelease;
app->start();
delete app;
return 0;
}
There's a comment where the teacher wants us to add the code.
The variable for the view matrix, "View", is inside a "namespace". I've only ever used namespaces like a library, but this one doesn't have a name. So how do I use it?
The comment says that we should change the matrix V. So do I just add V = inverse view matrix * model matrix to the code and that removes the rotation?
I'm sorry for the stupid questions; it's supposed to be a class for beginners but it's really anything but. The lecture notes aren't very helpful when it comes to the programming part, and I only found tutorials for OpenGL or Unity or DirectX, and we're not using any of those.
Please help me, I need to hand this in by Saturday morning, and I've already spent the last two days trying out code and I've got nothing so far!
You can find the whole thing here: https://github.com/TUDGameTechnology/Exercise7
You don't have to do anything special to access an unnamed namespace. This thread explains more.
You are most probably trying to reference View within methods that cannot see your namespace because of the order in which they are defined in your file.
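In other words, anything declared in an unnamed namespace is visible, without any qualifier, to all code that follows it in the same .cpp file. A tiny illustration (not from the exercise code):
// Minimal illustration: members of an unnamed namespace are used like ordinary
// file-scope names by everything defined after them in the same translation unit.
namespace {
    int counter = 0;   // plays the role of the View matrix in the exercise
}

void update()
{
    ++counter;         // no qualifier needed, just use the name
}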
This line in your update method:
particleSystem->render(tex, particleImage, mLocation, View);
is already passing View into the render method.
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V)
That means that in this case the parameter mat4 V is your camera view matrix.
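One way to use it (a sketch of the general idea, not the official exercise solution; rotationOnly() is a hypothetical helper, since how to zero the translation column depends on Kore's mat4 API): cancel the camera rotation by multiplying each particle's matrix with the inverse of the view matrix's rotation part, so the quads stay facing the viewer.
// Sketch for ParticleSystem::render(): replace V in the existing loop with the inverse
// of its rotation part. particles[i].M is Translation * Scale, so appending the inverse
// view rotation keeps position and size but makes the quad face the camera.
mat4 invViewRotation = rotationOnly(V);   // hypothetical helper: copy V, zero its translation
invViewRotation.Invert();                 // Invert() is used the same way in keyDown() above
for (int i = 0; i < numParticles; i++) {
    if (particles[i].dead) continue;
    Graphics::setMatrix(mLocation, particles[i].M * invViewRotation);
    particles[i].render(tex, image);
}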

FPS camera rotating by itself. QT 4.8 + OpenGL + C++

I'm trying to port my height map visualization program, written in C++, from SFML to Qt, so it can be shown in a widget and controlled by GUI elements.
The problem is that when I start the application, the camera starts to roll around its center very fast (actually, it looks like the terrain mesh is flying around the camera, like the Earth around the Sun :) ), without any action from my side (e.g. moving the mouse, pressing buttons).
The camera should move forward, back, left and right when I press W, A, S, D, and look around when I move the mouse (just typical FPS camera behavior).
I think the problem is in the program's main loop, because there's no standard while(true) { /* do something */ } approach in Qt, and it's a little confusing.
Here's my code:
OGLWidget class (here I'm drawing stuff; the problem is somewhere here, I think):
class OGLWidget :
public QGLWidget
{
Q_OBJECT
public:
OGLWidget(QWidget *parent = 0);
~OGLWidget(void);
public:
void paintGL();
void initializeGL();
void resizeGL();
public:
void updateCamera();
public slots:
void mainLoop();
protected:
void keyPressEvent(QKeyEvent *e);
void keyReleaseEvent(QKeyEvent *e);
private:
Terrain _terrain;
Camera _camera;
private:
int _keyPressed;
QTimer _timer;
QElapsedTimer _elapsedTimer;
float _simulationTime;
float _fps;
};
OGLWidget::OGLWidget(QWidget *parent) : QGLWidget(parent)
{
_terrain.loadHeightMap("normalHeightMap256_2.png");
_camera.setScreenDimension(this->width(), this->height());
//setting vertical sync
QGLFormat frmt;
frmt.setSwapInterval(1);
setFormat(frmt);
setMouseTracking(true);
setFocus();
_simulationTime = 0;
_fps = 1.f / 60.f;
connect(&_timer, SIGNAL(timeout()), this, SLOT(mainLoop()));
_timer.start();
_elapsedTimer.start();
}
OGLWidget::~OGLWidget(void)
{
}
void OGLWidget::mainLoop()
{
_simulationTime += _elapsedTimer.elapsed();
_elapsedTimer.restart();
while(_simulationTime > _fps)
{
_simulationTime -= _fps;
updateCamera();
}
updateGL();
}
void OGLWidget::updateCamera()
{
QPoint p = mapFromGlobal(QCursor::pos());
_camera.computeMatrices(p.x(), p.y(), _fps, _keyPressed);
glm::mat4 ViewMatrix = _camera.getViewMatrix();
glm::mat4 ProjectionMatrix = _camera.getProjectionMatrix();
glm::mat4 ModelMatrix = glm::mat4(1.0);
_terrain.setMvp(ProjectionMatrix * ViewMatrix * ModelMatrix);
QPoint center = mapToGlobal(QPoint(this->width() / 2, this->height() / 2));
QCursor::setPos(center);
}
void OGLWidget::initializeGL()
{
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)
{
return;
}
glViewport(0, 0, this->width(), this->height());
_terrain.init();
}
void OGLWidget::paintGL()
{
_terrain.draw();
}
void OGLWidget::resizeGL()
{
glViewport(0, 0, this->width(), this->height());
}
void OGLWidget::keyPressEvent(QKeyEvent *e)
{
switch(e->key())
{
case Qt::Key::Key_Escape:
exit(0);
break;
case Qt::Key::Key_W:
_keyPressed = Key::KEY_PRESSED_UP;
break;
case Qt::Key::Key_S:
_keyPressed = Key::KEY_PRESSED_DOWN;
break;
case Qt::Key::Key_A:
_keyPressed = Key::KEY_PRESSED_LEFT;
break;
case Qt::Key::Key_D:
_keyPressed = Key::KEY_PRESSED_RIGHT;
break;
}
}
void OGLWidget::keyReleaseEvent(QKeyEvent *e)
{
if(e->key() == Qt::Key::Key_W ||
e->key() == Qt::Key::Key_S ||
e->key() == Qt::Key::Key_A ||
e->key() == Qt::Key::Key_D)
_keyPressed = KEY_RELEASED;
}
I'm absolutely sure that the Terrain and Camera classes work correctly, because I haven't changed their code since my SFML project (except for using QImage instead of sf::Image, but that works correctly too).
Camera main algorithm:
void Camera::computeMatrices(int mouseXpos, int mouseYpos, float deltaTime, int keyPressed)
{
_horizontalAngle += _mouseSpeed * deltaTime * float(_screenWidth / 2 - mouseXpos);
_verticalAngle += _mouseSpeed * deltaTime * float(_screenHeight / 2 - mouseYpos);
_direction = glm::vec3
(
cos(_verticalAngle) * sin(_horizontalAngle),
sin(_verticalAngle),
cos(_verticalAngle) * cos(_horizontalAngle)
);
glm::vec3 right = glm::vec3
(
sin(_horizontalAngle - 3.14f/2.0f),
0,
cos(_horizontalAngle - 3.14f/2.0f)
);
glm::vec3 up = glm::cross( right, _direction );
switch(keyPressed)
{
case Key::KEY_PRESSED_UP:
_position += _direction * deltaTime * _speed;
break;
case Key::KEY_PRESSED_DOWN:
_position -= _direction * deltaTime * _speed;
break;
case Key::KEY_PRESSED_LEFT:
_position -= right * deltaTime * _speed;
break;
case Key::KEY_PRESSED_RIGHT:
_position += right * deltaTime * _speed;
break;
case Key::KEY_RELEASED:
break;
}
_projectionMatrix = glm::perspective(_initialFoV, 4.0f / 3.0f, 0.1f, 1000.0f);
_viewMatrix = glm::lookAt
(
_position, // Camera is here
_position+_direction, // and looks here : at the same position, plus "direction"
up // Head is up (set to 0,-1,0 to look upside-down)
);
}
Help me fix this issue.
OK, I figured out the problem with the spinning camera. The cause was that I had hardcoded an aspect ratio in Camera::computeMatrices and used a widget resolution that didn't match it:
_projectionMatrix = glm::perspective
(
_initialFoV,
4.0f / 3.0f, //here it is
0.1f,
1000.0f
);
I changed 4.0f / 3.0f to (float)_screenWidth / (float)_screenHeight but it didn't help either.
So then I just changed the resolution of my widget to 800 x 600 and that helped.
The new problem is that it only works with 4:3 dimensions (e.g. 800x600, 1024x768).
The best way to correct this is to normalize the direction vector after computing it:
_direction = glm::vec3
(
cos(_verticalAngle) * sin(_horizontalAngle),
sin(_verticalAngle),
cos(_verticalAngle) * cos(_horizontalAngle)
);
_direction.normalize();
...