Fill array with vectors in C++

I am rather new to C++ and would like to achieve the following:
ttgl::vec3f positions[] = {
ttgl::vec3f(-1.0f, 1.0f, 0.0f),
ttgl::vec3f(1.0f, 1.0f, 0.0f),
ttgl::vec3f(1.0f, -1.0f, 0.0f),
ttgl::vec3f(1.0f, -1.0f, 0.0f),
ttgl::vec3f(-1.0f, -1.0f, 0.0f),
ttgl::vec3f(-1.0f, 1.0f, 0.0f),
};
The problem is that I don't know the values in advance and have to fill this array dynamically.
I try to achieve this with the following function:
void getCirclePositions(GLfloat radius, GLint sides) {
ttgl::vec3f center = ttgl::vec3f(0.0f, 0.0f, 0.0f);
GLfloat angle = (2.0f * M_PI) / sides;
ttgl::vec3f positions[100];
positions[0] = center;
for (int i = 1; i <= sides; i++) {
GLfloat angleFan = angle * (i + 1);
GLfloat xCoordinate = radius * cos(angleFan);
GLfloat yCoordinate = radius * sin(angleFan);
ttgl::vec3f point = ttgl::vec3f(xCoordinate, yCoordinate, 0.0f);
positions[i] = point;
}
return positions;
};
This leads to the following error:
Run-Time Check Failure #2 - Stack around the variable 'positions' was
corrupted.
How could I insert the values correctly?
EDIT
The function is called as follows:
getCirclePositions(1.0f, 100);
I edited the code accordingly and the error is solved. Thanks for that.
void getCirclePositions(GLfloat radius, GLint sides) {
ttgl::vec3f center = ttgl::vec3f(0.0f, 0.0f, 0.0f);
GLfloat angle = (2.0f * M_PI) / sides;
ttgl::vec3f positions[100];
positions[0] = center;
for (int i = 1; i < sides; i++) {
GLfloat angleFan = angle * (i + 1);
GLfloat xCoordinate = radius * cos(angleFan);
GLfloat yCoordinate = radius * sin(angleFan);
ttgl::vec3f point = ttgl::vec3f(xCoordinate, yCoordinate, 0.0f);
positions[i] = point;
}
for (int i = 0; i < sides; i++) {
std::cout << positions[i];
}
};
How can I print this array?
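For reference, a minimal sketch of one common alternative (not the original code): return a std::vector instead of a fixed-size stack array, so the container grows to fit sides and printing is a plain loop. It assumes ttgl::vec3f has the (float, float, float) constructor used above and exposes x, y and z members:
#include <cmath>
#include <iostream>
#include <vector>

std::vector<ttgl::vec3f> getCirclePositions(GLfloat radius, GLint sides) {
    std::vector<ttgl::vec3f> positions;
    positions.reserve(sides + 2);
    positions.emplace_back(0.0f, 0.0f, 0.0f);            // fan center
    const GLfloat angle = static_cast<GLfloat>(2.0 * M_PI) / sides;
    for (GLint i = 0; i <= sides; ++i) {                 // <= repeats the first rim vertex to close the fan
        positions.emplace_back(radius * std::cos(angle * i),
                               radius * std::sin(angle * i),
                               0.0f);
    }
    return positions;
}

void printPositions(const std::vector<ttgl::vec3f> &positions) {
    // vec3f may not provide operator<<, so print the components explicitly.
    for (const auto &p : positions)
        std::cout << p.x << " " << p.y << " " << p.z << "\n";
}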

Related

How to rotate this mesh correctly?

I draw a cube with 6 calls of draw_plane. I generate a flat XY plane, then I rotate it to create the cube faces. I need to rotate this whole cube correctly.
Before rotation
planeXY.setupMesh();
planeXY1.setupMesh();
planeXY1.modelMatrix.translate(QVector3D(0.0, 0.0, 2.0));
planeZY.setupMesh();
planeZY.modelMatrix.rotate(-90.0f, QVector3D(0.0f, 1.0f, 0.0f));
planeZY1.setupMesh();
planeZY1.modelMatrix.translate(QVector3D(2.0, 0.0, 0.0));
planeZY1.modelMatrix.rotate(-90.0f, QVector3D(0.0f, 1.0f, 0.0f));
planeXZ.setupMesh();
planeXZ.modelMatrix.rotate(90.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeXZ1.setupMesh();
planeXZ1.modelMatrix.translate(QVector3D(0.0, 2.0, 0.0));
planeXZ1.modelMatrix.rotate(90.0f, QVector3D(1.0f, 0.0f, 0.0f));
Plane generation
Mesh CreateMeshPlane(Vector3D bottomleft, size_t numvertices_x,
size_t numvertices_y, size_t worldsize_x,
size_t worldsize_y)
{
Mesh mesh;
int numVerts = numvertices_x * numvertices_y;
int numFaces = (numvertices_x - 1) * (numvertices_y - 1);
int numIndices = numFaces * 6;
float xStep = (float)worldsize_x / (numvertices_x - 1);
float yStep = (float)worldsize_y / (numvertices_y - 1);
mesh.vertices.resize(numVerts);
mesh.indices.resize(numIndices);
for (size_t y = 0; y < numvertices_y; y++)
{
for (size_t x = 0; x < numvertices_x; x++)
{
mesh.vertices[x + (y * numvertices_x)] = QVector3D(bottomleft.x() + (xStep * x), bottomleft.y() + (yStep * y), bottomleft.z());
}
}
int offset = 0;
for (int i = 0; i < numIndices; i+=6)
{
unsigned int cornerIndex = i/6 + offset;
if ((cornerIndex + 1)%numvertices_x == 0)
{
offset++;
cornerIndex++;
}
// First triangle
int idx = 0;
Face face;
face.FaceIndices.resize(6);
face.Edges.push_back(Edge((unsigned int)cornerIndex, (unsigned int)cornerIndex + numvertices_x));
face.FaceIndices[idx] = (unsigned int)cornerIndex;/*0*/
idx++;
face.FaceIndices[idx] = (unsigned int)cornerIndex + numvertices_x; /*3*/
idx++;
face.Edges.push_back(Edge((unsigned int)cornerIndex + numvertices_x, (unsigned int)cornerIndex + numvertices_x + 1));
face.FaceIndices[idx] = (unsigned int)cornerIndex + numvertices_x + 1; /*4*/
idx++;
// Second triangle
face.FaceIndices[idx] = (unsigned int)cornerIndex;
face.Edges.push_back(Edge((unsigned int)cornerIndex, (unsigned int)cornerIndex + 1));
idx++;
face.FaceIndices[idx] = (unsigned int)cornerIndex + numvertices_x + 1;
idx++;
face.Edges.push_back(Edge((unsigned int)cornerIndex + numvertices_x + 1, (unsigned int)cornerIndex + 1));
face.FaceIndices[idx] = (unsigned int)cornerIndex + 1;
mesh.faces.push_back(face);
}
return mesh;
}
Creating planes
planeXZ = CreateMeshPlane({0.0, 0.0, 0.0}, ny, nx, 2, 2);
planeXZ1 = CreateMeshPlane({0.0, 0.0, 0.0}, ny, nx, 2, 2);
planeXY = CreateMeshPlane({0.0, 0.0, 0.0}, ny, nz, 2, 2);
planeXY1 = CreateMeshPlane({0.0, 0.0, 0.0}, ny, nz, 2, 2);
planeZY = CreateMeshPlane({0.0,0.0,0.0}, nx, nz, 2, 2);
planeZY1 = CreateMeshPlane({0.0, 0.0,0.0}, nx, nz, 2, 2);
Then I try to rotate all planes around the X axis. My code:
if(ev->key() == Qt::Key_R)
{
planeXY.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeXY1.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeZY.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeZY1.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeXZ.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
planeXZ1.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
}
Result:
We can see that each plane rotates around its own X axis. How can I rotate all planes around the global X axis?
In your keyboard event you need to apply the rotation from the other side, i.e. pre-multiply it onto the existing model matrices. So your code should look similar to this:
if(ev->key() == Qt::Key_R)
{
QMatrix4x4 mat;
//mat.translate(rotationCenter.x, rotationCenter.y, rotationCenter.z);
mat.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
//mat.translate(-rotationCenter.x, -rotationCenter.y, -rotationCenter.z);
planeXY.modelMatrix = mat * planeXY.modelMatrix;
planeXY1.modelMatrix = mat * planeXY1.modelMatrix;
planeZY.modelMatrix = mat * planeZY.modelMatrix;
planeZY1.modelMatrix = mat * planeZY1.modelMatrix;
planeXZ.modelMatrix = mat * planeXZ.modelMatrix;
planeXZ1.modelMatrix = mat * planeXZ1.modelMatrix;
}
(I have never used Qt, so there might be some typos.)
When you do not want to rotate about the origin of world space, you need the two translations that are commented out.
I think Qt and OpenGL do matrix multiplication from right to left, so the order
planeXY1.modelMatrix.translate(QVector3D(0.0, 0.0, 2.0));
planeXY.modelMatrix.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));
actually does the 20 degree rotation first and then the translation. I think you want the translation to happen first and then the rotation, so maybe reverse the order of the matrices and see if that helps.
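For illustration, a small hedged sketch of that ordering difference (untested, same Qt caveat as above): QMatrix4x4::translate() and rotate() both post-multiply the current matrix, so with the matrix applied as M * vertex, the call issued last acts on the vertex first.
#include <QMatrix4x4>
#include <QVector3D>

void orderingDemo() {
    QMatrix4x4 a;                                      // M = T * R:
    a.translate(QVector3D(0.0f, 0.0f, 2.0f));          // applied to the vertex second
    a.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));      // applied first: the plane spins around its own local origin

    QMatrix4x4 b;                                      // M = R * T:
    b.rotate(20.0f, QVector3D(1.0f, 0.0f, 0.0f));      // applied second: the already-translated plane orbits the global X axis
    b.translate(QVector3D(0.0f, 0.0f, 2.0f));          // applied to the vertex first
}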

What is the right way to update "front" vector with custom "world up" vector?

I am trying to implement a "planet" world. At any point on its surface, with my old camera class, I got a result like this:
The camera class update code is:
void cCamera::ProcessMouseMovement(GLdouble dt, glm::vec2 curOffset)
{
curOffset.x *= static_cast<GLfloat>(dt * m_MouseSensitivity);
curOffset.y *= static_cast<GLfloat>(dt * m_MouseSensitivity);
m_fPitch += curOffset.x;
m_fYaw += curOffset.y;
if (m_fYaw > 89.0f)
m_fYaw = 89.0f;
if (m_fYaw < -89.0f)
m_fYaw = -89.0f;
UpdateCameraVectors();
}
void cCamera::UpdateCameraVectors(void)
{
float fYawRad = glm::radians(m_fYaw);
float fPitchRad = glm::radians(m_fPitch);
float fYawCos = cos(fYawRad);
glm::vec3 front { cos(fPitchRad) * fYawCos,
sin(fYawRad),
sin(fPitchRad) * fYawCos };
*m_pCameraFront = glm::normalize(front);
m_Right = glm::normalize(glm::cross(*m_pCameraFront, m_WorldUp));
m_Up = glm::normalize(glm::cross(m_Right, front));
*m_pViewMatrix = glm::lookAt(glm::vec3(0.f), glm::vec3(0.f) + *m_pCameraFront, m_Up);
}
As (0,0,0) is the planet center, I suppose I should update the m_WorldUp vector this way:
m_WorldUp = glm::normalize(*m_pPos);
And indeed, the results are good: the camera is turned the right way, but its rotation is broken. Yaw and pitch still depend on the old world up. I think I should update the front vector with the new m_WorldUp, but I don't really know how.
The solution was found here: https://gamedev.stackexchange.com/questions/73588/how-do-i-fix-my-planet-facing-camera
In my case the code is:
void cCamera::UpdateCameraVectors(void)
{
m_WorldUp = glm::normalize(*m_pPos);
glm::quat world_axes_rotation = glm::angleAxis(glm::radians(m_fPitch), glm::vec3(0.0f, -1.0f, 0.0f));
world_axes_rotation = glm::normalize(world_axes_rotation);
world_axes_rotation = glm::rotate(world_axes_rotation, glm::radians(m_fYaw), glm::vec3(1.0f, 0.0f, 0.0f));
m_Pole = glm::normalize(m_Pole - glm::dot(m_WorldUp, m_Pole) * m_WorldUp);
glm::mat4 local_transform;
local_transform[0] = glm::vec4(m_Pole, 0.0f);
local_transform[1] = glm::vec4(m_WorldUp, 0.0f);
local_transform[2] = glm::vec4(glm::cross(m_Pole, m_WorldUp), 0.0f);
local_transform[3] = glm::vec4(0.f, 0.f, 0.f, 1.0f);
world_axes_rotation = glm::normalize(world_axes_rotation);
*m_pViewMatrix = local_transform * glm::mat4_cast(world_axes_rotation);
*m_pCameraFront = -1.0f * glm::vec3((*m_pViewMatrix)[2]);
m_Up = glm::vec3((*m_pViewMatrix)[1]);
m_Right = glm::vec3((*m_pViewMatrix)[0]);
*m_pViewMatrix = glm::inverse(*m_pViewMatrix);
}

pure opengl locking mouse to screen center

I have a problem when I lock the cursor to the screen center with SetCursorPos.
My problem is that it stops my camera from rotating; the camera works fine when I don't try to lock it, but then it allows the mouse to drag off the screen when not in full screen.
void SetPosition(HWND hWnd, int x, int y)
{
POINT pt;
pt.x = x;
pt.y = y;
ClientToScreen(hWnd, &pt);
SetCursorPos(pt.x, pt.y);
}
void COpenGLRenderer::OnMouseMove(int X, int Y)
{
//This works fine if I don't reset the mouse position and just look around
Camera.OnMouseMove(LastX - X, LastY - Y);
if (LastX != X && LastY != Y)
SetPosition(hWnd, this->Width / 2, this->Height / 2);
LastX = X;
LastY = Y;
}
void CCamera::OnMouseMove(int dx, int dy)
{
float Sensitivity = 0.25f;
Position -= Reference;
if(dx != 0)
{
float DeltaX = (float)dx * Sensitivity;
X = rotate(X, DeltaX, vec3(0.0f, 1.0f, 0.0f));
Y = rotate(Y, DeltaX, vec3(0.0f, 1.0f, 0.0f));
Z = rotate(Z, DeltaX, vec3(0.0f, 1.0f, 0.0f));
}
if(dy != 0)
{
float DeltaY = (float)dy * Sensitivity;
Y = rotate(Y, DeltaY, X);
Z = rotate(Z, DeltaY, X);
if(Y.y < 0.0f)
{
Z = vec3(0.0f, Z.y > 0.0f ? 1.0f : -1.0f, 0.0f);
Y = cross(Z, X);
}
}
Position = Reference + Z * length(Position);
CalculateViewMatrix();
}
void CCamera::CalculateViewMatrix()
{
ViewMatrix = mat4x4(X.x, Y.x, Z.x, 0.0f, X.y, Y.y, Z.y, 0.0f, X.z, Y.z, Z.z, 0.0f, -dot(X, Position), -dot(Y, Position), -dot(Z, Position), 1.0f);
ViewMatrixInverse = inverse(ViewMatrix);
ViewProjectionMatrix = ProjectionMatrix * ViewMatrix;
ViewProjectionMatrixInverse = ViewMatrixInverse * ProjectionMatrixInverse;
}

Custom vertex processor doesn't work - matrix multiplication error or something else?

I'm writing a simple renderer in C++. It uses conventions similar to OpenGL, but it uses neither OpenGL nor DirectX. float3, float4 and float4x4 are my own custom structures.
The problem is that when I set the eye somewhere other than (0, 0, 0), I get strange results, with triangles where I would not expect to see them.
I guess it's because of a wrong matrix multiplication formula, a wrong multiplication order, normalization, or a wrong lookAt/setPerspective formula. But I'm stuck and cannot find the mistake.
I will upload some illustrations/screenshots later, as I don't have access to them now.
I use column-major notation for matrices (matrix[column][row]), like OpenGL does.
Here is the matrix multiplication code:
class float4x4 { //[column][row]
float4 columns[4];
public:
float4x4 multiplyBy(float4x4 &b){
float4x4 c = float4x4();
c.columns[0] = float4(
columns[0].x * b.columns[0].x + columns[1].x * b.columns[0].y + columns[2].x * b.columns[0].z + columns[3].x * b.columns[0].w,
columns[0].y * b.columns[0].x + columns[1].y * b.columns[0].y + columns[2].y * b.columns[0].z + columns[3].y * b.columns[0].w,
columns[0].z * b.columns[0].x + columns[1].z * b.columns[0].y + columns[2].z * b.columns[0].z + columns[3].z * b.columns[0].w,
columns[0].w * b.columns[0].x + columns[1].w * b.columns[0].y + columns[2].w * b.columns[0].z + columns[3].w * b.columns[0].w
);
c.columns[1] = float4(
columns[0].x * b.columns[1].x + columns[1].x * b.columns[1].y + columns[2].x * b.columns[1].z + columns[3].x * b.columns[1].w,
columns[0].y * b.columns[1].x + columns[1].y * b.columns[1].y + columns[2].y * b.columns[1].z + columns[3].y * b.columns[1].w,
columns[0].z * b.columns[1].x + columns[1].z * b.columns[1].y + columns[2].z * b.columns[1].z + columns[3].z * b.columns[1].w,
columns[0].w * b.columns[1].x + columns[1].w * b.columns[1].y + columns[2].w * b.columns[1].z + columns[3].w * b.columns[1].w
);
c.columns[2] = float4(
columns[0].x * b.columns[2].x + columns[1].x * b.columns[2].y + columns[2].x * b.columns[2].z + columns[3].x * b.columns[2].w,
columns[0].y * b.columns[2].x + columns[1].y * b.columns[2].y + columns[2].y * b.columns[2].z + columns[3].y * b.columns[2].w,
columns[0].z * b.columns[2].x + columns[1].z * b.columns[2].y + columns[2].z * b.columns[2].z + columns[3].z * b.columns[2].w,
columns[0].w * b.columns[2].x + columns[1].w * b.columns[2].y + columns[2].w * b.columns[2].z + columns[3].w * b.columns[2].w
);
c.columns[3] = float4(
columns[0].x * b.columns[3].x + columns[1].x * b.columns[3].y + columns[2].x * b.columns[3].z + columns[3].x * b.columns[3].w,
columns[0].y * b.columns[3].x + columns[1].y * b.columns[3].y + columns[2].y * b.columns[3].z + columns[3].y * b.columns[3].w,
columns[0].z * b.columns[3].x + columns[1].z * b.columns[3].y + columns[2].z * b.columns[3].z + columns[3].z * b.columns[3].w,
columns[0].w * b.columns[3].x + columns[1].w * b.columns[3].y + columns[2].w * b.columns[3].z + columns[3].w * b.columns[3].w
);
return c;
}
float4 multiplyBy(const float4 &b){
//based on http://stackoverflow.com/questions/25805126/vector-matrix-product-efficiency-issue
float4x4 a = *this; //getTransposed(); ???
float4 result(
dotProduct(a[0], b),
dotProduct(a[1], b),
dotProduct(a[2], b),
dotProduct(a[3], b)
);
return result;
}
inline float4x4 getTransposed() {
float4x4 transposed;
for (unsigned i = 0; i < 4; i++) {
for (unsigned j = 0; j < 4; j++) {
transposed.columns[i][j] = columns[j][i];
}
}
return transposed;
}
};
Where #define dotProduct(a, b) a.getDotProduct(b) and:
inline float getDotProduct(const float4 &anotherVector) const {
return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z + w * anotherVector.w;
}
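As a reference point for the column-major convention, here is a hedged sketch of an alternative member of float4x4 (not part of the original class; it assumes float4's operator[] returns a mutable reference, as getTransposed() suggests). With columns[c][r] addressing column c, row r, the product A * b collects entry i of every column, whereas dotProduct(a[i], b), as used above, yields transpose(A) * b instead.
float4 multiplyColumnMajorBy(float4 b) {
    // result[row i] = sum over k of A(row i, column k) * b[k]
    float4 result(0.0f, 0.0f, 0.0f, 0.0f);
    for (int i = 0; i < 4; ++i)
        for (int k = 0; k < 4; ++k)
            result[i] += columns[k][i] * b[k];
    return result;
}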
My VertexProcessor:
class VertexProcessor {
float4x4 obj2world;
float4x4 world2view;
float4x4 view2proj;
float4x4 obj2proj;
public:
inline float3 tr(const float3 & v) { //in object space
float4 r = obj2proj.multiplyBy(float4(v.x, v.y, v.z, 1.0f/*v.w*/));
return float3(r.x / r.w, r.y / r.w, r.z / r.w); //we get vector in unified cube from -1,-1,-1 to 1,1,1
}
inline void transform() {
obj2proj = obj2world.multiplyBy(world2view);
obj2proj = obj2proj.multiplyBy(view2proj);
}
inline void setIdentity() {
obj2world = float4x4(
float4(1.0f, 0.0f, 0.0f, 0.0f),
float4(0.0f, 1.0f, 0.0f, 0.0f),
float4(0.0f, 0.0f, 1.0f, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
}
inline void setPerspective(float fovy, float aspect, float nearP, float farP) {
fovy *= PI / 360.0f;
float fValue = cos(fovy) / sin(fovy);
view2proj[0] = float4(fValue/aspect, 0.0f, 0.f, 0.0f);
view2proj[1] = float4(0.0f, fValue, 0.0f, 0.0f);
view2proj[2] = float4(0.0f, 0.0f, (farP + nearP) / (nearP - farP), -1.0f);
view2proj[3] = float4(0.0f, 0.0f, 2.0f * farP * nearP / (nearP - farP), 0.0f);
}
inline void setLookat(float3 eye, float3 center, float3 up) {
float3 f = center - eye;
f.normalizeIt();
up.normalizeIt();
float3 s = f.getCrossProduct(up);
float3 u = s.getCrossProduct(f);
world2view[0] = float4(s.x, u.x, -f.x, 0.0f);
world2view[1] = float4(s.y, u.y, -f.y, 0.0f);
world2view[2] = float4(s.z, u.z, -f.z, 0.0f);
world2view[3] = float4(eye/*.getNormalized() ???*/ * -1.0f, 1.0f);
}
inline void multByTranslation(float3 v) {
float4x4 m(
float4(1.0f, 0.0f, 0.0f, 0.0f),
float4(0.0f, 1.0f, 0.0f, 0.0f),
float4(0.0f, 0.0f, 1.0f, 0.0f),
float4(v.x, v.y, v.z, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
inline void multByScale(float3 v) {
float4x4 m(
float4(v.x, 0.0f, 0.0f, 0.0f),
float4(0.0f, v.y, 0.0f, 0.0f),
float4(0.0f, 0.0f, v.z, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
inline void multByRotation(float a, float3 v) {
float s = sin(a*PI / 180.0f), c = cos(a*PI / 180.0f);
v.normalizeIt();
float4x4 m(
float4(v.x*v.x*(1-c)+c, v.y*v.x*(1 - c) + v.z*s, v.x*v.z*(1-c)-v.y*s, 0.0f),
float4(v.x*v.y*(1-c)-v.z*s, v.y*v.y*(1-c)+c, v.y*v.z*(1-c)+v.x*s, 0.0f),
float4(v.x*v.z*(1-c)+v.y*s, v.y*v.z*(1-c)-v.x*s, v.z*v.z*(1-c)+c, 0.0f),
float4(0.0f, 0.0f, 0.0f, 1.0f)
);
world2view = m.multiplyBy(world2view);
}
};
And the Rasterizer:
class Rasterizer final {
Buffer * buffer = nullptr;
inline float toScreenSpaceX(float x) { return (x + 1) * buffer->getWidth() * 0.5f; }
inline float toScreenSpaceY(float y) { return (y + 1) * buffer->getHeight() * 0.5f; }
inline int orient2d(float ax, float ay, float bx, float by, const float2& c) {
return (bx - ax)*(c.y - ay) - (by - ay)*(c.x - ax);
}
public:
Rasterizer(Buffer * buffer) : buffer(buffer) {}
//v - position in screen space ([0, width], [0, height], [-1, 1])
void triangle(
float3 v0, float3 v1, float3 v2,
float3 n0, float3 n1, float3 n2,
float2 uv0, float2 uv1, float2 uv2,
Light * light0, Light * light1,
float3 camera, Texture * texture
) {
v0.x = toScreenSpaceX(v0.x);
v0.y = toScreenSpaceY(v0.y);
v1.x = toScreenSpaceX(v1.x);
v1.y = toScreenSpaceY(v1.y);
v2.x = toScreenSpaceX(v2.x);
v2.y = toScreenSpaceY(v2.y);
//based on: https://fgiesen.wordpress.com/2013/02/08/triangle-rasterization-in-practice/
//compute triangle bounding box
int minX = MIN3(v0.x, v1.x, v2.x);
int minY = MIN3(v0.y, v1.y, v2.y);
int maxX = MAX3(v0.x, v1.x, v2.x);
int maxY = MAX3(v0.y, v1.y, v2.y);
//clip against screen bounds
minX = MAX(minX, 0);
minY = MAX(minY, 0);
maxX = MIN(maxX, buffer->getWidth() - 1);
maxY = MIN(maxY, buffer->getHeight() - 1);
//rasterize
float2 p(0.0f, 0.0f);
for (p.y = minY; p.y <= maxY; p.y++) {
for (p.x = minX; p.x <= maxX; p.x++) {
// Determine barycentric coordinates
//int w0 = orient2d(v1.x, v1.y, v2.x, v2.y, p);
//int w1 = orient2d(v2.x, v2.y, v0.x, v0.y, p);
//int w2 = orient2d(v0.x, v0.y, v1.x, v1.y, p);
float w0 = (v1.y - v2.y)*(p.x - v2.x) + (v2.x - v1.x)*(p.y - v2.y);
w0 /= (v1.y - v2.y)*(v0.x - v2.x) + (v2.x - v1.x)*(v0.y - v2.y);
float w1 = (v2.y - v0.y)*(p.x - v2.x) + (v0.x - v2.x)*(p.y - v2.y);
w1 /= (v2.y - v0.y)*(v1.x - v2.x) + (v0.x - v2.x)*(v1.y - v2.y);
float w2 = 1 - w0 - w1;
// If p is on or inside all edges, render pixel.
if (w0 >= 0 && w1 >= 0 && w2 >= 0) {
float depth = w0 * v0.z + w1 * v1.z + w2 * v2.z;
if (depth < buffer->getDepthForPixel(p.x, p.y)) {
//...
buffer->setPixel(p.x, p.y, diffuse.r, diffuse.g, diffuse.b, ALPHA_VISIBLE, depth);
}
}
}
}
}
};
I strongly believe that the Rasterizer itself works well, because when I test it with this code (instead of the main loop):
float3 v0{ 0, 0, 0.1f };
float3 v1{ 0.5, 0, 0.1f };
float3 v2{ 1, 1, 0.1f };
//Rasterizer test (without VertexProcessor)
rasterizer->triangle(v0, v1, v2, n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);
I get the right image, with a triangle that has one corner at the middle of the screen ([0, 0] in unified space), one at the bottom-right corner ([1, 1]), and one at [0.5, 0].
The float3 structure:
class float3 {
public:
union {
struct { float x, y, z; };
struct { float r, g, b; };
float p[3];
};
float3() = delete;
float3(const float3 &other) : x(other.x), y(other.y), z(other.z) {}
float3(float x, float y, float z) : x(x), y(y), z(z) {}
float &operator[](unsigned index){
ERROR_HANDLE(index < 3, L"The float3 index out of bounds (0-2 range, " + C::toWString(index) + L" given).");
return p[index];
}
float getLength() const { return std::abs(sqrt(x*x + y*y + z*z)); }
void normalizeIt();
inline float3 getNormalized() const {
float3 result(*this);
result.normalizeIt();
return result;
}
inline float3 getCrossProduct(const float3 &anotherVector) const {
//based on: http://www.sciencehq.com/physics/vector-product-multiplying-vectors.html
return float3(
y * anotherVector.z - anotherVector.y * z,
z * anotherVector.x - anotherVector.z * x,
x * anotherVector.y - anotherVector.x * y
);
}
inline float getDotProduct(const float3 &anotherVector) const {
//based on: https://www.ltcconline.net/greenl/courses/107/Vectors/DOTCROS.HTM
return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z;
}
...
};
The main loop:
VertexProcessor vp;
DirectionalLight * light0 = new DirectionalLight({ 0.3f, 0.3f, 0.3f }, { 0.0f, -1.0f, 0.0f });
DirectionalLight * light1 = new DirectionalLight({ 0.4f, 0.4f, 0.4f }, { 0.0f, -1.0f, 0.5f });
while(!my_window.is_closed()) {
tgaBuffer.clearDepth(10.0f); //it could be 1.0f but 10.0f won't hurt, we draw pixel if it's depth < actual depth in buffer
tgaBuffer.clearColor(0, 0, 255, ALPHA_VISIBLE);
vp.setPerspective(75.0f, tgaBuffer.getWidth() / tgaBuffer.getHeight(), 10.0f, 2000.0f);
float3 eye = { 10.0f, 10.0f - frameTotal / 10.0f, 10.0f }; //animate eye
vp.setLookat(eye, float3{ 0.0f, 0.0f, 0.0f }.getNormalized(), { 0.0f, 1.0f, 0.0f });
vp.setIdentity();
//we could call e.g. vp.multByRotation(...) here, but we won't to keep it simple
vp.transform();
//bottom
drawTriangle(0, 1, 2);
drawTriangle(2, 3, 0);
drawTriangle(3, 2, 7);
drawTriangle(7, 2, 6);
drawTriangle(5, 1, 0);
drawTriangle(0, 5, 4);
drawTriangle(4, 5, 6);
drawTriangle(6, 7, 4);
frameTotal++;
}
Where drawTriangle(...) stands for:
#define drawTriangle(i0, i1, i2) rasterizer->triangle(vp.tr(v[i0]), vp.tr(v[i1]), vp.tr(v[i2]), v[i0], v[i1], v[i2], n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);
And here is the initialization of triangles' data:
float3 offset{ 0.0f, 0.0f, 0.0f };
v.push_back(offset + float3{ -10, -10, -10 });
v.push_back(offset + float3{ +10, -10, -10 });
v.push_back(offset + float3{ +10, -10, +10 });
v.push_back(offset + float3{ -10, -10, +10 });
v.push_back(offset + float3{ -10, +10, -10 });
v.push_back(offset + float3{ +10, +10, -10 });
v.push_back(offset + float3{ +10, +10, +10 });
v.push_back(offset + float3{ -10, +10, +10 });
I created a little C library for OpenGL a long time ago, mainly for learning purposes during my computer graphics studies. I've looked up my sources, and my implementations of the perspective projection and orientation differ quite a bit from yours.
pbm_Mat4 pbm_mat4_projection_perspective(PBfloat fov, PBfloat ratio, PBfloat near, PBfloat far) {
PBfloat t = near * tanf(fov / 2.0f);
PBfloat b = -t;
PBfloat r = ratio * t, l = ratio * b;
return pbm_mat4_create(pbm_vec4_create(2.0f * near / (r - l), 0, 0, 0),
pbm_vec4_create(0, 2.0f * near / (t - b), 0, 0),
pbm_vec4_create((r + l) / (r - l), (t + b) / (t - b), - (far + near) / (far - near), -1.0f),
pbm_vec4_create(0, 0, -2.0f * far * near / (far - near), 0));
}
pbm_Mat4 pbm_mat4_orientation_lookAt(pbm_Vec3 pos, pbm_Vec3 target, pbm_Vec3 up) {
pbm_Vec3 forward = pbm_vec3_normalize(pbm_vec3_sub(target, pos));
pbm_Vec3 right = pbm_vec3_normalize(pbm_vec3_cross(forward, up));
up = pbm_vec3_normalize(pbm_vec3_cross(right, forward));
forward = pbm_vec3_scalar(forward, -1);
pos = pbm_vec3_scalar(pos, -1);
return pbm_mat4_create(pbm_vec4_create_vec3(right),
pbm_vec4_create_vec3(up),
pbm_vec4_create_vec3(forward),
pbm_vec4_create_vec3_w(pbm_vec3_create(pbm_vec3_dot(right, pos),
pbm_vec3_dot(up, pos),
pbm_vec3_dot(forward, pos)), 1));
}
These methods are tested and you may want to check your code against them. If you want, the full sources are available here. Furthermore, you could revisit frustums and projection matrices online. Unfortunately, I cannot share the material from my university with you :(
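A hedged usage sketch of those helpers for comparison purposes (the function name buildReferenceMatrices is made up here; the pbm_* signatures are taken only from the snippets above, and fov is assumed to be in radians because of the tanf(fov / 2.0f) above):
void buildReferenceMatrices(void) {
    pbm_Vec3 eye    = pbm_vec3_create(10.0f, 10.0f, 10.0f);
    pbm_Vec3 target = pbm_vec3_create(0.0f, 0.0f, 0.0f);
    pbm_Vec3 up     = pbm_vec3_create(0.0f, 1.0f, 0.0f);
    pbm_Mat4 proj = pbm_mat4_projection_perspective(75.0f * 3.14159265f / 180.0f,
                                                    800.0f / 600.0f, 0.1f, 100.0f);
    pbm_Mat4 view = pbm_mat4_orientation_lookAt(eye, target, up);
    // Compare proj * view (* model) against the matrices produced by
    // setPerspective()/setLookat() for the same parameters.
    (void)proj;
    (void)view;
}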

Rotating a Open GL camera correctly using GLM

I have a camera class, which is initialized like so:
CameraFP::CameraFP() {
this->aspect_ratio = 800.0f / 600.0f;
this->fov = 45.0f;
this->near_plane = 0.1f;
this->far_plane = 1000.0f;
this->position = glm::vec3(0, 0, 0);
this->target = position + glm::vec3(0, 0, -1);
this->up = glm::vec3(0, 1, 0);
this->m_rotation = glm::mat4(1.0);
m_view = glm::lookAt(position, target, up);
m_projection = glm::perspective(fov, aspect_ratio, near_plane, far_plane);
}
And here are other functions of import:
void CameraFP::update(sf::Window *app) {
process_keyboard(app);
process_mouse(app);
calculate_view();
}
void CameraFP::process_keyboard(sf::Window *app) {
const sf::Input *input = &app->GetInput();
up = m_rotation * glm::vec3(0, 1, 0);
glm::vec3 forward = glm::vec3(0, 0, -1);
glm::vec3 forward_rotated = m_rotation * forward;
glm::vec3 right = glm::vec3(1, 0, 0);
glm::vec3 right_rotated = m_rotation * right;
if (input->IsKeyDown(sf::Key::W)) {
position += forward_rotated;
}
if (input->IsKeyDown(sf::Key::S)) {
position -= forward_rotated;
}
if (input->IsKeyDown(sf::Key::A)) {
position -= right_rotated;
}
if (input->IsKeyDown(sf::Key::D)) {
position += right_rotated;
}
}
void CameraFP::process_mouse(sf::Window *app) {
// TODO: Make the below constants, and take framerate into account
GLfloat SPEED_X = 0.000001f;
GLfloat SPEED_Y = 0.000001f;
GLfloat mouse_x = app->GetInput().GetMouseX();
GLfloat mouse_y = app->GetInput().GetMouseY();
GLfloat mouse_x_delta = old_mouse_x - mouse_x;
GLfloat mouse_y_delta = old_mouse_y - mouse_y;
if (mouse_x_delta != 0 ||
mouse_y_delta != 0) {
if (mouse_x_delta != 0) {
y_rot += mouse_x_delta * SPEED_X;
m_rotation = glm::rotate(m_rotation, y_rot, glm::vec3(0, 1, 0));
}
if (mouse_y_delta != 0) {
x_rot += mouse_y_delta * SPEED_Y;
m_rotation = glm::rotate(m_rotation, x_rot, glm::vec3(1, 0, 0));;
}
}
this->old_mouse_x = mouse_x;
this->old_mouse_y = mouse_y;
app->SetCursorPosition(app->GetWidth() / 2, app->GetHeight() / 2);
}
void CameraFP::calculate_view() {
glm::vec3 forward = glm::vec3(0, 0, -1);
glm::vec3 forward_rotated = m_rotation * forward;
target = position += glm::normalize(forward_rotated);
m_view = glm::lookAt(position, target, up);
}
My problem is that when I compile the project, the compiler outputs an error saying:
\CameraFP.cpp|59|error: no match for 'operator*' in '((CameraFP*)this)->CameraFP::m_rotation * glm::detail::tvec3<float>(((const int&)((const int*)(&0))), ((const int&)((const int*)(&1))), ((const int&)((const int*)(&0))))'|
From what I understand, vec = mat4 * vec should yield a rotated vector? Since I haven't been able to test this code, I don't know whether the functions work correctly.
Edit
Updated the code according to the comments and answers. My problem now is that I get a BSOD somewhere in the render function...
void CameraFP::process_keyboard(sf::Window *app) {
const sf::Input *input = &app->GetInput();
up = m_rotation * glm::vec4(0.0f, 1.0f, 0.0f, 0.0f);
glm::vec4 forward = glm::vec4(0.0f, 0.0f, -1.0f, 0.0f);
glm::vec4 forward_rotated = m_rotation * forward;
glm::vec4 right = glm::vec4(1.0f, 0.0f, 0.0f, 0.0f);
glm::vec4 right_rotated = m_rotation * right;
if (input->IsKeyDown(sf::Key::W)) {
position += forward_rotated;
}
if (input->IsKeyDown(sf::Key::S)) {
position -= forward_rotated;
}
if (input->IsKeyDown(sf::Key::A)) {
position -= right_rotated;
}
if (input->IsKeyDown(sf::Key::D)) {
position += right_rotated;
}
}
void CameraFP::process_mouse(sf::Window *app) {
// TODO: Make the below constants, and take framerate into account
GLfloat SPEED_X = 0.000001f;
GLfloat SPEED_Y = 0.000001f;
GLfloat mouse_x = app->GetInput().GetMouseX();
GLfloat mouse_y = app->GetInput().GetMouseY();
GLfloat mouse_x_delta = old_mouse_x - mouse_x;
GLfloat mouse_y_delta = old_mouse_y - mouse_y;
if (mouse_x_delta != 0 ||
mouse_y_delta != 0) {
if (mouse_x_delta != 0) {
y_rot += mouse_x_delta * SPEED_X;
m_rotation = glm::rotate(m_rotation, y_rot, glm::vec3(0.0f, 1.0f, 0.0f));
}
if (mouse_y_delta != 0) {
x_rot += mouse_y_delta * SPEED_Y;
m_rotation = glm::rotate(m_rotation, x_rot, glm::vec3(1.0f, 0.0f, 0.0f));;
}
}
this->old_mouse_x = mouse_x;
this->old_mouse_y = mouse_y;
app->SetCursorPosition(app->GetWidth() / 2, app->GetHeight() / 2);
}
void CameraFP::calculate_view() {
glm::vec4 forward = glm::vec4(0.0f, 0.0f, -1.0f, 0.0f);
glm::vec4 forward_rotated = m_rotation * forward;
target = position += forward_rotated;
m_view = glm::lookAt(v4tov3(position), v4tov3(target), v4tov3(up));
}
glm::vec3 v4tov3(glm::vec4 v1) {
return glm::vec3(v1.x, v1.y, v1.z);
}
Edit 2
The problem now is the camera rotation with the mouse; it just doesn't work. For some reason changes on the x axis often affect the y axis and vice versa. In addition, if I move the mouse right or left on the x axis (y rotation), the camera rotates left...
void CameraFP::process_mouse(sf::Clock *clock, sf::Window *app) {
// TODO: Make the below constants, and take framerate into account
GLfloat SPEED_X = 0.25f;
GLfloat SPEED_Y = 0.25f;
GLfloat screen_x = app->GetWidth();
GLfloat screen_y = app->GetHeight();
GLfloat mouse_x = float(screen_x / 2 - app->GetInput().GetMouseX());
GLfloat mouse_y = float(screen_y / 2 - app->GetInput().GetMouseY());
GLfloat mouse_x_delta = old_mouse_x - mouse_x;
GLfloat mouse_y_delta = old_mouse_y - mouse_y;
GLfloat current_time = clock->GetElapsedTime();
GLfloat delta_time = current_time - last_time;
this->last_time = current_time;
if (mouse_x_delta != 0 ||
mouse_y_delta != 0) {
if (mouse_x_delta != 0) {
y_rot += glm::radians(delta_time * SPEED_X * mouse_x);
m_rotation = glm::rotate(m_rotation, y_rot, glm::vec3(0.0f, 1.0f, 0.0f));
std::cout << "Y Rotation: " << y_rot << "\n";
}
if (mouse_y_delta != 0) {
x_rot += glm::radians(delta_time * SPEED_Y * mouse_y);
m_rotation = glm::rotate(m_rotation, x_rot, glm::vec3(1.0f, 0.0f, 0.0f));
std::cout << "X rotation: " << x_rot << "\n";
}
}
app->SetCursorPosition(screen_x / 2, screen_y / 2);
this->old_mouse_x = float(screen_x / 2 - app->GetInput().GetMouseX());
this->old_mouse_y = float(screen_y / 2 - app->GetInput().GetMouseY());
}
Replace all glm::vec3(0, 1, 0) with glm::vec3(0.0f, 1.0f, 0.0f).
As for the vec-mat multiplication, AquilaRapax is right in that you can only multiply a mat4 with a vec4. But since you're multiplying directions, the 4th coordinate should be 0.0f, not 1.0f. This has the effect of ignoring the translations (1.0f would take them into account, which you don't want).
See http://www.opengl-tutorial.org/beginners-tutorials/tutorial-3-matrices/ for details on matrices.
However, it's often a good idea to keep vec3s instead of vec4s, mostly for clarity (e.g., glm::vec3 mPosition instead of glm::vec4 mPosition). It is thus handy to have two functions like these (untested):
glm::vec3 TransformDirection(glm::vec3 pDirection, glm::mat4 pMatrix){
return glm::vec3(pMatrix * glm::vec4(pDirection, 0.0f)); // truncate back to vec3 explicitly
}
glm::vec3 TransformPosition(glm::vec3 pPosition, glm::mat4 pMatrix){
return glm::vec3(pMatrix * glm::vec4(pPosition, 1.0f));
}
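For example (a hedged snippet, reusing the names from the question), the rotated basis vectors in process_keyboard could then be computed as:
glm::vec3 up_rotated      = TransformDirection(glm::vec3(0.0f, 1.0f, 0.0f), m_rotation);
glm::vec3 forward_rotated = TransformDirection(glm::vec3(0.0f, 0.0f, -1.0f), m_rotation);
glm::vec3 right_rotated   = TransformDirection(glm::vec3(1.0f, 0.0f, 0.0f), m_rotation);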
At the end of process_mouse you save the coordinates in old_mouse_x and old_mouse_y, but then you move the cursor to the middle of the screen. If you do this, old_mouse_x and old_mouse_y become invalid. What you need to do is set these variables after repositioning the cursor:
app->SetCursorPosition(app->GetWidth() / 2, app->GetHeight() / 2);
this->old_mouse_x = app->GetWidth() / 2;
this->old_mouse_y = app->GetHeight() / 2;