When I build a custom column-major matrix in my code and pass it to the vertex shader, the triangle is not drawn as expected, but when I use a row-major matrix, the triangle is drawn in its correct position.
I googled it and found some answers related to this question,
like this and this, but I could not understand what I'm doing wrong.
If I'm not mistaken, a row-major matrix is:
{ 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
Tx, Ty, Tz, w}
So, using this row-major matrix, the multiplication order should be: v' = v*M.
And a column-major matrix is:
{ 0, 4, 8, Tx,
1, 5, 9, Ty,
2, 6, 10, Tz,
3, 7, 11, w}
Using this column-major matrix, the multiplication order should be: v' = M*v.
Where Tx, Ty, and Tz hold the translation values for x, y and z, respectively.
Having said that, I will focus on what I think I'm having trouble with, to keep the question compact, but I will post full example code at the end, using GLFW and GLAD (<glad/gl.h>).
This is my vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(aPos, 1.0);
}
These are my Mat4 struct and its functions:
typedef struct Mat4
{
float data[16];
} Mat4;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order (note: parameter 'a' is currently unused)
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order (note: parameter 'a' is currently unused)
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
This is my update_triangle function where I translate the matrix:
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data);
Note that I'm passing GL_FALSE in glUniformMatrix4fv, which tells OpenGL that the matrix is already in column-major order.
However, when running the program, I do not get a triangle moved 0.5 up and 0.5 to the right; I get this:
Weird triangle translation
But when I use a row-major matrix and change the multiplication order in the vertex shader (v' = v*M), I get the result that I was expecting.
The vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = vec4(aPos, 1.0) * transform;
}
The update_triangle function:
Mat4 trans = mat4_identity();
trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data);
Note that I'm passing GL_TRUE in glUniformMatrix4fv, which tells OpenGL that the matrix is not in column-major order and should be transposed on upload.
The result:
Triangle drawn as expected
Here is the code in a single file; it needs to be compiled with GLFW and glad/gl.c.
The "Comment [0]" and "Comment [1]" markers are just to indicate which lines to comment out together; for example, if you comment out a line marked "// Comment [0]", you need to comment out the other lines marked "// Comment [0]" as well.
But in the vertex shader, both matrices use the same line to be drawn correctly (which is the part I don't understand).
If you are on Linux, you can compile with: g++ -o ex example.cpp gl.c -lglfw && ./ex
(You will need to download gl.c from the Glad generator.)
Code:
#include <glad/gl.h>
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <stdlib.h>
// Mat4 structure
typedef struct Mat4
{
float data[16];
} Mat4;
int c = 0;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order (note: parameter 'a' is currently unused)
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order (note: parameter 'a' is currently unused)
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
GLFWwindow *glfw_window;
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput();
// Shader functions
static unsigned int compile_shader(unsigned int type, const char *source);
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader);
// Triangle functions
void init_triangle();
void draw_triangle();
void update_triangle();
unsigned int shader = -1;
unsigned int vao = -1;
unsigned int vbo = -1;
float vertices[] = {
-0.5f, -0.5f, 0.0f, // left
0.5f, -0.5f, 0.0f, // right
0.0f, 0.5f, 0.0f // top
};
const char *vshader = "#version 330 core\n"
"layout (location = 0) in vec3 aPos;\n"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
// " gl_Position = vec4(aPos, 1.0) * transform;\n" // Comment [0] -> Inverted for column-major
" gl_Position = transform * vec4(aPos, 1.0);\n" // Comment [1] -> Inverted for column-major
"}\0";
const char *fshader = "#version 330 core\n"
"out vec4 FragColor;\n"
"void main()\n"
"{\n"
" FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n"
"}\n\0";
int main()
{
int result = init_glfw("LearnOpenGL", 0, 0, 800, 600);
if(result != 0)
return result;
init_triangle();
while (!glfwWindowShouldClose(glfw_window))
{
// input
processInput();
// Update triangle vertices
update_triangle();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw triangle example
draw_triangle();
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
glfwSwapBuffers(glfw_window);
glfwPollEvents();
}
// glfw: terminate, clearing all previously allocated GLFW resources.
glfwTerminate();
return 0;
}
// My confusion is here
void update_triangle()
{
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [0]
// trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [1]
// Print Mat4
if(c == 0)
{
// TODO: Remove this
printf("==== Trans: ====\n");
for(int i = 0; i < 16; i++)
{
printf("%.2f, ", trans.data[i]);
if(i % 4 == 3)
printf("\n");
}
c++;
}
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data); // Comment [0]
// glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data); // Comment [1]
}
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height)
{
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#ifdef __APPLE__
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
#endif
// glfw window creation
// --------------------
glfw_window = glfwCreateWindow(width, height, window_title, NULL, NULL);
if (glfw_window == NULL)
{
printf("Failed to create GLFW window\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(glfw_window);
glfwSetFramebufferSizeCallback(glfw_window, framebuffer_size_callback);
// glad: load all OpenGL function pointers
// ---------------------------------------
int version = gladLoadGL(glfwGetProcAddress);
printf("Current GL loaded: %d.%d\n", GLAD_VERSION_MAJOR(version), GLAD_VERSION_MINOR(version));
return 0;
}
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void processInput()
{
if(glfwGetKey(glfw_window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(glfw_window, true);
}
/* Default Compilation for Shader */
static unsigned int compile_shader(unsigned int type, const char *source)
{
unsigned int id = glCreateShader(type);
glShaderSource(id, 1, &source, NULL);
glCompileShader(id);
int result;
glGetShaderiv(id, GL_COMPILE_STATUS, &result);
if(!result)
{
int length;
glGetShaderiv(id, GL_INFO_LOG_LENGTH, &length);
char* msg = (char*) alloca(length * sizeof(char));
glGetShaderInfoLog(id, length, &length, msg);
printf("Vertex / Fragment Shader Failed:\n %s", msg);
glDeleteShader(id);
return 0;
}
return id;
}
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader)
{
unsigned int program = glCreateProgram();
unsigned int vs = compile_shader(GL_VERTEX_SHADER, vertex_shader);
unsigned int fs = compile_shader(GL_FRAGMENT_SHADER, fragment_shader);
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
glValidateProgram(program);
glDeleteShader(vs);
glDeleteShader(fs);
return program;
}
// Triangle functions
void init_triangle()
{
shader = create_shader(vshader, fshader);
printf("shader=%d", shader);
glUseProgram(shader);
glGenVertexArrays(1, &vao);
printf("vao=%d", vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
printf("vbo=%d\n", vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // Using this vbo
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), NULL);
}
void draw_triangle()
{
glUseProgram(shader);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
This is my first question on this forum, so please let me know if anything is missing.
So many people use row-major or transposed matrices that they forget that matrices are not naturally oriented that way. So they see a translation matrix as this:
1 0 0 0
0 1 0 0
0 0 1 0
x y z 1
This is a transposed translation matrix. That is not what a normal translation matrix looks like. The translation goes in the 4th column, not the fourth row. Sometimes, you even see this in textbooks, which is utter garbage.
It's easy to know whether a matrix in an array is row-major or column-major. If it's row-major, then the translation is stored at indices 3, 7, and 11. If it's column-major, then the translation is stored at indices 12, 13, and 14. Zero-based indices, of course.
Your confusion stems from believing that you're using column-major matrices when you're in fact using row-major ones.
The statement that row vs. column major is a notational convention only is entirely true. The mechanics of matrix multiplication and matrix/vector multiplication are the same regardless of the convention.
What changes is the meaning of the results.
A 4x4 matrix after all is just a 4x4 grid of numbers. It doesn't have to refer to a change of coordinate system. However, once you assign meaning to a particular matrix, you now need to know what is stored in it and how to use it.
Take the translation matrix I showed you above. That's a valid matrix. You could store that matrix in a float[16] in one of two ways:
float row_major_t[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
float column_major_t[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
However, I said that this translation matrix is wrong, because the translation is in the wrong place. I specifically said that it is transposed relative to the standard convention for how to build translation matrices, which ought to look like this:
1 0 0 x
0 1 0 y
0 0 1 z
0 0 0 1
Let's look at how these are stored:
float row_major[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
float column_major[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
Notice that column_major is exactly the same as row_major_t. So, if we take a proper translation matrix, and store it as column-major, it is the same as transposing that matrix and storing it as row-major.
That is what is meant by being only a notational convention. There are really two sets of conventions: memory storage and transposition. Memory storage is column vs row major, while transposition is normal vs. transposed.
If you have a matrix that was generated in row-major order, you can get the same effect by transposing the column-major equivalent of that matrix. And vice-versa.
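To see this concretely, here is a small self-contained check; a minimal sketch, with transpose4 being a helper written just for this example:
#include <cstdio>
// Transpose a 4x4 matrix stored in a flat 16-element array.
void transpose4(const float in[16], float out[16])
{
    for (int r = 0; r < 4; r++)
        for (int c = 0; c < 4; c++)
            out[c * 4 + r] = in[r * 4 + c];
}
int main()
{
    float x = 1, y = 2, z = 3;
    // The standard translation matrix, stored in the two orders:
    float row_major[16]    = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
    float column_major[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
    float t[16];
    transpose4(row_major, t);
    for (int i = 0; i < 16; i++)
        if (t[i] != column_major[i]) { printf("mismatch\n"); return 1; }
    printf("transpose(row_major) == column_major\n");
    return 0;
}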
Matrix multiplication can only be done one way: given two matrices, in a specific order, you multiply certain values together and store the results. Now, A*B != B*A, but the actual source code for A*B is the same as the code for B*A. They both run the same code to compute the output.
The matrix multiplication code does not care whether the matrices happen to be stored in column-major or row-major order.
The same cannot be said for vector/matrix multiplication. And here's why.
Vector/matrix multiplication is a falsehood; it cannot be done. However, you can multiply a matrix by another matrix. So if you pretend a vector is a matrix, then you can effectively do vector/matrix multiplication, simply by doing matrix/matrix multiplication.
A 4D vector can be considered a column-vector or a row-vector. That is, a 4D vector can be thought of as a 4x1 matrix (remember: in matrix notation, the row count comes first) or a 1x4 matrix.
But here's the thing: Given two matrices A and B, A*B is only defined if the number of columns of A is the same as the number of rows of B. Therefore, if A is our 4x4 matrix, B must be a matrix with 4 rows in it. Therefore, you cannot perform A*x, where x is a row-vector. Similarly, you cannot perform x*A where x is a column-vector.
Because of this, most matrix math libraries make this assumption: if you multiply a vector times a matrix, you really mean to do the multiplication that actually works, not the one that makes no sense.
Let us define, for any 4D vector x, the following. C shall be the column-vector matrix form of x, and R shall be the row-vector matrix form of x. Given this, for any 4x4 matrix A, A*C represents matrix multiplying A by the column-vector x. And R*A represents matrix multiplying the row-vector x by A.
But if we look at this using strict matrix math, we see that these are not equivalent. R*A cannot be the same as A*C. This is because a row-vector is not the same thing as a column-vector. They're not the same matrix, so they do not produce the same results.
However, they are related in one way. It is true that R != C. However, it is also true that R = C^T, where ^T is the transpose operation. The two matrices are transposes of each other.
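GLM can demonstrate this directly, since it supports both products; a minimal sketch (GLM is assumed here, it is not part of the question's code):
#include <glm/glm.hpp>
#include <cstdio>
int main()
{
    // Standard translation matrix; GLM stores matrices column-major,
    // so the translation goes into the fourth column.
    glm::mat4 A(1.0f);
    A[3] = glm::vec4(5.0f, 6.0f, 7.0f, 1.0f);
    glm::vec4 x(0.0f, 0.0f, 0.0f, 1.0f);
    glm::vec4 ac = A * x; // A*C: column-vector convention -> (5, 6, 7, 1)
    glm::vec4 ra = x * A; // R*A: row-vector convention    -> (0, 0, 0, 1)
    printf("A*C = (%g, %g, %g, %g)\n", ac.x, ac.y, ac.z, ac.w);
    printf("R*A = (%g, %g, %g, %g)\n", ra.x, ra.y, ra.z, ra.w);
    return 0;
}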
Here's a funny fact. Since vectors are treated as matrices, they too have a column vs. row-major storage question. The problem is that they both look the same. The array of floats is the same, so you can't tell the difference between R and C just by looking at the data. The only way to tell the difference is by how they are used.
If you have any two matrices A and B, and A is stored as row-major and B as column-major, multiplying them is completely meaningless. You get nonsense as a result. Well, not really. Mathematically, what you get is the equivalent of doing A^T*B (or A*B^T, depending on which storage convention your multiplication code assumes); either way, it is not the product you asked for.
Therefore, matrix multiplication only makes sense if the two matrices (and remember: vector/matrix multiplication is just matrix multiplication) are stored in the same major ordering.
So, is a vector column-major or row-major? It is both and neither, as stated before. It is column major only when it is used as a column matrix, and it is row major when it is used as a row matrix.
Therefore, if you have a matrix A which is column-major, x*A means... nothing. Well, again, it means x*A^T, but that's not what you really wanted. Similarly, A*x does transposed multiplication if A is row-major.
Therefore, the order of vector/matrix multiplication does change, depending on your major ordering of the data (and whether you're using transposed matrices).
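Applied to the question at the top: with the shader line gl_Position = transform * vec4(aPos, 1.0), either storage order can be uploaded correctly by choosing the transpose flag to match. A sketch (loc is an already-queried uniform location):
#include <glad/gl.h>
// Both calls feed the shader the same logical translation matrix, to be
// used as gl_Position = transform * v (column-vector convention).
void upload_translation(GLint loc,
                        const float col_major[16],  // translation at 12, 13, 14
                        const float row_major[16])  // translation at 3, 7, 11
{
    // Data already stored column-major: upload as-is.
    glUniformMatrix4fv(loc, 1, GL_FALSE, col_major);
    // Same matrix stored row-major: let GL transpose it on upload.
    glUniformMatrix4fv(loc, 1, GL_TRUE, row_major);
}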
I am following this site to learn ray tracing using compute shaders: https://github.com/LWJGL/lwjgl3-wiki/wiki/2.6.1.-Ray-tracing-with-OpenGL-Compute-Shaders-%28Part-I%29
My question: the tutorial details a procedure to get the perspective projection. I think I followed the steps correctly, but I am getting the wrong result and I believe I made a mistake in my matrix computations.
My code for the perspective projection:
//Getting the perspective projection using glm::perspective
glm::mat4 projection = glm::perspective(60.0f, 1024.0f/768.0f, 1.0f, 2.0f);
//My Camera Position
glm::vec3 camPos=glm::vec3(3.0, 2.0, 7.0);
//My View matrix using glm::lookAt
glm::mat4 view = glm::lookAt(camPos, glm::vec3(0.0, 0.5, 0.0),glm::vec3(0.0, 1.0, 0.0));
//Calculating inverse of the view*projection
glm::mat4 inv = glm::inverse(view*projection);
//Calculating the rays from camera position to the corners of the frustum as detailed in the site.
glm::vec4 ray00=glm::vec4(-1, -1, 0, 1) * inv;
ray00 /= ray00.w;
ray00 -= glm::vec4(camPos,1.0);
glm::vec4 ray10 = glm::vec4(+1, -1, 0, 1) * inv;
ray10 /= ray10.w;
ray10 -= glm::vec4(camPos,1.0);
glm::vec4 ray01=glm::vec4(-1, 1, 0, 1) * inv;
ray01 /= ray01.w;
ray01 -= glm::vec4(camPos,1.0);
glm::vec4 ray11 = glm::vec4(+1, +1, 0, 1) * inv;
ray11 /= ray11.w;
ray11 -= glm::vec4(camPos,1.0);
Result of the above transformations:
Result image
As additional information, I am dispatching my compute shaders using:
//Dispatch Shaders.
glDispatchCompute(1024 / 16, 768 / 8, 1);
I am also passing the values to the shader like this:
//Querying the location for ray00 and assigning the value. Similarly for the rest
GLuint ray00Id = glGetUniformLocation(computeS, "ray00");
glUniform3f(ray00Id, ray00.x, ray00.y, ray00.z);
GLuint ray01Id = glGetUniformLocation(computeS, "ray01");
glUniform3f(ray01Id, ray01.x, ray01.y, ray01.z);
GLuint ray10Id = glGetUniformLocation(computeS, "ray10");
glUniform3f(ray10Id, ray10.x, ray10.y, ray10.z);
GLuint ray11Id = glGetUniformLocation(computeS, "ray11");
glUniform3f(ray11Id, ray11.x, ray11.y, ray11.z);
GLuint camId = glGetUniformLocation(computeS, "eye");
glUniform3f(camId, camPos.x, camPos.y, camPos.z);
Updated answer following derhass's suggestion.
My image now looks like this:
Latest Image
The glm library uses the standard OpenGL matrix conventions, meaning that the matrices are created with the multiplication order Matrix * Vector in mind. So the following code is wrong:
//Calculating inverse of the view*projection
glm::mat4 inv = glm::inverse(view*projection);
The composition of the view matrix (transforming from world space to eye space) and the projection matrix (transforming from eye space to clip space) is projection * view, not view * projection as you put it (which would apply the projection before the view).
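In code, the fix could look like this; a sketch that reuses projection, view, and camPos from the question and keeps the matrix on the left, per GLM's Matrix * Vector convention:
// Compose right-to-left (view first, then projection), then invert.
glm::mat4 inv = glm::inverse(projection * view);
// Unproject one corner of the frustum, matrix on the left:
glm::vec4 ray00 = inv * glm::vec4(-1, -1, 0, 1);
ray00 /= ray00.w;
ray00 -= glm::vec4(camPos, 1.0);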
I'm trying to create a ray from my mouse location out into 3D space, and apparently in order to do that I need to "UnProject()" it.
Doing so will give me a value between 0 & 1 for each axis.
This can't be right for drawing a "Ray" or a line from the viewport, can it? All this is, is a percentage essentially of my mouse to viewport size.
If this is actually right, then I don't understand the following:
I draw triangles with vertices that are not constrained to 0-1; rather, they are coordinates like (0,100,0), (100,100,0), (100,0,0), and these draw perfectly fine.
But drawing the vertices that are unprojected from my mouse coordinates as lines/points also draws perfectly fine.
How the heck would I then compare my mouse coordinates to the coordinates of my objects?
If this is actually wrong, then what can cause such an error?
I tried unprojecting my own object's vertices, and those aren't constrained from 0-1.
I don't know whether or not the way I handle my "projections" when rendering is even compatible with gluUnProject. I've just been doing it the way these tutorials show it (near the bottom): http://qt-project.org/wiki/Developer-Guides#28810c65dd0f273a567b83a48839d275
This is the way I try to get my mouse coordinates:
GLdouble modelViewMatrix[16];
GLdouble projectionMatrix[16];
GLint viewport[4];
GLfloat winX, winY, winZ;
glGetDoublev(GL_MODELVIEW_MATRIX, modelViewMatrix);
glGetDoublev(GL_PROJECTION_MATRIX, projectionMatrix);
glGetIntegerv(GL_VIEWPORT, viewport);
winX = (float)x;
winY = (float)viewport[3] - (float)y;
glReadPixels( winX, winY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ );
GLdouble nearPlaneLocation[3];
gluUnProject(winX, winY, 0, modelViewMatrix, projectionMatrix,
viewport, &nearPlaneLocation[0], &nearPlaneLocation[1],
&nearPlaneLocation[2]);
GLdouble farPlaneLocation[3];
gluUnProject(winX, winY, 1, modelViewMatrix, projectionMatrix,
viewport, &farPlaneLocation[0], &farPlaneLocation[1],
&farPlaneLocation[2]);
QVector3D nearP = QVector3D(nearPlaneLocation[0], nearPlaneLocation[1],
nearPlaneLocation[2]);
QVector3D farP = QVector3D(farPlaneLocation[0], farPlaneLocation[1],
farPlaneLocation[2]);
Perhaps my actual projections are off?
void oglWidget::paintGL()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
QMatrix4x4 mMatrix;
QMatrix4x4 vMatrix;
QMatrix4x4 cameraTransformation;
cameraTransformation.rotate(alpha, 0, 1, 0);
cameraTransformation.rotate(beta, 1, 0, 0);
QVector3D cameraPosition = cameraTransformation * QVector3D(camX, camY, distance);
QVector3D cameraUpDirection = cameraTransformation * QVector3D(0, 1, 0);
vMatrix.lookAt(cameraPosition, QVector3D(camX, camY, 0), cameraUpDirection);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
gluLookAt(cameraPosition.x(), cameraPosition.y(), cameraPosition.z(), camX, camY, 0, cameraUpDirection.x(), cameraUpDirection.y(), cameraUpDirection.z());
shaderProgram.bind();
shaderProgram.setUniformValue("mvpMatrix", pMatrix * vMatrix * mMatrix);
shaderProgram.setUniformValue("texture", 0);
for (int x = 0; x < tileCount; x++)
{
shaderProgram.setAttributeArray("vertex", tiles[x]->vertices.constData());
shaderProgram.enableAttributeArray("vertex");
shaderProgram.setAttributeArray("textureCoordinate", textureCoordinates.constData());
shaderProgram.enableAttributeArray("textureCoordinate");
//Triangle Drawing
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, tiles[x]->image.width(), tiles[x]->image.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, tiles[x]->image.bits());
glDrawArrays(GL_TRIANGLES, 0, tiles[x]->vertices.size());
}
shaderProgram.release();
}
Whereas pMatrix is a 4x4 matrix, set up during resize events like this:
pMatrix.setToIdentity();
pMatrix.perspective(fov, (float) width / (float) height, 0.001, 10000);
glViewport(0, 0, width, height);
and my vertex shader is set up like this:
uniform mat4 mvpMatrix;
in vec4 vertex;
in vec2 textureCoordinate;
out vec2 varyingTextureCoordinate;
void main(void)
{
varyingTextureCoordinate = textureCoordinate;
gl_Position = mvpMatrix * vertex;
}
glReadPixels takes integers for x and y, and you don't seem to be using winZ in gluUnProject for some reason.
Try it like this:
gluUnProject(winX, winY, winZ, glView, glProjection, viewport, &posX, &posY, &posZ);
Also, if you want the ray to stop when it meets something in the depth buffer then don't clear the depth buffer after rendering. If you do a glClear(GL_DEPTH_BUFFER_BIT) then the ray should go as far as the far clip you set in your projection matrix.
I also have no idea why you need to call it more than once. The last three floats will be the target vector and you can just use your camera position as the source of the ray (depending on what you are doing).
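For what it's worth, once nearP and farP from the question are correct, a pick ray could be built from them like this (a sketch):
// The ray starts on the near plane and points toward the far plane.
QVector3D rayOrigin    = nearP;
QVector3D rayDirection = (farP - nearP).normalized();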
Part of my problem here was describing it poorly. I had accidentally left in residual code from frantic testing, resulting in bits of glReadPixels calls and related noise that wasn't useful for solving the problem.
The rest of my problem was due to inconsistent data types for the matrices, and trying to pull matrices from OpenGL when it never had them stored in the first place.
The problem was solved by:
Using GLM to hold all my matrices
Performing the calculation myself: (inverse model matrix * inverse view matrix * inverse projection matrix, i.e. the inverse of projection * view * model) * a vector holding the NDC-converted screen-space coordinates (range of -1 to 1: x or y divided by width or height, * 2 - 1), which also has a z of -1 or 1 for the near or far plane, and a w of 1 (see the sketch after this list).
Dividing the result by the fourth (w) component of the vector.
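A sketch of those steps with GLM; mouse coordinates, window size, and the three matrices are assumed to exist under these names:
#include <glm/glm.hpp>
// Manual unproject: NDC coordinates through the inverse of the full
// projection * view * model chain, then the perspective divide.
glm::vec4 unproject_manual(float mouseX, float mouseY, float ndcZ,
                           float width, float height,
                           const glm::mat4& projection,
                           const glm::mat4& view,
                           const glm::mat4& model)
{
    float ndcX = mouseX / width * 2.0f - 1.0f;
    float ndcY = 1.0f - mouseY / height * 2.0f; // window y points down
    glm::mat4 inv = glm::inverse(projection * view * model);
    glm::vec4 p = inv * glm::vec4(ndcX, ndcY, ndcZ, 1.0f); // ndcZ: -1 near, +1 far
    return p / p.w; // divide by the fourth (w) component
}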
I still do not know why unprojecting doesn't work for me, as I got the wrong results with GLU as well as GLM's unproject function, but doing it manually worked for me.
Since my problem extended over quite a great length of time, and took up several questions, I owe credit to a few individuals who helped me along the way:
srobins of facepunch, in this thread
derhass from here, in this question, and this discussion
I want to create a formula that rotates my object (o1) to always point in the direction of another object (o2), regardless of o1's position.
Kind of like the camera in the following image:
http://puu.sh/bLUWw/aaa653accf.png
I have the following code so far, but the yaw axis seems to be inverted:
Vector3 lookat = { lookAtPosition.x, lookAtPosition.y, lookAtPosition.z };
Vector3 pos = { position.x, position.y, position.z };
Vector3 objectUpVector = { 0.0f, 1.0f, 0.0f };
Vector3 zaxis = Vector3::normalize(lookat - pos);
Vector3 xaxis = Vector3::normalize(Vector3::cross(objectUpVector, zaxis));
Vector3 yaxis = Vector3::cross(zaxis, xaxis);
Matrix16 pm = {
xaxis.x, yaxis.x, zaxis.x, 0,
xaxis.y, yaxis.y, zaxis.y, 0,
xaxis.z, yaxis.z, zaxis.z, 0,
0, 0, 0, 1
};
See the following image:
http://puu.sh/bLUSG/5228bb2176.jpg
I'm sure it's just a few variables that have to be swapped, but I couldn't find them...
PS: The object matrix's position is multiplied in at a later stage, for testing purposes...
I found the answer to my issue; it turns out that I just had to change the order of the values inside the matrix, like so:
Matrix16 pm = {
xaxis.x, xaxis.y, xaxis.z, 0,
yaxis.x, yaxis.y, yaxis.z, 0,
zaxis.x, zaxis.y, zaxis.z, 0,
0, 0, 0, 1
};
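Putting it together, the whole construction could read like this; a sketch using the question's Vector3/Matrix16 types and helper functions:
// Build a rotation matrix that points an object at `position` toward
// `lookAtPosition`; translation is applied separately, as noted above.
Matrix16 look_at_rotation(const Vector3& position, const Vector3& lookAtPosition)
{
    Vector3 objectUpVector = { 0.0f, 1.0f, 0.0f };
    Vector3 zaxis = Vector3::normalize(lookAtPosition - position);
    Vector3 xaxis = Vector3::normalize(Vector3::cross(objectUpVector, zaxis));
    Vector3 yaxis = Vector3::cross(zaxis, xaxis);
    Matrix16 pm = {
        xaxis.x, xaxis.y, xaxis.z, 0,
        yaxis.x, yaxis.y, yaxis.z, 0,
        zaxis.x, zaxis.y, zaxis.z, 0,
        0,       0,       0,       1
    };
    return pm;
}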
The camera matrix is the inverse of the transform matrix representing its coordinate system.
look here: Transform matrix anatomy
origin = o1.position
Z axis = o1.position-o2.position
and make it unit length, of course
(my frustum/Z-buffer is configured to view in the -Z axis direction)
Now just compute the X and Y axes so they are perpendicular to each other and to Z, via cross products,
and make them unit length, of course.
So take some vector (not parallel to Z),
ideally something meaningful to align to, like the up vector (0,1,0);
cross it with the Z axis and store the result in X (or Y),
then cross that result with the Z axis again and store it in Y (or X).
Now you have the transform matrix M1 representing o1's coordinate system.
If you just want to render the object, then this is it.
If you want to have the camera on object o1,
then just compute:
ViewMatrix=inverse(M1)*ProjectionMatrix;
Look here: inverse matrix computation for my OpenGL matrices.
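The same steps written with GLM, as a sketch; note that GLM is column-major with the Matrix * Vector convention, so the combined matrix is built as Projection * inverse(M1) rather than inverse(M1) * ProjectionMatrix:
#include <glm/glm.hpp>
// Build o1's transform matrix M1 from the axes described above, then
// invert it to get the view matrix and prepend the projection.
glm::mat4 camera_on_object(const glm::vec3& o1_pos, const glm::vec3& o2_pos,
                           const glm::mat4& projection)
{
    glm::vec3 Z = glm::normalize(o1_pos - o2_pos);            // looks down -Z
    glm::vec3 X = glm::normalize(glm::cross(glm::vec3(0, 1, 0), Z));
    glm::vec3 Y = glm::cross(Z, X);                           // already unit length
    glm::mat4 M1(glm::vec4(X, 0.0f), glm::vec4(Y, 0.0f),
                 glm::vec4(Z, 0.0f), glm::vec4(o1_pos, 1.0f));
    return projection * glm::inverse(M1);                     // view-projection
}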