Multiplication in OpenGL vertex shader using column-major matrix does not draw triangle as expected - opengl

When I use a custom column-major matrix in my code and pass it to the vertex shader, the triangle is not drawn as expected, but when I use a row-major matrix, it draws the triangle in its correct position.
I googled it and found some answers related to this question, like this and this, but I could not understand what I'm doing wrong.
If I'm not mistaken, a row-major matrix is:
{ 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
Tx, Ty, Tz, w}
So, using this row-major matrix, the multiplication order should be: v' = v*M.
And a column-major matrix is:
{ 0, 4, 8, Tx,
1, 5, 9, Ty,
2, 6, 10, Tz,
3, 7, 11, w}
Using this column-major matrix, the multiplication order should be: v' = M*v.
Where Tx, Ty, and Tz hold the translation values for x, y and z, respectively.
Having said that, I will focus on what I think I'm having trouble with, to keep the question compact, but I will post example code at the end, using GLFW and GLAD (<glad/gl.h>).
This is my vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(aPos, 1.0);
};
These are my Mat4 struct and its functions:
typedef struct Mat4
{
float data[16];
} Mat4;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
This is my update_triangle function where I translate the matrix:
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data);
Note that I'm passing GL_FALSE in glUniformMatrix4fv, which tells OpenGL that the matrix is already in column-major order.
However, when running the program, I do not get a triangle 0.5f up and 0.5f right, I get this:
Weird triangle translation
But when I use a row-major matrix and change the multiplication order in the vertex shader(v' = v*M), I get the result that I was expecting.
The vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = vec4(aPos, 1.0) * transform;
};
The update_triangle function:
Mat4 trans = mat4_identity();
trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data);
Note that I'm passing GL_TRUE in glUniformMatrix4fv, which tells OpenGL that the matrix is not in column-major order.
The result:
Triangle drawn as expected
Here is the code in a single file, it needs to be compiled with GLFW and glad/gl.c.
Comment[0] and Comment[1] are just to help with which lines to comment together; for example, if you comment a line marked "// Comment[0]", you need to comment the other lines marked "// Comment[0]" as well.
But in the vertex shader, both matrices use the same line to be drawn correctly (which is why I don't understand).
If you are on Linux, you can compile with: g++ -o ex example.cpp gl.c -lglfw && ./ex
(You will need to download gl.c from Glad generator)
Code:
#include <glad/gl.h>
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <stdlib.h>
// Mat4 structure
typedef struct Mat4
{
float data[16];
} Mat4;
int c = 0;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
GLFWwindow *glfw_window;
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput();
// Shader functions
static unsigned int compile_shader(unsigned int type, const char *source);
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader);
// Triangle functions
void init_triangle();
void draw_triangle();
void update_triangle();
unsigned int shader = -1;
unsigned int vao = -1;
unsigned int vbo = -1;
float vertices[] = {
-0.5f, -0.5f, 0.0f, // left
0.5f, -0.5f, 0.0f, // right
0.0f, 0.5f, 0.0f // top
};
const char *vshader = "#version 330 core\n"
"layout (location = 0) in vec3 aPos;\n"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
// " gl_Position = vec4(aPos, 1.0) * transform;\n" // Comment [0] -> Inverted for column-major
" gl_Position = transform * vec4(aPos, 1.0);\n" // Comment [1] -> Inverted for column-major
"}\0";
const char *fshader = "#version 330 core\n"
"out vec4 FragColor;\n"
"void main()\n"
"{\n"
" FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n"
"}\n\0";
int main()
{
int result = init_glfw("LearnOpenGL", 0, 0, 800, 600);
if(result != 0)
return result;
init_triangle();
while (!glfwWindowShouldClose(glfw_window))
{
// input
processInput();
// Update triangle vertices
update_triangle();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw triangle example
draw_triangle();
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
glfwSwapBuffers(glfw_window);
glfwPollEvents();
}
// glfw: terminate, clearing all previously allocated GLFW resources.
glfwTerminate();
return 0;
}
// My confusion is here
void update_triangle()
{
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [0]
// trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [1]
// Print Mat4
if(c == 0)
{
// TODO: Remove this
printf("==== Trans: ====\n");
for(int i = 1; i <= 16; i++)
{
printf("%.2f, ", trans.data[i-1]);
if(i % 4 == 0 && i != 0)
printf("\n");
}
c++;
}
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data); // Comment [0]
// glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data); // Comment [1]
}
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height)
{
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#ifdef __APPLE__
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
#endif
// glfw window creation
// --------------------
glfw_window = glfwCreateWindow(width, height, window_title, NULL, NULL);
if (glfw_window == NULL)
{
printf("Failed to create GLFW window\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(glfw_window);
glfwSetFramebufferSizeCallback(glfw_window, framebuffer_size_callback);
// glad: load all OpenGL function pointers
// ---------------------------------------
int version = gladLoadGL(glfwGetProcAddress);
printf("Current GL loaded: %d.%d\n", GLAD_VERSION_MAJOR(version), GLAD_VERSION_MINOR(version));
return 0;
}
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void processInput()
{
if(glfwGetKey(glfw_window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(glfw_window, true);
}
/* Default Compilation for Shader */
static unsigned int compile_shader(unsigned int type, const char *source)
{
unsigned int id = glCreateShader(type);
glShaderSource(id, 1, &source, NULL);
glCompileShader(id);
int result;
glGetShaderiv(id, GL_COMPILE_STATUS, &result);
if(!result)
{
int length;
glGetShaderiv(id, GL_INFO_LOG_LENGTH, &length);
char* msg = (char*) alloca(length * sizeof(char));
glGetShaderInfoLog(id, length, &length, msg);
printf("Vertex / Fragment Shader Failed:\n %s", msg);
glDeleteShader(id);
return 0;
}
return id;
}
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader)
{
unsigned int program = glCreateProgram();
unsigned int vs = compile_shader(GL_VERTEX_SHADER, vertex_shader);
unsigned int fs = compile_shader(GL_FRAGMENT_SHADER, fragment_shader);
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
glValidateProgram(program);
glDeleteShader(vs);
glDeleteShader(fs);
return program;
}
// Triangle functions
void init_triangle()
{
shader = create_shader(vshader, fshader);
printf("shader=%d", shader);
glUseProgram(shader);
glGenVertexArrays(1, &vao);
printf("vao=%d", vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
printf("vbo=%d\n", vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // Using this vbo
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), NULL);
}
void draw_triangle()
{
glUseProgram(shader);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
This is my first question in this forum, so please let me know if there is anything missing.

So many people use row-major or transposed matrices that they forget matrices are not naturally oriented that way. So they see a translation matrix as this:
1 0 0 0
0 1 0 0
0 0 1 0
x y z 1
This is a transposed translation matrix. That is not what a normal translation matrix looks like; the translation goes in the fourth column, not the fourth row. You sometimes even see this in textbooks, which is utter garbage.
It's easy to know whether a matrix in an array is row- or column-major. If it's row-major, then the translation is stored at indices 3, 7, and 11. If it's column-major, then the translation is stored at indices 12, 13, and 14. Zero-based indices, of course.
Your confusion stems from believing that you're using column-major matrices when you're in fact using row-major ones.
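In terms of the question's Mat4 layout, a sketch of what the column-major translation (the one meant for GL_FALSE and gl_Position = transform * vec4(aPos, 1.0)) would look like:
// Sketch: translation stored the way OpenGL reads the array with GL_FALSE
// (column-major), i.e. in the fourth column -> indices 12, 13, 14.
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
    Mat4 m = mat4_identity();
    m.data[12] += x;
    m.data[13] += y;
    m.data[14] += z;
    return m;
}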
The statement that row vs. column major is a notational convention only is entirely true. The mechanics of matrix multiplication and matrix/vector multiplication are the same regardless of the convention.
What changes is the meaning of the results.
A 4x4 matrix after all is just a 4x4 grid of numbers. It doesn't have to refer to a change of coordinate system. However, once you assign meaning to a particular matrix, you now need to know what is stored in it and how to use it.
Take the translation matrix I showed you above. That's a valid matrix. You could store that matrix in a float[16] in one of two ways:
float row_major_t[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
float column_major_t[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
However, I said that this translation matrix is wrong, because the translation is in the wrong place. I specifically said that it is transposed relative to the standard convention for how to build translation matrices, which ought to look like this:
1 0 0 x
0 1 0 y
0 0 1 z
0 0 0 1
Let's look at how these are stored:
float row_major[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
float column_major[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
Notice that column_major is exactly the same as row_major_t. So, if we take a proper translation matrix, and store it as column-major, it is the same as transposing that matrix and storing it as row-major.
That is what is meant by being only a notational convention. There are really two sets of conventions: memory storage and transposition. Memory storage is column vs row major, while transposition is normal vs. transposed.
If you have a matrix that was generated in row-major order, you can get the same effect by transposing the column-major equivalent of that matrix. And vice-versa.
Matrix multiplication can only be done one way: given two matrices, in a specific order, you multiply certain values together and store the results. Now, A*B != B*A, but the actual source code for A*B is the same as the code for B*A. They both run the same code to compute the output.
The matrix multiplication code does not care whether the matrices happen to be stored in column-major or row-major order.
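To make that concrete, here is a sketch of such a multiply over flat float[16] arrays (a hypothetical helper, not taken from the question's code):
// Sketch: out = a * b, with all three arrays read as column-major
// (the element at row r, column c lives at index c*4 + r).
// Hand the very same function row-major arrays instead and it computes b * a,
// with the result coming out row-major -- the code itself never changes.
void mat4_mul(float out[16], const float a[16], const float b[16])
{
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
        {
            float sum = 0.0f;
            for (int k = 0; k < 4; ++k)
                sum += a[k * 4 + r] * b[c * 4 + k];
            out[c * 4 + r] = sum;
        }
}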
The same cannot be said for vector/matrix multiplication. And here's why.
Vector/matrix multiplication is a falsehood; it cannot be done. However, you can multiply a matrix by another matrix. So if you pretend a vector is a matrix, then you can effectively do vector/matrix multiplication, simply by doing matrix/matrix multiplication.
A 4D vector can be considered a column-vector or a row-vector. That is, a 4D vector can be thought of as a 4x1 matrix (remember: in matrix notation, the row count comes first) or a 1x4 matrix.
But here's the thing: Given two matrices A and B, A*B is only defined if the number of columns of A is the same as the number of rows of B. Therefore, if A is our 4x4 matrix, B must be a matrix with 4 rows in it. Therefore, you cannot perform A*x, where x is a row-vector. Similarly, you cannot perform x*A where x is a column-vector.
Because of this, most matrix math libraries make this assumption: if you multiply a vector times a matrix, you really mean to do the multiplication that actually works, not the one that makes no sense.
Let us define, for any 4D vector x, the following. C shall be the column-vector matrix form of x, and R shall be the row-vector matrix form of x. Given this, for any 4x4 matrix A, A*C represents matrix multiplying A by the column-vector x. And R*A represents matrix multiplying the row-vector x by A.
But if we look at this using strict matrix math, we see that these are not equivalent. R*A cannot be the same as A*C. This is because a row-vector is not the same thing as a column-vector. They're not the same matrix, so they do not produce the same results.
However, they are related in one way. It is true that R != C. However, it is also true that R = C^T, where ^T denotes the transpose. The two matrices are transposes of each other.
Here's a funny fact. Since vectors are treated as matrices, they too have a column vs. row-major storage question. The problem is that they both look the same. The array of floats is the same, so you can't tell the difference between R and C just by looking at the data. The only way to tell the difference is by how they are used.
If you have any two matrices A and B, and A is stored as row-major and B as column-major, multiplying them is completely meaningless. You get nonsense as a result. Well, not really. Mathematically, what you get is the equivalent of doing A^T*B (or A*B^T, depending on which convention the multiplication code assumes); either way it is not the A*B you wanted.
Therefore, matrix multiplication only makes sense if the two matrices (and remember: vector/matrix multiplication is just matrix multiplication) are stored in the same major ordering.
So, is a vector column-major or row-major? It is both and neither, as stated before. It is column major only when it is used as a column matrix, and it is row major when it is used as a row matrix.
Therefore, if you have a matrix A which is column-major, x*A means... nothing. Well, again, it means x*A^T, but that's not what you really wanted. Similarly, A*x does transposed multiplication if A is row-major.
Therefore, the order of vector/matrix multiplication does change, depending on your major ordering of the data (and whether you're using transposed matrices).
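Tying that back to flat arrays, here is a sketch with hypothetical helpers (same float[16] layout as the question's Mat4) of the two vector products:
// Column-major reading, column vector on the right: out = M * v.
// With the translation stored at indices 12/13/14 this moves the point.
void mul_M_v(float out[4], const float m[16], const float v[4])
{
    for (int r = 0; r < 4; ++r)
        out[r] = m[0*4 + r] * v[0] + m[1*4 + r] * v[1]
               + m[2*4 + r] * v[2] + m[3*4 + r] * v[3];
}

// Row-major reading, row vector on the left: out = v * M.
// For the same float array this produces exactly the same numbers as mul_M_v:
// the bytes did not change, only which convention we claim they follow.
void mul_v_M(float out[4], const float v[4], const float m[16])
{
    for (int c = 0; c < 4; ++c)
        out[c] = v[0] * m[0*4 + c] + v[1] * m[1*4 + c]
               + v[2] * m[2*4 + c] + v[3] * m[3*4 + c];
}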

Related

how does GLM handle translation

The OpenGL Mathematics library (GLM) uses the following algorithm to compute the translation matrix:
//taken from source code
template<typename T, qualifier Q>
GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
{
mat<4, 4, T, Q> Result(m);
Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
return Result;
}
(Here the vector v is a 3-dimensional vector and the matrix m is a 4x4 matrix; since we're using homogeneous coordinates, v is effectively treated as a 4-dimensional vector with w = 1.)
The following is from Linear Algebra Theory:
Let m be a 4x4 matrix with entries a11 ... a44 (row index first).
Now, suppose the matrix m gives some linear transformation and is also a transformation matrix, and we'd like to add a translation of X, Y and Z in the X, Y and Z dimensions respectively. If I'm not mistaken, the way we'd do that is by forming the composite matrix t*m, where t is the identity matrix with X, Y and Z placed in its fourth column, which gives something like m with X, Y and Z added to the entries of its fourth column.
Now, I'm not getting what this GLM function of translate does, because it does something like m*t, so the matrix with the added translation, i.e. m, becomes m with its fourth column replaced by X*column0 + Y*column1 + Z*column2 + column3.
Now, these two matrices are not equal and hence they would result in different transformations, so I'm confused to which matrix does the actual translation and which is the correct one or if there is any other idea hidden behind the algorithm?
Note: before reading the answer, keep in mind that in the column-major representation of a matrix, you access the entries using matrix[column-index][row-index].
Edit
The source code with which I perform transformation:
#include <iostream>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <cmath>
#include <string.h>
#include "glm/glm.hpp"
#include "glm/gtc/matrix_transform.hpp"
#include "glm/gtc/type_ptr.hpp"
// Window Dimensions
const GLint WIDTH=800, HEIGHT=600;
GLuint VAO, VBO, shader;
GLint uniformModel {};
GLint uniformModelRot {};
GLfloat triOffset {};
float triMaxOffset = 0.7f;
bool direction = true;
const float toRadians = 3.14159265f/180.0f;
// vertex shader
static const char* vShader =
"#version 330\n"
"layout (location = 0) in vec3 pos;\n"
"uniform mat4 model;\n"
"void main(){\n"
" gl_Position = model * vec4(0.5*pos, 1.0);\n"
"}\n";
// fragment shader
static const char* fShader = ""
"#version 330\n"
"out vec4 color;\n"
"uniform mat4 model;\n"
"void main(){\n"
" color = model *vec4(1.0, 1.0, 0.0, 1.0);\n"
"}\n";
void AddShader(GLuint theProgram, const char* ShaderCode, GLenum shaderType, std::string info){
std::cerr <<"INFO: Adding "<<info<<" Shader"<<std::endl;
GLuint theShader = glCreateShader(shaderType);
const GLchar* theCode[1];
theCode[0] = ShaderCode;
GLint codeLength[1];
codeLength[0] = strlen(ShaderCode);
glShaderSource(theShader, 1, theCode, codeLength);
glCompileShader(theShader);
GLint result =0;
GLchar eLog[1024] ={0};
glGetShaderiv(theShader, GL_COMPILE_STATUS, &result);
if(!result){
glGetShaderInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error compiling program"<<std::endl;
return;
}
glAttachShader(theProgram, theShader);
}
void CompileShader(){
shader = glCreateProgram();
if(!shader){
std::cerr<<"Error creating shader"<<std::endl;
return;
}
AddShader(shader, vShader, GL_VERTEX_SHADER, "vertex");
AddShader(shader, fShader, GL_FRAGMENT_SHADER, "fragment");
GLint result =0;
GLchar eLog[1024] ={0};
glLinkProgram(shader);
glGetProgramiv(shader, GL_LINK_STATUS, &result);
if(!result){
glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error linking program"<<std::endl;
return;
}
glValidateProgram(shader);
glGetProgramiv(shader, GL_VALIDATE_STATUS, &result);
if(!result){
glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error Validating program"<<std::endl;
return;
}
uniformModel = glGetUniformLocation(shader,"model");
}
void CreateTriangles(){
GLfloat vertices[]={
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat)*9,vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,0,0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
int main(){
//initialize GLFW
if(!glfwInit()){
std::cerr << "GLFW initialization failed!" << std::endl;
glfwTerminate();
return 1;
}
//Setup GLFW window properties
//openGL version
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
// core profile = no backward compatibility
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//allow forward compatibility
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow *mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "TEST WINDOW", NULL, NULL);
if(!mainWindow){
std::cerr << "GLFW Window creation failed" << std::endl;
glfwTerminate();
return 1;
}
// get Buffer size information
int bufferWidth, bufferHeight;
glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);
// set context for GLEW to use
glfwMakeContextCurrent(mainWindow);
// allow modern extension features
if(glewInit()!=GLEW_OK){
std::cerr << "GLEW initialization failed" << std::endl;
glfwDestroyWindow(mainWindow);
glfwTerminate();
return 1;
}
// setup viewport size
glViewport(0, 0, bufferWidth, bufferHeight);
CreateTriangles();
CompileShader();
while(!glfwWindowShouldClose(mainWindow)){
// get and handle user input events
glfwPollEvents();
glClearColor(1.0f, 0.0f, 0.0f, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
if(direction){
triOffset += 0.05f;
}else{
triOffset -= 0.05f;
}
if(abs(triOffset) >= triMaxOffset){
direction = !direction;
}
glUseProgram(shader);
glm::mat4 modelMatrix(1.0f);
modelMatrix = glm::translate(modelMatrix, glm::vec3(triOffset, 0.0f, 0.0f));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE,glm::value_ptr(modelMatrix));
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES,0,3);
glBindVertexArray(0);
glUseProgram(0);
// swap buffers
glfwSwapBuffers(mainWindow);
}
return 0;
}
OpenGL Mathematics (GLM) is based on the OpenGL Shading Language (GLSL). What glm::translate actually does is set up a translation matrix and multiply the input matrix by that translation. It computes m*t in the sense of the GLSL Vector and Matrix Operations:
mat<4, 4, T, Q> Result(m);
Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
(In the following Result is substituted by R)
Note, m[0] * v[0] multiplies each component of the column m[0] by the scalar v[0]. The result is the vector (m[0][0]*v[0], m[0][1]*v[0], m[0][2]*v[0], m[0][3]*v[0]).
So R[3] = m[0]*v[0] + m[1]*v[1] + m[2]*v[2] + m[3] is the same as
R[3][0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0]
R[3][1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1]
R[3][2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2]
R[3][3] = m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3]
glm::translate actually calculates:
vh = (v[0], v[1], v[2], 1)
R = m
R[3][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), vh )
R[3][1] = dot( (m[0][1], m[1][1], m[2][1], m[3][1]), vh )
R[3][2] = dot( (m[0][2], m[1][2], m[2][2], m[3][2]), vh )
R[3][3] = dot( (m[0][3], m[1][3], m[2][3], m[3][3]), vh )
The code above computes the dot products of the rows of m with vh. vh is the 4th column of the translation matrix t. Note that the translation matrix t is defined as:
c0 c1 c2 c3
---------------------
r0: 1 0 0 v[0]
r1: 0 1 0 v[1]
r2: 0 0 1 v[2]
r3: 0 0 0 1
A concatenation of 4x4 matrices (R = m*t) is formed from the dot products of the rows of m with the columns of t and can be expressed as:
(See OpenGL Shading Language 4.60 Specification - 5.10. Vector and Matrix Operations)
for i from 0 to 3
for j from 0 to 3
R[i][j] = dot( (m[0][j], m[1][j], m[2][j], m[3][j]), t[i] )
Where dot(a, b) == a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3],
(m[0][j], m[1][j], m[2][j], m[3][j]) is the j-th row of m and
t[i] is i-th column of t.
For glm::translate it is sufficient to copy R[0], R[1] and R[2] from m[0], m[1] and m[2].
e.g. for (i=0, j=0):
R[0][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), t[0] )
R[0][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), (1, 0, 0, 0) )
R[0][0] = m[0][0] * 1 + m[1][0] * 0 + m[2][0] * 0 + m[3][0] * 0
R[0][0] = m[0][0]
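If you want to convince yourself of this, a small check (just a sketch, assuming GLM is available) shows that glm::translate(m, v) produces exactly the same matrix as m * t with t built by hand:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    glm::mat4 m = glm::rotate(glm::mat4(1.0f), 0.7f, glm::vec3(0.0f, 0.0f, 1.0f));
    glm::vec3 v(2.0f, 3.0f, 4.0f);

    glm::mat4 t(1.0f);
    t[3] = glm::vec4(v, 1.0f);        // translation in the fourth column

    glm::mat4 a = glm::translate(m, v);
    glm::mat4 b = m * t;              // m * t, not t * m

    float max_diff = 0.0f;
    for (int c = 0; c < 4; ++c)
        for (int r = 0; r < 4; ++r)
            max_diff = std::max(max_diff, std::fabs(a[c][r] - b[c][r]));
    printf("max difference: %g\n", max_diff);   // expected: 0 (or within float noise)
    return 0;
}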
GLM matrices (as OpenGL matrices) are stored in column major order. If you investigate matrices in the debugger that may lead to confusions.
If you have the matrix
c0 c1 c2 c3
-------------------
r0: Xx Yx Zx Tx
r1: Xy Yy Zy Ty
r2: Xz Yz Zz Tz
r3: 0 0 0 1
then the memory image of a 4*4 OpenGL matrix looks like this:
Xx, Xy, Xz, 0, Yx, Yy, Yz, 0, Zx, Zy, Zz, 0, Tx, Ty, Tz, 1
If you investigate it in a debugger, it may look like:
[ [ Xx, Xy, Xz, 0 ],
[ Yx, Yy, Yz, 0 ],
[ Zx, Zy, Zz, 0 ],
[ Tx, Ty, Tz, 1 ] ]
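A quick way to see that memory image for yourself (a small sketch, assuming GLM is available):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <cstdio>

int main()
{
    glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(7.0f, 8.0f, 9.0f));
    const float *p = glm::value_ptr(m);
    // prints: 1 0 0 0  0 1 0 0  0 0 1 0  7 8 9 1 -- Tx, Ty, Tz occupy indices 12..14
    for (int i = 0; i < 16; ++i)
        printf("%g ", p[i]);
    printf("\n");
    return 0;
}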
The technical details of how the math is done are covered magnificently in @Rabbid76's answer, but if anyone would like to understand why m*t is computed instead of t*m, then here's the answer:
Computing the composite matrix as t*m means you're taking the standard basis as the basis vectors for the linear combination, so you're essentially transforming in world-space coordinates.
Doing it the other way around and computing m*t means you're taking m[0], m[1] and m[2] as the basis, so you're transforming in the local space given by that basis, and since this is essentially a model matrix, we just call it model space.
That is probably one way to view it if you're only considering translation, but what if you're handling composite transformations like below:
M=glm::translate(M,T);
R=glm::rotate(M,angle,Rot_axis);
Here the model matrix is M (initialized to identity at first), T is the translation, R the rotation, and the rest is straightforward.
So the transformation sequence that happens in the above code is:
M.T.R
Say this is applied to the vector v = [x, y, z, 1]: the vector first undergoes the rotation, then the translation, and only then the model transformation. If it helps, you may see it like this:
M.(T.R.v)
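A small concrete check of that ordering (a sketch, assuming the GLM headers from the question are included): translating and then rotating builds M = T*R, so the rotation reaches the vertex first:
glm::mat4 M(1.0f);
M = glm::translate(M, glm::vec3(1.0f, 0.0f, 0.0f));                   // M = T
M = glm::rotate(M, glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f)); // M = T * R

glm::vec4 v(1.0f, 0.0f, 0.0f, 1.0f);
glm::vec4 out = M * v;  // rotate (1,0,0) to (0,1,0) first, then translate by (1,0,0)
// out is approximately (1, 1, 0, 1)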

How to establish glBindBufferRange() offset with Shader Storage Buffer and std430?

I want to switch between SSBO data to draw things with different setups. To make that happen I need to use glBindBufferRange() with a suitable offset.
I've read that the offset needs to be a multiple of GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT for a UBO, but things may be different for an SSBO, since it uses std430 instead of std140.
I tried to do this the easiest way
struct Color
{
float r, g, b, a;
};
struct V2
{
float x, y;
};
struct Uniform
{
Color c1;
Color c2;
V2 v2;
float r;
float f;
int t;
};
GLuint ssbo = 0;
std::vector<Uniform> uniform;
int main()
{
//create window, context etc.
glCreateBuffers(1, &ssbo);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
Uniform u;
u.c1 = {255, 0, 255, 255 };
u.c2 = {255, 0, 255, 255 };
u.v2 = { 0.0f, 0.0f };
u.r = 0.0f;
u.f = 100.0f;
u.t = 0;
uniform.push_back(u);
u.c1 = {255, 255, 0, 255 };
u.c2 = {255, 255, 0, 255 };
u.v2 = { 0.0f, 0.0f };
u.r = 100.0f;
u.f = 100.0f;
u.t = 1;
uniform.push_back(u);
u.c1 = {255, 0, 0, 255 };
u.c2 = {255, 0, 0, 255 };
u.v2 = { 0.0f, 0.0f };
u.r = 100.0f;
u.f = 0.0f;
u.t = 0;
uniform.push_back(u);
glNamedBufferData(ssbo, sizeof(Uniform) * uniform.size(), uniform.data(), GL_STREAM_DRAW);
for(int i = 0; i < uniform.size(); ++i) {
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, ssbo, sizeof(Uniform) * i, sizeof(Uniform));
glDrawArrays(...);
}
//swap buffer etc.
return 0;
}
#version 460 core
layout(location = 0) out vec4 f_color;
layout(std430, binding = 1) buffer Unif
{
vec4 c1;
vec4 c2;
vec2 v2;
float r;
float f;
int t;
};
void main()
{
f_color = vec4(t, 0, 0, 1);
}
There are of course a VAO, a VBO, a vertex struct and so on, but they do not affect the SSBO.
I got a GL_INVALID_VALUE error from glBindBufferRange(), though. That must come from the offset, because my next attempt transfers the data, but in the wrong order.
My next attempt was to use GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT
and a formula I found on the Internet
int align = 4;
glGetIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &align);
int ssboSize = sizeof(Uniform) + align - sizeof(Uniform) % align;
so just changing glNamedBufferData and glBindBufferRange it looks like this
glNamedBufferData(ssbo, ssboSize * uniform.size(), uniform.data(), GL_STREAM_DRAW);
glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, ssbo, ssboSize * i, sizeof(Uniform));
and that way, it almost worked. As you can see, the t values are
0;
1;
0;
so OpenGL should draw the 3 shapes with these colors:
vec4(0, 0, 0, 1);
vec4(1, 0, 0, 1);
vec4(0, 0, 0, 1);
but it draws them in the wrong order:
vec4(1, 0, 0, 1);
vec4(0, 0, 0, 1);
vec4(0, 0, 0, 1);
How can I make it transfer the data the proper way?
The OpenGL spec (Version 4.6) states the following in section "6.1.1 Binding Buffer Objects to Indexed Target Points" regarding the error conditions for glBindBufferRange:
An INVALID_VALUE error is generated by BindBufferRange if buffer is
non-zero and offset or size do not respectively satisfy the constraints described for those parameters for the specified target, as described in section 6.7.1.
Section 6.7.1 "Indexed Buffer Object Limits and Binding Queries" states for SSBOs:
starting offset: SHADER_STORAGE_BUFFER_START
offset restriction: multiple of value of SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT
binding size SHADER_STORAGE_BUFFER_SIZE
According to Table 23.64 "Implementation Dependent Aggregate Shader Limits":
256 [with the following footnote]: The value of SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT is the maximum allowed, not the minimum.
So if your offset is not a multiple of 256 (which it isn't), this code is simply not guaranteed to work at all. You can query the actual restriction of the implementation you are running on and adjust your buffer contents accordingly, but you must be prepared for it to be as high as 256 bytes.
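A sketch of one way to do that with the GL 4.5 DSA calls the question already uses: round the per-draw stride up to the queried alignment and upload every element at its padded offset (uploading the tightly packed vector while reading it with a padded stride is what scrambled the order):
GLint align = 1;
glGetIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &align);

// round the per-draw block size up to the next multiple of the alignment
const GLsizeiptr stride = ((sizeof(Uniform) + align - 1) / align) * align;

// allocate padded storage and copy each element into its own aligned slot
glNamedBufferData(ssbo, stride * uniform.size(), nullptr, GL_STREAM_DRAW);
for (size_t i = 0; i < uniform.size(); ++i)
    glNamedBufferSubData(ssbo, stride * i, sizeof(Uniform), &uniform[i]);

for (size_t i = 0; i < uniform.size(); ++i) {
    glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, ssbo, stride * i, sizeof(Uniform));
    // glDrawArrays(...);
}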
I ended up using struct alignas(128) Uniform. I guess my next goal is to not use hardcoded align.

How to get keyboard navigation in OpenGL

I'm trying to create a solar system in OpenGL. I have the basic code for the Earth spinning on its axis, and I'm trying to set the camera to move with the arrow keys.
using namespace std;
using namespace glm;
const int windowWidth = 1024;
const int windowHeight = 768;
GLuint VBO;
int NUMVERTS = 0;
bool* keyStates = new bool[256]; //Create an array of boolean values of length 256 (0-255)
float fraction = 0.1f; //Fraction for navigation speed using keys
// Transform uniforms location
GLuint gModelToWorldTransformLoc;
GLuint gWorldToViewToProjectionTransformLoc;
// Lighting uniforms location
GLuint gAmbientLightIntensityLoc;
GLuint gDirectionalLightIntensityLoc;
GLuint gDirectionalLightDirectionLoc;
// Materials uniform location
GLuint gKaLoc;
GLuint gKdLoc;
// TextureSampler uniform location
GLuint gTextureSamplerLoc;
// Texture ID
GLuint gTextureObject[11];
//Navigation variables
float posX;
float posY;
float posZ;
float viewX = 0.0f;
float viewY = 0.0f;
float viewZ = 0.0f;
float dirX;
float dirY;
float dirZ;
vec3 cameraPos = vec3(0.0f,0.0f,5.0f);
vec3 cameraView = vec3(viewX,viewY,viewZ);
vec3 cameraDir = vec3(0.0f,1.0f,0.0f);
These are all the variables I'm using to edit the camera.
static void renderSceneCallBack()
{
// Clear the back buffer and the z-buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Create our world space to view space transformation matrix
mat4 worldToViewTransform = lookAt(
cameraPos, // The position of your camera, in world space
cameraView, // where you want to look at, in world space
cameraDir // Camera up direction (set to 0,-1,0 to look upside-down)
);
// Create out projection transform
mat4 projectionTransform = perspective(45.0f, (float)windowWidth / (float)windowHeight, 1.0f, 100.0f);
// Combine the world space to view space transformation matrix and the projection transformation matrix
mat4 worldToViewToProjectionTransform = projectionTransform * worldToViewTransform;
// Update the transforms in the shader program on the GPU
glUniformMatrix4fv(gWorldToViewToProjectionTransformLoc, 1, GL_FALSE, &worldToViewToProjectionTransform[0][0]);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(aitVertex), 0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(aitVertex), (const GLvoid*)12);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(aitVertex), (const GLvoid*)24);
// Set the material properties
glUniform1f(gKaLoc, 0.8f);
glUniform1f(gKdLoc, 0.8f);
// Bind the texture to the texture unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gTextureObject[0]);
// Set our sampler to user Texture Unit 0
glUniform1i(gTextureSamplerLoc, 0);
// Draw triangle
mat4 modelToWorldTransform = mat4(1.0f);
static float angle = 0.0f;
angle+=1.0f;
modelToWorldTransform = rotate(modelToWorldTransform, angle, vec3(0.0f, 1.0f, 0.0f));
glUniformMatrix4fv(gModelToWorldTransformLoc, 1, GL_FALSE, &modelToWorldTransform[0][0]);
glDrawArrays(GL_TRIANGLES, 0, NUMVERTS);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glutSwapBuffers();
}
This is the function that draws the earth onto the screen and determines where the camera is at.
void keyPressed (unsigned char key, int x, int y)
{
keyStates[key] = true; //Set the state of the current key to pressed
cout<<"keyPressed ";
}
void keyUp(unsigned char key, int x, int y)
{
keyStates[key] = false; //Set the state of the current key to released
cout<<"keyUp ";
}
void keyOperations (void)
{
if(keyStates['a'])
{
viewX += 0.5f;
}
cout<<"keyOperations ";
}
These are the functions I'm trying to use to edit the camera variables dynamically
// Create a vertex buffer
createVertexBuffer();
glutKeyboardFunc(keyPressed); //Tell Glut to use the method "keyPressed" for key events
glutKeyboardUpFunc(keyUp); //Tell Glut to use the method "keyUp" for key events
keyOperations();
glutMainLoop();
Finally, here are the few lines in my main method where I'm trying to call the key-press functions. In the console I see it detects that I'm pressing them, but the planet doesn't move at all. I think I may be calling keyOperations in the wrong place, but I'm not sure.
You are correct: keyOperations is being called in the wrong place. Where it is now, it is called once and then never again. It needs to go in your update code, where you update the rotation of the planet, so that it is called at least once per frame.
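One way to do that, sketched against the code shown (assuming a GLUT idle callback is acceptable): run keyOperations every frame, rebuild cameraView from the updated viewX/viewY/viewZ, and request a redraw:
static void idleCallBack()
{
    keyOperations();                            // apply whatever keys are currently held
    cameraView = vec3(viewX, viewY, viewZ);     // feed the new values into the lookAt target
    glutPostRedisplay();                        // redraw with the updated camera
}

// in main(), next to the other glut* registrations:
glutIdleFunc(idleCallBack);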

Modern equivalent of `gluOrtho2d `

What is the modern equivalent of the OpenGL function gluOrtho2d? clang is giving me deprecation warnings. I believe I need to write some kind of vertex shader? What should it look like?
I started off this answer thinking "It's not that different, you just have to...".
I started writing some code to prove myself right, and ended up not really doing so. Anyway, here are the fruits of my efforts: a minimal annotated example of "modern" OpenGL.
There's a good bit of code you'll need before modern OpenGL will start to act like old-school OpenGL. I'm not going to get into the reasons why you might like to do it the new way (or not) -- there are countless other answers that give a pretty good rundown. Instead I'll post some minimal code that can get you running if you're so inclined.
You should end up with this stunning piece of art:
Basic Render Process
Part 1: Vertex buffers
void TestDraw(){
// create a vertex buffer (This is a buffer in video memory)
GLuint my_vertex_buffer;
glGenBuffers(1 /*ask for one buffer*/, &my_vertex_buffer);
const float a_2d_triangle[] =
{
200.0f, 10.0f,
10.0f, 200.0f,
400.0f, 200.0f
};
// GL_ARRAY_BUFFER indicates we're using this for
// vertex data (as opposed to things like feedback, index, or texture data)
// so this call says use my_vertex_data as the vertex data source
// this will become relevant as we make draw calls later
glBindBuffer(GL_ARRAY_BUFFER, my_vertex_buffer);
// allocate some space for our buffer
glBufferData(GL_ARRAY_BUFFER, 4096, NULL, GL_DYNAMIC_DRAW);
// we've been a bit optimistic, asking for 4k of space even
// though there is only one triangle.
// the NULL source indicates that we don't have any data
// to fill the buffer quite yet.
// GL_DYNAMIC_DRAW indicates that we intend to change the buffer
// data from frame-to-frame.
// the idea is that we can place more than 3(!) vertices in the
// buffer later as part of normal drawing activity
// now we actually put the vertices into the buffer.
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(a_2d_triangle), a_2d_triangle);
Part 2: Vertex Array Object:
We need to define how the data contained in my_vertex_buffer is structured. This state is contained in a vertex array object (VAO). In modern OpenGL there needs to be at least one of these.
GLuint my_vao;
glGenVertexArrays(1, &my_vao);
//lets use the VAO we created
glBindVertexArray(my_vao);
// now we need to tell the VAO how the vertices in my_vertex_buffer
// are structured
// our vertices are really simple: each one has 2 floats of position data
// they could have been more complicated (texture coordinates, color --
// whatever you want)
// enable the first attribute in our VAO
glEnableVertexAttribArray(0);
// describe what the data for this attribute is like
glVertexAttribPointer(0, // the index we just enabled
2, // the number of components (our two position floats)
GL_FLOAT, // the type of each component
false, // should the GL normalize this for us?
2 * sizeof(float), // number of bytes until the next component like this
(void*)0); // the offset into our vertex buffer where this element starts
Part 3: Shaders
OK, we have our source data all set up, now we can set up the shader which will transform it into pixels
// first create some ids
GLuint my_shader_program = glCreateProgram();
GLuint my_fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
GLuint my_vertex_shader = glCreateShader(GL_VERTEX_SHADER);
// we'll need to compile the vertex shader and fragment shader
// and then link them into a full "shader program"
// load one string from &my_fragment_source
// the NULL indicates that the string is null-terminated
const char* my_fragment_source = FragmentSourceFromSomewhere();
glShaderSource(my_fragment_shader, 1, &my_fragment_source, NULL);
// now compile it:
glCompileShader(my_fragment_shader);
// then check the result
GLint compiled_ok;
glGetShaderiv(my_fragment_shader, GL_COMPILE_STATUS, &compiled_ok);
if (!compiled_ok){ printf("Oh Noes, fragment shader didn't compile!\n"); }
else{
glAttachShader(my_shader_program, my_fragment_shader);
}
// and again for the vertex shader
const char* my_vertex_source = VertexSourceFromSomewhere();
glShaderSource(my_vertex_shader, 1, &my_vertex_source, NULL);
glCompileShader(my_vertex_shader);
glGetShaderiv(my_vertex_shader, GL_COMPILE_STATUS, &compiled_ok);
if (!compiled_ok){ printf("Oh Noes, vertex shader didn't compile!\n"); }
else{
glAttachShader(my_shader_program, my_vertex_shader);
}
//finally, link the program, and set it active
glLinkProgram(my_shader_program);
glUseProgram(my_shader_program);
Part 4: Drawing things on the screen
//get the screen size
float my_viewport[4];
glGetFloatv(GL_VIEWPORT, my_viewport);
//now create a projection matrix
float my_proj_matrix[16];
MyOrtho2D(my_proj_matrix, 0.0f, my_viewport[2], my_viewport[3], 0.0f);
//"uProjectionMatrix" refers directly to the variable of that name in
// shader source
GLuint my_projection_ref =
glGetUniformLocation(my_shader_program, "uProjectionMatrix");
// send our projection matrix to the shader
glUniformMatrix4fv(my_projection_ref, 1, GL_FALSE, my_proj_matrix );
//clear the background
glClearColor(0.3, 0.4, 0.4, 1.0);
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
// *now* after that tiny setup, we're ready to draw the best 24 bytes of
// vertex data ever.
// draw the 3 vertices starting at index 0, interpreting them as triangles
glDrawArrays(GL_TRIANGLES, 0, 3);
// now just swap buffers however your window manager lets you
}
And That's it!
... except for the actual
Shaders
I started to get a little tired at this point, so the comments are a bit lacking. Let me know if you'd like anything clarified.
const char* VertexSourceFromSomewhere()
{
return
"#version 330\n"
"layout(location = 0) in vec2 inCoord;\n"
"uniform mat4 uProjectionMatrix;\n"
"void main()\n"
"{\n"
" gl_Position = uProjectionMatrix*(vec4(inCoord, 0, 1.0));\n"
"}\n";
}
const char* FragmentSourceFromSomewhere()
{
return
"#version 330 \n"
"out vec4 outFragColor;\n"
"vec4 DebugMagenta(){ return vec4(1.0, 0.0, 1.0, 1.0); }\n"
"void main() \n"
"{\n"
" outFragColor = DebugMagenta();\n"
"}\n";
}
The Actual Question you asked: Orthographic Projection
As noted, the actual math is just directly from Wikipedia.
void MyOrtho2D(float* mat, float left, float right, float bottom, float top)
{
// this is basically from
// http://en.wikipedia.org/wiki/Orthographic_projection_(geometry)
const float zNear = -1.0f;
const float zFar = 1.0f;
const float inv_z = 1.0f / (zFar - zNear);
const float inv_y = 1.0f / (top - bottom);
const float inv_x = 1.0f / (right - left);
//first column
*mat++ = (2.0f*inv_x);
*mat++ = (0.0f);
*mat++ = (0.0f);
*mat++ = (0.0f);
//second
*mat++ = (0.0f);
*mat++ = (2.0*inv_y);
*mat++ = (0.0f);
*mat++ = (0.0f);
//third
*mat++ = (0.0f);
*mat++ = (0.0f);
*mat++ = (-2.0f*inv_z);
*mat++ = (0.0f);
//fourth
*mat++ = (-(right + left)*inv_x);
*mat++ = (-(top + bottom)*inv_y);
*mat++ = (-(zFar + zNear)*inv_z);
*mat++ = (1.0f);
}
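If pulling in a library is an option, GLM ships the same matrix ready-made; a rough equivalent of the setup above (a sketch, assuming GLM is available and reusing my_viewport and my_projection_ref from the earlier snippet):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

glm::mat4 proj = glm::ortho(0.0f, my_viewport[2], my_viewport[3], 0.0f, -1.0f, 1.0f);
glUniformMatrix4fv(my_projection_ref, 1, GL_FALSE, glm::value_ptr(proj));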
Modern OpenGL is significantly different. You won't be able to just drop in a new function. Read up...
http://duriansoftware.com/joe/An-intro-to-modern-OpenGL.-Chapter-1:-The-Graphics-Pipeline.html
http://www.arcsynthesis.org/gltut/index.html
http://www.opengl-tutorial.org/beginners-tutorials/tutorial-2-the-first-triangle/

Why does my translation matrix need to be transposed?

I'm working on a small graphics engine using OpenGL and I'm having some issues with my translation matrix. I'm using OpenGL 3.3, GLSL and C++.
The situation is this: I have defined a small cube which I want to render on screen. The cube uses its own coordinate system, so I created a model matrix to be able to transform the cube. To make it a bit easier for myself, I started out with just a translation matrix as the cube's model matrix, and after a bit of coding I've managed to make everything work and the cube appears on the screen. Nothing all too special, but there is one thing about my translation matrix that I find a bit odd.
Now as far as I know, a translation matrix is defined as follows:
1, 0, 0, x
0, 1, 0, y
0, 0, 1, z
0, 0, 0, 1
However, this does not work for me. When I define my translation matrix this way, nothing appears on the screen. It only works when I define my translation matrix like this:
1, 0, 0, 0
0, 1, 0, 0
0, 0, 1, 0
x, y, z, 1
Now, I've been over my code several times to find out why this is the case, but I can't seem to find the reason. Or am I just wrong, and does a translation matrix need to be defined like the transposed one above?
My matrices are defined as a one-dimensional array going from left to right, top to bottom.
Here is some of my code that might help:
//this is called just before cube is being rendered
void DisplayObject::updateMatrices()
{
modelMatrix = identityMatrix();
modelMatrix = modelMatrix * translateMatrix( xPos, yPos, zPos );
/* update modelview-projection matrix */
mvpMatrix = modelMatrix * (*projMatrix);
}
//this creates my translation matrix which causes the cube to disappear
const Matrix4 translateMatrix( float x, float y, float z )
{
Matrix4 tranMatrix = identityMatrix();
tranMatrix.data[3] = x;
tranMatrix.data[7] = y;
tranMatrix.data[11] = z;
return Matrix4(tranMatrix);
}
This is my simple test vertex shader:
#version 150 core
in vec3 vPos;
uniform mat4 mvpMatrix;
void main()
{
gl_Position = mvpMatrix * vec4(vPos, 1.0);
}
I've also done tests to check that my matrix multiplication works, and it does.
I * randomMatrix is still just randomMatrix
I hope you guys can help.
Thanks
EDIT:
This is how I send the matrix data to OpenGL:
void DisplayObject::render()
{
updateMatrices();
glBindVertexArray(vaoID);
glUseProgram(progID);
glUniformMatrix4fv( glGetUniformLocation(progID, "mvpMatrix"), 1, GL_FALSE, &mvpMatrix.data[0] );
glDrawElements(GL_TRIANGLES, bufferSize[index], GL_UNSIGNED_INT, 0);
}
mvpMatrix.data is a std::vector:
For OpenGL
1, 0, 0, 0
0, 1, 0, 0
0, 0, 1, 0
x, y, z, 1
Is the correct Translation Matrix.
Why? OpenGL uses column-major matrix ordering, which is the transpose of the matrix you initially presented, which is in row-major ordering. Row-major order is used in most math textbooks and also in DirectX, so it is a common point of confusion for those new to OpenGL.
See: http://www.mindcontrol.org/~hplus/graphics/matrix-layout.html
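As a side note (a sketch, not the only fix): glUniformMatrix4fv has a transpose parameter, so you can keep the conventional layout with the translation at data[3], data[7], data[11] and let OpenGL transpose it during upload; the order in which you compose your matrices still has to match whichever convention you settle on:
// Sketch: keep the row-major layout (translation at data[3], data[7], data[11])
// and let OpenGL transpose the matrix while uploading by passing GL_TRUE.
glUniformMatrix4fv(glGetUniformLocation(progID, "mvpMatrix"),
                   1, GL_TRUE, &mvpMatrix.data[0]);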
You cannot simply swap the operands of a matrix multiplication: A*B is generally different from B*A. What does hold is that transposing reverses the order:
t(A*B) = t(B) * t(A)
So if your matrices are stored transposed, you have to multiply them in the reverse order.
try
void DisplayObject::updateMatrices()
{
modelMatrix = identityMatrix();
modelMatrix = translateMatrix( xPos, yPos, zPos ) * modelMatrix;
/* update modelview-projection matrix */
mvpMatrix = modelMatrix * (*projMatrix);
}