I'm trying to improve Henry Thasler's GLSL implementation of double-single arithmetic (from his GLSL Mandelbrot demo) to work reliably on NVIDIA graphics on Linux. I've recently learned that since OpenGL 4.0 (§4.7 The Precise Qualifier in the spec), or with the GL_ARB_gpu_shader5 extension (spec), we can use the precise qualifier to make the calculations follow the exact sequence of arithmetic operations specified in the GLSL source.
But the following attempt appears to not give any improvement:
#version 330
#extension GL_ARB_gpu_shader5 : require
vec2 ds_add(vec2 dsa, vec2 dsb)
{
precise float t1 = dsa.x + dsb.x;
precise float e = t1 - dsa.x;
precise float t2 = ((dsb.x - e) + (dsa.x - (t1 - e))) + dsa.y + dsb.y;
precise vec2 dsc;
dsc.x = t1 + t2;
dsc.y = t2 - (dsc.x - t1);
return dsc;
}
The result is the same as if there were no precise added. I've checked that the algorithm itself is correct: it works as is (even without precise) on Intel Core i7-4765T built-in graphics, and if I hide some variables to inhibit optimizations, then NVidia also gives the correct results. Here's how I inhibit the optimizations:
#version 330
#define hide(x) ((x)*one)
uniform float one=1;
vec2 ds_add(vec2 dsa, vec2 dsb)
{
float t1 = dsa.x + dsb.x;
float e = hide(t1) - dsa.x;
float t2 = ((dsb.x - e) + (dsa.x - (t1 - e))) + dsa.y + dsb.y;
vec2 dsc;
dsc.x = t1 + t2;
dsc.y = t2 - (hide(dsc.x) - t1);
return dsc;
}
So, apparently, I'm using the precise qualifier incorrectly. But what exactly is wrong here?
For reference, I'm using NVidia GeForce GTX 750Ti with binary nvidia driver 390.116. Here's the full C++ test:
#include <cmath>
#include <cassert>
#include <vector>
#include <string>
#include <limits>
#include <iomanip>
#include <iostream>
// glad.h is generated by the following command:
// glad --out-path=. --generator=c --omit-khrplatform --api="gl=3.3" --profile=core --extensions=
#include "glad/glad.h"
#include <GL/freeglut.h>
#include <glm/glm.hpp>
using glm::vec4;
GLuint vao, vbo;
GLuint texFBO;
GLuint program;
GLuint fbo;
int width=1, height=2;
void printShaderOutput(int texW, int texH)
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texFBO);
std::vector<vec4> data(texW*texH);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, data.data());
std::cout << "a,b,sum,relError(sum),note\n";
for(int i=0;i<width;++i)
{
const auto a=double(data[i+width*0].x)+double(data[i+width*0].y);
const auto b=double(data[i+width*0].z)+double(data[i+width*0].w);
const auto sum=double(data[i+width*1].x)+double(data[i+width*1].y);
const auto trueSum=a+b;
const auto sumErr=(sum-trueSum)/trueSum;
std::cout << std::setprecision(std::numeric_limits<double>::max_digits10)
<< a << ',' << b << ','
<< sum << ','
<< std::setprecision(3)
<< sumErr << ','
<< (std::abs(sumErr)>1e-14 ? "WARN" : "OK")
<< '\n';
}
std::cout.flush();
}
GLuint makeShader(GLenum type, std::string const& srcStr)
{
const auto shader=glCreateShader(type);
const GLint srcLen=srcStr.size();
const GLchar*const src=srcStr.c_str();
glShaderSource(shader, 1, &src, &srcLen);
glCompileShader(shader);
GLint status=-1;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
return shader;
}
void loadShaders()
{
program=glCreateProgram();
const auto vertexShader=makeShader(GL_VERTEX_SHADER, 1+R"(
#version 330
in vec4 vertex;
void main() { gl_Position=vertex; }
)");
glAttachShader(program, vertexShader);
const auto fragmentShader=makeShader(GL_FRAGMENT_SHADER, 1+R"(
#version 330
#extension GL_ARB_gpu_shader5 : require
vec2 ds_add(vec2 dsa, vec2 dsb)
{
precise float t1 = dsa.x + dsb.x;
precise float e = t1 - dsa.x;
precise float t2 = ((dsb.x - e) + (dsa.x - (t1 - e))) + dsa.y + dsb.y;
precise vec2 dsc;
dsc.x = t1 + t2;
dsc.y = t2 - (dsc.x - t1);
return dsc;
}
uniform vec2 a, b;
out vec4 color;
void main()
{
if(gl_FragCoord.y<1) // first row
color=vec4(a,b);
else if(gl_FragCoord.y<2) // second row
color=vec4(ds_add(a,b),0,0);
}
)");
glAttachShader(program, fragmentShader);
glLinkProgram(program);
GLint status=0;
glGetProgramiv(program, GL_LINK_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
glDetachShader(program, fragmentShader);
glDeleteShader(fragmentShader);
glDetachShader(program, vertexShader);
glDeleteShader(vertexShader);
}
void setupBuffers()
{
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
const GLfloat vertices[]=
{
-1, -1,
1, -1,
-1, 1,
1, 1,
};
glBufferData(GL_ARRAY_BUFFER, sizeof vertices, vertices, GL_STATIC_DRAW);
constexpr GLuint attribIndex=0;
constexpr int coordsPerVertex=2;
glVertexAttribPointer(attribIndex, coordsPerVertex, GL_FLOAT, false, 0, 0);
glEnableVertexAttribArray(attribIndex);
glBindVertexArray(0);
}
bool init()
{
if(!gladLoadGL())
{
std::cerr << "Failed to initialize GLAD\n";
return false;
}
if(!GLAD_GL_VERSION_3_3)
{
std::cerr << "OpenGL 3.3 not supported\n";
return false;
}
glGenTextures(1, &texFBO);
glGenFramebuffers(1,&fbo);
loadShaders();
setupBuffers();
glViewport(0,0,width,height);
glBindTexture(GL_TEXTURE_2D,texFBO);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA32F,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,nullptr);
glBindTexture(GL_TEXTURE_2D,0);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_TEXTURE_2D,texFBO,0);
const auto status=glCheckFramebufferStatus(GL_FRAMEBUFFER);
assert(status==GL_FRAMEBUFFER_COMPLETE);
glBindFramebuffer(GL_FRAMEBUFFER,0);
return true;
}
void display()
{
const static bool inited=init();
if(!inited) std::exit(1);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glUseProgram(program);
#define SPLIT_DOUBLE_TO_FLOATS(x) GLfloat(x),GLfloat(x-GLfloat(x))
glUniform2f(glGetUniformLocation(program,"a"),SPLIT_DOUBLE_TO_FLOATS(3.1415926535897932));
glUniform2f(glGetUniformLocation(program,"b"),SPLIT_DOUBLE_TO_FLOATS(2.7182818284590452));
glUniform1f(glGetUniformLocation(program,"rtWidth"),width);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
printShaderOutput(width, height);
std::exit(0);
glFinish();
}
int main(int argc, char** argv)
{
glutInitContextVersion(3,3);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(width, height);
glutCreateWindow("Test");
glutDisplayFunc(display);
glutMainLoop();
}
I've been able to extract the NVfp5.0 assembly from the GLSL program binaries in the different cases:
Naïve case without hide and without precise:
!!NVfp5.0
OPTION NV_internal;
OPTION NV_bindless_texture;
PARAM c[2] = { program.local[0..1] };
TEMP R0;
TEMP T;
TEMP RC, HC;
OUTPUT result_color0 = result.color;
SLT.F R0.x, fragment.position.y, {1, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
MOV.F result_color0.xy, c[0];
MOV.F result_color0.zw, c[1].xyxy;
ELSE;
SLT.F R0.x, fragment.position.y, {2, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
ADD.F R0.y, -c[0].x, c[0].x;
ADD.F R0.x, -c[1], c[1];
ADD.F R0.x, R0, R0.y;
ADD.F R0.x, R0, c[0].y;
ADD.F R0.y, R0.x, c[1];
ADD.F R0.x, c[0], c[1];
ADD.F result_color0.x, R0, R0.y;
ADD.F result_color0.y, R0, -R0;
MOV.F result_color0.zw, {0, 0, 0, 0}.x;
ENDIF;
ENDIF;
END
The case with precise (notice that nothing changes except the .PREC suffix on the "instructions"):
!!NVfp5.0
OPTION NV_internal;
OPTION NV_bindless_texture;
PARAM c[2] = { program.local[0..1] };
TEMP R0;
TEMP T;
TEMP RC, HC;
OUTPUT result_color0 = result.color;
SLT.F R0.x, fragment.position.y, {1, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
MOV.F result_color0.xy, c[0];
MOV.F result_color0.zw, c[1].xyxy;
ELSE;
SLT.F R0.x, fragment.position.y, {2, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
ADD.F.PREC R0.y, -c[0].x, c[0].x;
ADD.F.PREC R0.x, -c[1], c[1];
ADD.F.PREC R0.x, R0, R0.y;
ADD.F.PREC R0.x, R0, c[0].y;
ADD.F.PREC R0.y, R0.x, c[1];
ADD.F.PREC R0.x, c[0], c[1];
ADD.F.PREC result_color0.x, R0, R0.y;
ADD.F.PREC result_color0.y, R0, -R0;
MOV.F result_color0.zw, {0, 0, 0, 0}.x;
ENDIF;
ENDIF;
END
The case with hide, which does work, and obviously has a different sequence of arithmetic operations:
!!NVfp5.0
OPTION NV_internal;
OPTION NV_bindless_texture;
PARAM c[3] = { program.local[0..2] };
TEMP R0, R1;
TEMP T;
TEMP RC, HC;
OUTPUT result_color0 = result.color;
SLT.F R0.x, fragment.position.y, {1, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
MOV.F result_color0.xy, c[1];
MOV.F result_color0.zw, c[2].xyxy;
ELSE;
SLT.F R0.x, fragment.position.y, {2, 0, 0, 0};
TRUNC.U.CC HC.x, R0;
IF NE.x;
ADD.F R0.x, c[1], c[2];
MAD.F R0.y, R0.x, c[0].x, -c[1].x;
ADD.F R0.z, R0.x, -R0.y;
ADD.F R0.z, -R0, c[1].x;
ADD.F R0.y, -R0, c[2].x;
ADD.F R0.y, R0, R0.z;
ADD.F R0.y, R0, c[1];
ADD.F R0.y, R0, c[2];
ADD.F R1.x, R0, R0.y;
MAD.F R0.x, R1, c[0], -R0;
MOV.F R1.zw, {0, 0, 0, 0}.x;
ADD.F R1.y, R0, -R0.x;
MOV.F result_color0, R1;
ENDIF;
ENDIF;
END
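For reference, here's a minimal sketch of one way to obtain such dumps (assuming the GL_ARB_get_program_binary / OpenGL 4.1 entry points are loaded; on this driver the NVfp5.0 text is simply embedded as plain text in the returned blob; the helper name and output file name are arbitrary):
#include <fstream>
#include <vector>
// Dump the linked program's driver binary to a file; the !!NVfp5.0 section can then be
// read directly out of "program.bin" with a text editor or strings(1).
void dumpProgramBinary(GLuint program)
{
    GLint length = 0;
    glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &length);
    std::vector<char> blob(length);
    GLenum format = 0;
    glGetProgramBinary(program, length, nullptr, &format, blob.data());
    std::ofstream("program.bin", std::ios::binary).write(blob.data(), blob.size());
}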
I've never used precise myself, though you may benefit from learning OpenCL or CUDA here.
In any case, your GLSL version is 3.30, which is tied to OpenGL 3.3. The precise qualifier is available there only through an extension, but I would always attempt to use a built-in feature of OpenGL if you can.
The extension may not be implemented in the same manner, so I suggest you try using at least GLSL version 4.0, ideally the latest OpenGL / GLSL version.
Sometimes these old extensions can have regressions on newer GPUs if no one is using them.
GPU compilers tend to be more liberal with optimizations. You may benefit from seeing the output of the compiled shader; there may be some way to view the assembly output from the Nvidia compiler with GLSL. With CUDA, you can definitely preview the PTX assembly output to ensure the operations are not being re-ordered by the compiler.
The spec mentions MAD as the main reason for the qualifier: it forces the compiler not to fuse the operations into a MAD instruction. Perhaps little testing was done with addition / subtraction under the precise qualifier.
If hide solves it for you, it's probably best to just call it a day; I doubt the precise qualifier has been thoroughly checked on the GLSL side. I highly recommend CUDA or OpenCL for this; you can use CL-GL interop if you want to display the texture quickly as well, which isn't terribly painful.
The precise qualifier ensures there is no re-ordering of operations, but it says nothing about optimizations that don't affect the ordering. It seems like AMD simply turns off optimizations when it is used. It's still possible that Nvidia applies optimizations that affect your result but aren't related to the order of operations, rather to specific simplifications of the additions being performed.
precise float t1 = dsa.x + dsb.x;
precise float e = t1 - dsa.x;
The compiler will probably compute e as simply dsb.x. It may still be applying optimizations that don't change the order of operations, since that ordering is all the spec guarantees. I can't think of anything other than the operations being re-ordered that would affect this result, but I'm no expert here.
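To illustrate that on the CPU, here's a minimal sketch (plain C++, hypothetical values, compiled without -ffast-math): with correct IEEE semantics e captures only the part of dsb.x that survived the rounded addition, whereas the algebraic simplification e == dsb.x would make the error term vanish, which is exactly what the double-single trick relies on not happening.
#include <cstdio>

int main()
{
    // Hypothetical stand-ins for dsa.x and dsb.x: b is too small to survive the addition.
    float a = 3.1415927f;   // dsa.x
    float b = 2.3e-8f;      // dsb.x (less than half an ulp of a)
    float t1 = a + b;       // rounds back to a
    float e  = t1 - a;      // 0 with IEEE semantics; would be b if simplified algebraically
    float err = b - e;      // the rounding error the double-single code wants to keep
    std::printf("t1=%.9g e=%.9g err=%.9g\n", t1, e, err); // err = 2.3e-08, not 0
    return 0;
}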
Another thing to note: based on my cursory reading of the spec, the result of ds_add may need to be stored into a precise variable as well in order for the computation to be precise. The function may be inlined only on Nvidia (they have much better optimizations, at least historically), so I imagine the compiler performs the inlining, and if you then store the result into a non-precise variable, all the existing precise qualifiers are ignored.
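As a concrete sketch of that idea (untested, embedded as a raw string in the same style as the test program): qualify the out parameter and the variable receiving the result as precise as well, so no non-precise destination ever holds an intermediate of the algorithm.
// Hypothetical variant of the fragment shader.
const char* dsAddPreciseSrc = R"(
#version 330
#extension GL_ARB_gpu_shader5 : require
void ds_add(vec2 dsa, vec2 dsb, precise out vec2 dsc)
{
    precise float t1 = dsa.x + dsb.x;
    precise float e  = t1 - dsa.x;
    precise float t2 = ((dsb.x - e) + (dsa.x - (t1 - e))) + dsa.y + dsb.y;
    dsc.x = t1 + t2;
    dsc.y = t2 - (dsc.x - t1);
}
uniform vec2 a, b;
out vec4 color;
void main()
{
    precise vec2 sum;
    ds_add(a, b, sum);
    color = vec4(sum, 0, 0);
}
)";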
There is nothing wrong with your shader. The ds_add() code simply doesn't contain any operations that could be merged at compile time. Usually adds merge with multiplies/divides, but your code has add operations only.
Update:
In this case all your variables are stored in GPU registers during the calculation. The order of operations on registers doesn't depend on the code or the compiler. It doesn't even depend only on the hardware; it depends on the operations currently running in the GPU.
The precision of floating-point operations between registers isn't strictly 32 bits; it is usually higher. The actual precision for GPUs is a commercial secret. The actual precision of the x86 FPU is 80 or 128 bits, even though the variables are stored as 32 bits in memory.
However, GPUs are not designed for very precise calculation. The algorithm's author knows this and implements a double through pairs of 32-bit floats. If you need even more precision, you have to emulate a long double with quads of 32-bit floats. A simple 'precise' doesn't help.
When I use a custom column major matrix in my code, and pass it to the vertex shader, the triangle is not drawn as expected, but when I use a row major matrix, it draws the triangle in its correct position.
I googled it and found some answers related to this question:
Like this and this, but I could not understand what I'm doing wrong.
If I'm not mistaken, a row-major matrix is:
{ 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
Tx, Ty, Tz, w}
So, using this row-major matrix, the multiplication order should be: v' = v*M.
And a column-major matrix is:
{ 0, 4, 8, Tx,
1, 5, 9, Ty,
2, 6, 10, Tz,
3, 7, 11, w}
Using this column-major matrix, the multiplication order should be: v' = M*v.
Where Tx, Ty, and Tz hold the translation values for x, y and z, respectively.
Having said that, I will focus on what I think I'm having trouble with, in order to keep the question compact, but I will post example code at the end, using GLFW and GLAD (<glad/gl.h>).
This is my vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(aPos, 1.0);
};
These are my Mat4 struct and its functions:
typedef struct Mat4
{
float data[16];
} Mat4;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
This is my update_triangle function where I translate the matrix:
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data);
Note that I'm passing GL_FALSE in glUniformMatrix4fv, which tells OpenGL that the matrix is already in column-major order.
However, when running the program, I do not get a triangle 0.5f up and 0.5f right, I get this:
Weird triangle translation
But when I use a row-major matrix and change the multiplication order in the vertex shader(v' = v*M), I get the result that I was expecting.
The vertex shader:
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 transform;
void main()
{
gl_Position = vec4(aPos, 1.0) * transform;
};
The update_triangle function:
Mat4 trans = mat4_identity();
trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f);
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data);
Note that I'm passing GL_TRUE in glUniformMatrix4fv, which tells OpenGL that the matrix is not in column-major order and needs to be transposed.
The result:
Triangle drawn as expected
Here is the code in a single file; it needs to be compiled with GLFW and glad/gl.c.
Comment [0] and Comment [1] are just to help with which lines to comment together. For example: if you comment a line marked "// Comment [0]", you need to comment the other lines marked "// Comment [0]" as well.
But in the vertex shader, both matrices use the same line to be drawn correctly (which is the part I don't understand).
If you are on Linux, you can compile with: g++ -o ex example.cpp gl.c -lglfw && ./ex
(You will need to download gl.c from the Glad generator.)
Code:
#include <glad/gl.h>
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <stdlib.h>
// Mat4 structure
typedef struct Mat4
{
float data[16];
} Mat4;
int c = 0;
// Return Mat4 identity matrix
Mat4 mat4_identity()
{
Mat4 m = {0};
m.data[0] = 1.0f;
m.data[5] = 1.0f;
m.data[10] = 1.0f;
m.data[15] = 1.0f;
return m;
}
// Translate Mat4 using row-major order
Mat4 mat4_row_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[12] += x;
m.data[13] += y;
m.data[14] += z;
return m;
}
// Translate Mat4 using column-major order
Mat4 mat4_column_translation(Mat4 a, float x, float y, float z)
{
Mat4 m = mat4_identity();
m.data[3] += x;
m.data[7] += y;
m.data[11] += z;
return m;
}
GLFWwindow *glfw_window;
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput();
// Shader functions
static unsigned int compile_shader(unsigned int type, const char *source);
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader);
// Triangle functions
void init_triangle();
void draw_triangle();
void update_triangle();
unsigned int shader = -1;
unsigned int vao = -1;
unsigned int vbo = -1;
float vertices[] = {
-0.5f, -0.5f, 0.0f, // left
0.5f, -0.5f, 0.0f, // right
0.0f, 0.5f, 0.0f // top
};
const char *vshader = "#version 330 core\n"
"layout (location = 0) in vec3 aPos;\n"
"uniform mat4 transform;\n"
"void main()\n"
"{\n"
// " gl_Position = vec4(aPos, 1.0) * transform;\n" // Comment [0] -> Inverted for column-major
" gl_Position = transform * vec4(aPos, 1.0);\n" // Comment [1] -> Inverted for column-major
"}\0";
const char *fshader = "#version 330 core\n"
"out vec4 FragColor;\n"
"void main()\n"
"{\n"
" FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n"
"}\n\0";
int main()
{
int result = init_glfw("LearnOpenGL", 0, 0, 800, 600);
if(result != 0)
return result;
init_triangle();
while (!glfwWindowShouldClose(glfw_window))
{
// input
processInput();
// Update triangle vertices
update_triangle();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw triangle example
draw_triangle();
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
glfwSwapBuffers(glfw_window);
glfwPollEvents();
}
// glfw: terminate, clearing all previously allocated GLFW resources.
glfwTerminate();
return 0;
}
// My confusion is here
void update_triangle()
{
Mat4 trans = mat4_identity();
trans = mat4_column_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [0]
// trans = mat4_row_translation(trans, 0.5f, 0.5f, 0.0f); // Comment [1]
// Print Mat4
if(c == 0)
{
// TODO: Remove this
printf("==== Trans: ====\n");
for(int i = 1; i <= 16; i++)
{
printf("%.2f, ", trans.data[i-1]);
if(i % 4 == 0 && i != 0)
printf("\n");
}
c++;
}
unsigned int transformLoc = glGetUniformLocation(shader, "transform");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, trans.data); // Comment [0]
// glUniformMatrix4fv(transformLoc, 1, GL_TRUE, trans.data); // Comment [1]
}
// Window functions
int init_glfw(const char *window_title, int x, int y, int width, int height)
{
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
#ifdef __APPLE__
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
#endif
// glfw window creation
// --------------------
glfw_window = glfwCreateWindow(width, height, window_title, NULL, NULL);
if (glfw_window == NULL)
{
printf("Failed to create GLFW window\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(glfw_window);
glfwSetFramebufferSizeCallback(glfw_window, framebuffer_size_callback);
// glad: load all OpenGL function pointers
// ---------------------------------------
int version = gladLoadGL(glfwGetProcAddress);
printf("Current GL loaded: %d.%d\n", GLAD_VERSION_MAJOR(version), GLAD_VERSION_MINOR(version));
return 0;
}
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void processInput()
{
if(glfwGetKey(glfw_window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(glfw_window, true);
}
/* Default Compilation for Shader */
static unsigned int compile_shader(unsigned int type, const char *source)
{
unsigned int id = glCreateShader(type);
glShaderSource(id, 1, &source, NULL);
glCompileShader(id);
int result;
glGetShaderiv(id, GL_COMPILE_STATUS, &result);
if(!result)
{
int length;
glGetShaderiv(id, GL_INFO_LOG_LENGTH, &length);
char* msg = (char*) alloca(length * sizeof(char));
glGetShaderInfoLog(id, length, &length, msg);
printf("Vertex / Fragment Shader Failed:\n %s", msg);
glDeleteShader(id);
return 0;
}
return id;
}
static unsigned int create_shader(const char *vertex_shader, const char *fragment_shader)
{
unsigned int program = glCreateProgram();
unsigned int vs = compile_shader(GL_VERTEX_SHADER, vertex_shader);
unsigned int fs = compile_shader(GL_FRAGMENT_SHADER, fragment_shader);
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
glValidateProgram(program);
glDeleteShader(vs);
glDeleteShader(fs);
return program;
}
// Triangle functions
void init_triangle()
{
shader = create_shader(vshader, fshader);
printf("shader=%d", shader);
glUseProgram(shader);
glGenVertexArrays(1, &vao);
printf("vao=%d", vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
printf("vbo=%d\n", vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // Using this vbo
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), NULL);
}
void draw_triangle()
{
glUseProgram(shader);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
This is my first question in this forum, so please let me know if there is anything missing.
So many people use row-major or transposed matrices, that they forget that matrices are not naturally oriented that way. So they see a translation matrix as this:
1 0 0 0
0 1 0 0
0 0 1 0
x y z 1
This is a transposed translation matrix. That is not what a normal translation matrix looks like. The translation goes in the 4th column, not the fourth row. Sometimes, you even see this in textbooks, which is utter garbage.
It's easy to know whether a matrix stored in an array is row-major or column-major. If it's row-major, then the translation is stored at indices 3, 7, and 11. If it's column-major, then the translation is stored at indices 12, 13, and 14. Zero-based indices, of course.
Your confusion stems from believing that you're using column-major matrices when you're in fact using row-major ones.
The statement that row vs. column major is a notational convention only is entirely true. The mechanics of matrix multiplication and matrix/vector multiplication are the same regardless of the convention.
What changes is the meaning of the results.
A 4x4 matrix after all is just a 4x4 grid of numbers. It doesn't have to refer to a change of coordinate system. However, once you assign meaning to a particular matrix, you now need to know what is stored in it and how to use it.
Take the translation matrix I showed you above. That's a valid matrix. You could store that matrix in a float[16] in one of two ways:
float row_major_t[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
float column_major_t[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
However, I said that this translation matrix is wrong, because the translation is in the wrong place. I specifically said that it is transposed relative to the standard convention for how to build translation matrices, which ought to look like this:
1 0 0 x
0 1 0 y
0 0 1 z
0 0 0 1
Let's look at how these are stored:
float row_major[16] = {1, 0, 0, x, 0, 1, 0, y, 0, 0, 1, z, 0, 0, 0, 1};
float column_major[16] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, x, y, z, 1};
Notice that column_major is exactly the same as row_major_t. So, if we take a proper translation matrix, and store it as column-major, it is the same as transposing that matrix and storing it as row-major.
That is what is meant by being only a notational convention. There are really two sets of conventions: memory storage and transposition. Memory storage is column vs row major, while transposition is normal vs. transposed.
If you have a matrix that was generated in row-major order, you can get the same effect by transposing the column-major equivalent of that matrix. And vice-versa.
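A tiny sketch of that equivalence in plain C++ (the translation values are arbitrary): transposing the array that stores a matrix in one order yields the array that stores the same matrix in the other order.
#include <cassert>

// Reading the same 16 floats with the other convention gives the transposed matrix, so
// converting a matrix between row-major and column-major storage is an array transpose.
void transpose4(const float in[16], float out[16])
{
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            out[c*4 + r] = in[r*4 + c];
}

int main()
{
    float x = 1.0f, y = 2.0f, z = 3.0f;
    float row_major[16]    = {1,0,0,x,  0,1,0,y,  0,0,1,z,  0,0,0,1}; // proper translation, stored row-major
    float column_major[16] = {1,0,0,0,  0,1,0,0,  0,0,1,0,  x,y,z,1}; // the same matrix, stored column-major
    float t[16];
    transpose4(row_major, t);          // transpose the storage...
    for (int i = 0; i < 16; ++i)
        assert(t[i] == column_major[i]); // ...and you get the other storage order
    return 0;
}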
Matrix multiplication can only be done one way: given two matrices, in a specific order, you multiply certain values together and store the results. Now, A*B != B*A, but the actual source code for A*B is the same as the code for B*A. They both run the same code to compute the output.
The matrix multiplication code does not care whether the matrices happen to be stored in column-major or row-major order.
The same cannot be said for vector/matrix multiplication. And here's why.
Vector/matrix multiplication is a falsehood; it cannot be done. However, you can multiply a matrix by another matrix. So if you pretend a vector is a matrix, then you can effectively do vector/matrix multiplication, simply by doing matrix/matrix multiplication.
A 4D vector can be considered a column-vector or a row-vector. That is, a 4D vector can be thought of as a 4x1 matrix (remember: in matrix notation, the row count comes first) or a 1x4 matrix.
But here's the thing: Given two matrices A and B, A*B is only defined if the number of columns of A is the same as the number of rows of B. Therefore, if A is our 4x4 matrix, B must be a matrix with 4 rows in it. Therefore, you cannot perform A*x, where x is a row-vector. Similarly, you cannot perform x*A where x is a column-vector.
Because of this, most matrix math libraries make this assumption: if you multiply a vector times a matrix, you really mean to do the multiplication that actually works, not the one that makes no sense.
Let us define, for any 4D vector x, the following. C shall be the column-vector matrix form of x, and R shall be the row-vector matrix form of x. Given this, for any 4x4 matrix A, A*C represents matrix multiplying A by the column-vector x. And R*A represents matrix multiplying the row-vector x by A.
But if we look at this using strict matrix math, we see that these are not equivalent. R*A cannot be the same as A*C. This is because a row-vector is not the same thing as a column-vector. They're not the same matrix, so they do not produce the same results.
However, they are related in one way. It is true that R != C. However, it is also true that R = C^T, where ^T is the transpose operation. The two matrices are transposes of each other.
Here's a funny fact. Since vectors are treated as matrices, they too have a column vs. row-major storage question. The problem is that they both look the same. The array of floats is the same, so you can't tell the difference between R and C just by looking at the data. The only way to tell the difference is by how they are used.
If you have any two matrices A and B, and A is stored as row-major and B as column-major, multiplying them is completely meaningless. You get nonsense as a result. Well, not really. Mathematically, what you get is the equivalent of doing A^T*B. Or A*B^T; they're mathematically identical.
Therefore, matrix multiplication only makes sense if the two matrices (and remember: vector/matrix multiplication is just matrix multiplication) are stored in the same major ordering.
So, is a vector column-major or row-major? It is both and neither, as stated before. It is column major only when it is used as a column matrix, and it is row major when it is used as a row matrix.
Therefore, if you have a matrix A which is column major, x*A means... nothing. Well, again, it means x*A^T, but that's not what you really wanted. Similarly, A*x does transposed multiplication if A is row-major.
Therefore, the order of vector/matrix multiplication does change, depending on your major ordering of the data (and whether you're using transposed matrices).
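As a small, runnable illustration of the column-major case (plain C++, values taken from the question's translation of 0.5, 0.5, 0): multiplying a column-major matrix by a column vector combines the matrix's columns weighted by the vector's components, which is what transform * vec4(aPos, 1.0) computes when the uploaded data really is column-major.
#include <cassert>

// Multiply a column-major 4x4 matrix by a column vector.
void mat4_mul_vec4(const float m[16], const float v[4], float out[4])
{
    for (int r = 0; r < 4; ++r)
        out[r] = m[0*4 + r]*v[0] + m[1*4 + r]*v[1] + m[2*4 + r]*v[2] + m[3*4 + r]*v[3];
}

int main()
{
    float x = 0.5f, y = 0.5f, z = 0.0f;
    float M[16] = {1,0,0,0,  0,1,0,0,  0,0,1,0,  x,y,z,1}; // column-major translation
    float v[4]  = {-0.5f, -0.5f, 0.0f, 1.0f};              // the triangle's bottom-left vertex
    float out[4];
    mat4_mul_vec4(M, v, out);
    assert(out[0] == 0.0f && out[1] == 0.0f && out[2] == 0.0f && out[3] == 1.0f); // moved to the origin
    return 0;
}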
Here is my code, modified from the Qt example Examples\Qt-5.14.2\quick\scenegraph\openglunderqml:
void SquircleRenderer::init()
{
unsigned char* data = (unsigned char*)malloc(1200*4);
for(int i=0;i<600;i++)
{
data[i*4] = 0;
data[i*4+1] = 255;
data[i*4+2] = 0;
data[i*4+3] = 255;
}
for(int i=600;i<1200;i++)
{
data[i*4] = 0;
data[i*4+1] = 0;
data[i*4+2] = 255;
data[i*4+3] = 255;
}
if (!m_program) {
QSGRendererInterface *rif = m_window->rendererInterface();
Q_ASSERT(rif->graphicsApi() == QSGRendererInterface::OpenGL || rif->graphicsApi() == QSGRendererInterface::OpenGLRhi);
initializeOpenGLFunctions();
if (texs[0])
{
glDeleteTextures(1, texs);
}
glGenTextures(1, texs);
glBindTexture(GL_TEXTURE_2D, texs[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 30, 40, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
m_program = new QOpenGLShaderProgram();
m_program->addCacheableShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute highp vec4 vertices;"
"varying highp vec2 coords;"
"void main() {"
" gl_Position = vertices;"
" coords = vertices.xy;"
"}");
m_program->addCacheableShaderFromSourceCode(QOpenGLShader::Fragment,
"varying highp vec2 coords;"
"uniform sampler2D inputImageTexture;"
"void main() {"
" gl_FragColor = texture2D(inputImageTexture, coords);"
"}");
m_program->bindAttributeLocation("vertices", 0);
m_program->link();
arrUni[0] = m_program->uniformLocation("inputImageTexture");
}
}
//! [4] //! [5]
void SquircleRenderer::paint()
{
// Play nice with the RHI. Not strictly needed when the scenegraph uses
// OpenGL directly.
m_window->beginExternalCommands();
m_program->bind();
m_program->enableAttributeArray(0);
float values[] = {
-1, 1,
1, 1,
-1, -1,
1, -1
};
// This example relies on (deprecated) client-side pointers for the vertex
// input. Therefore, we have to make sure no vertex buffer is bound.
glBindBuffer(GL_ARRAY_BUFFER, 0);
m_program->setAttributeArray(0, GL_FLOAT, values, 2);//values
//m_program->setUniformValue("t", (float) m_t);
qDebug()<<m_viewportSize.width()<<m_viewportSize.height()<<"\n";
glViewport(0, 0, m_viewportSize.width(), m_viewportSize.height());
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texs[0]);
glUniform1i(arrUni[0], 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
m_program->disableAttributeArray(0);
m_program->release();
m_window->endExternalCommands();
}
As you can see in the picture, it produces 4 copies of the same picture. Could you tell me how to produce 1 picture that fills the whole window?
I tried so many methods, but none of them worked. I guess the problem is in the values array or in the glTexImage2D call.
Textures are mapped across the [0, 1] range, and values outside of that range are modulo-looped back into it, which creates a repeating pattern. Interpreting the texture over the [-1, 1] range leads to what you are seeing, since you are mapping exactly twice the UV range on both axes.
There are a few ways to fix this. But my personal preference for a full-framebuffer pass like this is to have the attribute be normalized, and then have it converted to the expected [-1, 1] range for the clip-space coordinate in the vertex shader:
float values[] = {
0.f, 1.f,
1.f, 1.f,
0.f, 0.f,
1.f, 0.f
};
gl_Position = vertices * 2.0 - vec4(1.0, 1.0,0.0,1.0);
Another common technique is to do away with the attribute buffer altogether, and use gl_VertexID to directly generate both the UVs and coordinates.
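A sketch of that attribute-less variant (hypothetical: it assumes a #version 330 context, an empty VAO bound, and a glDrawArrays(GL_TRIANGLE_STRIP, 0, 4) call; the question's shaders are version-less, so treat this purely as an illustration):
// Vertex shader that derives both the UVs and the clip-space position from gl_VertexID.
const char* fullscreenVert = R"(
#version 330
out vec2 coords;
void main()
{
    // gl_VertexID 0..3 -> (0,0), (1,0), (0,1), (1,1)
    vec2 uv = vec2(gl_VertexID & 1, (gl_VertexID >> 1) & 1);
    coords = uv;                                  // [0,1] texture coordinates
    gl_Position = vec4(uv * 2.0 - 1.0, 0.0, 1.0); // [-1,1] clip-space position
}
)";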
The OpenGL Mathematics library (GLM) uses the following algorithm to compute the translation matrix:
//taken from source code
template<typename T, qualifier Q>
GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v)
{
mat<4, 4, T, Q> Result(m);
Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
return Result;
}
(Here the vector v is a 3-dimensional vector and the matrix m is a 4x4 matrix; since we're using homogeneous coordinates, v is effectively extended to a 4-dimensional vector with w = 1.)
The following is from linear algebra theory:
Let m be a 4x4 transformation matrix with entries m[i][j]. Now, suppose m gives some linear transformation and we'd like to add a translation of X, Y and Z in the X, Y and Z dimensions respectively. If I'm not mistaken, the way we'd do that is by forming the composite matrix t*m, where t is the translation matrix with (X, Y, Z) in its fourth column.
Now, I'm not getting what this GLM function of translate does, because it computes m*t instead, so the matrix with the added translation, i.e. the new m, comes out different.
These two matrices are not equal and hence they would result in different transformations, so I'm confused as to which matrix does the actual translation, which one is correct, or whether there is some other idea hidden behind the algorithm.
Note: Before reading the answer note that in column-major representation of a matrix, you access the entries of your matrix using: matrix[column-index][row-index].
Edit
The source code with which I perform transformation:
#include <iostream>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <cmath>
#include <string.h>
#include "glm/glm.hpp"
#include "glm/gtc/matrix_transform.hpp"
#include "glm/gtc/type_ptr.hpp"
// Window Dimensions
const GLint WIDTH=800, HEIGHT=600;
GLuint VAO, VBO, shader;
GLint uniformModel {};
GLint uniformModelRot {};
GLfloat triOffset {};
float triMaxOffset = 0.7f;
bool direction = true;
const float toRadians = 3.14159265f/180.0f;
// vertex shader
static const char* vShader =
"#version 330\n"
"layout (location = 0) in vec3 pos;\n"
"uniform mat4 model;\n"
"void main(){\n"
" gl_Position = model * vec4(0.5*pos, 1.0);\n"
"}\n";
// fragment shader
static const char* fShader = ""
"#version 330\n"
"out vec4 color;\n"
"uniform mat4 model;\n"
"void main(){\n"
" color = model *vec4(1.0, 1.0, 0.0, 1.0);\n"
"}\n";
void AddShader(GLuint theProgram, const char* ShaderCode, GLenum shaderType, std::string info){
std::cerr <<"INFO: Adding "<<info<<" Shader"<<std::endl;
GLuint theShader = glCreateShader(shaderType);
const GLchar* theCode[1];
theCode[0] = ShaderCode;
GLint codeLength[1];
codeLength[0] = strlen(ShaderCode);
glShaderSource(theShader, 1, theCode, codeLength);
glCompileShader(theShader);
GLint result =0;
GLchar eLog[1024] ={0};
glGetShaderiv(theShader, GL_COMPILE_STATUS, &result);
if(!result){
glGetShaderInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error compiling program"<<std::endl;
return;
}
glAttachShader(theProgram, theShader);
}
void CompileShader(){
shader = glCreateProgram();
if(!shader){
std::cerr<<"Error creating shader"<<std::endl;
return;
}
AddShader(shader, vShader, GL_VERTEX_SHADER, "vertex");
AddShader(shader, fShader, GL_FRAGMENT_SHADER, "fragment");
GLint result =0;
GLchar eLog[1024] ={0};
glLinkProgram(shader);
glGetProgramiv(shader, GL_LINK_STATUS, &result);
if(!result){
glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error linking program"<<std::endl;
return;
}
glValidateProgram(shader);
glGetProgramiv(shader, GL_VALIDATE_STATUS, &result);
if(!result){
glGetProgramInfoLog(shader, sizeof(eLog), NULL, eLog);
std::cerr<<"Error Validating program"<<std::endl;
return;
}
uniformModel = glGetUniformLocation(shader,"model");
}
void CreateTriangles(){
GLfloat vertices[]={
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat)*9,vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,0,0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
int main(){
//initialize GLFW
if(!glfwInit()){
std::cerr << "GLFW initialization failed!" << std::endl;
glfwTerminate();
return 1;
}
//Setup GLFW window properties
//openGL version
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
// core profile = no backward compatibility
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//allow forward compatibility
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow *mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "TEST WINDOW", NULL, NULL);
if(!mainWindow){
std::cerr << "GLFW Window creation failed" << std::endl;
glfwTerminate();
return 1;
}
// get Buffer size information
int bufferWidth, bufferHeight;
glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);
// set context for GLEW to use
glfwMakeContextCurrent(mainWindow);
// allow modern extension features
if(glewInit()!=GLEW_OK){
std::cerr << "GLEW initialization failed" << std::endl;
glfwDestroyWindow(mainWindow);
glfwTerminate();
return 1;
}
// setup viewport size
glViewport(0, 0, bufferWidth, bufferHeight);
CreateTriangles();
CompileShader();
while(!glfwWindowShouldClose(mainWindow)){
// get and handle user input events
glfwPollEvents();
glClearColor(1.0f, 0.0f, 0.0f, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
if(direction){
triOffset += 0.05f;
}else{
triOffset -= 0.05f;
}
if(abs(triOffset) >= triMaxOffset){
direction = !direction;
}
glUseProgram(shader);
glm::mat4 modelMatrix(1.0f);
modelMatrix = glm::translate(modelMatrix, glm::vec3(triOffset, 0.0f, 0.0f));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE,glm::value_ptr(modelMatrix));
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES,0,3);
glBindVertexArray(0);
glUseProgram(0);
// swap buffers
glfwSwapBuffers(mainWindow);
}
return 0;
}
OpenGL Mathematics (GLM) is based on the OpenGL Shading Language (GLSL). What glm::translate actually does is to set up a translation matrix and multiply the input matrix by the translation. It computes m*t in the meaning of GLSL Vector and Matrix Operations:
mat<4, 4, T, Q> Result(m);
Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3];
(In the following Result is substituted by R)
Note, m[0] * v[0] multiplies each component of the column m[0] by the scalar v[0]. The result is the vector (m[0][0]*v[0], m[0][1]*v[0], m[0][2]*v[0], m[0][3]*v[0]).
So R[3] = m[0]*v[0] + m[1]*v[1] + m[2]*v[2] + m[3] is the same as
R[3][0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0]
R[3][1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1]
R[3][2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2]
R[3][3] = m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3]
glm::translate actually calculates:
vh = (v[0], v[1], v[2], 1)
R = m
R[3][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), vh )
R[3][1] = dot( (m[0][1], m[1][1], m[2][1], m[3][1]), vh )
R[3][2] = dot( (m[0][2], m[1][2], m[2][2], m[3][2]), vh )
R[3][3] = dot( (m[0][3], m[1][3], m[2][3], m[3][3]), vh )
The code above computes the dot products of the rows of m with vh. vh is the 4th column of the translation matrix t. Note the translation matrix t is defined as:
     c0   c1   c2   c3
-----------------------
r0:   1    0    0  v[0]
r1:   0    1    0  v[1]
r2:   0    0    1  v[2]
r3:   0    0    0    1
A concatenation of 4x4 matrices (R = m*t) is the Dot product of the rows of m and the columns of t and can be expressed as:
(See OpenGL Shading Language 4.60 Specification - 5.10. Vector and Matrix Operations)
for i from 0 to 3
for j from 0 to 3
R[i][j] = dot( (m[0][j], m[1][j], m[2][j], m[3][j]), t[i] )
Where dot(a, b) == a[0]*b[0] + a[1]*b[1] + a[2]*b[2] + a[3]*b[3],
(m[0][j], m[1][j], m[2][j], m[3][j]) is the j-th row of m and
t[i] is i-th column of t.
For glm::translate it is sufficient to copy R[0], R[1] and R[2] from m[0], m[1] and m[2].
e.g. for (i=0, j=0):
R[0][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), t[0] )
R[0][0] = dot( (m[0][0], m[1][0], m[2][0], m[3][0]), (1, 0, 0, 0) )
R[0][0] = m[0][0] * 1 + m[1][0] * 0 + m[2][0] * 0 + m[3][0] * 0
R[0][0] = m[0][0]
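A small self-contained sketch of that computation on plain column-major arrays (C++, indexing m[column][row] like GLM; the numeric values are arbitrary): recomputing only the 4th column, as glm::translate does, gives exactly the full product m*t.
#include <cassert>

// Column-major 4x4 matrices, indexed m[column][row] like GLM's mat4.
typedef float Mat4[4][4];

// Full product R = m * t: R[i][j] = dot(j-th row of m, i-th column of t).
void mul(const Mat4 m, const Mat4 t, Mat4 R)
{
    for (int i = 0; i < 4; ++i)       // column of the result
        for (int j = 0; j < 4; ++j) { // row of the result
            R[i][j] = 0.0f;
            for (int k = 0; k < 4; ++k)
                R[i][j] += m[k][j] * t[i][k];
        }
}

int main()
{
    const float v[3] = {2.0f, 3.0f, 4.0f};
    const Mat4 m = {{1,0,0,0}, {0,1,0,0}, {0,0,1,0}, {5,6,7,1}};          // some input matrix
    const Mat4 t = {{1,0,0,0}, {0,1,0,0}, {0,0,1,0}, {v[0],v[1],v[2],1}}; // translation, v in the 4th column

    // What glm::translate does: copy m, then recompute only the 4th column.
    Mat4 R;
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 4; ++j)
            R[i][j] = m[i][j];
    for (int j = 0; j < 4; ++j)
        R[3][j] = m[0][j]*v[0] + m[1][j]*v[1] + m[2][j]*v[2] + m[3][j];

    // It matches the full product m * t exactly.
    Mat4 full;
    mul(m, t, full);
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 4; ++j)
            assert(R[i][j] == full[i][j]);
    return 0;
}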
GLM matrices (like OpenGL matrices) are stored in column-major order. If you investigate matrices in the debugger, that may lead to confusion.
If you have the matrix
c0 c1 c2 c3
-------------------
r0: Xx Yx Zx Tx
r1: Xy Yy Zy Ty
r2: Xz Yz Zz Tz
r3: 0 0 0 1
then the memory image of a 4*4 OpenGL matrix looks like this:
Xx, Xy, Xz, 0, Yx, Yy, Yz, 0, Zx, Zy, Zz, 0, Tx, Ty, Tz, 1
If you investigate it in a debugger, it may look like:
[ [ Xx, Xy, Xz, 0 ],
[ Yx, Yy, Yz, 0 ],
[ Zx, Zy, Zz, 0 ],
[ Tx, Ty, Tz, 1 ] ]
The technical details of how the math is done are covered magnificently in @Rabbid76's answer, but if anyone would like to understand why m*t is computed instead of t*m, then here's the answer:
Computing the matrix t*m means you're taking the standard basis as the basis vectors for the linear combination, so essentially you're transforming in world-space coordinates. But doing it the other way around and computing m*t means you're taking m[0], m[1] and m[2] as the basis, so you're transforming in the local space given by that basis; since this is essentially a model matrix, we just call it model space.
That is probably one way to view it if you're only considering translation, but what if you're handling composite transformations like below:
M=glm::translate(M,T);
R=glm::rotate(M,angle,Rot_axis);
Here the model matrix is M (initialized to the identity at first), T is the translation matrix, R is the rotation matrix, and the other parameters are straightforward.
So the transformation sequence that happens in the above code is:
M.T.R
and say this is applied to the vector v = [x, y, z, 1]: the vector first undergoes the rotation, then the translation, and only then the model transformation. If it helps, you may see it like this:
M.(T.R.v)
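A minimal GLM sketch of that ordering (assumes a recent GLM where angles are in radians): because translate and rotate both multiply on the right, the transform written last is the one applied to the vertex first.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

int main()
{
    glm::mat4 M(1.0f);                                                     // identity
    M = glm::translate(M, glm::vec3(1.0f, 0.0f, 0.0f));                    // M = I * T
    M = glm::rotate(M, glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f));  // M = I * T * R
    glm::vec4 v(1.0f, 0.0f, 0.0f, 1.0f);
    glm::vec4 p = M * v;                      // = T * (R * v): rotated first, then translated
    std::printf("%f %f %f\n", p.x, p.y, p.z); // roughly (1, 1, 0)
    return 0;
}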
I am trying to obtain so-called world-space coordinates for 2D rendering by creating an orthographic projection matrix. But the result is disappointing: I expect a blue triangle, but there are only some blue pixels in the bottom right.
Here is my code, in the Nim language. It's a very simplified version that reproduces the issue.
The important part is in the function "projection2D", or maybe in the vertex shader. I don't think the problem is elsewhere, but for safety I post the complete example.
type
OGLfloat = float32
OGLuint = uint32
OGLint = int32
type Mat4x4* = array[16, OGLfloat] # 4 x 4 Matrix
# Here OpenGL constants should be here but not pasted to save space.
#....
const
POSITION_LENGTH = 3.OGLint
COLOR_LENGTH = 4.OGLint
const
WINDOW_W = 640
WINDOW_H = 480
let
colorDataOffset = POSITION_LENGTH * OGLint(sizeof(OGLfloat))
# OpenGL function import should be here.
#...
var
# Thanks to an orthogonal projection matrix I expect to use coordinates in pixels.
vertices = @[OGLfloat(420), 0, 0, # Position
0, 0, 1, 1, # Color
640, 480, 0,
0, 0, 1, 1,
0, 480, 0,
0, 0, 1, 1]
indices = @[OGLuint(0), 1, 2]
proc projection2D(left, right, bottom, top, far, near:float):Mat4x4 =
result = [OGLfloat(1), 0, 0, 0, # Start from an identity matrix.
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
# Orthographic projection, inspired from a Wikipedia article example.
result[0] = OGLfloat(2.0 / (right - left))
result[5] = OGLfloat(2.0 / (top - bottom))
result[3] = OGLfloat(- ((right + left) / (right - left)))
result[7] = OGLfloat(- ((top + bottom) / (top - bottom)))
result[10] = OGLfloat(-2 / (far - near))
result[11] = OGLfloat(-((far + near)/(far - near)))
# These parameters comes from "learnopengl.com".
var projectionMatrix = projection2D(0.0, OGLfloat(WINDOW_W), OGLfloat(WINDOW_H), 0.0, - 1.0, 1.0)
var glfwErr = glfwInit()
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3)
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3)
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE)
var winHandle = glfwCreateWindow(WINDOW_W, WINDOW_H)
glfwMakeContextCurrent(winHandle)
var glewErr = glewInit()
var
shadID:OGLuint
vertSrc:cstring = """
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec4 aColor;
out vec4 vColor;
uniform mat4 projection;
void main()
{
gl_Position = projection * vec4(aPos, 1.0f);
vColor = aColor;
}
"""
fragSrc:cstring = """
#version 330 core
out vec4 FragColor;
in vec4 vColor;
void main()
{
FragColor = vColor;
}
"""
proc send_src(vert:var cstring, frag:var cstring):OGLuint =
var success:OGLint
# vertex
var vertexShader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertexShader, 1, addr vert, nil)
glCompileShader(vertexShader)
# Check compilation errors.
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, addr success)
if bool(success) == false:
echo(" vertex shader compilation failed (send_src)")
else:
echo("vertexShader compiled (send_src)")
# fragment
var fragmentShader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragmentShader, 1, addr frag, nil)
glCompileShader(fragmentShader)
# Check compilation errors.
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, addr success)
if bool(success) == false:
echo("fragment shader compilation failed (send_src)")
else:
echo("fragmentShader compiled (send_src)")
# Shader program
result = glCreateProgram()
glAttachShader(result, vertexShader)
glAttachShader(result, fragmentShader)
glLinkProgram(result)
# Check for linkage errors.
glGetProgramiv(result, GL_LINK_STATUS, addr success)
if success == 0:
echo("program linking failed (send_src)")
else:
echo("shader linked (send_src)")
glDeleteShader(vertexShader)
glDeleteShader(fragmentShader)
glViewport(0, 0, WINDOW_W, WINDOW_H)
shadID = send_src(vertSrc, fragSrc)
var VAO, VBO, EBO:OGLuint
glGenVertexArrays(1, addr VAO)
glGenBuffers(1, addr VBO)
glGenBuffers(1, addr EBO)
glBindVertexArray(VAO)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, vertices.len * sizeof(OGLfloat),
addr vertices[0], GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.len * sizeof(OGLuint),
addr indices[0], GL_STATIC_DRAW)
# Position layout
glVertexAttribPointer(0, POSITION_LENGTH, GL_FLOAT, GL_FALSE, (POSITION_LENGTH + COLOR_LENGTH) * OGLint(sizeof(OGLfloat)),
nil)
glEnableVertexAttribArray(0)
# Color layout
glVertexAttribPointer(1, COLOR_LENGTH, GL_FLOAT, GL_FALSE, (POSITION_LENGTH + COLOR_LENGTH) * OGLint(sizeof(OGLfloat)),
cast[pointer](colorDataOffset))
glEnableVertexAttribArray(1)
glBindBuffer(GL_ARRAY_BUFFER, 0)
glBindVertexArray(0)
glUseProgram(shadID)
while bool(glfwWindowShouldClose(winHandle)) == false:
glClearColor(0.2, 0.3, 0.3, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glBindVertexArray(VAO)
glUniformMatrix4fv(glGetUniformLocation(shadID, "projection"), 1, GL_FALSE, addr projectionMatrix[0])
glDrawElements(GL_TRIANGLES, OGLint(indices.len), GL_UNSIGNED_INT, nil)
glfwSwapBuffers(winHandle)
glfwPollEvents()
glDeleteVertexArrays(1, addr VAO)
glDeleteBuffers(1, addr VBO)
glDeleteBuffers(1, addr EBO)
glfwDestroyWindow(winHandle)
glfwTerminate()
Don't hesitate to share your thoughts !
You have to transpose the projection matrix, this means the 3rd parameter of glUniformMatrix4fv has to be GL_TRUE:
glUniformMatrix4fv(glGetUniformLocation(shadID, "projection"),
1, GL_TRUE, addr projectionMatrix[0])
Or you have to initialize the matrix according to the specification (see below):
proc projection2D(left, right, bottom, top, far, near:float):Mat4x4 =
result = [OGLfloat(1), 0, 0, 0, # Start from an identity matrix.
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1]
# Orthographic projection, inspired from a Wikipedia article example.
result[0] = OGLfloat(2.0 / (right - left))
result[5] = OGLfloat(2.0 / (top - bottom))
result[12] = OGLfloat(- ((right + left) / (right - left)))
result[13] = OGLfloat(- ((top + bottom) / (top - bottom)))
result[10] = OGLfloat(-2 / (far - near))
result[14] = OGLfloat(-((far + near)/(far - near)))
See The OpenGL Shading Language 4.6, 5.4.2 Vector and Matrix Constructors, page 101:
To initialize a matrix by specifying vectors or scalars, the components are assigned to the matrix elements in column-major order.
mat4(float, float, float, float, // first column
float, float, float, float, // second column
float, float, float, float, // third column
float, float, float, float); // fourth column
Note that, in contrast to a mathematical matrix, where the columns are written from top to bottom (which feels natural), at the initialization of an OpenGL matrix the columns are written from left to right. This has the benefit that the x, y, z components of an axis or of the translation are in direct succession in memory.
See also Data Type (GLSL) - Matrix constructors
Just to get an idea of what kind of speeds I should be expecting, I have been trying to benchmark transfers between global memory and shaders, rather than relying on GPU spec sheets. However, I can't get close to the theoretical maximum. In fact I'm out by a factor of 50!
I'm using a GTX Titan X, which is said to have 336.5GB/s. Linux x64 driver 352.21.
I found a CUDA benchmark here which gives me ~240–250GB/s (this is more what I expect).
I'm trying to match exactly what they do with shaders. I've tried vertex shaders, compute shaders, accessing buffer objects via image_load_store and NV_shader_buffer_store, with floats, vec4s, loops inside the shader (with coalesced addressing within the work group) and various methods of timing. I'm stuck at ~7GB/s (see the update below).
Why is GL so much slower? Am I doing something wrong and if so, how should it be done?
Here's my MWE with three methods (1. vertex shader with image_load_store, 2. vertex shader with bindless graphics, 3. compute shader with bindless graphics):
//#include <windows.h>
#include <assert.h>
#include <stdio.h>
#include <memory.h>
#include <GL/glew.h>
#include <GL/glut.h>
const char* imageSource =
"#version 440\n"
"uniform layout(r32f) imageBuffer data;\n"
"uniform float val;\n"
"void main() {\n"
" imageStore(data, gl_VertexID, vec4(val, 0.0, 0.0, 0.0));\n"
" gl_Position = vec4(0.0);\n"
"}\n";
const char* bindlessSource =
"#version 440\n"
"#extension GL_NV_gpu_shader5 : enable\n"
"#extension GL_NV_shader_buffer_load : enable\n"
"uniform float* data;\n"
"uniform float val;\n"
"void main() {\n"
" data[gl_VertexID] = val;\n"
" gl_Position = vec4(0.0);\n"
"}\n";
const char* bindlessComputeSource =
"#version 440\n"
"#extension GL_NV_gpu_shader5 : enable\n"
"#extension GL_NV_shader_buffer_load : enable\n"
"layout(local_size_x = 256) in;\n"
"uniform float* data;\n"
"uniform float val;\n"
"void main() {\n"
" data[gl_GlobalInvocationID.x] = val;\n"
"}\n";
GLuint compile(GLenum type, const char* shaderSrc)
{
GLuint shader = glCreateShader(type);
glShaderSource(shader, 1, (const GLchar**)&shaderSrc, NULL);
glCompileShader(shader);
int success = 0;
int loglen = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &loglen);
GLchar* log = new GLchar[loglen];
glGetShaderInfoLog(shader, loglen, &loglen, log);
if (!success)
{
printf("%s\n", log);
exit(0);
}
GLuint program = glCreateProgram();
glAttachShader(program, shader);
glLinkProgram(program);
return program;
}
GLuint timerQueries[2];
void start()
{
glGenQueries(2, timerQueries);
glQueryCounter(timerQueries[0], GL_TIMESTAMP);
}
float stop()
{
glMemoryBarrier(GL_ALL_BARRIER_BITS);
GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
glWaitSync(sync, 0, GL_TIMEOUT_IGNORED);
glQueryCounter(timerQueries[1], GL_TIMESTAMP);
GLint available = 0;
while (!available) //sometimes gets stuck here for whatever reason
glGetQueryObjectiv(timerQueries[1], GL_QUERY_RESULT_AVAILABLE, &available);
GLuint64 a, b;
glGetQueryObjectui64v(timerQueries[0], GL_QUERY_RESULT, &a);
glGetQueryObjectui64v(timerQueries[1], GL_QUERY_RESULT, &b);
glDeleteQueries(2, timerQueries);
return b - a;
}
int main(int argc, char** argv)
{
float* check;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutCreateWindow("test");
glewInit();
int bufferSize = 64 * 1024 * 1024; //64MB
int loops = 500;
glEnable(GL_RASTERIZER_DISCARD);
float* dat = new float[bufferSize/sizeof(float)];
memset(dat, 0, bufferSize);
//create a buffer with data
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_TEXTURE_BUFFER, buffer);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, NULL, GL_STATIC_DRAW);
//get a bindless address
GLuint64 address;
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
//make a texture alias for it
GLuint bufferTexture;
glGenTextures(1, &bufferTexture);
glBindTexture(GL_TEXTURE_BUFFER, bufferTexture);
glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, buffer);
glBindImageTextureEXT(0, bufferTexture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32F);
//compile the shaders
GLuint imageShader = compile(GL_VERTEX_SHADER, imageSource);
GLuint bindlessShader = compile(GL_VERTEX_SHADER, bindlessSource);
GLuint bindlessComputeShader = compile(GL_COMPUTE_SHADER, bindlessComputeSource);
//warm-up and check values
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glUseProgram(imageShader);
glUniform1i(glGetUniformLocation(imageShader, "data"), 0);
glUniform1f(glGetUniformLocation(imageShader, "val"), 1.0f);
glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
//check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
//for (int i = 0; i < bufferSize/sizeof(float); ++i)
// assert(check[i] == 1.0f);
//glUnmapBuffer(GL_TEXTURE_BUFFER);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glUseProgram(bindlessShader);
glProgramUniformui64NV(bindlessShader, glGetUniformLocation(bindlessShader, "data"), address);
glUniform1f(glGetUniformLocation(bindlessShader, "val"), 1.0f);
glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
//glMemoryBarrier(GL_ALL_BARRIER_BITS); //this causes glDispatchCompute to segfault later, so don't uncomment
//check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
//for (int i = 0; i < bufferSize/sizeof(float); ++i)
// assert(check[i] == 1.0f);
//glUnmapBuffer(GL_TEXTURE_BUFFER);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glUseProgram(bindlessComputeShader);
glProgramUniformui64NV(bindlessComputeShader, glGetUniformLocation(bindlessComputeShader, "data"), address);
glUniform1f(glGetUniformLocation(bindlessComputeShader, "val"), 1.0f);
glDispatchCompute(bufferSize/(sizeof(float) * 256), 1, 1);
glMemoryBarrier(GL_ALL_BARRIER_BITS);
//check = (float*)glMapBuffer(GL_TEXTURE_BUFFER, GL_READ_ONLY);
//for (int i = 0; i < bufferSize/sizeof(float); ++i)
// assert(check[i] == 1.0f); //glDispatchCompute doesn't actually write anything with bindless graphics
//glUnmapBuffer(GL_TEXTURE_BUFFER);
glFinish();
//time image_load_store
glUseProgram(imageShader);
glUniform1i(glGetUniformLocation(imageShader, "data"), 0);
glUniform1f(glGetUniformLocation(imageShader, "val"), 1.0f);
start();
for (int i = 0; i < loops; ++i)
glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
GLuint64 imageTime = stop();
printf("image_load_store: %.2fGB/s\n", (float)((bufferSize * (double)loops) / imageTime));
//time bindless
glUseProgram(bindlessShader);
glProgramUniformui64NV(bindlessShader, glGetUniformLocation(bindlessShader, "data"), address);
glUniform1f(glGetUniformLocation(bindlessShader, "val"), 1.0f);
start();
for (int i = 0; i < loops; ++i)
glDrawArrays(GL_POINTS, 0, bufferSize/sizeof(float));
GLuint64 bindlessTime = stop();
printf("bindless: %.2fGB/s\n", (float)((bufferSize * (double)loops) / bindlessTime));
//time bindless in a compute shader
glUseProgram(bindlessComputeShader);
glProgramUniformui64NV(bindlessComputeShader, glGetUniformLocation(bindlessComputeShader, "data"), address);
glUniform1f(glGetUniformLocation(bindlessComputeShader, "val"), 1.0f);
start();
for (int i = 0; i < loops; ++i)
glDispatchCompute(bufferSize/(sizeof(float) * 256), 1, 1);
GLuint64 bindlessComputeTime = stop();
printf("bindless compute: %.2fGB/s\n", (float)((bufferSize * (double)loops) / bindlessComputeTime));
assert(glGetError() == GL_NO_ERROR);
return 0;
}
My output:
image_load_store: 6.66GB/s
bindless: 6.68GB/s
bindless compute: 6.65GB/s
Some notes:
Compute shaders with bindless graphics don't appear to write anything (the commented-out assert fails), or at least the data isn't retrieved with glMapBuffer, even though the speed matches the other methods. Using image_load_store in the compute shader works and gives the same speed as the vertex shaders (though I thought that'd be one too many permutations to post).
Calling glMemoryBarrier(GL_ALL_BARRIER_BITS) before glDispatchCompute causes a crash in the driver.
Commenting out the three glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW) calls, which are used to check the output, raises the speed of the first two tests to 17GB/s, and the compute shader skyrockets to 292GB/s, which is much closer to what I'd like, but this can't be trusted because of point 1.
Sometimes while (!available) hangs for ages (ctrl-c when I get tired of waiting shows it's still in the loop).
For reference, here's the CUDA code:
//http://www.ks.uiuc.edu/Research/vmd/doxygen/CUDABench_8cu-source.html
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define CUERR { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, %s line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
return -1; }}
//
// GPU device global memory bandwidth benchmark
//
template <class T>
__global__ void gpuglobmemcpybw(T *dest, const T *src) {
const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
dest[idx] = src[idx];
}
template <class T>
__global__ void gpuglobmemsetbw(T *dest, const T val) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
dest[idx] = val;
}
typedef float4 datatype;
static int cudaglobmembw(int cudadev, double *gpumemsetgbsec, double *gpumemcpygbsec) {
int i;
int len = 1 << 22; // one thread per data element
int loops = 500;
datatype *src, *dest;
datatype val=make_float4(1.0f, 1.0f, 1.0f, 1.0f);
// initialize to zero for starters
float memsettime = 0.0f;
float memcpytime = 0.0f;
*gpumemsetgbsec = 0.0;
*gpumemcpygbsec = 0.0;
// attach to the selected device
cudaError_t rc;
rc = cudaSetDevice(cudadev);
if (rc != cudaSuccess) {
#if CUDART_VERSION >= 2010
rc = cudaGetLastError(); // query last error and reset error state
if (rc != cudaErrorSetOnActiveProcess)
return -1; // abort and return an error
#else
cudaGetLastError(); // just ignore and reset error state, since older CUDA
// revs don't have a cudaErrorSetOnActiveProcess enum
#endif
}
cudaMalloc((void **) &src, sizeof(datatype)*len);
CUERR
cudaMalloc((void **) &dest, sizeof(datatype)*len);
CUERR
dim3 BSz(256, 1, 1);
dim3 GSz(len / (BSz.x * BSz.y * BSz.z), 1, 1);
// do a warm-up pass
gpuglobmemsetbw<datatype><<< GSz, BSz >>>(src, val);
CUERR
gpuglobmemsetbw<datatype><<< GSz, BSz >>>(dest, val);
CUERR
gpuglobmemcpybw<datatype><<< GSz, BSz >>>(dest, src);
CUERR
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
// execute the memset kernel
cudaEventRecord(start, 0);
for (i=0; i<loops; i++) {
gpuglobmemsetbw<datatype><<< GSz, BSz >>>(dest, val);
}
CUERR
cudaEventRecord(end, 0);
CUERR
cudaEventSynchronize(start);
CUERR
cudaEventSynchronize(end);
CUERR
cudaEventElapsedTime(&memsettime, start, end);
CUERR
// execute the memcpy kernel
cudaEventRecord(start, 0);
for (i=0; i<loops; i++) {
gpuglobmemcpybw<datatype><<< GSz, BSz >>>(dest, src);
}
cudaEventRecord(end, 0);
CUERR
cudaEventSynchronize(start);
CUERR
cudaEventSynchronize(end);
CUERR
cudaEventElapsedTime(&memcpytime, start, end);
CUERR
cudaEventDestroy(start);
CUERR
cudaEventDestroy(end);
CUERR
*gpumemsetgbsec = (len * sizeof(datatype) / (1024.0 * 1024.0)) / (memsettime / loops);
*gpumemcpygbsec = (2 * len * sizeof(datatype) / (1024.0 * 1024.0)) / (memcpytime / loops);
cudaFree(dest);
cudaFree(src);
CUERR
return 0;
}
int main()
{
double a, b;
cudaglobmembw(0, &a, &b);
printf("%f %f\n", (float)a, (float)b);
return 0;
}
Update:
It seems that the buffer gets made non-resident on my glBufferData calls which were there to check output was being written. As per the extension:
A buffer is also made non-resident implicitly as a result of being respecified via BufferData or being deleted.
...
BufferData is specified to "delete the existing data store",
so the GPU address of that data should become invalid. The buffer is
therefore made non-resident in the current context.
At a guess, OpenGL then streams in the buffer object data each frame and doesn't cache it in video memory. This explains why the compute shader failed the assert; however, there's a slight anomaly in that bindless graphics in the vertex shader still worked when not resident, but I'll ignore that for now. I have no idea why a 64MB buffer object wouldn't default to being resident (though perhaps only after first use) when there's 12GB available.
So after each call to glBufferData I make it resident again and get the address in case it's changed:
glBufferData(GL_TEXTURE_BUFFER, bufferSize, dat, GL_STATIC_DRAW);
glMakeBufferResidentNV(GL_TEXTURE_BUFFER, GL_READ_WRITE);
glGetBufferParameterui64vNV(GL_TEXTURE_BUFFER, GL_BUFFER_GPU_ADDRESS_NV, &address);
assert(glIsBufferResidentNV(GL_TEXTURE_BUFFER)); //sanity check
I'm now getting 270–290GB/s with the compute shader using either image_load_store or bindless graphics. Now my questions include:
Given the buffer seems to be resident for each test and the compute shader is nice and fast, why are the vertex shader versions still so slow?
Without the bindless graphics extension, how should regular OpenGL users put data into video memory (actually put and not idly suggest that the driver might just like to)?
I'm pretty sure I would have noticed this problem in real world situations, and it's this contrived benchmark that hits a slow path, so how could I trick the driver into making a buffer object resident? Running a compute shader first doesn't change anything.
You are asking the driver to read from your process memory, dat. This causes extensive cache coherency traffic. When the GPU reads that memory, it can't be sure that it is up to date; it might be in the CPU cache, modified, and not written back to RAM yet. This causes the GPU to actually have to read from the CPU cache, which is far more expensive than bypassing the CPU and reading the RAM. The RAM is often idle during normal operation, because a modern CPU's hit rate is typically 95% to 99%; the cache is used continuously.
To achieve maximum performance, you need to let the driver allocate the memory. Normal memory your program uses, like global variables and the heap, is allocated as write-back memory. Driver-allocated memory will usually be allocated as write-combining or uncacheable, which eliminates the coherency traffic.
Peak advertised bandwidth numbers will be achieved only without cache coherency overhead.
To let the driver allocate it, use glBufferData with a nullptr for the data.
It isn't all rosy, though, if you manage to coerce the driver into using a write-combining buffer in system memory: CPU reads from such addresses will be very slow. Sequential writes are optimized by the CPU, but random writes will cause the write-combining buffer to flush frequently, hurting performance.
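A sketch of that suggestion applied to the benchmark (reusing buffer, bufferSize and dat from the question's code; assumes a current GL context): allocate the store with a null pointer so the driver owns it, and upload the initial contents once, separately, if they are needed at all.
// Let the driver allocate the data store instead of sourcing it from process memory every time.
glBindBuffer(GL_TEXTURE_BUFFER, buffer);
glBufferData(GL_TEXTURE_BUFFER, bufferSize, nullptr, GL_STATIC_DRAW); // driver-allocated storage
glBufferSubData(GL_TEXTURE_BUFFER, 0, bufferSize, dat);               // optional one-time upload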