As the title says, I am using ImGui and I can't get my render function to render the fonts.
Things I have done:
Verified my texture is loaded properly with RenderDoc
Verified that my vertex attribute pointers are compliant with ImGui's convention (array of structs).
Below is my rendering code. You can also see the developer's example code for OpenGL here: https://github.com/ocornut/imgui/blob/master/examples/opengl3_example/imgui_impl_glfw_gl3.cpp
// Setup some GL state
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glEnable(GL_SCISSOR_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
// Setup orthographic projection
glViewport(0, 0, (GLsizei)fb_width, (GLsizei)fb_height);
const float ortho_projection[4][4] =
{
{ 2.0f/io.DisplaySize.x, 0.0f, 0.0f, 0.0f },
{ 0.0f, 2.0f/-io.DisplaySize.y, 0.0f, 0.0f },
{ 0.0f, 0.0f, -1.0f, 0.0f },
{-1.0f, 1.0f, 0.0f, 1.0f },
};
// Setup the shader. bind() calls glUseProgram and enables/disables the proper vertex attributes
shadeTextured->bind();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, g_FontTexture);
shadeTextured->setUniformM4(Shader::uhi_transform, *(glm::mat4*)&ortho_projection[0][0]);
shadeTextured->setUniformSampler(1, 0);
// Set my vertex attribute pointers for position and tex coords
glVertexAttribPointer(0,
2,
GL_FLOAT,
GL_FALSE,
sizeof(ImDrawVert),
(GLvoid*)IM_OFFSETOF(ImDrawVert, pos));
glVertexAttribPointer(1,
2,
GL_FLOAT,
GL_FALSE,
sizeof(ImDrawVert),
(GLvoid*)IM_OFFSETOF(ImDrawVert, uv));
// Loop through all commands ImGui has
for (int n = 0; n < draw_data->CmdListsCount; n++) {
    const ImDrawList* cmd_list = draw_data->CmdLists[n];
    const ImDrawIdx* idx_buffer_offset = 0;
    glBindBuffer(GL_ARRAY_BUFFER, g_VboHandle);
    glBufferData(GL_ARRAY_BUFFER,
                 (GLsizeiptr)cmd_list->VtxBuffer.Size * sizeof(ImDrawVert),
                 (const GLvoid*)cmd_list->VtxBuffer.Data,
                 GL_STREAM_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, g_ElementsHandle);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                 (GLsizeiptr)cmd_list->IdxBuffer.Size * sizeof(ImDrawIdx),
                 (const GLvoid*)cmd_list->IdxBuffer.Data,
                 GL_STREAM_DRAW);
    for (int cmd_i = 0; cmd_i < cmd_list->CmdBuffer.Size; cmd_i++) {
        const ImDrawCmd* pcmd = &cmd_list->CmdBuffer[cmd_i];
        glScissor((int)pcmd->ClipRect.x,
                  (int)(fb_height - pcmd->ClipRect.w),
                  (int)(pcmd->ClipRect.z - pcmd->ClipRect.x),
                  (int)(pcmd->ClipRect.w - pcmd->ClipRect.y));
        glDrawElements(GL_TRIANGLES,
                       (GLsizei)pcmd->ElemCount,
                       sizeof(ImDrawIdx) == 2 ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT,
                       idx_buffer_offset);
        idx_buffer_offset += pcmd->ElemCount;
    }
}
And here are the (very, very simple) shaders I have written. The shaders have worked for texturing a button before, so I am assuming they are functionally correct.
Vertex shader:
#version 330 core
layout (location = 0) in vec2 pos;
layout (location = 1) in vec2 texCoord;
out vec2 fragTexCoord;
uniform mat4 transform;
void main() {
gl_Position = transform * vec4(pos, 0.0, 1.0);
fragTexCoord = texCoord;
}
Fragment shader:
#version 330 core
out vec4 fragColor;
in vec2 fragTexCoord;
uniform sampler2D sampler;
void main() {
fragColor = texture(sampler, fragTexCoord);
}
I'm at a total loss! Any help would be greatly appreciated.
Debugging an incorrect OpenGL setup/state can be quite difficult. It's unclear why you are rewriting your own back-end instead of using exactly the code provided in imgui_impl_glfw_gl3.cpp, but here is what you can do:
Start again from the known-working imgui_impl_glfw_gl3.cpp and turn it step by step into your own code to see what makes it break.
Disable scissor temporarily.
Since you are using RenderDoc already: does it show you the correct mesh? Are the vertices that it shows you ok?
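For comparison, the vertex attribute setup in the referenced imgui_impl_glfw_gl3.cpp looks roughly like the sketch below (the g_AttribLocation* names are the ones used in that file). Note that the reference backend also streams the per-vertex color stored in ImDrawVert::col and multiplies it with the texture sample in its fragment shader, which your two-attribute setup and shaders currently skip:
glEnableVertexAttribArray(g_AttribLocationPosition);
glEnableVertexAttribArray(g_AttribLocationUV);
glEnableVertexAttribArray(g_AttribLocationColor);
glVertexAttribPointer(g_AttribLocationPosition, 2, GL_FLOAT, GL_FALSE,
                      sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, pos));
glVertexAttribPointer(g_AttribLocationUV, 2, GL_FLOAT, GL_FALSE,
                      sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, uv));
// Color is stored as 4 unsigned bytes per vertex and normalized to [0,1] here.
glVertexAttribPointer(g_AttribLocationColor, 4, GL_UNSIGNED_BYTE, GL_TRUE,
                      sizeof(ImDrawVert), (GLvoid*)IM_OFFSETOF(ImDrawVert, col));
Diffing your attribute and shader setup against this is a quick way to spot what the rewrite dropped.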
Related
I have some code to render GUIs, and if I don't use a vertex shader then it renders exactly where it's meant to:
However, as soon as I use a vertex shader, even one as simple as
gl_Position = vec4(position, 1.0);
it hides, moves, or otherwise makes my GUI disappear.
What's the correct way to write a shader for GUIs in OpenGL?
GUI render:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, 0, height, -10, 10);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
RendererUtils.setWireframeMode(false);
for (Interface i : interfaces)
{
    i.updateShaderForThisB();
    if (i instanceof InterfaceContainer)
    {
        ((InterfaceContainer) i).draw();
    }
    else
    {
        ((InterfaceControl) i).draw();
    }
}
InterfaceShader.getInstance().unbind();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
InterfaceContainer's and InterfaceControl's draw calls are largely the same, so I'll only include one of them.
InterfaceControl.draw()
public void draw()
{
    this.updateShaderForThisB();
    this.getMesh().draw();
    if (this.hasText)
    {
        //this.updateShaderForThisF();
        //drawText();
    }
}
InterfaceControl.updateShaderForThisB()
public void updateShaderForThisB()
{
InterfaceShader shader = InterfaceShader.getInstance();
shader.bind();
shader.setColour(this.getActingColour());
shader.setLocation(this.getLocation());
shader.setSize(this.getBounds());
shader.setGradient(this.getShouldGradient());
shader.updateUniforms();
}
Mesh.draw()
public void draw()
{
glEnableVertexAttribArray(0); //Vertices
glEnableVertexAttribArray(1); //Tex coords
glEnableVertexAttribArray(2); //Normals
glBindBuffer(GL_ARRAY_BUFFER,vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, false, Vertex.SIZE * 4, 0);
glVertexAttribPointer(1, 2, GL_FLOAT, false, Vertex.SIZE * 4, 12);
glVertexAttribPointer(2, 3, GL_FLOAT, false, Vertex.SIZE * 4, 20);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glDrawElements(GL_TRIANGLES, size, GL_UNSIGNED_INT,0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
}
InterfaceShader.vs
#version 330
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texCoord;
uniform mat4 viewMatrix;
out vec2 texCoord0;
void main()
{
gl_Position = viewMatrix * vec4(position,1.0);
texCoord0 = texCoord;
}
Can anyone see an obvious problem that I've overlooked? My first thought was that the shader was translating my interface-oriented coordinates (i.e. 50, 50, 1) into real-world coordinates, but I don't know.
Edit : As requested, updated shader code and added matrix projection code
https://pastebin.com/gKdewDVi
Pastebin for the Transform class, showing how the view matrix is obtained and how it's passed back to the shader.
The fixed-function vertex processing pipeline multiplies the vertex positions by the current matrix given by GL_PROJECTION * GL_MODELVIEW. You have to do the same thing in your vertex shader, like so:
gl_Position = gl_ModelViewProjectionMatrix * vec4(position, 1.0);
Please note that you are relying on outdated OpenGL programming practices by using fixed-function processing; it has been deprecated for a while now.
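If you want to drop the fixed-function matrices entirely, the alternative is to build the projection yourself and upload it to the viewMatrix uniform that InterfaceShader.vs already declares. A minimal sketch, assuming C++ with GLM (shaderProgram is a placeholder for your linked program handle; the same GL calls are available from LWJGL):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

// Same projection as glOrtho(0, width, 0, height, -10, 10), built on the CPU.
glm::mat4 proj = glm::ortho(0.0f, (float)width, 0.0f, (float)height, -10.0f, 10.0f);

// The program must be bound before setting its uniforms.
glUseProgram(shaderProgram);
GLint loc = glGetUniformLocation(shaderProgram, "viewMatrix");
glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(proj));
The point is that the projection becomes an ordinary uniform you compute and upload yourself instead of implicit fixed-function state.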
I'm creating a set of classes to read in 3D objects from COLLADA files. I started with some basic code to read in the positions and normals and plot them with OpenGL. I added code to scale the vertices successfully, and added all the code I need to read in the color or texture connected with each graphics element in the COLLADA file. But now I need to add the code to draw the vertices with color. I have created the buffer object array to house a color array for each of the vertex arrays and buffer objects.
This is the code I have to build the arrays from data I obtain from the COLLADA file:
Keep in mind I am still creating this, so it's not perfect.
// Set vertex coordinate data
glBindBuffer(GL_ARRAY_BUFFER, vbosPosition[i]);
glBufferData(GL_ARRAY_BUFFER, col->vectorGeometry[i].map["POSITION"].size,
scaledData, GL_STATIC_DRAW);
free(scaledData);
loc = glGetAttribLocation(program, "in_coords");//get a GLuint for the attribute and put it into GLuint loc.
glVertexAttribPointer(loc, col->vectorGeometry[i].map["POSITION"].stride, col->vectorGeometry[i].map["POSITION"].type, GL_FALSE, 0, 0);//glVertexAttribPointer — loc specifies the index of the generic vertex attribute to be modified.
glEnableVertexAttribArray(0);
#ifdef Testing_Mesh3D
PrintGLVertex(vbosPosition[i], col->vectorGeometry[i].map["POSITION"].size / 4);
#endif // Set normal vector data
glBindBuffer(GL_ARRAY_BUFFER, vbosNormal[i]);
glBufferData(GL_ARRAY_BUFFER, col->vectorGeometry[i].map["NORMAL"].size, col->vectorGeometry[i].map["NORMAL"].data, GL_STATIC_DRAW);
loc = glGetAttribLocation(program, "in_normals");
glVertexAttribPointer(loc, col->vectorGeometry[i].map["NORMAL"].stride, col->vectorGeometry[i].map["NORMAL"].type, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, vbosColor[i]);
Material* material = col->mapGeometryUrlToMaterial2Effect[col->vectorGeometry[i].id];
if (material->effect1.size() > 0)
{
    Effect effect1 = material->effect1[0];
    if (effect1.type == enumEffectTypes::color)
    {
        Color color = effect1.color;
        glBufferData(GL_ARRAY_BUFFER, color.length, color.values, GL_STATIC_DRAW);
        loc = glGetAttribLocation(program, "in_colors");
        glVertexAttribPointer(loc, color.length, color.type, GL_FALSE, 0, 0);
    }
    else
    {
    }
}
}
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
// Initialize uniform data
void Mesh3D::InitializeUniforms(GLuint program) {
GLuint program_index, ubo_index;
struct LightParameters params;
// Specify the rotation matrix
glm::vec4 diff_color = glm::vec4(0.3f, 0.3f, 1.0f, 1.0f);
GLint location = glGetUniformLocation(program, "diffuse_color");
glUniform4fv(location, 1, &(diff_color[0]));
// Initialize UBO data
params.diffuse_intensity = glm::vec4(0.5f, 0.5f, 0.5f, 1.0f);
params.ambient_intensity = glm::vec4(0.3f, 0.3f, 0.3f, 1.0f);
params.light_direction = glm::vec4(-1.0f, -1.0f, 0.25f, 1.0f);
// Set the uniform buffer object
glUseProgram(program);
glGenBuffers(1, &ubo);
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, 3 * sizeof(glm::vec4), &params, GL_STREAM_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
glUseProgram(program);
// Match the UBO to the uniform block
glUseProgram(program);
ubo_index = 0;
program_index = glGetUniformBlockIndex(program, "LightParameters");
glUniformBlockBinding(program, program_index, ubo_index);
glBindBufferRange(GL_UNIFORM_BUFFER, ubo_index, ubo, 0, 3 * sizeof(glm::vec4));
glUseProgram(program);
This is a header file containing the two string literals housing the strings used to build the vertex and fragment shaders. Again, I am new to this and not sure how I need to modify the shader to include colored vertices; I have started by adding an input vec4 for the four-float color (including alpha). Any help?
#pragma once
#ifndef Included_shaders
#define Included_shaders
#include<stdio.h>
#include<iostream>
static std::string shaderVert = "#version 330\n"
"in vec3 in_coords;\n"
"in vec3 in_normals;\n"
"in vec4 in_colors; \n"//added by me
"out vec3 vertex_normal;\n"
"void main(void) {\n"
"vertex_normal = in_normals;\n"
"gl_Position = vec4(in_coords, 1.0);\n"
"}\n";
static std::string shaderFrag = "#version 330\n"
"in vec3 vertex_normal;\n"
"out vec4 output_color;\n"
"layout(std140) uniform LightParameters{\n"
"vec4 diffuse_intensity;\n"
"vec4 ambient_intensity;\n"
"vec4 light_direction;\n"
"};\n"
"uniform vec4 diffuse_color;\n"
"void main() {\n"
"/* Compute cosine of angle of incidence */\n"
"float cos_incidence = dot(vertex_normal, light_direction.xyz);\n"
"cos_incidence = clamp(cos_incidence, 0, 1);\n"
"/* Compute Blinn term */\n"
"vec3 view_direction = vec3(0, 0, 1);\n"
"vec3 half_angle = normalize(light_direction.xyz + view_direction);\n"
"float blinn_term = dot(vertex_normal, half_angle);\n"
"blinn_term = clamp(blinn_term, 0, 1);\n"
"blinn_term = pow(blinn_term, 1.0);\n"
"/* Set specular color and compute final color */\n"
"vec4 specular_color = vec4(0.25, 0.25, 0.25, 1.0);\n"
"output_color = ambient_intensity * diffuse_color +\n"
"diffuse_intensity * diffuse_color * cos_incidence +\n"
"diffuse_intensity * specular_color * blinn_term;\n"
"}\n";
#endif
Finally, this is the function I am modifying to draw the colored elements:
void Mesh3D::DrawToParent()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Draw elements of each mesh in the vector
for (int i = 0; i < nVectorGeometry; i++)
{
    glBindVertexArray(vaos[i]);
    glDrawElements(col->vectorGeometry[i].primitive/*This is 4 for GL_Triangles*/, col->vectorGeometry[i].index_count,
                   GL_UNSIGNED_SHORT, col->vectorGeometry[i].indices);
}
glBindVertexArray(0);
glutSwapBuffers();
}
I am getting a little confused about glVertexAttribPointer and glGetAttribLocation, though I think I get the basic idea. Am I using them right?
Am I setting up the buffer object for colors correctly? Am I right that I should have a color for each vertex in this buffer? Right now I have only placed the single color that applies to all associated buffers in this array, and I probably need to change that.
How exactly do I go about drawing the colored vertices when I call glDrawElements?
Don't just refer me to the OpenGL resources; a lot of the wordy explanations make little sense to me.
Make your vertex shader output color and make the fragment shader take it as input. The color will be interpolated between vertices.
Yes, it seems you have understood glVertexAttribPointer correctly.
glDrawElements takes the indices of the vertices you want to draw. The last argument should not contain the indices; instead, it should probably be null. The indices should be specified with glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ...).
If the color buffer is correctly bound you don't need to do anything special in glDrawElements for the colors. The shaders will get all the enabled attribute arrays.
I can help you better if you run the code and you tell me what problems you get. It would also help if the code was easier to read. If you split it into functions under ten lines in length you may spot some errors yourself and possibly remove some duplication.
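For the first point, a minimal sketch of the change to the two string literals from the question (lighting terms left out for brevity; an illustration rather than a drop-in replacement):
// Vertex shader: forward the per-vertex color to the fragment stage.
static std::string shaderVertColored = "#version 330\n"
    "in vec3 in_coords;\n"
    "in vec4 in_colors;\n"
    "out vec4 vertex_color;\n"
    "void main(void) {\n"
    "    vertex_color = in_colors;\n"
    "    gl_Position = vec4(in_coords, 1.0);\n"
    "}\n";
// Fragment shader: use the interpolated per-vertex color instead of
// (or multiplied with) the diffuse_color uniform.
static std::string shaderFragColored = "#version 330\n"
    "in vec4 vertex_color;\n"
    "out vec4 output_color;\n"
    "void main() {\n"
    "    output_color = vertex_color;\n"
    "}\n";
And for the glDrawElements point, the indices go into an element array buffer and the last argument becomes a byte offset (usually 0). A sketch using the fields already present in vectorGeometry (the ibo handle is hypothetical, and GLushort is assumed to match the GL_UNSIGNED_SHORT used in the draw call):
// At setup time, while the VAO is bound, upload the indices once.
GLuint ibo;
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
             col->vectorGeometry[i].index_count * sizeof(GLushort),
             col->vectorGeometry[i].indices, GL_STATIC_DRAW);
// At draw time, pass an offset into that buffer instead of a client pointer.
glDrawElements(col->vectorGeometry[i].primitive,
               col->vectorGeometry[i].index_count,
               GL_UNSIGNED_SHORT, (GLvoid*)0);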
EDIT: see at the end for new investigations on the subject.
I've been experiencing an odd behavior with my shaders. In short, I find it very strange that to pass a single float from a vertex shader to a fragment shader I also have to pass a fake variable, and I am looking for an explanation of this behavior.
In more detail: I wrote two minimalistic shaders:
a vertex shader that passes one float (only one, this is important), vertAlpha, to the fragment shader, like so:
#version 150
uniform float alphaFactor;
uniform mat4 cameramodel;
in float vertAlpha;
in vec3 vert;
in vec3 vertScale;
in vec3 trans;
out float fragAlpha;
void main()
{
fragAlpha = alphaFactor * vertAlpha;
gl_Position = cameramodel * vec4( trans + ( vert * vertScale ), 1.0f );
}
and a fragment shader that uses the passed variable:
#version 150
in float fragAlpha;
out vec4 finalColor;
void main()
{
finalColor = vec4( 0.0f, 0.0f, 0.0f, fragAlpha );
}
But that doesn't work: nothing appears on the screen, and it seems that in the fragment shader fragAlpha keeps its initialization value of 0 and ignores the passed value.
After investigating, I found a "hack" to solve this: the fragment shader "sees" the passed value for fragAlpha only if a fake (unused) value is passed along with it (depending on the platform (OS X / NVIDIA GeForce 9400, or a Windows laptop with Intel HD Graphics), a vec3 or a vec2 is sufficient).
So this vertex shader solves the problem:
#version 150
uniform float alphaFactor;
uniform mat4 cameramodel;
in vec3 vertFake;
in float vertAlpha;
in vec3 vert;
in vec3 vertScale;
in vec3 trans;
out float fragAlpha;
out vec3 fragFake;
void main()
{
fragAlpha = alphaFactor * vertAlpha;
fragFake = vertFake;
gl_Position = cameramodel * vec4( trans + ( vert * vertScale ), 1.0f );
}
I find this more of a "hack" than a solution. What should I do to solve this properly?
Is there a "per driver manufacturer" minimum threshold on the amount of data that can be passed from a vertex shader to a fragment shader?
EDIT:
After reading derhass comment, I went back to my code to see if I was doing something wrong.
After more investigation, I found that the problem is not inherent to passing the attribute value to the fragment shader. I changed the order of the attribute declarations in the vertex shader and saw that the attribute that gets location 0 (the location returned by glGetAttribLocation) is not updated by a call to glVertexAttrib*; its value stays at 0.
This will occur, for example, for the "trans" attribute if it is declared before all other attributes. Introducing a fake attribute in my shader only fixed the issue because the fake attribute took location 0.
In any case, glGetError returns no error anywhere.
I use glDrawElements to render, with a VBO that contains the vertex positions; maybe I'm using it inadequately... or it's not well supported by my hardware?
To give some more context, I copy here the calls I make to OpenGL, for setup and rendering:
Setup:
GLuint vao, vbo;
glGenBuffers(1, &vbo);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
GLfloat vertexData[] = {
// X Y Z
0.0f, 0.0f, 1.0f, // 0
1.0f, 0.0f, 1.0f, // 1
0.0f, 0.0f, 0.0f, // 2
1.0f, 0.0f, 0.0f, // 3
0.0f, 1.0f, 1.0f, // 4
1.0f, 1.0f, 1.0f, // 5
1.0f, 1.0f, 0.0f, // 6
0.0f, 1.0f, 0.0f // 7
};
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData), vertexData, GL_STATIC_DRAW);
GLint vert = glGetAttribLocation(p, "vert");
glEnableVertexAttribArray(vert);
glVertexAttribPointer(vert, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glBindVertexArray(0);
render:
glUseProgram(p);
alphaFactor = glGetUniformLocation(p, "alphaFactor");
glUniform1f(alphaFactor, 1.0f);
cameramodel = glGetUniformLocation(p, "cameramodel");
glm::mat4 mat = gCamera.matrix();
glUniformMatrix4fv(cameramodel, 1, GL_FALSE, glm::value_ptr(mat));
glBindVertexArray(vao);
GLubyte indices[36] =
{
3, 2, 6, 7, 6, 2, 6, 7, 4, 2, 4, 7, 4, 2, 0, 3, 0, 2, 0, 3, 1, 6, 1, 3, 1, 6, 5, 4, 5, 6, 5, 4, 1, 0, 1, 4
};
GLint trans = glGetAttribLocation(p, "trans");
GLint alpha = glGetAttribLocation(p, "vertAlpha");
GLint scale = glGetAttribLocation(p, "vertScale");
// loop over many entities:
    glVertexAttrib3f(trans, m_vInstTransData[offset], m_vInstTransData[offset + 1], m_vInstTransData[offset + 2]);
    glVertexAttrib1f(alpha, m_vInstTransData[offset + 3]);
    glVertexAttrib3f(scale, m_vInstTransData[offset + 4], m_vInstTransData[offset + 5], m_vInstTransData[offset + 6]);
    glDrawElements(GL_TRIANGLES, sizeof(indices) / sizeof(GLubyte), GL_UNSIGNED_BYTE, indices);
// end loop
glBindVertexArray(0);
You need to specify the interpolation qualifier for this. To prevent interpolation between fragments of the same primitive, change:
out float fragAlpha;
in float fragAlpha;
into:
flat out float fragAlpha;
flat in float fragAlpha;
because float is interpolated by default. Similarly, mat3 is not interpolated by default, and if you want to interpolate it then use smooth instead of flat.
If I remember correctly, scalars (int, float, double) and vectors (vec?, dvec?) are interpolated by default and matrices (mat?) are not.
I am not sure which value will end up in your flat variable; my bet is the one set on the last vertex of the primitive. In case you need to compute it on a specific vertex, you should move the computation into a geometry shader.
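If it matters which vertex's value a flat input ends up with, core OpenGL 3.2 (the level implied by #version 150) lets you choose the convention explicitly; by default it is the last vertex of the primitive. A minimal sketch:
// Take flat inputs from the first vertex of each primitive instead of the last.
glProvokingVertex(GL_FIRST_VERTEX_CONVENTION);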
There are a couple of questions like this, but I still haven't really understood. I was coding with OpenGL over 10 years ago and noticed how difficult it is to get into modern OpenGL. The OpenGL.org page is a horrible mess when it comes to examples: you never know which version they target, and different versions seem to be mixed up in the various code examples.
Alright, I have an old code base I want to update to at least OpenGL 3. So the first thing I did was to move on from glVertex3fv and finally do it with glVertexAttribPointer (via an intermediate step with glVertexPointer, until I read that this is deprecated now as well). This works out fine, but when trying to place textures I got stuck quickly, and I assume it is because of wrong positioning. I also wanted to get rid of this C++ code:
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glFrustum( -RProjZ, +RProjZ, -Aspect*RProjZ, +Aspect*RProjZ, 1.0, 32768.0 );
and to draw it
// bind vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, VertBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * size, verts, GL_STATIC_DRAW);
// enable arrays
glEnableVertexAttribArray(0);
// set pointers
glVertexAttribPointer(0,3,GL_FLOAT, GL_FALSE, sizeof(float) * floatsPerVertex, 0);
// render ComplexSurface
glDrawArrays(GL_TRIANGLE_FAN, 0, size);
glDisableVertexAttribArray(0);
with in the vertexshader
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
And everything is working magically. Now don't get me wrong, I'm a big fan of magic, but...
Then I found a couple of matrix conversions which can be used to get a matrix to replace glFrustum, but whenever I try to replace it, it fails badly (although I think I understood the maths behind glFrustum and the conversion into the matrix).
What I tried is something like
buildPerspProjMat(g_ProjView,FovAngle,Aspect,1.0,32768.0 );
glUseProgram(g_program);
glUniformMatrix4fv(g_programFrustum, 1, GL_FALSE, g_ProjView );
glUseProgram(0);
and using the position in the shader from the buffer above together with the projection matrix, but this doesn't work out at all.
So what I plainly don't get now is where to replace this, and with what, in the shader. I don't know at which point glMatrixMode takes effect and "when" to replace it with some uniform matrix (passing the arguments as a uniform isn't the problem here).
I can't count how many tutorials I have read already, but I always get confused by all the mixed versions. I am always happy about code examples, but please OpenGL 3 or higher.
The next would be a replacement for glTexCoord2f for texturing, but that's a different story :)
I find that when thinking about modern OpenGL it is best to forget that glMatrixMode ever existed.
With that in mind, let's go over what you need for the most basic draw operation: a replacement for gl_ModelViewProjectionMatrix. As its name implies, this is a combination of 3 different matrices: the model matrix, the view matrix, and the projection matrix.
So what you'll need in your shader to accommodate this is 3 uniform variables of type mat4, which you'll use like so:
uniform mat4 projMat;
uniform mat4 viewMat;
uniform mat4 modelMat;
layout (location = 0) in vec3 position;
void main()
{
gl_Position = projMat * viewMat * modelMat * vec4(position, 1.0);
}
This bit of shader code performs the same function as the one you had above. What changed is that the built-in gl_ModelViewProjectionMatrix was replaced by 3 uniform variables (which could be combined into one if you make sure to multiply them yourself on the C++ side before passing them in), and the built-in gl_Vertex was replaced by an input variable.
On the C++ side you will need to do 2 things. First you'll need to get the location for each of these uniforms:
GLuint modelMatIdx = glGetUniformLocation(shaderProgId, "modelMat");
GLuint viewMatIdx = glGetUniformLocation(shaderProgId, "viewMat");
GLuint projMatIdx = glGetUniformLocation(shaderProgId, "projMat");
And with this in hand you can now pass in the values for each uniform right before drawing using glUniformMatrix4fv.
One library which makes this particularly easy is glm. For example, to get the same projection matrix as in your example you would do:
glm::mat4 projMat = glm::frustum(-RProjZ, +RProjZ, -Aspect*RProjZ, +Aspect*RProjZ, 1.0, 32768.0);
and you would pass it in like so:
glUniformMatrix4fv(projMatIdx, 1, GL_FALSE, glm::value_ptr(projMat));
Now that you know how, I'd like to address the issue of "when". You said you weren't clear about the matrix mode stuff, and that brings me back to my earlier assertion: forget about it. The matrix mode was there so that you could tell OpenGL which built-in matrix should be affected by OpenGL matrix operations such as glTranslate, glFrustum and so on, but all of this is gone now. You are now in charge of managing the (possibly many) matrices involved. All you have to do is pass them in before you draw (as I've shown above) and you'll be fine. Just make sure the program is bound before you attempt to modify its uniforms.
Here's a working example (if you're surprised by gl::... instead of gl..., it's because I'm using an OpenGL header generated by glLoadGen, which puts all of the OpenGL API functions in the gl namespace).
GLuint simpleProgramID;
// load the shader and make the program
GLuint modelMatIdx = gl::GetUniformLocation(simpleProgramID, "modelMat");
GLuint viewMatIdx = gl::GetUniformLocation(simpleProgramID, "viewMat");
GLuint projMatIdx = gl::GetUniformLocation(simpleProgramID, "projMat");
GLuint vaoID;
gl::GenVertexArrays(1, &vaoID);
gl::BindVertexArray(vaoID);
GLuint vertBufferID, indexBufferID;
gl::GenBuffers(1, &vertBufferID);
gl::GenBuffers(1, &indexBufferID);
struct Vec2 { float x, y; };
struct Vec3 { float x, y, z; };
struct Vert { Vec3 pos; Vec2 tex; };
std::array<Vert, 8> cubeVerts = {{
{ { 0.5f, 0.5f, 0.5f }, { 1.0f, 0.0f } }, { { 0.5f, 0.5f, -0.5f }, { 1.0f, 1.0f } },
{ { 0.5f, -0.5f, -0.5f }, { 0.0f, 1.0f } }, { { 0.5f, -0.5f, 0.5f }, { 0.0f, 0.0f } },
{ { -0.5f, 0.5f, 0.5f }, { 0.0f, 0.0f } }, { { -0.5f, 0.5f, -0.5f }, { 0.0f, 1.0f } },
{ { -0.5f, -0.5f, -0.5f }, { 1.0f, 1.0f } }, { { -0.5f, -0.5f, 0.5f }, { 1.0f, 0.0f } }
}};
std::array<unsigned int, 36> cubeIdxs = {{
0, 2, 1, 0, 3, 2, // Right
4, 5, 6, 4, 6, 7, // Left
0, 7, 3, 0, 4, 7, // Top
1, 6, 2, 1, 5, 6, // Bottom
0, 5, 1, 0, 4, 5, // Front
3, 7, 6, 3, 6, 2 // Back
}};
// Vertex buffer
gl::BindBuffer(gl::ARRAY_BUFFER, vertBufferID);
gl::BufferData(gl::ARRAY_BUFFER, sizeof(Vert) * cubeVerts.size(), cubeVerts.data(), gl::STATIC_DRAW);
gl::EnableVertexAttribArray(0); // Matches layout (location = 0)
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE_, sizeof(Vert), 0);
gl::EnableVertexAttribArray(1); // Matches layout (location = 1)
gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE_, sizeof(Vert), (GLvoid*)sizeof(Vec3));
// Index buffer
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, indexBufferID);
gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * cubeIdxs.size(), cubeIdxs.data(), gl::STATIC_DRAW);
gl::BindVertexArray(0);
glm::mat4 projMat = glm::perspective(56.25f, 16.0f/9.0f, 0.1f, 100.0f);
glm::mat4 viewMat = glm::lookAt(glm::vec3(5, 5, 5), glm::vec3(0, 0, 0), glm::vec3(0, 0, 1));
glm::mat4 modelMat; // identity
while (!glfwWindowShouldClose(window))
{
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
gl::UseProgram(simpleProgramID);
gl::UniformMatrix4fv(projMatIdx, 1, gl::FALSE_, glm::value_ptr(projMat));
gl::UniformMatrix4fv(viewMatIdx, 1, gl::FALSE_, glm::value_ptr(viewMat));
gl::UniformMatrix4fv(modelMatIdx, 1, gl::FALSE_, glm::value_ptr(modelMat));
gl::BindVertexArray(vaoID);
gl::DrawElements(gl::TRIANGLES, 36, gl::UNSIGNED_INT, 0);
gl::BindVertexArray(0);
gl::UseProgram(0);
glfwSwapBuffers(window);
glfwPollEvents();
}
Associated Vertex Shader:
//[VERTEX SHADER]
#version 430
uniform mat4 projMat;
uniform mat4 viewMat;
uniform mat4 modelMat;
layout (location = 0) in vec3 in_position; // matches gl::EnableVertexAttribArray(0);
layout (location = 1) in vec2 in_uv; // matches gl::EnableVertexAttribArray(1);
out vec2 uv;
void main()
{
gl_Position = projMat * viewMat * modelMat * vec4(in_position, 1.0);
uv = in_uv;
}
And finally Fragment shader:
//[FRAGMENT SHADER]
#version 430
in vec2 uv;
out vec4 color;
void main()
{
color = vec4(uv, 0.0, 1.0);
}
The resulting image is:
Well, I agree that most OpenGL tutorials confuse bits of deprecated and non-deprecated stuff. To get you in the right direction, let me explain.
gl_ModelViewProjectionMatrix, gl_ModelView, glMatrixMode() and the matrix stack (glPushMatrix(), glPopMatrix()) are deprecated. You need to define your own matrices as uniform variables, then set them and pass them to the shader using glUniform*.
gl_Vertex is also deprecated; in fact, all of the fixed attribute names are. Instead you need to define your own attribute names and bind them to specific locations. Then you can set their values using glVertexAttribPointer by passing the attribute location to it (full explanation here). For example:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertices); // for vertices
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, color); // for color
And for the shader code
layout (location = 0) in vec4 vertex;
layout (location = 1) in vec4 color;
uniform mat4 modelview;
uniform mat4 projection;
void main()
{
gl_Position = projection* modelview* vertex;
}
For the attributes locations you can set them in the shader code as I did, or from OpenGL API using glBindAttribLocation.
Managing uniform variables can be somewhat tricky if you are used to the old OpenGL global variables such as gl_ModelView. I wrote an article that hopefully can help you manage uniform variables in a big project.
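For the glBindAttribLocation route, a rough sketch (program is a placeholder for your shader program handle; the bindings must be made before the program is linked, and the names match the shader above):
// Bind attribute names to explicit locations, then link the program.
glBindAttribLocation(program, 0, "vertex");
glBindAttribLocation(program, 1, "color");
glLinkProgram(program);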
I created a class that renders video frames (on Mac) to a custom framebuffer object. As input I have a YUV texture, and I successfully created a fragment shader which takes as input 3 rectangle textures (one each for the Y, U and V planes; the data for them is uploaded by glTexSubImage2D using GL_TEXTURE_RECTANGLE_ARB, GL_LUMINANCE and GL_UNSIGNED_BYTE). Before rendering I set the active textures to three different texture units (0, 1 and 2) and bind a texture for each, and for performance reasons I used GL_APPLE_client_storage and GL_APPLE_texture_range. Then I rendered it using glUseProgram(myProg), glBegin(GL_QUADS) ... glEnd().
That worked fine, and I got the expected result (aside from a flickering effect, which I guess has to do with the fact that I used two different GL contexts on two different threads, and I suppose they get in each other's way at some point [that's a topic for another question later]). Anyway, I decided to further improve my code by adding a vertex shader as well, so that I can skip the glBegin/glEnd - which I read is outdated and should be avoided anyway.
So as a next step I created two buffer objects, one for the vertices and one for the texture coordinates:
const GLsizeiptr posSize = 4 * 4 * sizeof(GLfloat);
const GLfloat posData[] =
{
-1.0f, -1.0f, -1.0f, 1.0f,
1.0f, -1.0f, -1.0f, 1.0f,
1.0f, 1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, -1.0f, 1.0f
};
const GLsizeiptr texCoordSize = 4 * 2 * sizeof(GLfloat);
const GLfloat texCoordData[] =
{
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0
};
glGenBuffers(1, &m_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, posSize, posData, GL_STATIC_DRAW);
glGenBuffers(1, &m_texCoordBuffer);
glBindBuffer(GL_ARRAY_BUFFER, m_texCoordBuffer);
glBufferData(GL_ARRAY_BUFFER, texCoordSize, texCoordData, GL_STATIC_DRAW);
Then after loading the shaders I try to retrieve the locations of the attributes in the vertex shader:
m_attributeTexCoord = glGetAttribLocation( m_shaderProgram, "texCoord");
m_attributePos = glGetAttribLocation( m_shaderProgram, "position");
which gives me 0 for texCoord and 1 for position, which seems fine.
After getting the attributes I also call
glEnableVertexAttribArray(m_attributePos);
glEnableVertexAttribArray(m_attributeTexCoord);
(I am doing that only once. Or does it have to be done before every glVertexAttribPointer and glDrawArrays? Does it need to be done per texture unit? Or only while my shader is activated with glUseProgram? Or can I do it just anywhere?)
After that I changed the rendering code to replace the glBegin/glEnd:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texID_Y);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texID_U);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texID_V);
glUseProgram(myShaderProgID);
// new method with shaders and buffers
glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffer);
glVertexAttribPointer(m_attributePos, 4, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer(GL_ARRAY_BUFFER, m_texCoordBuffer);
glVertexAttribPointer(m_attributeTexCoord, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(GL_QUADS, 0, 4);
glUseProgram(0);
But since changing the code to this, I only ever get a black screen as a result. So I suppose I am missing some simple steps, maybe some glEnable/glDisable or setting some things properly - but like I said, I am new to this, so I haven't really got an idea. For your reference, here is the vertex shader:
#version 110
attribute vec2 texCoord;
attribute vec4 position;
// the tex coords for the fragment shader
varying vec2 texCoordY;
varying vec2 texCoordUV;
//the shader entry point is the main method
void main()
{
texCoordY = texCoord;
texCoordUV = texCoordY * 0.5; // U and V are only half the size of Y texture
gl_Position = gl_ModelViewProjectionMatrix * position;
}
My guess is that I am missing something obvious here, or just don't have a deep enough understanding of the processes involved yet. I tried using OpenGLShaderBuilder as well, which helped me get the original code for the fragment shader right (this is why I haven't posted it here), but since adding the vertex shader it doesn't give me any output either (I was wondering how it could know how to produce the output if it doesn't know the position/texCoord attributes anyway?).
I haven't closely studied every line, but I think your logic is mostly correct. What I see missing is glEnableVertexAttribArray. You need to enable both vertex attributes before the glDrawArrays call.
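Concretely, a sketch of the render path from the question with the enables placed next to the draw call (names as in your snippet):
glUseProgram(myShaderProgID);

glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffer);
glVertexAttribPointer(m_attributePos, 4, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(m_attributePos);        // enable before drawing

glBindBuffer(GL_ARRAY_BUFFER, m_texCoordBuffer);
glVertexAttribPointer(m_attributeTexCoord, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(m_attributeTexCoord);   // enable before drawing

glDrawArrays(GL_QUADS, 0, 4);
glUseProgram(0);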