OpenGL GL_INVALID_OPERATION at glDrawArrays when using tessellation shaders - C++

I am following the OpenGL SuperBible 6th Edition and I have come upon a peculiar problem.
I have the following code rendering:
const GLfloat color[] = { 0.0f, 0.2f, 0.0f, 1.0f };
//crtime += 0.01;
glClearBufferfv(GL_COLOR, 0, color);
checkError();
glUseProgram(program);
checkError();
GLfloat attrib[] = { (float)sin(crtime) * 0.5f, (float)cos(crtime) * 0.6f, 0.0f, 0.0f };
glVertexAttrib4fv(0, attrib);
checkError();
glDrawArrays(GL_TRIANGLES, 0, 3);
checkError();
checkError() is a simple function that checks if there's any OpenGL error and prints the gluErrorString for it if there is.
My OpenGL program consists of the following shaders:
vertex.glsl
#version 430 core
layout (location = 0) in vec4 offset;
void main(void) {
const vec4 vertices[3] = vec4[3](vec4(0.25, -0.25, 0.5, 1.0), vec4(-0.25, -0.25, 0.5, 1.0), vec4(0.25, 0.25, 0.5, 1.0));
gl_Position = vertices[gl_VertexID] + offset;
}
tessctrlshader.glsl
#version 430 core
layout (vertices = 3) out;
void main(void) {
if(gl_InvocationID == 0) {
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
tessevalshader.glsl
#version 430 core
layout (triangles) in;
void main(void) {
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position) + (gl_TessCoord.y * gl_in[1].gl_Position) + (gl_TessCoord.z * gl_in[2].gl_Position);
}
fragment.glsl
#version 430 core
out vec4 color;
void main(void) {
color = vec4(0.0, 0.8, 1.0, 1.0);
}
Here's the initialization code (LCAShader() loads from a file and prints compilation errors if any)
program = glCreateProgram();
LCAShader("vertex.glsl", program, GL_VERTEX_SHADER);
LCAShader("tessctrlshader.glsl", program, GL_TESS_CONTROL_SHADER);
LCAShader("tessevalshader.glsl", program, GL_TESS_EVALUATION_SHADER);
LCAShader("fragment.glsl", program, GL_FRAGMENT_SHADER);
glLinkProgram(program);
glGenVertexArrays(1, &vertex_array_object);
glBindVertexArray(vertex_array_object);
Every time glDrawArrays is hit, OpenGL throws a GL_INVALID_OPERATION error. However, if I remove both of the tessellation shaders, this does not happen.

I know it's an old thread, but there is still no answer.
In the code from the book, the author calls glDrawArrays with the GL_TRIANGLES parameter.
However, when a tessellation control shader is attached to the program, the draw call must submit patch primitives — any other primitive type makes glDrawArrays generate GL_INVALID_OPERATION. What you have to do is call glDrawArrays(GL_PATCHES, 0, 3); in your render function.

Related

Vertex shader receiving input but no output is generated

I have this vertex shader, which simply passes the position given and passes the UV and color to the fragment shader:
#version 330 core
layout (location = 0) in vec2 in_pos;
layout (location = 1) in vec2 in_texUV;
layout (location = 2) in vec4 in_color;
out vec2 ex_texUV;
out vec4 ex_color;
uniform mat4 projection;
void main()
{
gl_Position = vec4(in_pos, 0.0, 1.0) * projection;
ex_texUV = in_texUV;
ex_color = in_color;
}
Edit: The fragment shader is shown here, and all uniforms are properly set:
#version 330 core
in vec2 in_texUV;
in vec4 in_color;
out vec4 out_color;
uniform vec2 screenSize;
uniform vec3 transparentColour;
uniform sampler2D sprite;
uniform sampler2D palette;
uniform int paletteLines[0x100];
void main()
{
if (in_color.a == 0.0) {
vec4 coord = gl_FragCoord - 0.5;
vec2 screenPos;
screenPos.x = coord.x * screenSize.x;
screenPos.y = coord.y * screenSize.y;
int id = paletteLines[int(screenPos.y)];
int index = int(texture2D(sprite, in_texUV).r * 255);
if (index == 0)
discard;
vec2 palvec;
palvec.x = index;
palvec.y = id;
out_color = texture(palette, palvec);
}
}
(The projection variable is properly set, shown using NVIDIA Nsight.)
Both the vertex and fragment shader have been edited to be simple passthroughs (even setting the fragment shader output to a constant vec4(1.0, 1.0, 1.0, 1.0)), but it has always shown nothing.
To setup for the shader, I first set up the VAO and VBO to pass from a list of DrawVertex:
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(2, &GFXVBO);
glBindBuffer(GL_ARRAY_BUFFER, GFXVBO);
glVertexAttribPointer(0, 2, GL_SHORT, GL_FALSE, sizeof(DrawVertex), 0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_SHORT, GL_FALSE, sizeof(DrawVertex), (void *)(sizeof(short) * 2));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_FALSE, sizeof(DrawVertex), (void *)(sizeof(short) * 4));
glEnableVertexAttribArray(2);
and then draw using the code below (the VAO and VBO are ensured to be bound, and gfxShader is just a helper to use a program):
gfxShader.use();
// [setup the program, change uniforms as necessary]
// lastRenderCount is how many to render
// gfxPolyList is the list of DrawVertex
glBufferData(GL_ARRAY_BUFFER, lastRenderCount * sizeof(DrawVertex), gfxPolyList, GL_DYNAMIC_DRAW);
glDrawArrays(GL_TRIANGLES, 0, lastRenderCount);
gfxShader.stop();
However, despite this, although RenderDoc shows that the input is being passed through, the output shows nothing at all. On top of this, NVIDIA Nsight says that no fragments are being drawn. Where could I be going wrong?
For context, here is struct DrawVertex:
struct DrawVertex {
short x;
short y;
short u;
short v;
Color color = 0; //0xRRGGBBAA
};
gl_Position = vec4(in_pos, 0.0, 1.0) * projection;
This is wrong. Matrix multiplication is not commutative. Should be:
gl_Position = projection * vec4(in_pos, 0.0, 1.0);
If you don't believe me, try this:
glm::mat4 proj = {
1.0f, 0.0f, 3.0f, 5.0f,
0.0f, 1.0f, 3.0f, 6.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 3.0f, 1.0f
};
glm::vec4 vec = { 0.0f, 1.0f, 0.0f, 1.0f };
glm::vec4 result1 = proj * vec;
glm::vec4 result2 = vec * proj;
std::cout << "result1: " << result1.x << result1.y << result1.z << result1.w << '\n';
std::cout << "result2: " << result2.x << result2.y << result2.z << result2.w << '\n';
// Output:
result1: 0167
result2: 5701
https://learnopengl.com/Getting-started/Transformations
Edit: Your fragment shader runs only if in_color.a == 0.0? Are you sure this is correct? Maybe you meant to use != instead.

OpenGL Transparent textures overlapping/blending issue

I'm making a simple 2D game using OpenGL 4.6 in C++. I've currently got both depth testing and blending enabled, where I've been able to render my transparent textures properly. However, when I move one texture to the point that it overlaps another texture, the transparent pixels overlap the texture underneath as well. All my vertices have a z depth value of 0.0f. Would it be better to just disable depth testing? If not, how can I resolve this?
Initial:
Overlap:
Main.cpp:
// Initialize application
GLFWwindow *window = init();
if (!window)
return -1;
// Settings
cout << "OpenGL Version: " << glGetString(GL_VERSION) << endl;
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glClearColor(0.5f, 0.5f, 0.5f, 1.0f);
// Entities
GameUI gameUI;
glfwSetWindowUserPointer(window, &gameUI);
glm::mat4 Projection {glm::ortho(0.0f, 1280.0f, 0.0f, 720.0f, 0.0f, 1.0f)};
glm::mat4 View {glm::translate(glm::mat4(1.0f), glm::vec3(0, 0, 0))};
glm::vec3 translationA {glm::vec3(0, 0, 0)};
glm::mat4 Model = glm::translate(glm::mat4(1.0f), translationA);
glm::mat4 mvp = Projection * View * Model;
Shader shader {"shader/vertex.vert", "shader/fragment.frag"};
shader.bind();
shader.setUniformMat4f("MVP", mvp);
int sampler[] {0, 1, 2};
shader.setUniform1iv("v_Textures", sampler, 3);
// MAIN LOOP //
while (glfwWindowShouldClose(window) == 0) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glClear(GL_COLOR_BUFFER_BIT);
gameUI.update();
gameUI.draw();
glfwSwapBuffers(window);
glfwPollEvents();
// cout << glGetError() << endl;
}
Vertex Shader:
#version 460 core
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texCoord;
layout(location = 2) in float texIndex;
out vec2 v_TexCoord;
out float v_TexIndex;
uniform mat4 MVP;
void main() {
gl_Position = MVP * vec4(position, 1.0);
v_TexCoord = texCoord;
v_TexIndex = texIndex;
}
Fragment Shader:
#version 460 core
in vec2 v_TexCoord;
in float v_TexIndex;
uniform sampler2D v_Textures[3];
void main() {
gl_FragColor = texture(v_Textures[int(v_TexIndex)], v_TexCoord);
}

Where did I make a mistake in my godRays fragment shader?

I am trying to implement god rays however I do not understand where it went wrong. The source of god rays is the center of the cube.
Vertex shader:
#version 330 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec2 aTexCoords;
out vec2 TexCoords;
void main()
{
gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0);
TexCoords = aTexCoords;
}
This is a simple fragment shader, just to show you how the scene looks before I added the god rays code to the fragment shader:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform sampler2D screenTexture;
void main()
{
FragColor = texture2D(screenTexture, TexCoords);
}
Scene without godrays:
Fragment shader when god rays code is added:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform vec2 lightPositionOnScreen;
uniform sampler2D screenTexture;
const float exposure = 0.3f;
const float decay = 0.96815;
const float density = 0.926;
const float weight = 0.587;
const int NUM_SAMPLES = 80;
void main()
{
// Calculate vector from pixel to light source in screen space.
vec2 deltaTexCoord = (TexCoords - lightPositionOnScreen.xy);
vec2 texCoord = TexCoords;
// Divide by number of samples and scale by control factor.
deltaTexCoord *= 1.0f / NUM_SAMPLES * density;
// Store initial sample.
vec3 color = texture2D(screenTexture, TexCoords);
// Set up illumination decay factor.
float illuminationDecay = 1.0f;
// Evaluate summation from Equation 3 NUM_SAMPLES iterations.
for (int i = 0; i < NUM_SAMPLES; i++)
{
// Step sample location along ray.
texCoord -= deltaTexCoord;
// Retrieve sample at new location.
vec3 sample = texture2D(screenTexture, texCoord);
// Apply sample attenuation scale/decay factors.
sample *= illuminationDecay * weight;
// Accumulate combined color.
color += sample;
// Update exponential decay factor.
illuminationDecay *= decay;
}
FragColor = vec4(color * exposure, 1.0);
}
How the scene looks after the god rays code:
This code is used to translate the coordinates of the cube center from world space to window space:
glm::vec4 clipSpacePos = projection * (view * glm::vec4(m_cubeCenter, 1.0));
glm::vec3 ndcSpacePos = glm::vec3(clipSpacePos.x / clipSpacePos.w, clipSpacePos.y / clipSpacePos.w, clipSpacePos.z / clipSpacePos.w);
glm::vec2 windowSpacePos;
windowSpacePos.x = (ndcSpacePos.x + 1.0) / 2.0;
windowSpacePos.y = 1.0f - (ndcSpacePos.y + 1.0) / 2.0;
wxMessageOutputDebug().Printf("test %f x position", windowSpacePos.x);
wxMessageOutputDebug().Printf("test %f y position", windowSpacePos.y);
shaderProgram.loadShaders("Shaders/godRays.vert", "Shaders/godRays.frag");
shaderProgram.use();
shaderProgram.setUniform("lightPositionOnScreen", windowSpacePos);
This is how I am setting up the texture:
GLfloat vertices[] = {
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // bottom left
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f, // top left
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // top right
};
GLuint testBuffer;
glGenBuffers(1, &testBuffer);
glBindBuffer(GL_ARRAY_BUFFER, testBuffer);
glBufferData(GL_ARRAY_BUFFER, 30 * sizeof(GLfloat), &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), NULL);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(GLfloat), (void*)(3 * sizeof(float)));
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, screenTexture);
glDrawArrays(GL_TRIANGLES, 0, 6);
shaderProgram.deleteProgram();
glDeleteBuffers(1, &testBuffer);
Here is the solution. The problem was in the lines vec3 color = texture2D(screenTexture, TexCoords); and vec3 sample = texture2D(screenTexture, texCoord); — texture2D returns a vec4, which cannot be implicitly assigned to a vec3, so the explicit .rgb swizzle is required. I replaced them with vec3 color = texture(screenTexture, TexCoords).rgb; and vec3 sample = texture(screenTexture, texCoord).rgb; respectively.
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform vec2 lightPositionOnScreen;
uniform sampler2D screenTexture;
const float exposure = 0.3f;
const float decay = 0.96815;
const float density = 0.926;
const float weight = 0.587;
const int NUM_SAMPLES = 100;
void main()
{
vec2 deltaTexCoord = vec2(TexCoords.xy - lightPositionOnScreen.xy);
vec2 texCoord = TexCoords;
deltaTexCoord *= 1.0f / NUM_SAMPLES * density;
vec3 color = texture(screenTexture, TexCoords).rgb;
float illuminationDecay = 1.0f;
for (int i = 0; i < NUM_SAMPLES; i++)
{
texCoord -= deltaTexCoord;
vec3 sample = texture(screenTexture, texCoord).rgb;
sample *= illuminationDecay * weight;
color += sample;
illuminationDecay *= decay;
}
FragColor = vec4(color * exposure, 1.0);
}

Geometry shader and MVP matrices issues

I'm working with openGL's Geometry Shader, and doing some tests to get the hang of it before I move on to more complex tasks. I wrote some code to transform inputted GL_POINTS into GL_TRIANGLE_STRIPs, and I apply the MVP matrix transformation in the Geometry Shader. While rendering without the MVP matrix produces the correct output, rendering with it produces nothing. Here is the code (hopefully minimal enough):
int main(){
// Create context and window
sf::Context context;
sf::Window window(sf::VideoMode(800, 600), "Voxel Engine");
window.setVerticalSyncEnabled(true);
// Initialize glew
glewExperimental = true;
GLenum err = glewInit();
if (err != GLEW_OK){
std::cout << "glewIniti failed, aborting...\n";
}
// Create VAO
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
// Vector for verts
std::vector<glm::vec3> vertices;
// Insert a few vertices
for (int i = -5; i < 5; i++){
vertices.push_back(glm::vec3(i*.3, 0.0, 0.0));
}
// View matrix vectors
glm::vec3 eye = glm::vec3(0.0f, 5.0f, 5.0f);
glm::vec3 at = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f);
// Create VBO for vertices
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, vertices.size()*sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);
// Create MVP matrices
glm::mat4 Projection = glm::perspective(45.0f, 4.0f / 3.0f, 0.1f, 100.0f);
glm::mat4 View = glm::lookAt(eye, at, up);
glm::mat4 Model = glm::mat4(1.0f);
// Load the shaders
GLuint programID = LoadShaders("VertexShader.glsl", "GeometryShader.glsl", "FragmentShader.glsl");
// Get uniform locations
GLuint M = glGetUniformLocation(programID, "M");
GLuint V = glGetUniformLocation(programID, "V");
GLuint P = glGetUniformLocation(programID, "P");
// Main loop
bool running = true;
sf::Event event;
while (running){
while (window.pollEvent(event)){
switch (event.type){
case sf::Event::Closed:
window.close();
return 0;
}
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(programID);
glUniformMatrix4fv(M, 1, GL_FALSE, &Model[0][0]);
glUniformMatrix4fv(V, 1, GL_FALSE, &View[0][0]);
glUniformMatrix4fv(P, 1, GL_FALSE, &Projection[0][0]);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawArrays(GL_POINTS, 0, 10);
glDisableVertexAttribArray(0);
window.display();
}
return 0;
}
Vertex Shader:
#version 430 core
layout(location = 0) in vec3 position;
void main(){
gl_Position = vec4(position, 1);
}
Geometry Shader:
#version 430 core
layout (points) in;
uniform mat4 M, V, P;
layout (triangle_strip) out;
layout (max_vertices = 14) out;
void main(void){
// create a vertex for each point on the cube
vec4 v1 = M*V*P*(gl_in[0].gl_Position + vec4(.1, -.1, -.1, 0.0));
vec4 v2 = M*V*P*(gl_in[0].gl_Position + vec4(-.1, -.1, -.1, 0.0));
vec4 v3 = M*V*P*(gl_in[0].gl_Position + vec4(.1, .1, -.1, 0.0));
vec4 v4 = M*V*P*(gl_in[0].gl_Position + vec4(-.1, .1, -.1, 0.0));
vec4 v5 = M*V*P*(gl_in[0].gl_Position + vec4(.1, -.1, .1, 0.0));
vec4 v6 = M*V*P*(gl_in[0].gl_Position + vec4(-.1, -.1, .1, 0.0));
vec4 v7 = M*V*P*(gl_in[0].gl_Position + vec4(-.1, .1, .1, 0.0));
vec4 v8 = M*V*P*(gl_in[0].gl_Position + vec4(.1, .1, .1, 0.0));
gl_Position = v4;
EmitVertex();
gl_Position = v3;
EmitVertex();
gl_Position = v7;
EmitVertex();
gl_Position = v8;
EmitVertex();
gl_Position = v5;
EmitVertex();
gl_Position = v3;
EmitVertex();
gl_Position = v1;
EmitVertex();
gl_Position = v4;
EmitVertex();
gl_Position = v2;
EmitVertex();
gl_Position = v7;
EmitVertex();
gl_Position = v6;
EmitVertex();
gl_Position = v5;
EmitVertex();
gl_Position = v2;
EmitVertex();
gl_Position = v1;
EmitVertex();
EndPrimitive();
}
Fragment Shader:
#version 430 core
out vec4 color;
void main(){
color = vec4(0, 1, 0, 1);
}
I can provide my LoadShaders function if necessary, but I don't think the issue lies there.
So, performance aside, is there anything I'm doing which is blatantly incorrect?

Geometry shader doesn't emit geometry

I am setting the following pipeline:
The vertex shader gets 4 vertices as input, to be drawn as a full-screen quad with a triangle strip:
Vertex Shader:
#version 420 core
layout(location = 0) in vec4 position;
out gl_PerVertex
{
vec4 gl_Position;
}
;
void main()
{
gl_Position = position;
}
Then I want to do some work on them in the geometry shader. But for now I just try to emit the same vertices as a triangle strip from the shader, just to make sure it works OK.
Geom Shader
#version 420 core
layout(invocations = 1,triangles) in;
layout(triangle_strip, max_vertices =4) out;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
gl_Position = vec4( 1.0, 1.0, 0.0, 1.0 );
EmitVertex();
gl_Position = vec4(-1.0, 1.0, 0.0, 1.0 );
EmitVertex();
gl_Position = vec4( 1.0,-1.0, 0.0, 1.0 );
EmitVertex();
gl_Position = vec4(-1.0,-1.0, 0.0, 1.0 );
EmitVertex();
EndPrimitive();
}
And the fragment shader:
#version 420 core
layout(binding=0) uniform sampler2D COLOR_MAP_0;
out vec4 OUTPUT;
void main(void) {
OUTPUT = vec4(1,0,0,1);
}
The client side setup is like this:
glm::vec4 va[4] ={glm::vec4(1.0, 1.0, 0.0, 1.0),glm::vec4(-1.0, 1.0, 0.0, 1.0),glm::vec4( 1.0,-1.0, 0.0, 1.0),glm::vec4(-1.0,-1.0, 0.0, 1.0)};
auto ss = sizeof(va);
glGenVertexArrays(1,&_dummyVao);
glBindVertexArray(_dummyVao);
glGenBuffers(1,&_dummyPoinBuff);
glBindBuffer(GL_ARRAY_BUFFER, _dummyPoinBuff);
glBufferData(GL_ARRAY_BUFFER, ss, &va[0], GL_STATIC_DRAW);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, ((GLubyte *)NULL + (0)));
glEnableVertexAttribArray(0);
glBindVertexArray(0);
The draw call:
glBindVertexArray(_dummyVao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
The weird thing is that it doesn't work! The output is empty. If I switch to emitting points instead, it does emit them. The vertex data is 100% correct: it draws the quad fine if I detach the geometry shader. What could possibly be wrong here?
UPDATE:
This variation does work.
const vec2 vert_data[4] = vec2[](
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2(1.0, 1.0),
vec2(1.0, -1.0)
);
for(int i=0; i<4; i++)
{
gl_Position = vec4( vert_data[i].xy,0,1);///gl_Position;
EmitVertex();
}
EndPrimitive();
Redeclaring the inputs/outputs in the geometry shader still didn't help:
in gl_PerVertex
{
vec4 gl_Position;
} gl_in[];
out gl_PerVertex
{
vec4 gl_Position;
};