Why is my triangle not displayed after tessellation? OpenGL - c++

I started learning OpenGL and I'm at the tessellation stage, and I have a problem: my triangle isn't displayed when I attach the tessellation shaders to the program. The shaders compile correctly and there is no linking error. Everything seems to be fine on the code side. When I use only the vertex shader and fragment shader, everything works fine. What could be the reason?
Shaders::Shaders(const char* vertexPath, const char* fragmentPath, const char* tessControlPath, const char* tessEvaluationPath)
{
std::string vs_str = load_file(vertexPath);
std::string fs_str = load_file(fragmentPath);
std::string tc_str = load_file(tessControlPath);
std::string te_str = load_file(tessEvaluationPath);
const char* vs_src = vs_str.c_str();
const char* fs_src = fs_str.c_str();
const char* tc_src = tc_str.c_str();
const char* te_src = te_str.c_str();
create_shader(GL_VERTEX_SHADER, vertexShader, vs_src);
create_shader(GL_FRAGMENT_SHADER, fragmentShader, fs_src);
create_shader(GL_TESS_CONTROL_SHADER, tessControlShader, tc_src);
create_shader(GL_TESS_EVALUATION_SHADER, tessEvaluationShader, te_src);
program = glCreateProgram();
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glAttachShader(program, tessControlShader);
glAttachShader(program, tessEvaluationShader);
glLinkProgram(program);
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
glDeleteShader(tessControlShader);
glDeleteShader(tessEvaluationShader);
glCreateVertexArrays(1, &vertexArrayObject);
glBindVertexArray(vertexArrayObject);
}
void Shaders::create_shader(GLenum type, GLuint& shader, const char* src)
{
GLint isCompiled = 0;
shader = glCreateShader(type);
glShaderSource(shader, 1, &src, nullptr);
glCompileShader(shader);
glGetShaderiv(shader, GL_COMPILE_STATUS, &isCompiled);
if (isCompiled == GL_FALSE)
{
GLint maxLength = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength);
std::vector<GLchar> errorLog(maxLength);
glGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]);
for (int i = 0; i < errorLog.size(); i++)
{
std::cerr << errorLog[i];
}
glDeleteShader(shader);
}
}
int main()
{
Window* window = new Window(1200, 1000);
Shaders* shader = new Shaders("shaders/vertex.glsl", "shaders/fragment.glsl", "shaders/tess_control.glsl", "shaders/tess_evaluation.glsl");
while (glfwWindowShouldClose(window->get_window()) == false)
{
window->loop();
const GLfloat color[]{1.0f, 0.5f, 0.5f, 1.0f};
glClearBufferfv(GL_COLOR, 0, color);
const GLfloat attrib[]{0.0f, 0.0f, 1.0f, 1.0f};
glUseProgram(shader->get_program());
glVertexAttrib4fv(0, attrib);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDrawArrays(GL_PATCHES, 0, 3);
}
delete window;
delete shader;
glfwTerminate();
}
vertex.glsl
#version 450 core
layout (location = 0) in vec4 color;
out VS_OUT
{
vec4 color;
} vs_out;
void main(void)
{
const vec4 vertices[3] = vec4[3](vec4( 0.25, -0.25, 0.5, 1.0), vec4(-0.25, -0.25, 0.5, 1.0), vec4(0.25, 0.25, 0.5, 1.0));
gl_Position = vertices[gl_VertexID];
vs_out.color = color;
}
fragment.glsl
#version 450 core
in VS_OUT
{
vec4 color;
} fs_in;
out vec4 color;
void main(void)
{
color = fs_in.color;
}
tess_control.glsl
#version 450 core
layout (vertices = 3) out;
void main(void)
{
if (gl_InvocationID == 0)
{
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
tess_evaluation.glsl
#version 450 core
layout (triangles, equal_spacing, ccw) in;
void main(void)
{
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position + gl_TessCoord.y * gl_in[1].gl_Position + gl_TessCoord.z * gl_in[2].gl_Position);
}

You have to specify the number of vertices that make up a single patch primitive with glPatchParameteri:
glPatchParameteri(GL_PATCH_VERTICES, 3);
glDrawArrays(GL_PATCHES, 0, 3);
Furthermore, you have to pass the color all the way through the shader stages, from the vertex shader to the fragment shader:
The input to the tessellation control shader is a patch, and its output is a patch too, but the input patch size can differ from the output patch size. While the input patch size is defined by GL_PATCH_VERTICES, the output patch size is defined by layout (vertices = 3) out. Thus you have to specify, in the shader program, how the attributes of the input patch are mapped to the attributes of the output patch.
#version 450 core
layout (vertices = 3) out;
in VS_OUT
{
vec4 color;
} in_data[];
out VS_OUT
{
vec4 color;
} out_data[];
void main(void)
{
out_data[gl_InvocationID].color = in_data[gl_InvocationID].color;
if (gl_InvocationID == 0)
{
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
The tessellation evaluation shader performs the interpolation: the attributes of the tessellation control shader's output patch are interpolated according to gl_TessCoord.
You have to implement this attribute interpolation in the shader program yourself.
#version 450 core
layout (triangles, equal_spacing, ccw) in;
in VS_OUT
{
vec4 color;
} in_data[];
out VS_OUT
{
vec4 color;
} out_data;
void main(void)
{
out_data.color = in_data[0].color * gl_TessCoord.x + in_data[1].color * gl_TessCoord.y + in_data[2].color * gl_TessCoord.z;
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position + gl_TessCoord.y * gl_in[1].gl_Position + gl_TessCoord.z * gl_in[2].gl_Position);
}
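Putting it together, this is a minimal sketch of the asker's render loop with the missing glPatchParameteri call added (it assumes the Window/Shaders classes from the question):
while (glfwWindowShouldClose(window->get_window()) == false)
{
window->loop();
const GLfloat color[]{1.0f, 0.5f, 0.5f, 1.0f};
glClearBufferfv(GL_COLOR, 0, color);
glUseProgram(shader->get_program());
const GLfloat attrib[]{0.0f, 0.0f, 1.0f, 1.0f};
glVertexAttrib4fv(0, attrib); // constant color attribute at location 0
glPatchParameteri(GL_PATCH_VERTICES, 3); // 3 vertices per input patch
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDrawArrays(GL_PATCHES, 0, 3);
}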

Related

Is it possible to use transform feedback to retrieve the triangles generated by an OpenGL vertex-geometry shader?

I found a great transform feedback tutorial at:
https://open.gl/feedback
What I want is to be able to read the vertices emitted by the geometry shader, rather than using the geometry shader's out variables. I can use the out variables, but it's a bit of a hack.
Is this possible?
The code is:
const GLchar* vertexShaderSrc = R"glsl(
#version 150 core
in float inValue;
out float geoValue;
void main()
{
geoValue = sqrt(inValue);
}
)glsl";
// Geometry shader
const GLchar* geoShaderSrc = R"glsl(
#version 150 core
layout(points) in;
layout(triangle_strip, max_vertices = 3) out;
in float[] geoValue;
out float outValue;
void main()
{
for (int i = 0; i < 3; i++) {
outValue = geoValue[0] + i;
EmitVertex();
}
EndPrimitive();
}
)glsl";
...
// Create query object to collect info
GLuint query;
glGenQueries(1, &query);
// Perform feedback transform
glEnable(GL_RASTERIZER_DISCARD);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, tbo);
glBeginQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN, query);
glBeginTransformFeedback(GL_TRIANGLES);
glDrawArrays(GL_POINTS, 0, 5);
glEndTransformFeedback();
glEndQuery(GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN);
glDisable(GL_RASTERIZER_DISCARD);
glFlush();
// Fetch and print results
GLuint primitives;
glGetQueryObjectuiv(query, GL_QUERY_RESULT, &primitives);
GLfloat feedback[15];
glGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, sizeof(feedback), feedback);
printf("%u primitives written!\n\n", primitives);
for (int i = 0; i < 15; i++) {
printf("%f\n", feedback[i]);
}
Thanks again for all of the comments. Working code can be found at https://github.com/sjhalayka/opengl_gs_transform_feedback
The geometry shader is:
#version 430 core
layout (points) in;
layout (triangle_strip) out;
layout (max_vertices = 6) out;
out vec3 vert;
in VS_OUT
{
vec4 position;
} gs_in[];
void main(void)
{
vec3 vertex0 = vec3(1, 2, 3);
vec3 vertex1 = vec3(4, 5, 6);
vec3 vertex2 = vec3(7, 8, 9);
vert = vertex0;
EmitVertex();
vert = vertex1;
EmitVertex();
vert = vertex2;
EmitVertex();
EndPrimitive();
vertex0 = vec3(10, 11, 12);
vertex1 = vec3(13, 14, 15);
vertex2 = vec3(16, 17, 18);
vert = vertex0;
EmitVertex();
vert = vertex1;
EmitVertex();
vert = vertex2;
EmitVertex();
EndPrimitive();
}
The pertinent C++ code is too long to be useful here.
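For reference, here is a minimal sketch of the C++ side needed to capture the geometry shader's vert output with transform feedback. It is not the repository's actual code, and the buffer size simply assumes the 5-point draw and the 6 emitted vertices per point from the shader above:
// Register the geometry shader output "vert" as a feedback varying
// before linking the program.
const GLchar* feedbackVaryings[] = { "vert" };
glTransformFeedbackVaryings(program, 1, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
glLinkProgram(program);
// Buffer large enough for 5 input points * 6 emitted vertices * vec3.
GLuint tbo;
glGenBuffers(1, &tbo);
glBindBuffer(GL_ARRAY_BUFFER, tbo);
glBufferData(GL_ARRAY_BUFFER, 5 * 6 * 3 * sizeof(GLfloat), nullptr, GL_STATIC_READ);
// Capture pass: the geometry shader emits triangle strips, so the
// transform feedback primitive mode is GL_TRIANGLES.
glEnable(GL_RASTERIZER_DISCARD);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, tbo);
glBeginTransformFeedback(GL_TRIANGLES);
glDrawArrays(GL_POINTS, 0, 5);
glEndTransformFeedback();
glDisable(GL_RASTERIZER_DISCARD);
glFlush();
// Read the captured vertices back.
GLfloat feedback[5 * 6 * 3];
glGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, sizeof(feedback), feedback);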

GLSL shaders going black/transparent

I am trying to add some shaders to my GLUT scene objects.
At this time I am trying to implement "hello world" shaders,
but when I use the default vertex shader, my objects disappear.
shaders:
#define GLSL(version, shader) "#version " #version " core\n" #shader
const char* vert = GLSL
(
330,
layout (std140) uniform Matrices {
mat4 pvm;
} ;
in vec4 position;
out vec4 color;
void main()
{
color = position;
gl_Position = pvm * position ;
}
);
const char* frag = GLSL
(
330,
in vec4 color;
out vec4 outputF;
void main()
{
outputF = vec4(1.0, 0.5, 0.25, 1.0);
}
);
Compilation shows no error:
Compiling shader : vertex shader
VERTEX STATUS:1
Compiling shader : fragment shader
FRAGMENT STATUS:1
Linking program
PROGRAM STATUS:1
PROGRAM ID : 3
Before calling glUseProgram:
After calling glUseProgram:
After calling glUseProgram without attaching the vertex shader:
CODE for rendering:
int opengl_draw_path_gl(rendered_path_t *p) {
unsigned int num_vertices,j;
unsigned int face_size;
unsigned long i,num_elems;
vect_t *a,*b;
num_elems=p->num_prisms;
num_vertices=p->prism_faces;
face_size=num_vertices*2;
a=p->data+2; // saltem punt centre primera cara
b=a+face_size;
glColor4fv(p->color);
// dibuixem tapa inici
_opengl_draw_path_terminator(num_vertices,p->data,a);
// Dibuixem tots els prismes
glBegin(GL_TRIANGLE_STRIP);
for(i=0;i<num_elems;i++) {
for(j=0;j<num_vertices;j++) {
glNormal3fv((GLfloat *)(a+j*2));
glVertex3fv((GLfloat *)(a+j*2+1));
glNormal3fv((GLfloat *)(b+j*2));
glVertex3fv((GLfloat *)(b+j*2+1));
}
glNormal3fv((GLfloat *)(a));
glVertex3fv((GLfloat *)(a+1));
glNormal3fv((GLfloat *)(b));
glVertex3fv((GLfloat *)(b+1));
a+=face_size;
b+=face_size;
}
glEnd();
// dibuixem tapa final
_opengl_draw_path_terminator(num_vertices,b,a);
return 0;
}
First of all, I recommend that you read a tutorial about vertex array objects.
But since you are drawing with glBegin and glEnd, which is deprecated, you have to use compatibility-mode shaders. You have to use the deprecated built-in attributes gl_Vertex and gl_Normal, which correspond to the OpenGL commands glVertex3fv and glNormal3fv.
Adapt your code somewhat like this:
#define GLSL(version, shader) "#version " #version "\n" #shader
Vertex shader:
const char* vert = GLSL
(
110,
varying vec4 position;
varying vec3 normal;
void main()
{
position = gl_ModelViewMatrix * gl_Vertex;
normal = normalize( gl_NormalMatrix * gl_Normal.xyz );
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
);
Fragment shader:
const char* frag = GLSL
(
110,
varying vec4 position;
varying vec3 normal;
void main()
{
gl_FragColor = vec4(1.0, 0.5, 0.25, 1.0);
}
);

GLSL shaders not working. No compile errors, and used glUseProgram

I came up with code to render a rectangle, but the shaders won't work. It still renders with a blank white color.
Here I will include the important code
Main:
float verts[] = {
-.5f, -.5f, .0f,
-.5f, .5f, .0f,
.5f, .5f, .0f,
.5f, .5f, .0f,
.5f, -.5f, .0f,
-.5f, -.5f, .0f
};
Shader shader("basicVert.glsl", "basicFrag.glsl");
GLuint VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), &verts, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
shader.enable();
Shader.cpp (class functions)
Shader::Shader(const string vpath, const string fpath) {
Shader();
current_vpath = vpath;
current_fpath = fpath;
shaderID = init();
}
Shader::Shader(const char *vpath, const char *fpath) {
Shader(string(vpath), string(fpath));
}
Shader::~Shader() {
shaderID = NULL;
glDeleteProgram(shaderID);
}
void Shader::enable() {
glUseProgram(shaderID);
}
GLuint Shader::makeVertextShader(const char* source) {
GLuint vertShaderID = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertShaderID, 1, &source, NULL);
glCompileShader(vertShaderID);
GLint r;
glGetShaderiv(vertShaderID, GL_COMPILE_STATUS, &r);
if (r == GL_FALSE) {
GLint l;
glGetShaderiv(vertShaderID, GL_INFO_LOG_LENGTH, &l);
cout << l << endl;
char *bfer = new char[l];
glGetShaderInfoLog(vertShaderID, l, &l, bfer);
cerr << "Failed to compile VERTEXT SHADER! FILE NAME: " <<
current_vpath << endl;
cerr << bfer << endl;
glDeleteShader(vertShaderID);
delete[] bfer;
return NULL;
}
return vertShaderID;
}
GLuint Shader::makeFragmentShader(const char* source) {
GLuint fragShaderID = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragShaderID, 1, &source, NULL);
glCompileShader(fragShaderID);
GLint r;
glGetShaderiv(fragShaderID, GL_COMPILE_STATUS, &r);
if (r == GL_FALSE) {
GLint l;
glGetShaderiv(fragShaderID, GL_INFO_LOG_LENGTH, &l);
char *bfer = new char[l];
glGetShaderInfoLog(fragShaderID, l, &l, bfer);
cerr << "Failed to compile FRAGMENT SHADER! FILE NAME: " <<
current_fpath << endl;
cerr << bfer << endl;
glDeleteShader(fragShaderID);
delete[] bfer;
return NULL;
}
return fragShaderID;
}
GLuint Shader::init() {
GLuint program = glCreateProgram();
const string vs = readFile(current_vpath);
const string vf = readFile(current_fpath);
const char *vertexsrc = vs.c_str();
const char *fragmentsrc = vf.c_str();
GLuint vertShaderID = this->makeVertextShader(vertexsrc);
GLuint fragShaderID = this->makeFragmentShader(fragmentsrc);
glAttachShader(program, vertShaderID);
glAttachShader(program, fragShaderID);
glLinkProgram(program);
glValidateProgram(program);
glDeleteShader(vertShaderID);
glDeleteShader(fragShaderID);
return program;
}
GLSL Vertex Shader
#version 330 core
layout(location = 0) in vec3 position;
void main(){
gl_Position = position;
}
GLSL Fragment Shader
#version 330 core
layout(location = 0) out vec4 color;
void main(){
color = vec4(1.0, 0.0, 1.0, 1.0);
gl_FragColor = color;
}
gl_FragColor is no longer supported in modern (core profile) versions of GLSL, so drop it.
The vertex shader will be:
layout(location = 0) in vec4 position;
void main()
{
gl_Position = position;
}
and in the fragment shader:
layout(location = 0) out vec4 color;
void main()
{
color = vec4(1.0, 0.0, 1.0, 1.0);
//gl_FragColor is no longer supported in modern versions of GLSL
}
The vertex and fragment shaders have to look like this:
#version 330 core
layout(location = 0) in vec3 position;
void main()
{
gl_Position = vec4( position.xyz, 1.0 );
}
#version 330 core
layout(location = 0) out vec4 color;
void main()
{
color = vec4(1.0, 0.0, 1.0, 1.0);
}
Explanation:
There are 2 issues in your code:
1.) While the vertex attribute position in the vertex shader has the type vec3, the built-in variable gl_Position has the type vec4.
Either the type of the vertex attribute has to be changed:
layout(location = 0) in vec4 position;
or the assignment to gl_Position has to be adapted:
gl_Position = vec4( position.xyz, 1.0 );
2.) In the fragment shader, either the built-in output variable gl_FragColor can be used (deprecated; only available in compatibility profiles):
void main()
{
gl_FragColor = [...];
}
or an explicit output variable has to be declared:
out vec4 color;
void main()
{
color = [...];
}
The problem seems to lie in the vertex shader: You pass a vec3 position to a vec4 gl_Position which effectively sets the w-coordinate to 0. What you need for proper rendering is a homogeneous coordinate of 1 (otherwise the division by w is a division by 0). Try to change your code to
gl_Position = vec4(position, 1.0);

Attribute being removed by compiler, but I can't see why

I'm trying to write a set of shaders to do per-vertex lighting with multiple lights, and textures. I think I'm running into the shader compiler optimizing out 'v_texture', one of my attribute variables, because glGetAttribLocation is failing to find it.
Here's my vertex shader code:
attribute vec3 v_position;
attribute vec3 v_normal;
attribute vec2 v_texture;
varying vec4 color;
varying vec2 texCoord;
const int MAX_LIGHTS = 8;
uniform struct lightSource
{
vec4 position;
vec4 color;
vec3 coneDirection;
float coneAngle;
float ambientFactor;
} sceneLights[MAX_LIGHTS];
uniform mat4 modelView;
uniform mat4 Projection;
uniform float shininess;
uniform int numLights;
vec4 applyLight(lightSource light)
{
vec4 outColor;
float attenuation;
vec3 surfacePos = (modelView * vec4(v_position.xyz, 1.0)).xyz;
vec3 toLight;
if(light.position.w == 0.0)
{
toLight = normalize(light.position.xyz);
attenuation = 1.0;
}
else
{
toLight = normalize(light.position.xyz - surfacePos);
float distanceToLight = length(light.position.xyz - surfacePos);
float lightAngle = degrees(acos(dot(-surfacePos, normalize(light.coneDirection))));
if(lightAngle > light.coneAngle)
{
attenuation = 0.0;
}
else
{
attenuation = 1.0/(1.0 + (0.1 * pow(distanceToLight, 2.0)));
}
}
vec3 Eye = normalize(-surfacePos);
vec3 Halfway = normalize(toLight + Eye);
vec3 worldNormal = normalize(modelView * vec4(v_normal, 0.0)).xyz;
vec4 ambient = vec4(light.ambientFactor * vec3(light.color.xyz), 1.0);
float Kd = max(dot(toLight, worldNormal), 0.0);
vec4 diffuse = Kd * light.color;
float Ks = pow(max(dot(worldNormal, Halfway), 0.0), shininess);
vec4 specular = Ks * light.color;
if(dot(toLight, worldNormal) < 0.0)
{
specular = vec4(0.0, 0.0, 0.0, 0.0);
}
outColor = ambient + (attenuation * (diffuse + specular));
outColor.a = 1.0;
return outColor;
}
void main(void)
{
vec4 colorSum;
colorSum = vec4(0, 0, 0, 0);
for(int i = 0; i < MAX_LIGHTS; i++)
{
if(i >= numLights)
{
break;
}
colorSum += applyLight(sceneLights[i]);
}
colorSum.xyzw = normalize(colorSum.xyzw);
vec3 gammaCorrection = vec3(1.0/2.2);
color = vec4(pow(vec3(colorSum.xyz), gammaCorrection), 1.0);
texCoord = v_texture;
gl_Position = Projection * modelView * vec4(v_position.xyz, 1.0);
}
And the fragment shader:
varying vec4 color;
varying vec2 texCoord;
uniform sampler2D texSampler;
void main(void)
{
gl_FragColor = (color * texture2D(texSampler, texCoord.xy));
}
I've been over the code top to bottom but I just can't see where my problem is. I'm assigning v_texture to the texCoord, and passing it to the frag shader, where I'm using it together with the final lighting result from the vertex shader to yield the final color. I'm checking for shader errors when I compile them, and I'm not getting anything. My only guess at the moment is that maybe gl_FragColor = (color * texture2D(texSampler, texCoord.xy)) isn't a valid statement, but then shouldn't this have caught it?
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &shader_status);
if (!shader_status)
{
GLchar InfoLog[1024];
glGetShaderInfoLog(fragment_shader, sizeof(InfoLog), NULL, InfoLog);
fprintf(stderr, "Fragment Shader %d: '%s'\n", fragment_shader, InfoLog);
}
Edit: I probably should have put this in here in the first place, but this snippet is the shader section from my C++ program's initialize() function. The only one of the glGetAttribLocation calls to fail is for v_texture.
// Begin shader loading.
//compile the shaders
GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
GLuint fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
GLint shader_status;
pass=new char [2048];
unsigned int len;
//Give a maximum number of attempts to load the vertex shader
int attempts=10;
//Load the vertex shader
do
{
loader.load("lighting.vsh");
shaderCode = loader.hold.c_str();
len=loader.length;
pass=shaderCode;
attempts-=1;
}
while(len!=pass.length()&& attempts>0);
//Pass the temporary variable to a pointer
tmp = pass.c_str();
// Vertex shader first
glShaderSource(vertex_shader, 1,&tmp, NULL);
glCompileShader(vertex_shader);
//check the compile status
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &shader_status);
if (!shader_status)
{
GLchar InfoLog[1024];
glGetShaderInfoLog(vertex_shader, sizeof(InfoLog), NULL, InfoLog);
fprintf(stderr, "Vertex Shader %d: '%s'\n", vertex_shader, InfoLog);
}
const char *fs=loader.load("lighting.fsh");
// Now the Fragment shader
glShaderSource(fragment_shader, 1, &fs, NULL);
glCompileShader(fragment_shader);
//check the compile status
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &shader_status);
if (!shader_status)
{
GLchar InfoLog[1024];
glGetShaderInfoLog(fragment_shader, sizeof(InfoLog), NULL, InfoLog);
fprintf(stderr, "Fragment Shader %d: '%s'\n", fragment_shader, InfoLog);
}
//Now we link the 2 shader objects into a program
//This program is what is run on the GPU
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
//check if everything linked ok
glGetProgramiv(program, GL_LINK_STATUS, &shader_status);
if(!shader_status)
{
std::cerr << "[F] THE SHADER PROGRAM FAILED TO LINK" << std::endl;
return false;
}
//Now we set the locations of the attributes and uniforms
//this allows us to access them easily while rendering
loc_position = glGetAttribLocation(program,
const_cast<const char*>("v_position"));
if(loc_position == -1)
{
std::cerr << "Error: POSITION NOT FOUND IN SHADER" << std::endl;
return false;
}
loc_normals = glGetAttribLocation(program, const_cast<const char*>("v_normal"));
if(loc_normals == -1)
{
std::cerr << "Error: NORMALS NOT FOUND IN SHADER" << std:: endl;
return false;
}
loc_texture = glGetAttribLocation(program, const_cast<const char*>("v_texture"));
if(loc_texture == -1)
{
std::cerr << "[F] TEXTURE NOT FOUND IN SHADER" << std::endl;
return false;
}
// Begin light initialization.

OpenGL - Adding Tessellation Control Shader yields black screen

When I add my tessellation control shader to my rendering program, the viewport goes black. Without the TCS, the vertex and fragment shaders work fine. I also checked for compile errors, but none occur.
Vertex shader:
#version 410 core
layout (location = 0) in vec4 offset;
layout (location = 1) in vec4 color;
out VS_OUT {
vec4 color;
} vs_out;
void main(void) {
const vec4 vertices[3] = vec4[3]
(
vec4( 0.25, -0.25, 0.5, 1.0),
vec4(-0.25, -0.25, 0.5, 1.0),
vec4( 0.25, 0.25, 0.5, 1.0)
);
// Add "offset" to our hard-coded vertex position
gl_Position = vertices[gl_VertexID] + offset;
// Output the color from input attrib
vs_out.color = color;
}
Tessellation control shader:
#version 410 core
layout (vertices = 3) out;
void main(void) {
if (gl_InvocationID == 0) {
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation evaluation shader:
#version 410 core
layout (triangles, equal_spacing, cw) in;
void main(void) {
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position +
gl_TessCoord.y * gl_in[1].gl_Position +
gl_TessCoord.z * gl_in[2].gl_Position);
}
Fragment shader:
#version 410 core
in VS_OUT {
vec4 color;
} fs_in;
out vec4 color;
void main(void) {
color = fs_in.color;
}
I forgot to check for shader linking errors. And this is what I get:
WARNING: Output of vertex shader '<out VS_OUT.color>' not read by tessellation control shader
ERROR: Input of fragment shader '<in VS_OUT.color>' not written by tessellation evaluation shader
How can I fix this?
Without the code of the other shaders it's hard to help you.
Make sure your tessellation evaluation shader is correct too. A default one should look like this:
#version 410 core
layout(triangles, equal_spacing, ccw) in;
layout(packed) uniform MatrixBlock
{
mat4 projmat;
mat4 viewmat;
} matTransform;
void main ()
{
vec4 pos = gl_TessCoord.x * gl_in[0].gl_Position
+ gl_TessCoord.y * gl_in[1].gl_Position
+ gl_TessCoord.z * gl_in[2].gl_Position;
gl_Position = matTransform.projmat * matTransform.viewmat * pos;
}
The important part is the interpolation using the barycentric coordinates on the patch triangle. Also, if the transformations are done in your vertex shader instead of the tess eval shader, you may get strange results.
Edit :
Now that you have added tessellation stages, you can't pass varying data directly from the vertex shader to the fragment shader. Indeed, there are new triangles inside the original patch triangle, so you have to set the color for all these new triangles too. When tessellation stages are used, the vertex shader and the tess control shader usually just forward the vertex inputs to the tess eval shader.
So your tess control shader should look like this:
#version 410 core
layout (vertices = 3) out;
in VS_OUT { vec4 color; } tcs_in[]; /* new */
out TCS_OUT { vec4 color; } tcs_out[]; /* new */
void main(void) {
if (gl_InvocationID == 0) {
gl_TessLevelInner[0] = 5.0;
gl_TessLevelOuter[0] = 5.0;
gl_TessLevelOuter[1] = 5.0;
gl_TessLevelOuter[2] = 5.0;
}
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
tcs_out[gl_InvocationID].color = tcs_in[gl_InvocationID].color; /* forward the data */
}
And your tess eval shader must also interpolate the color:
#version 410 core
layout (triangles, equal_spacing, cw) in;
in TCS_OUT { vec4 color; } tes_in[]; /* new */
out TES_OUT { vec4 color; } tes_out; /* new */
void main(void) {
tes_out.color = (gl_TessCoord.x * tes_in[0].color + /* Interpolation */
gl_TessCoord.y * tes_in[1].color +
gl_TessCoord.z * tes_in[2].color );
gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position +
gl_TessCoord.y * gl_in[1].gl_Position +
gl_TessCoord.z * gl_in[2].gl_Position);
}
And of course, in your fragment shader you now have a TES_OUT input block instead of VS_OUT.
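For completeness, a minimal sketch of the matching fragment shader (the block instance name fs_in is just a placeholder):
#version 410 core
in TES_OUT {
vec4 color;
} fs_in;
out vec4 color;
void main(void) {
color = fs_in.color;
}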
I know this question is two years old now, but this might help people in the future who experience the same issue and find this question.
After many hours of trying I figured out the problem. It seems as though the gl_in[].gl_Position inputs of the Tessellation Control Shader are not written to by the vertex shader. I suspect this must be a driver bug (maybe in the NVidia drivers?) because I cannot think of any reason this shouldn't work.
Solution:
Instead of relying on the gl_in[].gl_Position inputs of the Tessellation Control Shader just pass them yourself in a custom output/input.
This can be done by (roughly) adding the following lines to the respective shaders:
// vertex shader
// ...
out vec4 vVertexOut;
void main() {
// ...
vVertexOut = uMVPMatrix * inVertex; // output your transformed vertex
}
// tesselation control shader
// ...
in vec4 vVertexOut[];
out vec4 tVertexOut[];
void main() {
// ...
tVertexOut[gl_InvocationID] = vVertexOut[gl_InvocationID];
}
// tesselation evaluation shader
// ...
in vec4 tVertexOut[];
void main() {
// ...
gl_Position = (tVertexOut[0] * gl_TessCoord[0]) + (tVertexOut[1] * gl_TessCoord[1]) + (tVertexOut[2] * gl_TessCoord[2]);
}