OpenGL + Emscripten textures not getting displayed - C++

I am trying to display textures using OpenGL ES + Emscripten, but all I am getting is black triangles.
(output image)
My vertex shader:
attribute vec3 position;
attribute vec4 color;
attribute vec3 normal;
attribute vec2 uv;
varying vec4 v_color;
varying vec3 v_normal;
varying vec2 v_uv;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
highp mat4 transpose(in highp mat4 inMatrix) {
    highp vec4 i0 = inMatrix[0];
    highp vec4 i1 = inMatrix[1];
    highp vec4 i2 = inMatrix[2];
    highp vec4 i3 = inMatrix[3];
    highp mat4 outMatrix = mat4(
        vec4(i0.x, i1.x, i2.x, i3.x),
        vec4(i0.y, i1.y, i2.y, i3.y),
        vec4(i0.z, i1.z, i2.z, i3.z),
        vec4(i0.w, i1.w, i2.w, i3.w)
    );
    return outMatrix;
}

void main()
{
    v_color = color;
    v_normal = normal;
    v_uv = uv;
    gl_Position = projection * view * transpose(model) * vec4(position.xyz, 1.0);
}
Fragment shader:
precision mediump float;
varying vec4 v_color;
varying vec3 v_normal;
varying vec2 v_uv;
uniform sampler2D tex;
void main()
{
    // gl_FragColor = v_color;
    gl_FragColor = texture2D(tex, v_uv.xy);
}
Texture create function:
void IG::Texture::create(unsigned char* image,
                         uint32_t width,
                         uint32_t height,
                         uint32_t channels)
{
    if(image == nullptr)
    {
        std::cout << "Texture creation failed! invalid image data.." << std::endl;
        return;
    }
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glGenTextures(1, &ID);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, ID);
    // set the texture wrapping parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, attribs.uv_mode);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, attribs.uv_mode);
    // set texture filtering parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, attribs.min_filter);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, attribs.mag_filter);
    glTexImage2D(GL_TEXTURE_2D,
                 attribs.mip_level,
                 GL_RGB,
                 width,
                 height,
                 attribs.border,
                 GL_RGB,
                 GL_UNSIGNED_BYTE,
                 image);
    // glGenerateMipmap(GL_TEXTURE_2D);
}
Here attribs is a struct with the values:
uv_mode(GL_REPEAT),
min_filter(GL_LINEAR_MIPMAP_LINEAR),
mag_filter(GL_LINEAR),
mip_level(0),
border(0)
I have verified that the image is valid using stb_image.
Inside the draw loop, all triangles are iterated over and drawn with this call:
void Renderer::draw_objects(){
    for(auto& v : gl_objects){
        auto& shape = v.second;
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, shape.tex.ID);
        shader->set_mat4("model", shape.transform);
        glBindVertexArray(shape.VAO);
        glUniform1i(shape.tex.samplerLoc, 0);
        glDrawArrays(GL_TRIANGLES, 0, 3);
    }
}
I am taking references from these links: learnopengl, the Emscripten OpenGL ES examples.
I am unable to figure out what I am doing wrong here. What could be the issue?

Your problem comes from the missing mipmaps: the GPU tries to sample a mipmap level that was never created, resulting in black pixels.
A proper call to glGenerateMipmap after glTexImage2D will solve the issue.

If you don't generate mipmaps (with glGenerateMipmap), you must use one of the non-mipmap minification filters (GL_NEAREST or GL_LINEAR). If you use a mipmap minification filter without providing the mipmaps, the texture is "mipmap incomplete".
Either use min_filter(GL_LINEAR) or call glGenerateMipmap(GL_TEXTURE_2D).
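Applied to the create() function from the question, a minimal sketch of either fix could look like this (note that WebGL 1, which Emscripten may target, additionally requires power-of-two dimensions for mipmapped textures):

// Option A: keep min_filter(GL_LINEAR_MIPMAP_LINEAR) and build the
// mipmap chain right after uploading the base level:
glTexImage2D(GL_TEXTURE_2D,
             attribs.mip_level,
             GL_RGB,
             width,
             height,
             attribs.border,
             GL_RGB,
             GL_UNSIGNED_BYTE,
             image);
glGenerateMipmap(GL_TEXTURE_2D); // the texture is now mipmap complete

// Option B: skip mipmaps entirely; a non-mipmap minification filter
// makes the texture complete with just the base level:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);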

Related

When using texture arrays, why do I not have to bind the sampler to the shader?

I am creating an array of textures using GL_TEXTURE_2D_ARRAY in my code:
// Load all images into OpenGL
unsigned int width, height;
std::vector<unsigned char> textures;
int num = 0;
for ( auto each : image_list )
{
    // Load PNG
    std::vector<unsigned char> buffer, this_texture;
    lodepng::load_file(buffer, each.string().c_str());
    auto lode_error = lodepng::decode(this_texture, width, height, buffer);
    if (lode_error)
    {
        LOG_ERROR("lodepng has reported this error: " + std::string(lodepng_error_text(lode_error)));
        return false;
    }
    m_indexes.insert(std::make_pair(each.filename().string(), num));
    textures.insert(textures.end(), this_texture.begin(), this_texture.end());
    num++;
}
// Active texture
glActiveTexture(GL_TEXTURE0);
// Generate texture
glGenTextures(1, &m_texture_id);
glBindTexture(GL_TEXTURE_2D_ARRAY, m_texture_id);
// Send pixels
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,
             GL_RGBA,
             width, height,
             image_list.size(),
             0,
             GL_RGBA,
             GL_UNSIGNED_BYTE,
             textures.data());
// Set options
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Here are the shaders I am using:
Vertex Shader
#version 430 core
/* layouts */
layout (location = 0) in vec3 in_vertex;
layout (location = 1) in vec2 in_uv;
layout (location = 2) in vec4 in_tint;
layout (location = 3) in mat4 in_model;
layout (location = 7) in vec3 in_scale;
layout (location = 8) in float in_textured_index;
/* uniforms */
uniform mat4 ortho;
uniform mat4 view;
/* outputs */
out vec4 tint;
out vec2 uv;
out float textured_index;
void main()
{
    mat4 mvp = ortho * view * in_model;
    gl_Position = mvp * vec4(in_vertex * in_scale, 1.0);
    tint = in_tint;
    uv = in_uv;
    textured_index = in_textured_index;
}
Fragment Shader
#version 430 core
/* inputs from vertex shader */
in vec4 tint;
in vec2 uv;
in float textured_index;
/* output to GPU */
out vec4 fragment;
/* texture sampler */
uniform sampler2DArray sampler_unit;
void main()
{
    fragment = texture(sampler_unit, vec3(uv.xy, textured_index)).rgba;
    fragment = fragment * tint;
}
Code to bind the texture array:
void ArrayTextures::attach()
{
    if (glIsTexture(m_texture_id)){
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D_ARRAY, m_texture_id);
    }
}
What I have noticed is that I do not have to attach the texture unit or the texture ID to my shader, as long as the texture is bound with the above function. It just works, and I would like to understand why. In OpenGL 3.x, you have to bind the sampler to your shader before you can use it. Is there some automatism behind the scenes that I am not aware of? Since I have a 5700 XT, could this be an AMD-specific oddity? What is the correct way here, so I can be sure it also works on NVIDIA?
This has nothing to do with the sampler type. The connection between the texture object and the texture sampler is the texture unit: the texture object must be bound to a texture unit, and that texture unit's number must be set on the texture sampler uniform.
In GLSL, almost everything is initialized to 0 (or 0.0) by default, so the default binding point is 0. If the texture is bound to texture unit 0 (GL_TEXTURE0), it is not necessary to set the texture sampler uniform, since it is already 0.
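That said, the zero default is easy to break the moment a second texture unit appears, so making the link explicit is cheap insurance. A minimal sketch (the program variable name is an assumption):

glUseProgram(program);
GLint samplerLoc = glGetUniformLocation(program, "sampler_unit");
glUniform1i(samplerLoc, 0);                  // sampler reads from texture unit 0
glActiveTexture(GL_TEXTURE0);                // select texture unit 0
glBindTexture(GL_TEXTURE_2D_ARRAY, m_texture_id);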

Empty (white) framebuffer - shadow mapping

See EDIT since the first part of the problem is solved.
I am trying to replicate the shadow mapping demo from http://learnopengl.com/#!Advanced-Lighting/Shadows/Shadow-Mapping with my own framework, but I do not get any shadows. The first significant problem is that my depth map is not generated correctly. I have debugged and double-checked each line without success; maybe another pair of eyes will have more success.
See the screenshot (top left, 5th row - the image is completely white):
Since the first render pass does not seem to be working, I will focus on that. By the way, the objects are centered at (0, 0, 0). The following code is used for the first render pass:
/// 1. render target is the depth map
glViewport(0, 0, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32);
m_frameBufferObject.bind(); // set the depth map as render target
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/// place the camera where the light is positioned and render the scene
math::Matrix4D l_lightViewMatrix = math::Matrix4D::lookAt(m_light_p->getPosition(), math::Vector3D(0, 0, 0), math::Vector3D(0, 1, 0));
const math::Matrix4D& l_orthographicLightMatrix_r = m_light_p->getShadowInformation().getProjectionMatrix();
math::Matrix4D lightSpaceMatrix = l_orthographicLightMatrix_r * l_lightViewMatrix;
m_depthMapShader_p->bind();
m_depthMapShader_p->setUniformMat4("lightSpaceMatrix", lightSpaceMatrix);
renderNodes();
m_depthMapShader_p->printShaderInfoLog();
m_depthMapShader_p->unbind();
m_frameBufferObject.unbind();
I have tested that my view matrix and projection matrix generation delivers exactly the same results as GLM (the OpenGL math library). My orthographic matrix is defined by:
left = -10.0f
right = 10.0f
bottom = -10.0f
top = 10.0f
near = -1.0f
far = 7.5f
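Expressed with GLM (which, as stated above, produces identical results), this projection corresponds to:

#include <glm/gtc/matrix_transform.hpp>

// light frustum from the values listed above: left, right, bottom, top, near, far
glm::mat4 lightProjection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, -1.0f, 7.5f);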
The initialization of the framebuffer object and the texture is as follows:
// - Create depth texture
glGenTextures(1, &m_shadowTextureBuffer_u32);
glBindTexture(GL_TEXTURE_2D, m_shadowTextureBuffer_u32);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
m_frameBufferObject.bind();
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_shadowTextureBuffer_u32, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
    fprintf(stderr, "Error on building shadow framebuffer\n");
    exit(EXIT_FAILURE);
}
m_frameBufferObject.unbind();
The fragment and vertex shaders look like this:
#version 430
// Fragment shader for rendering the depth values to a texture.
out vec4 gl_FragColor;
void main()
{
    gl_FragColor = vec4(gl_FragCoord.z);
}
#version 430
// Vertex shader for rendering the depth values to a texture.
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix;
uniform mat4 ml_matrix;
uniform mat4 lightSpaceMatrix;
void main()
{
    gl_Position = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);
}
EDIT:
After some sleep, I have found a little error in my renderer, and the shader now draws a "nice" depth map.
However, it looks like the texture mapping (depth comparison) is in the same coordinate system, and the second render pass is still not correct:
The vertex and fragment shaders for the second render pass look like this:
#version 430
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix = mat4(1.0);
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
uniform mat4 lightSpaceMatrix = mat4(1.0);
out VS_OUT
{
    vec4 color;
    vec2 texture_coordinates;
    vec3 normal;
    vec3 tangent;
    vec3 binormal;
    vec3 worldPos;
    vec4 shadowProj;
    flat int materialIdOut;
} vs_out;

void main()
{
    vs_out.color = color;
    vs_out.texture_coordinates = uv;
    mat3 normalMatrix = transpose(inverse(mat3(ml_matrix)));
    vs_out.normal = normalize(normalMatrix * normalize(normal));
    vs_out.tangent = normalize(normalMatrix * normalize(tangent));
    vs_out.binormal = normalize(normalMatrix * normalize(cross(normal, tangent)));
    vs_out.worldPos = (ml_matrix * vec4(position, 1)).xyz;
    vs_out.materialIdOut = materialId;
    vs_out.shadowProj = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);
    gl_Position = (pr_matrix * vw_matrix * ml_matrix) * vec4(position, 1.0);
}
and
#version 430
#define MAX_NUM_TEXTURES 5
#define MAX_NUM_MATERIALS 12
struct SMaterial
{
    vec3 m_ambient_v3;
    vec3 m_diffuse_v3;
    vec3 m_specular_v3;
    float m_shininess_f32;
    int m_textureIds[MAX_NUM_TEXTURES];
};

in VS_OUT
{
    vec4 color;
    vec2 texture_coordinates;
    vec3 normal;
    vec3 tangent;
    vec3 binormal;
    vec3 worldPos;
    vec4 shadowProj;
    flat int materialIdOut;
} fs_in;
uniform vec3 cameraPos;
uniform mat4 ml_matrix;
uniform mat4 vw_matrix;
uniform sampler2D texSlots[32];
uniform SMaterial material[MAX_NUM_MATERIALS];
uniform SLight light;
out vec4 gl_FragColor;
float shadowCalculation(vec4 fragPosLightSpace)
{
    // perform perspective divide
    vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
    // Transform to [0,1] range
    projCoords = projCoords * vec3(0.5) + vec3(0.5);
    // Get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
    float closestDepth = texture(texSlots[31], projCoords.xy).r;
    // Get depth of current fragment from light's perspective
    float currentDepth = projCoords.z;
    // Check whether current frag pos is in shadow
    float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
    return shadow;
}

void main()
{
    if ( (fs_in.materialIdOut >= 0) && (fs_in.materialIdOut < MAX_NUM_MATERIALS) )
    {
        int ambientTextureId = material[fs_in.materialIdOut].m_textureIds[0];
        int diffuseTextureId = material[fs_in.materialIdOut].m_textureIds[1];
        int specularTextureId = material[fs_in.materialIdOut].m_textureIds[2];
        int alphaTextureId = material[fs_in.materialIdOut].m_textureIds[3];
        int bumpTextureId = material[fs_in.materialIdOut].m_textureIds[4];
        vec3 diffTexColor = vec3(0.6, 0.6, 0.6);
        if ((diffuseTextureId >= 0) && (32 > diffuseTextureId))
        {
            diffTexColor = texture(texSlots[diffuseTextureId], fs_in.texture_coordinates).rgb;
        }
        // Calculate shadow
        float shadow = 1.0 - shadowCalculation(fs_in.shadowProj);
        gl_FragColor = vec4(diffTexColor, 1.0) * vec4(shadow, shadow, shadow, 1.0);
    }
    else
    {
        gl_FragColor = vec4(fs_in.normal, 1.0);
    }
}
In my experience a depth map is pretty much always completely white, because a distance of more than 1 unit from the light already makes that pixel white. If your whole scene is further than 1 unit away, the whole map is white.
To render the map like they show in the tutorial, you either need your scene to be really small or you need to perform an operation on your depth map. I always like to check my maps by dividing their depth values by the camera's zFar distance. Try to find the value that gives you the best contrast.
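For illustration, a debug fragment shader along these lines might look like this (an untested sketch; the depthMap, uv and depth_scale names are assumptions):

#version 430
in vec2 uv;
uniform sampler2D depthMap;
uniform float depth_scale; // e.g. the camera's zFar; tune until you see contrast
out vec4 fragColor;

void main()
{
    float depth = texture(depthMap, uv).r;
    fragColor = vec4(vec3(depth / depth_scale), 1.0);
}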

GLSL: How to show normals with a geometry shader?

I have this vertex shader:
#version 330 core
layout(location = 0) in vec3 VertexPosition;
layout(location = 1) in vec2 VertexUV;
layout(location = 2) in vec3 VertexNormal;
out VS_GS_VERTEX
{
    vec2 UV;
    vec3 vs_worldpos;
    vec3 vs_normal;
} vertex_out;

uniform mat4 proj_matrix;
uniform mat4 model_matrix;

void main(void)
{
    gl_Normal = VertexNormal;
    gl_Position = proj_matrix * vec4(VertexPosition, 1.0);
    vertex_out.UV = VertexUV; //VertexPosition.xy;
    vertex_out.vs_worldpos = gl_Position.xyz;
    vertex_out.vs_normal = mat3(model_matrix) * gl_Normal;
}
and this fragment shader:
#version 330 core
in GS_FS_VERTEX
{
    vec2 UV;
    vec3 vs_worldpos;
    vec3 vs_normal;
} vertex_in;
// Values that stay constant for the whole mesh.
uniform sampler2D sampler0;
uniform sampler2D sampler1;
uniform sampler2D sampler2;
uniform sampler2D sampler3;
//uniform sampler2D alphamap0;
uniform sampler2D alphamap1;
uniform sampler2D alphamap2;
uniform sampler2D alphamap3;
uniform int tex_count;
uniform vec4 color_ambient = vec4(0.75, 0.75, 0.75, 1.0);
uniform vec4 color_diffuse = vec4(0.25, 0.25, 0.25, 1.0);
//uniform vec4 color_specular = vec4(1.0, 1.0, 1.0, 1.0);
uniform vec4 color_specular = vec4(0.1, 0.1, 0.1, 0.25);
uniform float shininess = 5.0f;
uniform vec3 light_position = vec3(12.0f, 32.0f, 560.0f);
void main(){
    vec3 light_direction = normalize(light_position - vertex_in.vs_worldpos);
    vec3 normal = normalize(vertex_in.vs_normal);
    vec3 half_vector = normalize(light_direction + normalize(vertex_in.vs_worldpos));
    float diffuse = max(0.0, dot(normal, light_direction));
    float specular = pow(max(0.0, dot(vertex_in.vs_normal, half_vector)), shininess);
    gl_FragColor = texture( sampler0, vertex_in.UV ) * color_ambient + diffuse * color_diffuse + specular * color_specular;
    // http://www.opengl.org/wiki/Texture_Combiners
    // GL_MODULATE = *
    // GL_INTERPOLATE Blend tex0 and tex1 based on a blending factor = mix(texel0, texel1, BlendFactor)
    // GL_INTERPOLATE Blend tex0 and tex1 based on alpha of tex0 = mix(texel0, texel1, texel0.a)
    // GL_ADD = clamp(texel0 + texel1, 0.0, 1.0)
    if (tex_count > 0){
        vec4 temp = texture( sampler1, vertex_in.UV );
        vec4 amap = texture( alphamap1, vertex_in.UV);
        gl_FragColor = mix(gl_FragColor, temp, amap.a);
    }
    if (tex_count > 1){
        vec4 temp = texture( sampler2, vertex_in.UV );
        vec4 amap = texture( alphamap2, vertex_in.UV);
        gl_FragColor = mix(gl_FragColor, temp, amap.a);
    }
    if (tex_count > 2){
        vec4 temp = texture( sampler3, vertex_in.UV );
        vec4 amap = texture( alphamap3, vertex_in.UV);
        gl_FragColor = mix(gl_FragColor, temp, amap.a);
    }
}
It takes an indexed GL_TRIANGLE_STRIP as input:
glBindBuffer(GL_ARRAY_BUFFER, tMt.vertex_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_POSITION, 3, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(0);
{ chunk tex position }
glBindBuffer(GL_ARRAY_BUFFER, chunkTexPositionBO);
glVertexAttribPointer(VERTEX_LAYOUT_TEX_UV, 2, GL_FLOAT, false, 0, pointer(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, tMt.normal_buf_id[cx, cy]);
glVertexAttribPointer(VERTEX_LAYOUT_NORMAL, 3, GL_FLOAT, true, 0, pointer(0));
glEnableVertexAttribArray(2);
{ index buffer }
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, chunkIndexBO);
for i := 0 to tMt.texCount - 1 do begin
    bt := tMt.texture_buf_id[cx, cy][i];
    if bt = nil then
        break;
    glUniform1i(proj_tex_count_loc, i);
    glActiveTexture(GL_TEXTURE0 + i);
    glBindTexture(GL_TEXTURE_2D, bt.id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    if i > 0 then begin
        // this time, use blending:
        glActiveTexture(GL_TEXTURE4 + 1);
        glBindTexture(GL_TEXTURE_2D, tMt.alphamaps[cx, cy][i - 1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    end;
end;
glDrawElements(GL_TRIANGLE_STRIP, length(chunkIndexArr), GL_UNSIGNED_SHORT, nil);
The code works as intended, except that I'm not sure my normals are arranged properly: they were stored as bytes (converted to GLfloat as b / 0xFF), the xyz coordinates were reordered, and some probably need negation.
Can someone show me a geometry shader that displays the normals as lines, as shown at http://blogs.agi.com/insight3d/index.php/2008/10/23/geometry-shader-for-debugging-normals/? (That shader does not work at all; it seems the out/in data is lost between the vertex and fragment shaders.)
P.S. I'm not sure I did everything properly (I am just starting with OpenGL and GLSL), so any suggestions are also appreciated.
Edit:
I made a simple geometry shader from examples:
// This is a very simple pass-through geometry shader
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 145) out;

in VS_GS_VERTEX
{
    vec2 UV;
    vec3 vs_worldpos;
    vec3 vs_normal;
} vertex_in[];

out GS_FS_VERTEX
{
    vec2 UV;
    vec3 vs_worldpos;
    vec3 vs_normal;
} vertex_out;

uniform float uNormalsLength = 0.5;

void main()
{
    int i;
    // Loop over the input vertices
    for (i = 0; i < gl_in.length(); i++)
    {
        vertex_out.UV = vertex_in[i].UV;
        vertex_out.vs_worldpos = vertex_in[i].vs_worldpos;
        vertex_out.vs_normal = vertex_in[i].vs_normal;
        // Copy the input position to the output
        gl_Position = gl_PositionIn[i];
        EmitVertex();
        gl_Position = gl_ModelViewProjectionMatrix * (gl_PositionIn[i] + (vec4(vertex_in[i].vs_normal, 0) * uNormalsLength));
        gl_FrontColor = vec4(0.0, 0.0, 0.0, 1.0); //gl_FrontColorIn[i];
        EmitVertex();
    }
    // End the primitive. This is not strictly necessary
    // and is only here for illustrative purposes.
    EndPrimitive();
}
but I don't know where it gets gl_ModelViewProjectionMatrix from (it seems deprecated), and the result looks awful; it seems everything, including the normals, is stripped. The picture is in glPolygonMode(GL_FRONT, GL_LINE) mode, and the textures are also trying to map onto those lines.
As it seems, you're doing it all in a single pass, and you actually emit six vertices per incoming triangle. This is not what you want.
Either do it in two passes, i.e. one pass for the mesh and the other for the normals, or try to emit the original triangle plus a degenerate triangle per normal. For simplicity I'd go for the two-pass version:
Inside your render loop:
    render the terrain
    if and only if debug geometry is to be rendered:
        enable your debug normals shader
        render the terrain mesh a second time, passing POINTS to the vertex shader
To make this work, you'll need a second program object, made up like in the blog post you linked to: a simple pass-through vertex shader, the following geometry shader, and a fragment shader for coloring the lines representing the normals.
The vertex and fragment shaders should be no problem. Assuming you have a smoothed mesh, i.e. actual averaged vertex normals, you can simply pass in points and emit lines.
#version 330 core
// assuming you have vertex normals, you need to render a vertex
// only a single time. with any other prim type, you may render
// the same normal multiple times
layout (points) in;
// Geometry shaders can only output points, line strips or triangle
// strips by definition. you output a single line per vertex. therefore,
// the maximum number of vertices per line_strip is 2. This is effectively
// the same as rendering distinct line segments.
layout (line_strip, max_vertices = 2) out;

in vec3 vs_normal[];

uniform float normal_scale = 0.5; // don't forget: this is the default value!
/* if you're never going to change the normal_scale, consider simply putting a
   constant there instead:
   const float normal_scale = 0.5;
*/

void main()
{
    // we simply transform and emit the incoming vertex - this is v0 of our
    // line segment
    vec4 v0 = gl_in[0].gl_Position;
    gl_Position = gl_ModelViewProjectionMatrix * v0;
    EmitVertex();
    // we calculate v1 of our line segment
    vec4 v1 = v0 + vec4(vs_normal[0] * normal_scale, 0);
    gl_Position = gl_ModelViewProjectionMatrix * v1;
    EmitVertex();
    EndPrimitive();
}
Warning: Untested code!
This is probably as simple as it gets. Add a uniform to your fragment shader so you can color your normals as you like, or simply output a constant color.
Note: This code still uses gl_ModelViewProjectionMatrix. If you're writing GL core code, please consider replacing legacy GL constructs, like the matrix stack, with your own stuff!
Note 2: Your geometry shader is not what is usually referred to as a pass-through shader. First, you do processing on the incoming data that is more than just assigning incoming values to outgoing values. Second, how can it be a pass-through shader if you generate geometry? Pass-through means you don't do anything other than pass incoming values to the next shader stage.
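For completeness, the two companion stages could look like this (an untested sketch: the attribute locations follow the question's vertex layout, the vs_normal output matches the geometry shader's input, and the transform is deliberately left to the geometry shader):

#version 330 core
layout(location = 0) in vec3 VertexPosition;
layout(location = 2) in vec3 VertexNormal;

out vec3 vs_normal;

void main(void)
{
    // pass everything through untransformed; the geometry shader
    // applies the projection when it emits the line segment
    vs_normal = VertexNormal;
    gl_Position = vec4(VertexPosition, 1.0);
}

and

#version 330 core
uniform vec4 line_color = vec4(1.0, 1.0, 0.0, 1.0); // pick any debug color

out vec4 frag_color;

void main()
{
    frag_color = line_color;
}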

Sampler 2D Alpha Value Stays at 1

I create a texture for use in a 2D sampler as a displacement map for a mesh of tessellated terrain. Using the passed-in vertex coordinates, I have a smoothly interpolated value of the patch corners for subsequent vertices. When I use the height value from the sampler, however, all I get is a flat plane. When I multiply that value by a hundred, the height of the plane increases by around a hundred, leading me to believe the alpha value is constantly one.
Here is the GLSL evaluation shader and the texture setup.
#version 430
layout(triangles, equal_spacing, ccw) in;
uniform mat4 camera;
uniform mat4 model;
uniform sampler2D terrain;
//uniform float lod_factor;
uniform float size;
in vec4 WorldPos_ES_in[];
in vec2 TexCoord_ES_in[];
in vec3 Normal_ES_in[];
out vec4 WorldPos_FS_in;
out vec2 TexCoord_FS_in;
out vec3 Normal_FS_in;
vec3 interpolate3D(vec3, vec3, vec3);
vec2 interpolate2D(vec2, vec2, vec2);
void main()
{
    // Interpolate the attributes of the output vertex using the barycentric coordinates
    TexCoord_FS_in = interpolate2D(TexCoord_ES_in[0], TexCoord_ES_in[1], TexCoord_ES_in[2]);
    Normal_FS_in = interpolate3D(Normal_ES_in[0], Normal_ES_in[1], Normal_ES_in[2]);
    Normal_FS_in = normalize(Normal_FS_in);
    WorldPos_FS_in = vec4(interpolate3D(WorldPos_ES_in[0].xyz, WorldPos_ES_in[1].xyz, WorldPos_ES_in[2].xyz), 1);
    vec2 position = WorldPos_FS_in.xz;
    float Displacement = texture(terrain, position / size).a;
    //gl_Position = camera * model * WorldPos_FS_in;
    gl_Position = camera * model * vec4(WorldPos_FS_in.x, Displacement, WorldPos_FS_in.z, 1.0);
}

vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2)
{
    return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2;
}

vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2)
{
    return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2;
}
and
glActiveTexture(GL_TEXTURE1);
GLuint tex2 = createTerrainMap();
glBindTexture(GL_TEXTURE_2D, tex2);
shader->setUniform("terrain", 1);
static GLuint createTerrainMap(){
    GLuint texName;
    glGenTextures(1, &texName);
    glBindTexture(GL_TEXTURE_2D, texName);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, size+1, size+1, 0, GL_RGBA, GL_FLOAT, terrainM);
    return texName;
}
terrainM is declared as
GLfloat terrain[size+1][size+1][4];
As mentioned, using the vertex coordinates yields the correct result but defeats the purpose of the displacement map. In addition, I use another texture from a file as GL_TEXTURE1, sampled with TexCoord_ES_in, which is used in the fragment shader and applied correctly. Any idea what is causing the flat plane instead of a displaced value?
The tessellation evaluation stage only provides barycentric coordinates within each patch, with no data about the patch's position in the overall mesh, so using the interpolated position data directly is useless here. In my situation, where the world coordinates run from 0 to an arbitrary amount, a separate set of UV coordinates is needed to look up the correct height.
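Concretely, the fix is to replace the world-space lookup in the evaluation shader's main() with the interpolated texture coordinates (a sketch, assuming TexCoord_ES_in spans [0,1] across the terrain):

// instead of: float Displacement = texture(terrain, position / size).a;
float Displacement = texture(terrain, TexCoord_FS_in).a;
gl_Position = camera * model * vec4(WorldPos_FS_in.x, Displacement, WorldPos_FS_in.z, 1.0);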

Alternate gl_FragColor values from two textures in GLSL

I have two textures, cloud and hill, each 512 x 512 in size, and I intend to produce a gl_FragColor output that takes its pixel values alternately from the two textures: the 1st pixel of gl_FragColor from the 1st pixel of the 1st texture, the 2nd pixel from the 2nd pixel of the 2nd texture, the 3rd pixel from the 3rd pixel of the 1st texture, and so on. Here is my fragment shader code:
uniform sampler2D tex0;
uniform sampler2D tex1;

void main() {
    vec4 cloud = texture2D(tex0, gl_TexCoord[0].st);
    vec4 hill = texture2D(tex1, gl_TexCoord[0].st);
    for (int i = 0; i < 512; i++) {
        for (int j = 0; j < 512; j++) {
            if (j % 2 == 0)
                gl_FragColor = cloud;
            else
                gl_FragColor = hill;
        }
    }
}
Here's the texture unit setup:
t1 = loadTexture("pY.raw", 512, 512);
t2 = loadTexture("pZ.raw", 512, 512);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, t2);
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, t1);
glEnable(GL_TEXTURE_2D);
And here is the uniform setup:
glUseProgram(program);
GLuint t1Location = glGetUniformLocation(program, "tex0");
GLuint t2Location = glGetUniformLocation(program, "tex1");
glUniform1i(t1Location, 0);
glUniform1i(t2Location, 1);
The problem is, the output of the program is only the hill texture, and I don't know how to fix this. Any suggestions?
You don't need to do any iteration in your shader. The pixel shader is called once for every pixel of your object. Instead, use gl_TexCoord[0] to get the current texture coordinates. Your code should look something like this:
uniform sampler2D tex0;
uniform sampler2D tex1;

void main()
{
    vec4 cloud = texture2D(tex0, gl_TexCoord[0].st);
    vec4 hill = texture2D(tex1, gl_TexCoord[0].st);
    if (int(gl_TexCoord[0].x * 512) % 2 == 0)
        gl_FragColor = cloud;
    else
        gl_FragColor = hill;
}
This one should work, even with older OpenGL:
#ifdef GL_ES
precision highp float;
#endif

uniform sampler2D tex0;
uniform sampler2D tex1;

void main(void)
{
    if ((gl_FragCoord / 32.0 - vec4(ivec4(gl_FragCoord / 32.0))).x < 0.5)
        gl_FragColor = texture2D(tex0, gl_FragCoord.xy / 512.0);
    else
        gl_FragColor = texture2D(tex1, gl_FragCoord.xy / 512.0);
}
You can try it out with WebGL at: http://www.iquilezles.org/apps/shadertoy/