How can I use texelFetch() to get elements in a sampler1D texture?

I stored a 1D Gaussian filter kernel in a sampler1D texture and sent it to a fragment shader. When doing the vertical and horizontal blur, I use texelFetch() to get the weights from the sampler1D map, but it didn't work. How is the data stored in a sampler1D?
in vec2 texCoord;

uniform int BlurFilterWidth;
uniform sampler1D BlurFilterTex;
uniform sampler2D OriginalImageTex;
uniform sampler2D ThresholdImageTex;
uniform sampler2D HorizBlurImageTex;
uniform sampler2D VertBlurImageTex;

layout (location = 0) out vec4 FragColor;

void HorizBlurImage()
{
    ivec2 pix = ivec2(gl_FragCoord.xy);
    int TexSize = textureSize(BlurFilterTex, 0);
    vec4 sum = texelFetch(ThresholdImageTex, pix, 0) * texelFetch(BlurFilterTex, 0, 0).r;
    for (int i = 1; i < BlurFilterWidth; i++) {
        sum += texelFetchOffset(ThresholdImageTex, pix, 0, ivec2(i, 0)) * texelFetch(BlurFilterTex, i*TexSize, 0).r;
        sum += texelFetchOffset(ThresholdImageTex, pix, 0, ivec2(-i, 0)) * texelFetch(BlurFilterTex, i*TexSize, 0).r;
    }
    FragColor = sum;
}

In a GL_TEXTURE_1D the texels are organized in a one-dimensional array, and the texture coordinate passed to texelFetch is simply the integer index of the element in that array.
So why do you multiply the index i by TexSize? The texture coordinate is simply i:
texelFetch(BlurFilterTex, i*TexSize, 0).r;  // wrong: reads far past the filter's extent
texelFetch(BlurFilterTex, i, 0).r;          // correct
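Putting it together, the horizontal pass could look like this (a sketch of the corrected loop; note that strict GLSL requires the offset argument of texelFetchOffset to be a constant expression, so here the loop index is added to the texel coordinate with plain texelFetch instead):

void HorizBlurImage()
{
    ivec2 pix = ivec2(gl_FragCoord.xy);
    // Center tap, weighted by the first filter element
    vec4 sum = texelFetch(ThresholdImageTex, pix, 0) * texelFetch(BlurFilterTex, 0, 0).r;
    for (int i = 1; i < BlurFilterWidth; i++) {
        // The 1D texture coordinate is just the element index i
        float weight = texelFetch(BlurFilterTex, i, 0).r;
        sum += texelFetch(ThresholdImageTex, pix + ivec2( i, 0), 0) * weight;
        sum += texelFetch(ThresholdImageTex, pix + ivec2(-i, 0), 0) * weight;
    }
    FragColor = sum;
}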

Related

GLSL shader to convert pixel colors as per the usecase

In the code below, I want to accept two arguments, colors and activeColor. The colors array contains the list of colors allowed to be drawn in the image; activeColor is the selected color which is yet to be painted on the image. I am using an HTML5 canvas to paint on top of the image and a WebGLShader to convert pixel colors per the use case.
While drawing (painting on the canvas) a color, my use case is to change any other colors present to activeColor.
I have written some basic logic below, but it's not syntactically right. Help needed.
colors: dynamic Float32Array of colors ex: [0,0,0,1, 1,1,1,1] represents black & white color array
activeColor: dynamic Float32Array of color ex: [0,0,0,1] represents black color
uniform sampler2D texture;
varying float colors;
varying float activeColor;
varying vec2 texCoord;

void main() {
    vec4 color = texture2D(texture, texCoord);
    for (int i = 0; i < colors.length; i += 3) {
        vec4 c = vec4(colors[i], colors[i+1], colors[i+2], colors[i+3]);
        if (color.a > 0 && color.rgb != c) {
            color.rgb = vec4(activeColor[0], activeColor[1], activeColor[2], activeColor[3]);
        }
    }
    gl_FragColor = color;
}
The code doesn't make a lot of sense
varying float colors;
varying float activeColor;
Those are type float so they only hold one value. They are not colors (vec4s), nor are they arrays.
colors.length
There is no such thing as someArray.length in GLSL. In GLSL you also can't pass variable-sized arrays; they must be a fixed size. Similarly, you can't pass arrays in as varyings.
It's not clear what you're trying to do. Your code appears to be trying to draw activeColor everywhere the image does not contain one of the colors in colors.
You could do something like this
#define MAX_COLORS 10

uniform sampler2D texture;
uniform vec4 colors[MAX_COLORS];
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;

void main() {
    vec4 color = texture2D(texture, texCoord);
    for (int i = 0; i < MAX_COLORS; ++i) {
        if (i >= numColors) {
            break;
        }
        vec4 c = colors[i];
        if (color.a > 0.0 && color.rgb != c.rgb) {
            color.rgb = activeColor.rgb;
            break;
        }
    }
    gl_FragColor = color;
}
There's a limit on the number of uniform vec4s you can have. It's between 29 and 4096, though looking at the stats you probably want to stay under 221.
It's more common to pass in arrays of data as textures.
#define MAX_COLORS 10

uniform sampler2D texture;
uniform sampler2D colors;  // texture holding colors
uniform vec2 colorsSize;   // size of the colors texture in pixels
uniform int numColors;
uniform vec4 activeColor;
varying vec2 texCoord;

vec4 getColor(int i) {
    vec2 pixelCoord = vec2(mod(float(i), colorsSize.x),
                           floor(float(i) / colorsSize.x));
    return texture2D(colors, (pixelCoord + 0.5) / colorsSize);
}

void main() {
    vec4 color = texture2D(texture, texCoord);
    for (int i = 0; i < MAX_COLORS; ++i) {
        if (i >= numColors) {
            break;
        }
        vec4 c = getColor(i);
        if (color.a > 0.0 && color.rgb != c.rgb) {
            color.rgb = activeColor.rgb;
            break;
        }
    }
    gl_FragColor = color;
}
Now you can pass in the colors as a texture.
You might find these tutorials useful.
You might also find this technique semi-related to your problem (replacing colors); there is a live example here and another explanation of the technique here.

OpenGL Deferred Pixelated Lighting

I'm working on a three-pass deferred lighting system for a voxel game; however, I am having problems with pixelated lighting and ambient occlusion.
The first stage renders the color, position and normal of each pixel on the screen into separate textures. This part works correctly.
The second shader calculates an ambient occlusion value for each pixel on the screen and renders that to a texture. This part doesn't work correctly: the raw occlusion data is pixelated.
The third shader uses the color, position, normal and occlusion textures to render the game scene onto the screen. The lighting in this stage is also pixelated.
The SSAO (2nd pass) fragment shader comes from the www.LearnOpenGL.com tutorial for Screen Space Ambient Occlusion:
out float FragColor;

layout (binding = 0) uniform sampler2D gPosition; // World space position
layout (binding = 1) uniform sampler2D gNormal;   // Normalised normal values
layout (binding = 2) uniform sampler2D texNoise;

uniform vec3 samples[64]; // 64 random precalculated vectors (-0.1 to 0.1 magnitude)
uniform mat4 projection;

float kernelSize = 64.0;
float radius = 1.5;

in vec2 TexCoords;

const vec2 noiseScale = vec2(1600.0/4.0, 900.0/4.0);

void main()
{
    vec4 n = texture(gNormal, TexCoords);
    // The alpha value of the normal is used to determine whether to apply SSAO to this pixel
    if (int(n.a) > 0)
    {
        vec3 normal = normalize(n.rgb);
        vec3 fragPos = texture(gPosition, TexCoords).xyz;
        vec3 randomVec = normalize(texture(texNoise, TexCoords * noiseScale).xyz);

        // Gram-Schmidt: build a tangent basis that orients the sample kernel
        // along the normal, randomly rotated by randomVec (from www.learnopengl.com)
        vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
        vec3 bitangent = cross(normal, tangent);
        mat3 TBN = mat3(tangent, bitangent, normal);

        float occlusion = 0.0;
        // Test 64 points around the pixel
        for (int i = 0; i < kernelSize; i++)
        {
            vec3 sam = fragPos + TBN * samples[i] * radius;
            vec4 offset = projection * vec4(sam, 1.0);
            offset.xyz = (offset.xyz / offset.w) * 0.5 + 0.5;

            // If the normals are different, increase the occlusion value
            float l = length(normal - texture(gNormal, offset.xy).rgb);
            occlusion += l * 0.3;
        }
        occlusion = 1.0 - (occlusion / kernelSize);
        FragColor = occlusion;
    }
}
The lighting and final fragment shader:
out vec4 FragColor;

in vec2 texCoords;

layout (binding = 0) uniform sampler2D gColor;    // Colour of each pixel
layout (binding = 1) uniform sampler2D gPosition; // World-space position of each pixel
layout (binding = 2) uniform sampler2D gNormal;   // Normalised normal of each pixel
layout (binding = 3) uniform sampler2D gSSAO;     // Red channel contains occlusion value of each pixel

// Each of these textures is 300 wide and 2 tall.
// The first row contains light positions. The second row contains light colours.
uniform sampler2D playerLightData; // Directional lights
uniform sampler2D mapLightData;    // Spherical lights

uniform float worldBrightness;

// Amount of player and map lights
uniform float playerLights;
uniform float mapLights;

void main()
{
    vec4 n = texture(gNormal, texCoords);
    // BlockData:  a = 4
    // ModelData:  a = 2
    // SkyboxData: a = 0
    // Don't do lighting calculations on the skybox
    if (int(n.a) > 0)
    {
        vec3 Normal = n.rgb;
        vec3 FragPos = texture(gPosition, texCoords).rgb;
        vec3 Albedo = texture(gColor, texCoords).rgb;

        vec3 lighting = Albedo * worldBrightness * texture(gSSAO, texCoords).r;

        for (int i = 0; i < playerLights; i++)
        {
            vec3 pos = texelFetch(playerLightData, ivec2(i, 0), 0).rgb;
            vec3 direction = pos - FragPos;
            float l = length(direction);
            if (l < 40.0)
            {
                // Direction of the light to the position
                vec3 spotDir = normalize(direction);
                // Angle of the cone of the light
                float angle = dot(spotDir, -normalize(texelFetch(playerLightData, ivec2(i, 1), 0).rgb));
                // Crop the cone
                if (angle >= 0.95)
                {
                    float fade = (angle - 0.95) * 40.0;
                    lighting += (40.0 - l) / 40.0 * max(dot(Normal, spotDir), 0.0) * Albedo * fade;
                }
            }
        }

        for (int i = 0; i < mapLights; i++)
        {
            // Compare this pixel's position with the light's position
            vec3 difference = texelFetch(mapLightData, ivec2(i, 0), 0).rgb - FragPos;
            float l = length(difference);
            if (l < 7.0)
            {
                lighting += (7.0 - l) / 7.0 * max(dot(Normal, normalize(difference)), 0.0) * Albedo * texelFetch(mapLightData, ivec2(i, 1), 0).rgb;
            }
        }

        FragColor = vec4(lighting, 1.0);
    }
    else
    {
        FragColor = vec4(texture(gColor, texCoords).rgb, 1.0);
    }
}
The size of each block face in the game is 1x1 (in world-space units). I have tried splitting these faces up into smaller triangles, however there wasn't much visible difference.
How can I increase the resolution of the lighting and SSAO data to reduce these pixelated artifacts? Thank you in advance.
Good news! Thanks to some_rand over at the GameDev Stack Exchange, I was able to fix this by switching the internal format of my position buffer from GL_RGBA16F to GL_RGBA32F.
Here is his answer.

OPEN GL - Change Vertex position from a texture color

I have a plane, made from a NURBS surface, with many vertices so it can form a curved surface depending on the vertex positions (control points).
I bind the plane object with two different textures: one is the color texture to be displayed on the object, the other is a height map (black and white) which has to alter the Y positions of the plane's vertices depending on how white the corresponding texture coordinate is.
I know the problem is in my shaders. I do not have much experience with OpenGL.
Here is the shader.vert that I use:
attribute vec3 aVertexPosition;
attribute vec3 aVertexNormal;
attribute vec2 aTextureCoord;

uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
uniform mat4 uNMatrix;

varying vec2 vTextureCoord;

uniform sampler2D uSampler2;
uniform float heightScale;

void main() {
    // change texture coordinates
    vec2 texInver = vec2(1.0, -1.0);
    vTextureCoord = aTextureCoord * texInver;

    // change vertex position
    vec4 filter = texture2D(uSampler2, vTextureCoord);
    float offset = filter.r;
    vec3 inc = vec3(0.0, offset, 0.0);
    gl_Position = uPMatrix * uMVMatrix * vec4(aVertexPosition + inc, 1.0);
}
Since the image is black and white, R = G = B. That is why I only check filter.r.
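As an aside, the heightScale uniform declared in the shader is never used. If the intent was to control the displacement amount, the fetch would look something like this (a sketch, assuming heightScale is meant to scale the 0-1 height value):

// Hypothetical variant: scale the height-map value by heightScale
vec4 filter = texture2D(uSampler2, vTextureCoord);
float offset = filter.r * heightScale;
gl_Position = uPMatrix * uMVMatrix * vec4(aVertexPosition + vec3(0.0, offset, 0.0), 1.0);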
And my shader.frag is:
#ifdef GL_ES
precision highp float;
#endif

varying vec2 vTextureCoord;
uniform sampler2D uSampler;

void main() {
    gl_FragColor = texture2D(uSampler, vTextureCoord);
}
This is the height map (.jpg):
The result I get is a plane where every vertex is incremented by 1 in the Y coordinate.
The result I expect is for SOME vertices of the plane to be incremented by a 0-1 value in the Y coordinate.
I was forgetting to update the number of the object's vertices.
That was the problem; after I did that, it was solved.

Empty (white) framebuffer - shadow mapping

See the EDIT below, since the first part of the problem is solved.
I am trying to replicate the shadow mapping demo from http://learnopengl.com/#!Advanced-Lighting/Shadows/Shadow-Mapping with my own framework, but interestingly I do not get any shadows. The first significant problem is that my depth map is not working correctly. I have debugged and double-checked each line without success. Maybe another pair of eyes will have more success.
See the screenshot (top left, 5th row): the image is completely white.
I will write about the second render pass, since it seems that the first one is not working. By the way, the objects are centered at (0, 0, 0). The following code is used for the first render pass:
/// 1. render target is the depth map
glViewport(0, 0, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32);
m_frameBufferObject.bind(); // set the depth map as render target
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
/// place the camera where the light is positioned and render the scene
math::Matrix4D l_lightViewMatrix = math::Matrix4D::lookAt(m_light_p->getPosition(), math::Vector3D(0, 0, 0), math::Vector3D(0, 1, 0));
const math::Matrix4D& l_orthographicLightMatrix_r = m_light_p->getShadowInformation().getProjectionMatrix();
math::Matrix4D lightSpaceMatrix = l_orthographicLightMatrix_r * l_lightViewMatrix;
m_depthMapShader_p->bind();
m_depthMapShader_p->setUniformMat4("lightSpaceMatrix", lightSpaceMatrix);
renderNodes();
m_depthMapShader_p->printShaderInfoLog();
m_depthMapShader_p->unbind();
m_frameBufferObject.unbind();
I have tested that the view matrix and projection matrix generation deliver exactly the same results as GLM (a math library for OpenGL). My orthographic matrix is defined by the following parameters (the resulting matrix is sketched after the list):
left = -10.0f
right = 10.0f
bottom = -10.0f
top = 10.0f
near = -1.0f
far = 7.5f
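For reference, a glm::ortho-compatible matrix built from these parameters would look like this (a GLSL sketch for illustration, not code from the question; GLSL mat4 constructors are column-major, so each vec4 below is one column):

// near = -1 places the near plane one unit behind the light's position
const float l = -10.0, r = 10.0, b = -10.0, t = 10.0, n = -1.0, f = 7.5;
const mat4 ortho = mat4(
    vec4(2.0/(r - l), 0.0,         0.0,          0.0),
    vec4(0.0,         2.0/(t - b), 0.0,          0.0),
    vec4(0.0,         0.0,        -2.0/(f - n),  0.0),
    vec4(-(r + l)/(r - l), -(t + b)/(t - b), -(f + n)/(f - n), 1.0));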
The initialization of the framebuffer object and the texture is as follows:
// - Create depth texture
glGenTextures(1, &m_shadowTextureBuffer_u32);
glBindTexture(GL_TEXTURE_2D, m_shadowTextureBuffer_u32);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_MAP_WIDTH_u32, SHADOW_MAP_HEIGHT_u32, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
m_frameBufferObject.bind();
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_shadowTextureBuffer_u32, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
    fprintf(stderr, "Error on building shadow framebuffer\n");
    exit(EXIT_FAILURE);
}
m_frameBufferObject.unbind();
The fragment and vertex shaders look like this:
#version 430

// Fragment shader for rendering the depth values to a texture.
out vec4 gl_FragColor;

void main()
{
    gl_FragColor = vec4(gl_FragCoord.z);
}
#version 430
// Vertex shader for rendering the depth values to a texture.
in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix;
uniform mat4 ml_matrix;
uniform mat4 lightSpaceMatrix;
void main()
{
    gl_Position = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);
}
EDIT:
After some sleep, I found a little error in my renderer, and the shader now draws a "nice" depth map.
However, it looks like the texture mapping (depth comparison) is in the same coordinate system, but the second render pass is still not correct.
The vertex and fragment shaders for the second render pass look like this:
#version 430

in layout (location = 0) vec3 position;
in layout (location = 1) vec4 color;
in layout (location = 2) vec3 normal;
in layout (location = 3) vec2 uv;
in layout (location = 4) vec3 tangent;
in layout (location = 5) int materialId;

uniform mat4 pr_matrix = mat4(1.0);
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
uniform mat4 lightSpaceMatrix = mat4(1.0);

out VS_OUT
{
    vec4 color;
    vec2 texture_coordinates;
    vec3 normal;
    vec3 tangent;
    vec3 binormal;
    vec3 worldPos;
    vec4 shadowProj;
    flat int materialIdOut;
} vs_out;

void main()
{
    vs_out.color = color;
    vs_out.texture_coordinates = uv;

    mat3 normalMatrix = transpose(inverse(mat3(ml_matrix)));
    vs_out.normal = normalize(normalMatrix * normalize(normal));
    vs_out.tangent = normalize(normalMatrix * normalize(tangent));
    vs_out.binormal = normalize(normalMatrix * normalize(cross(normal, tangent)));

    vs_out.worldPos = (ml_matrix * vec4(position, 1.0)).xyz;
    vs_out.materialIdOut = materialId;
    vs_out.shadowProj = lightSpaceMatrix * ml_matrix * vec4(position, 1.0);

    gl_Position = (pr_matrix * vw_matrix * ml_matrix) * vec4(position, 1.0);
}
and
#version 430

#define MAX_NUM_TEXTURES 5
#define MAX_NUM_MATERIALS 12

struct SMaterial
{
    vec3  m_ambient_v3;
    vec3  m_diffuse_v3;
    vec3  m_specular_v3;
    float m_shininess_f32;
    int   m_textureIds[MAX_NUM_TEXTURES];
};

in VS_OUT
{
    vec4 color;
    vec2 texture_coordinates;
    vec3 normal;
    vec3 tangent;
    vec3 binormal;
    vec3 worldPos;
    vec4 shadowProj;
    flat int materialIdOut;
} fs_in;

uniform vec3 cameraPos;
uniform mat4 ml_matrix;
uniform mat4 vw_matrix;
uniform sampler2D texSlots[32];
uniform SMaterial material[MAX_NUM_MATERIALS];
uniform SLight light;

out vec4 gl_FragColor;

float shadowCalculation(vec4 fragPosLightSpace)
{
    // Perform perspective divide
    vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
    // Transform to [0,1] range
    projCoords = projCoords * vec3(0.5) + vec3(0.5);
    // Get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
    float closestDepth = texture(texSlots[31], projCoords.xy).r;
    // Get depth of current fragment from light's perspective
    float currentDepth = projCoords.z;
    // Check whether current frag pos is in shadow
    float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
    return shadow;
}

void main()
{
    if ((fs_in.materialIdOut >= 0) && (fs_in.materialIdOut < MAX_NUM_MATERIALS))
    {
        int ambientTextureId  = material[fs_in.materialIdOut].m_textureIds[0];
        int diffuseTextureId  = material[fs_in.materialIdOut].m_textureIds[1];
        int specularTextureId = material[fs_in.materialIdOut].m_textureIds[2];
        int alphaTextureId    = material[fs_in.materialIdOut].m_textureIds[3];
        int bumpTextureId     = material[fs_in.materialIdOut].m_textureIds[4];

        vec3 diffTexColor = vec3(0.6, 0.6, 0.6);
        if ((diffuseTextureId >= 0) && (32 > diffuseTextureId))
        {
            diffTexColor = texture(texSlots[diffuseTextureId], fs_in.texture_coordinates).rgb;
        }

        // Calculate shadow
        float shadow = 1.0 - shadowCalculation(fs_in.shadowProj);
        gl_FragColor = vec4(diffTexColor, 1.0) * vec4(shadow, shadow, shadow, 1.0);
    }
    else
    {
        gl_FragColor = vec4(fs_in.normal, 1.0);
    }
}
In my experience a depth map is pretty much always completely white, because a distance of more than 1 away from the light already makes that pixel white. If your whole scene is further than 1 unit from the light, then the whole map is white.
To render the map like they show in the tutorial, you either need your scene to be really small or you need to perform an operation on your depth map. I always like to check my maps by dividing their depth values by the camera's zFar distance. Try to find the value at which you see the best contrast, as in the sketch below.
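A minimal sketch of that visualization, assuming a full-screen pass that provides texCoords, and with depthMap and zFar as hypothetical uniform names for the depth texture and far-plane distance:

#version 430

in vec2 texCoords;
out vec4 fragColor;

uniform sampler2D depthMap; // depth attachment written by the first pass
uniform float zFar;         // far-plane distance of the light's projection

void main()
{
    // Rescale the stored depth so the scene's depth range becomes visible
    float depth = texture(depthMap, texCoords).r / zFar;
    fragColor = vec4(vec3(depth), 1.0);
}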

2d terrain generation using shader

I am trying to render a random terrain using shader scripts.
Create function:
tex = new Texture(Gdx.files.internal("ground.png"));
top = new Texture(Gdx.files.internal("top.png"));
ShaderProgram.pedantic = false;
shader = new ShaderProgram(VERT, FRAG);
shader.begin();
shader.setUniformi("u_top", 1);
shader.end();
top.bind(1);
Gdx.gl.glActiveTexture(GL10.GL_TEXTURE0);
batch = new SpriteBatch(1000, shader);
batch.setShader(shader);
Render function:
batch.begin();
for (int i = 0; i < 4; i++)
    batch.draw(tex, i * 256 * Initiate.getScale(), 0,
               256 * Initiate.getScale(), 256 * Initiate.getScale());
batch.end();
Vertex shader
attribute vec4 a_position;
attribute vec4 a_color;
attribute vec2 a_texCoord0;

uniform mat4 u_projTrans;

varying vec4 vColor;
varying vec2 vTexCoord;

void main()
{
    vTexCoord = a_texCoord0;
    gl_Position = u_projTrans * a_position;
}
Fragment shader
#ifdef GL_ES
#define LOWP lowp
precision mediump float;
#else
#define LOWP
#endif

varying LOWP vec4 vColor;
varying vec2 vTexCoord;

uniform sampler2D u_texture1;
uniform sampler2D u_texture2;

// Note: slope, frq, mfrq and nfrq are not defined in the posted snippet;
// they control the amplitude and frequencies of the terrain curve.
uniform float slope;
uniform float frq;
uniform float mfrq;
uniform float nfrq;

void main()
{
    vec4 texColor;

    // Calculate the top edge of the terrain for this column
    float clamp = 0.5 + slope*(sin(vTexCoord.x*mfrq)/mfrq + sin(vTexCoord.x*frq)/frq + sin(vTexCoord.x*nfrq)/nfrq);

    // If below the edge, draw the ground texture
    if (vTexCoord.y > clamp) {
        texColor = texture2D(u_texture1, vTexCoord);
    }
    // else map the coordinate for the top texture and draw that
    else {
        float tempy = 16.0*(vTexCoord.y + 0.0625 - clamp);
        texColor = texture2D(u_texture2, vec2(vTexCoord.x, tempy));
    }

    gl_FragColor = texColor;
}
Output
So the questions are:
How do I add a texture at the top of this terrain?
Is there a simpler way of rendering such a terrain?
You can do this in the fragment shader by calculating two parameters: the distance along the terrain surface, and the depth perpendicular to the surface. These can be passed in as attributes or calculated in the vertex shader, depending on how you generate your terrain. Then just sample your texture based on those two coordinates, tiling along the first coordinate, as in the sketch below.
The distance along the surface is just the sum of the lengths of the edges that form the surface.
The depth can be calculated as the distance from a vertex to the surface edge; interpolation will then give you an approximate depth across the whole polygon. Quads should give you better interpolation than triangles.
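A minimal fragment-shader sketch of the idea, assuming the vertex shader passes the two parameters as a varying vec2 vSurfaceCoord (x = distance along the surface, y = depth below it) and that the textures use repeat wrapping; vSurfaceCoord and u_edgeThickness are hypothetical names:

varying vec2 vSurfaceCoord;    // x: distance along the surface, y: depth below it
uniform sampler2D u_top;       // tiling texture for the terrain's top edge
uniform sampler2D u_texture1;  // ground texture used deeper inside the terrain
uniform float u_edgeThickness; // world-space thickness of the top band

void main()
{
    if (vSurfaceCoord.y < u_edgeThickness) {
        // Inside the top band: tile along the surface, stretch across its thickness
        gl_FragColor = texture2D(u_top, vec2(vSurfaceCoord.x, vSurfaceCoord.y / u_edgeThickness));
    } else {
        // Deeper down: plain ground texture (tiled by the repeat wrap mode)
        gl_FragColor = texture2D(u_texture1, vSurfaceCoord);
    }
}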