OpenGL (with GLSL) using glTexBuffer and texelFetch

I am trying to render a rectangle in desktop OpenGL, but the internal format used in glTexBuffer(...) and the corresponding texelFetch(...) code are not working for me. The primitive itself renders correctly; only the texture buffer part needs fixing. Below is the relevant code snippet:
unsigned char texData[16] =
{
    255, 0,   0,   0,    // Red
    0,   255, 0,   255,  // Green
    0,   0,   255, 255,  // Blue
    255, 0,   255, 255,  // Pink
};

glGenBuffers(2, texBuffObj);
glBindBuffer(GL_TEXTURE_BUFFER, texBuffObj[0]);
glBufferData(GL_TEXTURE_BUFFER, sizeof(texData), texData, GL_DYNAMIC_DRAW);

glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_BUFFER, textureID);
glTexBuffer(GL_TEXTURE_BUFFER, GL_RGBA8UI, texBuffObj[0]);
Fragment shader snippet:
#version 330 core

uniform usamplerBuffer samplerTexBuffer;

out vec4 color;

in vec2 vs_texCoord;
in vec3 vert_Color;

void main()
{
    int offset = 8; // 0:RED 4:GREEN 8:BLUE 12:PINK
    vec4 colBuff;
    colBuff = texelFetch(samplerTexBuffer, offset);
    color = colBuff;
}
The required rendering is such that with:
offset value 0, the primitive color is 255, 0, 0, 0 (red)
offset value 4, the primitive color is 0, 255, 0, 255 (green)
offset value 8, the primitive color is 0, 0, 255, 255 (blue)
offset value 12, the primitive color is 255, 0, 255, 255 (pink)
What corrections are required?

texelFetch takes texel coordinates, not a byte offset into the buffer. Since your texels are 4 bytes wide, you want to retrieve them using indices 0, 1, 2, 3 rather than 0, 4, 8, 12.
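A minimal sketch of the corrected lookup (reusing the question's names; note also that texelFetch on a usamplerBuffer returns a uvec4, which must be converted explicitly before it can be assigned to the vec4 output):

int index = 2;                                     // 0:RED 1:GREEN 2:BLUE 3:PINK
uvec4 texel = texelFetch(samplerTexBuffer, index); // integer texel from the GL_RGBA8UI buffer
color = vec4(texel) / 255.0;                       // map 0-255 components to the 0.0-1.0 range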

Related

OpenGL textures smaller than 4x4 in non-RGBA format are rendered malformed [duplicate]

I have a very simple program that maps a dummy red texture to a quad.
Here is the texture definition in C++:
struct DummyRGB8Texture2d
{
    uint8_t data[3*4];
    int width;
    int height;
};

DummyRGB8Texture2d myTexture
{
    {
        255, 0, 0,
        255, 0, 0,
        255, 0, 0,
        255, 0, 0
    },
    2u,
    2u
};
This is how I set up the texture:
void SetupTexture()
{
    // allocate a texture on the default texture unit (GL_TEXTURE0):
    GL_CHECK(glCreateTextures(GL_TEXTURE_2D, 1, &m_texture));
    // allocate texture storage:
    GL_CHECK(glTextureStorage2D(m_texture, 1, GL_RGB8, myTexture.width, myTexture.height));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_WRAP_S, GL_REPEAT));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_WRAP_T, GL_REPEAT));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
    GL_CHECK(glTextureParameteri(m_texture, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
    // tell the shader that the sampler2D uniform uses the default texture unit (GL_TEXTURE0)
    GL_CHECK(glProgramUniform1i(m_program->Id(), /* location in shader */ 3, /* texture unit index */ 0));
    // bind the created texture to the specified target; this is necessary even with DSA
    GL_CHECK(glBindTexture(GL_TEXTURE_2D, m_texture));
    GL_CHECK(glGenerateMipmap(GL_TEXTURE_2D));
}
This is how I draw the texture to the quad:
void Draw()
{
    m_target->ClearTargetBuffers();
    m_program->MakeCurrent();
    // load the texture to the GPU:
    GL_CHECK(glTextureSubImage2D(m_texture, 0, 0, 0, myTexture.width, myTexture.height,
                                 GL_RGB, GL_UNSIGNED_BYTE, myTexture.data));
    GL_CHECK(glBindVertexArray(m_vao));
    GL_CHECK(glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(VideoQuadElementArray.size()), GL_UNSIGNED_INT, 0));
    m_target->SwapTargetBuffers();
}
The result (screenshot not reproduced here): the texture does not appear red, and I can't figure out why. Also, if I change the texture internal format to RGBA / RGBA8 and add another element to each row of the texture data array, I get a nice red texture.
In case it's relevant, here are my vertex attributes and my (very simple) shaders:
struct VideoQuadVertex
{
    glm::vec3 vertex;
    glm::vec2 uv;
};

std::array<VideoQuadVertex, 4> VideoQuadInterleavedArray
{
    VideoQuadVertex{ glm::vec3{ -0.25f, -0.25f, 0.5f }, glm::vec2{ 0.0f, 0.0f } },
    VideoQuadVertex{ glm::vec3{  0.25f, -0.25f, 0.5f }, glm::vec2{ 1.0f, 0.0f } },
    VideoQuadVertex{ glm::vec3{  0.25f,  0.25f, 0.5f }, glm::vec2{ 1.0f, 1.0f } },
    VideoQuadVertex{ glm::vec3{ -0.25f,  0.25f, 0.5f }, glm::vec2{ 0.0f, 1.0f } }
};
vertex setup:
void SetupVertexData()
{
    // create a VAO to hold all node rendering states, no need for binding:
    GL_CHECK(glCreateVertexArrays(1, &m_vao));
    // create vertex buffer objects for data and indices and initialize them:
    GL_CHECK(glCreateBuffers(static_cast<GLsizei>(m_vbo.size()), m_vbo.data()));
    // allocate memory for interleaved vertex attributes and transfer them to the GPU:
    GL_CHECK(glNamedBufferData(m_vbo[EVbo::Data],
                               VideoQuadInterleavedArray.size() * sizeof(VideoQuadVertex),
                               VideoQuadInterleavedArray.data(),
                               GL_STATIC_DRAW)); // trailing arguments assumed; the source line was cut off
    GL_CHECK(glVertexArrayAttribBinding(m_vao, 0, 0));
    GL_CHECK(glVertexArrayVertexBuffer(m_vao, 0, m_vbo[EVbo::Data], 0, sizeof(VideoQuadVertex)));
    // set up the index array:
    GL_CHECK(glNamedBufferData(m_vbo[EVbo::Element],
                               VideoQuadElementArray.size() * sizeof(GLuint),
                               VideoQuadElementArray.data(),
                               GL_STATIC_DRAW)); // trailing arguments assumed; the source line was cut off
    GL_CHECK(glVertexArrayElementBuffer(m_vao, m_vbo[EVbo::Element]));
    // enable the relevant attributes for this VAO and
    // specify their format and binding point:
    // vertices:
    GL_CHECK(glEnableVertexArrayAttrib(m_vao, 0 /* location in shader */));
    GL_CHECK(glVertexArrayAttribFormat(
        m_vao,
        0,                                // attribute location
        3,                                // number of components in each data member
        GL_FLOAT,                         // type of each component
        GL_FALSE,                         // should normalize
        offsetof(VideoQuadVertex, vertex) // offset from the beginning of the buffer
    ));
    // uvs (the original called glVertexAttribFormat here; the DSA variant taking m_vao is used for consistency):
    GL_CHECK(glEnableVertexArrayAttrib(m_vao, 1 /* location in shader */));
    GL_CHECK(glVertexArrayAttribFormat(
        m_vao,
        1,                                // attribute location
        2,                                // number of components in each data member
        GL_FLOAT,                         // type of each component
        GL_FALSE,                         // should normalize
        offsetof(VideoQuadVertex, uv)     // offset from the beginning of the buffer
    ));
    GL_CHECK(glVertexArrayAttribBinding(m_vao, 1, 0));
}
vertex shader:
layout(location = 0) in vec3 position;
layout(location = 1) in vec2 texture_coordinate;

out FragmentData
{
    vec2 uv;
} toFragment;

void main(void)
{
    toFragment.uv = texture_coordinate;
    gl_Position = vec4(position, 1.0f);
}
fragment shader:
in FragmentData
{
    vec2 uv;
} data;

out vec4 color;

layout(location = 3) uniform sampler2D tex_object;

void main()
{
    color = texture(tex_object, data.uv);
}
GL_UNPACK_ALIGNMENT specifies the alignment requirements for the start of each pixel row in memory. By default GL_UNPACK_ALIGNMENT is set to 4.
This means the length of each row of the texture data is assumed to be a multiple of 4 bytes (4*N bytes).
You specify a 2*2 texture with the data: 255, 0, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0
With GL_UNPACK_ALIGNMENT set to 4 this is interpreted as
        column 1      column 2      alignment
row 1:  255, 0, 0,    255, 0, 0,    255, 0,
row 2:  0, 255, 0,    0, undef, undef

So the texture is read as

        column 1      column 2
row 1:  red,          red,
row 2:  green,        RGB(0, ?, ?)
You have to set glPixelStorei(GL_UNPACK_ALIGNMENT, 1); before glTextureSubImage2D to read a tightly packed texture.
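For example, a minimal sketch of the fixed upload (reusing the names from the question):

glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows of 3-byte RGB texels are tightly packed
GL_CHECK(glTextureSubImage2D(m_texture, 0, 0, 0, myTexture.width, myTexture.height,
                             GL_RGB, GL_UNSIGNED_BYTE, myTexture.data));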
If you do not want to change GL_UNPACK_ALIGNMENT (the alignment remains set to 4), you must adjust the data as follows:
struct DummyRGB8Texture2d
{
    uint8_t data[8*2];
    int width;
    int height;
};

DummyRGB8Texture2d myTexture
{
    {
        255, 0, 0, 255, 0, 0, // row 1
        0, 0,                 // 2 bytes of alignment padding
        255, 0, 0, 255, 0, 0, // row 2
        0, 0                  // 2 bytes of alignment padding
    },
    2u,
    2u
};
See further:
Stack Overflow question: glPixelStorei(GL_UNPACK_ALIGNMENT, 1) Disadvantages?
Stack Overflow question: OpenGL GL_UNPACK_ALIGNMENT
Khronos OpenGL wiki: Common Mistakes - Texture upload and pixel reads

Store array of floats in texture & access the floats from the shader using texture coordinates

Edit: removed a lot of clutter and rephrased the question.
I have stored an array of floats in a texture, which I pass to my shader, using:
float simpleArray2D[4] = { 10.0f, 20.0f, 30.0f, 400.0f };
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 2, 2, 0, GL_RGB, GL_FLOAT, &simpleArray2D);
How do I access specific elements from the float array in the shader?
Here is the specific fragment shader code I've used to test it so far; it displays green when the value is the specified one (10.0f in this case) and red when it is not.
vec2 textureCoordinates = vec2(0.0f, 0.0f);
float testValueFloat = float(texture(floatArraySampler, textureCoordinates));
outColor = testValueFloat >= 10.0f ? vec4(0,1,0,1) : vec4(1,0,0,1); //Showed green
//outColor = testValueFloat >= 10.1f ? vec4(0,1,0,1) : vec4(1,0,0,1); //Showed red
In GLSL you can use texelFetch to get a texel from a texture by integral coordinates.
This means the texels of the texture can be addressed much like the elements of an array, by index:
ivec2 ij = ivec2(0, 0);
float testValueFloat = texelFetch(floatArraySampler, ij, 0).r;
But note, the array consists of 4 elements.
float simpleArray2D[4] = { 10.0f, 20.0f, 30.0f, 400.0f };
So the texture can be a 2x2 texture with one color channel (GL_RED)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, 2, 2, 0, GL_RED, GL_FLOAT, &simpleArray2D);
or a 1x1 texture with 4 color channels (GL_RGBA)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, 1, 1, 0, GL_RGBA, GL_FLOAT, &simpleArray2D);
but it can't be a 2x2 RGBA texture, because for this the array would have to have 16 elements.
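To illustrate how the indices map in the 2x2 GL_R32F case (a sketch assuming the question's floatArraySampler uniform, and that the first row of the uploaded data lands at texel row y = 0):

float v00 = texelFetch(floatArraySampler, ivec2(0, 0), 0).r; // 10.0f
float v10 = texelFetch(floatArraySampler, ivec2(1, 0), 0).r; // 20.0f
float v01 = texelFetch(floatArraySampler, ivec2(0, 1), 0).r; // 30.0f
float v11 = texelFetch(floatArraySampler, ivec2(1, 1), 0).r; // 400.0f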

Can I use a three-dimensional compute shader to write to a three-dimensional image?

I define a three-dimensional texture and dispatch the compute shader:
glTexStorage3D(GL_TEXTURE_3D, 1, GL_RGBA32F, SCREEN_WIDTH, SCREEN_HEIGHT, TEXTURE_DEPTH);
glBindImageTexture(0, m_Texture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);
glDispatchCompute(16, 16, 2);
compute shader
#version 450

layout(local_size_x = 32, local_size_y = 32, local_size_z = 2) in;
layout(binding = 0, rgba32f) uniform image3D Image;

void main()
{
    ivec3 position = ivec3(gl_GlobalInvocationID.xyz);
    vec4 color = vec4(gl_WorkGroupID / vec3(gl_NumWorkGroups), 1.0);
    imageStore(Image, position, color);
}
but the code doesn't work. I want to know whether the value of gl_GlobalInvocationID.z corresponds to the depth of the space.
I have solved this problem. The parameters of glDispatchCompute() are the work-group counts x, y, z; if (x * local_size_x) * (y * local_size_y) * (z * local_size_z) exceeds screensize.x * screensize.y, it cannot work, so I downsampled the texture resolution.
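An alternative to downsampling (a sketch, not part of the original answer) is to have every invocation guard itself against the actual image size, so that excess invocations simply return without writing:

void main()
{
    ivec3 position = ivec3(gl_GlobalInvocationID.xyz);
    // skip invocations that fall outside the image bounds
    if (any(greaterThanEqual(position, imageSize(Image))))
        return;
    vec4 color = vec4(gl_WorkGroupID / vec3(gl_NumWorkGroups), 1.0);
    imageStore(Image, position, color);
}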

Arrange color in fragment shader without texture coordinates depending on fragment position

I need to draw a rectangle in OpenGL ES 2.0, but to color it in the fragment shader. I will draw two triangles to represent the rectangle. This is similar to texture mapping, but without texture coordinates. Ideally, each fragment's position is calculated and mapped to an element of an array of 16 elements containing 0s and 1s (taking the position modulo 16 yields an index in the range 0 - 15). If the element at the corresponding index is 1, that pixel is colored using a specific color supplied to the fragment shader; otherwise it is not colored.
The problem is illustrated by a diagram (not reproduced here).
The array is passed to the fragment shader as a uniform, but it seems that it is not passed correctly. The following is the code where the uniform value is set:
void GeometryEngine::drawLineGeometry(QGLShaderProgram *program)
{
    glBindBuffer(GL_ARRAY_BUFFER, vboId);

    // Bind attribute position in vertex shader to the program
    int vertexLocation = program->attributeLocation("position");
    program->enableAttributeArray(vertexLocation);
    glVertexAttribPointer(vertexLocation, 3, GL_FLOAT, GL_FALSE, 0, 0);

    const int array[16] = {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0};
    int arrayLocation = program->attributeLocation("array");
    program->setUniformValueArray(arrayLocation, array, 16);

    // Draw triangles (6 points)
    glDrawArrays(GL_TRIANGLES, 0, 6);
}
Fragment shader:
uniform int array[16];

void main()
{
    gl_FragColor = vec4(array[0], 0.0, 0.0, 1.0);
    /*
    int index = int(mod(gl_FragCoord.x, 16.0));
    if (array[index] == 1)
        gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
    else
        gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
    */
}
The commented lines should produce the per-fragment coloring, but I cannot test that part because the array is not passed correctly: the rectangle is black instead of red (array[0] is 1, not 0).