Binding an empty texture to an image unit produces a segfault - C++

I want to draw to the texture via a compute shader. For this I am trying to bind the texture to an image unit:
glBindImageTexture(0, texture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
But when I run the code it produces a segfault.
This is how I create the empty texture:
unsigned int texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, nullptr);
glBindTexture(GL_TEXTURE_2D, 0);
I used the following tutorial for compute shaders: https://antongerdelan.net/opengl/compute.html

According to https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glBindImageTexture.xhtml, glBindImageTexture is only available in GL versions greater than or equal to 4.2. After updating the GL version of my project, everything works fine now.
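For anyone hitting the same crash: a segfault here is consistent with calling a function pointer the loader could not resolve because the context was too old. A minimal sketch of requesting a new-enough context, assuming GLFW is used for window/context creation as in the linked tutorial (window size and title are placeholders):
// Request a 4.3 core context so glBindImageTexture (4.2) and compute shaders (4.3) are available.
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow(800, 600, "compute", nullptr, nullptr);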

Related

Passing array of single precision floats to GLSL shader using glBindImageTexture

I want to pass a 2D array of single precision floats to a GLSL shader. I am trying to use glBindImageTexture for this purpose.
Before the main OpenGL render loop I define the texture and fill it with random values using
glGenTextures(1,@imagetexture);
glBindTexture(GL_TEXTURE_2D,imagetexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
setlength(arrayvalues,imwidth*imheight);
for loop:=0 to imwidth*imheight-1 do arrayvalues[loop]:=random;
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, imwidth, imheight, 0, GL_R32F, GL_FLOAT, arrayvalues);
setlength(arrayvalues,0);
glBindImageTexture( 0, //unit
imagetexture, //texture
0, //level
false, //layered
0, //layer
GL_READ_WRITE, //access
GL_R32F); //format
The shader code is the bare minimum for testing:
#version 430
layout (binding=0, r32f) uniform image2D my_image;
void main()
{
    float f = imageLoad( my_image, ivec2(gl_FragCoord.xy) ).r;
    //f = 0.5;
    gl_FragColor = vec4(f, f, f, 1.0);
}
The code all runs, but the output is always black. Any ideas what I have missed here?
The same surrounding code works fine for other shaders that do not rely on glBindImageTexture, so I am assuming that part is all OK.
If I uncomment the f = 0.5 line, I get a grey image, so everything else seems to be working.
Working code below. The issue was that I was not originally calling glBindTexture before glBindImageTexture, and that I was passing GL_R32F rather than GL_RED as the pixel format.
glGenTextures(1,@imagetexture);
glBindTexture(GL_TEXTURE_2D,imagetexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
setlength(arrayvalues,imwidth*imheight);
for loop:=0 to imwidth*imheight-1 do arrayvalues[loop]:=random;
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, imwidth, imheight, 0, GL_RED, GL_FLOAT, arrayvalues);
setlength(arrayvalues,0);
glBindImageTexture( 0, //unit
imagetexture, //texture
0, //level
false, //layered
0, //layer
GL_READ_WRITE, //access
GL_R32F); //format
If you use glGenTextures to create a texture name, that texture is considered unused until you first bind it. And I don't mean glBindImageTexture; I mean glBindTexture.
glBindImageTexture does not bind the texture to a texture unit; it binds the texture to an image unit for use as an image. It does not bind it to the context for manipulation purposes. So calls to functions like glTexParameter don't care about what you've bound to image units.
You have to bind the texture with glBindTexture, then establish its information like the size and so forth, and after that, bind it for use as an image.
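Put together, a minimal C++ sketch of that order, reusing the question's imwidth/imheight and the image unit binding from above (note the pixel transfer format is GL_RED; GL_R32F is only valid as the internal format):
GLuint imagetexture = 0;
glGenTextures(1, &imagetexture);
glBindTexture(GL_TEXTURE_2D, imagetexture);   // first bind: the name becomes a real 2D texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, imwidth, imheight, 0, GL_RED, GL_FLOAT, nullptr); // allocate storage
glBindImageTexture(0, imagetexture, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32F); // only now bind it as an image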

Why is glCheckFramebufferStatus always 36054 for GL_DEPTH_COMPONENT on OpenGL ES 3.1+?

I render the color (GL_COLOR_ATTACHMENT0) and depth (GL_DEPTH_ATTACHMENT) of my scene into an FBO, which works fine on PC with OpenGL 4+.
But on my smartphone with OpenGL ES 3.1+, glCheckFramebufferStatus(GL_FRAMEBUFFER) always returns status = 36054 for the depth attachment.
glGenTextures(1, &m_textCol);
glBindTexture(GL_TEXTURE_2D, m_textCol);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, wth, hgt, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, 0);
glGenTextures(1, &m_textDepth);
glBindTexture(GL_TEXTURE_2D, m_textDepth);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, wth, hgt, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glBindTexture(GL_TEXTURE_2D, 0);
GLuint framebuffer;
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_textCol, 0);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER); // always 36053 -> ok
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_textDepth, 0);
status = glCheckFramebufferStatus(GL_FRAMEBUFFER); // always 36054 -> not ok
I also tried it with
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, wth, hgt, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, NULL);
but no change.
Does anyone have an idea what I'm doing wrong? Thanks.
Status 36054 is GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT.
If we look at Section 9.4 of the OpenGL ES 3.1 specification, it lists the following valid internalFormat values for depth attachments:
DEPTH_COMPONENT16
DEPTH_COMPONENT24
DEPTH_COMPONENT32F
DEPTH24_STENCIL8
DEPTH32F_STENCIL8
Note that the last two also contain a stencil part. You can use these for the depth component and simply ignore the stencil part if you do not need it. In particular, DEPTH24_STENCIL8 has a good chance of being widely supported.
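For example, swapping the unsized GL_DEPTH_COMPONENT for a sized internal format should make the attachment complete; a sketch using the question's variables (glTexStorage2D would work just as well):
glBindTexture(GL_TEXTURE_2D, m_textDepth);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, wth, hgt, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
// or, for a floating-point depth buffer:
// glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, wth, hgt, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glBindTexture(GL_TEXTURE_2D, 0);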

Separate stencil with FBO (NVidia)

I'm trying to have an FBO with separate depth and stencil buffers.
I know that NVidia GPUs historically only supported packed depth/stencil.
However, I stumbled on the ARB_texture_stencil8 extension and wonder how to use it with an FBO.
This code gives GL error 1159 on the call glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_TEXTURE_2D, fboStencilTexture, 0):
static GLuint fboId, rboDepth, rboStencil, TEXTURE_WIDTH, TEXTURE_HEIGHT;
if (!fboColorTexture) {
    fboDepthTexture = fboId = rboDepth = rboStencil = TEXTURE_WIDTH = TEXTURE_HEIGHT = 0; // vid restart?
    glGenTextures(1, &fboColorTexture);
    glBindTexture(GL_TEXTURE_2D, fboColorTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
if (!fboDepthTexture) {
    glGenTextures(1, &fboDepthTexture);
    glBindTexture(GL_TEXTURE_2D, fboDepthTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
if (!fboStencilTexture) {
    glGenTextures(1, &fboStencilTexture);
    glBindTexture(GL_TEXTURE_2D, fboStencilTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
GLuint curWidth = r_virtualResolution.GetFloat() * glConfig.vidWidth, curHeight = r_virtualResolution.GetFloat() * glConfig.vidHeight;
if (curWidth != TEXTURE_WIDTH || curHeight != TEXTURE_HEIGHT) {
    TEXTURE_WIDTH = curWidth;
    TEXTURE_HEIGHT = curHeight;
    glBindTexture(GL_TEXTURE_2D, fboColorTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB5_A1, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); // NULL means reserve texture memory, but texels are undefined
    glBindTexture(GL_TEXTURE_2D, fboDepthTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
    glBindTexture(GL_TEXTURE_2D, fboStencilTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_STENCIL_INDEX8, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_STENCIL_INDEX, GL_FLOAT, 0);
}
//-------------------------
if (!fboId) {
    // create a framebuffer object; you need to delete it when the program exits
    glGenFramebuffersEXT(1, &fboId);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
    // attach a texture to the FBO color attachment point
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, fboColorTexture, 0);
    // attach textures to the depth and stencil attachment points
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, fboDepthTexture, 0);
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_TEXTURE_2D, fboStencilTexture, 0);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
}
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
The purpose of ARB_texture_stencil8 is to permit you to use stencil-only formats as textures. That is, reading from them in shaders.
This does not mean that you can separate your stencil and depth buffers. The idea behind stencil-8 textures is that you would generate their data by either copying the stencil portion of a depth/stencil texture, or you would render without a depth buffer entirely.
So you cannot use this extension to guarantee that you can render to separate depth and stencil images. That's still hardware dependent.
Also, if you're going to use new features like ARB_texture_stencil8, you shouldn't be combining them with old EXT_framebuffer_object APIs. So stop using glFramebufferTexture2DEXT and start using glFramebufferTexture.
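If separate depth and stencil images are not strictly required, the portable route is a single packed depth/stencil texture attached through the core API; a rough sketch (width and height stand in for the code's TEXTURE_WIDTH/TEXTURE_HEIGHT):
GLuint depthStencilTex = 0;
glGenTextures(1, &depthStencilTex);
glBindTexture(GL_TEXTURE_2D, depthStencilTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH24_STENCIL8, width, height, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, nullptr);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
// core entry point (GL 3.2+): one call covers both the depth and stencil aspects
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, depthStencilTex, 0);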

Is a texture required to be bound to be sampled?

I was under the impression that if you set your sampler uniforms to the correct texture unit, it doesn't matter whether the currently bound texture target is 0 or not. For example,
glActiveTexture(GL_TEXTURE1);
glGenTextures(1, &mytexture);
glBindTexture(GL_TEXTURE_2D, mytexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, my_data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0); // This is the line I'm wondering about
Sometime later when drawing ...
glUniform1i(glGetUniformLocation(program, "mysampler"), 1);
//draw_stuff
Unfortunately, the screen is all black unless I keep GL_TEXTURE_2D bound to mytexture. Is it illegal to sample when GL_TEXTURE_2D is bound to 0?
Exactly. Think of each GL_TEXTUREn unit as a slot holding one binding per texture target type (GL_TEXTURE_2D, GL_TEXTURE_3D, etc.). By activating GL_TEXTURE1 and binding a texture to GL_TEXTURE_2D, you're telling the driver that the 2D texture in slot 1 is "mytexture".
Then you need to pass this information to your shader as well:
glUniform1i(glGetUniformLocation(program, "mysampler"), 1);
This simply tells the sampler2D in your shader that it should look for a GL_TEXTURE_2D in slot 1. If you unbind the texture, it will have nothing to sample from.
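In other words, the unit must still hold the texture at draw time. A minimal sketch of the draw-time state, reusing the question's names:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, mytexture);   // rebind here if it was unbound after setup
glUseProgram(program);
glUniform1i(glGetUniformLocation(program, "mysampler"), 1); // sampler2D reads from unit 1
// ... issue the draw call ...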

OpenGL ES Depth Buffer rendering (iOS simulator VS real device)

When rendering the depth buffer with the iOS simulator (see Getting the true z value from the depth buffer), all is fine.
But rendering on a real device gives a bad result: the depth is rendered with only a few discrete values (there is no smooth gradient), as if an image were displayed with only a 256-value color range.
Here is the code for the fbo generation:
glGenFramebuffers(1, &sceneFBO);
glGenTextures(2, textures_scene);
glBindTexture(GL_TEXTURE_2D, textures_scene[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, widthResolution, heightResolution, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, textures_scene[1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, widthResolution, heightResolution, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
//TODO: GL_DEPTH_COMPONENT cannot be GL_UNSIGNED_BYTE ?
glBindFramebuffer(GL_FRAMEBUFFER, sceneFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textures_scene[0], 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, textures_scene[1], 0);
It seems that the only working type value for the GL_DEPTH_ATTACHMENT texture is GL_UNSIGNED_INT, but that alone is not enough to render the depth correctly...
PS: for performance reasons I don't want to render the scene again into GL_COLOR_ATTACHMENT0 with the z-values just to get a correct depth, nor do I want to use OpenGL ES 3.0 or the new Metal (I need iPhone 4s support).
Any idea?
Reading back the depth buffer is not supported in ES 1 or 2. See the answer to this question: glReadPixels doesn't read depth buffer values on iOS
It seems that it's not possible to use GL_UNSIGNED_BYTE as the type parameter together with GL_DEPTH_COMPONENT as the internalformat; only GL_UNSIGNED_SHORT and GL_UNSIGNED_INT are allowed according to the OES_depth_texture extension spec: https://www.khronos.org/registry/gles/extensions/OES/OES_depth_texture.txt
I finally put the z value in the alpha channel of the scene (I don't use transparency for this FBO):
color.w = (gl_Position.z - near) / (far - near)
A little MacGyver, but it does the job.