OpenGL imageSize is always zero

I wrote a simple test case that reads the height of an image inside a compute shader and writes it to an SSBO. I've used the SSBO code before, so I know that part works. I used apitrace to inspect the state during the glDispatchCompute call, and I can see both the original texture and the image bound to the correct image unit. However, imageSize always returns zero: the output is all zeros, except for some leftover -1s at the end (NUM_INVOCATIONS / WORKGROUP_SIZE rounds down, so the last few entries are never written). No OpenGL errors are raised.
I based this test case on one of my earlier questions which included code to bind an SSBO to a compute shader (I use it here to get debug output from the compute shader).
class ComputeShaderWindow : public QOpenGLWindow {
public:
    void initializeGL() {
        // Create the OpenGL functions object
        gl = context()->versionFunctions<QOpenGLFunctions_4_3_Core>();
        m_compute_program = new QOpenGLShaderProgram(this);
        auto compute_shader_s = fs::readFile(
            "test_assets/example_compute_shader.comp");
        QImage img("test_assets/input/out.png");
        // Add the compute shader, then link and bind the program
        m_compute_program->addShaderFromSourceCode(QOpenGLShader::Compute,
                                                   compute_shader_s);
        m_compute_program->link();
        m_compute_program->bind();
        GLuint frame;
        // Create and bind the texture
        gl->glGenTextures(1, &frame);
        gl->glBindTexture(GL_TEXTURE_2D, frame);
        // Fill the texture with the image
        // (note: the GL_RGB8 internal format does not match the rgba8ui
        // image format declared in the shader; GL_RGB8 is not among the
        // formats supported for image load/store at all)
        gl->glTexImage2D(GL_TEXTURE_2D,
                         0,
                         GL_RGB8,
                         img.width(),
                         img.height(),
                         0,
                         GL_BGRA,
                         GL_UNSIGNED_BYTE,
                         img.bits());
        GLuint image_unit = 1;
        // Get the location of the image uniform
        GLint uniform_location = gl->glGetUniformLocation(
            m_compute_program->programId(),
            "video_frame");
        // Point the image uniform at image unit 1 (a unique value we choose)
        gl->glUniform1i(uniform_location, image_unit);
        // Bind level 0 of the texture to the image unit
        gl->glBindImageTexture(image_unit,
                               frame,
                               0,
                               GL_FALSE,
                               0,
                               GL_READ_ONLY,
                               GL_RGBA8UI);
        // We should only need the bit for shader image access,
        // but for the purpose of this example, I set all the bits
        // just to be safe
        gl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
        // SSBO setup to get debug output from the shader
        std::vector<GLfloat> default_values(NUM_INVOCATIONS, -1.0f);
        GLuint ssbo;
        gl->glGenBuffers(1, &ssbo);
        gl->glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
        gl->glBufferData(GL_SHADER_STORAGE_BUFFER,
                         NUM_INVOCATIONS * sizeof(GLfloat),
                         default_values.data(),
                         GL_STATIC_DRAW);
        gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
        gl->glDispatchCompute(NUM_INVOCATIONS / WORKGROUP_SIZE, 1, 1);
        // Make the shader's SSBO writes visible to the glMapBuffer read below
        // (GL_BUFFER_UPDATE_BARRIER_BIT is the bit that covers buffer mapping;
        // the original code used GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT here)
        gl->glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
        gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
        // Now read from the buffer so that we can check its values
        GLfloat* read_data = (GLfloat*) gl->glMapBuffer(GL_SHADER_STORAGE_BUFFER,
                                                        GL_READ_ONLY);
        for (int i = 0; i < NUM_INVOCATIONS; i++) {
            DEBUG(read_data[i]);
        }
        DEBUG("Done!");
        gl->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
        assert(gl->glGetError() == GL_NO_ERROR);
    }

    void resizeGL(int width, int height) {
    }

    void paintGL() {
    }

    void teardownGL() {
    }

private:
    QOpenGLFunctions_4_3_Core* gl;
    QOpenGLShaderProgram* m_compute_program;
    static constexpr int NUM_INVOCATIONS = 9000;
    static constexpr int WORKGROUP_SIZE = 128;
};
As for the compute shader:
#version 430 core
layout(local_size_x = 128) in;
layout(rgba8ui, binding = 1) readonly uniform uimage2D video_frame;
layout(std430, binding = 0) writeonly buffer SSBO {
    float data[];
};

void main() {
    uint ident = gl_GlobalInvocationID.x;
    // Write the height of the image into the buffer
    data[ident] = float(imageSize(video_frame).y);
}

Turns out I forgot the texture parameters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
No clue why that breaks imageSize() calls though. (The likely explanation: the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, so a texture with only mip level 0 defined is mipmap-incomplete. An incomplete texture bound to an image unit behaves as if nothing were bound, and imageSize on an invalid image then comes back as zero; formally the result is undefined.)
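For reference, here is a minimal sketch of two texture setups that avoid the completeness trap. The glTexStorage2D variant assumes a GL 4.2+ context, and the GL_BGRA_INTEGER upload format is only an illustration chosen to match the QImage layout used above:
// Option 1: keep glTexImage2D, and make the single-level texture complete
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); // only level 0 exists

// Option 2: immutable storage with exactly one level; the texture is always
// complete, and the sized format matches the shader's rgba8ui image format
gl->glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, img.width(), img.height());
gl->glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.width(), img.height(),
                    GL_BGRA_INTEGER, GL_UNSIGNED_BYTE, img.bits());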

Related

How to sample a mip level in glsl using textureLod?

How do I go about sampling a mip level in glsl using textureLod()?
From what I know, the mipmap LOD can only be "explicitly" selected in the vertex shader (although I'm not sure whether that's supported in version 420, as most of the documentation is outdated). Second, you need to define the mipmap level-of-detail range by setting texture parameters such as GL_TEXTURE_MAX_LEVEL and GL_TEXTURE_BASE_LEVEL.
In my code, I define these texture parameters after calling glCompressedTexImage2D:
glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAX_LEVEL, 9);
glTexParameteri(texture_type, GL_TEXTURE_BASE_LEVEL, 0);
// Note: this overwrites the mag filter set above, and GL_LINEAR_MIPMAP_LINEAR
// is not a valid mag filter (only GL_NEAREST and GL_LINEAR are), so this call
// raises GL_INVALID_ENUM and has no effect
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap_s);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap_t);
Next, I use this code when binding each texture sampler (types such as the albedo map, etc.):
glActiveTexture(GL_TEXTURE0 + unit); // Set active texture type
glBindTexture(GL_TEXTURE_2D, id); // Bind the texture object
Finally, here is my shader code:
Vertex:
#version 420 core
in vec3 texcoord; // (input declaration missing in the original excerpt)
out vec3 _texcoord;
out vec4 _albedo_lod;
uniform sampler2D albedo; // Albedo and specular map
void main()
{
    _texcoord = texcoord;
    _albedo_lod = textureLod(albedo, vec2(_texcoord.st), 2.0);
    // (gl_Position assignment omitted in the original excerpt)
}
With the attached fragment shader:
#version 420 core
layout(location = 0) out vec4 gAlbedo; // Albedo texel colour
in vec3 _texcoord;
in vec4 _albedo_lod;
void main()
{
    gAlbedo = _albedo_lod; // Assign albedo
}
Now, for some reason, no matter what LOD value I pass in, the result always looks the same (screenshot omitted), and it appears to be the very last mip level. Bear in mind I'm packing 10 mip levels into the .dds file. When, however, I manually set the base mip level via the texture parameter GL_TEXTURE_BASE_LEVEL, it works.
So, all in all: why won't it sample the correct mip level in GLSL using textureLod? Is this somehow deprecated in version 420?
EDIT: Here is the code for loading the dds file:
// This function imports a dds file and returns the OpenGL texture id;
// basic image info is passed back through the reference parameters
inline GLuint LoadDds(std::vector<std::string> file, size_t &img_width, size_t &img_height, size_t &num_mips, GLvoid* data, GLint wrap_s, GLint wrap_t, GLint min_filter, GLint mag_filter, size_t texture_type, bool anistropic_filtering)
{
    // Create one OpenGL texture
    GLuint textureID;
    glGenTextures(1, &textureID);
    // "Bind" the newly created texture: all future texture functions will modify this texture
    glBindTexture(texture_type, textureID);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    for (unsigned int i = 0; i < file.size(); i++) // For each image...
    {
        FILE *fp;
        unsigned char header[124];
        unsigned int height;
        unsigned int width;
        unsigned int linearSize;
        unsigned int mipMapCount;
        unsigned int fourCC;
        unsigned int components;
        unsigned int format;
        unsigned int bufsize;
        unsigned char* buffer;
        /* try to open the file */
        errno_t err;
        err = fopen_s(&fp, file[i].c_str(), "rb");
        if (fp == NULL)
            return 0;
        /* verify the type of file */
        char filecode[4];
        fread(filecode, 1, 4, fp);
        if (strncmp(filecode, "DDS ", 4) != 0)
        {
            fclose(fp);
            return 0;
        }
        /* get the surface desc */
        fread(&header, 124, 1, fp);
        height = *(unsigned int*)&(header[8]);
        width = *(unsigned int*)&(header[12]);
        linearSize = *(unsigned int*)&(header[16]);
        mipMapCount = *(unsigned int*)&(header[24]);
        fourCC = *(unsigned int*)&(header[80]);
        bufsize = mipMapCount > 1 ? linearSize * 2 : linearSize;
        buffer = (unsigned char*)malloc(bufsize * sizeof(unsigned char));
        fread(buffer, 1, bufsize, fp);
        /* close the file pointer */
        fclose(fp);
        components = (fourCC == FOURCC_DXT1) ? 3 : 4;
        switch (fourCC)
        {
        case FOURCC_DXT1:
            format = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
            break;
        case FOURCC_DXT3:
            format = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
            break;
        case FOURCC_DXT5:
            format = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
            break;
        default:
            free(buffer);
            return 0;
        }
        unsigned int blockSize = (format == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) ? 8 : 16;
        unsigned int offset = 0;
        for (unsigned int level = 0; level < mipMapCount && (width || height); ++level)
        {
            unsigned int size = ((width + 3) / 4) * ((height + 3) / 4) * blockSize;
            glCompressedTexImage2D(texture_type != GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_2D : GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, level, format, width, height,
                                   0, size, buffer + offset);
            if ((level < 1) && (i < 1)) // Only assign output values from the first image
            {
                img_width = width; // Assign texture width
                img_height = height; // Assign texture height
                data = buffer; // Note: 'data' is passed by value, so the caller
                               // never sees this, and 'buffer' is freed below
                num_mips = mipMapCount; // Assign number of mips
            }
            offset += size;
            width /= 2;
            height /= 2;
        }
        if (anistropic_filtering) // If anisotropic filtering is requested...
        {
            GLfloat f_largest; // Maximum supported anisotropy level
            glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &f_largest); // Query the maximum anisotropy
            glTexParameterf(texture_type, GL_TEXTURE_MAX_ANISOTROPY_EXT, f_largest); // Apply filter to texture
        }
        if (!mipMapCount)
            glGenerateMipmap(texture_type); // Generate mipmaps if the file had none
        free(buffer); // Free buffer from memory
    }
    // Parameters
    glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
    glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
    // Note: GL_GENERATE_MIPMAP is a legacy (pre-3.0) texture parameter and is
    // not accepted in a core profile; use glGenerateMipmap instead
    glTexParameteri(texture_type, GL_GENERATE_MIPMAP, GL_TRUE);
    glTexParameteri(texture_type, GL_TEXTURE_MAX_LEVEL, 9);
    glTexParameteri(texture_type, GL_TEXTURE_BASE_LEVEL, 0);
    // Note: as above, GL_LINEAR_MIPMAP_LINEAR is not a valid mag filter,
    // and this overwrites the mag filter already set
    glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap_s);
    glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap_t);
    // Set additional cubemap parameters
    if (texture_type == GL_TEXTURE_CUBE_MAP)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, wrap_s);
    return textureID; // Return texture id
}
(The question also included an image of each mipmap level being generated using NVIDIA's dds plugin; image omitted here.)
Since you sample per vertex, this seems to be exactly the expected behavior.
You say the mip level parameter has no influence, but from what I can see the difference should only become noticeable once the pixel density drops below the vertex density and values start averaging out. That might never happen if you don't store the entire mip chain, since the lowest resolution you do store might still have enough definition (I can't really tell from the screen capture, and I can only guess the model's tessellation).
Since you're generating the mip chain manually, though, you could easily test with a different flat color for each level and see whether they're indeed fetched properly (and if you're unsure about the importer, it might be worth trying it out in the pixel shader first as well).
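As a concrete way to run that flat-color test, here is a minimal sketch (assuming <vector> and <cstring> are included; the names and the 64x64 base size are illustrative, and it uses an uncompressed RGBA texture rather than the DDS path). Each mip level gets a distinct solid color, so textureLod(..., n) should return an unmistakable color per level:
// Upload a distinct flat color to each mip level of a 64x64 texture
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
const unsigned char colors[7][4] = {
    {255, 0, 0, 255}, {0, 255, 0, 255}, {0, 0, 255, 255}, {255, 255, 0, 255},
    {0, 255, 255, 255}, {255, 0, 255, 255}, {255, 255, 255, 255}
};
std::vector<unsigned char> pixels;
for (int level = 0, size = 64; size >= 1; ++level, size /= 2)
{
    pixels.resize(size * size * 4);
    for (int p = 0; p < size * size; ++p)
        memcpy(&pixels[p * 4], colors[level], 4);
    glTexImage2D(GL_TEXTURE_2D, level, GL_RGBA8, size, size, 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 6); // 64x64 has 7 levels (0-6)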

GLSL Render to Texture not working

I'm trying to do a compute pass where I render to a texture that will be used in a draw pass later on. My initial implementation was based on shader storage buffer objects and worked nicely. But I want to apply a computation method that takes advantage of the GPU's blend hardware, so I started porting the SSBO implementation to an RTT (render-to-texture) one. Unfortunately, the code has stopped working: when I read back the texture, it contains the wrong values.
Here is my texture and frame buffer setup code:
glGenFramebuffers(1, &m_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// Create render textures
glGenTextures(NUM_TEX_OUTPUTS, m_renderTexs);
m_texSize = square_approximation(m_numVertices);
cout << "Textures size: " << glm::to_string(m_texSize) << endl;
GLenum drawBuffers[NUM_TEX_OUTPUTS];
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
    glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
    // 1st 0: level, 2nd 0: no border, 3rd 0: no initial data
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
    // XXX: do we need this?
    // Poor filtering. Needed!
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glBindTexture(GL_TEXTURE_2D, 0);
    // 0: level
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, m_renderTexs[i], 0);
    drawBuffers[i] = GL_COLOR_ATTACHMENT0 + i;
}
glDrawBuffers(NUM_TEX_OUTPUTS, drawBuffers);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
    cout << "Error when setting frame buffer" << endl;
    // throw exception?
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And this is the code to start the compute pass:
m_shaderProgram.use();
// setup openGL
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glViewport(0, 0, m_texSize.x, m_texSize.y); // setup viewport (equal to texture size)
// make a single patch have the vertex, the bases and the neighbours
glPatchParameteri(GL_PATCH_VERTICES, m_maxNeighbours + 5);
// Wait for all writes to shader storage to finish
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
// Note: a sampler uniform takes the texture *unit* index, not the texture
// object name; m_renderTexs[2] is an object name (the unit would be 2 here,
// matching the glActiveTexture(GL_TEXTURE0 + i) loop below)
glUniform1i(m_shaderProgram.getUniformLocation("curvTex"), m_renderTexs[2]);
glUniform2i(m_shaderProgram.getUniformLocation("size"), m_texSize.x, m_texSize.y);
glUniform2f(m_shaderProgram.getUniformLocation("vertexStep"), (umax - umin)/divisoes,
            (vmax - vmin)/divisoes);
// Bind buffers
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBindBufferBase(GL_UNIFORM_BUFFER, m_mvp_location, m_mvp_ubo);
// Make textures active
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
    glActiveTexture(GL_TEXTURE0 + i);
    glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
}
// no need to pass an index array because the ibo is already bound
glDrawElements(GL_PATCHES, m_numElements, GL_UNSIGNED_INT, 0);
I then read back the textures using the following:
bool readTex(GLuint tex, void *dest)
{
    glBindTexture(GL_TEXTURE_2D, tex);
    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, dest);
    glBindTexture(GL_TEXTURE_2D, 0);
    // TODO: check glGetError after glGetTexImage
    return true;
}

for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
    if (m_tensors[i] == NULL) {
        m_tensors[i] = new glm::vec4[m_texSize.x * m_texSize.y];
    }
    memset(m_tensors[i], 0, m_texSize.x * m_texSize.y * sizeof(glm::vec4));
    readTex(m_renderTexs[i], m_tensors[i]);
}
Finally, the fragment shader code is:
#version 430
#extension GL_ARB_shader_storage_buffer_object: require
layout(pixel_center_integer) in vec4 gl_FragCoord;
layout(std140, binding = 6) buffer EvalBuffer {
    vec4 evalDebug[];
};
uniform ivec2 size;
in TEData {
    vec4 _a;
    vec4 _b;
    vec4 _c;
    vec4 _d;
    vec4 _e;
};
layout(location = 0) out vec4 a;
layout(location = 1) out vec4 b;
layout(location = 2) out vec4 c;
layout(location = 3) out vec4 d;
layout(location = 4) out vec4 e;
void main()
{
    a = _a;
    b = _b;
    c = _c;
    d = _d;
    e = _e;
    evalDebug[gl_PrimitiveID] = gl_FragCoord;
}
The fragment coordinates are correct (each fragment maps to an x,y coordinate in the texture), and so are all the input values (_a to _e), but they do not come out correctly in the textures when I read them back. I also tried accessing the texture in the shader to see if it was only a read-back error, but my debug SSBO returned all zeroes.
Am I missing some setup step?
I've tested on both Linux and Windows (a GeForce Titan and a GeForce 540M), and I'm using OpenGL 4.3.
As derhass pointed out in the comments above, the problem was with the texture format. I assumed that by passing GL_FLOAT as the data type, the texture would use 32-bit floats for each of the RGBA channels. It was not so.
As derhass said, the data type parameter does not change the texture's internal format. I had to set the internalFormat parameter to what I actually wanted (GL_RGBA32F) so that it would work as expected.
So, after changing glTexImage2D call to:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
I was able to correctly render the results to the texture and read it back. :)
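(A way to rule out this class of bug entirely, sketched here under the assumption of a GL 4.2+ context, is to allocate the render targets with immutable storage, which forces an explicit sized format at allocation time:)
// Immutable storage: the sized internal format is fixed up front and the
// driver cannot fall back to an 8-bit format behind your back
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, m_texSize.x, m_texSize.y);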

COLOR_ATTACHMENTs - How to render to multiple textures as color attachments inside a Framebuffer Object?

I am trying to render to multiple textures as COLOR_ATTACHMENTs, without success. All I get from displaying them is a black screen (over a red clear fill), meaning the texture is read but is "empty".
My pseudo code is: attach 3 textures to an FBO on texture units 1, 2 and 3, with color attachments 0, 1 and 2 respectively. As a test case, I render my scene to all 3 color attachments, so they are supposed to hold the exact same data. Then I read one of those textures in shader pass 2 (with a sampler2D) and display it on a quad.
My original intent for those 2 extra color attachments is to use them as random data buffers using the GPU ping-pong technique. So far I just use them as texture clones for testing purpose.
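(For context, ping-pong means alternating which of the two textures is written and which is read on each frame. A minimal sketch, using the names that appear in the code below:)
// Ping-pong: swap the read/write roles of the two textures each frame
GLuint readTex  = myGlut.depthTextureSwitch ? myGlut.depthTexture1 : myGlut.depthTexture2;
GLuint writeTex = myGlut.depthTextureSwitch ? myGlut.depthTexture2 : myGlut.depthTexture1;
// (assumes the FBO is currently bound)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, writeTex, 0);
// ... render the pass, sampling from readTex ...
myGlut.depthTextureSwitch = !myGlut.depthTextureSwitch;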
Reading from GL_TEXTURE1 (COLOR_ATTACHMENT0) works fine, but the other two only give a black screen.
The code:
// Texture indices - inside a 'myGlut' struct
GLenum skyboxTextureIndex = GL_TEXTURE0;
GLenum colorTextureIndex = GL_TEXTURE1;
unsigned int colorTextureIndexInt = 1;
GLenum depthTexture1Index = GL_TEXTURE2;
unsigned int depthTexture1IndexInt = 2;
GLenum depthTexture2Index = GL_TEXTURE3;
unsigned int depthTexture2IndexInt = 3;
//** Below is inside 'main()' **//
// Create frame buffer
myGlut.frameBuffer = glutils::createFrameBuffer();
// Create texture to hold color buffer
// (note: myGlut.colorTexture has no value yet at this point, so the
// glActiveTexture/glBindTexture pair below binds a stale id; it is
// createTextureAttachment that generates and binds the real texture,
// which then stays bound to the active unit. The same applies to the
// two depth textures below.)
glActiveTexture(myGlut.colorTextureIndex);
glBindTexture(GL_TEXTURE_2D, myGlut.colorTexture);
myGlut.colorTexture = glutils::createTextureAttachment(myGlut.camera->getRenderResizedWidthPx(), myGlut.camera->getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT0, myGlut.colorTexture);
// Create 1st texture to hold depth buffer wannabe :>
glActiveTexture(myGlut.depthTexture1Index);
glBindTexture(GL_TEXTURE_2D, myGlut.depthTexture1);
myGlut.depthTexture1 = glutils::createTextureAttachment(myGlut.camera->getRenderResizedWidthPx(), myGlut.camera->getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture1);
// Create 2nd texture to hold depth buffer wannabe :>
glActiveTexture(myGlut.depthTexture2Index);
glBindTexture(GL_TEXTURE_2D, myGlut.depthTexture2);
myGlut.depthTexture2 = glutils::createTextureAttachment(myGlut.camera->getRenderResizedWidthPx(), myGlut.camera->getRenderResizedHeightPx());
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT2, myGlut.depthTexture2);
// Check FBO
if (!glutils::checkFBOStatus()) return 0;
With the glutils:: helper functions:
// Clear screen
void glutils::clearScreen(float r, float g, float b, float a) {
    glClearColor(r, g, b, a);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}

// Bind selected framebuffer
void glutils::bindFrameBuffer(int frameBuffer, int width, int height) {
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    glViewport(0, 0, width, height);
}

// Create frame buffer
GLuint glutils::createFrameBuffer() {
    GLuint frameBuffer;
    glGenFramebuffers(1, &frameBuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    return frameBuffer;
}

// Create a texture attachment
GLuint glutils::createTextureAttachment(int width, int height) {
    GLuint texture;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    return texture;
}

// Bind a texture attachment to the currently bound framebuffer
void glutils::bindTextureAttachment(GLenum colorAttachment, GLuint texture) {
    glFramebufferTexture2D(GL_FRAMEBUFFER, colorAttachment, GL_TEXTURE_2D, texture, 0);
}

// Check current frame buffer status
bool glutils::checkFBOStatus() {
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
        std::cerr << "##### ERROR : Framebuffer not complete... #####" << std::endl;
        return false;
    }
    else return true;
}
Then the GLUT display func:
// Clear screen
glutils::clearScreen(1.f, 0.f, 0.f, 1.f);
// Bind to custom framebuffer
glutils::bindFrameBuffer(myGlut.frameBuffer, myGlut.camera->getScreenWidthPx(), myGlut.camera->getScreenHeightPx());
// Set draw context
GLenum drawBuffers[2];
drawBuffers[0] = GL_COLOR_ATTACHMENT0;
if (myGlut.depthTextureSwitch) {
    drawBuffers[1] = GL_COLOR_ATTACHMENT2;
} else {
    drawBuffers[1] = GL_COLOR_ATTACHMENT1;
}
glDrawBuffers(2, drawBuffers);
// Use main program and bind uniforms
glUseProgram(myGlut.theProgram);
myGlut.refreshUniformsPass_1();
// Draw quad to sample
glutils::drawQuad();
// Unbind custom framebuffer -> use default (screen)
glutils::unbindCurrentFrameBuffer(myGlut.camera->getScreenWidthPx(), myGlut.camera->getScreenHeightPx());
// Use secondary program and bind uniforms
glUseProgram(myGlut.theProgram2);
myGlut.refreshUniformsPass_2();
// Draw quad to apply texture to
glutils::drawQuad();
// Switch
myGlut.depthTextureSwitch = !myGlut.depthTextureSwitch;
// Display & loop
glutSwapBuffers();
glutPostRedisplay();
Relevant uniform bindings -> pass 1
glUniform1i(glGetUniformLocation(myGlut.theProgram, "depthTexture"), !myGlut.depthTextureSwitch ? myGlut.depthTexture2IndexInt : myGlut.depthTexture1IndexInt);
Relevant shader code -> Pass 1
layout (location = 0) out vec4 outputColor;
layout (location = 1) out vec4 outputDepth1;
layout (location = 2) out vec4 outputDepth2;
uniform sampler2D depthTexture;
void main() {
    // ...
    outputColor = someColor;
    outputDepth1 = someColor;
    outputDepth2 = someColor;
}
Relevant uniform bindings -> pass 2
glUniform1i(glGetUniformLocation(myGlut.theProgram2, "texFramebuffer"), myGlut.depthTextureSwitch ? myGlut.depthTexture1IndexInt : myGlut.depthTexture2IndexInt);
With relevant shader code -> pass 2
uniform sampler2D texFramebuffer;
out vec4 outputColor;
// ...
void main() {
    outputColor = texture(texFramebuffer, vec2(gl_FragCoord.x / screenWidthPx * resRatio, gl_FragCoord.y / screenHeightPx * resRatio));
}
In a nutshell: my GL_TEXTURE0 holds the scene while GL_TEXTURE1 and GL_TEXTURE2 are black. Why?
I finally found the culprit. Because I bind the framebuffer inside the looped display() function, I needed to bind the texture attachments as well, after binding the FBO. Changing the code to:
// Bind to custom framebuffer
glutils::bindFrameBuffer(myGlut.frameBuffer, myGlut.camera->getScreenWidthPx(), myGlut.camera->getScreenHeightPx());
// Bind the selected attachments
glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT0, myGlut.colorTexture);
if (!myGlut.depthTextureSwitch)
    glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture1);
else
    glutils::bindTextureAttachment(GL_COLOR_ATTACHMENT1, myGlut.depthTexture2);
allowed me to render to all needed color attachments.

OpenGL Texturing, no error but grey

I'm trying to colour terrain points based on a texture colour (the lookup coordinate is currently hard-coded to vec2(0.5, 0.5) for test purposes, which should return light blue), but all the points are grey. glGetError returns 0 throughout the whole process. I think I might be doing the render process wrong, or there may be a problem with my shaders.
Vertex Shader:
void main() {
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment Shader:
uniform sampler2D myTextureSampler;
void main() {
    gl_FragColor = texture2D(myTextureSampler, vec2(0.5, 0.5));
}
Terrain Class:
class Terrain
{
public:
    Terrain(GLuint pProgram, char* pHeightmap, char* pTexture) {
        if (!LoadTerrain(pHeightmap))
        {
            OutputDebugString("Loading terrain failed.\n");
        }
        if (!LoadTexture(pTexture))
        {
            OutputDebugString("Loading terrain texture failed.\n");
        }
        mProgram = pProgram;
        mTextureLocation = glGetUniformLocation(pProgram, "myTextureSampler");
    };
    ~Terrain() {};

    void Draw()
    {
        glEnableClientState(GL_VERTEX_ARRAY); // Uncommenting this causes me to see nothing at all
        glBindBuffer(GL_ARRAY_BUFFER, mVBO);
        glVertexPointer(3, GL_FLOAT, 0, 0);
        glEnable(GL_TEXTURE_2D);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, mBMP);
        glProgramUniform1i(mProgram, mTextureLocation, 0);
        GLenum a = glGetError();
        glPointSize(5.0f);
        glDrawArrays(GL_POINTS, 0, mNumberPoints);
        a = glGetError();
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDisable(GL_TEXTURE_2D);
        glDisableClientState(GL_VERTEX_ARRAY);
    }

private:
    GLuint mVBO, mBMP, mUV, mTextureLocation, mProgram;
    int mWidth;
    int mHeight;
    int mNumberPoints;

    bool LoadTerrain(char* pFile)
    {
        /* Definitely no problem here - vertex data is fine and renders nice and dandy */
    }

    // TEXTURES MUST BE POWER OF TWO!!
    bool LoadTexture(char *pFile)
    {
        unsigned char header[54]; // Each BMP file begins with a 54-byte header
        unsigned int dataPos; // Position in the file where the actual data begins
        unsigned int width, height;
        unsigned int imageSize;
        unsigned char * data;
        FILE * file = fopen(pFile, "rb");
        if (!file)
            return false;
        if (fread(header, 1, 54, file) != 54)
        {
            fclose(file);
            return false;
        }
        if (header[0] != 'B' || header[1] != 'M')
        {
            fclose(file);
            return false;
        }
        // Read ints from the byte array
        dataPos = *(int*)&(header[0x0A]);
        imageSize = *(int*)&(header[0x22]);
        width = *(int*)&(header[0x12]);
        height = *(int*)&(header[0x16]);
        // Some BMP files are misformatted; guess the missing information
        if (imageSize == 0) imageSize = width * height * 3; // 3: one byte each for the Red, Green and Blue components
        if (dataPos == 0) dataPos = 54; // The BMP header is done that way
        // Create a buffer
        data = new unsigned char[imageSize];
        // Read the actual data from the file into the buffer
        fread(data, 1, imageSize, file);
        // Everything is in memory now, the file can be closed
        fclose(file);
        // Create one OpenGL texture
        glGenTextures(1, &mBMP);
        // "Bind" the newly created texture: all future texture functions will modify this texture
        glBindTexture(GL_TEXTURE_2D, mBMP);
        // Give the image to OpenGL
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        delete [] data;
        data = 0;
        return true;
    }
};
Answering my own question in case anyone has a similar problem:
I had tested this with multiple images, but it turns out there's a bug in my graphics application of choice: it has been exporting 8-bit bitmaps even though I explicitly told it to export 24-bit bitmaps. So basically, reverting to MS Paint solved my problem. Three cheers for MS Paint.
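(A quick way to catch this failure mode inside the loader is to validate the bit depth from the header before uploading; this sketch would go right after the header checks in LoadTexture above. The 0x1C offset is the biBitCount field of the standard BITMAPINFOHEADER.)
// Reject anything that is not an uncompressed 24-bit BMP
unsigned short bitsPerPixel = *(unsigned short*)&(header[0x1C]);
if (bitsPerPixel != 24)
{
    fclose(file);
    return false; // e.g. an 8-bit palettized BMP exported by accident
}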

How to use GL_TEXTURE_2D_ARRAY in OpenGL 3.2

So I've tried following the docs; however, I can't seem to get a 2D texture array to work.
-(GLint)buildTextureArray:(NSArray *)arrayOfImages
{
    // Create a sample to examine texture width and height
    GLImage *sample = [GLImage imageWithImageName:[arrayOfImages objectAtIndex:0] shouldFlip:NO];
    int width = sample.width, height = sample.height;
    GLsizei count = (GLsizei)arrayOfImages.count;
    GLuint texture3D;
    glGenTextures(1, &texture3D);
    glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA8, width, height, count, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    int i = 0;
    for (NSString *name in arrayOfImages) // Loop through everything in arrayOfImages
    {
        GLImage *image = [GLImage imageWithImageName:name shouldFlip:NO]; // My own class that loads an image
        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, image.width, image.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
        i++;
    }
    return texture3D;
}
// Setting the uniform elsewhere
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glUniform1i(textures, 0);

// Fragment shader
#version 150
in vec3 texCoords;
uniform sampler2DArray textures;
out vec3 color;
void main()
{
    color = texture(textures, texCoords.stp, 0).rgb;
}
I am able to load individual textures with the same texture parameters, but I can't get the 2D texture array to work. All I get is a black texture. Why is this happening?
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
Your texture in fact does not have mipmaps. So stop telling OpenGL that it does.
Also, always set the mipmap range parameters (GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL) for your texture. Or better yet, use texture storage (glTexStorage3D) to allocate your texture's storage, and it will do that for you.
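(A sketch of that texture-storage path, assuming a GL 4.2+ context or the ARB_texture_storage extension; it would replace the glTexImage3D call above:)
// Allocate immutable storage with exactly one mip level; base/max level are
// then consistent automatically, and the min filter must not reference mipmaps
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
glTexStorage3D(GL_TEXTURE_2D_ARRAY, 1, GL_RGBA8, width, height, count);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Upload each layer with glTexSubImage3D exactly as before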
A note on the texture coordinates for 2D array textures: the s and t components are normalized as usual, but the third component selects the layer and is not normalized; it runs from 0 to the number of layers minus 1. Try adjusting your texture coordinates accordingly.
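(For example, a minimal lookup sketch in the fragment shader, where 'layer' is assumed to be the integer index of the slice to sample:)
// sampler2DArray lookup: normalized s and t, plus an unnormalized layer index
vec3 coords = vec3(texCoords.st, float(layer));
color = texture(textures, coords).rgb;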