OpenGL texture 2D array won't render with depth > 1 - C++

The code below uploads the texture data described by the passed parameters. When 'vPixelData' holds only one item/texture, it renders properly, but once there are two or more, nothing shows up.
glGetError() returns 'GL_INVALID_OPERATION' right after the glTexSubImage3D() call, but only when vPixelData.size() is greater than 1.
/*virtual*/ uint32 HyOpenGL::AddTextureArray(uint32 uiNumColorChannels, uint32 uiWidth, uint32 uiHeight, vector<unsigned char *> &vPixelData)
{
    GLenum eInternalFormat = uiNumColorChannels == 4 ? GL_RGBA8 : (uiNumColorChannels == 3 ? GL_RGB8 : GL_R8);
    GLenum eFormat = uiNumColorChannels == 4 ? GL_RGBA : (uiNumColorChannels == 3 ? GL_RGB : GL_RED);

    glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, eInternalFormat, uiWidth, uiHeight, static_cast<uint32>(vPixelData.size()), 0, eFormat, GL_UNSIGNED_BYTE, NULL);

    GLuint hGLTextureArray;
    glGenTextures(1, &hGLTextureArray);
    //glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D_ARRAY, hGLTextureArray);

    // Create storage for the texture
    glTexStorage3D(GL_TEXTURE_2D_ARRAY,
                   1,                                        // Number of mipmaps
                   eInternalFormat,                          // Internal format
                   uiWidth, uiHeight,                        // width, height
                   static_cast<uint32>(vPixelData.size()));

    for(unsigned int i = 0; i != vPixelData.size(); ++i)
    {
        // Write each texture into storage
        glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                        0,                      // Mipmap number
                        0, 0, i,                // xoffset, yoffset, zoffset
                        uiWidth, uiHeight, 1,   // width, height, depth (of texture you're copying in)
                        eFormat,                // format
                        GL_UNSIGNED_BYTE,       // type
                        vPixelData[i]);         // pointer to pixel data

        GLenum eError = glGetError(); // Getting 'GL_INVALID_OPERATION' when > 1 texture depth. It's 'GL_NO_ERROR' otherwise
    }

    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    return hGLTextureArray;
}
(My current passed parameters are uiNumColorChannels == 4, and uiWidth and uiHeight are both 512.)

Apparently everything works if I use:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,
             eFormat,
             uiWidth, uiHeight,
             uiNumTextures,
             0,
             eFormat,
             GL_UNSIGNED_BYTE,
             NULL);
instead of:
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
               1,                                        // Number of mipmaps
               eInternalFormat,                          // Internal format
               uiWidth, uiHeight,                        // width, height
               static_cast<uint32>(vPixelData.size()));

Related

Changing internal format causes texture to have incorrect parameters

I set up a framebuffer texture to draw a scene on as follows:
glGenFramebuffers(1, &renderFBO);
glGenTextures(1, &renderTexture);
glBindTexture(GL_TEXTURE_2D, renderTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, W_WIDTH, W_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
int width, height;
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
cout << "width: " << width << " height: " << height << endl;
glBindFramebuffer(GL_FRAMEBUFFER, renderFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
By checking width and height, they do indeed have the correct dimensions: 1024 x 768.
Now I am trying to set up a second texture to be used as a counter, meaning I am trying to count the occurrences of every color value by incrementing the appropriate texel in the texture. Since I am trying to count the colors, I use a 2D texture of size 256 x 3. In each cell I want a single unsigned integer, showing the number of times a specific color value has appeared up until that point. For instance, if the color (255, 5, 3) was encountered, I would increment the numbers inside tex[255][0], tex[5][1], tex[3][2].
num_bins = 256;
GLuint* hist_data = new GLuint[num_bins * color_components]();
glGenTextures(1, &tex_output);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex_output);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED, GL_UNSIGNED_INT, hist_data);
glBindImageTexture(0, tex_output, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32UI);
int width, height;
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
cout << "width: " << width << " height: " << height << endl;
I used the sized and base internal format correspondence shown in the OpenGL specification. However, when this code executes, the output values are width=0, height=0. The input data doesn't seem to be the issue, because I tried passing NULL as the last argument and it still didn't work. What am I missing here?
EDIT: The values in the texture are supposed to be incremented inside the following compute shader:
#version 450
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img_input;
layout(r32ui, binding = 1) uniform uimage2D img_output;
void main() {
    // grabbing pixel value from input image
    vec4 pixel_color = imageLoad(img_input, ivec2(gl_GlobalInvocationID.xy));
    vec3 rgb = round(pixel_color.rgb * 255);
    ivec2 r = ivec2(rgb.r, 0);
    ivec2 g = ivec2(rgb.g, 1);
    ivec2 b = ivec2(rgb.b, 2);
    uint inc = 1;
    imageAtomicAdd(img_output, r, inc);
    imageAtomicAdd(img_output, g, inc);
    imageAtomicAdd(img_output, b, inc);
}
You get a GL_INVALID_OPERATION error because the texture format argument doesn't correspond to the internal format of the texture, so the glTexImage2D call fails and no storage is created, which is why the queried width and height are 0. For an unsigned integral format you have to use GL_RED_INTEGER instead of GL_RED (see glTexImage2D):
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED, GL_UNSIGNED_INT, hist_data);         // wrong
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, 256, 3, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, hist_data); // correct
I recommend using Debug Output; it saves a lot of time and lets you find such errors quickly.
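As a rough sketch of what enabling Debug Output can look like (it needs an OpenGL 4.3+ context or the KHR_debug extension; the callback body here is just illustrative):
void GLAPIENTRY DebugCallback(GLenum source, GLenum type, GLuint id, GLenum severity,
                              GLsizei length, const GLchar *message, const void *userParam)
{
    std::cerr << "GL debug: " << message << std::endl;
}

// After context creation:
glEnable(GL_DEBUG_OUTPUT);
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // report errors on the thread/call that caused them
glDebugMessageCallback(DebugCallback, nullptr);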

Array texture in OpenGL 3.+ with mipmap

I have read this tutorial: Array Texture,
but I don't want to use glTexStorage3D() (it requires OpenGL 4.2). First of all, can someone check whether I have implemented this code properly? (I'm using glTexImage3D instead of glTexStorage3D.)
unsigned int nrTextures = 6;
GLsizei width = 256;
GLsizei height = 256;
GLuint arrayTextureID;
std::vector<unsigned char*> textures(nrTextures);
//textures: Load textures here...
glGenTextures(1, &arrayTextureID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayTextureID);
//Gamma to linear color space for each texture.
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB, width, height, nrTextures, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
for(unsigned int i = 0; i < nrTextures; i++)
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1, GL_RGB, GL_UNSIGNED_BYTE, textures[i]);
/*glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);*/
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
/*glGenerateMipmap(GL_TEXTURE_2D_ARRAY);*/
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
How do I implement mipmapping with this implementation?
This mostly looks ok. The only problem I can see is that GL_SRGB is not a valid internal texture format. So the glTexImage3D() call needs to use GL_SRGB8 instead:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB8, width, height, nrTextures, 0,
GL_RGB, GL_UNSIGNED_BYTE, NULL);
For generating mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
after you have filled the texture with data via the glTexSubImage3D() calls.
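Put together, a minimal sketch of the order of operations for the code above (the GL_SRGB8/GL_RGB choices are taken from the question; the filter settings are just an example):
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayTextureID);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB8, width, height, nrTextures, 0,
             GL_RGB, GL_UNSIGNED_BYTE, NULL);
for(unsigned int i = 0; i < nrTextures; i++)
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1,
                    GL_RGB, GL_UNSIGNED_BYTE, textures[i]);

// Build the mipmap chain from level 0, then select a mipmapped minification filter
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);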

C++ Adding a texture to a GL_QUAD and it's coming out black

I have a series of rectangles of different colours and I'm trying to add a texture to one of them. However when I apply the texture to the given rectangle, it just turns black. Below is the function I use to load the texture.
GLuint GLWidget::LoadTexture(const char * pic, int width, int height){
    GLuint Texture;
    BYTE * data;
    FILE * picfile;

    picfile = fopen(pic, "rb");
    if (picfile == NULL)
        return 0;

    data = (BYTE *)malloc(width * height * 3);
    fread(data, width * height, 3, picfile);
    fclose(picfile);

    glGenTextures(1, &Texture);
    glBindTexture(GL_TEXTURE_2D, Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB8, GL_UNSIGNED_BYTE, data);
    return Texture;
}
In another function where the GL_QUADS are drawn, I then have...
GLuint myTex = LoadTexture("texture.bmp", 500, 500);
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glBindTexture(GL_TEXTURE_2D, myTex);
glBegin(GL_QUADS);
glTexCoord2f(1, 1); glVertex3f( 42, 10,  42);
glTexCoord2f(1, 0); glVertex3f( 42, 10, -42);
glTexCoord2f(0, 0); glVertex3f(-42, 10, -42);
glTexCoord2f(0, 1); glVertex3f(-42, 10,  42);
glEnd();
If anyone could let me know where I am going wrong that would be great, thanks!
This call
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB8, GL_UNSIGNED_BYTE, data);
is invalid. GL_RGB8 is a valid internalFormat, but it is not a valid enum for format. Use GL_RGB as the format and GL_UNSIGNED_BYTE as the type if your client-side data is 3 channels with 8-bit unsigned integer data per channel.
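For example, assuming tightly packed 8-bit RGB data, the upload would then be:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);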
Another thing is
LoadTexture("texture.bmp", 500, 500);
This suggests that you are dealing with BMP files, but your loader only deals with completely raw image data.
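For reference, a BMP file starts with a header (the offset of the pixel data is stored in bytes 10-13), the pixel rows are stored in BGR order and usually bottom-up, and each row is padded to a multiple of 4 bytes. A rough sketch of a loader that at least skips the header and handles the channel order for a simple 24-bit BMP (error handling, row padding, and the bottom-up flip are omitted; with a 500-pixel-wide, 3-byte-per-pixel image the rows happen to be 4-byte aligned already):
GLuint GLWidget::LoadTexture(const char *pic, int width, int height)
{
    FILE *picfile = fopen(pic, "rb");
    if (picfile == NULL)
        return 0;

    // Read the 54-byte file + info header; bytes 10-13 hold the offset of the pixel data
    unsigned char header[54];
    fread(header, 1, sizeof(header), picfile);
    unsigned int dataOffset = header[10] | (header[11] << 8) | (header[12] << 16) | (header[13] << 24);

    BYTE *data = (BYTE *)malloc(width * height * 3);
    fseek(picfile, dataOffset, SEEK_SET);
    fread(data, 1, width * height * 3, picfile);
    fclose(picfile);

    GLuint Texture;
    glGenTextures(1, &Texture);
    glBindTexture(GL_TEXTURE_2D, Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // BMP pixel data is BGR, so use GL_BGR as the client-side format
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
    free(data);
    return Texture;
}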

glTexSubImage leads to black texture, while glTexImage works fine

When I bind my image using glTexImage2D, it renders fine.
First, the code in the fragment shader:
uniform sampler2D tex;
in vec2 tex_coord;
// main:
fragment_out = mix(
texture(tex, tex_coord),
vec4(tex_coord.x, tex_coord.y, 0.0, 1.0),
0.5
);
Using this code, it draws the image:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height,
0, GL_RGBA, GL_UNSIGNED_BYTE, image);
But if I bind it via glTexSubImage2D, the texture is drawn just black.
glTexStorage2D(GL_TEXTURE_2D,
1,
GL_RGBA,
width, height);
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
width, height,
GL_RGBA,
GL_UNSIGNED_BYTE,
image);
When I replace GL_RGBA with GL_RGBA8 or GL_RGBA16 in the second code, the rendering is distorted.
This is the whole texture loading and binding code:
GLuint texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
int width, height;
unsigned char *image = SOIL_load_image(Resource::getPath("Images/2hehwdv.jpg").c_str(), &width, &height, 0, SOIL_LOAD_RGBA);
std::cout << SOIL_last_result() << std::endl;
std::cout << width << "x" << height << std::endl;
glTexStorage2D(GL_TEXTURE_2D,
1,
GL_RGBA,
width, height);
glTexSubImage2D(GL_TEXTURE_2D,
0,
0, 0,
width, height,
GL_RGBA,
GL_UNSIGNED_BYTE,
image);
//glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
Can someone explain this behavior to me?
glTexStorage2D( GL_TEXTURE_2D, 1, GL_RGBA, width, height );
                                  ^^^^^^^ not sized
GL_RGBA is an unsized format.
glTexStorage2D()'s internalFormat parameter must be passed a sized internal format.
A glGetError() call after glTexStorage2D() would have returned GL_INVALID_ENUM:
GL_INVALID_ENUM is generated if internalformat is not a valid sized internal format.
Try a sized format from Table 1 of the glTexStorage2D reference page, like GL_RGBA8.
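For example (a sketch based on the code in the question; the level count is only a suggestion, and the full mipmap chain needs 1 + floor(log2(max(width, height))) levels if glGenerateMipmap is supposed to fill it):
GLsizei levels = 1 + (GLsizei)std::floor(std::log2((double)std::max(width, height))); // needs <cmath>, <algorithm>
glTexStorage2D(GL_TEXTURE_2D, levels, GL_RGBA8, width, height); // sized internal format
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D); // now has levels below 0 to fill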

OpenGL SubTexturing

I have image data and I want to get a sub-image of it to use as an OpenGL texture.
glGenTextures(1, &m_name);
glGetIntegerv(GL_TEXTURE_BINDING_2D, &oldName);
glBindTexture(GL_TEXTURE_2D, m_name);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_width, m_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_data);
How can I get a sub-image of that image loaded as a texture? I think it has something to do with glTexSubImage2D, but I have no clue how to use it to create a new texture that I can load. Calling:
glTexSubImage2D(GL_TEXTURE_2D, 0, xOffset, yOffset, xWidth, yHeight, GL_RGBA, GL_UNSIGNED_BYTE, m_data);
does nothing that I can see, and calling glCopyTexSubImage2D just takes part of my framebuffer.
Thanks
Edit: Use glPixelStorei. You use it to set GL_UNPACK_ROW_LENGTH to the width (in pixels) of the entire image. Then you call glTexImage2D (or whatever), passing it a pointer to the first pixel of the subimage and the width and height of the subimage.
Don't forget to restore GL_UNPACK_ROW_LENGTH to 0 when you're finished with it.
I.e.:
glPixelStorei( GL_UNPACK_ROW_LENGTH, img_width );
char *subimg = (char*)m_data + (sub_x + sub_y*img_width)*4;
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, sub_width, sub_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, subimg );
glPixelStorei( GL_UNPACK_ROW_LENGTH, 0 );
Or, if you're allergic to pointer maths:
glPixelStorei( GL_UNPACK_ROW_LENGTH, img_width );
glPixelStorei( GL_UNPACK_SKIP_PIXELS, sub_x );
glPixelStorei( GL_UNPACK_SKIP_ROWS, sub_y );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, sub_width, sub_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_data );
glPixelStorei( GL_UNPACK_ROW_LENGTH, 0 );
glPixelStorei( GL_UNPACK_SKIP_PIXELS, 0 );
glPixelStorei( GL_UNPACK_SKIP_ROWS, 0 );
Edit2: For the sake of completeness, I should point out that if you're using OpenGL-ES then you don't get GL_UNPACK_ROW_LENGTH. In which case, you could either (a) extract the subimage into a new buffer yourself, or (b)...
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, sub_width, sub_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
for( int y = 0; y < sub_height; y++ )
{
    char *row = (char*)m_data + ((y + sub_y)*img_width + sub_x) * 4;
    glTexSubImage2D( GL_TEXTURE_2D, 0, 0, y, sub_width, 1, GL_RGBA, GL_UNSIGNED_BYTE, row );
}
For those stuck with OpenGL ES 1.1/2.0 in 2018 and later, I did some tests of different methods for updating part of a texture from image data (the image is the same size as the texture).
Method 1: Copy whole image with glTexImage2D:
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, mWidth, mHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, m_Pixels );
Method 2: Copy whole image with glTexSubImage2D:
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, mWidth, mHeight, GL_RGBA, GL_UNSIGNED_BYTE, m_Pixels );
Method 3: Copy the changed part of the image, line by line in a loop:
auto *ptr = m_Pixels + (x + y * mWidth) * 4;
for( int i = 0; i < h; i++, ptr += mWidth * 4 ) {
    glTexSubImage2D( GL_TEXTURE_2D, 0, x, y+i, w, 1, GL_RGBA, GL_UNSIGNED_BYTE, ptr );
}
Method 4: Copy the whole width of the image, but vertically copy only the part that has changed:
auto *ptr = m_Pixels + (y * mWidth) * 4;
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, y, mWidth, h, GL_RGBA, GL_UNSIGNED_BYTE, ptr );
And here are the results of a test done on a PC, updating different parts of the texture 100,000 times; each updated part was about 1/5th the size of the whole texture.
Method 1 - 38.17 sec
Method 2 - 26.09 sec
Method 3 - 54.83 sec - slowest
Method 4 - 5.93 sec - winner
Not surprisingly, method 4 is the fastest, as it copies only part of the image and does it with a single glTex*() call.
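As an aside (not one of the timed methods above): on desktop GL, or on OpenGL ES 3.0 and later, GL_UNPACK_ROW_LENGTH from the earlier answer lets you upload just the changed sub-rectangle with a single call; a sketch using the same variables as methods 3 and 4:
glPixelStorei( GL_UNPACK_ROW_LENGTH, mWidth );
auto *ptr = m_Pixels + (x + y * mWidth) * 4;
glTexSubImage2D( GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, ptr );
glPixelStorei( GL_UNPACK_ROW_LENGTH, 0 );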