Problem with imageStore in a compute shader (OpenGL)

I have a problem with a very simple compute shader that just copies a texture using imageStore.
#define KS 16 // kernel size
layout (local_size_x = KS, local_size_y = KS) in;
layout(location = 0) uniform sampler2D u_inputTex;
layout(location = 1) uniform writeonly image2D u_outImg;
void main()
{
    const ivec2 gid = ivec2(gl_WorkGroupID.xy);
    const ivec2 tid = ivec2(gl_LocalInvocationID.xy);
    const ivec2 pixelPos = ivec2(KS) * gid + tid;
    imageStore(u_outImg, pixelPos,
        uvec4(255.0 * texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
}
On the C++ side, I have this:
int w, h;
u32 inTex = -1;
{
    int nc;
    auto img = stbi_load("imgs/Windmill_NOAA.png", &w, &h, &nc, 3);
    if (img) {
        glGenTextures(1, &inTex);
        glBindTexture(GL_TEXTURE_2D, inTex);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        stbi_image_free(img);
    }
    else
        printf("Error loading img\n");
}
u32 outTex;
{
    glGenTextures(1, &outTex);
    glBindTexture(GL_TEXTURE_2D, outTex);
    glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, w, h);
}
u32 compProg = easyCreateComputeShaderProg("compute", shader_srcs::computeSrc);
glUseProgram(compProg);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, inTex);
glBindImageTexture(0, outTex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8UI);
glUniform1i(0, 0);
glUniform1i(1, 0);
glDispatchCompute((w+15)/16, (h+15)/16, 1);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); // make sure the output image has been written
u8* img = new u8[w * h * 4];
glBindTexture(GL_TEXTURE_2D, outTex);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, img);
stbi_write_png("imgs/out.png", w, h, 1, img, w*4);
delete[] img;
The input image looks like this:
But this is what I get in the output image:
I simplified the shader further: instead of reading from the input texture, I just write a fixed value:
imageStore(u_outImg, pixelPos,
//uvec4(255.0 * texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
uvec4(1u));
I have noticed that:
If I write 0 it's all black
If I write 255 it's all white
But if I write something in the middle (100 for example) it's not grey but white as well
I have also tried it like this, but that didn't work either:
imageStore(u_outImg, pixelPos,
vec4(texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
What am I doing wrong? My end goal is to make a postprocessing filter, but I couldn't get it to work, so I tried to make it as simple as possible, and yet it doesn't work.
Minimal example repo: https://github.com/tuket/stackoverflow_image_store_problem

glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, w, h);
If you want to use an unnormalized unsigned integer image, you must declare it as uimage2D in the shader. image2D is only for floating-point and normalized integer (range [0,1]) formats.
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
That's the wrong memory barrier. The barrier is about how you're going to access the resources modified by your shader after the barrier, so the correct one is:
GL_TEXTURE_UPDATE_BARRIER_BIT
which is explained in the reference page as (emphasis mine):
Writes to a texture via glTex(Sub)Image*, glCopyTex(Sub)Image*, glCompressedTex(Sub)Image*, and reads via glGetTexImage after the barrier will reflect data written by shaders prior to the barrier. Additionally, texture writes from these commands issued after the barrier will not execute until all shader writes initiated prior to the barrier complete.
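Putting the two points together, a minimal sketch of the corrected readback path (on the shader side, only the declaration changes: writeonly uimage2D u_outImg instead of image2D). The GL_PACK_ALIGNMENT line and the 4-component stbi_write_png call are my additions to match the RGBA buffer, not something stated in this answer:

glDispatchCompute((w + 15) / 16, (h + 15) / 16, 1);
// The shader's writes are read back with glGetTexImage, so the matching
// barrier bit is GL_TEXTURE_UPDATE_BARRIER_BIT.
glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
u8* img = new u8[w * h * 4];
glBindTexture(GL_TEXTURE_2D, outTex);
glPixelStorei(GL_PACK_ALIGNMENT, 1); // readback is controlled by the PACK alignment
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, img);
stbi_write_png("imgs/out.png", w, h, 4, img, w * 4); // 4 channels to match the buffer
delete[] img;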

Related

OpenGL imageSize is always zero

I wrote a simple test case to get the height of an image within a compute shader and write it to an SSBO. I've used the SSBO code before, and I know that part works fine. I used apitrace to inspect the state during the glDispatchCompute call, and I can see both the original texture and the image bound to the correct image unit. However, imageSize always returns zero (the output is all zeros, with the exception of some leftover -1s at the end because the division with the workgroup size rounds down). No OpenGL errors are thrown.
I based this test case on one of my earlier questions which included code to bind an SSBO to a compute shader (I use it here to get debug output from the compute shader).
class ComputeShaderWindow : public QOpenGLWindow {
public:
    void initializeGL() {
        // Create the opengl functions object
        gl = context()->versionFunctions<QOpenGLFunctions_4_3_Core>();
        m_compute_program = new QOpenGLShaderProgram(this);
        auto compute_shader_s = fs::readFile(
            "test_assets/example_compute_shader.comp");
        QImage img("test_assets/input/out.png");
        // Adds the compute shader, then links and binds it
        m_compute_program->addShaderFromSourceCode(QOpenGLShader::Compute,
                                                   compute_shader_s);
        m_compute_program->link();
        m_compute_program->bind();
        GLuint frame;
        // Create the texture
        gl->glGenTextures(1, &frame);
        // Bind the texture
        gl->glBindTexture(GL_TEXTURE_2D, frame);
        // Fill the texture with the image
        gl->glTexImage2D(GL_TEXTURE_2D,
                         0,
                         GL_RGB8,
                         img.width(),
                         img.height(),
                         0,
                         GL_BGRA,
                         GL_UNSIGNED_BYTE,
                         img.bits());
        GLuint image_unit = 1;
        // Get the location of the image uniform
        GLuint uniform_location = gl->glGetUniformLocation(
            m_compute_program->programId(),
            "video_frame");
        // Set location to 0 (a unique value that we choose)
        gl->glUniform1i(uniform_location, image_unit);
        // Bind layer of texture to image unit
        gl->glBindImageTexture(image_unit,
                               frame,
                               0,
                               GL_FALSE,
                               0,
                               GL_READ_ONLY,
                               GL_RGBA8UI);
        // We should only need the bit for shader image access,
        // but for the purpose of this example, I set all the bits
        // just to be safe
        gl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
        // SSBO stuff to get output from the shader
        GLfloat* default_values = new GLfloat[NUM_INVOCATIONS];
        std::fill(default_values, default_values + NUM_INVOCATIONS, -1.0);
        GLuint ssbo;
        gl->glGenBuffers(1, &ssbo);
        gl->glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
        gl->glBufferData(GL_SHADER_STORAGE_BUFFER,
                         NUM_INVOCATIONS * sizeof(float),
                         &default_values[0],
                         GL_STATIC_DRAW);
        gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
        gl->glDispatchCompute(NUM_INVOCATIONS / WORKGROUP_SIZE, 1, 1);
        gl->glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT);
        gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
        // Now read from the buffer so that we can check its values
        GLfloat* read_data = (GLfloat*) gl->glMapBuffer(GL_SHADER_STORAGE_BUFFER,
                                                        GL_READ_ONLY);
        std::vector<GLfloat> buffer_data(NUM_INVOCATIONS);
        // Read from buffer
        for (int i = 0; i < NUM_INVOCATIONS; i++) {
            DEBUG(read_data[i]);
        }
        DEBUG("Done!");
        gl->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
        assert(gl->glGetError() == GL_NO_ERROR);
    }
    void resizeGL(int width, int height) {
    }
    void paintGL() {
    }
    void teardownGL() {
    }
private:
    QOpenGLFunctions_4_3_Core* gl;
    QOpenGLShaderProgram* m_compute_program;
    static constexpr int NUM_INVOCATIONS = 9000;
    static constexpr int WORKGROUP_SIZE = 128;
};
As for the compute shader:
#version 430 core
layout(local_size_x = 128) in;
layout(rgba8ui, binding = 1) readonly uniform uimage2D video_frame;
layout(std430, binding = 0) writeonly buffer SSBO {
    float data[];
};
void main() {
    uint ident = int(gl_GlobalInvocationID);
    uint num_workgroups = int(gl_WorkGroupID);
    // Write the height of the image into the buffer
    data[ident] = float(imageSize(video_frame).y);
}
Turns out I forgot the texture parameters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
No clue why that breaks imageSize() calls though.
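A likely explanation (my reading; it is not stated in the post): the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, so a texture with only mip level 0 uploaded is mipmap-incomplete, and an incomplete texture bound to an image unit behaves as if nothing were bound there, which in practice makes imageSize() return zero. A minimal sketch of two ways to make the texture complete, using the names from the question (the GL_RGBA8 format and the single-level glTexStorage2D call are assumptions; note also that image load/store only works with a specific list of internal formats, which does not include GL_RGB8):

// Either give the texture a non-mipmapping minification filter...
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// ...or allocate immutable storage with exactly one mip level; for immutable
// textures the usable level range is clamped to what was allocated.
gl->glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, img.width(), img.height());
gl->glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.width(), img.height(),
                    GL_BGRA, GL_UNSIGNED_BYTE, img.bits());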

Compute Shader cannot sample Sampler2D

I am trying to implement a new project using tiled deferred shading with GLFW, but I cannot sample from sampler2Ds for some reason. I've run this code before in another one of my projects, so I don't know if it's a GLFW issue or a driver issue (using NVIDIA 358.50 drivers). Here's the code, where depthTexture is a sampler2D:
void main()
{
    if (gl_LocalInvocationIndex == 0) {
        minDepth = 0xFFFFFFFF;
        maxDepth = 0;
    }
    // Use the GlobalInvocationID as a pixel position since we don't have built-in GLSL features
    ivec2 pixelPosition = ivec2(gl_GlobalInvocationID.xy);
    //ivec2 texCoord = getUVCoordinates(pixelPosition);
    vec2 texCoord = vec2(pixelPosition.x / SCREEN_WIDTH, pixelPosition.y / SCREEN_HEIGHT);
    float depthFloat = texture(depthTexture, texCoord).z;
    //float depthFloat = 0.6f;
    // Atomics only work on integers
    uint depthInt = uint(depthFloat * 0xFFFFFFFF);
    // Calculate the max depth of this work group/tile
    atomicMin(minDepth, depthInt);
    atomicMax(maxDepth, depthInt);
    // A barrier is required at this point since we need all the calculations to be done before we proceed
    barrier();
    imageStore(finalImage, pixelPosition, vec4(vec3(depthFloat), 1.0f));
    imageStore(otherImage, pixelPosition, vec4(float(float(minDepth) / float(0xFFFFFFFF))));
    barrier();
};
And here is how I bind the textures. I can write to finalImage and otherImage just fine, but I can't sample the depth texture:
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, mDepthTexture);
glUniform1i(shader.getUniformLocation("depthTexture"), 2);
glBindImageTexture(0, mFinalTexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
glBindImageTexture(1, mOtherTexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
And here is how the depth texture is set up:
glGenTextures(1, &mDepthTexture);
glBindTexture(GL_TEXTURE_2D, mDepthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, mDepthTexture, 0);
glfw hints:
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
I've confirmed that the depth texture is valid using gDEBugger, so I'm not sure where the error is.
EDIT
I've also tried glBindImageTexture for the depth texture like this:
glBindImageTexture(2, mDepthTexture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32F);
and then
....
layout(binding = 0, rgba32f) uniform writeonly image2D finalImage;
layout(binding = 1, rgba32f) uniform writeonly image2D otherImage;
layout(binding = 2, r32f) uniform readonly image2D depthTexture;
....
float depthFloat = imageLoad(depthTexture, texCoord).z;
....
EDIT
Okay, two things. First, there was a silly mistake: the texture sample should have used .x, not .z, since it's a single value; I'm not sure why this worked on the other computer. Second, 0xFFFFFFFF seems to not be supported, or causes an overflow, because I had to remove one F to get the implementation to work. I have no idea why, since every example I've come across uses it, and it's a bit worrying because it means some precision is lost.
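A plausible explanation for both points (my reading, not part of the original post): in a core profile, sampling a GL_DEPTH_COMPONENT texture returns the depth value in the .x component only, since the legacy GL_DEPTH_TEXTURE_MODE that replicated it into the other channels was removed, which would explain why .z happened to work on a machine with a compatibility context; and a 32-bit float cannot represent 0xFFFFFFFF exactly, so uint(depthFloat * 0xFFFFFFFF) can overflow for depth values near 1.0. A minimal sketch of the adjusted shader lines, following the question's fix of dropping one F:

// Sketch only: read the depth from the first component and scale by 2^28 - 1,
// which fits comfortably in both float and uint.
float depthFloat = texture(depthTexture, texCoord).x;
uint depthInt = uint(depthFloat * 0xFFFFFFF);
atomicMin(minDepth, depthInt);
atomicMax(maxDepth, depthInt);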

PBO Indexed Color Texture Rendering with Palette in Fragment Shader not working

I am working on a game with 8-bit graphics. I provide a pixel buffer (OSXRenderer.pbo) to my game loop to fill up, then texsubimage it onto a texture (OSXRenderer.ScreenTexture). The texture is rendered to the screen via a quad. I got this working without problems with an RGB PBO (size: width*height*3). But now I want the PBO to hold indexed color, so I load a palette into another texture (OSXRenderer.PaletteTexture) and changed my PBO (size: width*height).
How I figure it should work: the PBO gets filled with noise (random uint8 values 0-63), the screen texture gets texsubimaged, and when rendering it onto the screen via the quad, my fragment shader replaces each RED channel value with the corresponding color from my palette, and I get 8-bit noise on the screen.
But I simply can't get it to work: I only get a black screen. If I set my frag color to the incoming screen-texture (PBO) data, I get red noise, just as expected.
[EDIT]
I tested the fragment shader's "color" variable values, and they are always 0.0, except for alpha, which is always 1.0.
setup:
static uint8 palette[] = {
0x80,0x80,0x80, 0x00,0x00,0xBB, 0x37,0x00,0xBF, 0x84,0x00,0xA6,
0xBB,0x00,0x6A, 0xB7,0x00,0x1E, 0xB3,0x00,0x00, 0x91,0x26,0x00,
0x7B,0x2B,0x00, 0x00,0x3E,0x00, 0x00,0x48,0x0D, 0x00,0x3C,0x22,
0x00,0x2F,0x66, 0x00,0x00,0x00, 0x05,0x05,0x05, 0x05,0x05,0x05,
0xC8,0xC8,0xC8, 0x00,0x59,0xFF, 0x44,0x3C,0xFF, 0xB7,0x33,0xCC,
0xFF,0x33,0xAA, 0xFF,0x37,0x5E, 0xFF,0x37,0x1A, 0xD5,0x4B,0x00,
0xC4,0x62,0x00, 0x3C,0x7B,0x00, 0x1E,0x84,0x15, 0x00,0x95,0x66,
0x00,0x84,0xC4, 0x11,0x11,0x11, 0x09,0x09,0x09, 0x09,0x09,0x09,
0xFF,0xFF,0xFF, 0x00,0x95,0xFF, 0x6F,0x84,0xFF, 0xD5,0x6F,0xFF,
0xFF,0x77,0xCC, 0xFF,0x6F,0x99, 0xFF,0x7B,0x59, 0xFF,0x91,0x5F,
0xFF,0xA2,0x33, 0xA6,0xBF,0x00, 0x51,0xD9,0x6A, 0x4D,0xD5,0xAE,
0x00,0xD9,0xFF, 0x66,0x66,0x66, 0x0D,0x0D,0x0D, 0x0D,0x0D,0x0D,
0xFF,0xFF,0xFF, 0x84,0xBF,0xFF, 0xBB,0xBB,0xFF, 0xD0,0xBB,0xFF,
0xFF,0xBF,0xEA, 0xFF,0xBF,0xCC, 0xFF,0xC4,0xB7, 0xFF,0xCC,0xAE,
0xFF,0xD9,0xA2, 0xCC,0xE1,0x99, 0xAE,0xEE,0xB7, 0xAA,0xF7,0xEE,
0xB3,0xEE,0xFF, 0xDD,0xDD,0xDD, 0x11,0x11,0x11, 0x11,0x11,0x11
};
/* Create the PBO */
glGenBuffers(1, &OSXRenderer.pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, OSXRenderer.pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W*H, NULL, GL_STREAM_DRAW);
/* Create the Screen Texture (400*240 pixel) */
glGenTextures(1, &OSXRenderer.ScreenTexture);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, W, H, 0,
GL_RED, GL_UNSIGNED_BYTE, OSXRenderer.Pixelbuffer.Data);
/* Create the Palette Texture (64*1 pixel) */
glGenTextures(1, &OSXRenderer.PaletteTexture);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.PaletteTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 64, 1, 0,
GL_RGB, GL_UNSIGNED_BYTE, &palette);
/* Compile and Link Shaders */
OSXRenderer.Program = OSXCreateProgram();
glUseProgram(OSXRenderer.Program);
/* Get the uniforms for the screen- and the palette-texture */
OSXRenderer.UniformTex = glGetUniformLocation(OSXRenderer.Program, "tex");
OSXRenderer.UniformPal = glGetUniformLocation(OSXRenderer.Program, "pal");
update loop:
/* Rendering Prerequesites */
glUseProgram(OSXRenderer.Program);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.PaletteTexture);
glUniform1i(OSXRenderer.UniformPal, 0);
glActiveTexture(GL_TEXTURE1);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
glUniform1i(OSXRenderer.UniformTex, 1);
/* Bind the PBO */
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, OSXRenderer.pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W*H, NULL, GL_STREAM_DRAW);
OSXRenderer.Pixelbuffer.Data = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
//
//
FillPixelBuffer();
//
//
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
/* Bind the screen texture again just to be safe,
   and fill it with the PBO data */
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, W, H, GL_RED, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
/* Render it to the screen */
glBegin(GL_QUADS);
glTexCoord2f(0.0f,1.0f);
glVertex2f(-1.0f,1.0f);
glTexCoord2f(1.0f,1.0f);
glVertex2f(1.0f,1.0f);
glTexCoord2f(1.0f,0.0f);
glVertex2f(1.0f,-1.0f);
glTexCoord2f(0.0f,0.0f);
glVertex2f(-1.0f,-1.0f);
glEnd();
/* glFlush() */
CGLFlushDrawable();
vertex shader:
# version 120
varying vec2 texcoord;
// Simple Passthrough
void main(void)
{
    gl_Position = ftransform();
    texcoord = gl_MultiTexCoord0.xy;
}
fragment shader:
# version 120
uniform sampler2D tex;
uniform sampler2D pal;
varying vec2 texcoord;
void main(void)
{
    // Get the color values of the screen texture. I only want the RED channel
    vec4 index = texture2D(tex, texcoord);
    // Get the color values of the palette texture,
    // using the screen texture's RED channel as an index
    // [EDIT] The first version of this post multiplied index.r by 255 here.
    vec4 color = texture2D(pal, vec2(index.r, 0));
    // Use it
    gl_FragColor = color;
}
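One thing worth double-checking in this setup (an observation about the index lookup, not a confirmed cause of the black screen): with the indices stored in a GL_R8 texture, index.r comes back as idx/255.0, so addressing a 64x1 palette needs an explicit remapping to hit the center of the right palette texel, and GL_NEAREST filtering on the palette texture avoids blending neighbouring entries. A minimal sketch of that lookup, assuming indices 0-63 as in the question:

// Sketch only: recover the integer index from the normalized red channel,
// then address the center of the corresponding texel in a 64x1 palette.
float idx = texture2D(tex, texcoord).r * 255.0;
vec2 palCoord = vec2((idx + 0.5) / 64.0, 0.5);
gl_FragColor = texture2D(pal, palCoord);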

OpenGL FreeType: weird texture

After I have initialized the library and loaded the texture I get http://postimg.org/image/4tzkq4uhl.
But when I added this line to the texture code:
std::vector<unsigned char> buffer(w * h, 0);
I get http://postimg.org/image/kqycmumvt.
Why is this happening when I add that specific line, and why does it seem like the letter is multiplied? I have searched examples and tutorials about FreeType, and I saw that in some of them they change the buffer array, but I didn't really understand that, so if you can explain it to me, I may handle this better.
Texture Load:
Texture::Texture(FT_GlyphSlot slot) {
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glGenTextures(1, &textureID);
    glBindTexture(GL_TEXTURE_2D, textureID);
    int w = slot->bitmap.width;
    int h = slot->bitmap.rows;
    // When I remove this line, the black rectangle below the letter reappears.
    std::vector<unsigned char> buffer(w * h, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, slot->bitmap.width, slot->bitmap.rows, 0, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, slot->bitmap.buffer);
    glGenerateMipmap(GL_TEXTURE_2D);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
}
Fragment Shader:
#version 330
in vec2 uv;
in vec4 tColor;
uniform sampler2D tex;
out vec4 color;
void main () {
    color = vec4(tColor.rgb, texture(tex, uv).a);
}
You're specifying GL_LUMINANCE_ALPHA for the format of the data you pass to glTexImage2D(). Based on the corresponding FreeType documentation I found here:
http://www.freetype.org/freetype2/docs/reference/ft2-basic_types.html#FT_Pixel_Mode
There is no FT_Pixel_Mode value specifying that the data in slot->bitmap.buffer is in fact luminance-alpha. GL_LUMINANCE_ALPHA is a format with 2 bytes per pixel, where the first byte is used for R, G, and B when the data is used to specify an RGBA image, and the second byte is used for A.
Based on the data you're showing, slot->bitmap.pixel_mode is most likely FT_PIXEL_MODE_GRAY, which means that the bitmap data is 1 byte per pixel. In this case, you need to use GL_ALPHA for the format:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, slot->bitmap.width, slot->bitmap.rows, 0,
GL_ALPHA, GL_UNSIGNED_BYTE, slot->bitmap.buffer);
If the pixel_mode is something other than FT_PIXEL_MODE_GRAY, you'll have to adjust the format accordingly, or potentially create a copy of the data if it's a format that is not supported by glTexImage2D().
The reason you get garbage if you specify GL_LUMINANCE_ALPHA instead of GL_ALPHA is that it reads twice as much data as is contained in the data you pass in. The content of the data that is read beyond the allocated bitmap data is undefined, and may well change depending on what other variables you declare/allocate.
If you want to use texture formats that are still supported in the core profile instead of the deprecated GL_LUMINANCE_ALPHA or GL_ALPHA, you can use GL_R8 instead. Since this format has only one component, instead of the four in GL_RGBA, this will also use 75% less texture memory:
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, slot->bitmap.width, slot->bitmap.rows, 0,
GL_RED, GL_UNSIGNED_BYTE, slot->bitmap.buffer);
This will also require a slight change in the shader to read the r component instead of the a component:
color = vec4(tColor.rgb, texture(tex, uv).r);
Solved it. I added the following to my code and it works well.
GLubyte* data = new GLubyte[2 * w * h];
for (int y = 0; y < slot->bitmap.rows; y++)
{
    for (int x = 0; x < slot->bitmap.width; x++)
    {
        data[2 * (x + y * w)] = 255;
        data[2 * (x + y * w) + 1] = slot->bitmap.buffer[x + slot->bitmap.width * y];
    }
}
I don't know what happened with that particular line I added, but now it works.
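For completeness, the buffer built above would presumably be handed to glTexImage2D in place of slot->bitmap.buffer; that call is not shown in the post, so the following is an assumption about how it is used:

// Assumed upload matching the two-bytes-per-pixel (luminance, alpha) layout built above.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
             GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, data);
delete[] data; // the snippet allocates with new[], so release it after the upload

This lines up with the explanation in the other answer: GL_LUMINANCE_ALPHA makes glTexImage2D read two bytes per pixel, while slot->bitmap.buffer only holds one, so building an explicit two-byte-per-pixel buffer avoids reading past the end of FreeType's bitmap.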

How to use GL_TEXTURE_2D_ARRAY in OpenGL 3.2

So I've tried following the docs; however, I can't seem to get a 2D texture array to work.
-(GLint)buildTextureArray:(NSArray *)arrayOfImages
{
    GLImage *sample = [GLImage imageWithImageName:[arrayOfImages objectAtIndex:0] shouldFlip:NO]; // Creates a sample to examine texture width and height
    int width = sample.width, height = sample.height;
    GLsizei count = (GLsizei)arrayOfImages.count;
    GLuint texture3D;
    glGenTextures(1, &texture3D);
    glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA8, width, height, count, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    int i = 0;
    for (NSString *name in arrayOfImages) // Loops through everything in arrayOfImages
    {
        GLImage *image = [GLImage imageWithImageName:name shouldFlip:NO]; // My own class that loads an image
        glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, image.width, image.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
        i++;
    }
    return texture3D;
}
//Setting Uniform elsewhere
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glUniform1i(textures, 0);
//Fragment Shader
#version 150
in vec3 texCoords;
uniform sampler2DArray textures;
out vec3 color;
void main()
{
    color = texture(textures, texCoords.stp, 0).rgb;
}
I am able to load individual textures with the same texture parameters, but I can't get it to work with the texture 2D array. All I get is a black texture. Why is this happening?
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
Your texture in fact does not have mipmaps. So stop telling OpenGL that it does.
Also, always set the mipmap range parameters (GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL) for your texture. Or better yet, use texture storage to allocate your texture's storage, and it will do it for you.
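A minimal sketch of the texture-storage route suggested here, reusing the names from the question (note that glTexStorage3D needs GL 4.2 or ARB_texture_storage, so on a strict 3.2 context you would instead keep glTexImage3D and set GL_TEXTURE_MAX_LEVEL to 0; layerPixels is a placeholder for the per-image data):

// Allocate an immutable 2D array texture with exactly one mip level,
// then upload each layer into it.
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
glTexStorage3D(GL_TEXTURE_2D_ARRAY, 1, GL_RGBA8, width, height, count);
for (GLsizei layer = 0; layer < count; ++layer) {
    const void* layerPixels = /* image data for this layer, e.g. image.data */ NULL;
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, layer,
                    width, height, 1, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, layerPixels);
}
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmaps allocated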
For 2D array textures, the s and t components of the texture coordinate are normalized as usual, but the third component is the layer index and is not normalized: it ranges from 0 to the number of layers minus 1. Try changing these texture coordinates.