I have a problem with a very simple compute shader that just copies a texture using imageStore.
#define KS 16 // kernel size
layout (local_size_x = KS, local_size_y = KS) in;
layout(location = 0) uniform sampler2D u_inputTex;
layout(location = 1) uniform writeonly image2D u_outImg;
void main()
{
    const ivec2 gid = ivec2(gl_WorkGroupID.xy);
    const ivec2 tid = ivec2(gl_LocalInvocationID.xy);
    const ivec2 pixelPos = ivec2(KS) * gid + tid;
    imageStore(u_outImg, pixelPos,
               uvec4(255.0 * texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
}
On the C++ side, I have this:
int w, h;
u32 inTex = -1;
{
    int nc;
    auto img = stbi_load("imgs/Windmill_NOAA.png", &w, &h, &nc, 3);
    if (img) {
        glGenTextures(1, &inTex);
        glBindTexture(GL_TEXTURE_2D, inTex);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        stbi_image_free(img);
    }
    else
        printf("Error loading img\n");
}
u32 outTex;
{
    glGenTextures(1, &outTex);
    glBindTexture(GL_TEXTURE_2D, outTex);
    glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, w, h);
}
u32 compProg = easyCreateComputeShaderProg("compute", shader_srcs::computeSrc);
glUseProgram(compProg);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, inTex);
glBindImageTexture(0, outTex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8UI);
glUniform1i(0, 0);
glUniform1i(1, 0);
glDispatchCompute((w+15)/16, (h+15)/16, 1);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); // make sure the output image has been written
u8* img = new u8[w * h * 4];
glBindTexture(GL_TEXTURE_2D, outTex);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, img);
stbi_write_png("imgs/out.png", w, h, 1, img, w*4);
delete[] img;
The input image looks like this:
But this is what I get in the output image:
I simplified the shader further: instead of reading from the input texture, I just write a fixed value:
imageStore(u_outImg, pixelPos,
//uvec4(255.0 * texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
uvec4(1u));
I have noticed that:
If I write 0 it's all black
If I write 255 it's all white
But if I write something in the middle (100 for example) it's not grey but white as well
I have also tried it like this, but it didn't work either:
imageStore(u_outImg, pixelPos,
vec4(texelFetch(u_inputTex, pixelPos, 0).rgb, 255u));
What am I doing wrong? My end goal is to make a postprocessing filter, but I couldn't get it to work, so I tried to make it as simple as possible, and yet it still doesn't work.
Minimal example repo: https://github.com/tuket/stackoverflow_image_store_problem
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, w, h);
If you want to use an unnormalized unsigned integer image, you must declare it as uimage2D in the shader. image2D is only for floating-point and normalized-integer (range [0, 1]) formats.
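Applied to the shader above, the fix is a one-keyword change to the declaration; the existing imageStore with a uvec4 value then matches the GL_RGBA8UI storage:
layout(location = 1) uniform writeonly uimage2D u_outImg;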
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
That's the wrong memory barrier. The barrier bits describe how you are going to access, after the barrier, the resources your shader modified, so the correct one here is:
GL_TEXTURE_UPDATE_BARRIER_BIT
which is explained in the reference page as follows:
Writes to a texture via glTex(Sub)Image*, glCopyTex(Sub)Image*,
glCompressedTex(Sub)Image*, and reads via glGetTexImage after the
barrier will reflect data written by shaders prior to the barrier.
Additionally, texture writes from these commands issued after the
barrier will not execute until all shader writes initiated prior to
the barrier complete.
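Applied to the code above, that means swapping the barrier bit between the dispatch and the readback; a minimal sketch:
glDispatchCompute((w + 15) / 16, (h + 15) / 16, 1);
// the result is read back with glGetTexImage, so this is the bit to use
glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, img);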
I am attempting to create an empty 3D texture, the dimensions and format of which are loaded in at runtime. I then want to modify the values of this texture, which I am then volume rendering with a ray tracer. I know my rendering function works fine, as I can render the volume that the dimensions and format come from without a problem. The empty volume also renders, but I am unable to write any data to it, and so it is just white all the way through.
//My function that creates the blank texture initially
//Its part of a larger class that reads in a populated volume and a transfer function,
//I'm just initialising it in this way so it is identical to the other volume, but empty
GLuint Texture3D::GenerateBlankTexture(VolumeDataset volume)
{
GLuint tex;
glEnable(GL_TEXTURE_3D);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_3D, tex);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Reverses endianness in copy
if (!volume.littleEndian)
glPixelStoref(GL_UNPACK_SWAP_BYTES, true);
if (volume.elementType == "MET_UCHAR")
{
// target, mip level, internal format, width/height/depth, border, pixel format, data type, data
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R8);
}
else if (volume.elementType == "SHORT")
{
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_UNSIGNED_SHORT, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R16F);
}
else if (volume.elementType == "FLOAT")
{
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32F, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_FLOAT, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R32F);
}
glPixelStoref(GL_UNPACK_SWAP_BYTES, false);
GLenum err = glGetError();
glBindTexture(GL_TEXTURE_3D, 0);
return tex;
}
With the volume created, I then read it into a compute shader in my display function:
glUseProgram(Compute3DShaderID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, tex_output);
glDispatchCompute((GLuint)volume.xRes/4, (GLuint)volume.yRes/4, (GLuint)volume.zRes/4);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
Within my shader, all I'm trying to do is change the colour based on its position in the volume:
#version 430
layout (local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
layout (r8, binding = 0) uniform image3D tex_output;
void main()
{
ivec3 dims = imageSize (tex_output);
ivec3 pixel_coords = ivec3(gl_GlobalInvocationID.xyz);
vec4 pixel = vec4(pixel_coords.x/dims.x, pixel_coords.y/dims.y, pixel_coords.y/dims.y, 1.0);
imageStore (tex_output, pixel_coords, pixel);
}
I'm sure the error is something to do with write access being denied, but I can't pinpoint exactly what it is.
Note: I'm using GL_RED and such because this is volume data, and this is how I have it in the rest of my volume renderer and it seems to work fine.
So, stupid mistake. Turns out my shaders were working fine. What I hadn't anticipated was that the values I was attempting to write to the volume mapped to a white colour on my transfer function. Once I pulled up the schematic for the transfer function, and tested with values that should work fine, I got actual colours.
For anyone seeing this question in the future: if your code isn't working, it should go as follows:
Create your texture and allocate its storage using glTexImage3D. Then, when you wish to use it, call glBindImageTexture and draw, making sure you set layered to GL_TRUE since it's a 3D texture. Also make sure that you bind to the correct binding (in my code above I bind to 0, but I've since added a second texture that's bound to 1) and unbind if you're going to use a second set of textures and shaders.
If you're having trouble, set it so that every dispatch of your compute shader adds 0.01 to the final value, so you can see the colour change in real time.
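A minimal sketch of that debugging shader, assuming the r8 image is bound at binding 0 as in the code above (it must be declared without writeonly so it can be read back):
#version 430
layout (local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
layout (r8, binding = 0) uniform image3D tex_output;
void main()
{
    ivec3 p = ivec3(gl_GlobalInvocationID.xyz);
    // add 0.01 per dispatch so the colour visibly ramps up in real time
    imageStore(tex_output, p, imageLoad(tex_output, p) + vec4(0.01));
}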
I've created an array of 2D textures and initialized it with glTexImage3D. Then I attached individual layers to color attachments with glFramebufferTextureLayer. Framebuffer creation doesn't report an error, and everything seems fine until the draw call happens.
When shader tries to access color attachment the following message appears:
OpenGL Debug Output message : Source : API; Type : ERROR; Severity : HIGH;
GL_INVALID_OPERATION error generated. <location> is invalid.
The shaders access the layers of the array with location qualifiers:
layout (location = 0) out vec3 WorldPosOut;
layout (location = 1) out vec3 DiffuseOut;
layout (location = 2) out vec3 NormalOut;
layout (location = 3) out vec3 TexCoordOut;
The documentation says that glFramebufferTextureLayer works just like glFramebufferTexture2D except for the layer parameter, so can I use location qualifiers with a texture array, or does some other way exist?
I finally managed to bind a texture array as a color buffer. It is hard to find useful information on the topic, so here are instructions:
№1. You need to create a texture array and initialize it properly:
glGenTextures(1, &arrayBuffer);
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayBuffer);
// we should initialize the layers for each mipmap level;
// each level must be half the size of the previous one (layerCount stays the same)
for (int mip = 0; mip < mipLevelCount; ++mip) {
    int mipWidth  = std::max(ImageWidth  >> mip, 1);
    int mipHeight = std::max(ImageHeight >> mip, 1);
    glTexImage3D(GL_TEXTURE_2D_ARRAY, mip, internalFormat, mipWidth, mipHeight,
                 layerCount, 0, GL_RGB, GL_UNSIGNED_INT, 0);
}
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, textureFilter);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, textureFilter);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAX_LEVEL, mipLevelCount - 1);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Keep in mind that setting texture parameters like the MIN/MAG filters and the BASE/MAX mipmap levels is important. OpenGL sets the maximum mipmap level to 1000 by default, and if you didn't provide the whole thousand mipmaps you will get an incomplete texture, and you won't see anything except a black screen.
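If OpenGL 4.2+ is available, a simpler sketch is to allocate every level in one call with immutable storage (this assumes internalFormat is a sized format such as GL_RGB8, which glTexStorage3D requires):
glTexStorage3D(GL_TEXTURE_2D_ARRAY, mipLevelCount, internalFormat,
               ImageWidth, ImageHeight, layerCount);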
№2. Don't forget to bind arrayBuffer to the GL_TEXTURE_2D_ARRAY target before attaching the layers to the color buffers:
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayBuffer);
for (unsigned int i = 0; i < NUMBER_OF_TEXTURES; i++) {
glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, arrayBuffer, 0, i);
}
Don't forget to set the GL_TEXTURE_2D_ARRAY target to 0 with glBindTexture or it can get modified outside of the initialization code.
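That is, right after the attachment loop:
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);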
№3. Since the internalFormat of each image in the array must stay the same, I recommend creating a separate texture for the depth/stencil buffer:
glGenTextures(1, &m_depthTexture);
...
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH32F_STENCIL8, WindowWidth,
WindowHeight, 0, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, m_depthTexture, 0);
Don't forget to set up an index for each color buffer:
GLenum DrawBuffers[GBUFFER_NUM_TEXTURES];
for (int i = 0; i < GBUFFER_NUM_TEXTURES; ++i)
    DrawBuffers[i] = GL_COLOR_ATTACHMENT0 + i; // sets the appropriate index for each color buffer
glDrawBuffers(ARRAY_SIZE_IN_ELEMENTS(DrawBuffers), DrawBuffers);
In shaders you can use layout(location = n) qualifiers to specify the color buffer.
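For example, matching the attachment loop above (location n writes to GL_COLOR_ATTACHMENT0 + n, i.e. layer n):
layout (location = 0) out vec3 WorldPosOut; // layer 0
layout (location = 1) out vec3 DiffuseOut;  // layer 1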
OpenGL 3 Note (NVIDIA): glFramebufferTextureLayer is available since OpenGL 3.2 (core profile), but on NVIDIA GPUs the drivers will force the OpenGL version to 4.5, so you should specify the exact version of OpenGL if you care about compatibility. I use SDL2 in my application, so I use the following calls to set the OpenGL version:
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
Results of the deferred shading:
I am working on a game with 8-bit graphics. I provide a pixel buffer (OSXRenderer.pbo) to my game loop to fill up, then texsubimage it onto a texture (OSXRenderer.ScreenTexture). The texture is rendered to the screen via a quad.
I got it working without problems with an RGB PBO (size: width*height*3). But now I want the PBO to be indexed color, so I load a palette into another texture (OSXRenderer.PaletteTexture) and changed my PBO (size: width*height).
How I figure it should work: the PBO gets filled with noise (random uint8, 0-63), the screen texture gets texsubimaged, and when rendering it onto the screen via the quad, my fragment shader replaces all the RED channel values with the corresponding colors from my palette, and I get 8-bit noise on the screen.
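The lookup I have in mind is something like this (a sketch, assuming a stored index i reads back from the normalized R8 texture as i/255.0 and the palette is 64 texels wide):
// recover the 0-63 index from the normalized red channel,
// then sample the center of the matching palette texel
float idx = texture2D(tex, texcoord).r * 255.0;
vec4 color = texture2D(pal, vec2((idx + 0.5) / 64.0, 0.5));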
But I simply can't get it to work: I only get a black screen. If I set my fragcolor to the incoming screen-texture (PBO) data, I get red noise, just as expected.
[EDIT]
I tested the fragment shader's "color" variable: its values are always 0.0, except alpha, which is always 1.0.
setup:
static uint8 palette[] = {
0x80,0x80,0x80, 0x00,0x00,0xBB, 0x37,0x00,0xBF, 0x84,0x00,0xA6,
0xBB,0x00,0x6A, 0xB7,0x00,0x1E, 0xB3,0x00,0x00, 0x91,0x26,0x00,
0x7B,0x2B,0x00, 0x00,0x3E,0x00, 0x00,0x48,0x0D, 0x00,0x3C,0x22,
0x00,0x2F,0x66, 0x00,0x00,0x00, 0x05,0x05,0x05, 0x05,0x05,0x05,
0xC8,0xC8,0xC8, 0x00,0x59,0xFF, 0x44,0x3C,0xFF, 0xB7,0x33,0xCC,
0xFF,0x33,0xAA, 0xFF,0x37,0x5E, 0xFF,0x37,0x1A, 0xD5,0x4B,0x00,
0xC4,0x62,0x00, 0x3C,0x7B,0x00, 0x1E,0x84,0x15, 0x00,0x95,0x66,
0x00,0x84,0xC4, 0x11,0x11,0x11, 0x09,0x09,0x09, 0x09,0x09,0x09,
0xFF,0xFF,0xFF, 0x00,0x95,0xFF, 0x6F,0x84,0xFF, 0xD5,0x6F,0xFF,
0xFF,0x77,0xCC, 0xFF,0x6F,0x99, 0xFF,0x7B,0x59, 0xFF,0x91,0x5F,
0xFF,0xA2,0x33, 0xA6,0xBF,0x00, 0x51,0xD9,0x6A, 0x4D,0xD5,0xAE,
0x00,0xD9,0xFF, 0x66,0x66,0x66, 0x0D,0x0D,0x0D, 0x0D,0x0D,0x0D,
0xFF,0xFF,0xFF, 0x84,0xBF,0xFF, 0xBB,0xBB,0xFF, 0xD0,0xBB,0xFF,
0xFF,0xBF,0xEA, 0xFF,0xBF,0xCC, 0xFF,0xC4,0xB7, 0xFF,0xCC,0xAE,
0xFF,0xD9,0xA2, 0xCC,0xE1,0x99, 0xAE,0xEE,0xB7, 0xAA,0xF7,0xEE,
0xB3,0xEE,0xFF, 0xDD,0xDD,0xDD, 0x11,0x11,0x11, 0x11,0x11,0x11
};
/* Create the PBO */
glGenBuffers(1, &OSXRenderer.pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, OSXRenderer.pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W*H, NULL, GL_STREAM_DRAW);
/* Create the Screen Texture (400*240 pixel) */
glGenTextures(1, &OSXRenderer.ScreenTexture);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, W, H, 0,
GL_RED, GL_UNSIGNED_BYTE, OSXRenderer.Pixelbuffer.Data);
/* Create the Palette Texture (64*1 pixel) */
glGenTextures(1, &OSXRenderer.PaletteTexture);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.PaletteTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 64, 1, 0,
GL_RGB, GL_UNSIGNED_BYTE, &palette);
/* Compile and Link Shaders */
OSXRenderer.Program = OSXCreateProgram();
glUseProgram(OSXRenderer.Program);
/* Get the uniforms for the screen- and the palette-texture */
OSXRenderer.UniformTex = glGetUniformLocation(OSXRenderer.Program, "tex");
OSXRenderer.UniformPal = glGetUniformLocation(OSXRenderer.Program, "pal");
update loop:
/* Rendering Prerequesites */
glUseProgram(OSXRenderer.Program);
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.PaletteTexture);
glUniform1i(OSXRenderer.UniformPal, 0);
glActiveTexture(GL_TEXTURE1);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
glUniform1i(OSXRenderer.UniformTex, 1);
/* Bind the PBO */
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, OSXRenderer.pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, W*H, NULL, GL_STREAM_DRAW);
OSXRenderer.Pixelbuffer.Data = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
//
//
FillPixelBuffer();
//
//
glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
glBindTexture(GL_TEXTURE_2D, OSXRenderer.ScreenTexture);
/* Bind the screentexture again just to be save
and fill it with the PBO data */
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, W, H, GL_RED, GL_UNSIGNED_BYTE, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
/* Render it to the screen */
glBegin(GL_QUADS);
glTexCoord2f(0.0f,1.0f);
glVertex2f(-1.0f,1.0f);
glTexCoord2f(1.0f,1.0f);
glVertex2f(1.0f,1.0f);
glTexCoord2f(1.0f,0.0f);
glVertex2f(1.0f,-1.0f);
glTexCoord2f(0.0f,0.0f);
glVertex2f(-1.0f,-1.0f);
glEnd();
/* glFlush() */
CGLFlushDrawable();
vertexshader:
#version 120
varying vec2 texcoord;
// Simple Passthrough
void main(void)
{
gl_Position = ftransform();
texcoord = gl_MultiTexCoord0.xy;
}
fragmentshader:
#version 120
uniform sampler2D tex;
uniform sampler2D pal;
varying vec2 texcoord;
void main(void)
{
// Get the color values of the screen-texture. I only want the RED channel
vec4 index = texture2D(tex, texcoord);
// Get the color values of the palette texture
// using the screen-texture's RED channel as an index
//[EDIT] The first version of this post multiplied index.r by 255 here.
vec4 color = texture2D(pal, vec2(index.r, 0));
// Use it
gl_FragColor = color;
}
I'm trying to figure out how to render an object (a cube) with a different texture on each face. For simplicity's sake, I have 2 textures that are each applied to 3 faces of the cube. I understand that I should be using texture arrays with 3-component texture coordinates, where the 3rd coordinate selects the relevant texture. I'm just unsure of how to do this and how to code my fragment shader.
Here is the relevant part of my init() function:
final String textureName = model.getTextures().get(i).textureName;
final FileTexture textureGenerator = new FileTexture(this.getClass().getResourceAsStream(textureName),
true, context);
textureId = textureGenerator.getTextureId();
width = textureGenerator.getWidth();
height = textureGenerator.getHeight();
textureMap.put(model.getTextures().get(i).matName, textureId);
context.getGL().glActiveTexture(GL.GL_TEXTURE0 + i);
context.getGL().glBindTexture(GL.GL_TEXTURE_2D, textureId);
I am slightly confused here, however, because the Orange Book (OpenGL Shading Language) gives examples in which glActiveTexture and glBindTexture are used, but the GLSL Common Mistakes page says you shouldn't do this.
From there, my display() function looks like this:
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 3 * 4, getVertices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getTexCoordBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 2 * 4, getTexCoords(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, getNoOfIndices() * 4, getIndices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getColorBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 4 * 4, getColors(), GL.GL_STREAM_DRAW);
layerTextureShader.use(gl);
gl.glEnableClientState(GL.GL_VERTEX_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
gl.glEnableClientState(GL.GL_COLOR_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, mask ? getMaskColorBufferObject() : getColorBufferObject());
gl.glColorPointer(4, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
final int count = getNoOfIndices();
gl.glDrawElements(GL.GL_TRIANGLES, count, GL.GL_UNSIGNED_INT, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glDisableClientState(GL.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL.GL_COLOR_ARRAY);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
layerTextureShader.release(gl);
I am unsure of what to put in my GLSL shaders. My vertex shader has the standard gl_TexCoord[0] = gl_MultiTexCoord0; and my fragment shader looks like:
uniform sampler2D texture;
void main()
{
gl_FragColor = texture2D(texture, gl_TexCoord[0].st);
}
How do I instruct the fragment shader on which texture to use? I assume it's done while populating the vertex, index, and texture-coordinate buffers, by passing in this 3rd texture coordinate for each point? Is the value of this 3rd coordinate the index of the relevant texture?
I hope my question makes sense and thanks for any help.
Chris
What you are looking for is a cube map. In OpenGL, you can define six textures at once (representing the six sides of a cube) and map them using 3D texture coordinates instead of the common 2D texture coordinates. For a simple cube, the texture coordinates would be the same as the vertices' respective normals. (If you will only be texturing plain cubes in this manner, you can consolidate normals and texture coordinates in your vertex shader, too!) Cube maps are much simpler than trying to bind six distinct textures simultaneously the way you are doing right now.
GLuint mHandle;
glGenTextures(1, &mHandle); // create your texture normally
// Bind first so the parameter calls below affect this texture.
// Note the target being used instead of GL_TEXTURE_2D!
glBindTexture(GL_TEXTURE_CUBE_MAP, mHandle);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Now, load in your six distinct images. They need to be the same dimensions!
// Notice the targets being specified: the six sides of the cube map.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data1);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data2);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data3);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data4);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data5);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data6);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
// And of course, after you are all done using the textures...
glDeleteTextures(1, &mHandle);
Now, when doing your shaders, you need the vertex shader to accept and/or pass 3D coordinates (vec3) instead of 2D coordinates (vec2).
// old GLSL style
attribute vec3 inTextureCoordinate;
varying vec3 vTextureCoordinate;
// more recent GLSL
in vec3 inTextureCoordinate;
out vec3 vTextureCoordinate;
In this example, your vertex shader would simply assign vTextureCoordinate = inTextureCoordinate. Your fragment shader then needs to accept that texture coordinate and sample the cube map uniform.
uniform samplerCube cubeMap;
...
gl_FragColor = textureCube(cubeMap, vTextureCoordinate);
Whew! That was a lot. Did I leave anything out?