Compute shader not modifying 3D texture - OpenGL

I want to move the initialization of my 3D texture from the CPU to the GPU. As a test, I wrote a shader to set all voxels to a constant value, but the texture is not modified at all. How do I make it work?
Compute shader:
#version 430
layout(local_size_x=1, local_size_y=1, local_size_z=1) in;
layout(r8, location = 0) uniform image3D volume;
void main()
{
imageStore(volume, ivec3(gl_WorkGroupID), vec4(0));
}
Invocation:
glEnable(GL_TEXTURE_3D);
glActiveTexture(GL_TEXTURE0);
glGenTextures(1, &volume_tid);
glBindTexture(GL_TEXTURE_3D, volume_tid);
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, volume_dims[0], volume_dims[1], volume_dims[2], 0, GL_RED, GL_UNSIGNED_BYTE, voxels);
ShaderProgram computeVolumeShader;
computeVolumeShader.loadShader(GL_COMPUTE_SHADER, "compute_volume.glsl");
computeVolumeShader.link();
computeVolumeShader.use();
computeVolumeShader.uniform("volume", 0);
glBindImageTexture(0, volume_tid, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8);
glDispatchCompute(volume_dims[0], volume_dims[1], volume_dims[2]);
glBindImageTexture(0, 0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8);
computeVolumeShader.unUse();
glMemoryBarrier(GL_ALL_BARRIER_BITS);
Note: the voxels buffer fed into glTexImage3D contains the CPU-initialized data.

Ugh. So apparently a 3D texture has to be bound as layered, otherwise the shader isn't able to modify any uvw coordinate with w>0.
glBindImageTexture(0, volume_tid, 0, /*layered=*/GL_TRUE, 0, GL_READ_WRITE, GL_R8);
does the trick.
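For reference, a minimal sketch of the corrected invocation (same volume_tid, volume_dims, and ShaderProgram wrapper as above); the only functional change is layered = GL_TRUE, plus making sure the image writes are barriered before the texture is sampled:
computeVolumeShader.use();
computeVolumeShader.uniform("volume", 0);   // image unit 0
// Bind the whole 3D texture as a layered image so every slice is writable.
glBindImageTexture(0, volume_tid, 0, /*layered=*/GL_TRUE, 0, GL_READ_WRITE, GL_R8);
glDispatchCompute(volume_dims[0], volume_dims[1], volume_dims[2]);
// Make the image stores visible to later texture fetches.
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
glBindImageTexture(0, 0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8);
computeVolumeShader.unUse();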

Why is there no output to the framebuffer's textures?

Here is the code:
int main(){
//init gl environment
//...
//create textures for pass 1
GLuint normal_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &normal_color_output);
glTextureStorage2DMultisample(normal_color_output, 8, GL_RGBA32F, 1000, 800, GL_TRUE);
GLuint high_color_output;
glCreateTextures(GL_TEXTURE_2D_MULTISAMPLE, 1, &high_color_output);
glTextureStorage2DMultisample(high_color_output,8, GL_R11F_G11F_B10F, 1000, 800,GL_TRUE);
//init framebuffer
GLuint render_buffer;
glCreateRenderbuffers(1, &render_buffer);
glNamedRenderbufferStorageMultisample(render_buffer, 8, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint framebuffer;
glCreateFramebuffers(1, &framebuffer);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT0, normal_color_output,0);
glNamedFramebufferTexture(framebuffer, GL_COLOR_ATTACHMENT1, high_color_output, 0);
glNamedFramebufferRenderbuffer(framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, render_buffer);
const GLenum drawbuffers[] = {GL_COLOR_ATTACHMENT0,GL_COLOR_ATTACHMENT1};
glNamedFramebufferDrawBuffers(framebuffer, 2, drawbuffers);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
//init another framebuffer
//What I want to do is implement my own MSAA color-resolve solution.
GLuint mix_framebuffer;
glCreateFramebuffers(1, &mix_framebuffer);
GLuint mix_renderbuffer;
glCreateRenderbuffers(1, &mix_renderbuffer);
glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800);
GLuint normal_antialiasing_texture, hdr_antialiasing_texture;
glCreateTextures(GL_TEXTURE_2D, 1, &normal_antialiasing_texture);
glTextureStorage2D(normal_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glCreateTextures(GL_TEXTURE_2D, 1, &hdr_antialiasing_texture);
glTextureStorage2D(hdr_antialiasing_texture, 1, GL_RGBA32F, 1000, 800);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT0, normal_antialiasing_texture, 0);
glNamedFramebufferTexture(mix_framebuffer, GL_COLOR_ATTACHMENT1, hdr_antialiasing_texture, 0);
glNamedFramebufferDrawBuffers(mix_framebuffer,2, drawbuffers);
glNamedFramebufferRenderbuffer(mix_framebuffer, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, mix_renderbuffer);
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
//....
//draw commands
while (!glfwWindowShouldClose(window)) {
// pass 1
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glUseProgram(program);
glUniformMatrix4fv(3, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
model.Render(program);
glPointSize(20.f);
glUseProgram(light_shader);// I draw a point to show the light's position
glUniformMatrix4fv(0, 1, GL_FALSE, glm::value_ptr(camera.GetViewMat()));
glDrawArrays(GL_POINTS, 0, 1);
//pass 2
glBindFramebuffer(GL_FRAMEBUFFER, mix_framebuffer);
glUseProgram(mix_program);
glBindTextureUnit(0, normal_color_output);
glBindTextureUnit(1, high_color_output);
glClear(GL_COLOR_BUFFER_BIT);
glNamedFramebufferDrawBuffers(mix_framebuffer, 2, drawbuffers);
glDrawArrays(GL_POINTS, 0, 1);
//...
}
}
I use a geometry shader to model a square; here is the code:
//mix_gs.glsl
#version 450 core
layout(points) in;
layout(triangle_strip) out;
layout(max_vertices = 4) out;
void main(){
gl_Position = vec4(-1,1,-1,1);
EmitVertex();
gl_Position = vec4(-1,-1,-1,1);
EmitVertex();
gl_Position = vec4(1,1,-1,1);
EmitVertex();
gl_Position = vec4(1,-1,-1,1);
EmitVertex();
EndPrimitive();
}
Here is mix_fs.glsl:
#version 450 core
layout(location = 0)out vec4 color;
layout(location = 1)out vec4 hdr_color;
layout(binding = 0) uniform sampler2DMS color_sdms;
layout(binding = 1) uniform sampler2DMS hdr_sdms;
void main(){
/*
for(int i=0;i<8;i++){
color += texelFetch(color_sdms,ivec2(gl_FragCoord.xy),i);
hdr_color += vec4(texelFetch(hdr_sdms,ivec2(gl_FragCoord.xy),i).xyz,1);
}
*/
color = vec4(1,0,0,1);//I just output a color
hdr_color = vec4(0,1,0,1);
}
The problem I've encountered is that during draw pass 2, GL does not output any color to the textures bound to mix_framebuffer.
Here is the debugging info in RenderDoc (screenshots of draw pass 1's texture output, draw pass 2's geometry output, draw pass 2's texture input, and draw pass 2's texture output).
You can see that draw pass 1's output is passed to draw pass 2's pipeline successfully, but there is no output to draw pass 2's textures. I don't know why.
If you don't see even the plain color, the first thing I'd recommend is checking how it gets discarded. There aren't many options:
glColorMask. Highly likely it's not your case, since pass 1 works;
Wrong face culling and polygon winding order (CW, CCW). Judging by your geometry shader, it looks like CW;
Blending options;
Depth-stencil state. I see you use glNamedRenderbufferStorage(mix_renderbuffer, GL_DEPTH24_STENCIL8, 1000, 800); what are your depth-stencil settings?
If everything above looks good, are there any glGetError messages? If that's clean, try removing MRT for debugging purposes and output only a single color in the second pass. If that works, the error is probably somewhere in the MRT + depth buffer setup.
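As a concrete starting point, a small debug sketch (a fragment to drop into the render loop, assuming none of this state is actually needed for the fullscreen pass) that neutralizes the states listed above right before the pass-2 draw and then checks for errors:
// Temporarily rule out the usual suspects for the fullscreen quad.
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);   // color mask
glDisable(GL_CULL_FACE);                           // culling / winding order
glDisable(GL_BLEND);                               // blending
glDisable(GL_DEPTH_TEST);                          // depth-stencil state
glDisable(GL_STENCIL_TEST);
glDrawArrays(GL_POINTS, 0, 1);
GLenum err = glGetError();
if (err != GL_NO_ERROR)
    fprintf(stderr, "GL error in pass 2: 0x%x\n", err);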

Writing to an empty 3D texture in a Compute Shader

I am attempting to create an empty 3D texture, the dimensions and format of which are loaded in at runtime. I then want to modify the values of this texture, which I am then volume rendering with a ray tracer. I know my rendering function works fine, as I can render the volume that the dimensions and format come from without a problem. The empty volume also renders, but I am unable to write any data to it, and so it is just white all the way through.
//My function that creates the blank texture initially.
//It's part of a larger class that reads in a populated volume and a transfer function;
//I'm just initialising it this way so it is identical to the other volume, but empty.
GLuint Texture3D::GenerateBlankTexture(VolumeDataset volume)
{
GLuint tex;
glEnable(GL_TEXTURE_3D);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_3D, tex);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Reverses endianness in copy
if (!volume.littleEndian)
glPixelStoref(GL_UNPACK_SWAP_BYTES, true);
if (volume.elementType == "MET_UCHAR")
{
// target, mip level, internal format, width, height, depth, border, pixel format, data type, data
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R8);
}
else if (volume.elementType == "SHORT")
{
glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_UNSIGNED_SHORT, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R16F);
}
else if (volume.elementType == "FLOAT")
{
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32F, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_FLOAT, NULL);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R32F);
}
glPixelStoref(GL_UNPACK_SWAP_BYTES, false);
GLenum err = glGetError();
glBindTexture(GL_TEXTURE_3D, 0);
return tex;
}
With the volume created, I then read it into a compute shader in my display function:
glUseProgram(Compute3DShaderID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, tex_output);
glDispatchCompute((GLuint)volume.xRes/4, (GLuint)volume.yRes/4, (GLuint)volume.zRes/4);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
Within my shader, all I'm trying to do is change the colour based on its position in the volume:
#version 430
layout (local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
layout (r8, binding = 0) uniform image3D tex_output;
void main()
{
ivec3 dims = imageSize (tex_output);
ivec3 pixel_coords = ivec3(gl_GlobalInvocationID.xyz);
vec4 pixel = vec4(pixel_coords.x/dims.x, pixel_coords.y/dims.y, pixel_coords.y/dims.y, 1.0);
imageStore (tex_output, pixel_coords, pixel);
}
I'm sure the error is something to do with access to writing being denied, but I can't pinpoint exactly what it is.
Note: I'm using GL_RED and such because this is volume data, and this is how I have it in the rest of my volume renderer and it seems to work fine.
So, stupid mistake. Turns out my shaders were working fine. What I hadn't anticipated was that the values I was attempting to write to the volume mapped to a white colour on my transfer function. Once I pulled up the schematic for the transfer function, and tested with values that should work fine, I got actual colours.
For anyone seeing this question in the future: if your code isn't working, it should be as follows:
Create your texture and allocate its storage with glTexImage3D. Then, when you wish to use it, call glBindImageTexture and draw, making sure you set layered to GL_TRUE since it's a 3D texture. Also make sure that you bind to the correct binding (in my code above I bind to 0, but I've since added a second texture that's bound to 1), and unbind if you're going to use a second set of textures and shaders.
If you're having trouble, set it so that every iteration of your compute shader adds 0.01 to the value, so you can see the colour change in real time.
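A minimal sketch of that sequence, reusing the GL_R8 path and the names from the code above (Compute3DShaderID, volume.xRes, and so on):
// Create the empty 3D texture (no data pointer, storage only).
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_3D, tex);
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, volume.xRes, volume.yRes, volume.zRes, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_3D, 0);
// When writing to it: bind it as a *layered* image on binding 0, dispatch, then barrier.
glUseProgram(Compute3DShaderID);
glBindImageTexture(0, tex, 0, GL_TRUE, 0, GL_READ_WRITE, GL_R8);
glDispatchCompute((GLuint)volume.xRes / 4, (GLuint)volume.yRes / 4, (GLuint)volume.zRes / 4);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
// Unbind before switching to another texture/shader set.
glBindImageTexture(0, 0, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R8);
glUseProgram(0);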

Compute Shader cannot sample Sampler2D

I am trying to implement a new project using Tiled Deferred Shading with GLFW, but I cannot access sampler2Ds for some reason. I've run this code before in another one of my projects, so I don't know if it's a GLFW issue or a driver issue (using NVIDIA 358.50 drivers). Here's the code, where depthTexture is a sampler2D:
void main()
{
if(gl_LocalInvocationIndex == 0){
minDepth = 0xFFFFFFFF;
maxDepth = 0;
}
//Use the GlobalInvocationID as a pixel position since we don't have built-in GLSL features for this
ivec2 pixelPosition = ivec2(gl_GlobalInvocationID.xy);
//ivec2 texCoord = getUVCoordinates(pixelPosition);
vec2 texCoord = vec2(pixelPosition.x / SCREEN_WIDTH, pixelPosition.y / SCREEN_HEIGHT);
float depthFloat = texture(depthTexture, texCoord).z;
//float depthFloat = 0.6f;
//Atomics only work on integers
uint depthInt = uint(depthFloat * 0xFFFFFFFF);
//Calculate the min/max depth of this work group/tile
atomicMin(minDepth, depthInt);
atomicMax(maxDepth, depthInt);
//A barrier is required at this point since we need all the calculations to be done before we proceed
barrier();
imageStore(finalImage, pixelPosition, vec4(vec3(depthFloat),1.0f));
imageStore(otherImage, pixelPosition, vec4(float(float(minDepth) / float(0xFFFFFFFF))));
barrier();
};
And here is how I bind the textures. I can write to finalImage and otherImage just fine, but I can't sample the depth texture:
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, mDepthTexture);
glUniform1i(shader.getUniformLocation("depthTexture"), 2);
glBindImageTexture(0, mFinalTexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
glBindImageTexture(1, mOtherTexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
And here is how the depth texture is set up:
glGenTextures(1, &mDepthTexture);
glBindTexture(GL_TEXTURE_2D, mDepthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, mDepthTexture, 0);
glfw hints:
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
I've confirmed that the depth texture is valid using gDEBugger, so I'm not sure where the error is.
EDIT
I've also tried glBindImageTexture for the depth texture, like this:
glBindImageTexture(2, mDepthTexture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32F);
and then
....
layout(binding = 0, rgba32f) uniform writeonly image2D finalImage;
layout(binding = 1, rgba32f) uniform writeonly image2D otherImage;
layout(binding = 2, r32f) uniform readonly image2D depthTexture;
....
float depthFloat = imageLoad(depthTexture, texCoord).z;
....
EDIT
Okay, two things. First, there was a silly mistake: the texture sample should have been .x, not .z, since it's a single value. I'm not sure why this worked on the other computer. Secondly, 0xFFFFFFFF seems not to be supported, or to cause an overflow, because I had to remove one F for the implementation to work. I have no idea why, since every example I've come across uses it, and it is a bit worrying since that means some precision is lost.
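For reference, a sketch of the corrected shader lines under those two findings (the shortened constant is simply the 28-bit value that happened to work here):
// The depth texture has a single channel, so read .x (or .r), not .z.
float depthFloat = texture(depthTexture, texCoord).x;
// Use a scale constant that does not overflow on this implementation.
uint depthInt = uint(depthFloat * 0xFFFFFFF);
atomicMin(minDepth, depthInt);
atomicMax(maxDepth, depthInt);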

Problems outputting gl_PrimitiveID to custom frame buffer object (FBO)

I have a very basic fragment shader from which I want to output 'gl_PrimitiveID' to a framebuffer object (FBO) which I have defined. Below is my fragment shader:
#version 150
uniform vec4 colorConst;
out vec4 fragColor;
out uvec4 triID;
void main(void)
{
fragColor = colorConst;
triID.r = uint(gl_PrimitiveID);
}
I setup my FBO like this:
GLuint renderbufId0;
GLuint renderbufId1;
GLuint depthbufId;
GLuint framebufId;
// generate render and frame buffer objects
glGenRenderbuffers( 1, &renderbufId0 );
glGenRenderbuffers( 1, &renderbufId1 );
glGenRenderbuffers( 1, &depthbufId );
glGenFramebuffers ( 1, &framebufId );
// setup first renderbuffer (fragColor)
glBindRenderbuffer(GL_RENDERBUFFER, renderbufId0);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, gViewWidth, gViewHeight);
// setup second renderbuffer (triID)
glBindRenderbuffer(GL_RENDERBUFFER, renderbufId1);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB32UI, gViewWidth, gViewHeight);
// setup depth buffer
glBindRenderbuffer(GL_RENDERBUFFER, depthbufId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT32, gViewWidth, gViewHeight);
// setup framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, framebufId);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbufId0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_RENDERBUFFER, renderbufId1);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthbufId );
// check if everything went well
GLenum stat = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(stat != GL_FRAMEBUFFER_COMPLETE) { exit(0); }
// setup color attachments
const GLenum att[] = {GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1};
glDrawBuffers(2, att);
// render mesh
RenderMyMesh()
// copy second color attachment (triID) to local buffer
glReadBuffer(GL_COLOR_ATTACHMENT1);
glReadPixels(0, 0, gViewWidth, gViewHeight, GL_RED, GL_UNSIGNED_INT, data);
For some reason glReadPixels gives me a 'GL_INVALID_OPERATION' error. However, if I change the internal format of renderbufId1 from 'GL_RGB32UI' to 'GL_RGB' and use 'GL_FLOAT' in glReadPixels instead of 'GL_UNSIGNED_INT', then everything works fine. Does anyone know why I am getting the 'GL_INVALID_OPERATION' error and how I can solve it?
Is there an alternative way of outputting 'gl_PrimitiveID'?
PS: The reason I want to output 'gl_PrimitiveID' like this is explained here: Picking triangles in OpenGL core profile when using glDrawElements
glReadPixels(0, 0, gViewWidth, gViewHeight, GL_RED, GL_UNSIGNED_INT, data);
As stated on the OpenGL Wiki, you need to use GL_RED_INTEGER when transferring true integer data. Otherwise, OpenGL will try to use floating-point conversion on it.
BTW, make sure you're using glBindFragDataLocation to set up which buffers those fragment shader outputs go to. Alternatively, you can set it up explicitly in the shader if you're using GLSL 3.30 or above.
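Putting both points together, a minimal sketch (programId is an assumed name for the shader program object; the output names and attachments come from the code above):
// Before linking: map the fragment shader outputs to the two color attachments.
glBindFragDataLocation(programId, 0, "fragColor"); // goes to GL_COLOR_ATTACHMENT0
glBindFragDataLocation(programId, 1, "triID");     // goes to GL_COLOR_ATTACHMENT1
glLinkProgram(programId);
// ... render the mesh as before ...
// Read back the unsigned-integer attachment with an *integer* pixel format.
glReadBuffer(GL_COLOR_ATTACHMENT1);
glReadPixels(0, 0, gViewWidth, gViewHeight, GL_RED_INTEGER, GL_UNSIGNED_INT, data);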

OpenGL multiple textures with VBOs

I'm trying to figure out how to render an object (a cube) with different textures for each face. For simplicity's sake, I have 2 textures that are applied to 3 faces of the cube each. I understand that I should be using texture arrays with 3 coordinates to represent the relevant texture to be used. I'm just unsure of how to do this and how to code my fragment shader.
Here is the relevant part of my init() function:
final String textureName = model.getTextures().get(i).textureName;
final FileTexture textureGenerator = new FileTexture(this.getClass().getResourceAsStream(textureName),
true, context);
textureId = textureGenerator.getTextureId();
width = textureGenerator.getWidth();
height = textureGenerator.getHeight();
textureMap.put(model.getTextures().get(i).matName, textureId);
context.getGL().glActiveTexture(GL.GL_TEXTURE0 + i);
context.getGL().glBindTexture(GL.GL_TEXTURE_2D, textureId);
I am slightly confused here, however, because the Orange Book (OpenGL Shading Language) gives examples in which glActiveTexture and glBindTexture are used, but the GLSL Common Mistakes page says you shouldn't do this.
From there, my display() function looks like this:
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 3 * 4, getVertices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getTexCoordBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 2 * 4, getTexCoords(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, getNoOfIndices() * 4, getIndices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getColorBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 4 * 4, getColors(), GL.GL_STREAM_DRAW);
layerTextureShader.use(gl);
gl.glEnableClientState(GL.GL_VERTEX_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
gl.glEnableClientState(GL.GL_COLOR_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, mask ? getMaskColorBufferObject() : getColorBufferObject());
gl.glColorPointer(4, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
final int count = getNoOfIndices();
gl.glDrawElements(GL.GL_TRIANGLES, count, GL.GL_UNSIGNED_INT, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glDisableClientState(GL.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL.GL_COLOR_ARRAY);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
layerTextureShader.release(gl);
I am unsure of what to put in my GLSL shaders. My vertex shader has the standard gl_TexCoord[0] = gl_MultiTexCoord0; and my fragment shader looks like:
uniform sampler2D texture;
void main()
{
gl_FragColor = texture2D(texture, gl_TexCoord[0].st);
}
How do I instruct the fragment shader on which texture to use? I assume it's when I'm populating the vertex, index, and texture buffers, etc., and that I do it by passing in this 3rd texture coordinate for each point? Is the value of this 3rd coordinate the value of the relevant texture coordinate?
I hope my question makes sense and thanks for any help.
Chris
What you are looking for is a cube map. In OpenGL, you can define six textures at once (representing the six sides of a cube) and map them using 3D texture coordinates instead of the common 2D texture coordinates. For a simple cube, the texture coordinates would be the same as the vertices' respective normals. (If you will only be texturing plain cubes in this manner, you can consolidate normals and texture coordinates in your vertex shader, too!) Cube maps are much simpler than trying to bind six distinct textures simultaneously the way you are doing right now.
GLuint mHandle;
glGenTextures(1, &mHandle); // create your texture normally
// Note the target being used instead of GL_TEXTURE_2D!
glBindTexture(GL_TEXTURE_CUBE_MAP, mHandle); // bind before setting parameters
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Now, load in your six distinct images. They need to be the same dimensions!
// Notice the targets being specified: the six sides of the cube map.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data1);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data2);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data3);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data4);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data5);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data6);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
// And of course, after you are all done using the textures...
glDeleteTextures(1, &mHandle);
Now, when doing your shaders, you need the vertex shader to accept and/or pass 3D coordinates (vec3) instead of 2D coordinates (vec2).
// old GLSL style
attribute vec3 inTextureCoordinate;
varying vec3 vTextureCoordinate;
// more recent GLSL
in vec3 inTextureCoordinate;
out vec3 vTextureCoordinate;
In this example, your vertex shader would simply assign vTextureCoordinate = inTextureCoordinate. Your fragment shader then needs to accept that texture coordinate and sample the cube map uniform.
uniform samplerCube cubeMap;
...
gl_FragColor = textureCube(cubeMap, vTextureCoordinate);
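For completeness, a minimal matching vertex shader sketch in the "more recent GLSL" style (the version directive and the mvpMatrix uniform name are assumptions, not part of the original code):
#version 330 core
uniform mat4 mvpMatrix;        // assumed combined model-view-projection matrix
in vec3 inPosition;
in vec3 inTextureCoordinate;   // for a plain cube this can simply be the vertex normal/position
out vec3 vTextureCoordinate;
void main()
{
    // Pass the 3D coordinate straight through for the cube-map lookup.
    vTextureCoordinate = inTextureCoordinate;
    gl_Position = mvpMatrix * vec4(inPosition, 1.0);
}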
Whew! That was a lot. Did I leave anything out?