I am trying to load a texture array into my shader. The issue I'm facing is that I can only draw the last loaded texture in the array. Strangely, this is always at index 0. I create the texture array like so (I'm using Go bindings, but the general idea is clear):
// Allocate the array
width := int32(32)
height := int32(32)
var textureArray uint32
gl.GenTextures(1, &textureArray)
gl.BindTexture(gl.TEXTURE_2D_ARRAY_EXT, textureArray)
gl.TexParameteri(gl.TEXTURE_2D_ARRAY_EXT, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
gl.TexParameteri(gl.TEXTURE_2D_ARRAY_EXT, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
gl.TexParameteri(gl.TEXTURE_2D_ARRAY_EXT, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.TexParameteri(gl.TEXTURE_2D_ARRAY_EXT, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
// Load the textures into the array (assume rgbaN.Pix holds the N-th image's RGBA pixels)
gl.TexImage3D(gl.TEXTURE_2D_ARRAY_EXT, 0, gl.RGBA, width, height, 0, 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba0.Pix))
gl.TexImage3D(gl.TEXTURE_2D_ARRAY_EXT, 0, gl.RGBA, width, height, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba1.Pix))
gl.TexImage3D(gl.TEXTURE_2D_ARRAY_EXT, 0, gl.RGBA, width, height, 2, 0, gl.RGBA, gl.UNSIGNED_BYTE, gl.Ptr(rgba2.Pix))
Then, inside my draw loop I do:
gl.BindTexture(gl.TEXTURE_2D_ARRAY_EXT, textureArray)
gl.BindVertexArray(vertexArray)
gl.DrawElements(gl.TRIANGLES, int32(len(indices)), gl.UNSIGNED_INT, nil)
My shader looks like this:
#version 110
#if GL_EXT_texture_array
#extension GL_EXT_texture_array : enable
#endif
varying vec2 texCoord;
uniform sampler2DArray texArray;
void main()
{
gl_FragColor = texture2DArray(texArray, vec3(texCoord, 0.0));
}
With this shader, I always draw the last texture added to the array. If I change vec3(texCoord, 0.0) to something like vec3(texCoord, 1.0), I get blackness; nothing is drawn. My current thought is that I'm not assigning the uniform correctly, but I don't know how to fix it.
The glTexImage3D() function creates the entire 3D (or 2D array) texture. The depth parameter is the depth of the image itself (i.e., the total number of planes in a 2D array), not the index of the plane to load. So when you load the second plane, it replaces the first plane, and when you load the third plane, it replaces the second plane. (It also overruns the bounds of your pixel array, whoops!)
If you want to load a 3D (or 2D array) texture one plane at a time, use glTexSubImage3D.
I'm trying to access a depth texture (DepthComponent) in my GLSL version 400 shader.
The program does a two-pass rendering. In the first pass I render all the geometry and colors to a framebuffer that has a color attachment and a depth attachment. The depth attachment is bound like this:
(Note: I'm using C# with OpenTK, which is strongly typed, in my code examples.)
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.DepthAttachment, TextureTarget.Texture2D, depthTexture.ID, 0);
The depth texture has an internal pixel format of DepthComponent32f, a pixel format of DepthComponent, and Float as the pixel type. All other properties have default values.
The second pass renders the framebuffer's color image onto the screen using the following shader:
#version 400
uniform sampler2D finalImage;
in vec2 texCoords;
out vec4 fragColor;
void main(){
fragColor = vec4(texture2D(finalImage, texCoords.xy).rgb, 1.0);
}
But now I want to read the depth texture (DepthComponent) instead of the color texture (RGBA).
I tried a lot of things: disabling TextureCompareMode, using a shadow2DSampler with shadow2DProj(sampler, vec4(texCoords.xy, 0.0, 1.0)), or just textureProj(sampler, vec3(texCoords.xy, 0.0)). But it returns only 1 or 0, depending on which configuration I use.
To be sure that my depth texture is OK, I've read the pixels back into a float array like this:
GL.ReadPixels(0, 0, depthTexture.Width, depthTexture.Height, PixelFormat.DepthComponent, PixelType.Float, float_array);
Everything seems to be correct: it shows me 1.0 for empty space and values between 0.99 and 1.0 for visible objects.
Edit
Here is a code example showing what my process looks like:
Init code
depthTexture = new GLEXTexture2D(width, height);
depthTexture.TextureCompareMode = TextureCompareMode.None;
depthTexture.CreateMutable(PixelInternalFormat.DepthComponent32f, PixelFormat.DepthComponent, PixelType.Float);
CreateMutable function:
ReserveTextureID();
GLEX.glBeginTexture2D(ID);
GL.TexImage2D(TextureTarget.Texture2D, 0, pInternalFormat, width, height, 0, pFormat, pType, IntPtr.Zero);
ApplyOptions();
MarkReserved(true);
GLEX.glEndTexture2D();
(Framebuffer attachment mentioned above)
Render pass 1
GL.BindFramebuffer(FramebufferTarget.Framebuffer, drawBuffer.ID);
GL.Viewport(0, 0, depthTexture.Width, depthTexture.Height);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit | ClearBufferMask.StencilBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.ClearColor(Color.Gray);
GL.UseProgram(geometryPassShader.ID);
geometry_shaderUniformMVPM.SetValueMat4(false, geometryImageMVMatrix * geometryImageProjMatrix);
testRectangle.Render(PrimitiveType.QuadStrip);
GL.UseProgram(0);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
Render pass 2
GL.Viewport(0, 0, depthTexture.Width, depthTexture.Height);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit | ClearBufferMask.StencilBufferBit);
GL.ClearColor(Color.White);
GL.UseProgram(finalImageShader.ID);
GL.ActiveTexture(TextureUnit.Texture0);
depthTexture.Bind();
final_shaderUniformMVPM.SetValueMat4(false, finalImageMatrix);
screenQuad.Render(PrimitiveType.Quads);
GL.UseProgram(0);
GL.BindTexture(TextureTarget.Texture2D, 0);
A few hours later I found the solution.
The problem was the MinFilter. As the Khronos documentation for glTexParameter says:
The initial value of GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR.
I changed the MinFilter of my depth texture to GL_NEAREST (GL_LINEAR is also legal) and now the depth values in the GLSL shader are right (after linearization, of course).
Additional info:
There are some vendor extensions for the MagFilter, such as LINEAR_DETAIL_ALPHA_SGIS. I've tried some of these; the correctness of the depth values was not affected.
So I am creating a terrain, and for texturing I want to use a 3D texture (depth 3) holding 3 images (512x512), one per z-layer, so that the GPU can interpolate between the layers based on a single factor: 0/3 = image 1, 1/3 = image 2, 2/3 = image 3, with every value in between interpolating toward the next layer (cyclically).
This works perfectly as long as I don't enable mipmaps on this 3D texture. When I do enable them, my terrain shows the same single image everywhere unless I come closer, as if the images have shifted from being z-layers to being mipmap levels.
I don't understand this; can someone tell me what I'm doing wrong?
This is where I generate the texture:
glGenTextures(1, &m_textureId);
glBindTexture(GL_TEXTURE_3D, m_textureId);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB, 512, 512, 3, 0, GL_BGR, GL_UNSIGNED_BYTE, 0);
This is the step I perform for every Z:
glTexSubImage3D(GL_TEXTURE_3D, 0, 0, 0, Z, 512, 512, 1, GL_BGR, GL_UNSIGNED_BYTE, imageData);
After this, I do:
glGenerateMipmap(GL_TEXTURE_3D);
In the shader, I define the texture as:
uniform sampler3D tGround;
and simply sample it with:
texture(tGround, vec3(texcoord, f));
where texcoord is a 2D coordinate and f is the layer we need, simply based on height at this moment.
There is a way to do something like what you want, but it does require work. And you can't use a 3D texture to do it.
You have to use Array Textures instead. The usual way to think of a 2D array texture is as a bundle of 2D textures of the same size. But you can also think of it as a 3D texture where every mipmap level has the same number of Z layers. That is exactly where your 3D texture breaks down: 3D mipmaps shrink in all three dimensions, so level 1 of your 512x512x3 texture is 256x256x1, with your three images averaged into a single layer; that is the one image you see covering the terrain at a distance. The drawback of array textures is that there is no automatic blending between layers.
Since you want blending, you will need to synthesize it. But that's easy enough with shaders:
uniform sampler2DArray arrayTex;
vec4 ArrayTextureBlend(in vec3 texCoord)
{
    float frac = fract(texCoord.z);
    texCoord.z = floor(texCoord.z);
    vec4 top = texture(arrayTex, texCoord);
    vec4 bottom = texture(arrayTex, texCoord + vec3(0.0, 0.0, 1.0));
    return mix(top, bottom, frac); // linearly interpolate between the two layers
}
So I've tried following the docs; however, I can't seem to get a 2D texture array to work.
-(GLuint)buildTextureArray:(NSArray *)arrayOfImages
{
GLImage *sample = [GLImage imageWithImageName:[arrayOfImages objectAtIndex:0] shouldFlip:NO]; //Creates a sample to examine texture width and height
int width = sample.width, height = sample.height;
GLsizei count = (GLsizei)arrayOfImages.count;
GLuint texture3D;
glGenTextures(1, &texture3D);
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA8, width, height, count, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
int i = 0;
for (NSString *name in arrayOfImages) //Loops through everything in arrayOfImages
{
GLImage *image = [GLImage imageWithImageName:name shouldFlip:NO]; //My own class that loads an image
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, image.width, image.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
i++;
}
return texture3D;
}
//Setting Uniform elsewhere
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glUniform1i(textures, 0);
//Fragment Shader
#version 150
in vec3 texCoords;
uniform sampler2DArray textures;
out vec3 color;
void main()
{
color = texture(textures, texCoords.stp, 0).rgb;
}
I am able to load individual textures with the same texture parameters, but I can't get the 2D texture array to work; all I get is a black texture. Why is this happening?
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
Your texture in fact does not have mipmaps. So stop telling OpenGL that it does.
Also, always set the mipmap range parameters (GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL) for your texture. Or better yet, use texture storage (glTexStorage3D) to allocate your texture's storage, and it will do this for you.
Also note that for 2D array textures, the s and t components of the texture coordinate are normalized [0, 1] values as usual, but the third component is the layer index, which is not normalized: it runs from 0 to depth - 1. Try checking those texture coordinates.
I have a cube defined as:
float vertices[] = { -width, -height, -depth, // 0
width, -height, -depth, // 1
width, height, -depth, // 2
-width, height, -depth, // 3
-width, -height, depth, // 4
width, -height, depth, // 5
width, height, depth, // 6
-width, height, depth // 7
};
and I have a 128x128 image which I simply want painted on each of the 6 faces of the cube, and nothing else. So what are the texture coordinates? I need the actual values.
This is the drawing code:
// Counter-clockwise winding.
gl.glFrontFace(GL10.GL_CCW);
// Enable face culling.
gl.glEnable(GL10.GL_CULL_FACE);
// What faces to remove with the face culling.
gl.glCullFace(GL10.GL_BACK);
// Enabled the vertices buffer for writing and to be used during
// rendering.
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
// Specifies the location and data format of an array of vertex
// coordinates to use when rendering.
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVerticesBuffer);
// Bind the texture according to the set texture filter
gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[filter]);
gl.glEnable(GL10.GL_TEXTURE_2D);
// Enable the texture state
gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
// Point to our buffers
gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, mTextureBuffer);
// Set flat color
gl.glColor4f(red, green, blue, alpha);
gl.glDrawElements(GL10.GL_TRIANGLES, mNumOfIndices,
GL10.GL_UNSIGNED_SHORT, mIndicesBuffer);
// ALL the DRAWING IS DONE NOW
// Disable the vertices buffer.
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
// Disable face culling.
gl.glDisable(GL10.GL_CULL_FACE);
This is the index array:
short indices[] = { 0, 2, 1,
0, 3, 2,
1,2,6,
6,5,1,
4,5,6,
6,7,4,
2,3,6,
6,3,7,
0,7,3,
0,4,7,
0,1,5,
0,5,4
};
I am not sure if the index array is needed to find the texture coordinates. Note that the cube vertex array I gave is the most efficient representation of a cube using an index array. The cube draws perfectly, but the textures don't: only one side shows the correct picture, while the other sides are messed up. I used the methods described in various online texture tutorials, but it does not work.
What you are looking for is a cube map. In OpenGL, you can define six textures at once (representing the six sides of a cube) and map them using 3D texture coordinates instead of the common 2D texture coordinates. For a simple cube, the texture coordinates are the same as the vertices' respective normals. (If you will only be texturing plain cubes in this manner, you can consolidate normals and texture coordinates in your vertex shader, too!) Cube maps are much simpler than trying to apply the same texture to repeating quads (extra unnecessary drawing steps).
GLuint mHandle;
glGenTextures(1, &mHandle); // create your texture normally
// Bind it first, so the parameter calls below affect this texture.
// Note the target being used instead of GL_TEXTURE_2D!
glBindTexture(GL_TEXTURE_CUBE_MAP, mHandle);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Now, load in your six distinct images. They need to be the same dimensions!
// Notice the targets being specified: the six sides of the cube map.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data1);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data2);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data3);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data4);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data5);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGBA, width, height, 0,
             format, GL_UNSIGNED_BYTE, data6);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
// And of course, after you are all done using the textures...
glDeleteTextures(1, &mHandle);
When specifying your texture coordinates, you will then use sets of 3 coordinates instead of sets of 2. In a simple cube, you point at the 8 corners with normalized vectors: if N = 1.0 / sqrt(3.0), then one corner would be (N, N, N), another (N, N, -N), and so on.
You need to define which orientation you want on each face (and that will change which texture coordinates go on each vertex).
You also need to duplicate the vertex positions, since the same cube corner needs different texture coordinates depending on which face it belongs to.
If you want the full texture on each face, then the texture coordinates are (0, 0), (0, 1), (1, 1), (1, 0). How you map them to the specific vertices (24 of them, 4 per face) depends on the orientation you want.
For me, it's easier to consider your vertices as width = x, height = y, and depth = z.
Then it's a simple matter of getting the 6 faces.
float vertices[] = { -x, -y, -z, // 0
x, -y, -z, // 1
x, y, -z, // 2
-x, y, -z, // 3
-x, -y, z, // 4
x, -y, z, // 5
x, y, z, // 6
-x, y, z// 7
};
For example, the front face of your cube has positive depth (the cube's center is at (0, 0, 0), given the vertices you listed). Since 4 of the 8 points have positive depth, your front face is 4, 5, 6, 7, going anticlockwise from (-x, -y) to (-x, y).
OK, so your back face is all negative depth, or -z, so it's simply 0, 1, 2, 3.
See the pattern? Your left face is all negative width, or -x, so 0, 3, 4, 7, and your right face is positive x, so 1, 2, 5, 6.
I'll let you figure out the top and bottom of the cube.
Your vertex array only directly describes 2 sides of the cube, but for argument's sake, say vertices[0] through vertices[3] describe one side; then your texture coordinates might be:
float texCoords[] = { 0.0, 0.0, //bottom left of texture
1.0, 0.0, //bottom right " "
1.0, 1.0, //top right " "
0.0, 1.0 //top left " "
};
You can use those coordinates for texturing each subsequent side with the entire texture.
To render a skybox (cube map), the shader below works for me:
Cubemap vertex shader:
attribute vec4 a_position;
varying vec3 v_cubemapTexture;
vec3 texture_pos;
uniform vec3 u_cubeCenterPt;
uniform mat4 mvp;
void main(void)
{
gl_Position = mvp * a_position;
texture_pos = vec3(a_position.x - u_cubeCenterPt.x, a_position.y - u_cubeCenterPt.y, a_position.z - u_cubeCenterPt.z);
v_cubemapTexture = normalize(texture_pos.xyz);
}
Cubemap fragment shader:
precision highp float;
varying vec3 v_cubemapTexture;
uniform samplerCube cubeMapTextureSample;
void main(void)
{
gl_FragColor = textureCube(cubeMapTextureSample, v_cubemapTexture);
}
Hope it is useful...
I'm trying to figure out how to render an object (a cube) with a different texture on each face. For simplicity's sake, I have 2 textures, each applied to 3 faces of the cube. I understand that I should be using texture arrays with 3-component texture coordinates, where the third component selects the texture to use. I'm just unsure of how to do this and how to code my fragment shader.
Here is the relevant part of my init() function:
final String textureName = model.getTextures().get(i).textureName;
final FileTexture textureGenerator = new FileTexture(this.getClass().getResourceAsStream(textureName),
true, context);
textureId = textureGenerator.getTextureId();
width = textureGenerator.getWidth();
height = textureGenerator.getHeight();
textureMap.put(model.getTextures().get(i).matName, textureId);
context.getGL().glActiveTexture(GL.GL_TEXTURE0 + i);
context.getGL().glBindTexture(GL.GL_TEXTURE_2D, textureId);
I am slightly confused here, however, because the Orange Book (OpenGL Shading Language) gives examples in which glActiveTexture and glBindTexture are used, but the GLSL Common Mistakes page says you shouldn't do this.
From there, my display() function looks like this:
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 3 * 4, getVertices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getTexCoordBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 2 * 4, getTexCoords(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, getNoOfIndices() * 4, getIndices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getColorBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 4 * 4, getColors(), GL.GL_STREAM_DRAW);
layerTextureShader.use(gl);
gl.glEnableClientState(GL.GL_VERTEX_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
gl.glEnableClientState(GL.GL_COLOR_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, mask ? getMaskColorBufferObject() : getColorBufferObject());
gl.glColorPointer(4, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
final int count = getNoOfIndices();
gl.glDrawElements(GL.GL_TRIANGLES, count, GL.GL_UNSIGNED_INT, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glDisableClientState(GL.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL.GL_COLOR_ARRAY);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
layerTextureShader.release(gl);
I am unsure of what to put in my GLSL shaders. My vertex shader has the standard gl_TexCoord[0] = gl_MultiTexCoord0; and my fragment shader looks like:
uniform sampler2D texture;
void main()
{
gl_FragColor = texture2D(texture, gl_TexCoord[0].st);
}
How do I instruct the fragment shader which texture to use? I assume it happens when I populate the vertex, index, and texture-coordinate buffers, by passing in this 3rd texture coordinate for each point? And is the value of that 3rd coordinate what selects the relevant texture?
I hope my question makes sense and thanks for any help.
Chris
What you are looking for is a cube map. In OpenGL, you can define six textures at once (representing the six sides of a cube) and map them using 3D texture coordinates instead of the common 2D texture coordinates. For a simple cube, the texture coordinates are the same as the vertices' respective normals. (If you will only be texturing plain cubes in this manner, you can consolidate normals and texture coordinates in your vertex shader, too!) Cube maps are much simpler than trying to bind six distinct textures simultaneously, the way you are doing right now.
(The creation code is identical to the cube-map example in the earlier answer above: generate and bind a GL_TEXTURE_CUBE_MAP, set its filter and wrap parameters, upload the six faces with glTexImage2D on the GL_TEXTURE_CUBE_MAP_POSITIVE_X through GL_TEXTURE_CUBE_MAP_NEGATIVE_Z targets, and call glGenerateMipmap.)
Now, when doing your shaders, you need the vertex shader to accept and/or pass 3D coordinates (vec3) instead of 2D coordinates (vec2).
// old GLSL style
attribute vec3 inTextureCoordinate;
varying vec3 vTextureCoordinate;
// more recent GLSL
in vec3 inTextureCoordinate;
out vec3 vTextureCoordinate;
In this example, your vertex shader would simply assign vTextureCoordinate = inTextureCoordinate. Your fragment shader then needs to accept that texture coordinate and sample the cube map uniform.
uniform samplerCube cubeMap;
...
gl_FragColor = textureCube(cubeMap, vTextureCoordinate);
Whew! That was a lot. Did I leave anything out?