So I've tried following the docs, but I can't seem to get a 2D texture array to work.
-(GLint)buildTextureArray:(NSArray *)arrayOfImages
{
GLImage *sample = [GLImage imageWithImageName:[arrayOfImages objectAtIndex:0] shouldFlip:NO]; //Creates a sample to examine texture width and height
int width = sample.width, height = sample.height;
GLsizei count = (GLsizei)arrayOfImages.count;
GLuint texture3D;
glGenTextures(1, &texture3D);
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA8, width, height, count, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
int i = 0;
for (NSString *name in arrayOfImages) //Loops through everything in arrayOfImages
{
GLImage *image = [GLImage imageWithImageName:name shouldFlip:NO]; //My own class that loads an image
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, image.width, image.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
i++;
}
return texture3D;
}
//Setting Uniform elsewhere
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glUniform1i(textures, 0);
//Fragment Shader
#version 150
in vec3 texCoords;
uniform sampler2DArray textures;
out vec3 color;
void main()
{
color = texture(textures, texCoords.stp, 0).rgb;
}
I am able to load individual textures with the same texture parameters, but I can't get it to work with the texture 2D array. All I get is a black texture. Why is this happening?
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
Your texture in fact does not have mipmaps. So stop telling OpenGL that it does.
Also, always set the mipmap range parameters (GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL) for your texture. Or better yet, use texture storage (glTexStorage3D) to allocate your texture's storage, and it will do that for you.
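A minimal sketch of what that could look like with immutable texture storage (glTexStorage3D needs OpenGL 4.2 or ARB_texture_storage), reusing the width, height, count and image variables from the question:
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
// Allocate exactly one mipmap level; the level range is then consistent automatically
glTexStorage3D(GL_TEXTURE_2D_ARRAY, 1, GL_RGBA8, width, height, count);
// With only one level there is no point in a mipmap minification filter
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Upload each layer as before (one call per layer index i)
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
If you stay with glTexImage3D instead, either call glGenerateMipmap(GL_TEXTURE_2D_ARRAY) after the uploads or set GL_TEXTURE_MAX_LEVEL to 0, so the mipmap completeness rules are satisfied.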
For 2D array textures, the s and t components of the texture coordinate are normalized to [0, 1] as usual, but the third component is the layer index: it is not normalized and runs from 0 to depth - 1. Check that the third component of your texture coordinates is really the layer you intend to sample.
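For illustration only, a hedged sketch of what such coordinates could look like on the CPU side (hypothetical data; s and t stay in [0, 1] while the third component selects the layer):
GLfloat quadTexCoords[] = {
// s,    t,    layer
0.0f, 0.0f, 2.0f, // bottom-left of the quad, sampling layer 2
1.0f, 0.0f, 2.0f, // bottom-right
1.0f, 1.0f, 2.0f, // top-right
0.0f, 1.0f, 2.0f  // top-left
};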
Related
I'm now building a Voxel game. In the beginning, I used a texture atlas that stores all voxel textures, and it worked fine. After that, I decided to use Greedy Meshing in my game, so the texture atlas is no longer useful. I read some articles which said that I should use a Texture Array instead, so I tried to read up on and use the texture array technique for texturing. However, the result I got was all black in my game. So what am I missing?
This is my texture atlas (600 x 600)
Here is my Texture2DArray, I use this class to read and save a texture array
Texture2DArray::Texture2DArray() : Internal_Format(GL_RGBA8), Image_Format(GL_RGBA), Wrap_S(GL_REPEAT), Wrap_T(GL_REPEAT), Wrap_R(GL_REPEAT), Filter_Min(GL_NEAREST), Filter_Max(GL_NEAREST), Width(0), Height(0)
{
glGenTextures(1, &this->ID);
}
void Texture2DArray::Generate(GLuint width, GLuint height, unsigned char* data)
{
this->Width = width;
this->Height = height;
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
// I cannot decide what the texture array layer (depth) should be (I put 1 here for the layer number)
// Can anyone explain to me how to decide the texture layer here?
glTexImage3D(GL_TEXTURE_2D_ARRAY, 1, this->Internal_Format, this->Width, this->Height, 0, 1 , this->Image_Format, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Max);
// unbind this texture before creating another texture
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}
void Texture2DArray::Bind() const
{
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
}
Here is my Fragment Shader
#version 330 core
uniform sampler2DArray ourTexture;
in vec2 texCoord;
out vec4 FragColor;
void main(){
// 1 (the layer number) just for testing
FragColor = texture(ourTexture,vec3(texCoord, 1));
}
Here is my Vertex Shader
#version 330 core
layout (location = 0) in vec3 inPos;
layout (location = 1) in vec2 inTexCoord;
out vec2 texCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main(){
gl_Position = projection * view * vec4(inPos,1.0f);
texCoord = inTexCoord;
}
This is my rendering result
EDIT 1:
I figured out that a texture atlas doesn't work with a texture array, because it is a grid, so OpenGL cannot decide where each tile begins. So I created a vertical strip texture (18 x 72) and tried again, but it is still all black everywhere.
I have checked that the texture is bound before it is used.
When the three-dimensional texture image is specified, the depth parameter has to be the number of images that are stored in the array (e.g. imageCount). The width and height parameters are the width and height of one tile (e.g. tileW, tileH). The level parameter (the second argument, which you set to 1) should be 0, and the border parameter has to be 0. See glTexImage3D. glTexImage3D creates the data store for the texture image; the memory required for the texture is reserved (on the GPU). It is possible to pass a pointer to the image data, but it is not necessary.
If all the tiles are stored in a vertical atlas, then the image data can be set directly:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, data);
If the tiles are stored in a 16x16 tile atlas instead, then the tiles have to be extracted from the texture atlas and each subimage has to be set in the texture array (data[i] is the image data of one tile). Create the texture image:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
After that, use glTexSubImage3D to put the texture data into the data store of the texture object. glTexSubImage3D uses the existing data store and copies the data into it, e.g.:
for (int i = 0; i < imageCount; ++i)
{
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, data[i]);
}
Note, you have to extract the tiles from the texture atlas and set each subimage in the texture array (data[i] is the image data of one tile).
An algorithm to extract the tiles and specify the texture image may look as follows
#include <algorithm> // std::copy
#include <vector> // std::vector
unsigned char* data = ...; // 16x16 texture atlas image data
int tileW = ...; // number of pixels in a row of 1 tile
int tileH = ...; // number of pixels in a column of 1 tile
int channels = 4; // 4 for RGBA
int tilesX = 16;
int tilesY = 16;
int imageCount = tilesX * tilesY;
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
std::vector<unsigned char> tile(tileW * tileH * channels);
int tileSizeX = tileW * channels;
int rowLen = tilesX * tileSizeX;
for (int iy = 0; iy < tilesY; ++ iy)
{
for (int ix = 0; ix < tilesX; ++ ix)
{
unsigned char *ptr = data + iy*rowLen + ix*tileSizeX;
for (int row = 0; row < tileH; ++ row)
std::copy(ptr + row*rowLen, ptr + row*rowLen + tileSizeX,
tile.begin() + row*tileSizeX);
int i = iy * tilesX + ix;
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, tile.data());
}
}
I am trying to display a layer of a 3D texture created from some 3D data, but all sampled points are always black (I guess my texture creation/allocation is failing somehow). It is being rendered on a plane using window coordinates. I have checked my data; it is a vector with the correct values. glEnable(GL_TEXTURE_3D) has been called earlier. Any clues why this would fail?
Function that creates the texture:
bool VolumeRender::setVolumeData(QOpenGLShaderProgram *program, vector<unsigned short> v, int x, int y, int z){
voxels.resize(v.size(), 0);
cout << "Processing texture" << endl;
unsigned short sMax = 0;
unsigned short sMin = 32768;
for (unsigned int i =0; i< voxels.size(); i++){
sMax = max(sMax, v[i]);
sMin = min(sMin, v[i]);
}
for (unsigned int i = 0; i < voxels.size(); i++)
voxels[i] = (v[i] - sMin) / (float)(sMax - sMin);
cout << "Loading 3D texture" << endl;
gl.glActiveTexture(GL_TEXTURE0);
gl.glGenTextures(1, &volumeTexture);
gl.glBindTexture(GL_TEXTURE_3D, volumeTexture);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER);
gl.glTexImage3D(GL_TEXTURE_3D, 0, GL_RED, x, y, z, 0, GL_RED, GL_FLOAT, &voxels[0]);
gl.glBindTexture(GL_TEXTURE_3D, 0);
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
voxelsLoaded = true;
return true;
}
Simple fragment shader:
#version 330 core
uniform sampler3D VOXELS;
uniform vec2 SIZE;
out vec4 color;
void main(){
vec2 coords = (gl_FragCoord.xy - 0.5) / SIZE;
vec3 texcoords = vec3(coords, 0.5);
color = texture(VOXELS, texcoords);
}
glEnable(GL_TEXTURE_…) has no effect when using shaders. It's a relic from the fixed function pipeline era. On the other hand the texture must be actually bound when drawing.
In your code you have
gl.glBindTexture(GL_TEXTURE_3D, 0);
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
Now since this is in initialization code, it's unclear if you actually understand the consequences of these lines. So let's break it down:
gl.glBindTexture(GL_TEXTURE_3D, 0);
This means that texture 0 (which with shaders is the nil texture, but in old and busted OpenGL-1.0 it actually could be sampled from) is bound to texture unit 0. From there on, when trying to sample from texture unit 0, it will not sample anything.
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
Set the sampler uniform with name "VOXELS" to sample from texture unit 0. Whatever texture is bound to that texture unit at the moment of calling the draw function, that texture will be sampled from.
Somewhere in your program you're making a draw call. You didn't show us where. But in order for your drawing to actually sample from your texture, you have to bind your 3D texture to texture unit 0:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, volumeTexture);
draw_stuff();
I wrote a simple test case to get the height of an image within a compute shader and write it to an SSBO. I've used the SSBO code before, and I know that part works fine. I used apitrace to inspect the state during the glDispatchCompute call, and I can see both the original texture and the image bound to the correct image unit. However, imageSize always returns zero (the output is all zeros, with the exception of some leftover -1s at the end because the division with the workgroup size rounds down). No OpenGL errors are thrown.
I based this test case on one of my earlier questions which included code to bind an SSBO to a compute shader (I use it here to get debug output from the compute shader).
class ComputeShaderWindow : public QOpenGLWindow {
public:
void initializeGL() {
// Create the opengl functions object
gl = context()->versionFunctions<QOpenGLFunctions_4_3_Core>();
m_compute_program = new QOpenGLShaderProgram(this);
auto compute_shader_s = fs::readFile(
"test_assets/example_compute_shader.comp");
QImage img("test_assets/input/out.png");
// Adds the compute shader, then links and binds it
m_compute_program->addShaderFromSourceCode(QOpenGLShader::Compute,
compute_shader_s);
m_compute_program->link();
m_compute_program->bind();
GLuint frame;
// Create the texture
gl->glGenTextures(1, &frame);
// Bind the texture
gl->glBindTexture(GL_TEXTURE_2D, frame);
// Fill the texture with the image
gl->glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB8,
img.width(),
img.height(),
0,
GL_BGRA,
GL_UNSIGNED_BYTE,
img.bits());
GLuint image_unit = 1;
// Get the location of the image uniform
GLuint uniform_location = gl->glGetUniformLocation(
m_compute_program->programId(),
"video_frame");
// Point the image uniform at image unit 1 (a unique value that we choose)
gl->glUniform1i(uniform_location, image_unit);
// Bind layer of texture to image unit
gl->glBindImageTexture(image_unit,
frame,
0,
GL_FALSE,
0,
GL_READ_ONLY,
GL_RGBA8UI);
// We should only need the bit for shader image access,
// but for the purpose of this example, I set all the bits
// just to be safe
gl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
// SSBO stuff to get output from the shader
GLfloat* default_values = new GLfloat[NUM_INVOCATIONS];
std::fill(default_values, default_values + NUM_INVOCATIONS, -1.0);
GLuint ssbo;
gl->glGenBuffers(1, &ssbo);
gl->glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
gl->glBufferData(GL_SHADER_STORAGE_BUFFER,
NUM_INVOCATIONS * sizeof(float),
&default_values[0],
GL_STATIC_DRAW);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
gl->glDispatchCompute(NUM_INVOCATIONS / WORKGROUP_SIZE, 1, 1);
gl->glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
// Now read from the buffer so that we can check its values
GLfloat* read_data = (GLfloat*) gl->glMapBuffer(GL_SHADER_STORAGE_BUFFER,
GL_READ_ONLY);
std::vector<GLfloat> buffer_data(NUM_INVOCATIONS);
// Read from buffer
for (int i = 0; i < NUM_INVOCATIONS; i++) {
DEBUG(read_data[i]);
}
DEBUG("Done!");
gl->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
assert(gl->glGetError() == GL_NO_ERROR);
}
void resizeGL(int width, int height) {
}
void paintGL() {
}
void teardownGL() {
}
private:
QOpenGLFunctions_4_3_Core* gl;
QOpenGLShaderProgram* m_compute_program;
static constexpr int NUM_INVOCATIONS = 9000;
static constexpr int WORKGROUP_SIZE = 128;
};
As for the compute shader:
#version 430 core
layout(local_size_x = 128) in;
layout(rgba8ui, binding = 1) readonly uniform uimage2D video_frame;
layout(std430, binding = 0) writeonly buffer SSBO {
float data[];
};
void main() {
uint ident = int(gl_GlobalInvocationID);
uint num_workgroups = int(gl_WorkGroupID);
// Write the height of the image into the buffer
data[ident] = float(imageSize(video_frame).y);
}
Turns out I forgot the texture parameters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
No clue why that breaks imageSize() calls though. (A likely explanation: without mipmaps, the default GL_NEAREST_MIPMAP_LINEAR minification filter leaves the texture incomplete, and an incomplete texture cannot be used for image load/store, so image accesses such as imageSize return zero.)
I'm trying to do a compute pass where I render to a texture that will be used in a draw pass later on. My initial implementation was based on shader storage buffer objects and was working nicely. But I want to apply a computation method that takes advantage of the blend hardware of the GPU, so I started porting the SSBO implementation to an RTT one. Unfortunately the code has stopped working: when I read the texture back, it contains wrong values.
Here is my texture and frame buffer setup code:
glGenFramebuffers(1, &m_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// Create render textures
glGenTextures(NUM_TEX_OUTPUTS, m_renderTexs);
m_texSize = square_approximation(m_numVertices);
cout << "Textures size: " << glm::to_string(m_texSize) << endl;
GLenum drawBuffers[NUM_TEX_OUTPUTS];
for (int i = 0 ; i < NUM_TEX_OUTPUTS; ++i)
{
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
// 1st 0: level, 2nd 0: no border, 3rd 0: no initial data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
// XXX: do we need this?
// Poor filtering. Needed !
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
// 0: level
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, m_renderTexs[i], 0);
drawBuffers[i] = GL_COLOR_ATTACHMENT0 + i;
}
glDrawBuffers(NUM_TEX_OUTPUTS, drawBuffers);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
cout << "Error when setting frame buffer" << endl;
// throw exception?
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
And this is the code to start the compute pass:
m_shaderProgram.use();
// setup openGL
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glDisable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
glViewport(0, 0, m_texSize.x, m_texSize.y); // setup viewport (equal to textures size)
// make a single patch have the vertex, the bases and the neighbours
glPatchParameteri(GL_PATCH_VERTICES, m_maxNeighbours + 5);
// Wait all writes to shader storage to finish
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
glUniform1i(m_shaderProgram.getUniformLocation("curvTex"), m_renderTexs[2]);
glUniform2i(m_shaderProgram.getUniformLocation("size"), m_texSize.x, m_texSize.y);
glUniform2f(m_shaderProgram.getUniformLocation("vertexStep"), (umax - umin)/divisoes,
(vmax-vmin)/divisoes);
// Bind buffers
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ibo);
glBindBufferBase(GL_UNIFORM_BUFFER, m_mvp_location, m_mvp_ubo);
// Make textures active
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, m_renderTexs[i]);
}
// no need to pass index array 'cause ibo is bound already
glDrawElements(GL_PATCHES, m_numElements, GL_UNSIGNED_INT, 0);
I then read back the textures using the following:
bool readTex(GLuint tex, void *dest)
{
glBindTexture(GL_TEXTURE_2D, tex);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, dest);
glBindTexture(GL_TEXTURE_2D, 0);
// TODO: check glGetTexImage return values for error
return true;
}
for (int i = 0; i < NUM_TEX_OUTPUTS; ++i)
{
if (m_tensors[i] == NULL) {
m_tensors[i] = new glm::vec4[m_texSize.x*m_texSize.y];
}
memset(m_tensors[i], 0, m_texSize.x*m_texSize.y*sizeof(glm::vec4));
readTex(m_renderTexs[i], m_tensors[i]);
}
Finally, the fragment shader code is:
#version 430
#extension GL_ARB_shader_storage_buffer_object: require
layout(pixel_center_integer) in vec4 gl_FragCoord;
layout(std140, binding=6) buffer EvalBuffer {
vec4 evalDebug[];
};
uniform ivec2 size;
in TEData {
vec4 _a;
vec4 _b;
vec4 _c;
vec4 _d;
vec4 _e;
};
layout(location = 0) out vec4 a;
layout(location = 1) out vec4 b;
layout(location = 2) out vec4 c;
layout(location = 3) out vec4 d;
layout(location = 4) out vec4 e;
void main()
{
a= _a;
b= _b;
c= _c;
d= _d;
e= _e;
evalDebug[gl_PrimitiveID] = gl_FragCoord;
}
The fragment coordinates are correct (each fragment is pointing to a x,y coordinate in the texture), so are all the input values (_a to _e), but I do not see them outputted correctly to the textures when reading back. I also tried accessing the texture in the shader to see if it was only a read-back error, but my debug SSBO returned all zeroes.
Am I missing some setup step?
I've tested both on linux and windows (titan and 540M geforces) and I'm using openGL 4.3.
As derhass pointed out in the comments above, the problem was with the texture format. I assumed that by passing GL_FLOAT as the data type it would use 32-bit floats for each of the RGBA channels. It was not so.
As derhass said, the data type parameter here does not change the texture format. I had to change the internalFormat parameter to what I wanted (GL_RGBA32F) so that it would work as expected.
So, after changing glTexImage2D call to:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, m_texSize.x, m_texSize.y, 0, GL_RGBA, GL_FLOAT, 0);
I was able to correctly render the results to the texture and read it back. :)
I'm trying to figure out how to render an object (a cube) with different textures for each face. For simplicity's sake, I have 2 textures that are each applied to 3 faces of the cube. I understand that I should be using texture arrays with 3 coordinates to represent the relevant texture to be used. I'm just unsure of how to do this and how to code my fragment shader.
Here is the relevant part of my init() function:
final String textureName = model.getTextures().get(i).textureName;
final FileTexture textureGenerator = new FileTexture(this.getClass().getResourceAsStream(textureName),
true, context);
textureId = textureGenerator.getTextureId();
width = textureGenerator.getWidth();
height = textureGenerator.getHeight();
textureMap.put(model.getTextures().get(i).matName, textureId);
context.getGL().glActiveTexture(GL.GL_TEXTURE0 + i);
context.getGL().glBindTexture(GL.GL_TEXTURE_2D, textureId);
I am slightly confused here, however, because the Orange Book (OpenGL Shading Language) gives examples in which glActiveTexture and glBindTexture are used, but the GLSL common mistakes page says you shouldn't do this.
From there, my display() function looks like this:
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 3 * 4, getVertices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getTexCoordBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 2 * 4, getTexCoords(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
gl.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, getNoOfIndices() * 4, getIndices(), GL.GL_STREAM_DRAW);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getColorBufferObject());
gl.glBufferData(GL.GL_ARRAY_BUFFER, getNoOfVertices() * 4 * 4, getColors(), GL.GL_STREAM_DRAW);
layerTextureShader.use(gl);
gl.glEnableClientState(GL.GL_VERTEX_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, getVertexBufferObject());
gl.glVertexPointer(3, GL.GL_FLOAT, 0, 0);
gl.glEnableClientState(GL.GL_COLOR_ARRAY);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, mask ? getMaskColorBufferObject() : getColorBufferObject());
gl.glColorPointer(4, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glTexCoordPointer(3, GL.GL_FLOAT, 0, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, getIndicesBufferObject());
final int count = getNoOfIndices();
gl.glDrawElements(GL.GL_TRIANGLES, count, GL.GL_UNSIGNED_INT, 0);
gl.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0);
gl.glBindBuffer(GL.GL_ARRAY_BUFFER, 0);
gl.glClientActiveTexture(GL.GL_TEXTURE0);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glClientActiveTexture(GL.GL_TEXTURE1);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
gl.glDisableClientState(GL.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL.GL_COLOR_ARRAY);
gl.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY);
layerTextureShader.release(gl);
I am unsure of what to put in my GLSL shaders. My vertex shader has the standard gl_TexCoord[0] = gl_MultiTexCoord0; and my fragment shader looks like:
uniform sampler2D texture;
void main()
{
gl_FragColor = texture2D(texture, gl_TexCoord[0].st);
}
How do I instruct the fragment shader on which texture to use? I assume it's done when I'm populating the vertex, index, and texture buffers, by passing in this 3rd texture coordinate for each point? Is the value of this 3rd coordinate the index of the relevant texture?
I hope my question makes sense and thanks for any help.
Chris
What you are looking for is a cube map. In OpenGL, you can define six textures at once (representing the six sides of a cube) and map them using 3D texture coordinates instead of the common 2D texture coordinates. For a simple cube, the texture coordinates would be the same as the vertices' respective normals. (If you will only be texturing plain cubes in this manner, you can consolidate normals and texture coordinates in your vertex shader, too!) Cube maps are much simpler than trying to bind six distinct textures simultaneously the way you are doing right now.
GLuint mHandle;
glGenTextures(1, &mHandle); // create your texture normally
// Note the target being used instead of GL_TEXTURE_2D!
// Bind the texture first so the parameters below apply to it
glBindTexture(GL_TEXTURE_CUBE_MAP, mHandle);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Now, load in your six distinct images. They need to be the same dimensions!
// Notice the targets being specified: the six sides of the cube map.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data1);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data2);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data3);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data4);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data5);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGBA, width, height, 0,
format, GL_UNSIGNED_BYTE, data6);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
// And of course, after you are all done using the textures...
glDeleteTextures(1, &mHandle);
Now, when doing your shaders, you need the vertex shader to accept and/or pass 3D coordinates (vec3) instead of 2D coordinates (vec2).
// old GLSL style
attribute vec3 inTextureCoordinate;
varying vec3 vTextureCoordinate;
// more recent GLSL
in vec3 inTextureCoordinate;
out vec3 vTextureCoordinate;
In this example, your vertex shader would simply assign vTextureCoordinate = inTextureCoordinate. Your fragment shader then needs to accept that texture coordinate and sample the cube map uniform.
uniform samplerCube cubeMap;
...
gl_FragColor = textureCube(cubeMap, vTextureCoordinate);
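At draw time, the cube map still has to be bound to the texture unit that the samplerCube uniform reads from. A minimal sketch, assuming cubeMapLocation is a hypothetical location obtained from glGetUniformLocation:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, mHandle);
glUniform1i(cubeMapLocation, 0); // the "cubeMap" sampler reads from texture unit 0
// ... issue the cube's draw call here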
Whew! That was a lot. Did I leave anything out?