I'm now building a voxel game. In the beginning I used a texture atlas that stores all the voxel textures, and it worked fine. After that I decided to use greedy meshing in my game, so the texture atlas is not useful anymore. I read some articles which said that a texture array should be used instead, so I tried to read up on and use the texture array technique for texturing. However, the result I got was all black in my game. So what am I missing?
This is my texture atlas (600 x 600)
Here is my Texture2DArray, I use this class to read and save a texture array
Texture2DArray::Texture2DArray() : Internal_Format(GL_RGBA8), Image_Format(GL_RGBA), Wrap_S(GL_REPEAT), Wrap_T(GL_REPEAT), Wrap_R(GL_REPEAT), Filter_Min(GL_NEAREST), Filter_Max(GL_NEAREST), Width(0), Height(0)
{
glGenTextures(1, &this->ID);
}
void Texture2DArray::Generate(GLuint width, GLuint height, unsigned char* data)
{
this->Width = width;
this->Height = height;
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
// I cannot decide what the texture array layer count (depth) should be (I put 1 here for now)
// Can anyone explain to me how to decide the layer count here?
glTexImage3D(GL_TEXTURE_2D_ARRAY, 1, this->Internal_Format, this->Width, this->Height, 0, 1 , this->Image_Format, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, this->Filter_Max);
//unbind this texture so that another texture can be created
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}
void Texture2DArray::Bind() const
{
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
}
Here is my Fragment Shader
#version 330 core
uniform sampler2DArray ourTexture;
in vec2 texCoord;
out vec4 FragColor;
void main(){
// 1 (the layer number) just for testing
FragColor = texture(ourTexture,vec3(texCoord, 1));
}
Here is my Vertex Shader
#version 330 core
layout (location = 0) in vec3 inPos;
layout (location = 1) in vec2 inTexCoord;
out vec2 texCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main(){
gl_Position = projection * view * vec4(inPos,1.0f);
texCoord = inTexCoord;
}
This is my rendering result
EDIT 1:
I figured out that the texture atlas doesn't work as a texture array because it is a grid, so OpenGL cannot decide where each layer should begin. So I created a vertical texture (18 x 72) and tried again, but it is still all black everywhere.
I have checked that I bind the texture before using it.
When the 3-dimensional texture image is specified, the depth has to be the number of images that are stored in the array (e.g. imageCount). The width and height parameters are the width and height of one tile (e.g. tileW, tileH). The level argument should be 0 and the border parameter has to be 0. See glTexImage3D. glTexImage3D creates the data store for the texture image: the memory required for the texture is reserved (on the GPU). It is possible to pass a pointer to the image data, but it is not necessary.
If all the tiles are stored in a vertical atlas, then the image data can be set directly:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, data);
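For illustration, here is a minimal sketch of how the Generate method from the question could be adapted for that case. The extra layerCount parameter (and its name) is an assumption of mine, not part of the original class:
void Texture2DArray::Generate(GLuint tileW, GLuint tileH, GLuint layerCount, unsigned char* data)
{
this->Width = tileW;
this->Height = tileH;
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
// level = 0, depth = number of layers, border = 0
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format, tileW, tileH, layerCount, 0, this->Image_Format, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, this->Filter_Max);
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}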
If the tiles are in the 16x16 atlas, then the tiles have to be extracted from the texture atlas and each subimage has to be set in the texture array (data[i] is the image data of one tile). Create the texture image:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
After that, use glTexSubImage3D to put the texture data into the data store of the texture object. glTexSubImage3D uses the existing data store and only copies data, e.g.:
for (int i = 0; i < imageCount; ++i)
{
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, data[i]);
}
An algorithm to extract the tiles and specify the texture image may look as follows
#include <algorithm> // std::copy
#include <vector> // std::vector
unsigned char* data = ...; // 16x16 texture atlas image data
int tileW = ...; // number of pixels in a row of 1 tile
int tileH = ...; // number of pixels in a column of 1 tile
int channels = 4; // 4 for RGBA
int tilesX = 16;
int tilesY = 16;
int imageCount = tilesX * tilesY;
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
std::vector<unsigned char> tile(tileW * tileH * channels);
int tileSizeX = tileW * channels;
int rowLen = tilesX * tileSizeX;
for (int iy = 0; iy < tilesY; ++ iy)
{
for (int ix = 0; ix < tilesX; ++ ix)
{
unsigned char *ptr = data + iy*rowLen + ix*tileSizeX;
for (int row = 0; row < tileH; ++ row)
std::copy(ptr + row*rowLen, ptr + row*rowLen + tileSizeX,
tile.begin() + row*tileSizeX);
int i = iy * tilesX + ix;
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, tile.data());
}
}
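The texture in the question is sampled with GL_NEAREST filtering, so a single mip level is enough for the array texture to be complete. If a mipmapping minification filter is wanted later, the full chain can be generated once, after the upload loop:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);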
So I'm trying to pass a bunch of vectors to the fragment shader, and apparently I should do it with a 1D texture. But if I try to access the passed vectors, the values are not what I expect.
How should I index the texture() function?
Passing the texture:
std::vector<vec3> triangles;
//triangles is already filled by this point
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_1D, texture);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB16F, Object::triangles.size(), 0, GL_RGB, GL_FLOAT, &Object::triangles[0]);
GLint textureLoc = glGetUniformLocation( getId(), "triangles" );
glUniform1f(textureLoc, 0);
setUniform((int)Object::triangles.size(), "triCount");
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
//draw a rectangle from -1,-1 to 1,1
fragment shader code:
uniform sampler1D triangles;
uniform int triCount;
struct Triangle{
vec3 a,b,c;
vec3 normal;
};
void main(){
for(int i = 0;i < triCount;i++){//for each triangle
Triangle triangle;
//set the points of the triangle
triangle.a = vec3(texture(triangles,i));
triangle.b = vec3(texture(triangles,i++));
triangle.c = vec3(texture(triangles,i++));
//set the normal vector of the triangle
triangle.normal = vec3(texture(triangles,i++));
//then i do stuff with the current triangle and return a color
}
}
The array contains 3 points and a normal vector for a bunch of triangles; that's why I read from the texture this way.
edit:
glGetTexImage confirmed that the passed texture is correct.
When using texture, the texture coordinates are floating-point values in the range [0.0, 1.0]. Use texelFetch instead to look up a single texel with integral texture coordinates in the range [0, width):
triangle.a = texelFetch(triangles, i*4, 0).xyz;
triangle.b = texelFetch(triangles, i*4+1, 0).xyz;
triangle.c = texelFetch(triangles, i*4+2, 0).xyz;
triangle.normal = texelFetch(triangles, i*4+3, 0).xyz;
Be aware that the computation of the texel indices in your shader code is incorrect: i++ evaluates to the old value of i, and incrementing the loop counter inside the body skips over data.
Alternatively, you can calculate the texture coordinate by dividing the index by the width of the texture. The size of the texture can be queried with textureSize:
float width = float(textureSize(triangles, 0));
triangle.a = texture(triangles, (float(i*4)+0.5) / width).xyz;
triangle.b = texture(triangles, (float(i*4)+1.5) / width).xyz;
triangle.c = texture(triangles, (float(i*4)+2.5) / width).xyz;
triangle.normal = texture(triangles, (float(i*4)+3.5) / width).xyz;
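For completeness, a rough host-side sketch of the layout this indexing assumes: four RGB texels per triangle (a, b, c, normal), stored consecutively. The names packed and texelCount are placeholders of mine, and vec3 is assumed to be three tightly packed floats (as with glm::vec3):
std::vector<vec3> packed = ...; // a0, b0, c0, n0, a1, b1, c1, n1, ...
GLsizei texelCount = (GLsizei)packed.size(); // 4 * number of triangles
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_1D, texture);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// GL_RGB32F keeps full float precision; GL_RGB16F as in the question works too
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB32F, texelCount, 0, GL_RGB, GL_FLOAT, packed.data());
// note: sampler uniforms are set with glUniform1i, not glUniform1f
glUniform1i(glGetUniformLocation(getId(), "triangles"), 0);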
I am trying to display a layer of a 3D texture created from some 3D data, but all sampled points are always black (I guess my texture creation/allocation is failing somehow). It is being rendered on a plane using window coords. I have checked my data; it is a vector with the correct values. glEnable(GL_TEXTURE_3D) has been called earlier. Any clues why this would fail?
Function that creates the texture:
bool VolumeRender::setVolumeData(QOpenGLShaderProgram *program, vector<unsigned short> v, int x, int y, int z){
voxels.resize(v.size(), 0);
cout << "Processing texture" << endl;
unsigned short sMax = 0;
unsigned short sMin = 32768;
for (unsigned int i =0; i< voxels.size(); i++){
sMax = max(sMax, v[i]);
sMin = min(sMin, v[i]);
}
for (unsigned int i = 0; i < voxels.size(); i++) voxels[i] = (v[i] - sMin) / (float)(sMax - sMin);
cout << "Loading 3D texture" << endl;
gl.glActiveTexture(GL_TEXTURE0);
gl.glGenTextures(1, &volumeTexture);
gl.glBindTexture(GL_TEXTURE_3D, volumeTexture);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
gl.glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER);
gl.glTexImage3D(GL_TEXTURE_3D, 0, GL_RED, x, y, z, 0, GL_RED, GL_FLOAT, &voxels[0]);
gl.glBindTexture(GL_TEXTURE_3D, 0);
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
voxelsLoaded = true;
return true;
}
Simple fragment shader:
#version 330 core
uniform sampler3D VOXELS;
uniform vec2 SIZE;
out vec4 color;
void main(){
vec2 coords = (gl_FragCoord.xy - 0.5) / SIZE;
vec3 texcoords = vec3(coords, 0.5);
color = texture(VOXELS, texcoords);
}
glEnable(GL_TEXTURE_…) has no effect when using shaders; it's a relic from the fixed-function pipeline era. On the other hand, the texture must actually be bound when drawing.
In your code you have
gl.glBindTexture(GL_TEXTURE_3D, 0);
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
Now since this is in initialization code, it's unclear if you actually understand the consequences of these lines. So let's break it down:
gl.glBindTexture(GL_TEXTURE_3D, 0);
This means that texture 0 (which with shaders is the nil texture, but in old and busted OpenGL-1.0 it actually could be sampled from) is bound to texture unit 0. From there on, trying to sample from texture unit 0 will not sample anything.
program->bind();
program->setUniformValue("VOXELS", 0);
program->release();
This sets the sampler uniform named "VOXELS" to sample from texture unit 0. Whatever texture is bound to that texture unit at the moment of the draw call is the texture that will be sampled.
Somewhere in your program you're making a draw call. You didn't show us where. But in order for your drawing to actually sample from your texture you have to bind your 3d texture to texture unit 0.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, volumeTexture);
draw_stuff();
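In this Qt-based code that would look roughly like the following wherever the plane is drawn (a sketch only; shaderProgram and drawPlane() are placeholders for the actual program object and draw call):
shaderProgram->bind();
gl.glActiveTexture(GL_TEXTURE0); // unit 0, matching the VOXELS uniform value
gl.glBindTexture(GL_TEXTURE_3D, volumeTexture);
drawPlane(); // issue the actual draw call here
gl.glBindTexture(GL_TEXTURE_3D, 0);
shaderProgram->release();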
For the gui for my game, I have a custom texture object that stores the rgba data for a texture. Each GUI element registered by my game adds to the final GUI texture, and then that texture is overlayed onto the framebuffer after post-processing.
I'm having trouble converting my Texture object to an openGL texture.
First I create a 1D int array that goes rgbargbargba... etc.
public int[] toIntArray(){
int[] colors = new int[(width*height)*4];
int i = 0;
for(int y = 0; y < height; ++y){
for(int x = 0; x < width; ++x){
colors[i] = r[x][y];
colors[i+1] = g[x][y];
colors[i+2] = b[x][y];
colors[i+3] = a[x][y];
i += 4;
}
}
return colors;
}
Where r, g, b, and a are jagged int arrays from 0 to 255. Next I create the int buffer and the texture.
id = glGenTextures();
glBindTexture(GL_TEXTURE_2D, id);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
IntBuffer iBuffer = BufferUtils.createIntBuffer(((width * height)*4));
int[] data = toIntArray();
iBuffer.put(data);
iBuffer.rewind();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, iBuffer);
glBindTexture(GL_TEXTURE_2D, 0);
After that I add a 50x50 red square into the upper left of the texture, and bind the texture to the framebuffer shader and render the fullscreen rect that displays my framebuffer.
frameBuffer.unbind(window.getWidth(), window.getHeight());
postShaderProgram.bind();
glEnable(GL_TEXTURE_2D);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, guiManager.texture()); // this gets the texture id that was created
postShaderProgram.setUniform("gui_texture", 1);
mesh.render();
postShaderProgram.unbind();
And then in my fragment shader, I try displaying the GUI:
#version 330
in vec2 Texcoord;
out vec4 outColor;
uniform sampler2D texFramebuffer;
uniform sampler2D gui_texture;
void main()
{
outColor = texture(gui_texture, Texcoord);
}
But all it outputs is a black window!
I added a red 50x50 rectangle into the upper left corner and verified that it exists, but for some reason it isn't showing in the final output.
That gives me reason to believe that I'm not converting my texture into an opengl texture with glTexImage2D correctly.
Can you see anything I'm doing wrong?
Update 1:
Here I saw them doing a similar thing using a float array, so I tried converting my 0-255 to a 0-1 float array and passing it as the image data like so:
public float[] toFloatArray(){
float[] colors = new float[(width*height)*4];
int i = 0;
for(int y = 0; y < height; ++y){
for(int x = 0; x < width; ++x){
colors[i] = (( r[x][y] * 1.0f) / 255);
colors[i+1] = (( g[x][y] * 1.0f) / 255);
colors[i+2] = (( b[x][y] * 1.0f) / 255);
colors[i+3] = (( a[x][y] * 1.0f) / 255);
i += 4;
}
}
return colors;
}
...
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_FLOAT, toFloatArray());
And it works!
I'm going to leave the question open however as I want to learn why the int buffer wasn't working :)
When you specified GL_UNSIGNED_INT as the type of the "host" data, OpenGL expected 32 bits allocated for each color channel. Since OpenGL maps color values to the range [0.0f, 1.0f], it takes your input values (in the range [0, 255]) and divides them by the maximum value of an unsigned int (about 4.3 billion) to get the final color displayed on screen. As an exercise, using your original code, set the "clear" color of the screen to white, and see that a black rectangle is getting drawn on screen.
You have two options. The first is to convert the color values to the range implied by GL_UNSIGNED_INT, which means multiplying each value by 2^24 (e.g. Math.pow(2, 24)) and trusting that the integer overflow of multiplying by that value behaves correctly (since Java doesn't have unsigned integer types).
The other, far safer option, is to store each 0-255 value in a byte[] object (do not use char. char is 1 byte in C/C++/OpenGL, but is 2 bytes in Java) and specify the type of the elements as GL_UNSIGNED_BYTE.
I wrote a simple test case to get the height of an image within a compute shader and write it to an SSBO. I've used the SSBO code before, and I know that part works fine. I used apitrace to inspect the state during the glDispatchCompute call, and I can see both the original texture and the image bound to the correct image unit. However, imageSize always returns zero (the output is all zeros, with the exception of some leftover -1s at the end because the division with the workgroup size rounds down). No OpenGL errors are thrown.
I based this test case on one of my earlier questions which included code to bind an SSBO to a compute shader (I use it here to get debug output from the compute shader).
class ComputeShaderWindow : public QOpenGLWindow {
public:
void initializeGL() {
// Create the opengl functions object
gl = context()->versionFunctions<QOpenGLFunctions_4_3_Core>();
m_compute_program = new QOpenGLShaderProgram(this);
auto compute_shader_s = fs::readFile(
"test_assets/example_compute_shader.comp");
QImage img("test_assets/input/out.png");
// Adds the compute shader, then links and binds it
m_compute_program->addShaderFromSourceCode(QOpenGLShader::Compute,
compute_shader_s);
m_compute_program->link();
m_compute_program->bind();
GLuint frame;
// Create the texture
gl->glGenTextures(1, &frame);
// Bind the texture
gl->glBindTexture(GL_TEXTURE_2D, frame);
// Fill the texture with the image
gl->glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB8,
img.width(),
img.height(),
0,
GL_BGRA,
GL_UNSIGNED_BYTE,
img.bits());
GLuint image_unit = 1;
// Get the location of the image uniform
GLuint uniform_location = gl->glGetUniformLocation(
m_compute_program->programId(),
"video_frame");
// Set location to 0 (a unique value that we choose)
gl->glUniform1i(uniform_location, image_unit);
// Bind layer of texture to image unit
gl->glBindImageTexture(image_unit,
frame,
0,
GL_FALSE,
0,
GL_READ_ONLY,
GL_RGBA8UI);
// We should only need the bit for shader image access,
// but for the purpose of this example, I set all the bits
// just to be safe
gl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
// SSBO stuff to get output from the shader
GLfloat* default_values = new GLfloat[NUM_INVOCATIONS];
std::fill(default_values, default_values + NUM_INVOCATIONS, -1.0);
GLuint ssbo;
gl->glGenBuffers(1, &ssbo);
gl->glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
gl->glBufferData(GL_SHADER_STORAGE_BUFFER,
NUM_INVOCATIONS * sizeof(float),
&default_values[0],
GL_STATIC_DRAW);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
gl->glDispatchCompute(NUM_INVOCATIONS / WORKGROUP_SIZE, 1, 1);
gl->glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
// Now read from the buffer so that we can check its values
GLfloat* read_data = (GLfloat*) gl->glMapBuffer(GL_SHADER_STORAGE_BUFFER,
GL_READ_ONLY);
std::vector<GLfloat> buffer_data(NUM_INVOCATIONS);
// Read from buffer
for (int i = 0; i < NUM_INVOCATIONS; i++) {
DEBUG(read_data[i]);
}
DEBUG("Done!");
gl->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
assert(gl->glGetError() == GL_NO_ERROR);
}
void resizeGL(int width, int height) {
}
void paintGL() {
}
void teardownGL() {
}
private:
QOpenGLFunctions_4_3_Core* gl;
QOpenGLShaderProgram* m_compute_program;
static constexpr int NUM_INVOCATIONS = 9000;
static constexpr int WORKGROUP_SIZE = 128;
};
As for the compute shader:
#version 430 core
layout(local_size_x = 128) in;
layout(rgba8ui, binding = 1) readonly uniform uimage2D video_frame;
layout(std430, binding = 0) writeonly buffer SSBO {
float data[];
};
void main() {
uint ident = int(gl_GlobalInvocationID);
uint num_workgroups = int(gl_WorkGroupID);
// Write the height of the image into the buffer
data[ident] = float(imageSize(video_frame).y);
}
Turns out I forgot the texture parameters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
No clue why that breaks imageSize() calls though.
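The likely reason: with only level 0 uploaded and the default minification filter (GL_NEAREST_MIPMAP_LINEAR), the texture is mipmap-incomplete, and image access to an incomplete texture yields nothing useful. For reference, a sketch of an allocation that sidesteps the pitfall by using immutable storage (glTexStorage2D is core in the 4.3 context used here; GL_RGBA8 is my choice of sized internal format, adjust as needed):
gl->glGenTextures(1, &frame);
gl->glBindTexture(GL_TEXTURE_2D, frame);
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
gl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// one mip level of sized storage; the texture is complete without mipmaps
gl->glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, img.width(), img.height());
gl->glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.width(), img.height(), GL_BGRA, GL_UNSIGNED_BYTE, img.bits());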
So I've tried following the docs; however, I can't seem to get a texture 2D array to work.
-(GLint)buildTextureArray:(NSArray *)arrayOfImages
{
GLImage *sample = [GLImage imageWithImageName:[arrayOfImages objectAtIndex:0] shouldFlip:NO]; //Creates a sample to examine texture width and height
int width = sample.width, height = sample.height;
GLsizei count = (GLsizei)arrayOfImages.count;
GLuint texture3D;
glGenTextures(1, &texture3D);
glBindTexture(GL_TEXTURE_2D_ARRAY, texture3D);
glPixelStorei(GL_UNPACK_ROW_LENGTH, width);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_S,GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_WRAP_T,GL_REPEAT);
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA8, width, height, count, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
int i = 0;
for (NSString *name in arrayOfImages) //Loops through everything in arrayOfImages
{
GLImage *image = [GLImage imageWithImageName:name shouldFlip:NO]; //My own class that loads an image
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, image.width, image.height, 1, GL_RGBA, GL_UNSIGNED_BYTE, image.data);
i++;
}
return texture3D;
}
//Setting Uniform elsewhere
glBindTexture(GL_TEXTURE_2D_ARRAY, textureArray);
glUniform1i(textures, 0);
//Fragment Shader
#version 150
in vec3 texCoords;
uniform sampler2DArray textures;
out vec3 color;
void main()
{
color = texture(textures, texCoords.stp, 0).rgb;
}
I am able to load individual textures with the same texture parameters, but I can't get it to work with the texture 2D array. All I get is a black texture. Why is this happening?
glTexParameteri(GL_TEXTURE_2D_ARRAY,GL_TEXTURE_MIN_FILTER,GL_LINEAR_MIPMAP_LINEAR);
Your texture in fact does not have mipmaps. So stop telling OpenGL that it does.
Also, always set the mipmap range parameters (GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL) for your texture. Or better yet, use texture storage (glTexStorage3D) to allocate your texture's storage, and it will do that for you.
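A rough sketch of both options for this case, reusing the names and formats from the question's code:
// Option A: keep glTexImage3D, but tell OpenGL there is only mip level 0
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAX_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Option B: immutable storage (needs GL 4.2 / ARB_texture_storage); one level, 'count' layers
glTexStorage3D(GL_TEXTURE_2D_ARRAY, 1, GL_RGBA8, width, height, count);
// then upload each layer with glTexSubImage3D as in the question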
For 2D array textures the s and t components of the texture coordinate are normalized to [0, 1] as usual, but the third component is not normalized: it selects the layer and ranges from 0 to the number of layers minus 1. Try adjusting these texture coordinates.
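To make that concrete, a hypothetical interleaved vertex layout where the layer index is baked into the third texture-coordinate component (all values, including layer 2.0, are made up for illustration):
float quad[] = {
// x, y, z, s, t, layer
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 2.0f,
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 2.0f,
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 2.0f,
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 2.0f,
};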