How do I go about sampling a mip level in glsl using textureLod()?
From what I know, mipmap LOD can only be accessed "explicitly" through the vertex shader (although I'm not sure whether that still holds in version 420, as most of the documentation I've found is outdated). Second, you need to define the mipmap level-of-detail range by setting texture parameters such as GL_TEXTURE_MAX_LEVEL and GL_TEXTURE_BASE_LEVEL.
In my code, I define these texture parameters after calling glCompressedTexImage2D:
glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAX_LEVEL, 9);
glTexParameteri(texture_type, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap_s);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap_t);
Next, I use this code when binding each texture sampler (types such as the albedo map, etc.):
glActiveTexture(GL_TEXTURE0 + unit); // Select the texture unit
glBindTexture(GL_TEXTURE_2D, id); // Bind the texture object
Finally, here is my shader code:
Vertex:
#version 420 core
in vec3 texcoord; // vertex attribute used below
out vec3 _texcoord;
out vec4 _albedo_lod;
uniform sampler2D albedo; // Albedo and specular map
void main()
{
_texcoord = texcoord;
_albedo_lod = textureLod(albedo, vec2(_texcoord.st), 2.0);
}
With the attached fragment shader:
#version 420 core
layout(location = 0) out vec4 gAlbedo; // Albedo texel colour
in vec3 _texcoord;
in vec4 _albedo_lod;
void main()
{
gAlbedo = _albedo_lod; // Assign albedo
}
Now for some reason, no matter what LOD value I input, the result always resolves to this:
It seems to be the very last mip level, no matter what value I input (bear in mind I'm packing 10 mip levels into the .dds file). However, when I manually set the base mip level via the texture parameter GL_TEXTURE_BASE_LEVEL, it works.
So, all in all: why won't it sample the correct mip level in GLSL using textureLod? Is it somehow deprecated in version 420?
EDIT: Here is the code for loading the dds file:
// This function imports a dds file, uploads it, and returns the OpenGL texture id (image info is returned through the out parameters)
inline GLuint LoadDds(std::vector<std::string> file, size_t &img_width, size_t &img_height, size_t &num_mips, GLvoid* data, GLint wrap_s, GLint wrap_t, GLint min_filter, GLint mag_filter, size_t texture_type, bool anistropic_filtering)
{
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(texture_type, textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
for (unsigned int i = 0; i < file.size(); i++) // For each image...
{
FILE *fp;
unsigned char header[124];
unsigned int height;
unsigned int width;
unsigned int linearSize;
unsigned int mipMapCount;
unsigned int fourCC;
unsigned int components;
unsigned int format;
unsigned int bufsize;
unsigned char* buffer;
/* try to open the file */
errno_t err;
err = fopen_s(&fp, file[i].c_str(), "rb");
if (fp == NULL)
return 0;
/* verify the type of file */
char filecode[4];
fread(filecode, 1, 4, fp);
if (strncmp(filecode, "DDS ", 4) != 0)
{
fclose(fp);
return 0;
}
/* get the surface desc */
fread(&header, 124, 1, fp);
height = *(unsigned int*)&(header[8]);
width = *(unsigned int*)&(header[12]);
linearSize = *(unsigned int*)&(header[16]);
mipMapCount = *(unsigned int*)&(header[24]);
fourCC = *(unsigned int*)&(header[80]);
bufsize = mipMapCount > 1 ? linearSize * 2 : linearSize;
buffer = (unsigned char*)malloc(bufsize * sizeof(unsigned char));
fread(buffer, 1, bufsize, fp);
/* close the file pointer */
fclose(fp);
components = (fourCC == FOURCC_DXT1) ? 3 : 4;
switch (fourCC)
{
case FOURCC_DXT1:
format = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
break;
case FOURCC_DXT3:
format = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
break;
case FOURCC_DXT5:
format = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
break;
default:
free(buffer);
return 0;
}
unsigned int blockSize = (format == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) ? 8 : 16;
unsigned int offset = 0;
for (unsigned int level = 0; level < mipMapCount && (width || height); ++level)
{
unsigned int size = ((width + 3) / 4) * ((height + 3) / 4) * blockSize;
glCompressedTexImage2D(texture_type != GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_2D : GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, level, format, width, height,
0, size, buffer + offset);
if ((level < 1) && (i < 1)) // Only assign the output parameters from the first image's base level
{
img_width = width; // Assign texture width
img_height = height; // Assign texture height
data = buffer; // Assign buffer data
num_mips = mipMapCount; // Assign number of mips
}
offset += size;
width /= 2;
height /= 2;
}
if (anistropic_filtering) // If anisotropic filtering is requested...
{
GLfloat f_largest; // A container for the maximum supported anisotropy level
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &f_largest); // Query the maximum anisotropy supported by the hardware
glTexParameterf(texture_type, GL_TEXTURE_MAX_ANISOTROPY_EXT, f_largest); // Apply filter to texture
}
if (!mipMapCount)
glGenerateMipmap(texture_type); // Generate mipmap
free(buffer); // Free buffers from memory
}
// Parameters
glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
glTexParameteri(texture_type, GL_GENERATE_MIPMAP, GL_TRUE);
glTexParameteri(texture_type, GL_TEXTURE_MAX_LEVEL, 9);
glTexParameteri(texture_type, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap_s);
glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap_t);
// Set additional cubemap parameters
if (texture_type == GL_TEXTURE_CUBE_MAP)
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, wrap_s);
return textureID; // Return texture id
}
And here is an image of each mipmap level being generated using NVIDIA's dds plugin:
Since you sample per vertex, this seems to be exactly the expected behavior.
You say the mip level parameter has no influence, but from what I can see the difference should only become noticeable once the pixel density drops below the vertex density and the values start averaging out. That might never happen if you don't store the entire mip chain, since the lowest resolution you do store might still have enough definition (I can't really tell from the screen capture, and I can only guess at the model's tessellation).
Since you're generating the mip chain manually, though, you could easily test with a different flat color for each level and see whether they're indeed fetched properly (and if you're unsure about the importer, it might be worth trying the fetch in the pixel shader first as well).
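For instance, a minimal fragment-shader version of the test (a sketch that reuses the question's own albedo sampler and LOD value; nothing else is assumed):
#version 420 core
layout(location = 0) out vec4 gAlbedo;
in vec3 _texcoord;
uniform sampler2D albedo;
void main()
{
gAlbedo = textureLod(albedo, _texcoord.st, 2.0); // explicit-LOD fetch per fragment
}
If the mip chain is uploaded correctly, raising the last argument should visibly blur (or recolour, with the flat-colour test) the output.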
I know that there is another question with exactly the same title here; however, the solution provided there does not work in my case.
I am trying to access pixel value from my compute shader. But the imageLoad function always returns 0.
Here is how I load the image:
void setTexture(GLuint texture_input, const char *fname)
{
// set texture related
int width, height, nbChannels;
unsigned char *data = stbi_load(fname, &width, &height, &nbChannels, 0);
if (data)
{
GLenum format;
if (nbChannels == 1)
{
format = GL_RED;
}
else if (nbChannels == 3)
{
format = GL_RGB;
}
else if (nbChannels == 4)
{
format = GL_RGBA;
}
glActiveTexture(GL_TEXTURE0 + 1);
gerr();
glBindTexture(GL_TEXTURE_2D, texture_input);
gerr();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
gerr();
glTexImage2D(GL_TEXTURE_2D, // target
0, // level, 0 means base level
format, // internal format of image specifies color components
width, height, // what it says
0, // border, should be 0 at all times
format, GL_UNSIGNED_BYTE, // data type of pixel data
data);
gerr();
glBindImageTexture(1, // unit
texture_input, // texture id
0, // level
GL_FALSE, // is layered
0, // layer no
GL_READ_ONLY, // access type
GL_RGBA32F);
// end texture handling
gerr();
glBindTexture(GL_TEXTURE_2D, 0); // unbind
}
else
{
std::cout << "Failed to load texture" << std::endl;
}
stbi_image_free(data);
}
And here is the relevant declaration and calling code in the shader:
layout(rgba32f, location = 1, binding = 1) readonly uniform image2D in_image;
struct ImageTexture
{
int width;
int height;
};
vec3 imageValue(ImageTexture im, float u, float v, in vec3 p)
{
u = clamp(u, 0.0, 1.0);
v = 1 - clamp(v, 0.0, 1.0);
int i = int(u * im.width);
int j = int(v * im.height);
if (i >= im.width)
i = im.width - 1;
if (j >= im.height)
j = im.height - 1;
vec3 color = imageLoad(in_image, ivec2(i, j)).xyz;
if (color == vec3(0))
color = vec3(0, u, v); // debug
return color;
}
I am seeing a green gradient instead of the contents of the image, which means my debugging code is in effect.
Either the internal format of the texture does not match the format specified at glBindImageTexture, or the format argument is not a valid enumerator constant when the two-dimensional texture image is specified, because format is used twice: for the internal format and for the format (see glTexImage2D):
glTexImage2D(GL_TEXTURE_2D, // target
0, // level, 0 means base level
format, // internal format of image specifies color
// components
width, height, // what it says
0, // border, should be 0 at all times
format, GL_UNSIGNED_BYTE, // data type of pixel data
data);
The format argument to glBindImageTexture is GL_RGBA32F:
glBindImageTexture(1, // unit
texture_input, // texture id
0, // level
GL_FALSE, // is layered
0, // layer no
GL_READ_ONLY, // access type
GL_RGBA32F);
Hence, the internal format has to be GL_RGBA32F. A possible format is GL_RGBA:
glTexImage2D(GL_TEXTURE_2D, // target
0, // level, 0 means base level
GL_RGBA32F, // internal format of image specifies color
// components
width, height, // what it says
0, // border, should be 0 at all times
GL_RGBA, GL_UNSIGNED_BYTE, // data type of pixel data
data);
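If in doubt, the internal format that was actually allocated can be queried back; a quick sanity check (not part of the fix itself) might look like this:
GLint allocated_format = 0;
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &allocated_format);
// Expect GL_RGBA32F here; anything else will not match the format
// passed to glBindImageTexture, and imageLoad will return zeros.
Note that the texture has to be bound when this is called.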
I'm now building a voxel game. In the beginning I used a texture atlas that stores all voxel textures, and it worked fine. After that I decided to use Greedy Meshing in my game, so a texture atlas is no longer practical. I read some articles which said I should use a Texture Array instead. I then tried to use the texture array technique for texturing; however, the result I got was all black in my game. So what am I missing?
This is my texture atlas (600 x 600)
Here is my Texture2DArray class, which I use to create and store a texture array:
Texture2DArray::Texture2DArray() : Internal_Format(GL_RGBA8), Image_Format(GL_RGBA), Wrap_S(GL_REPEAT), Wrap_T(GL_REPEAT), Wrap_R(GL_REPEAT), Filter_Min(GL_NEAREST), Filter_Max(GL_NEAREST), Width(0), Height(0)
{
glGenTextures(1, &this->ID);
}
void Texture2DArray::Generate(GLuint width, GLuint height, unsigned char* data)
{
this->Width = width;
this->Height = height;
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
// I cannot decide what the texture array layer count (depth) should be (I put 1 here for the layer number)
// Can anyone explain to me how to decide the layer count here?
glTexImage3D(GL_TEXTURE_2D_ARRAY, 1, this->Internal_Format, this->Width, this->Height, 0, 1 , this->Image_Format, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Max);
// unbind this texture so another texture can be created
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}
void Texture2DArray::Bind() const
{
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
}
Here is my Fragment Shader
#version 330 core
uniform sampler2DArray ourTexture;
in vec2 texCoord;
out vec4 FragColor;
void main(){
// 1 (the layer number) just for testing
FragColor = texture(ourTexture,vec3(texCoord, 1));
}
Here is my Vertex Shader
#version 330 core
layout (location = 0) in vec3 inPos;
layout (location = 1) in vec2 inTexCoord;
out vec2 texCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main(){
gl_Position = projection * view * vec4(inPos,1.0f);
texCoord = inTexCoord;
}
This is my rendering result:
EDIT 1:
I figured out that a texture atlas doesn't work with a texture array, because it is a grid and OpenGL cannot decide where each tile begins. So I created a vertical texture (18 x 72) and tried again, but it is still all black everywhere.
I have checked binding the texture before using it.
When the 3-dimensional texture image is specified, the depth parameter has to be the number of images stored in the array (e.g. imageCount). The width and height parameters are the width and height of 1 tile (e.g. tileW, tileH). The level argument should be 0, and the border parameter has to be 0. See glTexImage3D. glTexImage3D creates the data store for the texture image; the memory required for the texture is reserved (on the GPU). It is possible to pass a pointer to the image data, but it is not necessary.
If all the tiles are stored in a vertical atlas, then the image data can be set directly:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, data);
If the tiles are in the 16x16 atlas, then the tiles have to be extracted from the texture atlas, and each subimage has to be set in the texture array (data[i] is the image data of one tile). Create the texture image:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
After that, use glTexSubImage3D to copy the texture data into the data store of the texture object. glTexSubImage3D uses the existing data store and copies data, e.g.:
for (int i = 0; i < imageCount; ++i)
{
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, data[i]);
}
Note that you have to extract the tiles from the texture atlas yourself before setting each subimage. An algorithm to extract the tiles and specify the texture image may look as follows:
#include <algorithm> // std::copy
#include <vector> // std::vector
unsigned char* data = ...; // 16x16 texture atlas image data
int tileW = ...; // number of pixels in a row of 1 tile
int tileH = ...; // number of pixels in a column of 1 tile
int channels = 4; // 4 for RGBA
int tilesX = 16;
int tilesY = 16;
int imageCount = tilesX * tilesY;
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format,
tileW, tileH, imageCount, 0,
this->Image_Format, GL_UNSIGNED_BYTE, nullptr);
std::vector<unsigned char> tile(tileW * tileH * channels);
int tileSizeX = tileW * channels;
int rowLen = tilesX * tileSizeX;
for (int iy = 0; iy < tilesY; ++ iy)
{
for (int ix = 0; ix < tilesX; ++ ix)
{
unsigned char *ptr = data + iy*rowLen + ix*tileSizeX;
for (int row = 0; row < tileH; ++ row)
std::copy(ptr + row*rowLen, ptr + row*rowLen + tileSizeX,
tile.begin() + row*tileSizeX);
int i = iy * tilesX + ix;
glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0,
0, 0, i,
tileW, tileH, 1,
this->Image_Format, GL_UNSIGNED_BYTE, tile.data());
}
}
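Putting it together, a fixed Generate method could take the layer count as a parameter; the following is just a sketch (the tileW/tileH/layerCount names are placeholders, and a vertical-strip atlas is assumed so the data can be uploaded in one call):
void Texture2DArray::Generate(GLuint tileW, GLuint tileH, GLuint layerCount, unsigned char* data)
{
this->Width = tileW;
this->Height = tileH;
glBindTexture(GL_TEXTURE_2D_ARRAY, this->ID);
// level = 0 and border = 0; the 6th argument (depth) is the layer count
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, this->Internal_Format, tileW, tileH, layerCount, 0, this->Image_Format, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, this->Wrap_S);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, this->Wrap_T);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_R, this->Wrap_R);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, this->Filter_Min);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, this->Filter_Max);
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
}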
I wrote a simple test case to get the height of an image within a compute shader and write it to an SSBO. I've used the SSBO code before, and I know that part works fine. I used apitrace to inspect the state during the glDispatchCompute call, and I can see both the original texture and the image bound to the correct image unit. However, imageSize always returns zero (the output is all zeros, with the exception of some leftover -1s at the end, because the division by the workgroup size rounds down). No OpenGL errors are thrown.
I based this test case on one of my earlier questions which included code to bind an SSBO to a compute shader (I use it here to get debug output from the compute shader).
class ComputeShaderWindow : public QOpenGLWindow {
public:
void initializeGL() {
// Create the opengl functions object
gl = context()->versionFunctions<QOpenGLFunctions_4_3_Core>();
m_compute_program = new QOpenGLShaderProgram(this);
auto compute_shader_s = fs::readFile(
"test_assets/example_compute_shader.comp");
QImage img("test_assets/input/out.png");
// Adds the compute shader, then links and binds it
m_compute_program->addShaderFromSourceCode(QOpenGLShader::Compute,
compute_shader_s);
m_compute_program->link();
m_compute_program->bind();
GLuint frame;
// Create the texture
gl->glGenTextures(1, &frame);
// Bind the texture
gl->glBindTexture(GL_TEXTURE_2D, frame);
// Fill the texture with the image
gl->glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB8,
img.width(),
img.height(),
0,
GL_BGRA,
GL_UNSIGNED_BYTE,
img.bits());
GLuint image_unit = 1;
// Get the location of the image uniform
GLuint uniform_location = gl->glGetUniformLocation(
m_compute_program->programId(),
"video_frame");
// Point the uniform at image unit 1 (the unit chosen above)
gl->glUniform1i(uniform_location, image_unit);
// Bind layer of texture to image unit
gl->glBindImageTexture(image_unit,
frame,
0,
GL_FALSE,
0,
GL_READ_ONLY,
GL_RGBA8UI);
// We should only need the bit for shader image access,
// but for the purpose of this example, I set all the bits
// just to be safe
gl->glMemoryBarrier(GL_ALL_BARRIER_BITS);
// SSBO stuff to get output from the shader
GLfloat* default_values = new GLfloat[NUM_INVOCATIONS];
std::fill(default_values, default_values + NUM_INVOCATIONS, -1.0);
GLuint ssbo;
gl->glGenBuffers(1, &ssbo);
gl->glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
gl->glBufferData(GL_SHADER_STORAGE_BUFFER,
NUM_INVOCATIONS * sizeof(float),
&default_values[0],
GL_STATIC_DRAW);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
gl->glDispatchCompute(NUM_INVOCATIONS / WORKGROUP_SIZE, 1, 1);
gl->glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT);
gl->glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
// Now read from the buffer so that we can check its values
GLfloat* read_data = (GLfloat*) gl->glMapBuffer(GL_SHADER_STORAGE_BUFFER,
GL_READ_ONLY);
std::vector<GLfloat> buffer_data(NUM_INVOCATIONS);
// Read from buffer
for (int i = 0; i < NUM_INVOCATIONS; i++) {
DEBUG(read_data[i]);
}
DEBUG("Done!");
gl->glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
assert(gl->glGetError() == GL_NO_ERROR);
}
void resizeGL(int width, int height) {
}
void paintGL() {
}
void teardownGL() {
}
private:
QOpenGLFunctions_4_3_Core* gl;
QOpenGLShaderProgram* m_compute_program;
static constexpr int NUM_INVOCATIONS = 9000;
static constexpr int WORKGROUP_SIZE = 128;
};
As for the compute shader:
#version 430 core
layout(local_size_x = 128) in;
layout(rgba8ui, binding = 1) readonly uniform uimage2D video_frame;
layout(std430, binding = 0) writeonly buffer SSBO {
float data[];
};
void main() {
uint ident = int(gl_GlobalInvocationID);
uint num_workgroups = int(gl_WorkGroupID);
// Write the height of the image into the buffer
data[ident] = float(imageSize(video_frame).y);
}
Turns out I forgot the texture parameters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
No clue why that breaks imageSize() calls though. (Most likely the texture was incomplete: the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, which expects a full mip chain, and with only level 0 uploaded, image access to an incomplete texture yields undefined results, zeros here.)
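An equivalent fix, if mipmaps aren't needed at all, is to restrict the level range so the single-level texture is complete on its own (just a sketch of the alternative):
// With only level 0 uploaded, limiting the mip range also
// makes the texture complete, without touching the filters:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);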
I am trying to render a texture with an alpha channel in it.
This is what I used for texture loading:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
I enabled GL_BLEND just before I render the texture: glEnable(GL_BLEND);
I also did this at the beginning of the code(the initialization): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
This is the result(It should be a transparent texture of a first person hand):
But when I load my texture like this (no alpha channel):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
This is the result:
Does anyone know what can cause this, or do I have to give more code?
Sorry for bad English, thanks in advance.
EDIT:
My texture loading code:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Data read from the header of the BMP file
unsigned char header[54];
unsigned int dataPos;
unsigned int imageSize;
unsigned int width, height;
// Actual RGB data
unsigned char * data;
// Open the file
FILE * file = fopen(imagepath, "rb");
if (!file) { printf("%s could not be opened. \n", imagepath); getchar(); exit(0); }
// Read the header, i.e. the 54 first bytes
// If less than 54 bytes are read, problem
if (fread(header, 1, 54, file) != 54) {
printf("Not a correct BMP file\n");
exit(0);
}
// A BMP files always begins with "BM"
if (header[0] != 'B' || header[1] != 'M') {
printf("Not a correct BMP file\n");
exit(0);
}
// Make sure this is a 24bpp file
if (*(int*)&(header[0x1E]) != 0) { printf("Not a correct BMP file\n");}
if (*(int*)&(header[0x1C]) != 24) { printf("Not a correct BMP file\n");}
// Read the information about the image
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize == 0) imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
if (dataPos == 0) dataPos = 54; // The BMP header is done that way
// Create a buffer
data = new unsigned char[imageSize];
// Read the actual data from the file into the buffer
fread(data, 1, imageSize, file);
// Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (imagepath == "hand.bmp") {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}
As you can see, it's not code I wrote myself; I got it from opengl-tutorial.org.
My first comment stated:
The repeating, offset pattern looks like the data is treated as having a larger offset, when in reality it has smaller (or opposite).
And that was before I actually noticed what you did. Yes, this is precisely that. You can't treat 4-bytes-per-pixel data as 3-bytes-per-pixel data; the alpha channel gets interpreted as colour, and that's why everything shifts this way.
If you want to disregard the alpha channel, you need to strip it off when loading, so that the data ends up with 3 bytes per pixel in the OpenGL texture memory. (That's what @RetoKoradi's answer is proposing, namely creating an RGB texture from RGBA data.)
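If you'd rather do the stripping on the CPU, it is only a few lines; a sketch, assuming tightly packed 4-byte BGRA input like the question's data (width and height as in the loader):
#include <vector>
// Repack BGRA (4 bytes per pixel) into BGR (3 bytes per pixel),
// discarding the alpha byte of every pixel.
std::vector<unsigned char> bgr(width * height * 3);
for (unsigned int px = 0; px < width * height; ++px)
{
bgr[px * 3 + 0] = data[px * 4 + 0];
bgr[px * 3 + 1] = data[px * 4 + 1];
bgr[px * 3 + 2] = data[px * 4 + 2]; // data[px * 4 + 3] (alpha) is dropped
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, bgr.data());
Remember GL_UNPACK_ALIGNMENT when the resulting row length is not a multiple of 4.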
If it isn't actually supposed to look so blue-ish, maybe it's not actually in BGR layout?
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
^
\--- change to GL_RGBA as well
My wild guess is that human skin would have more red than blue light reflected by it.
It looks like you misunderstood how the arguments of glTexImage2D() work:
The 3rd argument (internalformat) defines what format you want to use for the data stored in the texture.
The 7th and 8th argument (format and type) define the format of the data you pass into the call as the last argument.
Based on this, if the format of the data you're passing as the last argument is BGRA, and you want to create an RGB texture from it, the correct call is:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
Note that the 7th argument is now GL_BGRA, matching your input data, while the 3rd argument is GL_RGB, specifying that you want to use an RGB texture.
It seems you chose the wrong texture pixel alignment. To find the right one, try experimenting with the values (1, 2, 4) of glPixelStorei with GL_UNPACK_ALIGNMENT.
Specification:
void glPixelStorei( GLenum pname,
GLint param);
pname Specifies the symbolic name of the parameter to be set. One value affects the packing of pixel data into memory: GL_PACK_ALIGNMENT. The other affects the unpacking of pixel data from memory: GL_UNPACK_ALIGNMENT.
param Specifies the value that pname is set to.
glPixelStorei sets pixel storage modes that affect the operation of subsequent glReadPixels as well as the unpacking of texture patterns (see glTexImage2D and glTexSubImage2D).
pname is a symbolic constant indicating the parameter to be set, and param is the new value. One storage parameter affects how pixel data is returned to client memory:
GL_PACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
The other storage parameter affects how pixel data is read from client memory:
GL_UNPACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
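For tightly packed data the safe choice is byte alignment, set before the upload (the default is 4); for example:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows are no longer assumed to be padded to 4 bytes
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);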
The BMP format does not support transparency, at least not in its 3 most common versions (only GL_BGR mode and its masked modifications work). Use PNG, DDS, TIFF, or TGA (the simplest) instead.
Secondly, your total image data size computation formula is wrong:
imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
The right formula is:
imageSize = 4 * ((width * bitsPerPel + 31) / 32) * height;
where bitsPerPel is the picture's bits per pixel (8, 16, or 24).
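As a worked example, a helper that applies it (hypothetical name):
// Padded row size in bytes: each BMP row starts on a 4-byte boundary.
inline unsigned int bmpRowSize(unsigned int width, unsigned int bitsPerPel)
{
return 4 * ((width * bitsPerPel + 31) / 32);
}
// e.g. bmpRowSize(99, 24) == 300: 297 data bytes plus 3 padding bytes,
// while the naive width * 3 would give 297 and misread every following row.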
Here is the code of a function which can be used to load simple TGA files with transparency support:
// Define targa header.
#pragma pack(1)
typedef struct
{
GLbyte identsize; // Size of ID field that follows header (0)
GLbyte colorMapType; // 0 = None, 1 = paletted
GLbyte imageType; // 0 = none, 1 = indexed, 2 = rgb, 3 = grey, +8=rle
unsigned short colorMapStart; // First colour map entry
unsigned short colorMapLength; // Number of colors
unsigned char colorMapBits; // bits per palette entry
unsigned short xstart; // image x origin
unsigned short ystart; // image y origin
unsigned short width; // width in pixels
unsigned short height; // height in pixels
GLbyte bits; // bits per pixel (8 16, 24, 32)
GLbyte descriptor; // image descriptor
} TGAHEADER;
#pragma pack(8)
GLbyte *gltLoadTGA(const char *szFileName, GLint *iWidth, GLint *iHeight, GLint *iComponents, GLenum *eFormat)
{
FILE *pFile; // File pointer
TGAHEADER tgaHeader; // TGA file header
unsigned long lImageSize; // Size in bytes of image
short sDepth; // Pixel depth;
GLbyte *pBits = NULL; // Pointer to bits
// Default/Failed values
*iWidth = 0;
*iHeight = 0;
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
// Attempt to open the file
pFile = fopen(szFileName, "rb");
if(pFile == NULL)
return NULL;
// Read in header (binary)
fread(&tgaHeader, 18/* sizeof(TGAHEADER)*/, 1, pFile);
// Do byte swap for big vs little endian
#ifdef __APPLE__
BYTE_SWAP(tgaHeader.colorMapStart);
BYTE_SWAP(tgaHeader.colorMapLength);
BYTE_SWAP(tgaHeader.xstart);
BYTE_SWAP(tgaHeader.ystart);
BYTE_SWAP(tgaHeader.width);
BYTE_SWAP(tgaHeader.height);
#endif
// Get width, height, and depth of texture
*iWidth = tgaHeader.width;
*iHeight = tgaHeader.height;
sDepth = tgaHeader.bits / 8;
// Put some validity checks here. Very simply, I only understand
// or care about 8, 24, or 32 bit targa's.
if(tgaHeader.bits != 8 && tgaHeader.bits != 24 && tgaHeader.bits != 32)
{
fclose(pFile); // don't leak the file handle on the error path
return NULL;
}
// Calculate size of image buffer
lImageSize = tgaHeader.width * tgaHeader.height * sDepth;
// Allocate memory and check for success
pBits = new GLbyte[lImageSize];
if(pBits == NULL)
{
fclose(pFile);
return NULL;
}
// Read in the bits
// Check for read error. This should catch RLE or other
// weird formats that I don't want to recognize
if(fread(pBits, lImageSize, 1, pFile) != 1)
{
delete [] pBits; // allocated with new[], so delete[] rather than free
fclose(pFile);
return NULL;
}
// Set OpenGL format expected
switch(sDepth)
{
case 3: // Most likely case
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
break;
case 4:
*eFormat = GL_BGRA_EXT;
*iComponents = GL_RGBA8;
break;
case 1:
*eFormat = GL_LUMINANCE;
*iComponents = GL_LUMINANCE8;
break;
};
// Done with File
fclose(pFile);
// Return pointer to image data
return pBits;
}
iWidth and iHeight return the texture dimensions, and eFormat and iComponents return the external and internal image formats. The actual function return value is a pointer to the texture data.
So your function must look like:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Image info returned by the TGA loader
GLint width, height;
GLint component;
GLenum eFormat;
// Actual image data
GLbyte * data = gltLoadTGA(imagepath, &width, &height, &component, &eFormat);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (!strcmp(imagepath, "hand.tga")) { // important: we compare strings, not pointers
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}
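For completeness, the draw side then only needs blending enabled with the usual over operator, matching the calls already mentioned in the question:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// ... draw the textured hand here ...
glDisable(GL_BLEND);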
I'm trying to colour terrain points based on a texture colour (currently hard-coded to vec2(0.5, 0.5) for test purposes, which should be light blue), but all the points are grey. glGetError returns 0 throughout the whole process. I think I might be doing the render process wrong, or have a problem with my shaders(?)
Vertex Shader:
void main(){
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment Shader:
uniform sampler2D myTextureSampler;
void main(){
gl_FragColor = texture2D(myTextureSampler, vec2(0.5, 0.5));
}
Terrain Class:
class Terrain
{
public:
Terrain(GLuint pProgram, char* pHeightmap, char* pTexture){
if(!LoadTerrain(pHeightmap))
{
OutputDebugString("Loading terrain failed.\n");
}
if(!LoadTexture(pTexture))
{
OutputDebugString("Loading terrain texture failed.\n");
}
mProgram = pProgram;
mTextureLocation = glGetUniformLocation(pProgram, "myTextureSampler");
};
~Terrain(){};
void Draw()
{
glEnableClientState(GL_VERTEX_ARRAY); // Uncommenting this causes me to see nothing at all
glBindBuffer(GL_ARRAY_BUFFER, mVBO);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnable( GL_TEXTURE_2D );
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mBMP);
glProgramUniform1i(mProgram, mTextureLocation, 0);
GLenum a = glGetError();
glPointSize(5.0f);
glDrawArrays(GL_POINTS, 0, mNumberPoints);
a = glGetError();
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisable( GL_TEXTURE_2D );
glDisableClientState(GL_VERTEX_ARRAY);
}
private:
GLuint mVBO, mBMP, mUV, mTextureLocation, mProgram;
int mWidth;
int mHeight;
int mNumberPoints;
bool LoadTerrain(char* pFile)
{
/* Definitely no problem here - Vertex data is fine and rendering nice and dandy */
}
// TEXTURES MUST BE POWER OF TWO!!
bool LoadTexture(char *pFile)
{
unsigned char header[54]; // Each BMP file begins by a 54-bytes header
unsigned int dataPos; // Position in the file where the actual data begins
unsigned int width, height;
unsigned int imageSize;
unsigned char * data;
FILE * file = fopen(pFile, "rb");
if(!file)
return false;
if(fread(header, 1, 54, file) != 54)
{
fclose(file);
return false;
}
if ( header[0]!='B' || header[1]!='M' )
{
fclose(file);
return false;
}
// Read ints from the byte array
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize==0) imageSize=width*height*3; // 3 : one byte for each Red, Green and Blue component
if (dataPos==0) dataPos=54; // The BMP header is done that way
// Create a buffer
data = new unsigned char [imageSize];
// Read the actual data from the file into the buffer
fread(data,1,imageSize,file);
//Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
glGenTextures(1, &mBMP);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, mBMP);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
delete [] data;
data = 0;
return true;
}
};
Answering my own question in case anyone has a similar problem:
I had tested this with multiple images, but it turns out there's a bug in my graphics application of choice, which has been exporting 8-bit bitmaps even though I explicitly told it to export 24-bit bitmaps. So basically, reverting back to MS Paint solved my problem. Three cheers for MS Paint.
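A cheap guard against that class of bug is to validate the bpp field before uploading; a sketch against the 54-byte BMP header already read in LoadTexture above:
// Bytes 0x1C-0x1D of the 54-byte header hold the bits-per-pixel field.
unsigned short bpp = *(unsigned short*)&(header[0x1C]);
if (bpp != 24) // reject the 8-bit exports that caused this bug
{
fclose(file);
return false;
}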