Hello everyone, I'm trying to load a texture (a normal map) using OpenGL.
GLuint loadTexture(const char* fileName) {
GLuint textureID;
glGenTextures(1, &textureID);
// load file - using core SDL library
SDL_Surface* tmpSurface;
tmpSurface = SDL_LoadBMP(fileName);
if (tmpSurface == nullptr) {
std::cout << "Error loading bitmap" << std::endl;
}
// bind texture and set parameters
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tmpSurface->w, tmpSurface->h, 0,
GL_BGR, GL_UNSIGNED_BYTE, tmpSurface->pixels);
glGenerateMipmap(GL_TEXTURE_2D);
SDL_FreeSurface(tmpSurface);
return textureID;
}
Then when I try to render it, I get this:
Instead of this:
But I can render this one normally:
Do you have an idea? The color depth is 32 bits for the texture that is not working and 24 bits for the one that works.
Use SDL_ConvertSurfaceFormat to convert the surface to a known pixel format and upload the converted surface:
SDL_Surface* tmpSurface = SDL_LoadBMP(fileName);
SDL_Surface* formattedSurface = SDL_ConvertSurfaceFormat(
tmpSurface, SDL_PIXELFORMAT_ABGR8888, 0);
SDL_FreeSurface(tmpSurface);
// [...]
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, formattedSurface->w, formattedSurface->h, 0,
GL_RGBA, GL_UNSIGNED_BYTE, formattedSurface->pixels);
SDL_FreeSurface(formattedSurface);
Of course, you can instead evaluate the format attribute of the SDL_Surface and pass a matching format argument when the two-dimensional texture image is specified with glTexImage2D.
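For example, a minimal sketch of that kind of format check might look like this (assuming SDL2; the helper name pickGLFormat and the handled formats are illustrative, not exhaustive):
void pickGLFormat(const SDL_Surface* surface, GLint& internalFormat, GLenum& format)
{
    switch (surface->format->format) {
    case SDL_PIXELFORMAT_ABGR8888:   // bytes in memory: R, G, B, A
        internalFormat = GL_RGBA; format = GL_RGBA; break;
    case SDL_PIXELFORMAT_ARGB8888:   // bytes in memory: B, G, R, A
        internalFormat = GL_RGBA; format = GL_BGRA; break;
    case SDL_PIXELFORMAT_BGR24:      // what a 24-bit BMP typically loads as
        internalFormat = GL_RGB;  format = GL_BGR;  break;
    default:                         // anything else: convert the surface instead
        internalFormat = GL_RGBA; format = GL_RGBA; break;
    }
}
That way a 24-bit and a 32-bit BMP can both be uploaded without an extra conversion pass, as long as their layouts are covered by the switch.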
I had this problem when compiling my OpenGL code on lower-end PCs that don't support OpenGL 4.5, and on Macs. In my regular code I would use functions like glCreateTextures and glTextureStorage2D, but they aren't supported on those targets, so I took the older glGenTextures path instead.
Here's the image generation code:
Texture::Texture(const std::string& path)
: m_Path(path)
{
int width, height, channels;
stbi_set_flip_vertically_on_load(1);
unsigned char* data = stbi_load(path.c_str(), &width, &height, &channels, 0);
RW_CORE_ASSERT(data, "Failed to load image!");
m_Width = width;
m_Height = height;
GLenum internalFormat = 0, dataFormat = 0;
if (channels == 4)
{
internalFormat = GL_RGBA8;
dataFormat = GL_RGBA;
}
else if (channels == 3)
{
internalFormat = GL_RGB8;
dataFormat = GL_RGB;
}
m_InternalFormat = internalFormat;
m_DataFormat = dataFormat;
RW_CORE_ASSERT(internalFormat & dataFormat, "Format not supported!");
glGenTextures(1, &m_ID);
glBindTexture(GL_TEXTURE_2D, m_ID);
glTexParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(m_ID, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(m_ID, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(m_ID, 1, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
glBindTexture(GL_TEXTURE_2D, 0);
stbi_image_free(data);
}
And I want to bind my textures to specific slots on the GPU so I have this function:
void Texture::Bind(uint32_t slot) const
{
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, m_ID);
}
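In case it helps, here's a minimal usage sketch for that kind of slot-based binding (the shaderProgram handle and the sampler uniform name u_Texture are made-up placeholders): the sampler uniform has to be set to the same unit index the texture is bound to, not to the texture ID.
texture.Bind(3);                                   // bind to texture unit 3
glUseProgram(shaderProgram);
GLint location = glGetUniformLocation(shaderProgram, "u_Texture");
glUniform1i(location, 3);                          // sampler reads from unit 3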
Here's the screenshot of what gets drawn:
To make sure it wasn't a rendering problem I decided to put it into ImGui's renderer.
And here's the picture I am supposed to get:
The image imports correctly and I get no errors; the same importing code and paths work on the higher-end PC, and the only thing that changed is that the higher-end PC uses the OpenGL 4.5 texture-creation code.
It turns out I had to specify GL_TEXTURE_2D in these places instead of the texture ID:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
(Note that the second argument to glTexImage2D is the mipmap level; it should be 0 for the base image, not 1.)
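For reference, the OpenGL 4.5 DSA functions mentioned above take the texture name directly, which is probably where the mix-up came from. A rough sketch of the two equivalent paths (assuming width, height and an 8-bit RGBA image in data) looks like this:
// OpenGL 4.5+ direct state access: functions take the texture name.
GLuint tex45;
glCreateTextures(GL_TEXTURE_2D, 1, &tex45);
glTextureStorage2D(tex45, 1, GL_RGBA8, width, height);
glTextureSubImage2D(tex45, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
// Portable path: functions take the target and act on whatever texture is bound to it.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);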
I am trying to load an image into an OpenGL texture using SOIL2; however, it never seems to come out correctly unless I use SOIL2's direct load-to-texture function. I have tried using stb_image and DevIL, but both give similar results.
Code:
GLuint load_image(const std::string& path) {
int iwidth, iheight, channels;
unsigned char* image = SOIL_load_image(path.c_str(), &iwidth, &iheight, &channels, SOIL_LOAD_RGBA);
// std::cout << SOIL_last_result() << std::endl;
// float* image = stbi_loadf(path.c_str(), &iwidth, &iheight, &channels, STBI_rgb_alpha);
// if(!ilLoadImage(path.c_str()))
// std::cout << "Devil Failed to load image: " << iluErrorString(ilGetError()) << std::endl;
//
// unsigned char* image = ilGetData();
//
// int iwidth = ilGetInteger(IL_IMAGE_WIDTH);
// int iheight = ilGetInteger(IL_IMAGE_HEIGHT);
// int channels = ilGetInteger(IL_IMAGE_CHANNELS);
GLuint texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0 + texture);
GLint old_unpack_alignment;
glGetIntegerv(GL_UNPACK_ALIGNMENT, &old_unpack_alignment);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, texture);
glCheckError();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glCheckError();
GLenum original_format = (channels == 4 ? GL_RGBA : GL_RGB);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, iwidth, iheight, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
glPixelStorei(GL_UNPACK_ALIGNMENT, old_unpack_alignment);
return texture;
}
Screenshot:
What I should get:
I would like to know how to properly load an image into a texture.
Here is an example of what my texture loading function looks like:
unsigned int loadTexture(char const * path)
{
unsigned int textureID;
glGenTextures(1, &textureID);
int width, height, nrComponents;
unsigned char *data = stbi_load(path, &width, &height, &nrComponents, 0);
if (data)
{
GLenum format;
if (nrComponents == 1)
format = GL_RED;
else if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
glBindTexture(GL_TEXTURE_2D, textureID);
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
stbi_image_free(data);
}
else
{
std::cout << "Texture failed to load at path: " << path << std::endl;
stbi_image_free(data);
}
return textureID;
}
I will usually set up my VAO and VBO beforehand, then use this to load a texture. After that I configure my shader(s) for use. Within the render loop I use the shader and set the matrices, passing in any needed uniforms; once all the "model" information is done, I bind the vertex array, set the appropriate texture unit active, bind those texture(s), and finish up by drawing the arrays or primitives.
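A stripped-down sketch of that flow, with placeholder names (the shader wrapper, VAO, uniform names and the GLFW window are assumptions, not code from the answer above):
// once, at startup
unsigned int diffuseMap = loadTexture("textures/diffuse.png");   // hypothetical path
while (!glfwWindowShouldClose(window))
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    shader.use();                          // placeholder shader wrapper
    shader.setMat4("model", model);        // pass whatever uniforms the shader needs
    shader.setMat4("view", view);
    shader.setMat4("projection", projection);
    glActiveTexture(GL_TEXTURE0);          // activate the unit the sampler reads from
    glBindTexture(GL_TEXTURE_2D, diffuseMap);
    glBindVertexArray(VAO);
    glDrawArrays(GL_TRIANGLES, 0, 36);
    glfwSwapBuffers(window);
    glfwPollEvents();
}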
I have decided to use SOIL to load images for OpenGL in my project. I have this method, which loads an image and returns a GLTexture, a struct that holds a GLuint texture id and two ints, width and height:
GLTexture loadTexture(const char *filePath) {
GLTexture texture = {};
int width;
int height;
unsigned char *data;
//Load Image File Directly into an OpenGL Texture
texture.id = SOIL_load_OGL_texture
(
filePath,
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Generate and Bind Texture
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
//Get Width, Height and Data of Image
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
}
As far as I know, glGetTexLevelParameteriv() should return the width and height of the currently bound texture into width and height, but whenever I load an image, it returns 0.
Should I fill in width and height as parameters for the method or is it possible to get them via OpenGL?
The texture id generated by SOIL_load_OGL_texture is overwritten by the
glGenTextures(1, &(texture.id));
line (glGenTextures creates a new texture and stores its id in texture.id). All operations afterwards work on the newly created texture. Since this new texture is empty, its width and height are 0.
I'm not sure what you want to achieve here, but if you only want to load the texture, then this code might work:
texture.id = SOIL_load_OGL_texture (...);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Just bind and do not create a new texture
glBindTexture(GL_TEXTURE_2D, texture.id);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
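Alternatively, if you want the width and height independently of OpenGL, you could load the raw pixels with SOIL_load_image and upload them yourself. This is only a sketch built on top of the code above (fatalError, filePath and the GLTexture struct come from the question):
int width = 0, height = 0, channels = 0;
unsigned char* pixels = SOIL_load_image(filePath, &width, &height, &channels, SOIL_LOAD_RGBA);
if (pixels == nullptr) {
    fatalError("SOIL Loading Error!");
}
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
SOIL_free_image_data(pixels);
texture.width = width;
texture.height = height;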
I'm simply trying to load 6 pictures as the faces of a cube map texture in OpenGL. Below is the loading code:
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARB, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (int i = 0;i < 6;++i)
{
int width, height, channel;
unsigned char* img = SOIL_load_image(skybox[i].c_str(), &width, &height, &channel, SOIL_LOAD_AUTO);
glTexImage2D(cubeTarget[i], 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
SOIL_free_image_data(img);
}
glEnable(GL_TEXTURE_CUBE_MAP_ARB);
The rendering code is omitted here. The weird thing is that the rendered cube is white; it seems the texture isn't loaded at all. I changed the loading code to see whether a 2D texture would work:
glGenTextures(1, texture);
glBindTexture(GL_TEXTURE_2D, texture[0]);
int width, height, channel;
unsigned char* img = SOIL_load_image(skybox[0].c_str(), &width, &height, &channel, SOIL_LOAD_AUTO);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
SOIL_free_image_data(img);
if(texture[0] == 0) return false;
glBindTexture(GL_TEXTURE_2D, texture[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glEnable(GL_TEXTURE_2D);
The result is that I can see the texture after all, however odd it looks because of the texture coordinates. I gathered the following information:
The library I use for loading images works well;
The cubemap setup is from the OpenGL SuperBible, chapter 9; almost the same code works fine when I compile the book's sample code.
BTW, does anyone have a suggestion for an image loading library? The one I use seems to have stopped being updated a long time ago...
Update: What I've found now is that if I load just one image as the texture for all the skybox faces, it shows up. As soon as I use a variable in place of a hard-coded value, nothing is displayed.
Finally I figured it out: it's because the resolution of the last picture is different from the others.
Really wasted a lot of time on that.
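A cheap guard against that, sketched on top of the loading loop from the question (std::cerr and <iostream> are assumed), is to compare every face against the first one's size before uploading:
int firstW = 0, firstH = 0;
for (int i = 0; i < 6; ++i)
{
    int width, height, channel;
    unsigned char* img = SOIL_load_image(skybox[i].c_str(), &width, &height, &channel, SOIL_LOAD_AUTO);
    if (i == 0) { firstW = width; firstH = height; }
    else if (width != firstW || height != firstH)
        std::cerr << "Cubemap face " << i << " has a different resolution: "
                  << width << "x" << height << std::endl;
    glTexImage2D(cubeTarget[i], 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img);
    SOIL_free_image_data(img);
}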
I've been messing around with framebuffers and render-to-texture, and I came across the need to blit them. Again, on some machines I get a GL_INVALID_OPERATION right after the glBlitFramebuffer call. Each texture bound to the framebuffer is set up the exact same way, all the same size and parameters. Also, when I try to blit one entire texture (previously successfully rendered to) to another framebuffer, where only the destination rectangle to write to is smaller than the rectangle to read from (e.g. when I want to blit it to a quarter of the screen), it throws a GL_INVALID_OPERATION too.
EDIT: Actually it always throws the error whenever the rectangles to read from and draw to have different sizes, so I can't blit to a texture of a different size, or to the same size but a different-sized 'render to' area...?
Every time I blit to a manually generated framebuffer, its status is checked through glCheckFramebufferStatus, and it always returns GL_FRAMEBUFFER_COMPLETE.
-BIGGEST SNIP EVER- (see below for shorter 'source code'; it obviously has a couple of C++ errors and is not complete, but it's only there for the GL calls)
The OpenGL error occurs when I call the last method of the viewport (Viewport::blit) with the screen framebuffer as target (by passing NULL). It first sets the read buffer of its own framebuffer (the draw buffers were already set) and then it calls RenderTarget::blit which calls glBlitFramebuffer. In the blit method it binds both buffers, and you can see it calls glCheckFramebufferStatus there which does not return an error.
I've been reading this over and over but I can't seem to find the error that causes it. When I blit the color buffer I use GL_LINEAR, otherwise I use GL_NEAREST. All color buffers use GL_RGB32F as the internal format, and the depth buffer (which I never blit) uses GL_DEPTH_COMPONENT32F.
EDIT: a shorter example; I just took all the GL calls and filled in the parameters I used:
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
glReadBuffer(GL_COLOR_ATTACHMENT0 + index);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
// OpenGL error check, does not return an error
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
GLenum status = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
{
// Some error checking, fortunately status always turns out to be complete
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, screenWidth, screenHeight, 0, 0, screenWidth, screenHeight, target, (target == GL_COLOR_BUFFER_BIT) ? GL_LINEAR : GL_NEAREST);
// If the source/destination read/draw rectangles differ in size, GL_INVALID_OPERATION is caught here
And the Framebuffer/Texture creation:
glGenFramebuffers(1, &_GL_Framebuffer);
glGenTextures(1, &_GL_ZBuffer);
glBindTexture(GL_TEXTURE_2D, _GL_ZBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, screenWidth, screenHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT + 0, _GL_ZBuffer, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
int writeIndices[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
writeIndices[i] = i;
glGenTextures(1, &_GL_Texture);
glBindTexture(GL_TEXTURE_2D, _GL_Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, screenWidth, screenHeight, 0, GL_RGB, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, _GL_Texture, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// In the actual code each texture is obviously saved in an object
}
GLenum *enums = new GLenum[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
// Get index and validate
int index = *(indices + i); // indices = writeIndices
if(index < 0 || index >= maxAttachments)
{
delete[] enums;
return false;
}
// Set index
enums[i] = GL_COLOR_ATTACHMENT0 + index;
}
// Set indices
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _GL_Framebuffer);
glDrawBuffers(BUFFER_COUNT, enums);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
delete[] enums;
// OpenGL error check, no errors
After some careful reading I found out that the difference in multisampling was the problem. The 'main' FBO was set up by SFML, so by simply setting the anti-aliasing level to 0 on startup the problem was partially solved.
It now blits if the draw/read rectangles are unequal in size, but it keeps crashing on some machines where it is SUPPOSED to work.
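In case it helps anyone else: a multisampled read framebuffer can only be blitted with identical source and destination rectangles, so one workaround is a two-step blit that resolves into a single-sampled FBO first and scales from there. A sketch, assuming msaaFBO and resolveFBO are complete framebuffers of the same size:
// Step 1: resolve the multisampled FBO at 1:1, as the spec requires.
glBindFramebuffer(GL_READ_FRAMEBUFFER, msaaFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, resolveFBO);
glBlitFramebuffer(0, 0, screenWidth, screenHeight,
                  0, 0, screenWidth, screenHeight,
                  GL_COLOR_BUFFER_BIT, GL_NEAREST);
// Step 2: the source is now single-sampled, so a scaled blit
// (e.g. into a quarter of the default framebuffer) is legal.
glBindFramebuffer(GL_READ_FRAMEBUFFER, resolveFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, screenWidth, screenHeight,
                  0, 0, screenWidth / 2, screenHeight / 2,
                  GL_COLOR_BUFFER_BIT, GL_LINEAR);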