I had this problem when compiling my OpenGL code on lower-end PCs that don't support OpenGL 4.5, and on Macs. In my regular code I use functions like glCreateTextures and glTextureStorage2D, but they aren't supported on those versions, so I switched to the older glGenTextures path.
Here's the image generation code:
Texture::Texture(const std::string& path)
: m_Path(path)
{
int width, height, channels;
stbi_set_flip_vertically_on_load(1);
unsigned char* data = stbi_load(path.c_str(), &width, &height, &channels, 0);
RW_CORE_ASSERT(data, "Failed to load image!");
m_Width = width;
m_Height = height;
GLenum internalFormat = 0, dataFormat = 0;
if (channels == 4)
{
internalFormat = GL_RGBA8;
dataFormat = GL_RGBA;
}
else if (channels == 3)
{
internalFormat = GL_RGB8;
dataFormat = GL_RGB;
}
m_InternalFormat = internalFormat;
m_DataFormat = dataFormat;
RW_CORE_ASSERT(internalFormat & dataFormat, "Format not supported!");
glGenTextures(1, &m_ID);
glBindTexture(GL_TEXTURE_2D, m_ID);
glTexParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(m_ID, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(m_ID, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(m_ID, 1, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
glBindTexture(GL_TEXTURE_2D, 0);
stbi_image_free(data);
}
And I want to bind my textures to specific slots on the GPU so I have this function:
void Texture::Bind(uint32_t slot) const
{
glActiveTexture(GL_TEXTURE0 + slot);
glBindTexture(GL_TEXTURE_2D, m_ID);
}
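A minimal usage sketch for Bind (the SetInt call and the "u_Texture" name are placeholders for whatever the shader wrapper provides; they are not part of the code above):
// Bind the texture to unit 0 and point the sampler uniform at the same unit.
texture.Bind(0);
shader->SetInt("u_Texture", 0); // the sampler2D uniform must match the slot passed to Bind()
// ... then issue the draw call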
Here's a screenshot of what gets drawn:
To make sure it wasn't a problem in my own renderer, I also drew the texture through ImGui's renderer.
And here's the picture I'm supposed to get:
The image imports correctly and I get no errors. The same importing code and paths work on a higher-end PC; the only thing that's changed is that the higher-end PC runs the OpenGL 4.5 texture-creation code.
It turns out I had to specify GL_TEXTURE_2D (the texture target) in these places instead of the texture ID, and pass 0, the base mipmap level, as the second argument of glTexImage2D:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
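For completeness, here is a rough sketch of how the two creation paths could be chosen at runtime instead of at compile time (the variable and member names follow the code above; the version query works on any 3.0+ context):

GLint major = 0, minor = 0;
glGetIntegerv(GL_MAJOR_VERSION, &major);
glGetIntegerv(GL_MINOR_VERSION, &minor);

if (major > 4 || (major == 4 && minor >= 5))
{
    // Direct State Access path (OpenGL 4.5+): calls take the texture ID.
    glCreateTextures(GL_TEXTURE_2D, 1, &m_ID);
    glTextureStorage2D(m_ID, 1, internalFormat, m_Width, m_Height);
    glTextureParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTextureParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTextureSubImage2D(m_ID, 0, 0, 0, m_Width, m_Height, dataFormat, GL_UNSIGNED_BYTE, data);
}
else
{
    // Bind-to-edit path (pre-4.5): calls take the target GL_TEXTURE_2D.
    glGenTextures(1, &m_ID);
    glBindTexture(GL_TEXTURE_2D, m_ID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
    glBindTexture(GL_TEXTURE_2D, 0);
}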
I created a class for OpenGL textures, and for some applications a static object would be useful.
The problem is that when I create a texture as a static or global object, my program crashes, while as a local object everything works fine. I have absolutely no idea what is going on.
This is my constructor:
Texture::Texture(std::string file, bool bitmap):
textureName("tex"), transparent(false) {
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
int texWidth, texHeight;
unsigned char *data = SOIL_load_image(file.c_str(), &texWidth, &texHeight, 0, SOIL_LOAD_RGBA);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
if(bitmap) {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
glBindTexture(GL_TEXTURE_2D, 0);
for(int i = 3; i < texWidth * texHeight * 4; i+= 4) {
if(data[i] != 0xff) {
transparent = true;
break;
}
}
textureSize = glm::vec2(texWidth, texHeight);
SOIL_free_image_data(data);
}
OpenGL must be initialized before you can call any OpenGL functions. (More accurately: you need to have "made an OpenGL context current")
The way you do that depends on which libraries you are using.
One thing all the libraries have in common: there are some setup functions you need to call, usually from main (for example, glfwCreateWindow and glfwMakeContextCurrent in GLFW).
Since global objects are created before main is called, you are trying to call OpenGL functions before you have an OpenGL context. This doesn't work.
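A minimal GLFW sketch of that ordering (assuming the Texture class from the question; the file name and window size are placeholders):

#include <GLFW/glfw3.h>

// static Texture bad("wood.png", true); // would run before main(): no context yet -> crash

int main() {
    glfwInit();
    GLFWwindow* window = glfwCreateWindow(800, 600, "demo", nullptr, nullptr);
    glfwMakeContextCurrent(window); // from here on, OpenGL calls are legal
    // initialize your GL function loader here (GLEW, glad, ...) if you use one

    Texture wood("wood.png", true); // safe: a context is current

    while (!glfwWindowShouldClose(window)) {
        // ... render ...
        glfwSwapBuffers(window);
        glfwPollEvents();
    }
    glfwTerminate();
}

If you really need a long-lived texture, a pointer (for example a std::unique_ptr<Texture>) that is only assigned after this setup has run gives the same effect without the crash.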
Hello everyone, I'm trying to load a texture (a normal map) using OpenGL.
GLuint loadTexture(const char* fileName) {
GLuint textureID;
glGenTextures(1, &textureID);
// load file - using core SDL library
SDL_Surface* tmpSurface;
tmpSurface = SDL_LoadBMP(fileName);
if (tmpSurface == nullptr) {
std::cout << "Error loading bitmap" << std::endl;
}
// bind texture and set parameters
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tmpSurface->w, tmpSurface->h, 0,
GL_BGR, GL_UNSIGNED_BYTE, tmpSurface->pixels);
glGenerateMipmap(GL_TEXTURE_2D);
SDL_FreeSurface(tmpSurface);
return textureID;
}
Then if I try to render it, I get this:
Instead of this:
But I can render this one normally:
Do you have any idea what's wrong? The color depth is 32 bits for the one that doesn't work and 24 bits for the one that does.
Use SDL_ConvertSurfaceFormat to convert the surface format and load the converted surface:
SDL_Surface* tmpSurface = SDL_LoadBMP(fileName);
SDL_Surface* formattedSurface = SDL_ConvertSurfaceFormat(
tmpSurface, SDL_PIXELFORMAT_ABGR8888, 0);
SDL_FreeSurface(tmpSurface);
// [...]
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, formattedSurface->w, formattedSurface->h, 0,
GL_RGBA, GL_UNSIGNED_BYTE, formattedSurface->pixels);
SDL_FreeSurface(formattedSurface);
Of course, you can instead evaluate the format attribute of the SDL_Surface and pass a matching format argument when the two-dimensional texture image is specified by glTexImage2D.
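For example, a rough sketch of that approach (SDL2, little-endian byte order assumed for the packed formats, and only a few common cases handled):

// Choose a matching client-side format instead of converting the surface.
GLenum dataFormat;
switch (tmpSurface->format->format) {
    case SDL_PIXELFORMAT_ABGR8888: dataFormat = GL_RGBA; break; // bytes in memory: R,G,B,A
    case SDL_PIXELFORMAT_ARGB8888: dataFormat = GL_BGRA; break; // bytes in memory: B,G,R,A
    case SDL_PIXELFORMAT_RGB24:    dataFormat = GL_RGB;  break;
    case SDL_PIXELFORMAT_BGR24:    dataFormat = GL_BGR;  break;
    default:                       dataFormat = GL_RGBA; break; // or convert first, as shown above
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // 24-bit rows are often not 4-byte aligned
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tmpSurface->w, tmpSurface->h, 0,
             dataFormat, GL_UNSIGNED_BYTE, tmpSurface->pixels);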
I am trying to load OBJ files to use them in OpenGL. Every object in the OBJ file has its own texture. In my code I split every object of the file into a struct that is defined like this:
struct ObjModelComponent {
std::vector<glm::vec3> vertices;
std::vector<glm::vec2> uvs;
std::vector<glm::vec3> normals;
Texture tex;
ArrayBuffer vertex, uv, normal;
GLuint VAO;
};
The Texture class is a simple class to make loading and handling a texture easier.
In the class of the model I have a std::vector of my ObjModelComponent.
I'm rendering the Object like this:
void ObjModel::render() {
for(int i = 0; i < components.size(); i++) {
components[i].vertex.activate();
components[i].uv.activate();
components[i].normal.activate();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, components[i].tex.getTextureID());
shader->sendInt(0, components[i].tex.getTextureName());
glBindVertexArray(components[i].VAO);
shader->sendMat4(*data->projection, "projection");
shader->sendMat4(data->viewMat, "view");
shader->sendMat4(model, "model");
glDrawArrays(GL_TRIANGLES, 0, int(components[i].vertices.size()));
glBindVertexArray(0);
}
}
The Texture class looks like this:
Texture::Texture(std::string fileName, TextureType type):
textureName("tex"), fileName(fileName), type(type) {
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
unsigned char *image = SOIL_load_image(fileName.c_str(), &texWidth, &texHeight, 0, SOIL_LOAD_RGBA);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
switch (type) {
case TEXTURE_GENERATE_MIPMAP:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glGenerateMipmap(GL_TEXTURE_2D);
break;
case TEXTURE_NO_MIP_MAP:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
default:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
break;
}
glBindTexture(GL_TEXTURE_2D, 0);
SOIL_free_image_data(image);
}
Texture::Texture() {
}
Texture& Texture::operator=(const Texture &other) {
this->fileName = other.fileName;
this->textureName = other.textureName;
this->type = other.type;
// glDeleteTextures(1, &tex);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
unsigned char *image = SOIL_load_image(fileName.c_str(), &texWidth, &texHeight, 0, SOIL_LOAD_RGBA);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texWidth, texHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
switch (type) {
case TEXTURE_GENERATE_MIPMAP:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glGenerateMipmap(GL_TEXTURE_2D);
break;
case TEXTURE_NO_MIP_MAP:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
default:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
break;
}
glBindTexture(GL_TEXTURE_2D, 0);
SOIL_free_image_data(image);
return *this;
}
Texture::~Texture() {
glDeleteTextures(1, &tex);
}
GLuint Texture::getTextureID() {
return tex;
}
void Texture::setTextureName(std::string name) {
textureName = name;
}
std::string Texture::getTextureName() {
return textureName;
}
glm::vec2 Texture::getTextureSize() {
return glm::vec2(texWidth, texHeight);
}
This is how the components are generated:
for(int i = 0; i < fileLines.size(); i++) {
if(fileLines[i].substr(0, 2) == "o ") {
if(!first) {
tmpComp.tex = tmpTex;
components.emplace_back(tmpComp);
componentIndex++;
}
first = false;
tmpTex = Texture(hg::substr(path, 0, int(path.find_last_of("/"))) + "/" + hg::substr(fileLines[i], 2, int(fileLines[i].length())) + ".png");
}
}
This is how the VAOs and array buffers are generated:
for(int i = 0; i < components.size(); i++) {
glGenVertexArrays(1, &components[i].VAO);
glBindVertexArray(components[i].VAO);
components[i].vertex.setData(components[i].vertices.data(), sizeof(glm::vec3) * components[i].vertices.size(), 0);
components[i].uv.setData(components[i].uvs.data(), sizeof(glm::vec2) * components[i].uvs.size(), 1);
components[i].normal.setData(components[i].normals.data(), sizeof(glm::vec3) * components[i].normals.size(), 2);
components[i].vertex.activate();
components[i].uv.activate();
components[i].normal.activate();
glBindVertexArray(0);
}
You can find my complete code on GitHub: https://github.com/Kuechenzwiebel/SDL-OpenGL-Tests
The problem is that the texture displayed on my object is always the one that was used by the last object that was rendered.
Update: Removing the glDeleteTextures call from the destructor of Texture results in the texture displayed on all components being the one that was loaded for the last component.
I really don't understand why this isn't working; I copied the code from other primitives that only use one texture, and there everything works fine.
I found a workaround: instead of having separate ArrayBuffers and VAOs, I use only one of each and also store the index at which each new object begins, and at that point I change the texture that is used.
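A rough sketch of that workaround (the names are illustrative, not taken from the repository):

// All components share one VAO/VBO; each one only remembers where its vertices
// start and how many there are, so just the bound texture changes per draw.
struct DrawRange { GLint first; GLsizei count; GLuint textureID; };
std::vector<DrawRange> ranges; // filled while the per-object data is concatenated

glBindVertexArray(modelVAO);
for (const DrawRange& r : ranges) {
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, r.textureID);
    glDrawArrays(GL_TRIANGLES, r.first, r.count);
}
glBindVertexArray(0);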
I am trying to load an image into an OpenGL texture using SOIL2; however, it never seems to come out correctly unless I use SOIL2's load-to-texture function. I have tried using stb_image and DevIL, but both give similar results.
Code:
GLuint load_image(const std::string& path) {
int iwidth, iheight, channels;
unsigned char* image = SOIL_load_image(path.c_str(), &iwidth, &iheight, &channels, SOIL_LOAD_RGBA);
// std::cout << SOIL_last_result() << std::endl;
// float* image = stbi_loadf(path.c_str(), &iwidth, &iheight, &channels, STBI_rgb_alpha);
// if(!ilLoadImage(path.c_str()))
// std::cout << "Devil Failed to load image: " << iluErrorString(ilGetError()) << std::endl;
//
// unsigned char* image = ilGetData();
//
// int iwidth = ilGetInteger(IL_IMAGE_WIDTH);
// int iheight = ilGetInteger(IL_IMAGE_HEIGHT);
// int channels = ilGetInteger(IL_IMAGE_CHANNELS);
GLuint texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0 + texture);
GLint old_unpack_alignment;
glGetIntegerv(GL_UNPACK_ALIGNMENT, &old_unpack_alignment);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, texture);
glCheckError();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glCheckError();
GLenum original_format = (channels == 4 ? GL_RGBA : GL_RGB);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, iwidth, iheight, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
glGenerateMipmap(GL_TEXTURE_2D);
glPixelStorei(GL_UNPACK_ALIGNMENT, old_unpack_alignment);
return texture;
}
Screenshot:
What I should get:
I would like to know how to properly load an image into a texture.
Here is an example of what my texture loading function looks like:
unsigned int loadTexture(char const * path)
{
unsigned int textureID;
glGenTextures(1, &textureID);
int width, height, nrComponents;
unsigned char *data = stbi_load(path, &width, &height, &nrComponents, 0);
if (data)
{
GLenum format;
if (nrComponents == 1)
format = GL_RED;
else if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
glBindTexture(GL_TEXTURE_2D, textureID);
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
stbi_image_free(data);
}
else
{
std::cout << "Texture failed to load at path: " << path << std::endl;
stbi_image_free(data);
}
return textureID;
}
I will usually set up my VAO and VBO beforehand, then use this function to load a texture. After that I configure my shader(s) for use. Within the render loop I use the shader and set the matrices, passing in any needed uniforms; once all the "model" information is complete, I bind the vertex array, set the appropriate texture unit active, bind those texture(s), and finish up by drawing the arrays or primitives.
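A condensed sketch of that order (the shader helper calls and names below are placeholders, not a specific library):

// per frame, after the texture, VAO and shader were created as described above
shader.use();                              // activate the shader program
shader.setMat4("projection", projection);  // upload whatever uniforms the shader needs
shader.setMat4("view", view);
shader.setMat4("model", model);

glBindVertexArray(VAO);
glActiveTexture(GL_TEXTURE0);              // select the unit the sampler uniform points to
glBindTexture(GL_TEXTURE_2D, textureID);
glDrawArrays(GL_TRIANGLES, 0, vertexCount);
glBindVertexArray(0);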
I have decided to use SOIL to load images for use with OpenGL in my project. I have this method, which loads an image and returns a GLTexture, a struct that holds a GLuint texture id and two ints, width and height:
GLTexture loadTexture(const char *filePath) {
GLTexture texture = {};
int width;
int height;
unsigned char *data;
//Load Image File Directly into an OpenGL Texture
texture.id = SOIL_load_OGL_texture
(
filePath,
SOIL_LOAD_AUTO,
SOIL_CREATE_NEW_ID,
SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Generate and Bind Texture
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
//Get Width, Height and Data of Image
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, &data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
}
As far as I know, glGetTexLevelParameteriv() should return the width and height of the currently bound texture into width and height, but whenever I load an image, it returns 0.
Should I fill in width and height as parameters for the method or is it possible to get them via OpenGL?
The texture id generated by SOIL_load_OGL_texture is overwritten by the
glGenTextures(1, &(texture.id));
line (glGenTextures creates a new texture and stores its id in texture.id). All operations afterwards work on the newly created texture. Since this new texture is empty, its width and height are 0.
I'm not sure what you want to achieve here, but if you only want to load the texture, then this code might work:
texture.id = SOIL_load_OGL_texture (...);
//Error Checking (Load Process)
if (texture.id == 0) {
fatalError("SOIL Loading Error!");
}
//Just bind and do not create a new texture
glBindTexture(GL_TEXTURE_2D, texture.id);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
//Unbind Texture
glBindTexture(GL_TEXTURE_2D, 0);
//Return Texture
texture.width = width;
texture.height = height;
return texture;
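A short usage sketch, assuming the GLTexture struct from the question (the path is only an example):

GLTexture grass = loadTexture("textures/grass.png");
glBindTexture(GL_TEXTURE_2D, grass.id);
// grass.width and grass.height now hold the dimensions reported by the driver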