How can I use less RAM? Texture and model loading - C++

So I am following the OpenGL tutorial at https://learnopengl.com/Model-Loading/Model, and when I run the application it uses a little more than half a gigabyte of RAM for textures and models! But when I check the size on disk of all the textures and the model file, it's only ~30 MB.
How can I make it use less RAM? I think the issue has something to do with loading the texture or the model, but I'm not sure why it uses so much memory. This is the texture loading function:
unsigned int TextureFromFile(const char* path, const std::string& directory, bool gamma)
{
    std::string filename = std::string(path);
    filename = directory + '/' + filename;

    unsigned int textureID;
    glGenTextures(1, &textureID);

    int width, height, nrComponents;
    unsigned char* data = stbi_load(filename.c_str(), &width, &height, &nrComponents, 0);
    if (data)
    {
        GLenum format;
        if (nrComponents == 1)
            format = GL_RED;
        else if (nrComponents == 3)
            format = GL_RGB;
        else if (nrComponents == 4)
            format = GL_RGBA;

        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

        glBindTexture(GL_TEXTURE_2D, textureID);
        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);

        stbi_image_free(data);
    }
    else
    {
        std::cout << "Texture failed to load at path: " << path << std::endl;
        stbi_image_free(data);
    }
    return textureID;
}
The tutorial's source code can be found here.
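One thing worth keeping in mind when comparing those numbers: files on disk are compressed (PNG/JPG), but stbi_load decodes them to raw pixels, and glTexImage2D plus glGenerateMipmap keeps an uncompressed copy of every mip level. An RGBA8 texture takes width × height × 4 bytes, and a full mipmap chain adds roughly a third on top. A minimal sketch of that estimate (the 4096×4096 size is illustrative, not taken from the post):

#include <cstdio>

// Rough decoded size of an RGBA8 texture; the 4/3 factor approximates
// the extra memory taken by a full mipmap chain.
double estimateTextureBytes(int width, int height, int bytesPerPixel, bool mipmaps)
{
    double base = static_cast<double>(width) * height * bytesPerPixel;
    return mipmaps ? base * 4.0 / 3.0 : base;
}

int main()
{
    // Illustrative numbers: one 4096x4096 RGBA texture with mipmaps decodes
    // to ~85 MB, even if the PNG on disk is only a few MB.
    double bytes = estimateTextureBytes(4096, 4096, 4, true);
    std::printf("~%.1f MB\n", bytes / (1024.0 * 1024.0));
}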

Related

OpenGL 4.1 and lower Black Texture, Mac and Windows

I had this problem when compiling my OpenGL code on lower-end PCs that don't support OpenGL 4.5, and on Macs. In my regular code I would use functions like glCreateTextures and glTextureStorage2D, but they are not supported on those versions, so I went down the other, glGenTextures path.
Here's the image generation code:
Texture::Texture(const std::string& path)
    : m_Path(path)
{
    int width, height, channels;
    stbi_set_flip_vertically_on_load(1);
    unsigned char* data = stbi_load(path.c_str(), &width, &height, &channels, 0);
    RW_CORE_ASSERT(data, "Failed to load image!");
    m_Width = width;
    m_Height = height;

    GLenum internalFormat = 0, dataFormat = 0;
    if (channels == 4)
    {
        internalFormat = GL_RGBA8;
        dataFormat = GL_RGBA;
    }
    else if (channels == 3)
    {
        internalFormat = GL_RGB8;
        dataFormat = GL_RGB;
    }
    m_InternalFormat = internalFormat;
    m_DataFormat = dataFormat;
    RW_CORE_ASSERT(internalFormat & dataFormat, "Format not supported!");

    glGenTextures(1, &m_ID);
    glBindTexture(GL_TEXTURE_2D, m_ID);
    glTexParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(m_ID, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(m_ID, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexImage2D(m_ID, 1, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
    glBindTexture(GL_TEXTURE_2D, 0);
    stbi_image_free(data);
}
I want to bind my textures to specific slots on the GPU, so I have this function:
void Texture::Bind(uint32_t slot) const
{
    glActiveTexture(GL_TEXTURE0 + slot);
    glBindTexture(GL_TEXTURE_2D, m_ID);
}
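For context, the sampler uniform in the shader has to point at the same unit the texture is bound to. A hypothetical usage (shaderProgram and "u_Texture" are illustrative names, not from the original code):

// Bind the texture to unit 3, then point the sampler uniform at unit 3.
texture.Bind(3);
glUniform1i(glGetUniformLocation(shaderProgram, "u_Texture"), 3);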
Here's a screenshot of what gets drawn:
To make sure it wasn't a rendering problem, I also drew the texture through ImGui's renderer.
And here's the picture I am supposed to get:
The image imports correctly and I get no errors; the same loading code and paths work on a higher-end PC, the only difference being that the higher-end PC runs the OpenGL 4.5 texture-creation code.
It turns out I had to specify GL_TEXTURE_2D in these places instead of the texture ID:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexImage2D(GL_TEXTURE_2D, 1, internalFormat, m_Width, m_Height, 0, dataFormat, GL_UNSIGNED_BYTE, data);
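The underlying reason: glTexParameteri and glTexImage2D are bind-to-edit functions, so their first argument is a texture target such as GL_TEXTURE_2D, and they act on whatever texture is currently bound to that target; only the OpenGL 4.5 DSA functions (glTextureParameteri, glTextureStorage2D, glTextureSubImage2D) take the texture ID directly. A side-by-side sketch of the two paths (variable names are illustrative); note also that the second argument of glTexImage2D is the mipmap level, which is normally 0:

// Assumes width, height, pixels describe a decoded RGBA image.

// Bind-to-edit path (works on older GL): calls act on the bound texture.
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0 /* mip level */, GL_RGBA8, width, height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, pixels);

// Direct State Access path (OpenGL 4.5): calls take the texture ID itself.
GLuint tex45;
glCreateTextures(GL_TEXTURE_2D, 1, &tex45);
glTextureParameteri(tex45, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTextureStorage2D(tex45, 1, GL_RGBA8, width, height);
glTextureSubImage2D(tex45, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);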

Which image format takes less time to load into memory?

I have a requirement where I need to load a sequence of images into memory and then play them back to back.
I load all of the files into a std::vector, and after they have all been loaded I play them.
This is the code for loading each file:
void loadTextureFromFile(const GLchar *file)
{
    // Create Texture object
    Texture2D texture;

    // Load image
    int width, height, channels;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    unsigned char* image = SOIL_load_image(file, &width, &height, &channels, SOIL_LOAD_AUTO);

    // Set the internal format
    if (channels > 3)
    {
        texture.Internal_Format = GL_RGBA;
        texture.Image_Format = GL_RGBA;
    }
    else
    {
        texture.Internal_Format = GL_RGB;
        texture.Image_Format = GL_RGB;
    }

    // Now generate the texture
    texture.Generate(width, height, image);
    imageSequence.push_back(texture);

    // And finally free the image data
    SOIL_free_image_data(image);
}
////////////////////////////////////////////////////////
Texture2D::Texture2D()
    : Width(0), Height(0), Internal_Format(GL_RGB), Image_Format(GL_RGB),
      Wrap_S(GL_REPEAT), Wrap_T(GL_REPEAT), Filter_Min(GL_LINEAR), Filter_Max(GL_LINEAR),
      WrapMode(SumTextureDecl::TextureWrapMode::Repeat)
{
    glGenTextures(1, &this->ID);
}
////////////////////////////////////////////////////////
void Texture2D::Generate(GLuint width, GLuint height, unsigned char* data)
{
    float aniso = 0.0f;
    this->Width = width;
    this->Height = height;

    glBindTexture(GL_TEXTURE_2D, this->ID);

    // Query the maximum supported anisotropy level
    glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &aniso);

    // Create Texture
    glTexImage2D(GL_TEXTURE_2D, 0, this->Internal_Format, width, height, 0, this->Image_Format, GL_UNSIGNED_BYTE, data);
    glGenerateMipmap(GL_TEXTURE_2D);

    // Set Texture wrap and filter modes
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, this->Wrap_S);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, this->Wrap_T);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, aniso);

    // Unbind texture
    glBindTexture(GL_TEXTURE_2D, 0);
}
It takes some time to load all the files of the image sequence, so which file format would load the fastest?
I am using SOIL to load the images; would loading become faster with some other library?
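Since decode speed depends on the codec as much as on the library, one way to settle this is to measure it directly: save the same frame in each candidate format and time the decode. A minimal sketch using SOIL and std::chrono (the file names are hypothetical; uncompressed formats such as BMP or TGA generally decode fastest, at the cost of larger files):

#include <chrono>
#include <cstdio>
#include "SOIL.h"

// Times a single decode of one file; call it with the same image saved
// as .bmp/.tga/.png/.jpg to compare formats. Returns -1 on failure.
double decodeMilliseconds(const char* path)
{
    int w, h, channels;
    auto t0 = std::chrono::steady_clock::now();
    unsigned char* data = SOIL_load_image(path, &w, &h, &channels, SOIL_LOAD_AUTO);
    auto t1 = std::chrono::steady_clock::now();
    if (!data) return -1.0;
    SOIL_free_image_data(data);
    return std::chrono::duration<double, std::milli>(t1 - t0).count();
}

int main()
{
    std::printf("frame0001.bmp: %.2f ms\n", decodeMilliseconds("frame0001.bmp"));
    std::printf("frame0001.png: %.2f ms\n", decodeMilliseconds("frame0001.png"));
}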

BMP texture not loading correctly using OpenGL

Hello everyone, I'm trying to load a texture (a normal map) using OpenGL.
GLuint loadTexture(const char* fileName) {
    GLuint textureID;
    glGenTextures(1, &textureID);

    // load file - using core SDL library
    SDL_Surface* tmpSurface;
    tmpSurface = SDL_LoadBMP(fileName);
    if (tmpSurface == nullptr) {
        std::cout << "Error loading bitmap" << std::endl;
    }

    // bind texture and set parameters
    glBindTexture(GL_TEXTURE_2D, textureID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tmpSurface->w, tmpSurface->h, 0,
                 GL_BGR, GL_UNSIGNED_BYTE, tmpSurface->pixels);
    glGenerateMipmap(GL_TEXTURE_2D);

    SDL_FreeSurface(tmpSurface);
    return textureID;
}
Then when I try to render it, I get this:
Instead of this:
But I can render this one normally:
Do you have an idea? The color depth is 32 for the texture that is not working and 24 for the one that works.
Use SDL_ConvertSurfaceFormat to convert the surface format and load the converted surface:
SDL_Surface* tmpSurface = SDL_LoadBMP(fileName);
SDL_Surface* formattedSurface = SDL_ConvertSurfaceFormat(
    tmpSurface, SDL_PIXELFORMAT_ABGR8888, 0);
SDL_FreeSurface(tmpSurface);
// [...]
// Note: take the size from formattedSurface; tmpSurface has already been freed.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, formattedSurface->w, formattedSurface->h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, formattedSurface->pixels);
SDL_FreeSurface(formattedSurface);
Of course, you can instead evaluate the format attribute of the SDL_Surface and pass the proper format arguments when the two-dimensional texture image is specified with glTexImage2D.
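A sketch of that alternative, choosing the upload format from the surface instead of converting it (not from the original answer; this assumes a 24- or 32-bit, non-palettized surface and little-endian byte order):

// Pick the GL format from the SDL surface's pixel layout.
GLenum dataFormat;
GLint internalFormat;
if (tmpSurface->format->BytesPerPixel == 4) {
    internalFormat = GL_RGBA8;
    dataFormat = (tmpSurface->format->Rmask == 0x000000ff) ? GL_RGBA : GL_BGRA;
} else {
    internalFormat = GL_RGB8;
    dataFormat = (tmpSurface->format->Rmask == 0x000000ff) ? GL_RGB : GL_BGR;
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // 24-bit rows are not always 4-byte aligned
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, tmpSurface->w, tmpSurface->h, 0,
             dataFormat, GL_UNSIGNED_BYTE, tmpSurface->pixels);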

Get colour of specific pixel in an OpenGL texture?

I'm trying to get the colour of a specific pixel from a specific texture using OpenGL (C++). I've been looking at glGetTexImage() since it looks somewhat like what I want, but I can't figure out the context in which I should put it. Am I wrong? It doesn't need to be the fastest option since it's not a frame-by-frame thing; just when the game starts up.
The texture isn't going to be rendered to the screen and is just used as a way to get information. I use the following function to load the texture.
GLuint TextureUtil::loadTexture(const char* filename, int* widthVar, int* heightVar) {
    unsigned char* image = SOIL_load_image(filename, widthVar, heightVar, NULL, SOIL_LOAD_RGBA);

    GLuint texture;
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    if (image) {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, *widthVar, *heightVar, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
        glGenerateMipmap(GL_TEXTURE_2D);
    } else {
        std::cout << "ERROR: TextureUtil.cpp - Texture loading failed." << std::endl;
    }

    glActiveTexture(0);
    glBindTexture(GL_TEXTURE_2D, 0);
    SOIL_free_image_data(image);
    return texture;
}
Assuming you are interested in the pixel at column x and row y, then:
unsigned char* image = SOIL_load_image(filename, widthVar, heightVar, NULL, SOIL_LOAD_RGBA);
int width = *widthVar;

// 4 bytes per pixel, since SOIL_LOAD_RGBA forces an RGBA layout
unsigned char* pixel = image + y * width * 4 + x * 4;
unsigned char red   = pixel[0];
unsigned char green = pixel[1];
unsigned char blue  = pixel[2];
unsigned char alpha = pixel[3];
Error checking of the SOIL_load_image call is left for you to add. I would fully expect it to return nullptr if the file did not exist, for example.
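If you do want to read the data back from the texture object rather than keeping the CPU-side copy, glGetTexImage works on desktop OpenGL. A minimal sketch, assuming the texture was uploaded as GL_RGBA / GL_UNSIGNED_BYTE at level 0 as in the loadTexture function above:

#include <vector>

// Read back the whole base level, then index the pixel at (x, y).
int width = *widthVar, height = *heightVar;  // as filled in by loadTexture
std::vector<unsigned char> pixels(static_cast<size_t>(width) * height * 4);
glBindTexture(GL_TEXTURE_2D, texture);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
const unsigned char* p = &pixels[(static_cast<size_t>(y) * width + x) * 4];
unsigned char red = p[0], green = p[1], blue = p[2], alpha = p[3];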

How to properly load an image to use as an OpenGL texture?

I am trying to load an image into an OpenGL texture using SOIL2; however, it never seems to come out correct unless I use SOIL2's load-to-texture function. I have tried using stb_image and DevIL, but both give similar results.
Code:
GLuint load_image(const std::string& path) {
    int iwidth, iheight, channels;
    unsigned char* image = SOIL_load_image(path.c_str(), &iwidth, &iheight, &channels, SOIL_LOAD_RGBA);
    // std::cout << SOIL_last_result() << std::endl;

    // float* image = stbi_loadf(path.c_str(), &iwidth, &iheight, &channels, STBI_rgb_alpha);

    // if(!ilLoadImage(path.c_str()))
    //     std::cout << "Devil Failed to load image: " << iluErrorString(ilGetError()) << std::endl;
    //
    // unsigned char* image = ilGetData();
    //
    // int iwidth = ilGetInteger(IL_IMAGE_WIDTH);
    // int iheight = ilGetInteger(IL_IMAGE_HEIGHT);
    // int channels = ilGetInteger(IL_IMAGE_CHANNELS);

    GLuint texture;
    glGenTextures(1, &texture);
    glActiveTexture(GL_TEXTURE0 + texture);

    GLint old_unpack_alignment;
    glGetIntegerv(GL_UNPACK_ALIGNMENT, &old_unpack_alignment);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

    glBindTexture(GL_TEXTURE_2D, texture);
    glCheckError();
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glCheckError();

    GLenum original_format = (channels == 4 ? GL_RGBA : GL_RGB);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, iwidth, iheight, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
    glGenerateMipmap(GL_TEXTURE_2D);

    glPixelStorei(GL_UNPACK_ALIGNMENT, old_unpack_alignment);
    return texture;
}
Screenshot:
What I should get:
I would like to know how to properly load an image into a texture.
Here is an example of what my texture loading function looks like:
unsigned int loadTexture(char const * path)
{
    unsigned int textureID;
    glGenTextures(1, &textureID);

    int width, height, nrComponents;
    unsigned char *data = stbi_load(path, &width, &height, &nrComponents, 0);
    if (data)
    {
        GLenum format;
        if (nrComponents == 1)
            format = GL_RED;
        else if (nrComponents == 3)
            format = GL_RGB;
        else if (nrComponents == 4)
            format = GL_RGBA;

        glBindTexture(GL_TEXTURE_2D, textureID);
        glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
        glGenerateMipmap(GL_TEXTURE_2D);

        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, format == GL_RGBA ? GL_CLAMP_TO_EDGE : GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

        stbi_image_free(data);
    }
    else
    {
        std::cout << "Texture failed to load at path: " << path << std::endl;
        stbi_image_free(data);
    }
    return textureID;
}
I will usually set up my VAO and VBO beforehand, then use this function to load in a texture. After that I'll configure my shader(s) for use. Within the render loop I use my shader and set the matrices, passing in any needed uniforms; then, once all the "model" information is complete, I bind the vertex arrays, set the appropriate texture unit to active, bind those textures, and finish up by drawing the arrays or primitives.
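A sketch of that render-loop order with illustrative names (the shader wrapper, its uniform helpers, and the variable names are hypothetical, not from the answer above):

// Per-frame order described above: shader, uniforms, texture, VAO, draw.
shader.use();                             // assumed wrapper around glUseProgram
shader.setMat4("projection", projection); // assumed uniform helpers
shader.setMat4("view", view);
shader.setMat4("model", model);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureID);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, vertexCount);
glBindVertexArray(0);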