Here is my code:
typedef struct Texture
{
    GLubyte *Data;
    GLuint bpp;
    GLuint width, height;
    GLuint ID;
} Texture;

class TextureLoader
{
public:
    TextureLoader()
    {
        ilInit();
        iluInit();
    }
    void load(ILenum FileType, const char *filename, Texture *texture)
    {
        ilLoad(FileType, filename);
        texture->width = ilGetInteger(IL_IMAGE_WIDTH);
        texture->height = ilGetInteger(IL_IMAGE_HEIGHT);
        texture->bpp = ilGetInteger(IL_IMAGE_BYTES_PER_PIXEL);
        texture->Data = ilGetData();
        ilEnable(IL_CONV_PAL);
        unsigned int type = ilGetInteger(IL_IMAGE_FORMAT);
        glGenTextures(1, &texture->ID);
        glBindTexture(GL_TEXTURE_2D, texture->ID);
        gluBuild2DMipmaps(GL_TEXTURE_2D, texture->bpp, texture->width,
                          texture->height, type, GL_UNSIGNED_BYTE, texture->Data);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
    }
} Loader;
and it doesn't seem to work: there is a segmentation fault after calling gluBuild2DMipmaps.
I can't understand what the problem is, because the code was mostly copy-pasted from an example(...
This is the call to TextureLoader::load:
vector<Texture> bgtextures;
bgtextures.resize(1);
Loader.load(IL_JPG, "bgsnow.jpg", &bgtextures[0]);
Can anybody help me?
Look at the return value of this call:
    ilLoad(FileType, filename);
by checking it like this:
    if (ilLoad(FileType, filename) == IL_FALSE) {
        // Make it produce an error or something.
    }
As genpfault suggested, you are likely passing in an incorrect filename.
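A slightly more defensive version of the load path might look like the sketch below (just a sketch, assuming DevIL and ILU are already initialized). ilGenImages/ilBindImage give ilLoad a valid image to decode into, since DevIL loads into the currently bound image, and iluErrorString is the ILU helper that turns ilGetError codes into readable text:
    ILuint img;
    ilGenImages(1, &img);   // create an image name...
    ilBindImage(img);       // ...and bind it so ilLoad has a target
    if (ilLoad(FileType, filename) == IL_FALSE)
    {
        // Report why the load failed instead of reading garbage pixel data.
        ILenum err = ilGetError();
        fprintf(stderr, "ilLoad(\"%s\") failed: %s\n", filename, iluErrorString(err));
        return;
    }
    // Only after a successful load are ilGetInteger/ilGetData safe to use.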
I'm getting an access violation on every GL call after this texture initialization (actually the last GLCALL(glBindTexture(m_Target, bound)); also causes an access violation, so the code at the top is probably what's causing it):
Texture2D::Texture2D(unsigned int format, unsigned int width, unsigned int height, unsigned int unit, unsigned int mipmapLevels, unsigned int layers)
    : Texture(GL_TEXTURE_2D_ARRAY, unit)
{
    unsigned int internalFormat;
    if (format == GL_DEPTH_COMPONENT)
    {
        internalFormat = GL_DEPTH_COMPONENT32;
    }
    else
    {
        internalFormat = format;
    }
    m_Format = format;
    m_Width = width;
    m_Height = height;

    unsigned int bound = 0;
    glGetIntegerv(GL_TEXTURE_BINDING_2D_ARRAY, (int*)&bound);

    GLCALL(glGenTextures(1, &m_ID));
    GLCALL(glActiveTexture(GL_TEXTURE0 + m_Unit));
    GLCALL(glBindTexture(m_Target, m_ID));
    GLCALL(glTexParameteri(m_Target, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
    GLCALL(glTexParameteri(m_Target, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    GLCALL(glTexStorage3D(m_Target, mipmapLevels, internalFormat, width, height, layers));

    for (size_t i = 0; i < layers; i++)
    {
        glTexSubImage3D(m_Target, 0, 0, 0, i, m_Width, m_Height, 1, m_Format, s_FormatTypeMap[internalFormat], NULL);
    }

    GLCALL(glBindTexture(m_Target, bound));
}
The OpenGL function pointers are initialized with glad at the beginning of the program:
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
    std::cout << "Failed to initialize GLAD" << std::endl;
    return -1;
}
And this only happens with GL_TEXTURE_2D_ARRAY, even when this is the first line of my code (after initialization, of course). Example code:
auto t = Texture2D(GL_DEPTH_COMPONENT, 1024, 1024, 10, 1, 4);
Any idea what may be causing it?
Thanks in advance!
You're passing NULL as the last argument of glTexSubImage3D, but OpenGL does not allow that:
TexSubImage*D and TextureSubImage*D arguments width, height, depth, format, type, and data match the corresponding arguments to the corresponding TexImage*D command (where those arguments exist), meaning that they accept the same values, and have the same meanings. The exception is that a NULL data pointer does not represent unspecified image contents.
...and there is no text that allows a NULL pointer, therefore you cannot pass NULL.
It's unclear what you're trying to achieve with those glTexSubImage3D calls. Since you're using an immutable texture (glTexStorage3D), you don't need to do anything extra. If instead you want to clear your texture, you can use glClearTexSubImage, which does accept NULL for data to mean 'clear with zeros'.
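For illustration, a minimal sketch of that clearing approach, assuming a GL 4.4+ context (glClearTexSubImage requires GL 4.4 or ARB_clear_texture) and reusing the names from the constructor above:
    // Replaces the entire glTexSubImage3D loop: clear all layers of mip 0.
    // Note that glClearTexSubImage takes the texture *name*, not a target,
    // and here NULL for data legitimately means 'clear to zero'.
    GLCALL(glClearTexSubImage(m_ID, 0,          // texture, mip level
                              0, 0, 0,          // x/y/z offset
                              width, height, layers,
                              m_Format, s_FormatTypeMap[internalFormat], NULL));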
I have a problem with the stb_image library, and I thought maybe you have an idea why this isn't working. I have declared a function like this:
bool LoadTextureFile(std::string file, unsigned char ** pixel_data, int * width, int * height, int * n);
In this function, the result of stbi_load is saved directly into the *pixel_data variable:
    *pixel_data = stbi_load(file.c_str(), width, height, n, 0);
    // Do some more stuff till return
    return true;
So now my pixel_data pointer points to the memory holding the result of stbi_load. I want to use this result with the glTexImage2D method in my previous function, which calls LoadTextureFile before calling OpenGL's glTexImage2D, like this:
bool LoadTexture(...)
{
    int tex_width, tex_height, tex_n;
    unsigned char * pixel_data = NULL;
    LoadTextureFile(filename, &pixel_data, &tex_width, &tex_height, &tex_n);
    // Do something special ...
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex_width, tex_height, 0, GL_RGB, GL_UNSIGNED_BYTE, &pixel_data);
    stbi_image_free(&pixel_data);
    // ...
}
But if I do it like that, I get a memory violation at the point of calling glTexImage2D.
If I move this whole magic into LoadTextureFile, right after loading the texture file with stbi_load, then it works:
bool LoadTextureFile(std::string file, unsigned char ** pixel_data, int * width, int * height, int * n)
{
    unsigned char * = = stbi_load(file.c_str(), width, height, n, 0);
    // Do some magic ...
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 80, 80, 0, GL_RGB, GL_UNSIGNED_BYTE, pixel_data);
    stbi_image_free(pixel_data);
    return true;
}
Can someone tell me why I get this message and how to solve the problem?
I guess it is a matter of keeping the reserved memory safe, but I'm not really sure how to do that. I tried this in a simple console application before, and there it worked.
Thank you for your help!
This:
    unsigned char * pixel_data = NULL;
    [...]
    glTexImage2D(..., &pixel_data);
is certainly not what you want. You are passing the address of the pointer to your pixel data, not the value of the pointer, so you are basically telling the GL to use some random segment of your stack memory as the source for the texture. It should be just:
    glTexImage2D(..., pixel_data);
In your second variant, what actually happens is unclear, since the line
    unsigned char * = = stbi_load(file.c_str(), width, height, n, 0);
just doesn't make sense and will never compile. I assume it is a copy-and-paste error made while writing the question, but it is hard to guess what your real code does.
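Put together, a minimal corrected sketch of the first variant (keeping the question's names, assuming LoadTextureFile returns false on failure and that the image really has three RGB channels, as the GL_RGB arguments imply) could look like this. Note that stbi_image_free has the same address-vs-value problem in your code: it also wants the pointer itself, not &pixel_data:
    bool LoadTexture(const std::string& filename)
    {
        int tex_width, tex_height, tex_n;
        unsigned char * pixel_data = NULL;
        if (!LoadTextureFile(filename, &pixel_data, &tex_width, &tex_height, &tex_n))
            return false;
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex_width, tex_height, 0,
                     GL_RGB, GL_UNSIGNED_BYTE, pixel_data);  // the pointer's value
        stbi_image_free(pixel_data);                         // not &pixel_data
        return true;
    }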
I am currently trying to render textured objects in OpenGL. Everything worked fine until I wanted to render a texture with transparency: instead of showing the object transparent, it just rendered totally black.
The method for loading the texture file is this:
// structures for reading and information variables
char magic[4];
unsigned char header[124];
unsigned int width, height, linearSize, mipMapCount, fourCC;
unsigned char* dataBuffer;
unsigned int bufferSize;

fstream file(path, ios::in|ios::binary);

// read magic and header
if (!file.read((char*)magic, sizeof(magic))){
    cerr << "File " << path << " not found!" << endl;
    return false;
}
if (magic[0]!='D' || magic[1]!='D' || magic[2]!='S' || magic[3]!=' '){
    cerr << "File does not comply with dds file format!" << endl;
    return false;
}
if (!file.read((char*)header, sizeof(header))){
    cerr << "Not able to read file information!" << endl;
    return false;
}

// derive information from header
height = *(int*)&(header[8]);
width = *(int*)&(header[12]);
linearSize = *(int*)&(header[16]);
mipMapCount = *(int*)&(header[24]);
fourCC = *(int*)&(header[80]);

// determine dataBuffer size
bufferSize = mipMapCount > 1 ? linearSize * 2 : linearSize;
dataBuffer = new unsigned char [bufferSize*2];

// read data and close file
if (file.read((char*)dataBuffer, bufferSize/1.5))
    cout << "Loading texture " << path << " successful" << endl;
else{
    cerr << "Data of file " << path << " corrupted" << endl;
    return false;
}
file.close();

// check pixel format
unsigned int format;
switch(fourCC){
    case FOURCC_DXT1:
        format = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
        break;
    case FOURCC_DXT3:
        format = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
        break;
    case FOURCC_DXT5:
        format = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
        break;
    default:
        cerr << "Compression type not supported or corrupted!" << endl;
        return false;
}

glGenTextures(1, &ID);
glBindTexture(GL_TEXTURE_2D, ID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

unsigned int blockSize = (format == GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) ? 8 : 16;
unsigned int offset = 0;

/* load the mipmaps */
for (unsigned int level = 0; level < mipMapCount && (width || height); ++level) {
    unsigned int size = ((width+3)/4)*((height+3)/4)*blockSize;
    glCompressedTexImage2D(GL_TEXTURE_2D, level, format, width, height,
                           0, size, dataBuffer + offset);
    offset += size;
    width /= 2;
    height /= 2;
}

textureType = DDS_TEXTURE;
return true;
In the fragment shader I just set gl_FragColor = texture2D( myTextureSampler, UVcoords ).
I hope that there is an easy explanation, such as some code missing.
In the OpenGL initialization I enabled GL_BLEND and set a blend function.
Does anyone have an idea of what I did wrong?
Make sure the blend function is the correct one for what you are trying to accomplish. For what you've described, that should be glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
You probably shouldn't set the blend function in your OpenGL initialization function; instead, wrap it around your draw calls:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// GL draw functions (glDrawArrays, glDrawElements, etc.)
glDisable(GL_BLEND);
Are you clearing the 2D texture binding before you swap buffers? I.e.:
    glBindTexture(GL_TEXTURE_2D, 0);
I'm trying to access data that I previously allocated with calloc through a shared_ptr. For some reason I can't access it (it keeps crashing with EXC_BAD_ACCESS) in glTexImage2D (the last line of my code snippets).
My util method that loads the data:
shared_ptr<ImageData> IOSFileSystem::loadImageFile(string path) const
{
    // Result
    shared_ptr<ImageData> result = shared_ptr<ImageData>();
    ...
    // Check if file exists
    if([[NSFileManager defaultManager] fileExistsAtPath:fullPath isDirectory:NO])
    {
        ...
        GLubyte *spriteData = (GLubyte*) calloc(width * height * 4, sizeof(GLubyte));
        ...
        // Put result in shared ptr
        shared_ptr<GLubyte> spriteDataPtr = shared_ptr<GLubyte>(spriteData);
        result = shared_ptr<ImageData>(new ImageData(path, width, height, spriteDataPtr));
    }
    else
    {
        cout << "IOSFileSystem::loadImageFile -> File does not exist at path.\nPath: " + path;
        exit(1);
    }
    return result;
}
Header for ImageData:
class ImageData
{
public:
    ImageData(string path, int width, int height, shared_ptr<GLubyte> data);
    ~ImageData();

    string getPath() const;
    int getWidth() const;
    int getHeight() const;
    shared_ptr<GLubyte> getData() const;
private:
    string path;
    int width;
    int height;
    shared_ptr<GLubyte> data;
};
File that calls the util class:
void TextureMaterial::load()
{
    shared_ptr<IFileSystem> fileSystem = ServiceLocator::getFileSystem();
    shared_ptr<ImageData> imageData = fileSystem->loadImageFile(path);
    this->bind(imageData);
}

void TextureMaterial::bind(shared_ptr<ImageData> data)
{
    // Pointer to pixel data
    shared_ptr<GLubyte> pixelData = data->getData();
    ...
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, data->getWidth(), data->getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, &pixelData);
}
Just for the record: if I throw out all the shared_ptrs, I'm able to access the data. The signature of glTexImage2D:
void glTexImage2D(GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid *data);
Additional question: normally you have to free(spriteData), but since I handed the data to a shared_ptr, will the data be freed when the shared_ptr is destroyed?
shared_ptr cannot magically guess how to release the memory. By default it tries to delete it, and since you didn't use new, that ends in disaster.
You need to tell it how:
shared_ptr<GLubyte>(spriteData, &std::free);
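As a minimal sketch of how that looks in your loader (same names as in the question; std::free comes from <cstdlib>):
    GLubyte *spriteData = (GLubyte*) calloc(width * height * 4, sizeof(GLubyte));
    // The custom deleter makes the last owner call std::free(spriteData).
    // This also answers the additional question: with this deleter, the
    // memory is released automatically when the last shared_ptr goes away;
    // without it, delete would (wrongly) be applied to calloc'd memory.
    shared_ptr<GLubyte> spriteDataPtr(spriteData, &std::free);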
I think this is your problem:
    ..., &pixelData);
You are taking the address of a local variable (of type shared_ptr<GLubyte>), which is silently cast to void*, instead of getting the raw pointer from it. Replace it with:
..., pixelData.get());
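That is, the upload in TextureMaterial::bind becomes:
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, data->getWidth(), data->getHeight(),
                 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelData.get());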
I've been working through a basic OpenGL tutorial on loading a TGA file to be used as a texture on a 3D object. I've been able to load data from the TGA header, but when I attempt to load the actual image data, it fails. I'm not sure where it is going wrong. Here is my texture loading class:
Header file:
struct TGA_Header
{
    GLbyte ID_Length;
    GLbyte ColorMapType;
    GLbyte ImageType;
    // Color map specifications
    GLbyte firstEntryIndex[2];
    GLbyte colorMapLength[2];
    GLbyte colorMapEntrySize;
    // image specification
    GLshort xOrigin;
    GLshort yOrigin;
    GLshort ImageWidth;
    GLshort ImageHeight;
    GLbyte PixelDepth;
    GLbyte ImageDescriptor;
};
class Texture
{
public:
    Texture(string in_filename, string in_name = "");
    ~Texture();
public:
    unsigned short width;
    unsigned short height;
    unsigned int length;
    unsigned char type;
    unsigned char *imageData;
    unsigned int bpp;
    unsigned int texID;
    string name;

    static vector<Texture *> textures;
private:
    bool loadTGA(string filename);
    bool createTexture(unsigned char *imageData, int width, int height, int type);
    void swap(unsigned char * ori, unsigned char * dest, GLint size);
    void flipImage(unsigned char * image, bool flipHorizontal, bool flipVertical, GLushort width, GLushort height, GLbyte bpp);
};
Here is the load TGA function in the cpp:
bool Texture::loadTGA(string filename)
{
    TGA_Header TGAheader;
    ifstream file( filename.data(), std::ios::in, std::ios::binary );

    // make sure the file was opened properly
    if ( !file.is_open() )
        return false;

    if ( !file.read( (char *)&TGAheader, sizeof(TGAheader) ) )
        return false;

    // make sure the image is of a type we can handle
    if ( TGAheader.ImageType != 2 )
        return false;

    width = TGAheader.ImageWidth;
    height = TGAheader.ImageHeight;
    bpp = TGAheader.PixelDepth;

    if ( width < 0 ||               // if the width or height is less than 0, then
         height <= 0 ||             // the image is corrupt
         (bpp != 24 && bpp != 32) ) // make sure we are of the correct bit depth
    {
        return false;
    }

    // check for an alpha channel
    GLuint type = GL_RGBA;
    if ( bpp == 24 )
        type = GL_RGB;

    GLuint bytesPerPixel = bpp / 8;

    // allocate memory for the TGA so we can read it
    GLuint imageSize = width * height * bytesPerPixel;
    imageData = new GLubyte[imageSize];
    if ( imageData == NULL )
        return false;

    // make sure we are in the correct position to load the image data
    file.seekg(-imageSize, std::ios::end);

    // if something went wrong, make sure we free up the memory
    // NOTE: It never gets past this point. The conditional always fails.
    if ( !file.read( (char *)imageData, imageSize ) )
    {
        delete imageData;
        return false;
    }

    // more code is down here, but it doesn't matter because it never gets past the read above
}
It seems to load some data, but it keeps returning that it failed. Any help on why would be greatly appreciated. Apologies if it gets a bit wordy, but I'm not sure what is or is not significant.
UPDATE:
So, I just rewrote the function. The ifstream I was using seemed to be the cause of the problem; specifically, it would try to load far more bytes of data than I had asked for. I don't know the cause of the behavior, but I've listed my working code below. Thank you everyone for your help.
The problem could be that the loader does not support compressed TGAs.
Make sure you do not compress the TGA, and that the origin (less important) is Bottom Left.
I usually work with GIMP; when exporting, uncheck RLE compression and choose the Bottom Left origin.
I'm not familiar with C++, sorry. Are you sure the line file.seekg(-imageSize, std::ios::end); is not supposed to be file.seekg(headerSize, std::ios::beg);? It makes more sense to seek from the start than from the end.
You should also check for ColorMapType != 0.
P.S. Here, in if( width < 0 || height <= 0 ), the width check should be <= as well.
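For reference, a sketch of what that suggested seek could look like with the question's types (headerSize is hypothetical here, since the original snippet doesn't define it; for an uncompressed TGA it would be the fixed 18-byte header plus the optional image ID field):
    // Seek forward from the start of the file, past the TGA header.
    std::streamoff headerSize = sizeof(TGA_Header) + TGAheader.ID_Length;
    file.seekg(headerSize, std::ios::beg);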
So, I changed from using an ifstream to a FILE. The ifstream was trying to load far more bytes than I had listed in the arguments. Here is the new code. (NOTE: It still needs optimizing. I believe there are some unused variables floating around, but it works perfectly.) Thanks again everyone for your help.
The header file:
// struct to hold tga data
struct TGA_Header
{
    GLbyte ID_Length;
    GLbyte ColorMapType;
    GLbyte ImageType;
    // Color map specifications
    GLbyte firstEntryIndex[2];
    GLbyte colorMapLength[2];
    GLbyte colorMapEntrySize;
    // image specification
    GLshort xOrigin;
    GLshort yOrigin;
    GLshort ImageWidth;
    GLshort ImageHeight;
    GLbyte PixelDepth;
    GLbyte ImageDescriptor;
};
class Texture
{
public:
    // functions
    Texture(string in_filename, string in_name = "");
    ~Texture();
public:
    // vars
    unsigned char *imageData;
    unsigned int texID;
    string name;

    // temp global access point for accessing all loaded textures
    static vector<Texture *> textures;
private:
    // can add additional load functions for other image types
    bool loadTGA(string filename);
    bool createTexture(unsigned char *imageData, int width, int height, int type);
    void swap(unsigned char * ori, unsigned char * dest, GLint size);
    void flipImage(unsigned char * image, bool flipHorizontal, bool flipVertical, GLushort width, GLushort height, GLbyte bpp);
};
#endif
Here is the load TGA function:
bool Texture::loadTGA(string filename)
{
    // var for swapping colors
    unsigned char colorSwap = 0;
    GLuint type;
    TGA_Header TGAheader;
    FILE* file = fopen(filename.c_str(), "rb");
    unsigned char Temp_TGAheader[18];

    // check to make sure the file loaded
    if ( file == NULL )
        return false;

    fread(Temp_TGAheader, 1, sizeof(Temp_TGAheader), file);

    // pull out the relevant data. 2-byte data (short) must be converted
    TGAheader.ID_Length = Temp_TGAheader[0];
    TGAheader.ImageType = Temp_TGAheader[2];
    TGAheader.ImageWidth = *static_cast<unsigned short*>(static_cast<void*>(&Temp_TGAheader[12]));
    TGAheader.ImageHeight = *static_cast<unsigned short*>(static_cast<void*>(&Temp_TGAheader[14]));
    TGAheader.PixelDepth = Temp_TGAheader[16];

    // make sure the image is of a type we can handle
    if ( TGAheader.ImageType != 2 || TGAheader.ImageWidth <= 0 || TGAheader.ImageHeight <= 0 )
    {
        fclose(file);
        return false;
    }

    // set the type
    if ( TGAheader.PixelDepth == 32 )
    {
        type = GL_RGBA;
    }
    else if ( TGAheader.PixelDepth == 24 )
    {
        type = GL_RGB;
    }
    else
    {
        // incompatible image type
        return false;
    }

    // remember bits != bytes. To convert we need to divide by 8
    GLuint bytesPerPixel = TGAheader.PixelDepth / 8;

    // calculate the memory required for the TGA data
    unsigned int imageSize = TGAheader.ImageWidth * TGAheader.ImageHeight * bytesPerPixel;

    // request the needed memory
    imageData = new GLubyte[imageSize];
    if ( imageData == NULL ) // just in case
        return false;

    if ( fread(imageData, 1, imageSize, file) != imageSize )
    {
        // kill it
        delete [] imageData;
        fclose(file);
        return false;
    }
    fclose(file);

    // TGA stores pixels as BGR(A), so swap the red and blue channels
    for (unsigned int x = 0; x < imageSize; x += bytesPerPixel)
    {
        colorSwap = imageData[x];
        imageData[x] = imageData[x + 2];
        imageData[x + 2] = colorSwap;
    }

    createTexture( imageData, TGAheader.ImageWidth, TGAheader.ImageHeight, type );
    return true;
}
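For the record, a plausible explanation of the original ifstream misbehavior: std::ifstream takes a single openmode argument whose flags must be combined with the | operator. Passing std::ios::in and std::ios::binary as two separate arguments happens to compile on MSVC (whose constructor accepts a nonstandard third "protection" parameter), but the file then gets opened in text mode, where newline translation and the 0x1A end-of-file marker corrupt binary reads and byte counts:
    // What the original code did: std::ios::binary lands in MSVC's
    // nonstandard third parameter, so the file is opened in text mode.
    ifstream file( filename.data(), std::ios::in, std::ios::binary );

    // What was intended: both flags OR'd into one openmode argument.
    ifstream file( filename.data(), std::ios::in | std::ios::binary );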