I've been searching all around for a simple way to add sprites to my OpenGL GLUT moon lander game in C++, and it appears I should use BMPs, since they're the easiest format to load and use as textures on a rectangle.
How exactly can I load the BMPs as textures, though?
Here is my simple C implementation of a function that loads a texture.
GLuint LoadTexture( const char * filename )
{
    GLuint texture;
    int width, height;
    unsigned char * data;
    FILE * file;

    file = fopen( filename, "rb" );
    if ( file == NULL ) return 0;

    // Dimensions are hard-coded: this assumes a 1024x512, 24-bit, uncompressed BMP.
    width = 1024;
    height = 512;

    data = (unsigned char *)malloc( width * height * 3 );
    fseek( file, 54, SEEK_SET ); // skip the standard 54-byte BMP file header
    fread( data, width * height * 3, 1, file );
    fclose( file );

    // BMP stores pixels as BGR; swap the B and R channels to get RGB.
    for ( int i = 0; i < width * height; ++i )
    {
        int index = i * 3;
        unsigned char B = data[index];
        unsigned char R = data[index + 2];

        data[index]     = R;
        data[index + 2] = B;
    }

    glGenTextures( 1, &texture );
    glBindTexture( GL_TEXTURE_2D, texture );
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
    gluBuild2DMipmaps( GL_TEXTURE_2D, 3, width, height, GL_RGB, GL_UNSIGNED_BYTE, data );

    free( data );
    return texture;
}
The function above returns the texture ID. Store it in a variable:
GLuint texture;
texture = LoadTexture( "your_image_name.bmp" );
Now you can bind the texture using glBindTexture:
glBindTexture (GL_TEXTURE_2D, texture);
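To put the sprite on a rectangle, bind the texture, enable texturing, and map the quad's corners to texture coordinates. A minimal immediate-mode sketch (the 64x64 quad size is an arbitrary placeholder):
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, texture );
glBegin( GL_QUADS );
    glTexCoord2f( 0.0f, 0.0f ); glVertex2f(  0.0f,  0.0f );
    glTexCoord2f( 1.0f, 0.0f ); glVertex2f( 64.0f,  0.0f );
    glTexCoord2f( 1.0f, 1.0f ); glVertex2f( 64.0f, 64.0f );
    glTexCoord2f( 0.0f, 1.0f ); glVertex2f(  0.0f, 64.0f );
glEnd();
glDisable( GL_TEXTURE_2D );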
Check out the TextureLoader (TextureLoader.h + TextureLoader.cpp) from my OpenGL_3_2_Utils:
https://github.com/mortennobel/OpenGL_3_2_Utils
The two files do not depend on any other files and should work seamlessly with any version of OpenGL (and on any platform). Example usage can be found in the file comment.
You can use the GLAUX library or SOIL (Simple OpenGL Image Library). There are also other image libraries for OpenGL.
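For example, SOIL can load a BMP straight into a texture object in one call. A sketch, assuming SOIL is installed (the filename and flag choices are illustrative):
#include "SOIL.h" // header path depends on your installation

GLuint tex = SOIL_load_OGL_texture(
    "your_image_name.bmp",  // image to load
    SOIL_LOAD_AUTO,         // keep the file's own channel count
    SOIL_CREATE_NEW_ID,     // let SOIL call glGenTextures for you
    SOIL_FLAG_MIPMAPS );    // build mipmaps while loading
// tex is 0 if loading failed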
How do I load a BMP in GLUT to use it as a texture?
Another very simple solution is the STB library, which can be found at GitHub - nothings/stb.
All that is needed is a single header file, stb_image.h. It doesn't require linking any library file or compiling any additional source file.
Include the header and enable image reading by setting the preprocessor definition STB_IMAGE_IMPLEMENTATION (in exactly one source file):
#define STB_IMAGE_IMPLEMENTATION
#include <stb_image.h>
The image file can be read by the function stbi_load:
const char *filename = .....; // path and filename
int req_channels = 3; // 3 color channels of BMP-file
int width = 0, height = 0, channels = 0;
stbi_uc *image = stbi_load( filename, &width, &height, &channels, req_channels );
When the image is loaded into a texture object, GL_UNPACK_ALIGNMENT has to be set to 1.
By default GL_UNPACK_ALIGNMENT is 4, so each row of an image is assumed to be aligned to 4 bytes. The pixels of a BMP file commonly have a size of 3 bytes and are tightly packed, which would cause a misalignment.
After the texture is uploaded, the image memory can be freed with stbi_image_free:
GLuint texture_obj = 0;
if ( image != nullptr )
{
    glGenTextures( 1, &texture_obj );
    glBindTexture( GL_TEXTURE_2D, texture_obj );

    glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image );
    glPixelStorei( GL_UNPACK_ALIGNMENT, 4 ); // restore the default

    stbi_image_free( image );
}
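One caveat: stb_image returns rows top-to-bottom, while OpenGL treats the first row as the bottom of the texture, so sprites may come out upside down. stb_image has a switch for this:
stbi_set_flip_vertically_on_load( 1 ); // call once, before stbi_load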
GLuint LoadTexture(GLuint tex, const char * filename, int width, int height)
{
    // Assumes a 24-bit, uncompressed BMP whose pixel data starts at byte 128.
    unsigned char * data;
    unsigned char R, G, B;
    FILE * file;

    // open the .bmp
    file = fopen(filename, "rb");
    if (file == NULL) return 0;

    // allocate memory for the pixel data
    data = (unsigned char *)malloc(width * height * 3);

    // skip to the pixel data; 128 is the offset used by these particular files
    // (more robustly, read the offset from bytes 10-13 of the BMP file header)
    fseek(file, 128, SEEK_SET);

    // read the pixel data
    fread(data, width * height * 3, 1, file);
    fclose(file);

    // swap the B and R channels (BMP stores BGR, we upload RGB)
    int index;
    for (int i = 0; i < width * height; ++i)
    {
        index = i * 3;
        B = data[index]; G = data[index+1]; R = data[index+2];
        data[index] = R; data[index+1] = G; data[index+2] = B;
    }

    // create the texture under the caller-supplied name; in legacy OpenGL,
    // binding an unused name creates the texture object for it
    glBindTexture(GL_TEXTURE_2D, tex);
    gluBuild2DMipmaps(GL_TEXTURE_2D, 3, width, height, GL_RGB, GL_UNSIGNED_BYTE, data);

    // texture filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    //glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);

    // free the CPU-side copy
    free(data);
    return 0;
}
void init(void)
{
    // texture loading, 24-bit BMPs
    LoadTexture(1, "01.bmp", 316, 316);
    LoadTexture(2, "02.bmp", 316, 316);
    LoadTexture(3, "05.bmp", 316, 316);
    LoadTexture(4, "03.bmp", 316, 316);
    LoadTexture(5, "06.bmp", 316, 316);
    LoadTexture(6, "04.bmp", 316, 316);
    . . . . . . . . . . . .
Linux
gcc cube.c -o cube -lglut -lGL -lGLU
Windows
tcc cube.c -o cube.exe -LC:\tcc\lib -lopengl32 -lglu32 -lglut32 -Wl,-subsystem=windows
http://coliru.stacked-crooked.com/a/400b648e7442eeb3
You can read a piece of code below: the idea is to fill a quad polygon with a texture (the image is repeated inside the quad).
I want the image to repeat only once: in other words, to put the image as the background of the polygon. I'm in an 800x600 window, and I would like to place, for example, a 600x400 billiards table in a quad defined by glVertex3i. It could be glVertex2i and implemented in 2D, as this is a 2D game.
Any help would be appreciated.
Also, I have better code that supports any JPG file, using MyLoadTexture and ReadJPEG (I think):
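(For the code below to compile, it needs roughly the following headers plus libjpeg; this list is my assumption, so adjust it to your build.)
#include <cstdio>    // must come before jpeglib.h
#include <cstring>
#include <cerrno>
#include <cstdint>
#include <string>
#include <map>
#include <vector>
#include <iostream>
#include <jpeglib.h> // libjpeg; link with -ljpeg
#include <GL/gl.h>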
GLuint MyLoadTexture(std::string const filename)
{
GLuint texname = 0;
/* this is actually tied to the OpenGL context, so this should
* actually be a map GLcontext -> std::string -> texturename */
static std::map<std::string, GLuint> loaded_textures;
if( loaded_textures.find(filename) != loaded_textures.end() ) {
texname = loaded_textures[filename];
glBindTexture(GL_TEXTURE_2D, texname);
return texname;
}
int width,height;
std::vector<uint8_t> image;
if( ReadJPEG(filename, &image, &width, &height) ) {
std::cerr
<< "error reading JPEG"
<< std::endl;
return 0;
}
glGenTextures(1, &texname);
if( !texname ) {
std::cerr
<< "error generating OpenGL texture name"
<< std::endl;
return 0;
}
glBindTexture(GL_TEXTURE_2D, texname);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);
/* glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0,
GL_RGB,
GL_UNSIGNED_BYTE, buffer );
*/
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0,
GL_RGB,
GL_UNSIGNED_BYTE, &image[0]);
loaded_textures[filename] = texname;
return texname;
}
int ReadJPEG(
std::string const filename,
std::vector<uint8_t> *image,
int *width, int *height )
{
if( !image ) {
return -1;
}
FILE * const infile = fopen(filename.c_str(), "rb");
if( !infile ) {
std::cerr
<< "error opening file "
<< filename
<< " : "
<< strerror(errno)
<< std::endl;
return -2;
}
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_calc_output_dimensions(&cinfo);
jpeg_start_decompress(&cinfo);
if( width ) { *width = cinfo.output_width; }
if( height ) { *height = cinfo.output_height; }
size_t const stride = cinfo.output_width * cinfo.output_components;
image->resize(cinfo.output_height * stride);
for(size_t i = 0; i < cinfo.output_height;) {
uint8_t * const row = &(*image)[stride * i];
i += jpeg_read_scanlines(&cinfo, (unsigned char**)&row, 1);
}
jpeg_finish_decompress(&cinfo);
fclose(infile);
return 0;
}
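For reference, calling this from a draw routine might look like the following sketch (the filename is a placeholder; MyLoadTexture caches the texture and binds it itself):
GLuint tex = MyLoadTexture( "table.jpg" ); // cached after the first call
if ( tex ) {
    glEnable( GL_TEXTURE_2D );
    // ... draw the textured quad here ...
    glDisable( GL_TEXTURE_2D );
}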
I noticed you are using a deprecated version of OpenGL. Now, as for your situation: it is not obvious to me whether or not you are concerned with alpha values within your textures. Also, how many bytes does each color channel have? This is important to know.
In this section of your code there are a couple things that I see that should be improved:
glGenTextures(1, &texname);
if( !texname ) {
std::cerr
<< "error generating OpenGL texture name"
<< std::endl;
return 0;
}
glBindTexture(GL_TEXTURE_2D, texname);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);
/* glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0,
GL_RGB,
GL_UNSIGNED_BYTE, buffer );
*/
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0,
GL_RGB,
GL_UNSIGNED_BYTE, &image[0]);
loaded_textures[filename] = texname;
Before you call glGenTextures(1, &texname); you should call this first:
glGetError(); // This will clear errors
After your call to glGenTextures(1, &texname);, instead of checking texname for errors, this method is preferred:
GLenum err = glGetError();
if ( err != GL_NO_ERROR ) {
// Generate Error Here, either log message to file, console or throw an error
}
Next is your glBindTexture( GL_TEXTURE_2D, texname );, which is good. Now, as for OpenGL's functions that generate and bind textures: the second parameter is an unsigned ID that OpenGL generates automatically, and the application then associates it with the filename it was given. It is easier for a machine to compare an unsigned value when checking multiple resources than it is to check and verify a string; it is also a performance gain when an application is working with 100s, 1000s, or even 1,000,000s of resource files.
After this, you are setting up the parameters for how OpenGL will deal with mipmaps, and this section of your code appears to be fine if this is the behavior you want for mipmapping. However, the caller of this function has no way to set the quality level of a mipmap. What I normally do here is have an enumeration for the different quality levels of mipmaps; here is a snippet of how I handle working with them.
Note: the enum value is not defined or declared within this function; it would be a parameter passed to it.
// This enum I usually have it declared or defined in a CommonStructs header.
enum FilterQuality {
FILTER_NONE = 1,
FILTER_GOOD,
FILTER_BETTER,
FILTER_BEST
}; // FilterQuality
// Now, inside my function for generating textures, I have something like this for mipmaps:
if ( texture.generateMipMap ) {
switch ( texture.filterQuality ) {
case TextureInfo::FILTER_NONE : {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST );
break;
}
case TextureInfo::FILTER_GOOD: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR );
break;
}
case TextureInfo::FILTER_BEST: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
break;
}
default: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
}
} // Switch
if ( openglVersion.x < 3 ) {
// In OpenGL v3 GL_GENERATE_MIPMAP Is Deprecated, And In 3.1+ It Was Removed
// So For Those Versions We Use glGenerateMipmap below
static const unsigned int GL_GENERATE_MIPMAP = 0x8191;
glTexParameteri( GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE );
}
} else { // No MipMaps
switch( texture.filterQuality ) {
case TextureInfo::FILTER_NONE:
case TextureInfo::FILTER_GOOD: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
break;
}
default: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
}
}
}
The only thing that I see that could be irrelevant is your call to glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);, as I have never used it. This may be some behavior that you want; I do not know.
After you set up your mipmapping information, this is where you want to set the parameters for clamping or wrap-repeat, which are missing from your texture-generation code. They look like this:
bool wrapRepeat; // This variable would not be here inside of the function,
// but would come from this function's definition so that the caller can
// set this parameter or flag to the behavior they want for each texture in use.
// What these two lines of code will do depending on the state of wrapRepeat
// is they will cause the last parameter to be either WRAP in S & T coordinates or
// to clamp to edge in both S & T coordinates.
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, (wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, (wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
Finally, you have your code that loads the texture into memory; as a reference, this is what I have within my function to generate textures:
// Load Texture Into Video Memory
glPixelStorei( GL_UNPACK_ALIGNMENT, texture.hasAlphaChannel ? 4 : 1 );
glTexImage2D( GL_TEXTURE_2D,
0,
( texture.hasAlphaChannel ? GL_RGBA8 : GL_RGB8 ),
texture.uWidth,
texture.uHeight,
0,
( texture.hasAlphaChannel ? GL_RGBA : GL_RGB ),
GL_UNSIGNED_BYTE,
&texture.vPixelData[0] );
if ( texture.generateMipMap && openglVersion.x >= 3 ) {
glGenerateMipmap( GL_TEXTURE_2D );
}
This should help you to resolve your current problem with your textures being wrapped and not clamped.
Now, as for my project: I have a derived file-handler class that reads in texture files specifically. It loads either a TGA or a PNG file, and the way it is designed, it can load any other texture or image file as long as you add a function to the class to parse that file type. My actual image types used in rendering are separate from the file reader, and both are separate from the code here.
This code belongs to a class called AssetStorage, which is responsible for storing all assets and managing their memory, and that is it. It does not render anything, but it does load objects into RAM and video RAM. My batch and batch-manager classes handle all of the rendering of objects, and my objects are all built on GLSL shaders. I show this function in full so it can serve as a reference for how my framework is designed.
// ----------------------------------------------------------------------------
// add()
// Creates An OpenGL Texture And Returns Its ID Value
// This Can Only Be Called From The Main OpenGL Thread
TextureInfo AssetStorage::add( const Texture& texture, const std::string& strFilename ) {
if ( INVALID_UNSIGNED != getTextureInfo( strFilename ).uTextureId ) {
std::ostringstream strStream;
strStream << __FUNCTION__ << " can not store " << strFilename << " multiple times";
throw ExceptionHandler( strStream );
}
TextureInfo textureInfo;
textureInfo.hasTransparency = texture.hasAlphaChannel;
textureInfo.size = glm::uvec2( texture.uWidth, texture.uHeight );
glGetError(); // Clear Errors
glGenTextures( 1, &textureInfo.uTextureId );
GLenum err = glGetError();
if ( err != GL_NO_ERROR ) {
std::ostringstream strStream;
strStream << __FUNCTION__ << " failed glGenTextures with error code 0x" << std::hex << err;
throw ExceptionHandler( strStream );
}
glBindTexture( GL_TEXTURE_2D, textureInfo.uTextureId );
// Wrap Textures
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, ( texture.wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, ( texture.wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
const glm::uvec2& openglVersion = s_pSettings->getOpenglVersion();
if ( texture.generateMipMap ) {
switch ( texture.filterQuality ) {
case TextureInfo::FILTER_NONE : {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST );
break;
}
case TextureInfo::FILTER_GOOD: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR );
break;
}
case TextureInfo::FILTER_BEST: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
break;
}
default: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
}
} // Switch
if ( openglVersion.x < 3 ) {
// In OpenGL v3 GL_GENERATE_MIPMAP Is Deprecated, And In 3.1+ It Was Removed
// So For Those Versions We Use glGenerateMipmap below
static const unsigned int GL_GENERATE_MIPMAP = 0x8191;
glTexParameteri( GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE );
}
} else { // No MipMaps
switch( texture.filterQuality ) {
case TextureInfo::FILTER_NONE:
case TextureInfo::FILTER_GOOD: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
break;
}
default: {
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
}
}
}
// Load Texture Into Video Memory
glPixelStorei( GL_UNPACK_ALIGNMENT, texture.hasAlphaChannel ? 4 : 1 );
glTexImage2D( GL_TEXTURE_2D,
0,
( texture.hasAlphaChannel ? GL_RGBA8 : GL_RGB8 ),
texture.uWidth,
texture.uHeight,
0,
( texture.hasAlphaChannel ? GL_RGBA : GL_RGB ),
GL_UNSIGNED_BYTE,
&texture.vPixelData[0] );
if ( texture.generateMipMap && openglVersion.x >= 3 ) {
glGenerateMipmap( GL_TEXTURE_2D );
}
// Store TextureId
BlockThread blockThread( s_criticalSection );
m_textureInfos.insert( MapTextureInfos::value_type( strFilename, textureInfo ) );
if ( s_pSettings->isDebugLoggingEnabled( Settings::DEBUG_MEMORY ) ) {
Logger::log( std::string( "Created " ) + strFilename );
}
return textureInfo;
} // add
I'm having some weird memory issues in a C program I'm writing, and I think something related to my texture loading system is the cause.
The problem is that, depending on how many textures I create, different issues come up. Fewer textures tend to ever so slightly change other variables in the program. If I include all the textures I want, the program may spit out a host of different "*** glibc detected ***" type errors, and occasionally a segmentation fault.
The kicker is that occasionally, the program works perfectly. It's all the luck of the draw.
My code is pretty heavy at this point, so I'll just post what I believe to be the relevant parts of it.
d_newTexture(d_loadBMP("resources/sprites/default.bmp"), &textures);
That is the call I use to load a texture into OpenGL. "textures" is a variable of type texMan_t, which is a struct I made:
typedef struct {
    GLuint texID[500];
    int texInc;
} texMan_t;
The idea is that texMan_t encompasses all your texture IDs for easier use. texInc just keeps track of what the next available member of texID is.
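So drawing sprite n later is just an index into the manager (an illustrative line, not from the program itself):
glBindTexture( GL_TEXTURE_2D, textures.texID[n] );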
This is d_newTexture:
void d_newTexture(imgInfo_t info, texMan_t* tex) {
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &tex->texID[tex->texInc]);
glBindTexture(GL_TEXTURE_2D, tex->texID[tex->texInc]);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
gluBuild2DMipmaps( GL_TEXTURE_2D, 4, info.width, info.height, GL_RGBA, GL_UNSIGNED_BYTE, info.data );
tex->texInc++;
glDisable(GL_TEXTURE_2D);
}
I also use a function by the name of d_newTextures, which is identical to d_newTexture except that it splits a simple sprite sheet into multiple textures.
void d_newTextures(imgInfo_t info, int count, texMan_t* tex) {
glEnable(GL_TEXTURE_2D);
glGenTextures(count, &tex->texID[tex->texInc]);
for(int i=0; i<count; i++) {
glBindTexture(GL_TEXTURE_2D, tex->texID[tex->texInc+i]);
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
gluBuild2DMipmaps( GL_TEXTURE_2D, 4, info.width, info.height/count,
GL_RGBA, GL_UNSIGNED_BYTE, &info.data[info.width*(info.height/count)*4*i] );
}
tex->texInc+=count;
glDisable(GL_TEXTURE_2D);
}
What could be the cause of the issues I'm seeing?
EDIT: Recently I've also been getting the error "*** glibc detected *** out/PokeEngine: free(): invalid pointer: 0x01010101 ***" after closing the program, assuming it manages to start properly. The backtrace looks like this:
/lib/i386-linux-gnu/libc.so.6(+0x75ee2)[0xceeee2]
/usr/lib/nvidia-173/libGLcore.so.1(+0x277c7c)[0x109ac7c]
EDIT 2:
Here's the code for d_loadBMP as well. Hope it helps!
imgInfo_t d_loadBMP(char* filename) {
imgInfo_t out;
FILE * bmpFile;
bmpFile = fopen(filename, "r");
if(bmpFile == NULL) {
printf("ERROR: Texture file not found!\n");
}
bmp_sign bmpSig;
bmp_fHeader bmpFileHeader;
bmp_iHeader bmpInfoHeader;
fread(&bmpSig, sizeof(bmp_sign), 1, bmpFile);
fread(&bmpFileHeader, sizeof(bmp_fHeader), 1, bmpFile);
fread(&bmpInfoHeader, sizeof(bmp_iHeader), 1, bmpFile);
out.width = bmpInfoHeader.width;
out.height = bmpInfoHeader.height;
out.size = bmpInfoHeader.imageSize;
out.data = (char*)malloc(sizeof(char)*out.width*out.height*4);
// Loaded backwards because that's how BMPs are stored
for(int i=out.width*out.height*4; i>0; i-=4) {
fread(&out.data[i+2], sizeof(char), 1, bmpFile);
fread(&out.data[i+1], sizeof(char), 1, bmpFile);
fread(&out.data[i], sizeof(char), 1, bmpFile);
out.data[i+3] = 255;
}
return out;
}
The way you're loading BMP files is wrong. You're reading right into structs, which is very unreliable, because the memory layout your compiler chooses for a struct may differ vastly from the data layout in the file. Also, your code contains zero error checks. If I had to make an educated guess, I'd say this is where your problems are.
BTW, glEnable(GL_TEXTURE_…) enables a texture target as a data source for rendering. It's completely unnecessary for just generating and uploading textures, so you can omit the bracing glEnable(GL_TEXTURE_2D); … glDisable(GL_TEXTURE_2D) blocks in your loading code. Also, I'd not use gluBuild2DMipmaps (it doesn't support arbitrary texture dimensions, and you're disabling mipmapping anyway) and would just upload directly with glTexImage2D.
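A minimal sketch of that direct upload, reusing the names from d_newTexture above and with filtering modes that don't reference mipmap levels:
glBindTexture( GL_TEXTURE_2D, tex->texID[tex->texInc] );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST ); // no *_MIPMAP_* mode, since no mipmaps exist
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, info.width, info.height, 0,
              GL_RGBA, GL_UNSIGNED_BYTE, info.data );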
Also, I don't get your need for a texture manager, or at least not why it looks like this. A much better approach would be a hash map file path → texture ID, plus a reference count.
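A sketch of that idea (the names here are illustrative, and createTextureFromFile stands in for whatever loader you use; an OpenGL header is assumed to be included already):
#include <map>
#include <string>

struct TexEntry { GLuint id; int refs; };
static std::map<std::string, TexEntry> g_texCache;

GLuint acquireTexture( const std::string &path ) {
    std::map<std::string, TexEntry>::iterator it = g_texCache.find( path );
    if ( it != g_texCache.end() ) { ++it->second.refs; return it->second.id; }
    GLuint id = createTextureFromFile( path ); // hypothetical helper: load the file, return a GL texture name
    TexEntry entry = { id, 1 };
    g_texCache[path] = entry;
    return id;
}

void releaseTexture( const std::string &path ) {
    std::map<std::string, TexEntry>::iterator it = g_texCache.find( path );
    if ( it == g_texCache.end() ) return;
    if ( --it->second.refs == 0 ) { // last user gone: free the GL object
        glDeleteTextures( 1, &it->second.id );
        g_texCache.erase( it );
    }
}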
#include <stdio.h>
#include <GL/gl.h>
#include <GL/glut.h>
#define KEY_ESCAPE 27
void display();
void keyboard(unsigned char,int,int);
GLuint LoadTextureRAW( const char * filename, int wrap );
int main(int argc, char **argv) {
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB|GLUT_DOUBLE|GLUT_DEPTH );
glutInitWindowSize(600,400);
glutCreateWindow("Opengl Test");
glutDisplayFunc(display);
glutKeyboardFunc(keyboard);
glutMainLoop();
return 0;
}
void display() {
GLuint texture=LoadTextureRAW("ball.png",1);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D,texture);
glBegin( GL_QUADS );
glTexCoord2d(0.0,0.0); glVertex2d(0.0,0.0);
glTexCoord2d(1.0,0.0); glVertex2d(1.0,0.0);
glTexCoord2d(1.0,1.0); glVertex2d(1.0,1.0);
glTexCoord2d(0.0,1.0); glVertex2d(0.0,1.0);
glEnd();
glutSwapBuffers();
}
// load a 256x256 RGB .RAW file as a texture
GLuint LoadTextureRAW( const char * filename, int wrap )
{
GLuint texture;
int width, height;
// BYTE * data;
int *data;
FILE * file;
// open texture data
file = fopen( filename, "rb" );
if ( file == NULL ) return 0;
// allocate buffer
width = 256;
height = 256;
data = (int*)malloc( width * height * 3 );
// read texture data
fread( data, width * height * 3, 1, file );
fclose( file );
// allocate a texture name
glGenTextures( 1, &texture );
// select our current texture
glBindTexture( GL_TEXTURE_2D, texture );
// select modulate to mix texture with color for shading
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
// when texture area is small, bilinear filter the closest mipmap
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR_MIPMAP_NEAREST );
// when texture area is large, bilinear filter the first mipmap
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// if wrap is true, the texture wraps over at the edges (repeat)
// ... false, the texture ends at the edges (clamp)
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
wrap ? GL_REPEAT : GL_CLAMP );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
wrap ? GL_REPEAT : GL_CLAMP );
// build our texture mipmaps
gluBuild2DMipmaps( GL_TEXTURE_2D, 3, width, height,
GL_RGB, GL_UNSIGNED_BYTE, data );
// free buffer
free( data );
return texture;
}
void keyboard(unsigned char key, int mousePositionX, int mousePositionY) {
switch ( key ) {
case KEY_ESCAPE:
exit ( 0 );
break;
default:
break;
}
}
I followed this tutorial: http://www.nullterminator.net/gltexture.html
What should I do?
"LoadTextureRAW()" is not for PNG files. You will need a third party library like libpng to decode a png file, because they are compressed.
If you don't want to implement libpng yourself, which is kind of advanced, then you can probably find a wrapper library on google somewhere.
You can find a minimal implementation of libpng here:
http://zarb.org/~gc/html/libpng.html
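Alternatively, the stb_image approach shown earlier in this thread decodes PNG too; stbi_load does the decompression for you (a sketch, forcing 4 channels):
int width = 0, height = 0, channels = 0;
unsigned char *data = stbi_load( "ball.png", &width, &height, &channels, 4 );
// upload with glTexImage2D( ..., GL_RGBA, GL_UNSIGNED_BYTE, data ),
// then release with stbi_image_free( data )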
I have seen many code samples for loading textures for OpenGL; many of them are a bit complicated to understand or require new functions with a lot of code.
Since OpenCV allows us to load almost any image format, I was thinking it could be a simple and efficient way to load textures into OpenGL, but I am missing something. I have this piece of code in C++:
cv::Mat texture_cv;
GLuint texture[1];
int Status=FALSE;
if( texture_cv = imread("stones.jpg")) {
Status=TRUE; // Set The Status To TRUE
glGenTextures(1, &texture[0]); // Create The Texture
glBindTexture(GL_TEXTURE_2D, texture[0]);
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S , GL_REPEAT );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
glTexImage2D(GL_TEXTURE_2D, 0, 3, texture_cv.cols, texture_cv.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, texture_cv.data);
}
And it does not compile, because of this error:
error C2451: conditional expression of type 'cv::Mat' is illegal
Any suggestions? How should I do the conversion from cv::Mat to an OpenGL texture?
Your error appears here, right?
if( texture_cv = imread("stones.jpg")) {
That's because in if(expr), expr must be bool or convertible to bool, and there is no implicit conversion from cv::Mat to bool. But you can check the result of imread like this:
texture_cv = imread("stones.jpg");
if (texture_cv.empty()) {
    // handle the error
} else {
    // do the right job
}
See: cv::Mat::empty(), cv::imread
Hope that helped you.
The assignment expression
texture_cv = imread("stones.jpg")
returns a cv::Mat, which can't be used in a conditional expression. You should write something like
if((texture_cv = imread("stones.jpg")) != /* insert condition here */ ) {
//...
}
or
texture_cv = imread("stones.jpg");
if(!texture_cv.empty()) {
    //...
}
Based on this doc, I suggest changing your test:
texture_cv = imread("stones.jpg");
if (texture_cv.data != NULL) {
...
Another short question...
I think you may need to use
glTexImage2D(GL_TEXTURE_2D, 0, 3, texture_cv.cols, texture_cv.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, texture_cv.ptr());
instead of
glTexImage2D(GL_TEXTURE_2D, 0, 3, texture_cv.cols, texture_cv.rows, 0, GL_RGB, GL_UNSIGNED_BYTE, texture_cv.data);
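One more caveat, as an aside: cv::imread returns pixels in BGR order, so uploading them as GL_RGB swaps red and blue. If your OpenGL headers provide GL_BGR (OpenGL 1.2+), something like this should keep the colors right (an untested sketch):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // cv::Mat rows are not 4-byte padded in general
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, texture_cv.cols, texture_cv.rows,
             0, GL_BGR, GL_UNSIGNED_BYTE, texture_cv.data);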