Fill a polygon with an image - OpenGL, C++

http://coliru.stacked-crooked.com/a/400b648e7442eeb3
You can see a piece of code at the link: the idea is to fill a quad with a texture (the image is repeated inside the quad).
I want the image to repeat only once: in other words, to use the image as the background of the polygon. I'm in an 800x600 window, and I would like to put a billiards table, for example 600x400, in a quad defined by glVertex3i. It could be glVertex2i and implemented in 2D, as it is a 2D game.
Any help would be appreciated.
Also, I have better code to support any JPG file instead of loadtexture and readjpeg (I think):
GLuint MyLoadTexture(std::string const filename)
{
    GLuint texname = 0;
    /* this is actually tied to the OpenGL context, so this should
     * actually be a map GLcontext -> std::string -> texturename */
    static std::map<std::string, GLuint> loaded_textures;
    if( loaded_textures.find(filename) != loaded_textures.end() ) {
        texname = loaded_textures[filename];
        glBindTexture(GL_TEXTURE_2D, texname);
        return texname;
    }
    int width, height;
    std::vector<uint8_t> image;
    if( ReadJPEG(filename, &image, &width, &height) ) {
        std::cerr << "error reading JPEG" << std::endl;
        return 0;
    }
    glGenTextures(1, &texname);
    if( !texname ) {
        std::cerr << "error generating OpenGL texture name" << std::endl;
        return 0;
    }
    glBindTexture(GL_TEXTURE_2D, texname);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    glTexImage2D(
        GL_TEXTURE_2D, 0, GL_RGB,
        width, height, 0,
        GL_RGB, GL_UNSIGNED_BYTE, &image[0]);
    loaded_textures[filename] = texname;
    return texname;
}
int ReadJPEG(
    std::string const filename,
    std::vector<uint8_t> *image,
    int *width, int *height )
{
    if( !image ) {
        return -1;
    }
    FILE * const infile = fopen(filename.c_str(), "rb");
    if( !infile ) {
        std::cerr << "error opening file " << filename
                  << " : " << strerror(errno) << std::endl;
        return -2;
    }
    struct jpeg_decompress_struct cinfo;
    struct jpeg_error_mgr jerr;
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_decompress(&cinfo);
    jpeg_stdio_src(&cinfo, infile);
    jpeg_read_header(&cinfo, TRUE);
    jpeg_calc_output_dimensions(&cinfo);
    jpeg_start_decompress(&cinfo);
    if( width )  { *width  = cinfo.output_width;  }
    if( height ) { *height = cinfo.output_height; }
    size_t const stride = cinfo.output_width * cinfo.output_components;
    image->resize(cinfo.output_height * stride);
    for(size_t i = 0; i < cinfo.output_height;) {
        uint8_t * const row = &(*image)[stride * i];
        i += jpeg_read_scanlines(&cinfo, (unsigned char**)&row, 1);
    }
    jpeg_finish_decompress(&cinfo);
    jpeg_destroy_decompress(&cinfo); /* release libjpeg's internal state */
    fclose(infile);
    return 0;
}

I noticed you are using a deprecated version of OpenGL. Now as for your situation, it is not obvious to me whether you are concerned with alpha values within your textures or not. Also, how many bytes does each color channel have? This is important to know.
In this section of your code there are a couple of things that I see should be improved:
glGenTextures(1, &texname);
if( !texname ) {
    std::cerr << "error generating OpenGL texture name" << std::endl;
    return 0;
}
glBindTexture(GL_TEXTURE_2D, texname);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexImage2D(
    GL_TEXTURE_2D, 0, GL_RGB,
    width, height, 0,
    GL_RGB, GL_UNSIGNED_BYTE, &image[0]);
loaded_textures[filename] = texname;
Before you call glGenTextures(1, &texname); you should call this first:
glGetError(); // this will clear any pending errors
After your call to glGenTextures(1, &texname);, instead of checking texname for errors, this method is preferred:
GLenum err = glGetError();
if ( err != GL_NO_ERROR ) {
    // Generate the error here: either log a message to a file or the console, or throw an error
}
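If you do this in many places, the check can be wrapped in a small helper so each call site stays one line. This is just a sketch; the helper name and message format are mine, not part of the original code:
static bool CheckGLError( const char * where )
{
    GLenum err = glGetError();
    if ( err != GL_NO_ERROR ) {
        std::cerr << where << " failed, glGetError() = 0x"
                  << std::hex << err << std::endl;
        return true;
    }
    return false;
}

// Usage inside MyLoadTexture:
glGetError(); // clear any stale error first
glGenTextures(1, &texname);
if ( CheckGLError("glGenTextures") ) { return 0; }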
Next is your glBindTexture( GL_TEXTURE_2D, texname );, which is good. Regarding OpenGL's functions that generate and bind textures: the second parameter is an unsigned ID that OpenGL generates, which your map then associates with the filename it was given. It is easier for a machine to compare an unsigned value when looking up resources than it is to check and verify a string; this also becomes a performance gain when an application is working with hundreds, thousands, or even millions of resource files.
After this you are setting up the parameters for how OpenGL will deal with mipmaps, and this section of your code appears to be fine if this is the behavior you want for mipmapping. However, the caller of this function has no way to set the quality level of a mipmap. What I normally do here is have an enumeration for the different quality levels of mipmaps; here is a snippet of how I handle working with them.
Note: the enum value is not defined or declared within this function; it would be a parameter passed to it.
// I usually have this enum declared or defined in a CommonStructs header.
enum FilterQuality {
    FILTER_NONE = 1,
    FILTER_GOOD,
    FILTER_BETTER,
    FILTER_BEST
}; // FilterQuality
// Now, within my function for generating textures, I have something like this for mipmaps:
if ( texture.generateMipMap ) {
    switch ( texture.filterQuality ) {
        case TextureInfo::FILTER_NONE: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST );
            break;
        }
        case TextureInfo::FILTER_GOOD: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR );
            break;
        }
        case TextureInfo::FILTER_BEST: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
            break;
        }
        default: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
        }
    } // switch
    if ( openglVersion.x < 3 ) {
        // GL_GENERATE_MIPMAP is deprecated in OpenGL 3.0 and was removed in 3.1+,
        // so for those newer versions we use glGenerateMipmap below instead.
        static const unsigned int GL_GENERATE_MIPMAP = 0x8191;
        glTexParameteri( GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE );
    }
} else { // No mipmaps
    switch( texture.filterQuality ) {
        case TextureInfo::FILTER_NONE:
        case TextureInfo::FILTER_GOOD: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
            break;
        }
        default: {
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
        }
    }
}
The only thing that I see that could be irrelevant is your call to glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);, as I have never used it; this may be some behavior that you want, I do not know.
After you set up your mipmapping information, this is where you want to set the parameters for clamping or wrap-repeat, which are missing from your texture-generation code. They look like this:
// This variable would not live inside the function; it would come from the
// function's parameter list, so that the caller can set this flag to the
// behavior they want for each texture in use.
bool wrapRepeat;
// Depending on the state of wrapRepeat, these two lines set the last
// parameter either to repeat in the S & T coordinates or to clamp to the
// edge in both S & T coordinates.
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, ( wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, ( wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
Finally you have your code that loads the texture into video memory; as a reference, this is what I have within my function that generates textures:
// Load texture into video memory
glPixelStorei( GL_UNPACK_ALIGNMENT, texture.hasAlphaChannel ? 4 : 1 );
glTexImage2D( GL_TEXTURE_2D,
              0,
              ( texture.hasAlphaChannel ? GL_RGBA8 : GL_RGB8 ),
              texture.uWidth,
              texture.uHeight,
              0,
              ( texture.hasAlphaChannel ? GL_RGBA : GL_RGB ),
              GL_UNSIGNED_BYTE,
              &texture.vPixelData[0] );
if ( texture.generateMipMap && openglVersion.x >= 3 ) {
    glGenerateMipmap( GL_TEXTURE_2D );
}
This should help you to resolve your current problem with your textures being wrapped and not clamped.
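With clamping in place, making the image appear exactly once is then just a matter of giving the quad texture coordinates that span 0..1. Here is a minimal fixed-function sketch for your 600x400 table centered in the 800x600 window; it assumes an orthographic projection such as gluOrtho2D(0, 800, 0, 600), and the filename is a placeholder:
glEnable( GL_TEXTURE_2D );
MyLoadTexture( "table.jpg" ); // placeholder filename; binds the texture on success
glBegin( GL_QUADS );
    glTexCoord2f( 0.0f, 0.0f ); glVertex2i( 100, 100 ); // lower-left
    glTexCoord2f( 1.0f, 0.0f ); glVertex2i( 700, 100 ); // lower-right
    glTexCoord2f( 1.0f, 1.0f ); glVertex2i( 700, 500 ); // upper-right
    glTexCoord2f( 0.0f, 1.0f ); glVertex2i( 100, 500 ); // upper-left
glEnd();
Because the coordinates never exceed 1.0, the image is drawn exactly once, and with GL_CLAMP_TO_EDGE nothing would tile even if they did.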
Now as for my project: I have a derived file-handler class that reads in texture files specifically. It loads either a TGA or a PNG file, and the way it is designed, it can load any other texture or image file as long as you add a function to that class to parse the file type. My actual image types used in rendering are separate from the file reader, and they are separate from the code here. This code belongs to a class called AssetStorage, which is responsible for storing all assets and managing their memory, and that is it. This class does not render anything, but it does load objects into RAM and video RAM. It is my batch class and batch manager classes that handle all of the rendering of objects. My objects are all built on GLSL shaders, but I show this function in full so it can serve as a reference for how my framework is designed.
// ----------------------------------------------------------------------------
// add()
// Creates An OpenGL Texture And Returns Its ID Value
// This Can Only Be Called From The Main OpenGL Thread
TextureInfo AssetStorage::add( const Texture& texture, const std::string& strFilename ) {
    if ( INVALID_UNSIGNED != getTextureInfo( strFilename ).uTextureId ) {
        std::ostringstream strStream;
        strStream << __FUNCTION__ << " can not store " << strFilename << " multiple times";
        throw ExceptionHandler( strStream );
    }
    TextureInfo textureInfo;
    textureInfo.hasTransparency = texture.hasAlphaChannel;
    textureInfo.size = glm::uvec2( texture.uWidth, texture.uHeight );
    glGetError(); // Clear errors
    glGenTextures( 1, &textureInfo.uTextureId );
    GLenum err = glGetError();
    if ( err != GL_NO_ERROR ) {
        std::ostringstream strStream;
        strStream << __FUNCTION__ << " failed glGenTextures with error code 0x" << std::hex << err;
        throw ExceptionHandler( strStream );
    }
    glBindTexture( GL_TEXTURE_2D, textureInfo.uTextureId );
    // Wrap textures
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, ( texture.wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, ( texture.wrapRepeat ? GL_REPEAT : GL_CLAMP_TO_EDGE ) );
    const glm::uvec2& openglVersion = s_pSettings->getOpenglVersion();
    if ( texture.generateMipMap ) {
        switch ( texture.filterQuality ) {
            case TextureInfo::FILTER_NONE: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST );
                break;
            }
            case TextureInfo::FILTER_GOOD: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR );
                break;
            }
            case TextureInfo::FILTER_BEST: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
                break;
            }
            default: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
            }
        } // switch
        if ( openglVersion.x < 3 ) {
            // GL_GENERATE_MIPMAP is deprecated in OpenGL 3.0 and was removed in 3.1+,
            // so for those newer versions we use glGenerateMipmap below instead.
            static const unsigned int GL_GENERATE_MIPMAP = 0x8191;
            glTexParameteri( GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE );
        }
    } else { // No mipmaps
        switch( texture.filterQuality ) {
            case TextureInfo::FILTER_NONE:
            case TextureInfo::FILTER_GOOD: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
                break;
            }
            default: {
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
                glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
            }
        }
    }
    // Load texture into video memory
    glPixelStorei( GL_UNPACK_ALIGNMENT, texture.hasAlphaChannel ? 4 : 1 );
    glTexImage2D( GL_TEXTURE_2D,
                  0,
                  ( texture.hasAlphaChannel ? GL_RGBA8 : GL_RGB8 ),
                  texture.uWidth,
                  texture.uHeight,
                  0,
                  ( texture.hasAlphaChannel ? GL_RGBA : GL_RGB ),
                  GL_UNSIGNED_BYTE,
                  &texture.vPixelData[0] );
    if ( texture.generateMipMap && openglVersion.x >= 3 ) {
        glGenerateMipmap( GL_TEXTURE_2D );
    }
    // Store TextureId
    BlockThread blockThread( s_criticalSection );
    m_textureInfos.insert( MapTextureInfos::value_type( strFilename, textureInfo ) );
    if ( s_pSettings->isDebugLoggingEnabled( Settings::DEBUG_MEMORY ) ) {
        Logger::log( std::string( "Created " ) + strFilename );
    }
    return textureInfo;
} // add
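A hypothetical call site, assuming some loader has already filled in a Texture object (the loader name and path are made up for illustration):
Texture texture;
if ( myFileReader.loadTGA( "assets/table.tga", texture ) ) { // hypothetical loader
    TextureInfo info = assetStorage.add( texture, "assets/table.tga" );
    // info.uTextureId can now be handed to the batch/render classes
}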

Related

Texture is rendered blank but glGetTexImage works perfectly

I've written some code that adds a UI overlay to an existing OpenGL application.
Unfortunately, I am not proficient in OpenGL but I do know that it somehow always manages to fail on some device.
Some backstory:
The general pipeline is:
Application renders -> UI is rendered to an FBO -> FBO is blitted to obtain an RGBA texture -> texture is drawn on top of the application scene.
So far so good. Then I encountered an issue on Intel cards on Ubuntu 16.04 where the texture broke between context switches (UI rendering is done using a QOpenGLContext; the application is a raw OpenGL context managed by OGRE, but the QOpenGLContext is set to share resources). I solved that issue by checking whether sharing works (create a texture in one context and check if the content is correct in the other); if not, I read the content back while still in context B and upload it again in context A.
However, on Ubuntu 18.04 on the same machine, for some reason sharing appears to work: the texture is still correct in the other context when retrieving its content with glGetTexImage.
Now here's the problem:
It's not being rendered. I get just the application scene without anything on top, but if I manually enable the workaround of downloading the texture and re-uploading it to a texture that was created in the application context, it works.
How can it be that the texture's content is all right, but it won't show unless I grab it using glGetTexImage and re-upload it to a texture created in the other context using glTexImage2D?
There has to be some state that is invalid and gets set correctly when using glTexImage2D.
Here's the code after the UI is rendered:
if (!checked_can_share_texture_)
{
    qopengl_wrapper_->drawInvisibleTestOverlay();
}
qopengl_wrapper_->finishRender();
if ( !can_share_texture_ || !checked_can_share_texture_ )
{
    glBindTexture( GL_TEXTURE_2D, qopengl_wrapper_->texture() );
    glGetTexImage( GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixel_data_ );
}
glBindTexture( GL_TEXTURE_2D, 0 );
qopengl_wrapper_->doneCurrent(); // Makes the application's context current again
glDisable( GL_DEPTH_TEST );
glDisable( GL_CULL_FACE );
glDisable( GL_LIGHTING );
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
glUseProgram( shader_program_ );
glUniform1i( glGetUniformLocation( shader_program_, "tex" ), 0 );
glBindBuffer( GL_ARRAY_BUFFER, vertex_buffer_object_ );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), nullptr );
glEnableVertexAttribArray( 0 );
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)) );
glEnableVertexAttribArray( 1 );
glActiveTexture( GL_TEXTURE0 );
glEnable( GL_TEXTURE_2D );
if ( can_share_texture_ )
{
    glBindTexture( GL_TEXTURE_2D, qopengl_wrapper_->texture() );
    if ( !checked_can_share_texture_ )
    {
        const int count = qopengl_wrapper_->size().width() * qopengl_wrapper_->size().height() * 4;
        const int thresh = std::ceil( count / 100.f );
        unsigned char content[count];
        glGetTexImage( GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, content );
        int wrong = 0;
        // can_share_texture_ = false; // Bypassing the actual check will make it work
        for (int i = 0; i < count; ++i) {
            if (content[i] == pixel_data_[i]) continue;
            if (++wrong < thresh) continue;
            can_share_texture_ = false;
            LOG( "OverlayManager: Looks like texture sharing isn't working on your system. Falling back to texture copying." );
            // If we can't share textures, we have to generate one
            glActiveTexture( GL_TEXTURE0 );
            glGenTextures( 1, &texture_ );
            glBindTexture( GL_TEXTURE_2D, texture_ );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
            glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
            break;
        }
        if (can_share_texture_)
        {
            delete pixel_data_;
            pixel_data_ = nullptr;
            LOG( "Texture sharing seems supported. Count: %d", count );
        }
        checked_can_share_texture_ = true;
    }
}
else
{
    glBindTexture( GL_TEXTURE_2D, texture_ );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, qopengl_wrapper_->size().width(), qopengl_wrapper_->size().height(), 0,
                  GL_RGBA, GL_UNSIGNED_BYTE, pixel_data_ );
}
glDrawArrays( GL_TRIANGLE_STRIP, 0, 4 );
glDisableVertexAttribArray( 0 );
glDisableVertexAttribArray( 1 );
glUseProgram( 0 );
Vertex Shader
#version 130
in vec3 pos;
in vec2 coord;
out vec2 texCoord;
void main()
{
    gl_Position = vec4(pos, 1.0); // Just output the incoming vertex
    texCoord = coord;
}
Fragment Shader
#version 130
uniform sampler2D tex;
in vec2 texCoord;
void main()
{
    gl_FragColor = texture(tex, texCoord);
}
TL;DR: The texture isn't rendered (completely transparent), but if I copy it to memory using glGetTexImage it looks fine, and if I copy that back to a texture created on the application context, it renders fine.
The graphics card is an Intel UHD 620 with Mesa version 18.2.8.
Edit: In case it wasn't clear, I'm copying the texture in the context of the application, not the texture's original context; that is the same context where the working texture is created. So if sharing didn't work, I shouldn't get the correct content at that point.

Switching between glTexImage3D and glTexStorage3D

I use a texture array to store texture atlases. For hardware that supports OpenGL 4.2 I use the glTexStorage3D approach; however, I would like to use texture arrays pre-4.2 too.
I checked several other threads with the same problem, like this one or this one. I tried to follow the solutions provided there; however, the texture array seems to be empty and no texture is visible during rendering.
My glTexStorage3D solution, which works without any problem:
glTexStorage3D(GL_TEXTURE_2D_ARRAY,
               1,
               GL_R8,
               2048, 2048,
               100);
And the glTexImage3D call, which should be equivalent but produces no display:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,
             GL_R8,
             2048, 2048, 100,
             0,
             GL_RED,
             GL_UNSIGNED_BYTE,
             0);
The texture data is uploaded to the specified index with the following snippet (atlas width and height are 2048 and depth is 1):
glBindTexture(GL_TEXTURE_2D_ARRAY, m_arrayTexture);
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                0,
                0, 0, m_nextTextureLevel,
                atlas->width, atlas->height, atlas->depth,
                GL_RED,
                GL_UNSIGNED_BYTE,
                atlas->data);
What am I missing here? Any help would be highly appreciated.
Edit:
Uploading the texture data to the array right away is not an option, as new textures can be added to the array during execution.
Edit v2, solution:
As usual, the problem was something trivial that I had overlooked. I dived into Nazar554's solution and compared it to my code. The problem was that I accidentally set the texture parameters using the wrong constant: the glTexParameteri calls were made with GL_TEXTURE_2D instead of GL_TEXTURE_2D_ARRAY. After changing these values everything worked like a charm.
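For reference, here is a minimal sketch of the corrected pre-4.2 path with the question's parameters (2048x2048, 100 layers, GL_R8); the important part is that every glTexParameteri call now targets GL_TEXTURE_2D_ARRAY:
glBindTexture(GL_TEXTURE_2D_ARRAY, m_arrayTexture);
// Allocate 100 layers of 2048x2048 GL_R8; a null data pointer leaves them uninitialized
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_R8, 2048, 2048, 100, 0,
             GL_RED, GL_UNSIGNED_BYTE, 0);
// Passing GL_TEXTURE_2D instead of GL_TEXTURE_2D_ARRAY here was the bug
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);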
You can take a look at the Texture.cpp I used in my project.
However, I did not use glTexSubImage3D() in the fallback case; instead I uploaded the texture data immediately (you are passing a 0 to preallocate the buffer).
Functions that might be interesting to you: Texture::loadTexStorageInternal(const std::string& fileName) and
bool Texture::loadTexInternal(const std::string& fileName)
Here is one of them; it handles the fallback when glTexStorage3D is unavailable. It is quite long because it tries to handle compressed formats and mipmaps.
bool Texture::loadTexInternal(const std::string& fileName)
{
    gli::texture Texture = gli::load(fileName);
    if(Texture.empty())
        return false;
    const gli::gl GL(gli::gl::PROFILE_GL33);
    const gli::gl::format Format = GL.translate(Texture.format(), Texture.swizzles());
    GLenum Target = static_cast<GLenum>(GL.translate(Texture.target()));
    Binder texBinder(*this, Target);
    glTexParameteri(Target, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(Target, GL_TEXTURE_MAX_LEVEL, static_cast<GLint>(Texture.levels() - 1));
    glTexParameteri(Target, GL_TEXTURE_SWIZZLE_R, Format.Swizzles[0]);
    glTexParameteri(Target, GL_TEXTURE_SWIZZLE_G, Format.Swizzles[1]);
    glTexParameteri(Target, GL_TEXTURE_SWIZZLE_B, Format.Swizzles[2]);
    glTexParameteri(Target, GL_TEXTURE_SWIZZLE_A, Format.Swizzles[3]);
    if(Texture.levels() >= 1)
        glTexParameteri(Target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    else
        glTexParameteri(Target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(Target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(Target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(Target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(Target, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
    //glm::tvec3<GLsizei> const Extent(Texture.extent());
    for(std::size_t Layer = 0; Layer < Texture.layers(); ++Layer)
    for(std::size_t Level = 0; Level < Texture.levels(); ++Level)
    for(std::size_t Face = 0; Face < Texture.faces(); ++Face)
    {
        GLsizei const LayerGL = static_cast<GLsizei>(Layer);
        glm::tvec3<GLsizei> loopExtent(Texture.extent(Level));
        Target = gli::is_target_cube(Texture.target())
            ? static_cast<GLenum>(static_cast<GLint>(GL_TEXTURE_CUBE_MAP_POSITIVE_X) + static_cast<GLint>(Face))
            : Target;
        switch(Texture.target())
        {
        case gli::TARGET_1D:
            if(gli::is_compressed(Texture.format()))
                glCompressedTexImage1D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x, 0,
                    static_cast<GLsizei>(Texture.size(Level)),
                    Texture.data(Layer, Face, Level));
            else
                glTexImage1D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x,
                    0,
                    static_cast<GLenum>(Format.External), static_cast<GLenum>(Format.Type),
                    Texture.data(Layer, Face, Level));
            break;
        case gli::TARGET_1D_ARRAY:
        case gli::TARGET_2D:
        case gli::TARGET_CUBE:
            if(gli::is_compressed(Texture.format()))
                glCompressedTexImage2D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x,
                    Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : loopExtent.y,
                    0,
                    static_cast<GLsizei>(Texture.size(Level)),
                    Texture.data(Layer, Face, Level));
            else
                glTexImage2D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x,
                    Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : loopExtent.y,
                    0,
                    static_cast<GLenum>(Format.External), static_cast<GLenum>(Format.Type),
                    Texture.data(Layer, Face, Level));
            break;
        case gli::TARGET_2D_ARRAY:
        case gli::TARGET_3D:
        case gli::TARGET_CUBE_ARRAY:
            if(gli::is_compressed(Texture.format()))
                glCompressedTexImage3D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x, loopExtent.y,
                    Texture.target() == gli::TARGET_3D ? loopExtent.z : LayerGL,
                    0,
                    static_cast<GLsizei>(Texture.size(Level)),
                    Texture.data(Layer, Face, Level));
            else
                glTexImage3D(
                    Target, static_cast<GLint>(Level),
                    static_cast<GLenum>(Format.Internal),
                    loopExtent.x, loopExtent.y,
                    Texture.target() == gli::TARGET_3D ? loopExtent.z : LayerGL,
                    0,
                    static_cast<GLenum>(Format.External), static_cast<GLenum>(Format.Type),
                    Texture.data(Layer, Face, Level));
            break;
        default:
            return false;
        }
    }
    return true;
}

glTexImage2D: convert alpha to RGBA (OpenGL ES)

I'm trying to convert working OpenGL code to OpenGL ES. After some digging, I've concluded the following function doesn't work in ES because converting between format and internalFormat isn't supported (i.e. the source and destination formats need to be the same). The easiest fix seems to be converting the alpha data to RGBA where r=g=b=0, which is what OpenGL was doing under the surface before. My attempted fix below doesn't seem to work, though, because I don't think I understand how the buffer is formatted well enough to make that conversion manually. Also, maybe there is an OpenGL ES function I can call that will make this copy for me. Not sure if it matters, but the file is a TGA file.
void foo( unsigned char *inBytes,
          unsigned int inWidth,
          unsigned int inHeight ) {
    int error;
    GLenum internalTexFormat = GL_RGBA;
    GLenum texDataFormat = GL_ALPHA;
    if( myAttemptedFix ) {
        texDataFormat = GL_RGBA;
        unsigned char rgbaBytes[inWidth * inHeight * 4];
        for(int i=0; i < inWidth * inHeight; i++) {
            rgbaBytes[4*i]     = 0;
            rgbaBytes[4*i + 1] = 0;
            rgbaBytes[4*i + 2] = 0;
            rgbaBytes[4*i + 3] = inBytes[i];
        }
        inBytes = &rgbaBytes[0];
    }
    glBindTexture( GL_TEXTURE_2D, mTextureID );
    error = glGetError();
    if( error != GL_NO_ERROR ) { // error
        printf( "Error binding to texture id %d, error = %d\n",
                (int)mTextureID,
                error );
    }
    glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
    if( mRepeat ) {
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
    }
    else {
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP );
    }
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    glTexImage2D( GL_TEXTURE_2D, 0,
                  internalTexFormat, inWidth,
                  inHeight, 0,
                  texDataFormat, GL_UNSIGNED_BYTE, inBytes );
    error = glGetError();
    if( error != GL_NO_ERROR ) { // error
        printf( "Error setting texture data for id %d, error = %d, \"%s\"\n",
                (int)mTextureID, error, glGetString( error ) );
    }
}
Edit: When I run my fix, it outlines the sprite correctly but also puts a lot of junk at the bottom that kind of looks like braille.
This looks more like a C++ problem. I believe your corrupted data is caused by this (shortened) code structure:
if (myAttemptedFix) {
    unsigned char rgbaBytes[inWidth * inHeight * 4];
    inBytes = &rgbaBytes[0];
}
The scope of rgbaBytes is the body of the if-statement. So the memory reserved for the array becomes invalid after the closing brace, and its content becomes undefined beyond that point. But you make your inBytes variable point at this memory, and use it after rgbaBytes has gone out of scope.
Since inBytes then points at unreserved memory, it's very likely that the memory is occupied by other variables in the code between this point and the glTexImage2D() call. So the content gets trashed before inBytes is consumed by the glTexImage2D() call.
The easiest way to fix this is to move the rgbaBytes declaration outside the if-statement:
unsigned char rgbaBytes[inWidth * inHeight * 4];
if (myAttemptedFix) {
    inBytes = &rgbaBytes[0];
}
You'll probably want to make the code structure a little nicer once you have this all figured out, but this should at least make it functional.
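An alternative that sidesteps the lifetime problem entirely (and the variable-length array, which is not standard C++) is heap storage that lives for the whole function. A sketch of the same fix, assuming the file is compiled as C++ with <vector> available:
std::vector<unsigned char> rgbaBytes; // stays alive until the function returns
if( myAttemptedFix ) {
    texDataFormat = GL_RGBA;
    rgbaBytes.resize( inWidth * inHeight * 4 );
    for( unsigned int i = 0; i < inWidth * inHeight; i++ ) {
        rgbaBytes[4*i]     = 0;           // r
        rgbaBytes[4*i + 1] = 0;           // g
        rgbaBytes[4*i + 2] = 0;           // b
        rgbaBytes[4*i + 3] = inBytes[i];  // alpha from the source buffer
    }
    inBytes = rgbaBytes.data();
}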

OpenGL Textures Cause Memory Issues

I'm having some weird memory issues in a C program I'm writing, and I think something related to my texture loading system is the cause.
The problem is that, depending on how many textures I make, different issues start coming up. Fewer textures tend to ever so slightly change other variables in the program. If I include all the textures I want to include, the program may spit out a host of different "*** glibc detected ***" type errors, and occasionally a segmentation fault.
The kicker is that occasionally, the program works perfectly. It's all the luck of the draw.
My code is pretty heavy at this point, so I'll just post what I believe to be the relevant parts of it.
d_newTexture(d_loadBMP("resources/sprites/default.bmp"), &textures);
Is the function I call to load a texture into OpenGL. "textures" is a variable of type texMan_t, which is a struct I made.
typedef struct {
    GLuint texID[500];
    int texInc;
} texMan_t;
The idea is that texMan_t encompasses all your texture IDs for easier use. texInc just keeps track of what the next available member of texID is.
This is d_newTexture:
void d_newTexture(imgInfo_t info, texMan_t* tex) {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &tex->texID[tex->texInc]);
    glBindTexture(GL_TEXTURE_2D, tex->texID[tex->texInc]);
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
    gluBuild2DMipmaps( GL_TEXTURE_2D, 4, info.width, info.height, GL_RGBA, GL_UNSIGNED_BYTE, info.data );
    tex->texInc++;
    glDisable(GL_TEXTURE_2D);
}
I also use a function by the name of d_newTextures, which is identical to d_newTexture except that it splits up a simple sprite sheet into multiple textures.
void d_newTextures(imgInfo_t info, int count, texMan_t* tex) {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(count, &tex->texID[tex->texInc]);
    for(int i=0; i<count; i++) {
        glBindTexture(GL_TEXTURE_2D, tex->texID[tex->texInc+i]);
        glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL );
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
        glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
        gluBuild2DMipmaps( GL_TEXTURE_2D, 4, info.width, info.height/count,
                           GL_RGBA, GL_UNSIGNED_BYTE, &info.data[info.width*(info.height/count)*4*i] );
    }
    tex->texInc += count;
    glDisable(GL_TEXTURE_2D);
}
What could be the cause of the issues I'm seeing?
EDIT: Recently, I've also been getting the error "*** glibc detected *** out/PokeEngine: free(): invalid pointer: 0x01010101 ***" after closing the program as well, assuming it's able to properly begin. The backtrace looks like this:
/lib/i386-linux-gnu/libc.so.6(+0x75ee2)[0xceeee2]
/usr/lib/nvidia-173/libGLcore.so.1(+0x277c7c)[0x109ac7c]
EDIT 2:
Here's the code for d_loadBMP as well. Hope it helps!
imgInfo_t d_loadBMP(char* filename) {
    imgInfo_t out;
    FILE * bmpFile;
    bmpFile = fopen(filename, "r");
    if(bmpFile == NULL) {
        printf("ERROR: Texture file not found!\n");
    }
    bmp_sign bmpSig;
    bmp_fHeader bmpFileHeader;
    bmp_iHeader bmpInfoHeader;
    fread(&bmpSig, sizeof(bmp_sign), 1, bmpFile);
    fread(&bmpFileHeader, sizeof(bmp_fHeader), 1, bmpFile);
    fread(&bmpInfoHeader, sizeof(bmp_iHeader), 1, bmpFile);
    out.width = bmpInfoHeader.width;
    out.height = bmpInfoHeader.height;
    out.size = bmpInfoHeader.imageSize;
    out.data = (char*)malloc(sizeof(char)*out.width*out.height*4);
    // Loaded backwards because that's how BMPs are stored
    for(int i=out.width*out.height*4; i>0; i-=4) {
        fread(&out.data[i+2], sizeof(char), 1, bmpFile);
        fread(&out.data[i+1], sizeof(char), 1, bmpFile);
        fread(&out.data[i], sizeof(char), 1, bmpFile);
        out.data[i+3] = 255;
    }
    return out;
}
The way you're loading BMP files is wrong. You're reading right into structs, which is very unreliable, because the memory layout your compiler chooses for a struct may vastly differ from the data layout in the file. Also, your code contains zero error checks. If I had to make an educated guess, I'd say this is where your problems are.
BTW, glEnable(GL_TEXTURE_…) enables a texture target as a data source for rendering. It's completely unnecessary for just generating and uploading textures, so you can omit the bracing glEnable(GL_TEXTURE_2D); … glDisable(GL_TEXTURE_2D) blocks in your loading code. Also, I'd not use gluBuild2DMipmaps (it doesn't support arbitrary texture dimensions, and you're disabling mipmapping anyway) and would just upload directly with glTexImage2D.
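Concretely, d_newTexture could shrink to something like this sketch; it keeps the question's GL_RGBA data and nearest filtering, drops mipmaps entirely, and uploads with glTexImage2D:
void d_newTexture(imgInfo_t info, texMan_t* tex) {
    glGenTextures(1, &tex->texID[tex->texInc]);
    glBindTexture(GL_TEXTURE_2D, tex->texID[tex->texInc]);
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
    /* direct upload: no gluBuild2DMipmaps, no glEnable/glDisable */
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, info.width, info.height, 0,
                  GL_RGBA, GL_UNSIGNED_BYTE, info.data );
    tex->texInc++;
}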
Also, I don't get your need for a texture manager, or at least why your texture manager looks like this. A much better approach would be a hash map from file path to texture ID, plus a reference count.
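As a sketch of that idea (C++ here for brevity; in C you would use any hash-table library), the manager could look like this. The names are illustrative, not from the question's code:
#include <string>
#include <unordered_map>

struct TexEntry { GLuint id; int refcount; };
static std::unordered_map<std::string, TexEntry> g_textures; // file path -> entry

GLuint acquireTexture(const std::string &path) {
    auto it = g_textures.find(path);
    if (it != g_textures.end()) {
        it->second.refcount++;          // already loaded: bump the reference count
        return it->second.id;
    }
    GLuint id = 0;
    // ... decode the image and upload it with glGenTextures/glTexImage2D,
    //     storing the resulting texture name in id ...
    g_textures[path] = TexEntry{ id, 1 };
    return id;
}

void releaseTexture(const std::string &path) {
    auto it = g_textures.find(path);
    if (it != g_textures.end() && --it->second.refcount == 0) {
        glDeleteTextures(1, &it->second.id); // last user gone: free the GL object
        g_textures.erase(it);
    }
}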

Can't load .png image using OpenGL texture

#include <stdio.h>
#include <GL/gl.h>
#include <GL/glut.h>

#define KEY_ESCAPE 27

void display();
void keyboard(unsigned char, int, int);
GLuint LoadTextureRAW( const char * filename, int wrap );

int main(int argc, char **argv) {
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB|GLUT_DOUBLE|GLUT_DEPTH);
    glutInitWindowSize(600, 400);
    glutCreateWindow("Opengl Test");
    glutDisplayFunc(display);
    glutKeyboardFunc(keyboard);
    glutMainLoop();
    return 0;
}

void display() {
    GLuint texture = LoadTextureRAW("ball.png", 1);
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin( GL_QUADS );
        glTexCoord2d(0.0,0.0); glVertex2d(0.0,0.0);
        glTexCoord2d(1.0,0.0); glVertex2d(1.0,0.0);
        glTexCoord2d(1.0,1.0); glVertex2d(1.0,1.0);
        glTexCoord2d(0.0,1.0); glVertex2d(0.0,1.0);
    glEnd();
    glutSwapBuffers();
}

// load a 256x256 RGB .RAW file as a texture
GLuint LoadTextureRAW( const char * filename, int wrap )
{
    GLuint texture;
    int width, height;
    // BYTE * data;
    int *data;
    FILE * file;
    // open texture data
    file = fopen( filename, "rb" );
    if ( file == NULL ) return 0;
    // allocate buffer
    width = 256;
    height = 256;
    data = (int*)malloc( width * height * 3 );
    // read texture data
    fread( data, width * height * 3, 1, file );
    fclose( file );
    // allocate a texture name
    glGenTextures( 1, &texture );
    // select our current texture
    glBindTexture( GL_TEXTURE_2D, texture );
    // select modulate to mix texture with color for shading
    glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
    // when texture area is small, bilinear filter the closest mipmap
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
                     GL_LINEAR_MIPMAP_NEAREST );
    // when texture area is large, bilinear filter the first mipmap
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    // if wrap is true, the texture wraps over at the edges (repeat)
    // ... false, the texture ends at the edges (clamp)
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
                     wrap ? GL_REPEAT : GL_CLAMP );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
                     wrap ? GL_REPEAT : GL_CLAMP );
    // build our texture mipmaps
    gluBuild2DMipmaps( GL_TEXTURE_2D, 3, width, height,
                       GL_RGB, GL_UNSIGNED_BYTE, data );
    // free buffer
    free( data );
    return texture;
}

void keyboard(unsigned char key, int mousePositionX, int mousePositionY) {
    switch ( key ) {
        case KEY_ESCAPE:
            exit( 0 );
            break;
        default:
            break;
    }
}
I followed this: http://www.nullterminator.net/gltexture.html
What should I do?
"LoadTextureRAW()" is not for PNG files. You will need a third party library like libpng to decode a png file, because they are compressed.
If you don't want to implement libpng yourself, which is kind of advanced, then you can probably find a wrapper library on google somewhere.
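For example, the single-header stb_image library is one such wrapper; it decodes PNG (plus JPEG, TGA, and others) into a raw pixel buffer you can hand to glTexImage2D. Below is a sketch of the question's loader rewritten on top of it; the function name is made up, and one source file must define STB_IMAGE_IMPLEMENTATION before including the header:
#include "stb_image.h" // https://github.com/nothings/stb

GLuint LoadTexturePNG( const char * filename, int wrap )
{
    int width, height, channels;
    // force 4 channels (RGBA) regardless of what the file stores
    unsigned char * data = stbi_load( filename, &width, &height, &channels, 4 );
    if ( data == NULL ) return 0;
    GLuint texture;
    glGenTextures( 1, &texture );
    glBindTexture( GL_TEXTURE_2D, texture );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, wrap ? GL_REPEAT : GL_CLAMP );
    glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, wrap ? GL_REPEAT : GL_CLAMP );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
                  GL_RGBA, GL_UNSIGNED_BYTE, data );
    stbi_image_free( data ); // the GL driver keeps its own copy of the pixels
    return texture;
}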
You can find a minimal example of decoding with libpng here:
http://zarb.org/~gc/html/libpng.html