Qt OpenGL QGLFramebufferObject: Framebuffer incomplete attachment - c++

Can anyone tell me why my code works on my local machine but not on the remote machine I'm trying to push to?
local driver version: NVIDIA-SMI 460.91.03 Driver Version: 460.91.03
remote driver version: NVIDIA-SMI 435.21 Driver Version: 435.21
When trying to run on the remote machine, I keep getting:
QGLFramebufferObject: Framebuffer incomplete attachment.
QGLFramebufferObject: Framebuffer incomplete attachment.
Framebuffer is not valid, may be out of memoryor context is not valid
Could not bind framebuffer
Image passed to GenImage is null!
Framebuffer code:
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
void GlowFramebuffer::Create( int width , int height )
{
    QGLFramebufferObjectFormat format;
    if( m_format == GLOW_R )
    {
        format.setInternalTextureFormat( GL_RED );
        m_framebuffer = QSharedPointer<QGLFramebufferObject>(
            new QGLFramebufferObject( width, height, format ) );
    }
    else if( m_attachment == GLOW_DEPTH_STENCIL )
    {
        format.setAttachment( QGLFramebufferObject::CombinedDepthStencil );
        m_framebuffer = QSharedPointer<QGLFramebufferObject>(
            new QGLFramebufferObject( width, height, format ) );
    }
    else // GLOW_RGBA
    {
        m_framebuffer = QSharedPointer<QGLFramebufferObject>(
            new QGLFramebufferObject( width, height ) );
    }
    SetClearColor( m_clear_color );
}
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
void GlowFramebuffer::Create( const QSize& size )
{
    Create( size.width() , size.height() );
    if( !m_framebuffer->isValid() )
    {
        qCritical() << "Framebuffer is not valid, may be out of memory"
            "or context is not valid";
    }
}
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------*/
int GlowFramebuffer::CopyMultiTexture( GlowFilter filter , GlowFormat format )
{
    GLint width  = m_framebuffer->width();
    GLint height = m_framebuffer->height();

    GLuint FramebufferName = 0;
    glGenFramebuffers( 1, &FramebufferName );
    glBindFramebuffer( GL_FRAMEBUFFER, FramebufferName );

    GLenum glfilter = (filter == GLOW_NEAREST) ? GL_NEAREST : GL_LINEAR;
    GLenum glformat = (format == GLOW_R) ? GL_R : GL_RGBA;

    GLuint renderedTexture;
    glGenTextures( 1, &renderedTexture );

    // "Bind" the newly created texture: all future texture functions will modify this texture
    glBindTexture( GL_TEXTURE_2D, renderedTexture );

    // Give an empty image to OpenGL ( the last "0" )
    glTexImage2D( GL_TEXTURE_2D, 0, glformat, width, height, 0, glformat, GL_UNSIGNED_BYTE, 0 );

    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, glfilter );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, glfilter );

    glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderedTexture, 0 );

    // Set the list of draw buffers.
    GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
    glDrawBuffers( 1, DrawBuffers ); // "1" is the size of DrawBuffers

    GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
    if( status != GL_FRAMEBUFFER_COMPLETE )
    {
        qCritical() << "Error with framebuffer!";
    }

    GLuint handle = m_framebuffer->handle();
    GLClear();

    glBindFramebuffer( GL_DRAW_FRAMEBUFFER , FramebufferName );
    glBindFramebuffer( GL_READ_FRAMEBUFFER , handle );
    glDrawBuffer( GL_BACK );
    glBlitFramebuffer( 0 , 0 , width , height , 0 , 0 , width , height , GL_COLOR_BUFFER_BIT , GL_NEAREST );

    glBindTexture( GL_TEXTURE_2D , 0 );
    glBindFramebuffer( GL_FRAMEBUFFER , handle );
    glDeleteFramebuffers( 1 , &FramebufferName );

    return renderedTexture;
}
I know it's likely because FBOs are specific to each machine and driver. To ensure it's compatible, you need to check the system to make sure the format with which you created your framebuffer is valid.
I think it's failing on the remote machine at this check:
GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
if( status != GL_FRAMEBUFFER_COMPLETE )
{
    qCritical() << "Error with framebuffer!";
}
I think the glformat variable is the format that needs to be adjusted in this call:
glTexImage2D( GL_TEXTURE_2D, 0, glformat, width, height, 0, glformat, GL_UNSIGNED_BYTE, 0 );
How do I have it pick the appropriate format to make the FBO on the remote machine?
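The kind of check I have in mind would be something like this (just a sketch, not code I actually have; it assumes a valid GL context is current, and the helper name is made up):
// Sketch: probe whether a candidate internal/pixel format pair is renderable
// on the current driver by attaching a small throwaway texture to a throwaway
// FBO and checking completeness.
bool IsColorFormatRenderable( GLenum internal_format , GLenum pixel_format )
{
    GLuint fbo = 0 , tex = 0;
    glGenFramebuffers( 1 , &fbo );
    glBindFramebuffer( GL_FRAMEBUFFER , fbo );

    glGenTextures( 1 , &tex );
    glBindTexture( GL_TEXTURE_2D , tex );
    glTexImage2D( GL_TEXTURE_2D , 0 , internal_format , 4 , 4 , 0 ,
                  pixel_format , GL_UNSIGNED_BYTE , 0 );
    glFramebufferTexture2D( GL_FRAMEBUFFER , GL_COLOR_ATTACHMENT0 ,
                            GL_TEXTURE_2D , tex , 0 );

    GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );

    // Clean up the probe objects before reporting the result.
    glBindFramebuffer( GL_FRAMEBUFFER , 0 );
    glDeleteTextures( 1 , &tex );
    glDeleteFramebuffers( 1 , &fbo );

    return status == GL_FRAMEBUFFER_COMPLETE;
}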
Code for the image we're trying to attach to the framebuffer:
bool Product::SetImgInfo(GeoRadarProd &prod, const JsonConfig &config)
{
    DataInput input(FLAGS_input.c_str());
    QString file_name_data = FLAGS_file_name.c_str();
    int width = config.GetInt("width");
    int height = config.GetInt("height");

    if(!input.isValid())
    {
        LOG(INFO) << "Input is not valid";
        return false;
    }

    QByteArray data = input.getByteArray();
    VLOG(3) << "width from config: " << width;
    VLOG(3) << "height from config: " << height;
    VLOG(3) << "data : " << data.size();

    QImage image_data;
    image_data.fromData(data, "PNG");
    VLOG(3) << "what is file_name_data ???: " << file_name_data.toStdString();
    VLOG(3) << "is image_data load???: " << image_data.load(file_name_data, "PNG");
    VLOG(3) << "is image_data null???: " << image_data.isNull();
    VLOG(3) << "image data width: " << image_data.width();
    VLOG(3) << "image data height: " << image_data.height();
    VLOG(3) << "Original Format was tif";
    VLOG(3) << "Data Img H: " << image_data.height() << " W: " << image_data.width();

    QImage new_image(image_data.width(), image_data.height(), QImage::Format_RGBA8888);
    // Format_ARGB32 , Format_RGBA8888_Premultiplied
    VLOG(3) << "New Img H: " << new_image.height() << " W: " << new_image.width();
    VLOG(3) << "Setting img data";

    for(int idx = 0; idx < image_data.width(); idx++)
    {
        for(int idy = 0; idy < image_data.height(); idy++)
        {
            int index_value = image_data.pixelIndex(idx, idy);
            uint color_value;
            if(index_value == 0)
            {
                color_value = qRgba(0, 0, 0, 0);
            }
            else
            {
                //! +1*20 to have a wider spread in the palette
                //! and since the values start from 0
                // index_value = index_value + 1;
                color_value = qRgb((int)index_value, 0, 0);
            }
            new_image.setPixel(idx, idy, color_value);
        }
    }

    const QImage& img = QGLWidget::convertToGLFormat(new_image);
    prod.setQImageData(img);
    return true;
}

The image format you use is never the cause of this completeness error:
Framebuffer incomplete attachment.
This error means that one of the attachments violates OpenGL's rules on attachment completeness. This is not a problem of the wrong format (per se); it is a code bug.
If you see this on one implementation, you should see it on all implementations running the same FBO setup code. Attachment completeness rules are not allowed to change based on the implementation (with one exception, covered below). So if you're seeing this error appear or not appear based on the implementation, one of the following is happening:
There is a bug in one or more of the implementations. Your code may or may not be following the attachment completeness rules, but implementations are supposed to implement the same rules.
You are not in fact using the same FBO setup logic on the different implementations. Since you're creating a QGLFramebufferObject rather than a proper FBO, a lot of the FBO setup code is out of your hands. So maybe stop using Qt for this.
You are running on implementations that differ significantly in which version of OpenGL they implement. See, while the attachment rules don't allow implementation variance, this is only true for a specific version of OpenGL itself. New versions of the API can change the attachment rules, but backwards compatibility guarantees ensure that code that worked on older versions continues to work on newer ones.
But that only works in one direction: code written against newer OpenGL versions may not be compatible with older implementation versions. Not unless you specifically check the standards and write code that ought to work on all versions you intend to run on.
Again, most of your FBO logic is hidden behind Qt's implementation, so there's no simple way to know what's going on. It would be good to ditch Qt and do it yourself to make sure.
Now, the above assumes that when Qt tells you "QGLFramebufferObject: Framebuffer incomplete attachment", it really means the GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT completeness status. It's possible Qt is lying to you and your problem really is the format.
When making textures meant to be used by FBOs, you should stick to the image formats that are required to work for FBO-attached images. GL_RED is not one of them.
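If you do end up building the FBO yourself, a minimal sketch with a sized, required color-renderable format (GL_R8 instead of unsized GL_RED) would look roughly like this (GL 3.0+ assumed; width and height are whatever you were handing to Qt):
// Sketch: single-channel color attachment using a sized, required format.
GLuint tex = 0;
glGenTextures( 1 , &tex );
glBindTexture( GL_TEXTURE_2D , tex );
glTexImage2D( GL_TEXTURE_2D , 0 , GL_R8 ,            // sized internal format
              width , height , 0 ,
              GL_RED , GL_UNSIGNED_BYTE , nullptr );  // no initial pixel data
glTexParameteri( GL_TEXTURE_2D , GL_TEXTURE_MIN_FILTER , GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D , GL_TEXTURE_MAG_FILTER , GL_NEAREST );

GLuint fbo = 0;
glGenFramebuffers( 1 , &fbo );
glBindFramebuffer( GL_FRAMEBUFFER , fbo );
glFramebufferTexture2D( GL_FRAMEBUFFER , GL_COLOR_ATTACHMENT0 ,
                        GL_TEXTURE_2D , tex , 0 );

if( glCheckFramebufferStatus( GL_FRAMEBUFFER ) != GL_FRAMEBUFFER_COMPLETE )
{
    // Handle the failure; with GL_R8 this should not be an attachment problem.
}
If you stay with Qt, passing a sized format such as GL_R8 to setInternalTextureFormat() may also be enough, but I have not verified that path.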

Related

Unable to upload bitmaps from most fonts loaded by Freetype to OpenGL as textures. However, other fonts work perfectly. (with image examples)

I've been facing this issue for months. Every font I load with FreeType works fine; I can prove that to myself by writing the glyphs to a BMP file using stb_write_bmp from the STB library. I can see all of the hundreds of glyph bitmaps written correctly in my folder.
OpenGL, on the other hand, is unable to load the textures from specific fonts, and I see no explicable reason why some load and others don't. I have done everything to try to solve it, but it keeps happening. The textures simply don't load at all. I'm doing exactly the same procedure for every font, so I should be getting the same results.
Examples (with a maximum of 0.1 alpha applied, for debugging invisible squares like the last ones):
Cantarell-Regular.ttf
CPMono_v07-Light.otf
SourceSansPro-Regular.ttf (bugged - no texture)
RobotoCondensed-Regular.ttf (bugged - no texture)
What do the loaded fonts have in common? It's not the extension, at least, since I can render Cantarell's TTF files. All of them, even the ones whose textures don't load, are correctly written as BMPs if I send them to the STB function. So why are the last ones not loaded by OpenGL? This makes no sense to me... there's no consistency except that the ones that don't load never load, and the ones that load always load. But I see nothing that could have a different effect on different fonts, and this is confusing me a lot. I am also capturing the error return of every FT function in an int, and they all come back clean.
This is my renderer's constructor, where everything relevant to text rendering and texture uploading is set up:
if (error = FT_Init_FreeType(&ftlib))
{
std::cout << "Freetype failed to initialize (exit " << error << ')'
<< std::endl;
}
else
{
std::cout << "Freetype initialized";
}
if (error = FT_New_Face(ftlib, fontFile, 0, &face))
{
std::cout << "but could not load font " << face->family_name
<< " (exit " << error << ')' << std::endl;
}
else
{
std::cout << " and loaded font " << face->family_name << std::endl;
FT_Set_Pixel_Sizes(face, 0, height);
/// Determine total length of texture atlas
FT_UInt cindex = 0;
for (
FT_ULong c = FT_Get_First_Char(face, &cindex);
cindex != 0;
c = FT_Get_Next_Char(face, c, &cindex)
) {
if (error = FT_Load_Glyph(face, cindex, FT_LOAD_BITMAP_METRICS_ONLY))
{
std::cout << "Freetype: Could not load glyph "
"(exit " << error << "[1])"
<< std::endl;
continue;
}
else
{
atlasLength += face->glyph->bitmap.width;
if (totalRows < face->glyph->bitmap.rows) {
totalRows = face->glyph->bitmap.rows;
};
}
}
bindShaders();
/// Setup VAO and texture object.
newVertexArray();
bindVertexArray(vertexArray[0]);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glGenTextures(1, &atlas);
glBindTexture(GL_TEXTURE_2D, atlas);
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RED,
atlasLength, totalRows,
0, GL_RED, GL_UNSIGNED_BYTE, 0
);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
GLint offset = 0;
cindex = 0;
/// Start indexing textures
for (
FT_ULong c = FT_Get_First_Char(face, &cindex);
cindex != 0;
c = FT_Get_Next_Char(face, c, &cindex))
{
if (error = FT_Load_Glyph(face, cindex, FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT))
{
std::cout << "Freetype could not load glyph "
"(exit " << error << "[2])" << "\n"
;
continue;
}
else
{
FT_GlyphSlot glyph = face->glyph;
glTexSubImage2D(
GL_TEXTURE_2D, 0, offset, 0,
glyph->bitmap.width, glyph->bitmap.rows,
GL_RED, GL_UNSIGNED_BYTE, glyph->bitmap.buffer
);
Glyph character =
{
static_cast<Float>(offset),
static_cast<Float>(offset) + glyph->bitmap.width,
glm::u16vec2(glyph->bitmap.width, glyph->bitmap.rows),
glm::u16vec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
glm::u16vec2(face->glyph->advance.x, face->glyph->advance.y),
};
characters.insert(std::make_pair(c, character));
}
offset += face->glyph->bitmap.width;
}
}
glBindVertexArray(0);
bindShaders(0);
Am I doing something wrong? As far as I can see there's nothing that could go wrong here.
Edit:
I have found that the font height is playing a part here. For some reason, if I send a value of 12px to FT_Set_Pixel_Sizes, all fonts open. Increasing the height makes Source Sans eventually stop working (I haven't marked exactly at what size it stopped, but it's somewhere around 20px), and at 42px Roboto stops working too. I don't know why, though, since the bitmaps are OK in all these cases. At least this is a bit less mystifying now.
I don't know if it's going to help, but there is one problem with your code: you're ignoring the FT_Bitmap.pitch field. The memory layout of the bitmap depends substantially on that value, yet you never supply it to OpenGL. There is the GL_UNPACK_ROW_LENGTH setting, but that value is in pixels, not bytes.
Try copying these bitmaps to another buffer with a stride equal to glyph width * sizeof(pixel), and then pass that temporary buffer to glTexSubImage2D. If you're on Windows you can call MFCopyImage; otherwise do it manually by calling memcpy() in a loop.
Don't forget the pitch can be negative, which means the FT bitmap is stored with bottom-to-top row order.
Also, check that the FT_Bitmap.pixel_mode field is FT_PIXEL_MODE_GRAY; that's the only value compatible with your GL texture.
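A sketch of that repacking step, meant to replace the direct glTexSubImage2D call in your loop (it assumes FT_PIXEL_MODE_GRAY and a positive pitch, and needs <vector> and <cstring>):
// Sketch: copy each glyph row into a tightly packed buffer before uploading,
// so the texture upload no longer depends on FT_Bitmap.pitch padding.
const FT_Bitmap& bmp = face->glyph->bitmap;
std::vector<unsigned char> packed( size_t(bmp.width) * bmp.rows );

for (unsigned int row = 0; row < bmp.rows; ++row)
{
    // pitch is the stride in bytes (padding included); width is the payload.
    std::memcpy( packed.data() + size_t(row) * bmp.width,
                 bmp.buffer + size_t(row) * size_t(bmp.pitch),
                 bmp.width );
}

glTexSubImage2D( GL_TEXTURE_2D, 0, offset, 0,
                 bmp.width, bmp.rows,
                 GL_RED, GL_UNSIGNED_BYTE, packed.data() );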

Loading a texture using OpenGL

I am trying to load a texture via OpenGL for a 2D platformer, and the code seems to be crashing at this exact part. My lack of knowledge of C++ and OpenGL seems to be the problem; please help!
bool Texture::LoadTextureFromFile( std::string path )
{
//Texture loading success
bool textureLoaded = false;
//Generate and set current image ID
GLuint imgID = 0;
glGenTextures( 1, &imgID );
glBindTexture(GL_TEXTURE_2D ,imgID );
//Load image
GLboolean success = glLoadTextures( path.c_str() );
//Image loaded successfully
if( success == GL_TRUE )
{
//Convert image to RGBA
// success = ilConvertImage( IL_RGBA, IL_UNSIGNED_BYTE );
if( success == GL_TRUE )
{
//Create texture from file pixels
textureLoaded = LoadTextureFromPixels32
( (GLuint*)glGetDoublev, (GLuint*)glGetIntegerv( GL_TEXTURE_WIDTH ), GLuint*(glGetIntegerv( GL_TEXTURE_HEIGHT )) );
}
//Delete file from memory
glDeleteTextures( 1, &imgID );
}
//Report error
if( !textureLoaded )
{
printf( "Unable to load %s\n", path.c_str() );
}
return textureLoaded;
}
There are a few possible issues.
Firstly, where do you get glLoadTextures() from, and what does it do? It's not part of the OpenGL specification. It's possible that this function does the entire texture load for you, and the code below just screws things up.
Next, your first parameter to LoadTextureFromPixels32() is glGetDoublev. You're passing it a pointer to the function glGetDoublev, which is definitely NOT right. I assume that parameter expects a pointer to the loaded image data.
Finally, your code deletes the texture you just created with glDeleteTextures(). That makes no sense. Your texture object's ID is stored in imgID, which is a local variable; you're deleting it, so the texture is gone.
The normal procedure for creating a texture is:
Load the texture data (using something like SDL_image, or another image loading library)
Create the texture object using glGenTextures(); NOTE: This is your handle to the texture; you MUST store this for future use
Bind it glBindTexture()
Load in the image data with glTexImage2D()
That's it.
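In code, the whole procedure boils down to something like this (a sketch; pixels, width, and height are whatever your image loader gives you, assumed to be tightly packed RGBA):
GLuint textureId = 0;                       // this is the handle you must keep around
glGenTextures( 1, &textureId );
glBindTexture( GL_TEXTURE_2D, textureId );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
              GL_RGBA, GL_UNSIGNED_BYTE, pixels );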

How do you upload texture data to a Sparse Texture using TexSubImage in OpenGL?

I am following apitest on github, and am seeing some very strange behavior in my renderer.
It seems like the Virtual Pages are not receiving the correct image data.
Original Image is 500x311:
When I render this image using a sparse texture, I must resize the backing store to 512x384 (to be a multiple of the page size), and my result is:
As you can see, it looks like a portion of the subimage (a sub-sub-image) was loaded into each individual virtual page.
To test this, I cropped the image to the size of just one virtual page (256x128); here is the result:
As expected, the single virtual page was filled with the exact, correct, cropped image.
Lastly, I increased the crop size to two virtual pages' worth, 256x256, one on top of the other. Here is the result:
This seems to show that calling glTexSubImage2D with more texel data than the virtual page size causes errors.
Does care need to be taken when passing data to glTexSubImage2D that is larger than the virtual page size? I see no logic for this in apitest, so I think this could be a driver issue. Or am I missing something major?
Here is some code:
I stored the texture in a texture array and, to simplify, turned the array into just a GL_TEXTURE_2D; both produce exactly the same result. Here is the texture memory allocation:
_check_gl_error();
glGenTextures(1, &mTexId);
glBindTexture(GL_TEXTURE_2D, mTexId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
// TODO: This could be done once per internal format. For now, just do it every time.
GLint indexCount = 0,
xSize = 0,
ySize = 0,
zSize = 0;
GLint bestIndex = -1,
bestXSize = 0,
bestYSize = 0;
glGetInternalformativ(GL_TEXTURE_2D, internalformat, GL_NUM_VIRTUAL_PAGE_SIZES_ARB, 1, &indexCount);
if(indexCount == 0) {
fprintf(stdout, "No Virtual Page Sizes for given format");
fflush(stdout);
}
_check_gl_error();
for (GLint i = 0; i < indexCount; ++i) {
glTexParameteri(GL_TEXTURE_2D, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, i);
glGetInternalformativ(GL_TEXTURE_2D, internalformat, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &xSize);
glGetInternalformativ(GL_TEXTURE_2D, internalformat, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &ySize);
glGetInternalformativ(GL_TEXTURE_2D, internalformat, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &zSize);
// For our purposes, the "best" format is the one that winds up with Z=1 and the largest x and y sizes.
if (zSize == 1) {
if (xSize >= bestXSize && ySize >= bestYSize) {
bestIndex = i;
bestXSize = xSize;
bestYSize = ySize;
}
}
}
_check_gl_error();
mXTileSize = bestXSize;
glTexParameteri(GL_TEXTURE_2D, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, bestIndex);
_check_gl_error();
//Need to ensure that the texture is a multiple of the tile size.
physicalWidth = roundUpToMultiple(width, bestXSize);
physicalHeight = roundUpToMultiple(height, bestYSize);
// We've set all the necessary parameters, now it's time to create the sparse texture.
glTexStorage2D(GL_TEXTURE_2D, levels, GL_RGBA8, physicalWidth, physicalHeight);
_check_gl_error();
for (GLsizei i = 0; i < slices; ++i) {
mFreeList.push(i);
}
_check_gl_error();
mHandle = glGetTextureHandleARB(mTexId);
_check_gl_error();
glMakeTextureHandleResidentARB(mHandle);
_check_gl_error();
mWidth = physicalWidth;
mHeight = physicalHeight;
mLevels = levels;
Here is what happens after the allocation:
glTextureSubImage2DEXT(mTexId, GL_TEXTURE_2D, level, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, data);
I have tried making width and height both the physical dimensions of the backing store AND the width/height of the incoming image content. Neither produces the desired results. I exclude mip levels for now. When I was using mip levels and the texture array I was getting different results, but similar behavior.
Also, the image is loaded with SOIL, and before I implemented sparse textures that worked very well (before sparse I implemented bindless).
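For reference, the roundUpToMultiple() helper used in the allocation code above isn't shown; a version consistent with the 500x311 -> 512x384 rounding described earlier would be (sketch):
// Sketch: round a dimension up to the next multiple of the page size,
// e.g. roundUpToMultiple(500, 256) == 512 and roundUpToMultiple(311, 128) == 384.
static GLsizei roundUpToMultiple( GLsizei value , GLsizei multiple )
{
    return ( ( value + multiple - 1 ) / multiple ) * multiple;
}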

OpenGL multisample integer texture attached to framebuffer doesn't resolve correctly

So, I'm using a multiple-render-target framebuffer where the first color attachment is a color texture (RGBA8), while the second draw buffer (color attachment 1) is an index texture (R32UI).
gl::BindTexture(gl::TEXTURE_2D_MULTISAMPLE, m_Textures[eTEXTURE_COLORBUFFER]);
gl::TexParameteri(gl::TEXTURE_2D_MULTISAMPLE, gl::TEXTURE_BASE_LEVEL, 0);
gl::TexParameteri(gl::TEXTURE_2D_MULTISAMPLE, gl::TEXTURE_MAX_LEVEL, 0);
gl::TexImage2DMultisample(gl::TEXTURE_2D_MULTISAMPLE, m_Multisample,gl::RGBA8,width,height,gl::FALSE_);
gl::BindTexture(gl::TEXTURE_2D_MULTISAMPLE, m_Textures[eTEXTURE_CLASSBUFFER]);
gl::TexParameteri(gl::TEXTURE_2D_MULTISAMPLE, gl::TEXTURE_BASE_LEVEL, 0);
gl::TexParameteri(gl::TEXTURE_2D_MULTISAMPLE, gl::TEXTURE_MAX_LEVEL, 0);
gl::TexImage2DMultisample(gl::TEXTURE_2D_MULTISAMPLE, m_Multisample,gl::R32UI,width,height,gl::FALSE_);
Both textures are multisampled, and I would like to download them to the CPU once the render has completed. However, when I blit the multisample FBO into a single-sample FBO, the index texture data returned is all zeros, while the color texture is correctly resolved.
// Resolve multisampling
if ( m_Multisample > 0 )
{
gl::BindFramebuffer(gl::READ_FRAMEBUFFER, m_Framebuffers[eFBO_RENDERBUFFER]);
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, m_Framebuffers[eFBO_RESOLVEBUFFER]);
gl::BlitFramebuffer(0, 0, m_FrameDims, m_FrameDims, 0, 0, m_FrameDims, m_FrameDims,
gl::COLOR_BUFFER_BIT, gl::NEAREST);
gl::enum_t blit_error = gl::NO_ERROR_;
blit_error = gl::GetError();
if ( blit_error != gl::NO_ERROR_ )
{
throw framebuffer_error(string::format<128>(
"BlitFramebuffer failed with error: %d",blit_error));
}
gl::BindFramebuffer(gl::READ_FRAMEBUFFER, m_Framebuffers[eFBO_RESOLVEBUFFER]);
}
I use the NEAREST flag because it seems that integer textures don't work with LINEAR interpolation.
The code I use to download the images is listed below.
uint32_t* tex_data = new uint32_t[query.m_FrameDims*query.m_FrameDims];
memset(tex_data,0,sizeof(uint32_t)*query.m_FrameDims*query.m_FrameDims);
gl::BindTexture(gl::TEXTURE_2D,query.m_DestColorTexture);
{
// Copy color texture
gl::ReadBuffer(gl::COLOR_ATTACHMENT0);
gl::ReadPixels(0,0,query.m_FrameDims,query.m_FrameDims,
gl::RGBA,gl::UNSIGNED_BYTE,tex_data);
gl::TexSubImage2D(gl::TEXTURE_2D,0,0,0,query.m_FrameDims,query.m_FrameDims,
gl::RGBA,gl::UNSIGNED_BYTE,tex_data);
}
gl::BindTexture(gl::TEXTURE_2D,query.m_DestClassTexture);
{
// Copy class texture
gl::ReadBuffer(gl::COLOR_ATTACHMENT1);
gl::ReadPixels(0,0,query.m_FrameDims,query.m_FrameDims,
gl::RED_INTEGER,gl::UNSIGNED_INT,tex_data);
gl::TexSubImage2D(gl::TEXTURE_2D,0,0,0,query.m_FrameDims,query.m_FrameDims,
gl::RED_INTEGER,gl::UNSIGNED_INT,tex_data);
}
delete[] tex_data;
If I disable the multisample FBO, thereby avoiding the gl::BlitFramebuffer() call, everything works fine.
I don't see any docs saying that integer textures cannot be multisampled, but even so, I'm not sure they make sense at all.
Any clue where I may be going wrong?
You can only read from a single color buffer at a time when you do a framebuffer blit; you can write to many draw buffers (technically up to GL_MAX_DRAW_BUFFERS at once). The problem here is that you want to read from two different color buffers, and you are trying to do it in a single blit.
OpenGL 4.4 Core Specification - 18.3 Copying Pixels (p. 483)
When the color buffer is transferred, values are taken from the read buffer of the read framebuffer and written to each of the draw buffers of the draw framebuffer.
Your code indicates to me that you forgot to actually do a second blit for the second color attachment, and that is why it comes up empty.
To correct this, I would expect to see something like:
// Resolve multisampling
if ( m_Multisample > 0 )
{
//
// Do the first resolve (COLOR_ATTACHMENT0)
//
gl::BindFramebuffer(gl::READ_FRAMEBUFFER,m_Framebuffers[eFBO_RENDERBUFFER]);
gl::ReadBuffer (gl::COLOR_ATTACHMENT0); // Read: Attachment 0 (MSAA)
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER,m_Framebuffers[eFBO_RESOLVEBUFFER]);
gl::DrawBuffer (gl::COLOR_ATTACHMENT0); // Write: Attachment 0 (Resolve)
gl::BlitFramebuffer(0, 0, m_FrameDims, m_FrameDims, 0, 0, m_FrameDims,
m_FrameDims, gl::COLOR_BUFFER_BIT, gl::NEAREST);
gl::enum_t blit_error = gl::NO_ERROR_;
blit_error = gl::GetError();
if ( blit_error != gl::NO_ERROR_ )
{
throw framebuffer_error(string::format<128>(
"BlitFramebuffer failed with error: %d",blit_error));
}
//
// Do the second resolve (COLOR_ATTACHMENT1)
//
gl::BindFramebuffer(gl::READ_FRAMEBUFFER,m_Framebuffers[eFBO_RENDERBUFFER]);
gl::ReadBuffer (gl::COLOR_ATTACHMENT1); // Read: Attachment 1 (MSAA)
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER,m_Framebuffers[eFBO_RESOLVEBUFFER]);
gl::DrawBuffer (gl::COLOR_ATTACHMENT1); // Write: Attachment 1 (Resolve)
gl::BlitFramebuffer(0, 0, m_FrameDims, m_FrameDims, 0, 0, m_FrameDims,
m_FrameDims, gl::COLOR_BUFFER_BIT, gl::NEAREST);
blit_error = gl::GetError();
if ( blit_error != gl::NO_ERROR_ )
{
throw framebuffer_error(string::format<128>(
"BlitFramebuffer failed with error: %d",blit_error));
}
gl::BindFramebuffer(gl::READ_FRAMEBUFFER,m_Framebuffers[eFBO_RESOLVEBUFFER]);
}
This assumes that the textures / renderbuffers you want to blit from/to have the same attachment point. If not, you can adjust gl::ReadBuffer (...) and gl::DrawBuffer (...) accordingly.

SIGSEGV error while using c++ on linux with openGL and SDL

A few other guys and I are taking a crack at building a simple side-scroller-type game. However, I cannot get hold of them to help answer my question, so I put it to you: the following code leaves me with a SIGSEGV error in the place noted below. If anyone can tell me why, I would really appreciate it. If you need any more info, I will be watching this closely.
Main.cpp
Vector2 dudeDim(60,60);
Vector2 dudePos(300, 300);
Entity *test = new Entity("img/images.jpg", dudeDim, dudePos, false);
leads to:
Entity.cpp
Entity::Entity(std::string filename, Vector2 size, Vector2 position, bool passable):
mTexture(filename)
{
mTexture.load(false);
mDimension2D = size;
mPosition2D = position;
mPassable = passable;
}
leads to:
Textures.cpp
void Texture::load(bool generateMipmaps)
{
FREE_IMAGE_FORMAT imgFormat = FIF_UNKNOWN;
FIBITMAP *dib(0);
imgFormat = FreeImage_GetFileType(mFilename.c_str(), 0);
//std::cout << "File format: " << imgFormat << std::endl;
if (FreeImage_FIFSupportsReading(imgFormat)) // Check if the plugin has reading capabilities and load the file
dib = FreeImage_Load(imgFormat, mFilename.c_str());
if (!dib)
std::cout << "Error loading texture files!" << std::endl;
BYTE* bDataPointer = FreeImage_GetBits(dib); // Retrieve the image data
mWidth = FreeImage_GetWidth(dib); // Get the image width and height
mHeight = FreeImage_GetHeight(dib);
mBitsPerPixel = FreeImage_GetBPP(dib);
if (!bDataPointer || !mWidth || !mHeight)
std::cout << "Error loading texture files!" << std::endl;
// Generate and bind ID for this texture
vvvvvvvvvv!!!ERROR HERE!!!vvvvvvvvvvv
glGenTextures(1, &mId);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
glBindTexture(GL_TEXTURE_2D, mId);
int format = mBitsPerPixel == 24 ? GL_BGR_EXT : mBitsPerPixel == 8 ? GL_LUMINANCE : 0;
int iInternalFormat = mBitsPerPixel == 24 ? GL_RGB : GL_DEPTH_COMPONENT;
if(generateMipmaps)
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, mWidth, mHeight, 0, format, GL_UNSIGNED_BYTE, bDataPointer);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); // Linear Filtering
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); // Linear Filtering
//std::cout << "texture generated " << mId << std::endl;
FreeImage_Unload(dib);
}
After reading Peter's suggestion, I have changed my main.cpp file to:
#include <iostream>
#include <vector>
#include "Game.h"
using namespace std;
int main(int argc, char** argv)
{
Game theGame;
/* Initialize game control objects and resources */
if (theGame.onInit() != false)
{
return theGame.onExecute();
}
else
{
return -1;
}
}
It would seem the SIGSEGV error is gone and I'm now left with something not initializing, so thank you Peter, you were correct; now I'm off to solve this issue.
OK, so this is obviously only a small part of the code, but in order to save time and a bit of sanity, all the code is available at:
GitHub Repo
So, after looking at your code, I can say that the problem is probably that you have not initialized your OpenGL context before executing that code.
You need to call your Game::onInit(), which also calls RenderEngine::initGraphics(), before making any calls to OpenGL, which you currently don't do. Your current call chain is main() -> Game ctor (calls the rendering engine ctor, but that ctor doesn't init SDL and OpenGL) -> Entity ctor -> load texture.
For details, look at the OpenGL Wiki FAQ.
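The ordering that matters is roughly this (a sketch, SDL2 shown; with SDL 1.2 the equivalent is SDL_SetVideoMode() with the SDL_OPENGL flag):
// Sketch: a GL context must exist before any gl* call such as glGenTextures().
SDL_Init( SDL_INIT_VIDEO );
SDL_Window* window = SDL_CreateWindow( "game",
                                       SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                       800, 600, SDL_WINDOW_OPENGL );
SDL_GLContext context = SDL_GL_CreateContext( window );  // context is now current

// ... only after this point is it safe to construct Entity/Texture objects
// that call glGenTextures(), glTexImage2D(), and friends.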