I've followed some tutorials on how to set up and render to an FBO. At first it worked and the scene rendered fine, but it turned out the program was using the integrated GPU (I'm on a laptop).
Then, out of curiosity, I ran it on the higher-performance GPU (an Nvidia GeForce GT 540M) and the screen was all black. At that point I tried saving the FBO's color texture to a file, and the scene was actually being drawn there.
Eventually I found a solution: previously I would only clear the FBO (color and depth buffers), but now I clear both the FBO and the default framebuffer, and the scene renders again.
So the question is: is it bad that I have to call glClear twice? Do I really need both calls? And why would a single clear work on the integrated card?
Here is the relevant code, in case it helps.
Framebuffer initialization
bool FrameBufferObject::initializeFBO( int width, int height ) {
    glActiveTexture( GL_TEXTURE0 );
    if ( !_colorTexture.createEmptyTexture( width, height ) ) {
        return false;
    }
    _textureId = _colorTexture.getTextureId();
    if ( !_depthTexture.createDepthTexture( width, height ) ) {
        return false;
    }
    _depthTextureId = _depthTexture.getTextureId();
    glGenFramebuffers( 1, &_frameBufferID );
    glBindFramebuffer( GL_FRAMEBUFFER, _frameBufferID );
    glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, _textureId, 0 );
    glFramebufferTexture2D( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, _depthTextureId, 0 );
    if ( glCheckFramebufferStatus( GL_FRAMEBUFFER ) != GL_FRAMEBUFFER_COMPLETE ) {
        return false;
    }
    glBindFramebuffer( GL_FRAMEBUFFER, 0 );
    _width = width;
    _height = height;
    _isInitialized = true;
    return true;
}
Color Texture
bool Texture::createEmptyTexture( int width, int height ) {
    if ( isLoaded() ) {
        closeTexture();
    }
    GLuint textureId = 0;
    glGenTextures( 1, &textureId );
    if ( getOpenGLError( "Unable to generate TextureID." ) ) {
        return false;
    }
    glBindTexture( GL_TEXTURE_2D, textureId );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
    glBindTexture( GL_TEXTURE_2D, 0 );
    if ( getOpenGLError( "Error creating empty texture." ) ) {
        glDeleteTextures( 1, &textureId );
        return false;
    }
    _isLoaded = true;
    _textureWidth = _imageWidth = width;
    _textureHeight = _imageHeight = height;
    _textureId = textureId;
    return true;
}
The depth texture is created the same way, except it uses the GL_DEPTH_COMPONENT format:
glTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, NULL );
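One hedged observation here: GL_DEPTH_COMPONENT is an unsized internal format, so each driver picks its own depth precision, and that choice can differ between the Intel and Nvidia drivers. A sized format pins it down. In the sketch below, GL_DEPTH_COMPONENT24 and GL_FLOAT are my substitutions, not the project's actual code:
// Sized internal format so every driver allocates the same depth precision.
// GL_FLOAT is a conventional client type for depth data; since the data
// pointer is NULL, nothing is actually uploaded.
glTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL );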
Post Processing Shader
Vertex shader
#version 330 core
in vec2 inVertex;
in vec2 inTexture;
out vec2 texCoords;
void main() {
    texCoords = inTexture;
    gl_Position = vec4( inVertex.x, inVertex.y, 0, 1 );
}
Fragment shader
#version 330 core
in vec2 texCoords;
uniform sampler2D texture0;
layout(location = 0) out vec4 outColor;
void main() {
    vec4 color = texture( texture0, texCoords );
    outColor = color;
}
The rendering code looks like this:
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); // clear the default framebuffer
fbo.bindFBO();
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); // clear the FBO
// .. render scene ..
fbo.unbindFBO();
// PostProcShader is a namespace
PostProcShader::shader.useShader();
PostProcShader::render( fbo.getColorTexture() );
PostProcShader::shader.disableShader();
SDL_GL_SwapWindow( window );
The post-processing shader simply renders the texture on a screen-sized quad. The scene itself uses three shaders: one for 3D objects, one for the skybox, and one for fonts.
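For context, a minimal sketch of what such a screen-sized-quad pass usually looks like; the attribute and uniform names match the post-processing shader above, but the buffer setup and the colorTextureId / texture0Location variables are assumptions, not the project's actual PostProcShader::render:
// One-time setup: a quad covering clip space [-1,1]^2 (inVertex) with
// matching texture coordinates (inTexture), uploaded to a VBO elsewhere.
const GLfloat quad[] = {
    // inVertex     inTexture
    -1.0f, -1.0f,   0.0f, 0.0f,
     1.0f, -1.0f,   1.0f, 0.0f,
     1.0f,  1.0f,   1.0f, 1.0f,
    -1.0f,  1.0f,   0.0f, 1.0f,
};
// Per frame: bind the FBO's color texture to unit 0 and draw the quad.
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, colorTextureId );  // fbo.getColorTexture()
glUniform1i( texture0Location, 0 );              // sampler "texture0" reads unit 0
glDrawArrays( GL_TRIANGLE_FAN, 0, 4 );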
I'm using C++, SDL2, and (of course) OpenGL/GLEW in VS 2015.
Edit:
Depth Test initialization
glEnable( GL_DEPTH_TEST );
glDepthMask( GL_TRUE );
glDepthFunc( GL_LEQUAL );
glDepthRange( 0.0f, 1.0f );
glClearDepth( 1.0f );
I had a similar problem when I went from Intel HD to Nvidia, but my solution was simple: I had to ensure I called glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ) before rendering to the new framebuffer (on Nvidia), otherwise I would see nothing rendered. I suspect that on Nvidia the depth buffer is not initialized to the same defaults as on Intel, so clearing it sets it to the OpenGL default clear value.
Related
I am trying to get a simple triangle textured, but I get no image on the screen.
Using another shader that takes its colors from a color buffer instead, it works fine.
I checked the content of the BMP image before passing it to GL.
The triangle's UV coordinates are (0,0) / (1,0) / (0.5, 1.0).
This is the part that loads the texture and delivers it to GL:
bmp_s = Load_BMP( tex_file );
glActiveTexture( GL_TEXTURE0 );
glGenTextures( 1, &tex_buf->texture ); // Create the buffer id
glBindTexture( GL_TEXTURE_2D, tex_buf->texture ); // bind it
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, bmp_s->width, bmp_s->height, 0, GL_BGR, GL_UNSIGNED_BYTE, bmp_s->img );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
glGenerateMipmap( GL_TEXTURE_2D );
This is the drawing loop:
do
{
    Process_Input_Events(); // get mouse & kb -- calc mvp / view matrix
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    _uid_mvp = glGetUniformLocation( __sh_list[ 3 ].prg_id, "mvp_mx" );
    _uid_tex_sampler = glGetUniformLocation( __sh_list[ 3 ].prg_id, "tex_sampler" );
    glUseProgram( sh_s->prg_id ); // use the shader
    glUniformMatrix4fv( _uid_mvp, 1, GL_FALSE, &_sys.mvp_mx[ 0 ][ 0 ] ); // send mvp
    glBindVertexArray( vao->id ); // bind vao
    glActiveTexture( GL_TEXTURE0 );
    glBindTexture( GL_TEXTURE_2D, vao->tex_buf->texture );
    glUniform1i( _uid_tex_sampler, 0 );
    glDrawArrays( vao->prim, 0, vao->vx_b->numb );
    glBindVertexArray( 0 ); // unbind vao
    glUseProgram( 0 ); // unbind shader
} while( condition );
And here are the shaders:
#version 450 core
layout( location = 0 ) in vec3 vx;    // the vertex buffer in model space
layout( location = 3 ) in vec2 uv_in; // uv coordinates for each vertex
out vec2 uv;
uniform mat4 mvp_mx;
void main()
{
    gl_Position = mvp_mx * vec4( vx, 1.0f );
    uv = uv_in;
}
#version 450 core
in vec2 uv; // interpolated values from the vertex shader
out vec3 out_color;
uniform sampler2D tex_sampler; // index of the OpenGL texture unit
void main()
{
    out_color = texture( tex_sampler, uv ).rgb;
}
Any help will be appreciated.
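One thing worth double-checking (an observation from the snippet above, not a confirmed diagnosis): the uniform locations are queried from __sh_list[ 3 ].prg_id, while the program that gets bound is sh_s->prg_id. If those are ever different program objects, the glUniform* calls silently target the wrong program. A sketch that keeps them consistent:
// Query and set uniforms against the same program object that is bound.
GLuint prg = sh_s->prg_id; // assumed to be the program the triangle is drawn with
glUseProgram( prg );
glUniformMatrix4fv( glGetUniformLocation( prg, "mvp_mx" ), 1, GL_FALSE, &_sys.mvp_mx[ 0 ][ 0 ] );
glUniform1i( glGetUniformLocation( prg, "tex_sampler" ), 0 ); // sampler reads texture unit 0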
Short story:
When I render anything using a texture loaded like this,
glTexImage2D ( GL_TEXTURE_2D, 0, GL_R8, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, pixels );
I get only black.
Long story:
I can get an RGBA texture with an alpha channel to work (e.g. text with a transparent background).
This code works:
// === load
#define GL_ABGR 0x8000
SDL_Surface * surf = SDL_LoadBMP( "common_resources/dejvu_sans_mono_RGBA.bmp" );
glGenTextures ( 1, &itex );
glBindTexture ( GL_TEXTURE_2D, itex );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_RGBA, surf->w, surf->h, 0, GL_ABGR, GL_UNSIGNED_BYTE, surf->pixels );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// ....
// === render
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, itex );
glColor3f(1.0f,1.0f,1.0f);
glEnable(GL_BLEND);
glEnable(GL_ALPHA_TEST);
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
drawString ( caption, xmin, ymin+12, 6 );
renders correctly (text on a transparent background).
But I'm trying to use single-channel (8-bit grayscale) images/textures instead of RGBA. These I cannot get to render, with or without transparency; whatever I do, I get only a black image.
This code doesn't work:
// === load
#define GL_ABGR 0x8000
SDL_Surface * surf = SDL_LoadBMP( "common_resources/dejvu_sans_mono_Alpha.bmp" );
glGenTextures ( 1, &itex );
glBindTexture ( GL_TEXTURE_2D, itex );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_R8, surf->w, surf->h, 0, GL_RED, GL_UNSIGNED_BYTE, surf->pixels );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
// ....
// === render
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, itex );
glColor3f(1.0f,1.0f,1.0f);
//glEnable(GL_BLEND);
//glEnable(GL_ALPHA_TEST);
//glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
drawString ( caption, xmin, ymin+12, 6 );
renders all black.
Notes:
I know that I should somehow use glTexEnv, according to e.g. here, but my main problem is that apparently the monochrome texture does not render at all.
I also tried GL_LUMINANCE and GL_INTENSITY instead of GL_RED in glTexImage2D, with no difference.
There are other questions like here and here, but mostly for OpenGL > 3.0 and fragment shaders.
Also, is it possible that my graphics card or driver does not support this? I'm on Ubuntu 16.04:
GL_VENDOR: Intel Open Source Technology Center
GL_RENDERER: Mesa DRI Intel(R) HD Graphics 530 (Skylake GT2)
GL_VERSION: 3.0 Mesa 11.2.0
For completeness, although it is not important, drawString looks like this:
void drawString( const char * str, float x, float y, float sz ) {
    const int nchars = 95;
    float persprite = 1.0f / nchars;
    glBegin( GL_QUADS );
    for( int i = 0; i < 65536; i++ ){
        if( str[i] == 0 ) break;   // 0-terminated string
        int isprite = str[i] - 33; // 33 is the offset of the meaningful ASCII characters
        float offset = isprite * persprite + ( persprite * 0.57 );
        float xi = i * sz + x;
        glTexCoord2f( offset          , 1.0f ); glVertex3f( xi,    y,      3.0f );
        glTexCoord2f( offset+persprite, 1.0f ); glVertex3f( xi+sz, y,      3.0f );
        glTexCoord2f( offset+persprite, 0.0f ); glVertex3f( xi+sz, y+sz*2, 3.0f );
        glTexCoord2f( offset          , 0.0f ); glVertex3f( xi,    y+sz*2, 3.0f );
    }
    glEnd();
}
I want to try to help you. In my projects I use these arguments for generating textures from grayscale source images:
glTexImage2D( GL_TEXTURE_2D, 0, 1, width, height, 0, GL_RED, GL_UNSIGNED_BYTE, pixels );
As written in the documentation, the third argument is the number of color components (1 in our case). Check the integer value of GL_R8, or replace it explicitly.
GL_RED means that you place the luminance values in the red channel only (not in each of the red, green, and blue channels, as for a grayscale image).
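Two more hedged suggestions along the same lines. First, single-byte pixels break the default 4-byte row alignment whenever the image width is not a multiple of 4, so it is worth setting the unpack alignment before the upload. Second, if texture swizzling is available (GL_ARB_texture_swizzle, core since OpenGL 3.3 and commonly exposed by Mesa), the red channel can be replicated into the other channels without shaders. A sketch, reusing surf from the question's code:
// Rows of 1-byte pixels are only 4-byte aligned if the width is a multiple
// of 4; relax the default unpack alignment before uploading.
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glTexImage2D( GL_TEXTURE_2D, 0, GL_R8, surf->w, surf->h, 0, GL_RED, GL_UNSIGNED_BYTE, surf->pixels );

// Optional, requires GL_ARB_texture_swizzle (check before use): replicate
// the red channel into RGB and route it to alpha as well.
GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_RED };
glTexParameteriv( GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle );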
I'm writing a Win32 app using https://www.opengl.org/wiki/Image_Load_Store. I want to read a value from the texture attached to the fragment shader (and once that's working, I will change the operation to a write). I'm using GLFW and GLEW; the OpenGL version supported by my graphics card is 4.2. The relevant texture setup code looks like this:
typedef unsigned int BufferType;
int buf_w = 3;
int buf_h = 3;
int buf_size = buf_w * buf_h;
BufferType * bufferCounter = new BufferType[buf_size];
for (int i = 0; i < buf_size; ++i)
    bufferCounter[i] = 200;
glActiveTexture( GL_TEXTURE0 );
glEnable( GL_TEXTURE_2D );
glGenTextures( 1, &tex2d );
glBindTexture( GL_TEXTURE_2D, tex2d );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0 );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0 );
glTexImage2D( GL_TEXTURE_2D, 0, GL_R32UI, buf_w, buf_h, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, bufferCounter );
Main loop:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shader_program);
glBindVertexArray(vao);
glBindTexture( GL_TEXTURE_2D, tex2d );
glBindImageTexture( 0, tex2d, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32UI ); // bind 2d tex to read_tex
glDrawArrays(GL_TRIANGLES, 0, 3);
The vertex shader is trivial, just gl_Position = vec4( vertex_position, 1.0 );. Fragment shader:
#version 430
#extension GL_ARB_shader_image_load_store : enable
in vec3 color;
out vec4 frag_color;
readonly layout(r8ui) uniform uimage2D read_tex;
void main()
{
    frag_color = imageLoad( read_tex, ivec2( 0, 0 ) ); // read from the attached texture
}
I check with glGetError: no errors. The shaders also compile and link. The triangle isn't visible on the screen (but if I set the color to a constant, it is). Any ideas on what might be wrong? In the main loop I tried loc = glGetUniformLocation( shader_program, "read_tex" ); glUniform1i( loc, tex2d ); but that didn't help. Thanks for any help. Unfortunately I couldn't find any example of how to use this OpenGL functionality, and the documentation is scarce.
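For what it's worth, a minimal sketch of how an image uniform is usually wired up. Two details stand out in the code above: the layout format in the shader (r8ui) does not match the GL_R32UI format passed to glBindImageTexture, and the uniform should be set to the image unit index (the first argument of glBindImageTexture), not the texture id:
// GLSL side: the layout qualifier must match the bound image format.
//     readonly layout(r32ui) uniform uimage2D read_tex;

// C++ side: bind the texture to image unit 0 and point the uniform at
// that unit index, not at the texture object name.
glBindImageTexture( 0, tex2d, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32UI );
GLint loc = glGetUniformLocation( shader_program, "read_tex" );
glUniform1i( loc, 0 ); // 0 == the image unit used in glBindImageTexture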
I am trying to apply a 1D texture to lines. It works, but the lines shimmer, and I don't want this shimmering effect. It looks like the picture.
I've also added a video of the shimmering problem:
Shimmering Problem Video
How can I solve this problem? I am adding the code below.
Vertex Shader:
attribute vec4 vertexMC;
uniform mat4 MCVCMatrix;
uniform mat4 VCDCMatrix;
attribute float tcoordMC;
varying float tcoordVC;
void main()
{
    tcoordVC = ( vertexMC.x + vertexMC.y + vertexMC.z ) / 3.0;
    gl_Position = VCDCMatrix * MCVCMatrix * vertexMC;
}
Fragment Shader:
varying float tcoordVC;
uniform sampler1D texture1;
uniform vec3 GridColor;
void main()
{
    gl_FragColor = vec4( GridColor, texture1D( texture1, tcoordVC ).a );
    if ( gl_FragColor.a <= 0.0 ) discard;
}
Texture:
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE1);
glBindTexture( GL_TEXTURE_1D, texture );
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
unsigned char buf[ 16 * 4 ];
for( size_t i=0; i < sizeof( buf ); i+=4 )
{
    buf[ i+0 ] = 255;                              // red
    buf[ i+1 ] = 0;                                // green
    buf[ i+2 ] = 0;                                // blue
    buf[ i+3 ] = ( ( i / 4 ) % 2 == 0 ) ? 255 : 0; // alternate opaque/transparent texels
}
glTexImage1D( GL_TEXTURE_1D, 0, 4, sizeof( buf ) / 4, 0, GL_RGBA, GL_UNSIGNED_BYTE, buf );
InternalShader->Program.SetUniformi("texture1",1);
Draw Section:
glEnable( GL_TEXTURE_1D );
glBindTexture( GL_TEXTURE_1D, texture );
for( unsigned int i = 0; i < GridsInternal.size(); i++ )
{
    AGrid = GridsInternal[i];
    if( !AGrid->GetActivity() )
        continue;
    ACellBO = CellBOs[i];
    AVBO = VBOs[i];
    ALayout = Layouts[i];
    AVBO->Bind();
    SetMapperShaderParameters(i);
    SetPropertyShaderParameters(i);
    ACellBO->ibo.Bind();
    glDrawRangeElements( GL_LINES, 0,
                         static_cast<GLuint>( ALayout->VertexCount - 1 ),
                         static_cast<GLsizei>( ACellBO->indexCount ),
                         GL_UNSIGNED_INT,
                         reinterpret_cast<const GLvoid *>( NULL ) );
    AVBO->Release();
    ACellBO->ibo.Release();
    ACellBO->vao.Release();
}
glDeleteTextures( 1, &texture );
glDisable( GL_TEXTURE_1D );
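A hedged aside, not a confirmed fix: shimmer on thin lines is a classic symptom of point-sampling a texture under minification, and the texture above uses GL_NEAREST for both filters. Mipmapped linear filtering is the usual counter-measure; this sketch only changes the filter parameters from the setup code above:
// Average minified texels instead of snapping between red and transparent.
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glGenerateMipmap( GL_TEXTURE_1D ); // call after glTexImage1D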
This is the before:
http://img22.imageshack.us/img22/5310/beforedes.jpg
and after:
http://img189.imageshack.us/img189/8890/afterr.jpg
EDIT: Now that I look at ImageShack's upload, the artifacts are diminished a great deal, but trust me, they are more pronounced than that.
I don't understand why this is happening. ImageShack converts the uploads to JPG, but in my program the files sit in the image folder as .tif (the reason for .tif is that I couldn't get ANY other format to keep its transparent parts).
Anyway, these artifacts follow the original top edge of the image as it rotates, anywhere except at the original orientation.
Here's the part of my code that loads the image:
GLuint texture;
GLenum texture_format;
GLint nofcolors;
GLfloat spin;
bool Game::loadImage()
{
    SDL_Surface * surface; // this surface will tell us the details of the image
    if ( surface = SM.load_image("Images/tri2.tif") )
    {
        // get the number of channels in the SDL surface
        nofcolors = surface->format->BytesPerPixel;
        // contains an alpha channel
        if ( nofcolors == 4 )
        {
            if ( surface->format->Rmask == 0x000000ff )
                texture_format = GL_RGBA;
            else
                texture_format = GL_BGRA;
        }
        else if ( nofcolors == 3 ) // no alpha channel
        {
            if ( surface->format->Rmask == 0x000000ff )
                texture_format = GL_RGB;
            else
                texture_format = GL_BGR;
        }
        // Have OpenGL generate a texture object handle for us
        glGenTextures( 1, &texture );
        // Bind the texture object
        glBindTexture( GL_TEXTURE_2D, texture );
        // Set the texture’s stretching properties
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
        glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
        glTexImage2D( GL_TEXTURE_2D, 0, nofcolors, surface->w, surface->h, 0, texture_format, GL_UNSIGNED_BYTE, surface->pixels );
        glEnable( GL_TEXTURE_2D );
        glEnable( GL_BLEND );
        glBlendFunc( GL_ONE, GL_ONE_MINUS_SRC_ALPHA );
    }
    else
    {
        SDL_Quit();
        return false;
    }
    // Free the SDL_Surface only if it was successfully created
    if ( surface )
    {
        SDL_FreeSurface( surface );
        return true;
    }
    else return false;
}
void Game::drawImage()
{
    // Clear the screen before drawing
    glClear( GL_COLOR_BUFFER_BIT );
    glTranslatef( float(S_WIDTH/2), float(S_HEIGHT/2), 0.0f );
    glRotatef( spin, 0.0, 0.0, 1.0 );
    // Bind the texture to which subsequent calls refer
    glBindTexture( GL_TEXTURE_2D, texture );
    glBegin( GL_QUADS );
    {
        // Top-left vertex (corner)
        glTexCoord2i( 0, 0 );
        glVertex3f( -64, 0, 0 );
        // Top-right vertex (corner)
        glTexCoord2i( 1, 0 );
        glVertex3f( 64, 0, 0 );
        // Bottom-right vertex (corner)
        glTexCoord2i( 1, 1 );
        glVertex3f( 64, 128, 0 );
        // Bottom-left vertex (corner)
        glTexCoord2i( 0, 1 );
        glVertex3f( -64, 128, 0 );
    }
    glEnd();
    glLoadIdentity();
    SDL_GL_SwapBuffers();
}
Looks like the texture's wrap mode is set to GL_REPEAT. Try GL_CLAMP_TO_EDGE instead.
In Game::loadImage, after your glBindTexture call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Your current setting is GL_REPEAT, which is the OpenGL default.