OpenGL 4.0 Cubemap issues - C++

I've been reading 'OpenGL 4.0 Shading Language Cookbook', but I've run into a wall with the cubemap tutorial.
The issue is that the model I'm drawing appears completely grey, as if it's not getting any data from the samplerCube texture.
All my code seems to be correct; I've looked at other tutorials and it's the same thing.
I don't know if my Intel HD Graphics 4000 is responsible, but I have made certain that I do have the GL_ARB_texture_cube_map extension.
I'm using the DevIL library for loading images from file, which it seems to do just fine, but from what I can tell something is going wrong when transferring the data to OpenGL.
I'm posting the loading code, where I get the data from the files. All files are loading correctly as well.
I'm also posting the drawing code, where I bind the texture to the pipeline.
And I'm also posting my vertex and fragment shaders just in case, but they do appear to be working as they should.
Any ideas?
Loading code
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
string EXTS[6] =
{
"posx",
"negx",
"posy",
"negy",
"posz",
"negz"
};
// Create & bind cubemap texture
glGenTextures( 1, &cubemap );
glBindTexture( GL_TEXTURE_CUBE_MAP, cubemap );
for( int i = 0; i < 6; i++ )
{
string file = "textures/cubemap_" + EXTS[i] + ".png";
uint image = ilGenImage();
// Load with DevIL
ilBindImage( image );
if( !ilLoadImage( file.c_str() ) )
{
cout << "ERROR: Failed to load image " << endl;
return false;
}
// Fetch info from DevIL
int width = ilGetInteger( IL_IMAGE_WIDTH );
int height = ilGetInteger( IL_IMAGE_HEIGHT );
uint format = ilGetInteger( IL_IMAGE_FORMAT );
uint type = ilGetInteger( IL_IMAGE_TYPE );
// Send data to OpenGL
glTexImage2D(
TARGETS[i],
0,
GL_RGBA,
width,
height,
0,
format,
type,
ilGetData() );
// Error check
if( !ErrorCheck("Failed to bind a side of the cubemap!") )
return false;
// Get rid of DevIL data
ilDeleteImage( image );
}
// Parameters
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameterf( GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE );
Draw code
// Update
glfwPollEvents();
UpdateTime();
// Clear back buffer for new frame
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
// Bind shader
shader->Bind();
// Cubemap
shader->SetUniform( "cubemapTexture", 0 );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_CUBE_MAP, cubemap );
// Bind model
if( model->Bind() )
{
static float angle = 0;
angle += 25.0f * deltaTime;
// Matrices
mat4 world =
translate( vec3( 0.0f, 0.0f, 0.0f) ) *
rotateZ( angle * PI / 180 ) *
rotateX( angle * PI / 180 ) *
scale( vec3( 1.0f, 1.0f, 1.0f) );
mat4 view = ViewMatrix(
cameraPosition,
cameraTarget,
vec3( 0.0f, 0.0f, 1.0f) );
mat4 proj = ProjectionMatrix(
fov,
(float)windowX,
(float)windowY,
nearPlane,
farPlane );
// Uniforms
shader->SetUniform( "uWorld", world );
shader->SetUniform( "uView", view );
shader->SetUniform( "uProj", proj );
shader->SetUniform( "materialColor", vec3( 0.5f, 0.5f, 0.5f ) );
shader->SetUniform( "drawSkybox", false );
shader->SetUniform( "world_cameraPosition", cameraPosition );
shader->SetUniform( "reflectFactor", 0.5f );
// Draw
glDrawElements( GL_TRIANGLES, model->GetIndexCount(), GL_UNSIGNED_SHORT, NULL );
}
// Put the new image on the screen
glfwSwapBuffers( window );
Vertex Shader
#version 400
layout(location=0) in vec3 vertex_position;
layout(location=1) in vec3 vertex_normal;
layout(location=2) in vec4 vertex_tangent;
layout(location=3) in vec2 vertex_texCoords;
out vec2 texCoords;
out vec3 reflectDir;
uniform mat4 uWorld;
uniform mat4 uView;
uniform mat4 uProj;
uniform bool drawSkybox;
uniform vec3 world_cameraPosition;
void main()
{
if( drawSkybox )
{
reflectDir = vertex_position;
}
else
{
vec3 world_pos = vec3( uWorld * vec4(vertex_position,1.0) );
vec3 world_norm = vec3( uWorld * vec4(vertex_normal,0.0) );
vec3 world_view = normalize( world_cameraPosition - world_pos );
reflectDir = reflect( -world_view, world_norm );
}
gl_Position = uProj * uView * uWorld * vec4(vertex_position,1.0);
texCoords = vertex_texCoords;
}
Fragment shader
#version 400
out vec4 fragColor;
in vec2 texCoords;
in vec3 reflectDir;
uniform samplerCube cubemapTexture;
uniform vec3 materialColor;
uniform bool drawSkybox;
uniform float reflectFactor;
void main()
{
vec3 color = texture( cubemapTexture, reflectDir ).rgb;
if( drawSkybox )
{
fragColor = vec4( color, 1.0 );
}
else
{
fragColor = vec4( mix( materialColor, color, reflectFactor ), 1.0 );
}
}

Your cube map texture is not texture complete. All 6 sides need to be specified for a cube map texture to be complete. From the specs:
Additionally, a cube map texture is cube complete if the following conditions all hold true: [..] The level_base arrays of each of the six texture images making up the cube map have identical, positive, and square dimensions.
Your code does not specify an image for NEGATIVE_X:
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
With this table, the image for NEGATIVE_Y is specified twice, but NEGATIVE_X is missing. It should be:
uint TARGETS[6] =
{
GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
Instead of enumerating the 6 targets, you can also use GL_TEXTURE_CUBE_MAP_POSITIVE_X + i for i in the range 0..5 to address the 6 targets.
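As a sketch, using the same DevIL loading and variables as in the question (only the target argument changes; everything else stays as posted):

for( int i = 0; i < 6; i++ )
{
    // ... load face i with DevIL exactly as before ...
    glTexImage2D(
        GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, // the six face targets are consecutive enum values
        0, GL_RGBA, width, height, 0,
        format, type, ilGetData() );
}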

Related

Shadow Mapping OpenGL: shadow not always drawing, and drawing where the position of the light is

I have been trying to do basic shadow mapping in my custom engine, using LearnOpenGL as the source. The link for the exact tutorial can be found here.
I have been debugging this bug for around two weeks, researching the internet and trying to wrap my head around it, but all I can say is that the shadow almost never appears, and when it does appear it is drawn where the light position is in terms of x and z. I tried to do everything exactly like in the tutorial around 10 times, and I also checked this website for similar questions, but none of the cases I found matched mine.
Findings
In Image (1) you can see that the shadow is not visible when the light is directly above the object, but it becomes visible in Image (2) when the lightPos.x variable is around -4.5 or 4.5; the same goes for the lightPos.z variable. When the shadow appears, it is drawn where lightPos is, which is circled in red in the pictures.
I use multiple shaders: one for the light and shadow calculations (ShadowMapping) and one for basic depth mapping (ShadowMapGen).
Here is my ShadowMapping shader:
ShadowMapping Vertex
#version 460
in vec3 vertexIn;
in vec3 normalIn;
in vec2 textureIn;
out vec3 FragPos;
out vec3 normalOut;
out vec2 textureOut;
out vec4 FragPosLightSpace;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 lightSpaceMatrix;
void main()
{
textureOut = textureIn;
FragPos = vec3(model * vec4(vertexIn, 1.0));
normalOut = mat3(transpose(inverse(model))) * normalIn;
FragPosLightSpace = lightSpaceMatrix * vec4(FragPos, 1.0);
gl_Position = projection * view * model * vec4(vertexIn, 1.0);
}
ShadowMapping Frag
out vec4 FragColor;
in vec3 FragPos;
in vec3 normalOut;
in vec2 textureOut;
in vec4 FragPosLightSpace;
uniform sampler2D diffuseTexture;
uniform sampler2D shadowMap;
uniform vec3 lightPos;
uniform vec3 viewPos;
float ShadowCalculation(vec4 fragPosLightSpace, vec3 lightdir)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(shadowMap, projCoords.xy).r;
// get depth of current fragment from light's perspective
float currentDepth = projCoords.z;
// check whether current frag pos is in shadow
float bias = max(0.05 * (1.0 - dot(normalOut, lightdir)), 0.005);
// check whether current frag pos is in shadow
// float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
// // PCF
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(shadowMap, 0);
for(int x = -1; x <= 1; ++x)
{
for(int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
// keep the shadow at 0.0 when outside the far_plane region of the light's frustum.
if(projCoords.z > 1.0)
shadow = 0.0;
return shadow;
}
void main()
{
vec3 color = texture(diffuseTexture, textureOut).rgb;
vec3 normal = normalize(normalOut);
vec3 lightColor = vec3(1.0f);
// ambient
vec3 ambient = 0.30 * color;
// diffuse
vec3 lightDir = normalize(lightPos - FragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(viewPos - FragPos);
vec3 reflectDir = reflect(-lightDir, normal);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = ShadowCalculation(FragPosLightSpace, lightDir);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
FragColor = vec4(lighting, 1.0);
}
ShadowMapGen Vertex
Fragment Shader is empty for this shader
#version 460
in vec3 vertexIn;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;
void main()
{
gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
}
Variable initialisation
lightPos = glm::vec3(-2.0f, 4.0f, -1.0f);
near_plane = 1.0f;
far_plane = 7.5f;
//SAMPLE 2D Uniform binding
TheShader::Instance()->SendUniformData("ShadowMapping_diffuseTexture", 0);
TheShader::Instance()->SendUniformData("ShadowMapping_shadowMap", 1);
Depth Map Framebuffer Generation
This is how I generate my depth map/ shadow map texture in the constructor of my scene:
glGenFramebuffers(1, &depthMapFBO);
//Create depth texture
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); // Height and Width = 1024
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
float borderColor[] = { 1.0, 1.0, 1.0, 1.0 };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
//Attach depth texture as FBO's depth buffer
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Then, in an Update() function that runs in the engine's main while loop, I first do:
Render Objects from light's perspective
//Light Projection and view Matrix
m_lightProjection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, near_plane, far_plane);
m_lightView = glm::lookAt(lightPos, glm::vec3(0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
//Calculate light matrix and send it.
m_lightSpaceMatrix = m_lightProjection * m_lightView;
TheShader::Instance()->SendUniformData("ShadowMapGen_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Render to Framebuffer depth Map
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
//Set current Shader to ShadowMapGen
m_floor.SetShader("ShadowMapGen");
m_moon.SetShader("ShadowMapGen");
//Send model Matrix to current Shader
m_floor.Draw();
m_moon.Draw();
//Set current Shader back to ShadowMapping
m_moon.SetShader("ShadowMapping");
m_floor.SetShader("ShadowMapping");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Render Objects from Camera's perspective
glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Update Camera and Send the view and projection matrices to the ShadowMapping shader
m_freeCamera->Update();
m_freeCamera->Draw();
//Send Light Pos
TheShader::Instance()->SendUniformData("ShadowMapping_lightPos", lightPos);
//Send LightSpaceMatrix
TheShader::Instance()->SendUniformData("ShadowMapping_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Activate Shadow Mapping texture
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthMap);
//Send model Matrix to ShadowMapping shaders
m_moon.Draw();
m_floor.Draw();
I hope someone will see this, thank you for your time.
I tried to do everything exactly like in the tutorial around 10 times
Well, you seem to have missed at least one obvious thing:
m_lightSpaceMatrix = m_lightProjection * m_lightView;
So far, so good, but in your "ShadowMapGen" vertex shader, you wrote:
gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
So you end up with model * projection * view multiplication order, which does not make sense no matter which conventions you adhere to. Since the tutorial uses default GL conventions, you always need projection * view * model * vertex multiplication order, which the tutorial also correctly uses.
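Following that convention, the depth-pass position would be computed along these lines (lightSpaceMatrix already contains projection * view, so the model matrix and the vertex come last):

gl_Position = lightSpaceMatrix * model * vec4(vertexIn, 1.0);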

Point Sprite Alpha Blending Issue

I'm trying to render a load of stars in 3D space using point sprites. My texture image is definitely 'good' as far as the alpha channel is concerned. It renders perfectly fine as a quad, but when I render a lot of point sprites I can see the square border of one image overwriting some of the others.
The above image shows the star being rendered nicely over the top of my cube. In the bottom right I would expect to see a continuous trail of star images in the 'flower' pattern they're being drawn in.
What am I doing wrong?
Fragment shader:
#version 140
#extension GL_ARB_explicit_attrib_location : enable
precision mediump float;
uniform sampler2D uTexture;
void main( void )
{
gl_FragColor = texture2D( uTexture, gl_PointCoord );
}
Vertex shader:
#version 140
#extension GL_ARB_explicit_attrib_location : enable
precision mediump float;
layout (location = 0) in float aTheta;
uniform float uK;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main( void )
{
float x = cos( uK * aTheta ) * sin( aTheta );
float y = cos( uK * aTheta ) * cos( aTheta );
gl_Position = projection * view * model * vec4( x, y, 0.0, 1.0 );
gl_PointSize = 64.0;
}
Code:
void CPoint::Render( void )
{
g_PointProgram.UseProgram();
glEnable( GL_PROGRAM_POINT_SIZE );
glEnable( GL_POINT_SPRITE );
// Set the alpha blending function
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
glBlendEquation( GL_FUNC_ADD );
glBindBuffer( GL_ARRAY_BUFFER, m_VBO );
glm::mat4 Model = glm::mat4( 1.0 );
Model = glm::translate( Model, glm::vec3( 2.0f, 0.0f, 4.0f ) );
glm::mat4 Projection = g_Camera.GetProjection();
glm::mat4 View = g_Camera.GetView();
g_PointProgram.SetUint( "uTexture", g_ImgStar.m_Texture );
g_PointProgram.SetMatrix4fv( "model", Model );
g_PointProgram.SetMatrix4fv( "view", View );
g_PointProgram.SetMatrix4fv( "projection", Projection );
g_PointProgram.SetFloat( "uK", m_Emitter.k );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, g_ImgStar.m_Texture );
glEnable( GL_TEXTURE_2D );
// Attributes
glVertexAttribPointer( 0, // 1st attribute array (only have 1)
1, // One theta angle per particle
GL_FLOAT, // Data is floating point type
GL_FALSE, // No fixed point scaling
sizeof( Particle ), // No gaps in data
(void*) 0 ); // Start from "theta" offset within bound buffer
glEnableVertexAttribArray( 0 );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableVertexAttribArray( 0 );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDisable( GL_TEXTURE_2D );
}
Perhaps depth culling is playing tricks on you? Try glDepthMask(false) while rendering the sprites.
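For example, a minimal sketch around the existing draw call in CPoint::Render(), leaving everything else as posted:

glDepthMask( GL_FALSE );                     // keep depth testing, but don't write sprite depth
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDepthMask( GL_TRUE );                      // restore depth writes for later geometry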

Update a QImage using a shader in QOpenGLWidget

I have a simple vertex shader
static const char *vertexShader=
"attribute vec4 vPosition; \n"
"void main(){\n"
"gl_Position = vPosition;\n"
"}";
Also I have a shader which creates a "Billboard" effect on an image.
static const char *fragmentShader=
"uniform float grid;\n"
"uniform float dividerValue;\n"
"uniform float step_x;\n"
"uniform float step_y;\n"
"uniform sampler2D source;\n"
"uniform lowp float qt_Opacity;\n"
"uniform vec2 qt_TexCoord0;\n"
"void main(){\n"
"vec2 uv = qt_TexCoord0.xy;\n"
"float offx = floor(uv.x / (grid * step_x));\n"
"float offy = floor(uv.y / (grid * step_y));\n"
"vec3 res = texture2D(source, vec2(offx * grid * step_x , offy * grid * step_y)).rgb;\n"
"vec2 prc = fract(uv / vec2(grid * step_x, grid * step_y));\n"
"vec2 pw = pow(abs(prc - 0.5), vec2(2.0));\n"
"float rs = pow(0.45, 2.0);\n"
"float gr = smoothstep(rs - 0.1, rs + 0.1, pw.x + pw.y);\n"
"float y = (res.r + res.g + res.b) / 3.0;\n"
"vec3 ra = res / y;\n"
"float ls = 0.3;\n"
"float lb = ceil(y / ls);\n"
"float lf = ls * lb + 0.3;\n"
"res = lf * res;\n"
"vec3 col = mix(res, vec3(0.1, 0.1, 0.1), gr);\n"
"if (uv.x < dividerValue)\n"
"gl_FragColor = qt_Opacity * vec4(col, 1.0);\n"
"else\n"
"gl_FragColor = qt_Opacity * texture2D(source, uv);\n"
"}";
What I'd like to do is use this shader to apply the effect to an image in a QOpenGLWidget. But I don't get how to set my image as a texture, pass it to the shader, and then get it back modified by the shader effect. What I want to achieve is: https://imgur.com/a/NSY0u but my shader doesn't affect the image: https://imgur.com/a/dgSfq . My GLWidget class:
GLWidget::GLWidget(Helper *helper, QWidget *parent)
: QOpenGLWidget(parent), helper(helper)
{
QImage img("E:\\pictures\\0151.jpg");
image = img;
image = image.convertToFormat(QImage::Format_RGBA8888);
setFixedSize(512, 512);
setAutoFillBackground(false);
targetWidth = width();
targetHeight = height();
qDebug() << "targetWidth="<<targetWidth;
qDebug() << "targetHeight ="<<targetHeight ;
//this values i am trying to pass to my fragment shader
grid = 5.0;//grid on image
dividerValue = 0.5;
step_x = 0.0015625;
step_y = height() ? (2.5 * step_x * targetWidth / targetHeight) : 0.0;
}
void GLWidget::initializeGL()
{
initializeOpenGLFunctions();
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShader);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment,fragmentShader);//?
m_program->link();
m_program->bind();
m_program->release();
}
//we can use paintEvent to display our image with opengl
void GLWidget::paintEvent(QPaintEvent *event)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_program->bind();
QPainter painter;
painter.begin(this);
painter.drawImage(0,0,image);
QOpenGLTexture texture(image); //I dont know how to setUniformValue(m_program->uniformLocation("source"),texture) to my shader
GLuint m_texture;
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, image.width(), image.height(), 0, GL_BGRA, GL_UNSIGNED_BYTE, image.bits());
glGenerateMipmap(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_2D);
//open an image
m_program->setUniformValue("grid", grid);
m_program->setUniformValue("dividerValue",dividerValue);
m_program->setUniformValue("step_x", step_x);
m_program->setUniformValue("step_y", step_y);
m_program->setUniformValue(m_program->uniformLocation("source"),m_texture);
painter.end();
m_program->release();
}
When you bind a texture, it is bound to the currently active texture image unit (See Binding textures to samplers).
The active texture unit can be selected by glActiveTexture. The default texture unit is GL_TEXTURE0.
The value that you have to provide to the texture sampler uniform is not the name of a texture object; it is the index of the texture unit that the texture is bound to:
int texture_unit = 0; // <----- e.g. texture unit 0
glActiveTexture( GL_TEXTURE0 + texture_unit );
glBindTexture( GL_TEXTURE_2D, m_texture );
.....
m_program->bind();
m_program->setUniformValue( "source", texture_unit ); // <----- texture unit
For a QOpenGLTexture object the texture unit can be selected by QOpenGLTexture::bind:
int texture_unit = 1; // <----- e.g. texture unit 1
QOpenGLTexture texture(image);
texture.bind( texture_unit );
m_program->bind();
m_program->setUniformValue( "source", texture_unit ); // <----- texture unit
Note that since OpenGL 4.2 the texture unit can be set within the shader by a binding point:
layout(binding = 0) uniform sampler2D source; // binding = 0 -> texture unit 0
Extension to the answer:
The following code will draw the image to the entire widget, processing it with your shader. Finally, the rendered image is read back from the GPU:
class GLWidget : public QOpenGLWidget
{
.....
QOpenGLShaderProgram * m_program = nullptr;
QOpenGLTexture * m_texture = nullptr;
};
void GLWidget::initializeGL()
{
initializeOpenGLFunctions();
QImage img("E:\\pictures\\0151.jpg");
m_texture = new QOpenGLTexture( img );
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShader);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShader);
m_program->bindAttributeLocation("vPosition", 0);
m_program->link();
}
void GLWidget::paintEvent(QPaintEvent *event)
{
// clear the framebuffer
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// bind the texture
uint texture_unit = 1;
m_texture->bind( texture_unit );
// activate the shader
m_program->bind();
m_program->setUniformValue( "source", texture_unit );
m_program->setUniformValue( "grid", grid );
m_program->setUniformValue( "dividerValue", dividerValue );
m_program->setUniformValue( "step_x", step_x );
m_program->setUniformValue( "step_y", step_y );
// draw a quad over the entire widget
GLfloat vertices[]{ -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f };
m_program->enableAttributeArray(0);
m_program->setAttributeArray(0, GL_FLOAT, vertices, 2);
glDrawArrays( GL_TRIANGLE_STRIP, 0, 4 );
m_program->disableAttributeArray(0);
// release the shader
m_program->release();
// read the rendered image
int width = ....;
int height = ....;
unsigned char *pixels = new unsigned char[width * height * 4];
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
QImage *img = new QImage( pixels, width, height, QImage::Format_RGBA8888 );
.....
}
Furthermore, you have to make some changes to the vertex shader and the fragment shader. In the vertex shader you have to pass the vertex position on to the fragment shader:
attribute vec2 vPosition;
varying vec2 v_pos;
void main()
{
v_pos = vPosition.xy;
gl_Position = vec4(vPosition.xy, 0.0, 1.0);
}
In the fragment shader you have to calculate the texture coordinate from the vertex position:
varying vec2 v_pos;
void main()
{
vec2 uv = v_pos.xy * 0.5 + 0.5;
....
}
See also glwidget.cpp Example File.

Textures appear pure black, unless I pass an invalid value to texture uniform location

My program has been working perfectly so far, but it turns out that I've been lucky. I began doing some cleanup of the shader, because it was full of experimental stuff, and I had the following line at the end of the fragment shader:
gl_FragColor = final_color * (texture2D(tex, gl_TexCoord[0].st)*1.0 + texture2D(tex2, gl_TexCoord[0].st)*1.0);
I attempted to clean it up and I had the following declared at the top:
uniform sampler2D tex, tex2;
Changing these lines to:
gl_FragColor = final_color * texture2D(tex, gl_TexCoord[0].st);
and
uniform sampler2D tex;
actually broke the program (black screen), even though I am doing
GLuint tex_loc = glGetUniformLocation(shader_prog_id_, "tex");
glUniform1i(tex_loc, texture_id_);
in my main code. I'm sure it's a texture issue and not a GLSL compiler error, because I can add 1.0 to the output and end up with a white silhouette of my mesh.
The strangeness begins when I change the lines in my shader to:
gl_FragColor = final_color * texture2D(tex2, gl_TexCoord[0].st);
and
uniform sampler2D tex2;
but still retrieve the location for tex. The program works as it always has, even though inspecting the value of tex_loc in the debugger indicates an error. I'm not happy doing this, and now that I'm trying to load multiple textures, it will cause bigger headaches down the line.
I'm using VBOs, in interleaved format, to render the geometry. I'm passing in the vertex position, normal and texcoord this way.
There are other questions with the "black texture" issue, but they're using immediate mode calls and setting the wrong texture state. I tried changing the texture unit before supplying the arrays, with no success.
Here is as much relevant code as possible from the main program:
void MeshWidget::draw() {
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0f, 0.0f, -4.0f + zoom_factor_);
glRotatef(rotX, 1.0f, 0.0f, 0.0f);
glRotatef(rotY, 0.0f, 1.0f, 0.0f);
glRotatef(rotZ, 0.0f, 0.0f, 1.0f);
// Auto centre mesh based on vertex bounds.
glTranslatef(-x_mid_, -y_mid_, -z_mid_);
glDrawElements(GL_TRIANGLES, mesh_.num_indices, GL_UNSIGNED_SHORT, BUFFER_OFFSET(0)); //The starting point of the IBO
}
void MeshWidget::openMesh(const string& filename) {
if (mesh_filename_ != filename) {
clearMeshData(mesh_);
glDeleteBuffersARB(1, &VertexVBOID);
glDeleteBuffersARB(1, &IndexVBOID);
ReadMsh(mesh_, filename);
// Create buffer objects here.
glGenBuffersARB(1, &VertexVBOID);
glBindBufferARB(GL_ARRAY_BUFFER, VertexVBOID);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(VertexAttributes)*mesh_.num_vertices, &mesh_.vertices[0], GL_STATIC_DRAW);
glGenBuffersARB(1, &IndexVBOID);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID);
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER, sizeof(uint16_t)*mesh_.num_indices, &mesh_.indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, VertexVBOID);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, sizeof(VertexAttributes), BUFFER_OFFSET(0)); //The starting point of the VBO, for the vertices
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, sizeof(VertexAttributes), BUFFER_OFFSET(12)); //The starting point of normals, 12 bytes away
glClientActiveTexture(GL_TEXTURE0);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(VertexAttributes), BUFFER_OFFSET(24)); //The starting point of texcoords, 24 bytes away
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IndexVBOID);
if (startup_done_) updateGL();
}
}
void MeshWidget::openTexture(const string& filename) {
size_t dot = filename.find_last_of('.');
string ext(filename, dot, filename.size()); // 3rd parameter should be length of new string, but is internally clipped to end.
glActiveTexture(GL_TEXTURE0);
glClientActiveTexture(GL_TEXTURE0);
glDeleteTextures(1, &texture_id_);
glGenTextures(1, &texture_id_);
glBindTexture(GL_TEXTURE_2D, texture_id_);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
if (ext == ".dds") {
if (GLEE_EXT_texture_compression_s3tc) {
texture_id_ = SOIL_load_OGL_texture(filename.c_str(), SOIL_LOAD_AUTO, texture_id_, SOIL_FLAG_DDS_LOAD_DIRECT);
// SOIL takes care of calling glTexParams, glTexImage2D, etc.
yflip_texture_ = true;
} else {
//std::cout << "S3TC not supported on this graphics hardware." << std::endl;
// TODO: Error message in status bar?
}
} else {
QImage tex(filename.c_str());
tex = QGLWidget::convertToGLFormat(tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tex.width(), tex.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, tex.bits());
yflip_texture_ = false;
}
updateUniforms();
if (startup_done_) updateGL();
}
void MeshWidget::updateUniforms() {
GLuint texture_flip_uniform = glGetUniformLocation(shader_prog_id_, "yflip");
glUniform1f(texture_flip_uniform, float(yflip_texture_ * 1.0f));
GLuint tex_loc = glGetUniformLocation(shader_prog_id_, "tex");
glUniform1i(tex_loc, texture_id_);
}
And my shaders (there's still some junk in here because I was experimenting, but nothing that affects the output):
varying vec3 normal, lightDir, eyeVec;
uniform float yflip;
void main()
{
normal = gl_NormalMatrix * gl_Normal;
vec3 vVertex = vec3(gl_ModelViewMatrix * gl_Vertex);
lightDir = vec3(gl_LightSource[0].position.xyz - vVertex);
eyeVec = -vVertex;
gl_TexCoord[0].x = gl_MultiTexCoord0.x;
gl_TexCoord[1].x = gl_MultiTexCoord1.x;
if (yflip == 1.0) {
gl_TexCoord[0].y = 1 - gl_MultiTexCoord0.y;
gl_TexCoord[1].y = 1 - gl_MultiTexCoord1.y;
} else {
gl_TexCoord[0].y = gl_MultiTexCoord0.y;
gl_TexCoord[1].y = gl_MultiTexCoord1.y;
}
gl_Position = ftransform();
}
Fragment shader:
varying vec3 normal, lightDir, eyeVec;
uniform sampler2D tex2;
void main (void)
{
vec4 texel = texture2D(tex2, gl_TexCoord[0].st);
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * gl_FrontMaterial.ambient) +
(gl_LightSource[0].ambient * gl_FrontMaterial.ambient);
vec3 N = normalize(normal);
vec3 L = normalize(lightDir);
float lambertTerm = dot(N,L);
if(lambertTerm > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular;
}
gl_FragColor = final_color * texel;
}
glUniform1i(tex_loc, texture_id_);
The second parameter should specify the ID of a texture unit (hint: glActiveTexture sets the currently active texture unit), not the ID of a particular texture object.
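For example, assuming the texture is meant to live on texture unit 0, something along these lines:

glActiveTexture( GL_TEXTURE0 );               // select texture unit 0
glBindTexture( GL_TEXTURE_2D, texture_id_ );  // bind the texture object to that unit
GLuint tex_loc = glGetUniformLocation( shader_prog_id_, "tex" );
glUniform1i( tex_loc, 0 );                    // pass the unit index, not the texture name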

Parallax mapping glitch in OpenGL

And this is the result when I invert the tangent vector right after transferring it to the vertex shader:
The "shadow" is in the wrong place.
(And it works only when I rotate it through the Y axis, so the last image seems to present a correctly parallax-mapped cube.)
I'M SURE IT IS NOT A TANGENT VECTOR OR TEXTURE COORDINATE PROBLEM
Because
I used exactly the same tangent calculation functions and exactly the same cube position, normal and texture coordinate data as in the working demo.
On top of that, I exported the arrays with position/texcoord/normal/tangent data into a .txt file and saw exactly what I expected (the same pos/tex/norm data as in the working demo, including the calculated tangents, which I managed to export from the working demo).
The next argument is that I copied my shader code into the working demo and it still works.
Another one is that I tried multiple ways to render this cube: a VBO with glVertexAttribPointer, a VBO with the tangent saved as another texture coordinate (as in the demo), and a display list with glVertexAttrib4f. The result is... EXACTLY THE SAME.
The height map is loading correctly; I tried to set it as the diffuse map and it looked OK.
glGetError() gives me no errors, and the shader compile logs say the same.
It is probably something with the camera or init states.
Maybe posting the init code will help.
void CDepthBase::OpenGLSet() {
glEnable( GL_TEXTURE_2D );
glShadeModel( GL_SMOOTH );
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f );
glClearDepth( 1.0f );
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glDepthFunc( GL_LEQUAL );
glEnable(GL_DEPTH_TEST);
glBlendFunc( GL_ONE, GL_ONE );
GLfloat ratio;
glViewport(0, 0, ResolutionWidth, ResolutionHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45.0f, ResolutionWidth / (float)ResolutionHeight, 0.1f, 900.0f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
if (GLEW_OK != glewInit()) {
MBX("Failed to init GLEW.", "Error");
}
if (glewIsSupported("GL_ARB_vertex_buffer_object")) {
VBO_supported = true;
} else VBO_supported = false;
glHint( GL_FOG_HINT, GL_DONT_CARE );
glHint( GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST );
glShadeModel(GL_SMOOTH);
glAlphaFunc(GL_ALWAYS, 0);
}
By the way, I'm using the OpenGL Extension Wrangler (GLEW) for extensions.
Shader code & log (this exported file contains code which was directly passed to glShaderSource):
Vertex shader was successfully compiled to run on hardware.
Fragment shader was successfully compiled to run on hardware.
Fragment shader(s) linked, vertex shader(s) linked.
------------------------------------------------------------------------------------------
varying vec3 lightDir;
varying vec3 viewDir;
attribute vec4 tangent;
void main()
{
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_TexCoord[0] = gl_MultiTexCoord0;
vec3 vertexPos = vec3(gl_ModelViewMatrix * gl_Vertex);
vec3 tn = tangent.xyz;
vec3 n = normalize(gl_NormalMatrix * gl_Normal);
vec3 t = normalize(gl_NormalMatrix * tangent.xyz);
vec3 b = cross(t, n) * -tangent.w;
mat3 tbnMatrix = mat3(t.x, b.x, n.x,
t.y, b.y, n.y,
t.z, b.z, n.z);
lightDir = (gl_LightSource[0].position.xyz - vertexPos) / 100.0;
lightDir = tbnMatrix * lightDir;
viewDir = -vertexPos;
viewDir = tbnMatrix * viewDir;
}
-----------------------------------------------------------------------------------------
varying vec3 lightDir;
varying vec3 viewDir;
uniform sampler2D diffuseMap;
uniform sampler2D normalMap;
uniform sampler2D heightMap;
uniform float scale;
uniform float bias;
void main()
{
vec3 v = normalize(viewDir);
vec2 TexCoord = gl_TexCoord[0].st;
{
float height = texture2D(heightMap, gl_TexCoord[0].st).r;
height = height * scale + bias;
TexCoord = gl_TexCoord[0].st + (height * v.xy);
}
vec3 l = lightDir;
float atten = max(0.0, 1.0 - dot(l, l));
l = normalize(l);
vec3 n = normalize(texture2D(normalMap, TexCoord).rgb * 2.0 - 1.0);
vec3 h = normalize(l + v);
float nDotL = max(0.0, dot(n, l));
float nDotH = max(0.0, dot(n, h));
float power = (nDotL == 0.0) ? 0.0 : pow(nDotH, gl_FrontMaterial.shininess);
vec4 ambient = gl_FrontLightProduct[0].ambient * atten;
vec4 diffuse = gl_FrontLightProduct[0].diffuse * nDotL * atten;
vec4 specular = gl_FrontLightProduct[0].specular * power * atten;
vec4 color = gl_FrontLightModelProduct.sceneColor + ambient + diffuse + specular;color *= texture2D(diffuseMap,TexCoord);
gl_FragColor = color ;
}
The uniforms are working correctly, because the results are the same if I replace them with constant values.
Compiling shader:
void __Shader::import(){
if(imported) __Shader::~__Shader();
v = glCreateShader(GL_VERTEX_SHADER);
f = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(v, 1, (const GLchar **)&vsrc.cstr,NULL);
glShaderSource(f, 1, (const GLchar **)&fsrc.cstr,NULL);
glCompileShader(v);
glCompileShader(f);
p = glCreateProgram();
glAttachShader(p,v);
glAttachShader(p,f);
if(_flags & NORMAL_MAPPING)
glBindAttribLocation(p, ATTRIB_TANGENT, "tangent");
glLinkProgram(p);
if(_flags & DIFFUSE_MAPPING)
diffuseUni.loc = glGetUniformLocation(p, "diffuseMap");
if(_flags & NORMAL_MAPPING)
normalUni.loc = glGetUniformLocation(p, "normalMap");
if(_flags & PARALLAX_MAPPING)
heightUni.loc = glGetUniformLocation(p, "heightMap");
if(_flags & SPECULAR_MAPPING)
specularUni.loc = glGetUniformLocation(p, "specularMap");
imported = true;
}
Setting attribute in VBO:
if(tangents.size() > 0){
buffered |= 3;
glGenBuffers(1, &VBO_tangent);
glBindBuffer(GL_ARRAY_BUFFER, VBO_tangent);
glBufferData(GL_ARRAY_BUFFER, tangents.size()*sizeof(tangent), tangents.get_ptr(), GL_STATIC_DRAW);
}
// and in draw:
if(buffered & 3) {
glBindBuffer(GL_ARRAY_BUFFER, VBO_tangent);
glVertexAttribPointer(__Shader::ATTRIB_TANGENT, 4, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(__Shader::ATTRIB_TANGENT);
}
and a small note
for(int i = 0; i < responders.size(); ++i)
if(strstr(responders[i].idea, "tangent problem"))
responders[i].please_dont_talk();
Just tell me your other ideas about what could be the reason for these bad results.
Whew... already solved it. The problem was with loading the texture files, even though I did not see any issues with diffuse mapping or even with diffuse+normal mapping. I was using IMG_Load from SDL; maybe I used it the wrong way, but it did not work for me. It was probably the normal map that got messed up.
Bad texture import code:
if(imported || filenamez.length() < 1) return;
SDL_Surface* surface = 0;
surface = IMG_Load(filenamez.c_str());
if (surface) {
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
bool endianess = filenamez.substr(filenamez.length()-4) == ".jpg";
glTexImage2D(GL_TEXTURE_2D, 0, 3, surface->w, surface->h, 0,
(endianess ? GL_RGB : GL_BGR), GL_UNSIGNED_BYTE, surface->pixels);
}
BEWARE!
I'm now using HBITMAP-based texture loading taken from the dhpoware demo I was talking about, and it works fine.
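As an aside, if you ever go back to SDL_image, a sketch of an upload that derives the pixel format from the surface instead of the file extension (assuming SDL2; this is not the loader I ended up using):

SDL_Surface* surface = IMG_Load( filenamez.c_str() );
if( surface )
{
    // pick the GL format from the surface layout rather than guessing from ".jpg"
    GLenum fmt;
    if( surface->format->BytesPerPixel == 4 )
        fmt = ( surface->format->Rmask == 0x000000ff ) ? GL_RGBA : GL_BGRA;
    else
        fmt = ( surface->format->Rmask == 0x000000ff ) ? GL_RGB : GL_BGR;
    glGenTextures( 1, &texture );
    glBindTexture( GL_TEXTURE_2D, texture );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
    glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
    glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, surface->w, surface->h, 0,
                  fmt, GL_UNSIGNED_BYTE, surface->pixels );
    SDL_FreeSurface( surface );
}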
Peace.
After 2-3 days of hard debugging, let me feel a little bit of euphoria.
Oh, I almost forgot, the final result: