FreeType bitmap is empty

I am using FreeType to display text, with the help of this tutorial: http://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Text_Rendering_01
But all I see is empty squares. I played around a lot with the shaders and came to the conclusion that the alpha value of the squares is always 0.0. Also, for some reason, the program crashes after the first time I compile edited text shaders.
This is the code:
Vertex Shader
#version 420
//vertexData.xy contains the position, vertexData.zw contains the UV coordinates
in vec4 vertexData;
out vec2 TexCoord;
void main(){
gl_Position = vec4(vertexData.x, vertexData.y, 0.0, 1.0);
TexCoord = vertexData.zw;
}
Fragment Shader
#version 420
in vec2 TexCoord;
out vec4 fragData;
uniform sampler2D tex;
uniform vec4 TextColor;
void main(){
//TextColor.rgb is the text color; texture(tex, TexCoord).r is the glyph alpha value
fragData = vec4(TextColor.rgb, texture(tex, TexCoord).r * TextColor.a);
}
FreeType
//FreeType init and face creation are in Font::init()
void Font::renderText(const char *text, float x, float y, float sx, float sy, float rgba[4]){
//Attribute and Uniform Locations
GLint vDataLoc = ShaderResource::TextProgram->getAttributeLocation("vertexData");
GLint textColLoc = ShaderResource::TextProgram->getUniformLocation("TextColor");
GLint texSamplerLoc = ShaderResource::TextProgram->getUniformLocation("tex");
const char *p;
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
FT_Set_Pixel_Sizes(face, 0, 48);
//iterate through text
for(p = text; *p; p++) {
if(FT_Load_Char(face, *p, FT_LOAD_RENDER))
continue;
FT_GlyphSlot g = face->glyph;
int width = to_nearest_pow2( g->bitmap.width );
int height = to_nearest_pow2( g->bitmap.rows );
glUniform4fv(textColLoc, 1, rgba);
glEnable(GL_TEXTURE_2D);
Texture Tex;
Tex = Texture();
Tex.bind();
Tex.setParameter(GL_TEXTURE_WRAP_S, GL_CLAMP);
Tex.setParameter(GL_TEXTURE_WRAP_T, GL_CLAMP);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
//At first I thought I was passing the wrong formats to glTexImage2D, so I played around with GL_ALPHA, GL_LUMINANCE_ALPHA, GL_LUMINANCE8_ALPHA8, etc.
//I believe the issue is that g->bitmap.buffer was not created properly; am I not loading that char correctly?
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RED,
width,
height,
0,
GL_RED,
GL_UNSIGNED_BYTE,
g->bitmap.buffer
);
Tex.active(0);
glUniform1i(texSamplerLoc, 0);
//Then I create the text box and draw it with a vertex array object and VBOs using GL_TRIANGLE_STRIP, etc.
Main.cpp FreeType function call
//Disable the normal shader, enable the text shader
mainProgram->disable();
textProgram->use();
//Scale x & scale y: window provides the framebuffer width and height
float sx = 2.0 / window->getFrameBufferWidth();
float sy = 2.0 / window->getFrameBufferHeight();
//Text color
float color[4] = {0.0,0.0,0.0,1.0};
IHateComicSans.renderText("Hello World!", 0.0, 0.0, sx, sy, color);
textProgram->disable();
mainProgram->use();
After glTexImage2D, glGetError returns GL_INVALID_OPERATION, but only during the last iteration over the text.

The problem was in my Texture class; I fixed it by just using plain OpenGL functions instead.
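For reference, a minimal sketch of what the raw-OpenGL per-glyph upload can look like (this is not the exact fix from the Texture class, just an illustration; it reuses face and texSamplerLoc from the code above). The filter and wrap parameters matter: the default minification filter expects mipmaps, so without them the texture is incomplete and sampling it returns zero, which matches the always-zero alpha described above.

// Sketch only: upload one FreeType glyph with plain OpenGL calls.
FT_GlyphSlot g = face->glyph;

GLuint tex;
glGenTextures(1, &tex);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex);

// Glyph bitmaps are tightly packed, one byte per pixel.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED,
             g->bitmap.width, g->bitmap.rows, 0,
             GL_RED, GL_UNSIGNED_BYTE, g->bitmap.buffer);

// Without these the texture is mipmap-incomplete and samples as black.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

glUniform1i(texSamplerLoc, 0); // sampler "tex" reads texture unit 0

// ... draw the glyph quad, then free or cache the texture ...
glDeleteTextures(1, &tex);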

Related

Update a QImage using a shader in QOpenGLWidget

I have a simple vertex shader
static const char *vertexShader=
"attribute vec4 vPosition; \n"
"void main(){\n"
"gl_Position = vPosition;\n"
"}";
Also I have a shader which creates a "Billboard" effect on an image.
static const char *fragmentShader=
"uniform float grid;\n"
"uniform float dividerValue;\n"
"uniform float step_x;\n"
"uniform float step_y;\n"
"uniform sampler2D source;\n"
"uniform lowp float qt_Opacity;\n"
"uniform vec2 qt_TexCoord0;\n"
"void main(){\n"
"vec2 uv = qt_TexCoord0.xy;\n"
"float offx = floor(uv.x / (grid * step_x));\n"
"float offy = floor(uv.y / (grid * step_y));\n"
"vec3 res = texture2D(source, vec2(offx * grid * step_x , offy * grid * step_y)).rgb;\n"
"vec2 prc = fract(uv / vec2(grid * step_x, grid * step_y));\n"
"vec2 pw = pow(abs(prc - 0.5), vec2(2.0));\n"
"float rs = pow(0.45, 2.0);\n"
"float gr = smoothstep(rs - 0.1, rs + 0.1, pw.x + pw.y);\n"
"float y = (res.r + res.g + res.b) / 3.0;\n"
"vec3 ra = res / y;\n"
"float ls = 0.3;\n"
"float lb = ceil(y / ls);\n"
"float lf = ls * lb + 0.3;\n"
"res = lf * res;\n"
"vec3 col = mix(res, vec3(0.1, 0.1, 0.1), gr);\n"
"if (uv.x < dividerValue)\n"
"gl_FragColor = qt_Opacity * vec4(col, 1.0);\n"
"else\n"
"gl_FragColor = qt_Opacity * texture2D(source, uv);\n"
"}";
What I'd like to do is use this shader to apply the effect to an image in a QOpenGLWidget. But I don't understand how to set my image as a texture, pass it to the shader, and then get it back modified by the shader effect. What I want to achieve is https://imgur.com/a/NSY0u, but my shader doesn't affect the image: https://imgur.com/a/dgSfq. My GLWidget class:
GLWidget::GLWidget(Helper *helper, QWidget *parent)
: QOpenGLWidget(parent), helper(helper)
{
QImage img("E:\\pictures\\0151.jpg");
image = img;
image = image.convertToFormat(QImage::Format_RGBA8888);
setFixedSize(512, 512);
setAutoFillBackground(false);
targetWidth = width();
targetHeight = height();
qDebug() << "targetWidth="<<targetWidth;
qDebug() << "targetHeight ="<<targetHeight ;
//this values i am trying to pass to my fragment shader
grid = 5.0;//grid on image
dividerValue = 0.5;
step_x = 0.0015625;
step_y = height() ? (2.5 * step_x * targetWidth / targetHeight) : 0.0;
}
void GLWidget::initializeGL()
{
initializeOpenGLFunctions();
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShader);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment,fragmentShader);//?
m_program->link();
m_program->bind();
m_program->release();
}
//we can use paintEvent to display our image with opengl
void GLWidget::paintEvent(QPaintEvent *event)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_program->bind();
QPainter painter;
painter.begin(this);
painter.drawImage(0,0,image);
QOpenGLTexture texture(image); //I don't know how to setUniformValue(m_program->uniformLocation("source"), texture) for my shader
GLuint m_texture;
glGenTextures(1, &m_texture);
glBindTexture(GL_TEXTURE_2D, m_texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, image.width(), image.height(), 0, GL_BGRA, GL_UNSIGNED_BYTE, image.bits());
glGenerateMipmap(GL_TEXTURE_2D);
glEnable(GL_TEXTURE_2D);
//open an image
m_program->setUniformValue("grid", grid);
m_program->setUniformValue("dividerValue",dividerValue);
m_program->setUniformValue("step_x", step_x);
m_program->setUniformValue("step_y", step_y);
m_program->setUniformValue(m_program->uniformLocation("source"),m_texture);
painter.end();
m_program->release();
}
When you bind a texture, it is bound to the currently active texture image unit (See Binding textures to samplers).
The active texture unit can be selected by glActiveTexture. The default texture unit is GL_TEXTURE0.
The value that you have to provide to the texture sampler uniform is not the name (ID) of a texture; it is the index of the texture unit the texture is bound to:
int texture_unit = 0; // <----- e.g. texture unit 0
glActiveTexture( GL_TEXTURE0 + texture_unit );
glBindTexture( GL_TEXTURE_2D, m_texture );
.....
m_program->bind();
m_program->setUniformValue( "source", texture_unit ); // <----- texture unit
For a QOpenGLTexture object the texture unit can be selected by QOpenGLTexture::bind:
int texture_unit = 1; // <----- e.g. texture unit 1
QOpenGLTexture texture(image);
texture.bind( texture_unit );
m_program->bind();
m_program->setUniformValue( "source", texture_unit ); // <----- texture unit
Note that since OpenGL 4.2 the texture unit can be set within the shader, by a binding point:
layout(binding = 0) uniform sampler2D source; // binding = 0 -> texture unit 0
Extension to the answer:
The following code will draw the image to the entire widget, processing it with your shader. Finally, the rendered image is read back from the GPU:
class GLWidget : public QOpenGLWidget
{
.....
QOpenGLShaderProgram * m_program = nullptr;
QOpenGLTexture * m_texture = nullptr;
};
void GLWidget::initializeGL()
{
initializeOpenGLFunctions();
QImage img("E:\\pictures\\0151.jpg");
m_texture = new QOpenGLTexture( img );
m_program = new QOpenGLShaderProgram;
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex, vertexShader);
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment, fragmentShader);
m_program->bindAttributeLocation("vPosition", 0);
m_program->link();
}
void GLWidget::paintEvent(QPaintEvent *event)
{
// clear the framebuffer
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// bind the texture
uint texture_unit = 1;
m_texture->bind( texture_unit );
// activate the shader
m_program->bind();
m_program->setUniformValue( "source", texture_unit );
m_program->setUniformValue( "grid", grid );
m_program->setUniformValue( "dividerValue", dividerValue );
m_program->setUniformValue( "step_x", step_x );
m_program->setUniformValue( "step_y", step_y );
// draw a quad over the entire widget
GLfloat vertices[]{ -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, 1.0f };
m_program->enableAttributeArray(0);
m_program->setAttributeArray(0, GL_FLOAT, vertices, 2);
glDrawArrays( GL_TRIANGLE_STRIP, 0, 4 );
m_program->disableAttributeArray(0);
// release the shader
m_program->release();
// read the rendered image
int width = ....;
int height = ....;
unsigned char *pixels = new unsigned char[width * height * 4];
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
QImage *img = new QImage( pixels, width, height, QImage::Format_RGBA8888 );
.....
}
Furthermore, you have to make some changes to the vertex shader and the fragment shader. In the vertex shader you have to pass the vertex position on to the fragment shader:
attribute vec2 vPosition;
varying vec2 v_pos;
void main()
{
v_pos = vPosition.xy;
gl_Position = vec4(vPosition.xy, 0.0, 1.0);
}
In the fragment shader you have to calculate the texture coordinate from the vertex position:
varying vec2 v_pos;
void main()
{
vec2 uv = v_pos.xy * 0.5 + 0.5;
....
}
See also glwidget.cpp Example File.

Problems using glReadPixels with GLSL

I'm trying to render some particles and save the scene to a BMP file.
Here is my code:
// vertex shader
const char *vertexShader = STRINGIFY(
uniform float pointRadius; // point size in world space
uniform float pointScale; // scale to calculate size in pixels
void main()
{
// calculate window-space point size
vec3 posEye = vec3(gl_ModelViewMatrix * vec4(gl_Vertex.xyz, 1.0));
float dist = length(posEye);
gl_PointSize = pointRadius * (pointScale / dist);
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_ModelViewProjectionMatrix * vec4(gl_Vertex.xyz, 1.0);
gl_FrontColor = gl_Color;
}
);
// pixel shader for rendering points as shaded spheres
const char *spherePixelShader = STRINGIFY(
void main()
{
const vec3 lightDir = vec3(0.577, 0.577, 0.577);
// calculate normal from texture coordinates
vec3 N ;
N.xy = gl_TexCoord[0].xy*vec2(2.0, -2.0) + vec2(-1.0, 1.0);
float mag = dot(N.xy, N.xy);
if (mag > 1.0) discard; // kill pixels outside circle
N.z = sqrt(1.0 - mag);
// calculate lighting
float diffuse = max(0.0, dot(lightDir, N));
gl_FragColor = gl_Color *diffuse;
}
);
Here is the rendering code.
The particle positions are stored in the VBO target_point_buffer, along with the corresponding color data.
void display()
{
//pointsprite
glEnable(GL_POINT_SPRITE);
glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE_NV);
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
//attach shader
glUseProgram(program);
glUniform1f(glGetUniformLocation(program, "pointScale"), winHeight / tanf(fov*0.5f*(float)M_PI / 180.0f));
glUniform1f(glGetUniformLocation(program, "pointRadius"),radius[0]*scale);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//use vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(3, GL_DOUBLE, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
//color buffer
glBindBuffer(GL_ARRAY_BUFFER, color_vbo);
glColorPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_COLOR_ARRAY);
Drawsomething();
}
//Save the scene as a BMP file
void save_as_bmp(char *filename)
{
GLbyte pBits[Imagesize];
GLint iViewPort[4];
GLint lastBuffer;
glGetIntegerv(GL_VIEWPORT,iViewPort);
glGetIntegerv(GL_READ_BUFFER, &lastBuffer);
glReadPixels(iViewPort[0], iViewPort[1], iViewPort[2], iViewPort[3], GL_BGR, GL_UNSIGNED_BYTE, pBits);
writeBMP(filename,pBits);
}
I've got the expected scene like this:
However, when I tried to save the scene as a BMP file, the result was not what I expected:
I suppose it might be something wrong with the gl_TexCoord in the shader, but I can't figure it out. Can anyone help?
Set GL_PACK_ALIGNMENT to 1 before your glReadPixels() call if you're going to use a three-component format like GL_BGR with GL_UNSIGNED_BYTE.
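A minimal sketch of that change applied to the save_as_bmp function above (writeBMP comes from the question; the std::vector is only for illustration):

#include <vector>

// Sketch: read back the framebuffer with byte-tight row packing.
void save_as_bmp(char *filename)
{
    GLint iViewPort[4];
    glGetIntegerv(GL_VIEWPORT, iViewPort);

    // The default GL_PACK_ALIGNMENT is 4: rows of GL_BGR data get padded
    // unless the row size is a multiple of 4, which skews the saved image.
    glPixelStorei(GL_PACK_ALIGNMENT, 1);

    std::vector<GLubyte> pixels(iViewPort[2] * iViewPort[3] * 3);
    glReadPixels(iViewPort[0], iViewPort[1], iViewPort[2], iViewPort[3],
                 GL_BGR, GL_UNSIGNED_BYTE, pixels.data());

    writeBMP(filename, reinterpret_cast<GLbyte *>(pixels.data()));
}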

Shadow Map: whole mesh is in shadow, there is no light where it should be according to depth map

This is my first time trying to implement a shadow map using OpenGL and the GLSL shader language.
I think the first pass, where I render to a texture, is correct, but when I compare the depth values it seems to shadow everything.
https://www.dropbox.com/s/myxenx9y41yz2fc/Screenshot%202014-12-09%2012.18.53.png?dl=0
My perspective projection matrix looks like this:
FOV = 90
Aspect = according to the program's window size (I also tried different values here)
Near = 2
Far = 10000
Function to initialize the frame buffer
void OpenGLWin::initDepthMap()
{
//Framebuffer
m_glFunctions->glGenFramebuffers(1, &m_frameBuffer);
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, m_frameBuffer);
//////////////////////////////////////////////////////////////////////////
//Texture to render scene to
m_glFunctions->glGenTextures(1, &m_renderToTexture);
//Bind created texture to make it current
m_glFunctions->glBindTexture(GL_TEXTURE_2D, m_renderToTexture);
//Creates an empty texture of specified size.
//m_glFunctions->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1024, 768, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
m_glFunctions->glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 1024, 1024, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
m_glFunctions->glDrawBuffer(GL_NONE);
m_glFunctions->glReadBuffer(GL_NONE);
// Always check that our framebuffer is ok
if (m_glFunctions->glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE){
qDebug() << "FrameBuffer not OK";
return;
}
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
Draw function for each mesh. The model matrix is passed as an argument from a Transform class's draw function.
void Mesh::draw(const Matrix4x4& projection, const Matrix4x4& view, const Matrix4x4& model)
{
//Shadow map pass 1
if (m_shadowMapFirstpass){
//Pass 1 Shaders
m_glFunctions->glUseProgram(m_depthRTTShaderProgram);
//Light view matrix
m_depthMVP = projection*view*model;
//Get the location of the uniform name mvp
GLuint depthMVPLocation = m_glFunctions->glGetUniformLocation(m_depthRTTShaderProgram, "depthMVP");
m_glFunctions->glUniformMatrix4fv(depthMVPLocation, 1, GL_TRUE, &m_depthMVP[0][0]);
m_shadowMapFirstpass = false;
}
//Shadow map pass 2
else if(m_shadowMapFirstpass == false){
//Pass 2 Shader
m_glFunctions->glUseProgram(m_shaderProgram);
//Gets the model matrix which is then multiplied with view and projection to form the mvp matrix
Matrix4x4 mvp = projection * view * model;
//Get the location of the uniform name mvp
GLuint mvpLocation = m_glFunctions->glGetUniformLocation(m_shaderProgram, "mvp");
//Send the mvp matrix to the vertex shader
m_glFunctions->glUniformMatrix4fv(mvpLocation, 1, GL_TRUE, &mvp[0][0]);
Matrix4x4 depthBiasMVP = m_depthMVP;// biasMatrix*m_depthMVP;
GLuint depthBiasMVPLocation = m_glFunctions->glGetUniformLocation(m_shaderProgram, "depthBiasMVP");
m_glFunctions->glUniformMatrix4fv(depthBiasMVPLocation, 1, GL_TRUE, &depthBiasMVP[0][0]);
m_shadowMapFirstpass = true;
}
//Bind this mesh VAO
m_glFunctions->glBindVertexArray(m_vao);
//Draw the triangles using the index buffer(EBO)
glDrawElements(GL_TRIANGLES, m_indices.size(), GL_UNSIGNED_INT, 0);
//Unbind the VAO
m_glFunctions->glBindVertexArray(0);
/////////////////////////////////////////////////////////////////////////////////////////////////////
//Calls the children's draw functions
if (!m_children.empty())
{
for (int i = 0; i < m_children.size(); i++)
{
if (m_children[i] != NULL)
{
m_children[i]->draw(frustumCheck, projection, view, bvScaleFactor, model);
}
}
}
}
My render loop
void OpenGLWin::paintGL()
{
// m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, m_frameBuffer);
m_glFunctions->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, m_frameBuffer);
glViewport(0, 0, 1024, 1024);
// Clear the buffer with the current clearing color
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Light View Matrix
Matrix4x4 lightView;
lightView.lookAt(Vector3(0, 0, 0), Vector3(0, 0, -1), Vector3(0, 1, 0));
//Draw scene to Texture
m_root->draw(m_projection, lightView);
///////////////////////////////////////////////////////////////////
//Draw to real scene
m_glFunctions->glBindFramebuffer(GL_FRAMEBUFFER, 0);
// m_glFunctions->glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Bind Pass 2 shader
m_glFunctions->glUseProgram(m_shadowMapShaderProgram->getShaderProgramID());
GLuint shadowMapLocation = m_glFunctions->glGetUniformLocation(m_shadowMapShaderProgram->getShaderProgramID(), "shadowMap");
//Shadow Texture
m_glFunctions->glActiveTexture(GL_TEXTURE0);
m_glFunctions->glBindTexture(GL_TEXTURE_2D, m_renderToTexture);
m_glFunctions->glUniform1i(shadowMapLocation, 0);
//Updates matrices and view matrix for player camera
m_root->update(m_view);
//Render scene to main frame buffer
m_root->draw(m_projection, m_view);
}
Pass 1 Vertex Shader
#version 330 core
//Passthrough vertex shader
uniform mat4 depthMVP;
//Vertex received from the program
layout(location = 0) in vec3 vertexPosition_modelspace;
void main(void)
{
//Output position of vertex in clip space
gl_Position = depthMVP * vec4(vertexPosition_modelspace, 1);
}
Pass 1 Fragment Shader
#version 330 core
//Render to texture
// Output data
layout(location = 0) out float depthValue;
void main(void)
{
depthValue = gl_FragCoord.z;
}
Pass 2 Vertex Shader
#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
out vec4 ShadowCoord;
// Values that stay constant for the whole mesh.
uniform mat4 mvp;
uniform mat4 depthBiasMVP;
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = mvp * vec4(vertexPosition_modelspace,1);
ShadowCoord = depthBiasMVP * vec4(vertexPosition_modelspace,1);
}
Pass 2 Fragment Shader
#version 330 core
in vec4 ShadowCoord;
// Output data
layout(location = 0) out vec3 color;
// Values that stay constant for the whole mesh.
uniform sampler2D shadowMap;
void main(){
float visibility=1.0;
vec3 ProjCoords = ShadowCoord.xyz / ShadowCoord.w;
vec2 UVCoords;
UVCoords.x = 0.5 * ProjCoords.x + 0.5;
UVCoords.y = 0.5 * ProjCoords.y + 0.5;
float z = 0.5 * ProjCoords.z + 0.5;
float Depth = texture(shadowMap, UVCoords).z;//or x
if (Depth < (z + 0.00001)){
visibility = 0.1;
}
color = visibility*vec3(1,0,0);
}
Disable texture comparison for one thing. That's only valid when used with sampler2DShadow and you clearly are not using that in your code because your texture coordinates are 2D.
This means replacing the following code:
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE);
With this instead:
m_glFunctions->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
Likewise, using GL_LINEAR filtering on a non-sampler2DShadow texture is a bad idea. That is going to average the 4 nearest depth values and give you a single depth back. But that's not the proper way to anti-alias shadows; you actually want to average the result of 4 depth tests instead of doing a single test on the average of 4 depths.
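As a rough sketch only (not part of the posted code), a manual 2x2 PCF loop for the Pass 2 fragment shader keeps the plain sampler2D, performs one depth test per tap, and averages the test results; the shadowTexelSize uniform (1.0 / shadow-map resolution) is an assumed addition the application would have to supply:

// Hypothetical helper for the Pass 2 fragment shader.
uniform vec2 shadowTexelSize; // assumed new uniform: 1.0 / shadow map size

float pcfVisibility(vec2 uv, float z)
{
    float sum = 0.0;
    for (int x = 0; x <= 1; ++x)
    {
        for (int y = 0; y <= 1; ++y)
        {
            // One depth fetch and one test per tap...
            float depth = texture(shadowMap, uv + vec2(x, y) * shadowTexelSize).r;
            sum += (depth < z + 0.00001) ? 0.1 : 1.0;
        }
    }
    return sum / 4.0; // ...then average the four test results
}

// In main(): color = pcfVisibility(UVCoords, z) * vec3(1, 0, 0);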

OpenGL: Multisampling texture y axis inverted?

I have a problem with a multisampled texture. It seems that after blitting it to another surface for rendering, it's flipped upside down. What might cause that? Should I provide some code?
edit: Well, it's gonna be a lot of code, but here we go. This is how I create my surfaces / textures:
protected override void Create(int width, int height, SurfaceFormat format)
{
this.format = format;
bool multisample = format.Multisampling > 0;
int samples = Math.Max(0, Math.Min(format.Multisampling, 4));
format.TextureTarget = multisample ? TextureTarget.Texture2DMultisample : format.TextureTarget;
format.MipMapping = format.MipMapping && format.TextureTarget == TextureTarget.Texture2D;
Width = width;
Height = height;
textureHandle = GL.GenTexture();
//bind texture
GL.BindTexture(format.TextureTarget, textureHandle);
Log.Error("Bound Texture: " + GL.GetError());
if (format.TextureTarget == TextureTarget.Texture2D)
{
GL.TexParameter(format.TextureTarget, TextureParameterName.TextureMinFilter, (int)(format.MipMapping ? TextureMinFilter.LinearMipmapLinear : TextureMinFilter.Linear));
GL.TexParameter(format.TextureTarget, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TexParameter(format.TextureTarget, TextureParameterName.TextureWrapS, (int)format.WrapMode);
GL.TexParameter(format.TextureTarget, TextureParameterName.TextureWrapT, (int)format.WrapMode);
}
Log.Debug("Created Texture Parameters: " + GL.GetError());
if (samples < 1)
GL.TexImage2D(format.TextureTarget, 0, format.InternalFormat, Width, Height, 0, format.PixelFormat, format.SourceType, format.Pixels);
else
GL.TexImage2DMultisample(TextureTargetMultisample.Texture2DMultisample, samples, format.InternalFormat, Width, Height, true);
if (format.MipMapping)
GL.GenerateMipmap(GenerateMipmapTarget.Texture2D);
Log.Debug("Created Image: " + GL.GetError());
//unbind texture
GL.BindTexture(format.TextureTarget, 0);
//create depthbuffer
if (format.DepthBuffer)
{
GL.GenRenderbuffers(1, out dbHandle);
GL.BindRenderbuffer(RenderbufferTarget.RenderbufferExt, dbHandle);
if(multisample)
GL.RenderbufferStorageMultisample(RenderbufferTarget.RenderbufferExt, samples, RenderbufferStorage.DepthComponent24, Width, Height);
else
GL.RenderbufferStorage(RenderbufferTarget.RenderbufferExt, RenderbufferStorage.DepthComponent24, Width, Height);
}
//create fbo
fboHandle = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.FramebufferExt, fboHandle);
GL.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment0Ext, format.TextureTarget, textureHandle, 0);
if (format.DepthBuffer)
GL.FramebufferRenderbuffer(FramebufferTarget.FramebufferExt, FramebufferAttachment.DepthAttachmentExt, RenderbufferTarget.RenderbufferExt, dbHandle);
Log.Debug("Framebuffer status: " + GL.CheckFramebufferStatus(FramebufferTarget.FramebufferExt));
Log.Debug("Created Framebuffer: " + GL.GetError());
GL.BindFramebuffer(FramebufferTarget.FramebufferExt, 0);
}
creation:
var sf = SurfaceFormat.Surface2D;
sf.Multisampling = 4;
multisampler = new Surface(Window.Width, Window.Height, sf);
Now in the render loop I do the following:
//Render entire scene to multisampler
SceneRenderer.RenderMultisampled(ActiveCamera, multisampler, time);
//blit sampler to my material input texture
multisampler.CloneTo(postEffect.Textures["_tex"]);
//blit this texture to my "Canvas" (basically a surface with additional drawing methods). The canvas material is used as a texture for a quad in my scene, thus rendering a copy of the output image to a plane.
postEffect.Textures["_tex"].CloneTo(canvas.Surface);
//This would be the same but via rendering with a quad instead of blitting. Has the same result
//canvas.Clear();
//canvas.DrawMaterial(postEffect);
//clear framebuffer
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
//Set viewport
GL.Viewport(0, 0, Window.Width, Window.Height);
//use material (bind shader & shader params) and draw the scene.
postEffect.Use();
Helper.DrawScreenQuad();
GL.UseProgram(0);
If this is not enough, I can also post the shaders & mesh code.
EDIT2: Okay, everything is now working as expected, EXCEPT when I use canvas.draw() instead of blitting the texture. The draw method looks like this:
public void DrawMaterial(Material material)
{
GL.Viewport(0, 0, Surface.Width, Surface.Height);
Surface.BindFramebuffer();
material.Use();
Helper.DrawScreenQuad();
GL.UseProgram(0);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
}
Draw screen quad:
public static void DrawScreenQuad()
{
GL.Begin(PrimitiveType.Quads);
GL.TexCoord2(0, 1);
GL.Vertex2(-1, -1);
GL.TexCoord2(1, 1);
GL.Vertex2(1, -1);
GL.TexCoord2(1, 0);
GL.Vertex2(1, 1);
GL.TexCoord2(0, 0);
GL.Vertex2(-1, 1);
GL.End();
}
Shader used:
[Shader vertex]
#version 150 core
in vec2 _pos;
out vec2 texCoord;
uniform float _time;
uniform sampler2D tex;
void main() {
gl_Position = vec4(_pos, 0, 1);
texCoord = _pos/2+vec2(0.5,0.5);
texCoord.y = 1 - texCoord.y;
}
[Shader fragment]
#version 150 core
#define PI 3.1415926535897932384626433832795
out vec4 outColor;
uniform float _time;
uniform sampler2D tex;
in vec2 texCoord;
//
void main() {
outColor = texture2D(tex, texCoord);
}
Somehow the rendered scene gets turned upside down by this. Why?
I think I found my mistake. I had the texture coordinates AND the camera inverted. It seems to be fixed now. What I still don't understand is why this works:
[Shader vertex]
#version 150 core
in vec2 _pos;
out vec2 texCoord;
uniform float _time;
uniform sampler2D tex;
void main() {
gl_Position = vec4(_pos, 0, 1);
texCoord = _pos/2+vec2(0.5,0.5);
//texCoord.y = 1 - texCoord.y;
}
[Shader fragment]
#version 150 core
#define PI 3.1415926535897932384626433832795
out vec4 outColor;
uniform float _time;
uniform sampler2D tex;
in vec2 texCoord;
//
void main() {
outColor = texture2D(tex, texCoord);
}
I would've expected that the y coordinate of the tex coord would need to be inverted.

Why can't I access the G-Buffer from my lighting shader?

I implemented a new rendering pipeline in my engine and rendering is broken now. When I draw a texture of the G-Buffer directly to the screen, it shows up correctly, so the G-Buffer is fine. But somehow the lighting pass causes trouble. Even if I don't use its resulting texture, but instead try to display the albedo from the G-Buffer after the lighting pass, I get a solid gray color.
I can't explain this behavior and the strange thing is that there are no OpenGL errors at any point.
Vertex Shader to draw a fullscreen quad.
#version 330
in vec4 vertex;
out vec2 coord;
void main()
{
coord = vertex.xy;
gl_Position = vertex * 2.0 - 1.0;
}
Fragment Shader for lighting.
#version 330
in vec2 coord;
out vec3 image;
uniform int type = 0;
uniform sampler2D positions;
uniform sampler2D normals;
uniform vec3 light;
uniform vec3 color;
uniform float radius;
uniform float intensity = 1.0;
void main()
{
if(type == 0) // directional light
{
vec3 normal = texture2D(normals, coord).xyz;
float fraction = max(dot(normalize(light), normal) / 2.0 + 0.5, 0);
image = intensity * color * fraction;
}
else if(type == 1) // point light
{
vec3 pixel = texture2D(positions, coord).xyz;
vec3 normal = texture2D(normals, coord).xyz;
float dist = max(distance(pixel, light), 1);
float magnitude = 1 / pow(dist / radius + 1, 2);
float cutoff = 0.4;
float attenuation = clamp((magnitude - cutoff) / (1 - cutoff), 0, 1);
float fraction = clamp(dot(normalize(light - pixel), normal), -1, 1);
image = intensity * color * attenuation * max(fraction, 0.2);
}
}
Targets and samplers for the lighting pass. Texture IDs are mapped to an attachment and a shader location, respectively.
unordered_map<GLenum, GLuint> targets;
targets.insert(make_pair(GL_COLOR_ATTACHMENT2, ...)); // light
targets.insert(make_pair(GL_DEPTH_STENCIL_ATTACHMENT, ...)); // depth and stencil
unordered_map<string, GLuint> samplers;
samplers.insert(make_pair("positions", ...)); // positions from G-Buffer
samplers.insert(make_pair("normals", ...)); // normals from G-Buffer
Draw function for lighting pass.
void DrawLights(unordered_map<string, GLuint> Samplers, GLuint Program)
{
auto lis = Entity->Get<Light>();
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glUseProgram(Program);
int n = 0; for(auto i : Samplers)
{
glActiveTexture(GL_TEXTURE0 + n);
glBindTexture(GL_TEXTURE_2D, i.second);
glUniform1i(glGetUniformLocation(Program, i.first.c_str()), n);
n++;
}
mat4 view = Entity->Get<Camera>(*Global->Get<unsigned int>("camera"))->View;
for(auto i : lis)
{
int type = i.second->Type == Light::DIRECTIONAL ? 0 : 1;
vec3 pos = vec3(view * vec4(Entity->Get<Form>(i.first)->Position(), !type ? 0 : 1));
glUniform1i(glGetUniformLocation(Program, "type"), type);
glUniform3f(glGetUniformLocation(Program, "light"), pos.x, pos.y, pos.z);
glUniform3f(glGetUniformLocation(Program, "color"), i.second->Color.x, i.second->Color.y, i.second->Color.z);
glUniform1f(glGetUniformLocation(Program, "radius"), i.second->Radius);
glUniform1f(glGetUniformLocation(Program, "intensity"), i.second->Intensity);
glBegin(GL_QUADS);
glVertex2i(0, 0);
glVertex2i(1, 0);
glVertex2i(1, 1);
glVertex2i(0, 1);
glEnd();
}
glDisable(GL_BLEND);
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
I found the error, and it was a stupid one. The old rendering pipeline bound the correct framebuffer before calling the draw function of each pass, but the new one didn't, so each draw function had to do that itself. I therefore wanted to update all draw functions, but I missed the draw function of the lighting pass.
As a result, the framebuffer of the G-Buffer was still bound, and the lighting pass rendered into its targets.
Thanks to you guys; you had no chance of finding that error, since I hadn't posted my complete pipeline system.
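To illustrate the kind of fix described (a sketch only, with a hypothetical lightFBO handle, since the complete pipeline code was not posted): each pass's draw function now binds its own render target first, so the lighting pass no longer draws into whatever framebuffer, here the G-Buffer, was left bound.

void DrawLights(unordered_map<string, GLuint> Samplers, GLuint Program)
{
    // Bind this pass's own target before touching anything else.
    glBindFramebuffer(GL_FRAMEBUFFER, lightFBO); // lightFBO: hypothetical FBO handle
    glClear(GL_COLOR_BUFFER_BIT);

    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE);
    glUseProgram(Program);

    // ... bind the samplers and draw the light quads exactly as before ...

    glBindFramebuffer(GL_FRAMEBUFFER, 0); // leave a known framebuffer bound
}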