I have an HDR radiance environment map as a LatLong 2D texture image that I want to convert to a cubemap. I do this by loading the HDR map as a 2D float texture, projecting it onto a cube, and then rendering the scene inside this cube from 6 different directions, directly filling a cubemap by passing the respective cubemap faces as the texture target of glFramebufferTexture2D.
The cubemap is created as a floating-point cubemap as follows:
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (unsigned int i = 0; i < 6; ++i)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}
if (mipmap)
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
Note that the type parameter is GL_FLOAT so it should properly accept HDR values. The HDR image is loaded using stb_image.h as follows:
if (stbi_is_hdr(path.c_str()))
{
stbi_set_flip_vertically_on_load(true);
int width, height, nrComponents;
float *data = stbi_loadf(path.c_str(), &width, &height, &nrComponents, 0);
if (data)
{
GLenum format;
if (nrComponents == 3)
format = GL_RGB;
else if (nrComponents == 4)
format = GL_RGBA;
Bind();
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_FLOAT, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (Mipmapping)
glGenerateMipmap(GL_TEXTURE_2D);
Unbind();
stbi_image_free(data);
}
}
I also tried iterating over this array and retrieving the maximum float value to check that the HDR image loaded correctly; the highest float value of my current HDR map is 288, which is far above 1.0, as I would expect.
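The check looks roughly like this (a minimal sketch over the data, width, height and nrComponents from the loading code above; std::max needs <algorithm>):
#include <algorithm> // for std::max
float maxValue = 0.0f;
for (int i = 0; i < width * height * nrComponents; ++i)
    maxValue = std::max(maxValue, data[i]);
// maxValue comes out at ~288 for the current HDR map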
Here's where things get tricky: given the input texture (an HDR float map) and the output cubemap (also float), I'd expect the cubemap faces to be treated as floating-point textures and to receive the HDR values directly. However, the cubemap appears to be LDR: the moment I add tone mapping (with a variable exposure) I get quite a lot of banding, and I'm clearly missing the precision of HDR, as the following image shows (with an exposure of ~7.5)
I'm not sure what I'm missing, and I couldn't find much in OpenGL's docs about rendering directly to floating-point framebuffers; I assume this is possible, as it wouldn't make sense if it weren't.
For completeness' sake, here is the relevant code that generates the cubemap from the LatLong image (with renderCustomCommand rendering the cube with proper samplers set):
glGenFramebuffers(1, &m_FramebufferCubemap);
glGenRenderbuffers(1, &m_CubemapDepthRBO);
Camera faceCameras[6] = {
Camera(position, vec3( 1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3(-1.0f, 0.0f, 0.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 1.0f, 0.0f), vec3(0.0f, 0.0f, 1.0f)),
Camera(position, vec3( 0.0f, -1.0f, 0.0f), vec3(0.0f, 0.0f,- 1.0f)),
Camera(position, vec3( 0.0f, 0.0f, 1.0f), vec3(0.0f, -1.0f, 0.0f)),
Camera(position, vec3( 0.0f, 0.0f, -1.0f), vec3(0.0f, -1.0f, 0.0f))
};
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
glBindRenderbuffer(GL_RENDERBUFFER, m_CubemapDepthRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_CubemapDepthRBO);
glViewport(0, 0, width, height);
glBindFramebuffer(GL_FRAMEBUFFER, m_FramebufferCubemap);
for (unsigned int i = 0; i < 6; ++i)
{
Camera *camera = &faceCameras[i];
camera->SetPerspective(90.0f, width/height, 0.1f, 100.0f);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, cubeTarget->ID, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (unsigned int i = 0; i < renderCommands.size(); ++i)
{
renderCustomCommand(&renderCommands[i], camera);
}
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, m_RenderSize.x, m_RenderSize.y);
And here's the code for sampling the LatLong 2D image -> cube:
#version 330 core
out vec4 FragColor;
in vec3 wPos;
#include sample.glsl
uniform sampler2D environment;
void main()
{
vec2 uv = SampleLatLong(normalize(wPos));
vec3 color = texture(environment, uv).rgb;
FragColor = vec4(color, 1.0);
}
Note that the LatLong-to-cubemap conversion itself works: the 2D environment is properly rendered onto the cubemap, but it is simply clamped to the [0, 1] range the moment it's rendered as a skybox, as if it lost its floating-point data somewhere along the way.
I've been stuck on this problem for a while now and was hoping someone could shed some light on it (is it even possible to render directly to float cubemaps like this?). Thank you.
EDIT: Here is the same picture with a high exposure set in Photoshop; as you can see, a lot of detail emerges that I've lost in the renderer.
The third parameter of your glTexImage2D call needs to be GL_RGB16F or GL_RGB32F; it specifies the internal format.
The two parameters GL_RGB and GL_FLOAT at the end only describe the memory layout of the (optional) data pointer you pass in. They do not influence the internal format.
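For example, a sketch of the corrected cubemap allocation, reusing the names from your code:
// Allocate each face with a half-float internal format so values above 1.0 survive;
// the trailing GL_RGB / GL_FLOAT only describe the (here absent) client-side data.
for (unsigned int i = 0; i < 6; ++i)
{
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F, width, height, 0, GL_RGB, GL_FLOAT, NULL);
}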
I am trying to render a simple 2D image with QOpenGLWidget by mapping it as a texture onto a quad. But no matter what I try, I always get a black box.
I have read many tutorials. I have a simple pixel buffer like this:
uchar* m_pData;
which stores my RGB values. The buffer is valid, since I have tested it with glDrawPixels(). I will post the three most important functions of the QOpenGLWidget.
initializeGL() first:
void QGLImageviewer::initializeGL()
{
initializeOpenGLFunctions();
// Clear the color
float r = ((float)m_backColor.darker().red())/255.0f;
float g = ((float)m_backColor.darker().green())/255.0f;
float b = ((float)m_backColor.darker().blue())/255.0f;
glClearColor(r,g,b,1.0f);
// Set shading model.
glShadeModel(GL_SMOOTH);
// Set the viewport
glViewport(0.f, 0.f, m_origW, m_origH);
// Init. Projection Matrix
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, m_origW, m_origH,0.0,1.0,-1.0);
// Init Modelview Matrix
glClearColor(0.f, 0.f, 0.f, 1.f);
// Enable texturing
glEnable(GL_TEXTURE_2D);
// Generate texture ID
glGenTextures(1, &m_textureID);
// Bind texture ID
glBindTexture(GL_TEXTURE_2D, m_textureID);
// Set texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
//glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Generate texture and assign pixels to our texture ID
if (m_pData != nullptr)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_origW, m_origH, 0, GL_RGB, GL_UNSIGNED_BYTE, (GLvoid*)m_pData);
}
else
{
qCritical("Buffer is empty!!");
}
// Unbind texture
glBindTexture(GL_TEXTURE_2D, NULL);
// Check for Error
GLenum error_ = glGetError();
if (error_ != GL_NO_ERROR)
{
qCritical("Error Loading Texture!");
}
}
Then paintGL():
void QGLImageviewer::paintGL()
{
makeCurrent();
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
// Clear Screen And Depth Buffer
glClear(GL_COLOR_BUFFER_BIT);
if (!(m_renderQtImg->isNull()))
{
//QImage image;
int imWidth = m_renderQtImg->width();
int imHeight = m_renderQtImg->height();
// The image has to be resized to fit the widget
if (imWidth != this->size().width() &&
imHeight != this->size().height())
{
imWidth = this->size().width();
imHeight = this->size().height();
}
else
{
//image = *m_renderQtImg;
imWidth = m_origW;
imHeight = m_origH;
}
if (m_textureID != 0)
{
glMatrixMode(GL_MODELVIEW);
// Remove any previous transformations
glLoadIdentity();
glPushMatrix();
// Move to rendering point
glTranslatef(0.f, 0.f, 0.f);
glColor3f(0.0f, 0.0f, 0.5f);
// Set texture ID
glBindTexture(GL_TEXTURE_2D, m_textureID);
glBegin(GL_QUADS);
glTexCoord2f(0.f, 0.f); glVertex2f(0.f, 0.f);
glTexCoord2f(1.f, 0.f); glVertex2f(imWidth, 0.f);
glTexCoord2f(1.f, 1.f); glVertex2f(imWidth, imHeight);
glTexCoord2f(0.f, 1.f); glVertex2f(0.f, imHeight);
glEnd();
//glDisable(GL_TEXTURE_2D);
glPopMatrix();
glFlush();
}
}
}
And last but not least resizeGL():
void QGLImageviewer::resizeGL(int width, int height)
{
makeCurrent();
glViewport(0,0,(GLint)width,(GLint)height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//gluOrtho2D(0.0, width, 0.0, height);
glOrtho(0.0, width, 0.0, height, 0.0, 1.0);
}
I am using Qt5.6 and the Microsoft Visual Studio 2015 compiler.
Damn, the problem was in this line:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_origW , m_origH, 0, GL_RGB, GL_UNSIGNED_BYTE, (float*)m_pData);
My member variables m_origW and m_origH were initialized to 0, so it is hardly surprising that my code did not work.
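In hindsight, a simple guard would have caught this; a minimal sketch:
// Sanity check: a 0 x 0 upload silently creates an empty texture level.
if (m_origW <= 0 || m_origH <= 0 || m_pData == nullptr)
    qCritical("Invalid texture size or empty buffer!");
else
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_origW, m_origH, 0, GL_RGB, GL_UNSIGNED_BYTE, (GLvoid*)m_pData);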
For the record, I will post my running code (the three most important functions of the QOpenGLWidget), hoping it will be useful to somebody with the same issue.
void QGLImageviewer::initializeGL()
{
initializeOpenGLFunctions();
float r = ((float)m_backColor.darker().red())/255.0f;
float g = ((float)m_backColor.darker().green())/255.0f;
float b = ((float)m_backColor.darker().blue())/255.0f;
glClearColor(r,g,b,1.0f);
// Generate texture ID
glGenTextures(1, &m_textureID);
// Bind texture ID
glBindTexture(GL_TEXTURE_2D, m_textureID);
if (m_pData != nullptr)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_origW , m_origH, 0, GL_RGB, GL_UNSIGNED_BYTE, (float*)m_pData);
}
else
{
qCritical("Buffer is empty!!");
}
// Set texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); //IMPORTANT FOR NON POWER OF 2 TEXTURES
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Enable texturing Mapping
glEnable(GL_TEXTURE_2D);
// Enable Smooth Shading
glShadeModel(GL_SMOOTH);
// Black Background
glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
// Depth Buffer Setup
glClearDepth(1.0f);
// Enables Depth Testing
glEnable(GL_DEPTH_TEST);
// Type of depth testing to do
glDepthFunc(GL_LEQUAL);
// Really Nice Perspective Calculations
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
// Unbind Texture
glBindTexture(GL_TEXTURE_2D, NULL);
}
Now paintGL():
void QGLImageviewer::paintGL()
{
makeCurrent();
// Clear Screen And Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, m_textureID);
glBegin(GL_QUADS);
// Drawing the quad with the texture mapped on it
//
// OpenGL 2D Coordinates
// Sticking with the Coordinate Convention mentioned here
glTexCoord2f(1.0f, 0.0f); glVertex2f(m_width, 0.0f); // vertex 1
glTexCoord2f(1.0f, 1.0f); glVertex2f(m_width, m_height); // vertex 2
glTexCoord2f(0.0f, 1.0f); glVertex2f(0.0f, m_height); // vertex 3
glTexCoord2f(0.0f, 0.0f); glVertex2f(0.0f, 0.0f); // vertex 4
glEnd();
}
And resizeGL():
void QGLImageviewer::resizeGL(int width, int height)
{
makeCurrent();
m_width = width;
m_height = height;
// Compute aspect ratio of the new window
if (height == 0) height = 1; // To prevent divide by 0
GLfloat aspect = (GLfloat)width / (GLfloat)height;
// Set the viewport to cover the new window
glViewport(0, 0, width, height);
// Set the aspect ratio of the clipping area to match the viewport
glMatrixMode(GL_PROJECTION); // To operate on the Projection matrix
glLoadIdentity(); // Reset the projection matrix
glOrtho(0, m_width/ m_zoomFactor, m_height/ m_zoomFactor,0, -1, 1);
}
I have been trying to implement shadow mapping. While I think I am now close, I am stuck on a strange effect (illustrated below):
As you can see, the shadow region appears too small. There is also an unusual effect on the cube itself.
The geometry being rendered is a cube of dimensions 1.0 on a square plane of dimensions 100.0. The scene contains a single spotlight with an angle (from one side to the other) of 0.5 radians and a range of 100.0. This spotlight orbits about the y-axis and adjusts its rotation to look at the origin.
I setup the framebuffer and depth texture (512 x 512) as follows:
// Create and configure the depth texture.
glGenTextures(1, &m_depthTexture);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
GLfloat border[] = { 1.0f, 1.0f, 1.0f, 1.0f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, (void*)0);
// Assign the depth texture to texture channel 0.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, m_depthTexture);
// Create and configure the framebuffer.
glGenFramebuffers(1, &m_framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_depthTexture, 0);
GLenum drawBuffers[] = { GL_NONE };
glDrawBuffers(1, drawBuffers);
I then render the scene to the shadow map framebuffer from the perspective of the spotlight. This seems to be working. Inspecting the depth texture using an OpenGL debugging tool revealed the following:
The scene is rendered a second time, where I set the uniforms for the depth texture and shadow matrix:
glActiveTexture(GL_TEXTURE0 + 1);
glBindTexture(GL_TEXTURE_2D, shadowMap.depthTexture());
program->uniform("shadowMap", 1);
const M3D::Matrix4 lightViewMatrix = lightTransformComponent->transformationMatrix().inverse();
const float invTanHalfFov = 1.0f / std::tan(coneAngle * 0.5f);
const float nearClipPlane = 0.3f;
const float farClipPlane = lightLightComponent->range();
const float zRange = nearClipPlane - farClipPlane;
const Matrix4 lightProjectionMatrix(
invTanHalfFov, 0.0f, 0.0f, 0.0f,
0.0f, invTanHalfFov, 0.0f, 0.0f,
0.0f, 0.0f, -(nearClipPlane + farClipPlane) / zRange, 2.0f * nearClipPlane * farClipPlane / zRange,
0.0f, 0.0f, 1.0f, 0.0f
);
const Matrix4 shadowMatrix = lightProjectionMatrix * lightViewMatrix * modelMatrix;
program->uniform("shadowMatrix", shadowMatrix);
I compute the shadow coordinate in the vertex shader:
f_shadowCoordinate = shadowMatrix * vec4(v_position, 1.0f);
Then, in the fragment shader, I project this coordinate and bias it into the [0, 1] range.
vec2 projectedShadowCoordinates = (f_shadowCoordinate.xy / f_shadowCoordinate.w) * 0.5f + vec2(0.5f, 0.5f);
float shadowDistance = texture(shadowMap, projectedShadowCoordinates).x;
return vec4(1.0f) * shadowDistance;
The problem was caused by mistakenly setting the projection matrix uniform to the camera's projection matrix (instead of the light's projection matrix) when rendering to the shadow framebuffer.
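In other words, the depth pass has to be rendered with the light's projection and view matrices, not the camera's. A minimal sketch of the fix (the "mvp" uniform name here is an assumption, not the exact code):
// Depth pass: render the scene from the light's point of view.
const Matrix4 lightMVP = lightProjectionMatrix * lightViewMatrix * modelMatrix;
program->uniform("mvp", lightMVP); // was mistakenly set with the camera's projection
// Main pass: build the shadow matrix from the same light matrices.
const Matrix4 shadowMatrix = lightProjectionMatrix * lightViewMatrix * modelMatrix;
program->uniform("shadowMatrix", shadowMatrix);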
I am using OpenGL to do some image processing. The first experiment is converting a color image to grayscale, and everything works fine, except that I don't want to show the widget.
If I don't call show(), the QGLWidget does not begin to render the texture.
Can I render the texture without showing the widget?
Is QGLWidget the right tool for that?
Part of the code:
#include <QDebug>
#include "toGray.hpp"
toGray::toGray(std::string const &vertex_file, std::string const &fragment_file, QWidget *parent)
:basicGLWidget(vertex_file, fragment_file, parent) //read shaders, compile them, allocate VBO
{
}
void toGray::initializeVertexBuffer()
{
std::vector<GLfloat> const vertex{
-1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, -1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 1.0f,
};
initializeVertexBufferImpl(vertex); //copy the data into QOpenGLBuffer
QImage img(":/simpleGPGPU/images/emili.jpg");
texture_addr_ = bindTexture(img);
resize(img.width(), img.height());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
void toGray::paintGL()
{
qglClearColor(Qt::white);
glClear(GL_COLOR_BUFFER_BIT);
program_.bind();
bind_buffer();
program_.enableAttributeArray("qt_Vertex");
program_.setAttributeBuffer( "qt_Vertex", GL_FLOAT, 0, 4);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture_addr_);
glDrawArrays(GL_TRIANGLES, 0, get_buffer(0).size());
program_.disableAttributeArray("qt_Vertex");
program_.release();
glActiveTexture(0);
release_buffer();
}
vertex shader
attribute highp vec4 qt_Vertex;
varying highp vec2 qt_TexCoord0;
void main(void)
{
gl_Position = qt_Vertex;
qt_TexCoord0 = (qt_Vertex.xy + vec2(1.0)) * 0.5;
}
fragment shader
uniform sampler2D source;
varying highp vec2 qt_TexCoord0;
vec3 toGray(vec3 rgb)
{
return vec3(rgb.r * 0.299 + rgb.g * 0.587 + rgb.b * 0.114);
}
void main(void)
{
vec3 gray = toGray(texture2D(source, qt_TexCoord0).rgb);
gl_FragColor = vec4(gray, 0.0);
}
Edit: I used a QWindow with an FBO to do offscreen rendering, but the result is a blank image.
The code compiles, but the QImage returned by the QOpenGLFramebufferObject is empty.
.hpp
#ifndef OFFSCREENEXP_HPP
#define OFFSCREENEXP_HPP
#include <QOpenGLFunctions>
#include <QWindow>
class QImage;
class QOpenGLContext;
class offScreenExp : public QWindow, protected QOpenGLFunctions
{
public:
explicit offScreenExp(QWindow *parent = 0);
QImage render();
private:
QOpenGLContext *context_;
};
#endif // OFFSCREENEXP_HPP
.cpp
#include <QImage>
#include <QOpenGLBuffer>
#include <QOpenGLContext>
#include <QOpenGLFramebufferObject>
#include <QOpenGLShaderProgram>
#include <QString>
#include <QWidget>
#include <QDebug>
#include "offScreenExp.hpp"
offScreenExp::offScreenExp(QWindow *parent) :
QWindow(parent),
context_(nullptr)
{
setSurfaceType(QWindow::OpenGLSurface);
setFormat(QSurfaceFormat());
create();
}
QImage offScreenExp::render()
{
//create the context
if (!context_) {
context_ = new QOpenGLContext(this);
QSurfaceFormat format;
context_->setFormat(format);
if (!context_->create())
qFatal("Cannot create the requested OpenGL context!");
}
context_->makeCurrent(this);
initializeOpenGLFunctions();
//load image and create fbo
QString const prefix("/Users/Qt/program/experiment_apps_and_libs/openGLTest/simpleGPGPU/");
QImage img(prefix + "images/emili.jpg");
if(img.isNull()){
qFatal("image is null");
}
QOpenGLFramebufferObject fbo(img.size());
qDebug()<<"bind success? : "<<fbo.bind();
//if(glCheckFramebufferStatus(fbo.handle()) != GL_FRAMEBUFFER_COMPLETE){
// qDebug()<<"frame buffer error";
//}
qDebug()<<"has opengl fbo : "<<QOpenGLFramebufferObject::hasOpenGLFramebufferObjects();
//use two triangles to cover the whole screen
std::vector<GLfloat> const vertex{
-1.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 0.0f, 1.0f,
1.0f, -1.0f, 0.0f, 1.0f,
-1.0f, -1.0f, 0.0f, 1.0f,
};
//initialize vbo
QOpenGLBuffer buffer(QOpenGLBuffer::VertexBuffer);
buffer.create();
buffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
buffer.bind();
buffer.allocate(&vertex[0], vertex.size() * sizeof(GLfloat) );
buffer.release();
//create texture
GLuint rendered_texture;
glGenTextures(1, &rendered_texture);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, rendered_texture);
//naive solution, better encapsulate the format in a function
if(img.format() == QImage::Format_Indexed8){
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, img.width(), img.height(), 0, GL_RED, GL_UNSIGNED_BYTE, img.scanLine(0));
}else if(img.format() == QImage::Format_RGB888){
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width(), img.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, img.scanLine(0));
}else{
QImage temp = img.convertToFormat(QImage::Format_RGB888);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.width(), img.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, temp.scanLine(0));
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
//compile and link program
QOpenGLShaderProgram program;
if(!program.addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute highp vec4 qt_Vertex;"
"varying highp vec2 qt_TexCoord0;"
"void main(void)"
"{"
" gl_Position = qt_Vertex;"
" qt_TexCoord0 = (qt_Vertex.xy + vec2(1.0)) * 0.5;"
"}")){
qDebug()<<"QOpenGLShader::Vertex error : " + program.log();
}
// Compile fragment shader
if (!program.addShaderFromSourceCode(QOpenGLShader::Fragment,
"uniform sampler2D source;"
"varying highp vec2 qt_TexCoord0;"
"vec3 toGray(vec3 rgb)"
"{"
" return vec3(rgb.r * 0.299 + rgb.g * 0.587 + rgb.b * 0.114);"
"}"
"void main(void)"
"{"
"vec3 gray = toGray(texture2D(source, qt_TexCoord0).rgb);"
"gl_FragColor = vec4(gray, 0.0);"
"}"
)){
qDebug()<<"QOpenGLShader::Fragment error : " + program.log();
}
// Link shader pipeline
if (!program.link()){
qDebug()<<"link error : " + program.log();
}
//render the texture as usual
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
glViewport(0, 0, img.width(), img.height());
program.bind();
buffer.bind();
program.enableAttributeArray("qt_Vertex");
program.setAttributeBuffer( "qt_Vertex", GL_FLOAT, 0, 4);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, rendered_texture);
//bind and create fbo
QOpenGLFramebufferObject fbo(img.size());
qDebug()<<"bind success? : "<<fbo.bind();
glDrawArrays(GL_TRIANGLES, 0, buffer.size());
QImage result = fbo.toImage();
program.disableAttributeArray("qt_Vertex");
program.release();
buffer.release();
fbo.release();
glActiveTexture(0);
glBindTexture(GL_TEXTURE_2D, 0);
context_->doneCurrent();
return result;
}
I tried my best to simplify the code, but it is still pretty verbose.
For offscreen rendering, try rendering into a QOpenGLFramebufferObject, which can be converted into a QImage, which in turn can easily be saved to disk.
For that however, you still need a surface to render onto (as required by QOpenGLContext::makeCurrent()), so your only choice is indeed using a QWindow or a QGLWidget to get such a surface.
In Qt 5.1, there will be a new class called QOffscreenSurface, which will probably be most suitable for your use case. You would use QOffscreenSurface to get a surface for your OpenGL context, then render into an FBO using QOpenGLFramebufferObject, and finally call QOpenGLFramebufferObject::toImage() to get access to the pixels.
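Once that class is available, the flow would look roughly like this (a sketch; the FBO size, the draw calls and the output path are placeholders):
#include <QImage>
#include <QOffscreenSurface>
#include <QOpenGLContext>
#include <QOpenGLFramebufferObject>
#include <QSurfaceFormat>
// Create an offscreen surface so the context has something to be made current on.
QOffscreenSurface surface;
surface.setFormat(QSurfaceFormat());
surface.create();
QOpenGLContext context;
context.create();
context.makeCurrent(&surface);
// Render into an FBO instead of a window, then read the pixels back as a QImage.
QOpenGLFramebufferObject fbo(QSize(512, 512));
fbo.bind();
// ... issue the usual shader/VBO draw calls here ...
QImage result = fbo.toImage();
fbo.release();
result.save("output.png");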
I am trying to do basic shadow mapping, but for some reason it doesn't render properly.
Video of the Problem
I render the house using a flat shader:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
// Rendering into the shadow texture.
glActiveTexture(GL_TEXTURE0);
CALL_GL(glBindTexture(GL_TEXTURE_2D, shadowTexture));
// Bind the framebuffer.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO));
//Clear it
CALL_GL(glClear(GL_DEPTH_BUFFER_BIT));
CALL_GL(glViewport(0, 0, shadowMapWidth, shadowMapHeight));
CALL_GL(glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE));
//Render stuff
flatShader.use();
flatShader["baseColor"] = glm::vec4(1.0f,1.0f,1.0f,1.0f);
flatShader["pvm"] = projectionMatrix*pointLight.viewMatrix*cursor.modelMatrix;
cursor.draw(); //binds the vao and draws
// Revert for the scene.
CALL_GL(glBindFramebuffer(GL_FRAMEBUFFER, 0));
CALL_GL(glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE));
CALL_GL(glViewport(0, 0, WINDOW_SIZE_X, WINDOW_SIZE_Y));
Notice that I only render the house. I don't render the floor in the depth-buffer pass.
Following this I render the quad that represents the floor using the following shader pair:
/* [VERT] */
#version 330
in vec3 in_Position;
in vec2 in_TexCoord;
uniform mat4 shadowMatrix;
uniform mat4 mvp;
out vec2 UV;
out vec4 shadowProj;
void main()
{
gl_Position = mvp*vec4(in_Position,1.0);
shadowProj = shadowMatrix*vec4(in_Position,1.0);
UV = in_TexCoord;
}
And the Fragment Shader:
/* [FRAG] */
#version 330
in vec2 UV;
in vec4 shadowProj;
out vec4 fragColor;
uniform sampler2D texturex;
uniform sampler2DShadow shadowMap;
void main()
{
fragColor = vec4(texture(texturex, UV).rgb,1);
float shadow = 1.0;
shadow = textureProj(shadowMap,shadowProj);
fragColor *= shadow;
}
I then draw the house again in color and ... the floor:
textureShader.use();
glUniform1i(baseImageLoc, 0); //Texture unit 0 is for base images.
glUniform1i(shadowMapLoc, 1); //Texture unit 1 is for shadow maps.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, floorTexture);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
textureShader["shadowMatrix"] = projectionMatrix*pointLight.viewMatrix*floorMatrix;
textureShader["mvp"] = projectionMatrix*viewMatrix*floorMatrix;
CALL_GL(glBindVertexArray(floorVAO));
CALL_GL(glDrawArrays(GL_TRIANGLES,0,18));
glfwSwapBuffers();
Has anybody seen this behavior before? Any idea what could be wrong? By the way, the light's coordinates place it directly on top of the house so the shadow should be directly below the house on the floor (but it ends up sideways).
For reference here is how I generate the shadow FBO:
int shadowMapWidth = WINDOW_SIZE_X * (int)SHADOW_MAP_RATIO;
int shadowMapHeight = WINDOW_SIZE_Y * (int)SHADOW_MAP_RATIO;
glGenTextures(1, &shadowTexture);
glBindTexture(GL_TEXTURE_2D, shadowTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, shadowMapWidth, shadowMapHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LESS);
glTexImage2D(GL_TEXTURE_2D,0,GL_DEPTH_COMPONENT,shadowMapWidth,shadowMapHeight,0,GL_DEPTH_COMPONENT,GL_FLOAT,NULL);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_COMPARE_MODE,GL_COMPARE_R_TO_TEXTURE);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glGenFramebuffers(1, &shadowFBO);
glBindFramebuffer(GL_FRAMEBUFFER, shadowFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, shadowTexture, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{ printf("GL_FRAMEBUFFER_COMPLETE error 0x%x", glCheckFramebufferStatus(GL_FRAMEBUFFER)); }
glClearDepth(1.0f); glEnable(GL_DEPTH_TEST);
// Needed when rendering the shadow map. This will avoid artifacts.
glPolygonOffset(1.0f, 0.0f); glBindFramebuffer(GL_FRAMEBUFFER, 0);
//bias matrix: converts clip-space coordinates from [-1, 1] to texture-space [0, 1]
GLfloat biasMatrixf[] = {
0.5f, 0.0f, 0.0f, 0.0f,
0.0f, 0.5f, 0.0f, 0.0f,
0.0f, 0.0f, 0.5f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f };
biasMatrix = glm::make_mat4(biasMatrixf);
It looks like you forgot to multiply your shadow matrix by the bias matrix.
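That is, something along these lines (a sketch reusing the names from your code):
// Prepend the bias matrix so the projective lookup samples the shadow map
// in [0, 1] texture space instead of [-1, 1] clip space.
textureShader["shadowMatrix"] = biasMatrix * projectionMatrix * pointLight.viewMatrix * floorMatrix;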