Although OpenGL is simple for what it does, it is still confusing to me; I am only beginning to learn how it works.
I am looking for a minimal example of off-screen rendering to get me started.
My application shall take a bunch of triangles and information about how to position them relative to the camera and save the rendered result to an image file. No lighting, materials or post processing for now.
I have watched tutorials about creating off-screen contexts, creating an FBO, rendering to a texture, and so on. I don't mind using Qt, as it conveniently provides OpenGL tooling, windows and QImage. From what I understand, in order to do image processing on the rendered image you need to set up your render target as a texture, then use shaders, and finally read the texture back into an array.
Trying to put these pieces together has never landed me at a good starting point. I either get stuck setting up dependencies, end up with a black screen, or find myself staring at projects that do far more than what I need.
Update 1:
I got it sort of working.
#include <QtGui/QGuiApplication>
#include <QtGui/QSurfaceFormat>
#include <QtGui/QOffscreenSurface>
#include <QtGui/QOpenGLFunctions_4_3_Core>
#include <QtGui/QOpenGLFramebufferObject>
#include <QtGui/QOpenGLShaderProgram>
#include <QDebug>
#include <QImage>
#include <QOpenGLBuffer>
int main(int argc, char* argv[])
{
QGuiApplication a(argc, argv);
QSurfaceFormat surfaceFormat;
surfaceFormat.setMajorVersion(4);
surfaceFormat.setMinorVersion(3);
QOpenGLContext openGLContext;
openGLContext.setFormat(surfaceFormat);
openGLContext.create();
if(!openGLContext.isValid()) return -1;
QOffscreenSurface surface;
surface.setFormat(surfaceFormat);
surface.create();
if(!surface.isValid()) return -2;
openGLContext.makeCurrent(&surface);
QOpenGLFunctions_4_3_Core f;
if(!f.initializeOpenGLFunctions()) return -3;
qDebug() << QString::fromLatin1((const char*)f.glGetString(GL_VERSION));
QSize vpSize = QSize(100, 200);
qDebug("Hi");
QOpenGLFramebufferObjectFormat fboFormat;
fboFormat.setAttachment(QOpenGLFramebufferObject::CombinedDepthStencil);
QOpenGLFramebufferObject fbo(vpSize, fboFormat);
fbo.bind();
// //////////
static const float vertexPositions[] = {
-0.8f, -0.8f, 0.0f,
0.8f, -0.8f, 0.0f,
0.0f, 0.8f, 0.0f
};
static const float vertexColors[] = {
1.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 1.0f
};
QOpenGLBuffer vertexPositionBuffer(QOpenGLBuffer::VertexBuffer);
vertexPositionBuffer.create();
vertexPositionBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
vertexPositionBuffer.bind();
vertexPositionBuffer.allocate(vertexPositions, 9 * sizeof(float));
QOpenGLBuffer vertexColorBuffer(QOpenGLBuffer::VertexBuffer);
vertexColorBuffer.create();
vertexColorBuffer.setUsagePattern(QOpenGLBuffer::StaticDraw);
vertexColorBuffer.bind();
vertexColorBuffer.allocate(vertexColors, 9 * sizeof(float));
QOpenGLShaderProgram program;
program.addShaderFromSourceCode(QOpenGLShader::Vertex,
"#version 330\n"
"in vec3 position;\n"
"in vec3 color;\n"
"out vec3 fragColor;\n"
"void main() {\n"
" fragColor = color;\n"
" gl_Position = vec4(position, 1.0);\n"
"}\n"
);
program.addShaderFromSourceCode(QOpenGLShader::Fragment,
"#version 330\n"
"in vec3 fragColor;\n"
"out vec4 color;\n"
"void main() {\n"
" color = vec4(fragColor, 1.0);\n"
"}\n"
);
program.link();
program.bind();
vertexPositionBuffer.bind();
program.enableAttributeArray("position");
program.setAttributeBuffer("position", GL_FLOAT, 0, 3);
vertexColorBuffer.bind();
program.enableAttributeArray("color");
program.setAttributeBuffer("color", GL_FLOAT, 0, 3);
f.glClearColor(0.3f, 0.0f, 0.7f, 1.0f);
f.glClear(GL_COLOR_BUFFER_BIT);
f.glDrawArrays(GL_TRIANGLES, 0, 3);
program.disableAttributeArray("position");
program.disableAttributeArray("color");
program.release();
// ///////////////
fbo.release();
qDebug("FBO released");
QImage im = fbo.toImage();
if (im.save("asd.png")){
qDebug("Image saved!!");
}
return 0;
}
The saved image has the same size as the FBO, and its background color corresponds to the one set in glClearColor, but the triangle is not rendered. What am I missing?
You can use glReadPixels after rendering and swapping:
unsigned char* buffer = new unsigned char[ WIDTH * HEIGHT * 3 ]; // one byte per channel for GL_UNSIGNED_BYTE
...
glReadPixels( 0, 0, WIDTH, HEIGHT, GL_BGR, GL_UNSIGNED_BYTE, buffer );
Then you can write the buffer into a .tga file:
FILE *out = fopen(tga_file, "wb"); // binary mode: the TGA header and pixel data are raw bytes
short TGAhead[] = {0, 2, 0, 0, 0, 0, WIDTH, HEIGHT, 24};
fwrite(&TGAhead, sizeof(TGAhead), 1, out);
fwrite(buffer, 3 * WIDTH * HEIGHT, 1, out);
fclose(out);
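Since the question uses Qt anyway, the readback can also be wrapped into a QImage instead of writing a TGA by hand. A minimal sketch, assuming a current context and a bound framebuffer of size WIDTH x HEIGHT:
unsigned char* pixels = new unsigned char[WIDTH * HEIGHT * 4];
glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
// QImage does not take ownership of the buffer; mirrored() makes a deep copy
// and also flips the rows, since OpenGL stores them bottom-up.
QImage image(pixels, WIDTH, HEIGHT, QImage::Format_RGBA8888);
image.mirrored().save("output.png");
delete[] pixels;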
A little late.
Since an FBO is being used, the following code, added just before the fbo->release() call, will save the image as a PNG.
QImage fb = fbo->toImage().convertToFormat(QImage::Format_RGB32);
fb.save("/path_to_image_file/image_file.png", "PNG");
If you want to make sure all the rendering is complete, you could do the following beforehand:
QOpenGLContext::currentContext()->functions()->glFlush();
You might also want to add the following to the fbo format:
fboFormat.setTextureTarget(GL_TEXTURE_2D);
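Putting those pieces together, the save sequence looks roughly like this (a sketch using the object names from the question's code):
QOpenGLContext::currentContext()->functions()->glFlush(); // make sure rendering has completed
QImage fb = fbo.toImage().convertToFormat(QImage::Format_RGB32);
fb.save("image_file.png", "PNG");
fbo.release();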
Related
I'm using Qt (5.6.0) to make a dungeon crawler/rogue-like with the interface done in Qt and the gameplay scene (3D map, character, monsters..) in OpenGL. The view is a promoted QWidget and I am able to draw using the old glBegin/glEnd method but whenever I try using glDrawArrays or glDrawElements, I get a blank screen.
The clear color is working and is set to be slightly lighter than black, so a black shape should show up. I am using the glBegin/glEnd method for testing with the same vertices and it does render as it should. I have tried a more or less straight OpenGL approach as shown by Jamie King, through several more examples and tutorials, finally ending on this example for using the QOpenGLShaderProgram and QOpenGLVertexArrayObject objects along with the example in the QOpenGLShaderProgram Qt doc. Currently the shader code in the initializeGL function is preventing the glBegin/glEnd triangle from being drawn.
Current Code:
oglwidget.cpp:
#include "GL/glew.h"
#include "GL/glext.h"
#include "oglwidget.h"
#include "general.h"
#define BUFFER_OFFSET(bytes) ((GLubyte*) NULL + (bytes))
extern const char * vertexShader;
extern const char * fragmentShader;
OGLWidget::OGLWidget(QWidget *parent): QOpenGLWidget(parent){
}
OGLWidget::~OGLWidget(){
}
void OGLWidget::initializeGL(){
QOpenGLFunctions gl;
gl.initializeOpenGLFunctions();
cout<<"Init"<<endl;
//-----Create Shader
shader2.create();
shader2.addShaderFromSourceCode(QOpenGLShader::Vertex,vertexShader);
shader2.addShaderFromSourceCode(QOpenGLShader::Fragment,fragmentShader);
shader2.link();
shader2.bind();
int vertexLocation = shader2.attributeLocation("vertex");
int matrixLocation = shader2.uniformLocation("matrix");
int colorLocation = shader2.uniformLocation("color");
QMatrix4x4 pmvMatrix;
pmvMatrix.ortho(rect());
QColor color(0, 255, 0, 255);
shader2.enableAttributeArray(vertexLocation);
shader2.setAttributeArray(vertexLocation, verts, 3);
shader2.setUniformValue(matrixLocation, pmvMatrix);
shader2.setUniformValue(colorLocation, color);
//-----Create VAO2
VAO2=new QOpenGLVertexArrayObject(this);
VAO2->create();
VAO2->bind();
//-----Create VBO2
VBO2.create();
VBO2.setUsagePattern(QOpenGLBuffer::StaticDraw);
VBO2.bind();
VBO2.allocate(verts,sizeof(verts));
}
void OGLWidget::paintGL(){
cout<<"Painting"<<endl;
QOpenGLFunctions gl;
gl.initializeOpenGLFunctions();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor(0.05,0.05,0.05,1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.0f,0.0f,-2.0f);
//draw();
shader2.bind();
VAO2->bind();
glDrawElements(GL_TRIANGLES,3,GL_UNSIGNED_INT,inds);
VAO2->release();
}
void OGLWidget::resizeGL(int w, int h){
cout<<"Resizing"<<endl;
glViewport(0,0,w,h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glFrustum (-1.0, 1.0, -1.0, 1.0, 1, 1000.0); //-----Assuming this is right?
glOrtho(-5,5,-5,5,-5,5);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
int OGLWidget::loadModel(string path){
//-----loads the model path if not already loaded, returns the index of the model
//---check if already loaded
for(int p=0;p<loadedPaths.size();p++){
if(!loadedPaths[p].compare(path)){
return p;
}
}
loadedPaths.push_back(path);
//-----continue with path loading
Model m;
m.loadModel(path);
return loadedPaths.size()-1;
}
void OGLWidget::draw(){
cout<<"drawing..."<<endl;
glBegin(GL_TRIANGLES);
for(int i=0;i<sizeof(verts)/sizeof(GLfloat);i+=3){
//cout<<i<<endl;
glVertex3f(verts[i],verts[i+1],verts[i+2]);
}
glEnd();
}
oglwidget.h:
#ifndef OGLWIDGET_H
#define OGLWIDGET_H
#include <QWidget>
#include <QOpenGLWidget>
#include <glm/glm.hpp>
#include <QOpenGLFunctions>
#include <vector>
#include "entity.h"
#include "general.h"
#include <qopenglfunctions_3_3_core.h>
//#include <GL/glu.h>
//#include <GL/gl.h>
#define VERTEXATTR 0
#define INDEXATTR 1
#define POSITIONATTR 2
#define ROTATIONATTR 3
using namespace std;
class OGLWidget : public QOpenGLWidget
{
//QOpenGLFunctions gl;
//QOpenGLFunctions_3_3_Core core;
QOpenGLVertexArrayObject *VAO2;
QOpenGLBuffer VBO2;
QOpenGLShaderProgram shader2;
QOpenGLContext *m_context;
vector<GLuint>statics; //-----buffer number for static models
vector<GLuint>dynamics; //-----buffer number of dynamic models
vector<GLfloat[1]>staticVerts; //-----vertex data for static models
vector<GLfloat[1]>dynamicVerts; //-----vertex data for dynamic models
vector<vector<GLfloat>*>staticPos; //-----position data for static models
vector<vector<GLfloat>*>dynamicPos; //-----position data for dynamic models
vector<GLfloat>staticRot; //-----rotation data for static models
vector<GLfloat>dynamicRot; //-----rotation data for dynamic models
vector<string>loadedPaths; //-----name in folder of matching VBO
GLuint VBO;
GLuint IND;
GLuint FragID;
GLuint VertID;
GLuint shader;
//-----Testing
GLfloat verts[9]={-1.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f};
GLuint inds[3]={0,1,2};
public:
OGLWidget(QWidget *parent = 0);
~OGLWidget();
int loadModel(string path);
private:
void draw();
QGLShaderProgram m_shader;
QGLBuffer m_vertexBuffer;
protected:
void initializeGL();
void resizeGL(int w, int h);
void paintGL();
};
#endif // OGLWIDGET_H
shader.cpp:
const char * vertexShader=
"attribute highp vec4 vertex;\n"
"uniform highp mat4 matrix;\n"
"void main(void)\n"
"{\n"
" gl_Position = matrix * vertex;\n"
"}";
const char * fragmentShader=
"uniform mediump vec4 color;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = color;\n"
"}";
From what I've read (correct me if I'm wrong), the objective is to load the vertex, color, texture and other data into GPU memory using a VAO, rebind the VAO before drawing, draw using glDrawArrays or glDrawElements, and release the VAO. Using the indexed version of the function allows for changes in position, scale and rotation, meaning faster rendering for larger quantities of the same object (i.e. the game tiles), and GL_STATIC_DRAW should be used for objects that are not updated frequently, with GL_DYNAMIC_DRAW for everything else.
I'm wanting to know what I'm missing or doing wrong with what really should be a simple exercise. I've redone this at least 5 times over and am at a complete loss.
OS: Debian Testing
GPU: GeForce 610m
OpenGL Core: 3.3
Qt: 5.6
Software: Qt Creator
Solution, thanks to #PeterT in the comments: move the shader enableAttributeArray and setAttributeBuffer calls (changed from setAttributeArray) to after the VBO allocation, and make the vertex coordinates and the ortho volume 'larger'.
void OGLWidget::initializeGL()
{
QOpenGLFunctions gl;
gl.initializeOpenGLFunctions();
glDisable(GL_CULL_FACE);
cout<<"Init"<<endl;
shader2.create();
shader2.addShaderFromSourceCode(QOpenGLShader::Vertex,vertexShader);
shader2.addShaderFromSourceCode(QOpenGLShader::Fragment,fragmentShader);
shader2.link();
shader2.bind();
int vertexLocation = shader2.attributeLocation("vertex");
int matrixLocation = shader2.uniformLocation("matrix");
int colorLocation = shader2.uniformLocation("color");
QMatrix4x4 pmvMatrix;
//pmvMatrix.ortho(rect());
pmvMatrix.ortho(-100,100,-100,100,-1000,1000);
QColor color(0, 255, 0, 255);
shader2.enableAttributeArray(vertexLocation);
shader2.setUniformValue(matrixLocation, pmvMatrix);
shader2.setUniformValue(colorLocation, color);
//-----Create VAO2
VAO2=new QOpenGLVertexArrayObject(this);
VAO2->create();
VAO2->bind();
VBO2.create();
VBO2.setUsagePattern(QOpenGLBuffer::StaticDraw);
VBO2.bind();
VBO2.allocate(verts,sizeof(verts));
shader2.enableAttributeArray("vertex");
shader2.setAttributeBuffer("vertex",GL_FLOAT,0,3,0);
}
The vertex data was changed to match:
GLfloat verts[9]={-100.0f, 0.0f, 30.0f,
0.0f, 100.0f, 50.0f,
100.0f, 0.0f, 30.0f};
I've been trying to get a handle on using surface references in CUDA for OpenGL interop. I have been able to use a surface object without problems to do exactly this, but I want to support older hardware (compute < 2.5) that does not support surface objects.
Please note: I had a similar question yesterday, and it was suggested that I write a simple proof-of-problem. In doing that I've run into a slightly different problem, so I'm asking a new question. In this case, in the code listed below, when I call cudaBindSurfaceToArray I get the error
CUDA error at test.cu:75 code=37(cudaErrorInvalidSurface) "cudaBindSurfaceToArray(outputSurface, textureArray)"
Unfortunately this "minimal" sample code is pretty long because of the GL interop. It has no external dependencies beyond the CUDA development files and the CUDA samples. It compiles on Linux with the command
nvcc -m64 -ccbin g++ -gencode arch=compute_20,code=sm_20 -I<samples_path>/NVIDIA_CUDA-7.0_Samples/common/inc -lGL -lglut test.cu -o test
This code is very similar to the CUDA example simpleSurfaceWrite. I don't understand how the surface reference in my code is any different from the surface reference in that example. For the record, I am able to successfully compile and run simpleSurfaceWrite. Please note that this uses the definition of cudaBindSurfaceToArray from the CUDA C++ API, just like simpleSurfaceWrite does. I am getting a runtime error, not a compile error.
Here's the code:
#define GL_GLEXT_PROTOTYPES
#include <GL/freeglut.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
GLuint texID;
GLuint bufID;
GLenum passthroughVertexShaderID;
GLenum simpleFragmentShaderID;
GLenum simpleProgramID;
GLint fragmentShaderTextureID;
cudaGraphicsResource *texResource;
const int width = 640;
const int height = 480;
const int blockSize = 8;
// Define a CUDA surface reference
surface<void, cudaSurfaceType2D> outputSurface;
// Define some simple GL shaders
const GLchar *passthroughVertexSource =
"#version 130\n"  // the #version directive must be terminated by a newline
"in vec3 positionAttrib;"
"in vec2 textureAttrib;"
"out vec2 texCoord;"
"void main()"
"{"
" gl_Position = vec4(positionAttrib,1.0f);"
" texCoord = textureAttrib;"
"}";
const GLchar *simpleFragmentSource =
"#version 130\n"  // newline needed after #version here as well
"uniform sampler2DRect tex;"
"in vec2 texCoord;"
"out vec4 fragColor;"
"void main()"
"{"
" fragColor = texture2DRect(tex, texCoord);"
"}";
__global__ void cudaTestKernel() {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float4 sample = make_float4(0.0f, 1.0f, 0.0f, 1.0f);
surf2Dwrite(sample, outputSurface, (int)sizeof(float4)*x, y, cudaBoundaryModeClamp);
}
void displayFunc() {
// Clear the screen (if nothing else is drawn, screen will be blue)
glClear(GL_COLOR_BUFFER_BIT);
// Make the OpenGL texture available to CUDA through the `outputSurface' surface reference
checkCudaErrors(cudaGraphicsMapResources(1, &texResource, 0));
cudaArray *textureArray;
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&textureArray, texResource, 0, 0));
// Bind the array to the surface. This is where I'm getting an error
checkCudaErrors(cudaBindSurfaceToArray(outputSurface, textureArray));
// Call the CUDA kernel
dim3 grid = dim3(width/blockSize, height/blockSize, 1); // one block per blockSize x blockSize tile
dim3 block = dim3(blockSize, blockSize, 1);             // blockSize x blockSize threads per block
cudaTestKernel<<<grid,block>>>();
checkCudaErrors(cudaGraphicsUnmapResources(1, &texResource, 0));
// Call the OpenGL shaders to draw the texture
// If the CUDA kernel was successful, the screen will be green
// If not, it will be gray.
glActiveTexture(GL_TEXTURE0);
glUseProgram(simpleProgramID);
glUniform1i(fragmentShaderTextureID, 0);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, texID);
glBindBuffer(GL_ARRAY_BUFFER, bufID);
glDrawArrays(GL_QUADS, 0, 4);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
glutSwapBuffers();
}
int main(int argc, char **argv) {
float *floatBuf;
// Initialize GLUT/GL
glutInit (&argc, argv);
glutInitWindowSize(width,height);
glutCreateWindow("Surface Test");
glutDisplayFunc(displayFunc);
// Initialize CUDA
findCudaGLDevice(argc, (const char**)argv);
// Create Texture
floatBuf = new float[width*height*4];
for(unsigned int i=0;i<width*height*4;i++)
floatBuf[i]=0.5f;
glGenTextures(1, &texID);
glEnable(GL_TEXTURE_RECTANGLE_NV);
glBindTexture(GL_TEXTURE_RECTANGLE_NV, texID);
glTexImage2D(GL_TEXTURE_RECTANGLE_NV, 0, GL_RGBA32F_ARB, width, height, 0, GL_RGBA, GL_FLOAT, floatBuf);
delete [] floatBuf;
// Map Texture into CUDA
checkCudaErrors(cudaGraphicsGLRegisterImage(&texResource, texID, GL_TEXTURE_RECTANGLE_NV, cudaGraphicsRegisterFlagsSurfaceLoadStore));
// Create shader program
simpleProgramID = glCreateProgram();
simpleFragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
passthroughVertexShaderID = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(simpleFragmentShaderID, 1, &simpleFragmentSource, NULL);
glShaderSource(passthroughVertexShaderID, 1, &passthroughVertexSource, NULL);
glCompileShader(simpleFragmentShaderID);
glCompileShader(passthroughVertexShaderID);
glAttachShader(simpleProgramID, simpleFragmentShaderID);
glAttachShader(simpleProgramID, passthroughVertexShaderID);
glBindAttribLocation(simpleProgramID, 0, "positionAttrib");
glBindAttribLocation(simpleProgramID, 1, "textureAttrib");
glLinkProgram(simpleProgramID);
fragmentShaderTextureID = glGetUniformLocation(simpleProgramID, "tex");
// Create Vertex Array Buffer for rendering texture on screen
#define BUFFER_OFFSET(i) ((char *)NULL + (i))
float v[] = {-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f,
-1.0f, 1.0f, 0.0f,
0.0f, 0.0f,
width, 0.0f,
width, height,
0.0f, height
};
glGenBuffers(1, &bufID);
glBindBuffer(GL_ARRAY_BUFFER, bufID);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*3*4+sizeof(float)*2*4, v, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(sizeof(float)*3*4));
glClearColor(0.f,0.f,1.f,1.f);
glutMainLoop();
}
I'm trying to write a simple fragment shader that should mix 2 or more textures. I've written a test project with Qt 5.4, but for some reason it cannot work with any texture bound to a non-zero texture unit.
It ignores any values in
setUniformValue("tex*", *); (lines 83-90)
and every sampler2D always samples the texture bound to unit 0.
What's wrong?
The source of the test project for Qt 5.4 is available at Bitbucket.
#include <QApplication>
#include <QCoreApplication>
#include <QOffscreenSurface>
#include <QOpenGLContext>
#include <QOpenGLFunctions>
#include <QOpenGLFramebufferObject>
#include <QOpenGLShader>
#include <QOpenGLTexture>
#include <QLabel>
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
QSurfaceFormat format;
format.setMinorVersion( 2 );
format.setMajorVersion( 4 );
format.setProfile( QSurfaceFormat::CompatibilityProfile );
// format.setProfile( QSurfaceFormat::CoreProfile );
QOpenGLContext context;
context.setFormat(format);
if(!context.create()){
qFatal("Cannot create the requested OpenGL context!");
}
QOffscreenSurface surface;
surface.setFormat( format );
surface.create();
context.makeCurrent( &surface );
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
const float c_01SquareVertices[8] ={0.0f, 0.0f,
1.0f, 0.0f,
1.0f, 1.0f,
0.0f, 1.0f};
glVertexPointer(2, GL_FLOAT, 0, c_01SquareVertices);
glTexCoordPointer(2, GL_FLOAT, 0, c_01SquareVertices);
glDisable(GL_DEPTH_TEST);
glDisable(GL_TEXTURE_2D);
int maxTextureUnits;
glGetIntegerv ( GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &maxTextureUnits );
qDebug()<<"GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS" << maxTextureUnits;
QImage texImg = QImage(":/tex/tex");
QOpenGLTexture tex(texImg.mirrored());
QImage texImg1 = QImage(":/tex/tex1");
QOpenGLTexture tex1(texImg1.mirrored());
QImage texImg2 = QImage(":/tex/tex2");
QOpenGLTexture tex2(texImg2.mirrored());
QString fsc =
"uniform sampler2D tex;"
"uniform sampler2D tex1;"
"uniform sampler2D tex2;"
"varying vec4 gl_TexCoord[];"
"void main(void)"
"{"
" gl_FragColor = texture2D(tex2, gl_TexCoord[0].yx * 2.0);"
// " gl_FragColor = texture2D(tex1, gl_TexCoord[0].xy) + texture2D(tex2, gl_TexCoord[0].xy);"
"}";
QOpenGLShader fsh( QOpenGLShader::Fragment, &context );
fsh.compileSourceCode( fsc );
QOpenGLShaderProgram pr( &context );
pr.addShader( &fsh );
pr.link();
QOpenGLFramebufferObjectFormat fboFormat;
// fboFormat.setInternalTextureFormat(GL_ALPHA32F);
QOpenGLFramebufferObject fbo( 1000, 1000, fboFormat );
fbo.bind();
glViewport(0,0,fbo.width(),fbo.height());
glClear(GL_COLOR_BUFFER_BIT);
tex.bind(0);
pr.setUniformValue("tex", GLuint(1));
tex1.bind(2);
pr.setUniformValue("tex1", GLuint(2));
tex2.bind(3);
pr.setUniformValue("tex2", GLuint(3));
pr.bind();
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
fbo.release();
QLabel w;
w.resize(fbo.size());
w.setPixmap(QPixmap::fromImage(fbo.toImage()));
w.show();
return a.exec();
}
In the C API for OpenGL, in order to use or modify an object, you must first bind it*.
Apparently, pr.setUniformValue does not bind pr before calling glUniform to change the uniform value. While a bit inconvenient and unintuitive, it's understandable; redundantly binding the same shader over and over has a performance overhead.
So just move pr.bind() to above where you call pr.setUniformValue.
*The EXT_direct_state_access extension allows you to modify objects without binding them, but requires a different API and is not guaranteed to appear in OpenGL versions before 4.5 (the latest).
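For example, a sketch of the reordered calls from the question, binding the program first and then giving each sampler the index of the texture unit its texture is bound to:
pr.bind();                              // program must be current before setting uniforms
tex.bind(0);                            // texture unit 0
pr.setUniformValue("tex", GLuint(0));
tex1.bind(1);                           // texture unit 1
pr.setUniformValue("tex1", GLuint(1));
tex2.bind(2);                           // texture unit 2
pr.setUniformValue("tex2", GLuint(2));
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);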
OS: Mac OS X 10.8.3
Compiler: clang 3.2
I am an OpenGL beginner, trying to play with OpenGL using Qt 5.
There are two problems with this simple program (which plots a triangle):
I can't see the triangle.
The program does not exit even when I close the window.
hpp
#include <QGLWidget>
#include <QtGui/QOpenGLFunctions>
#include <QtGui/QOpenGLShaderProgram>
class QWidget;
class ch1HelloTriangle : public QGLWidget, protected QOpenGLFunctions
{
Q_OBJECT
public:
explicit ch1HelloTriangle(QWidget *parent = 0);
protected:
virtual void initializeGL();
void initShaders();
void InitializeVertexBuffer();
virtual void resizeGL(int w, int h);
virtual void paintGL();
private:
QOpenGLShaderProgram program;
GLuint positionBufferObject;
};
.cpp
#include <locale.h>
#include <QWidget>
#include "ch1HelloTriangle.hpp"
namespace
{
float const vertexPositions[] = {
0.75f, 0.75f, 0.0f, 1.0f,
0.75f, -0.75f, 0.0f, 1.0f,
-0.75f, -0.75f, 0.0f, 1.0f,
};
}
ch1HelloTriangle::ch1HelloTriangle(QWidget *parent) :
QGLWidget(parent)
{
}
void ch1HelloTriangle::initializeGL()
{
initializeOpenGLFunctions();
InitializeVertexBuffer();
initShaders();
}
void ch1HelloTriangle::initShaders()
{
// Override system locale until shaders are compiled
setlocale(LC_NUMERIC, "C");
// Compile vertex shader
if (!program.addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute vec4 position;\n"
"void main()\n"
"{\n"
" gl_Position = position;\n"
"}\n"))
{
close();
}
// Compile fragment shader
if (!program.addShaderFromSourceCode(QOpenGLShader::Fragment,
"out vec4 outputColor;\n"
"void main()\n"
"{\n"
" outputColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);\n"
"}\n"))
{
close();
}
// Link shader pipeline
if (!program.link())
close();
// Bind shader pipeline for use
if (!program.bind())
close();
// Restore system locale
setlocale(LC_ALL, "");
}
void ch1HelloTriangle::InitializeVertexBuffer()
{
glGenBuffers(1, &positionBufferObject);
glBindBuffer(GL_ARRAY_BUFFER, positionBufferObject);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexPositions), vertexPositions, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void ch1HelloTriangle::resizeGL(int w, int h)
{
// Set OpenGL viewport to cover whole widget
glViewport(0, 0, w, h);
}
void ch1HelloTriangle::paintGL()
{
/*
//codes propose by http://stackoverflow.com/questions/13111291/displaying-a-triangle-with-qt-and-opengl?rq=1, can't see the triangle either
QSize viewport_size = size();
glViewport(0, 0, viewport_size.width(), viewport_size.height());
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-1, 1, -1, 1, 5, 7); // near and far match your triangle Z distance
glMatrixMode(GL_MODELVIEW);*/
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glBindBuffer(GL_ARRAY_BUFFER, positionBufferObject);
int vertexLocation = program.attributeLocation("position");
program.enableAttributeArray(vertexLocation);
glVertexAttribPointer(vertexLocation, 4, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
main.cpp
#include <QApplication>
#include "ch1HelloTriangle.hpp"
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
ch1HelloTriangle ch1;
ch1.show();
return a.exec();
}
After a lot of trial and error, I solved the problem.
I changed two things:
1: read the shaders from files
// Compile vertex shader
if (!program.addShaderFromSourceFile(QOpenGLShader::Vertex, "modernOpenGLShader/ch1/vertex")){
QMessageBox::warning(this, "QOpenGLShader::Vertex", "QOpenGLShader::Vertex" + program.log());
close();
}
// Compile fragment shader
if (!program.addShaderFromSourceFile(QOpenGLShader::Fragment, "modernOpenGLShader/ch1/frag")){
QMessageBox::warning(this, "QOpenGLShader::Fragment", "QOpenGLShader::Fragment" + program.log());
close();
}
2: change the fragment shader to remove the output variable
void main() {
//set every drawn pixel to white
gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
}
According to the websites "modern opengl" (http://www.arcsynthesis.org/gltut/) and "another site" (tomdalling.com/blog/modern-opengl/01-getting-started-in-xcode-and-visual-cpp/), the output qualifier should exist (at least the code from the second website works on my PC).
But Qt throws the error message
QOpenGLShader::Fragment ERROR: 0:4: Invalid qualifiers 'out' in global variable context
Does anyone know why? Thanks.
I want to play a YUV video sequence using Qt. Right now I am using QPixmap and drawing to it pixel by pixel with DrawPixel. However, it can't play the video in real time. What can I do to improve the speed?
Did you try using the Phonon classes like VideoPlayer?
Take a look at this:
http://doc.qt.io/archives/4.6/phonon-videoplayer.html
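A minimal sketch based on that documentation page (Phonon is the Qt 4 multimedia module; the parent widget and the file path are placeholders):
Phonon::VideoPlayer *player = new Phonon::VideoPlayer(Phonon::VideoCategory, parentWidget);
player->play(Phonon::MediaSource("/path/to/video.avi"));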
Pixel by pixel is about the slowest method of creating a picture. It would improve performance a lot if you processed the image data beforehand and used QPixmap's loadFromData() method.
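For raw decoded frames, a related approach is to wrap the whole buffer in a QImage and convert it once per frame rather than drawing pixels individually. A sketch, assuming frameData points to packed RGB888 pixels of size width x height and label is the QLabel showing the video:
QImage frame(frameData, width, height, width * 3, QImage::Format_RGB888);
label->setPixmap(QPixmap::fromImage(frame)); // one conversion per frame, no per-pixel calls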
Well, DrawPixel is definitely the worst-performing solution.
Nowadays (Qt 5), QOpenGLWidget can be used for rendering video frames to a texture.
Actually, depending on the video pixel format, it is either plain texture rendering or a pixel format conversion via shaders followed by texture drawing.
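For illustration, the shader-conversion route might use a fragment shader along these lines (a sketch only, assuming planar YUV uploaded as three single-channel textures; the code below handles RGBA and does not need it):
// Sketch: BT.601 YUV -> RGB conversion, one sampler per plane (not part of the class below)
static const char* yuvToRgbFragmentShader =
    "uniform sampler2D texY;\n"
    "uniform sampler2D texU;\n"
    "uniform sampler2D texV;\n"
    "varying vec2 vTextureCoord;\n"
    "void main(void)\n"
    "{\n"
    "    float y = texture2D(texY, vTextureCoord).r;\n"
    "    float u = texture2D(texU, vTextureCoord).r - 0.5;\n"
    "    float v = texture2D(texV, vTextureCoord).r - 0.5;\n"
    "    gl_FragColor = vec4(y + 1.402 * v,\n"
    "                        y - 0.344 * u - 0.714 * v,\n"
    "                        y + 1.772 * u,\n"
    "                        1.0);\n"
    "}\n";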
The question is old, so I'll leave a sketchy solution here just because it took me some time to get to it myself once. The simplest (not the best, because lots of optimizations are possible) solution is:
OpenGLDisplayRGB.h
#pragma once
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QScopedPointer>
#include <QException>
/*!
* \brief The OpenGLDisplay class
* Simple OpenGL display, that renders RGBA to texture
*/
class OpenGLDisplayRGB : public QOpenGLWidget, public QOpenGLFunctions
{
Q_OBJECT
public:
explicit OpenGLDisplayRGB(QWidget* parent = nullptr);
~OpenGLDisplayRGB() override;
protected:
void initializeGL() override;
void resizeGL(int w, int h) override;
void paintGL() override;
void closeEvent(QCloseEvent* e) override;
public:
void DisplayVideoFrame(unsigned char* data, int frameWidth, int frameHeight);
Q_SIGNAL void closed();
private:
struct OpenGLDisplayRGBImpl;
QScopedPointer<OpenGLDisplayRGBImpl> impl;
};
OpenGLDisplayRGB.cpp
#include "OpenGLDisplayRGB.h"
#include <QOpenGLShader>
#include <QOpenGLTexture>
#include <QCoreApplication>
#include <QResizeEvent>
#include <QTimer>
#include <QDebug>
#define ATTRIB_VERTEX 0
#define ATTRIB_TEXTURE 1
namespace
{
//Vertex matrix
static const GLfloat vertexVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, 1.0f,
};
//Texture matrix
static const GLfloat textureVertices[] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
}
struct OpenGLDisplayRGB::OpenGLDisplayRGBImpl
{
OpenGLDisplayRGBImpl(QObject* ownerPtr)
: mBufRGB(nullptr)
//, mRepaintTimer(new QTimer(ownerPtr))
, mEnabled(true)
, mShaderProgram(new QOpenGLShaderProgram(ownerPtr))
, mTexture(new QOpenGLTexture(QOpenGLTexture::Target2D))
{ }
unsigned char* mBufRGB;
//QTimer* mRepaintTimer;
bool mEnabled;
QOpenGLShader* mVShader;
QOpenGLShader* mFShader;
QOpenGLShaderProgram* mShaderProgram;
QScopedPointer<QOpenGLTexture> mTexture;
int mTextureUniform;
GLsizei mVideoW, mVideoH;
};
/*************************************************************************/
OpenGLDisplayRGB::OpenGLDisplayRGB(QWidget* parent)
: QOpenGLWidget(parent)
, impl(new OpenGLDisplayRGBImpl(this))
{
setAttribute(Qt::WA_OpaquePaintEvent);
// setAttribute(Qt::WA_PaintOnScreen);
setAttribute(Qt::WA_NoSystemBackground);
/*
impl->mRepaintTimer->setInterval(50);
connect(impl->mRepaintTimer, SIGNAL(timeout()), this, SLOT(update()));
impl->mRepaintTimer->start();*/
}
OpenGLDisplayRGB::~OpenGLDisplayRGB()
{
makeCurrent();
}
void OpenGLDisplayRGB::DisplayVideoFrame(unsigned char *data, int frameWidth, int frameHeight)
{
impl->mVideoW = frameWidth;
impl->mVideoH = frameHeight;
impl->mBufRGB = data;
update();
}
void OpenGLDisplayRGB::initializeGL()
{
initializeOpenGLFunctions();
glEnable(GL_DEPTH_TEST);
/* Modern opengl rendering pipeline relies on shaders to handle incoming data.
* Shader: is a small function written in OpenGL Shading Language (GLSL).
* GLSL is the language that makes up all OpenGL shaders.
* The syntax of the specific GLSL language requires the reader to find relevant information. */
impl->mEnabled = impl->mShaderProgram->addShaderFromSourceFile(QOpenGLShader::Vertex, ":/OpenGL/simple_vertex_shader.v.glsl");
if(!impl->mEnabled)
qDebug() << QString("[Error] Vertex shader failed: %1").arg(impl->mShaderProgram->log());
// Capture the result so a fragment shader failure is actually detected and reported
impl->mEnabled = impl->mShaderProgram->addShaderFromSourceFile(QOpenGLShader::Fragment, ":/OpenGL/simple_texture_shader.f.glsl") && impl->mEnabled;
if(!impl->mEnabled)
qDebug() << QString("[Error] Fragment shader failed: %1").arg(impl->mShaderProgram->log());
// Bind the property vertexIn to the specified location ATTRIB_VERTEX, this property
// has a declaration in the vertex shader source
impl->mShaderProgram->bindAttributeLocation("vertexIn", ATTRIB_VERTEX);
// Bind the attribute textureIn to the specified location ATTRIB_TEXTURE, the attribute
// has a declaration in the vertex shader source
impl->mShaderProgram->bindAttributeLocation("textureIn", ATTRIB_TEXTURE);
//Link all the shader programs added to
impl->mShaderProgram->link();
//activate all links
impl->mShaderProgram->bind();
// Read the position of the data variable tex_rgb in the shader, the declaration
// of these variables can be seen in
// fragment shader source
impl->mTextureUniform = impl->mShaderProgram->uniformLocation("uSampler");
// Set the value of the vertex matrix of the attribute ATTRIB_VERTEX and format
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, vertexVertices);
// Set the texture matrix value and format of the attribute ATTRIB_TEXTURE
glVertexAttribPointer(ATTRIB_TEXTURE, 2, GL_FLOAT, 0, 0, textureVertices);
// Enable the ATTRIB_VERTEX attribute data, the default is off
glEnableVertexAttribArray(ATTRIB_VERTEX);
// Enable the ATTRIB_TEXTURE attribute data, the default is off
glEnableVertexAttribArray(ATTRIB_TEXTURE);
impl->mTexture->create();
impl->mTexture->setMinMagFilters(QOpenGLTexture::Linear, QOpenGLTexture::Linear);
impl->mTexture->setWrapMode(QOpenGLTexture::ClampToEdge);
glClearColor (1.0f, 0.0f, 1.0f, 1.0f); // set the background color
}
void OpenGLDisplayRGB::resizeGL(int w, int h)
{
if(h == 0)// prevents being divided by zero
h = 1;// set the height to 1
// Set the viewport
glViewport(0, 0, w, h);
}
void OpenGLDisplayRGB::paintGL()
{
if (!impl->mEnabled || !impl->mBufRGB)
return; //RET
// Load y data texture
// Activate the texture unit GL_TEXTURE0
glActiveTexture(GL_TEXTURE0);
// Use the texture generated from y to generate texture
glBindTexture(GL_TEXTURE_2D, impl->mTexture->textureId());
// Use the memory mBufYuv data to create a real y data texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, impl->mVideoW, impl->mVideoH, 0, GL_RGBA, GL_UNSIGNED_BYTE, impl->mBufRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Specify y texture to use the new value can only use 0, 1, 2, etc. to represent
// the index of the texture unit, this is the place where opengl is not humanized
//0 corresponds to the texture unit GL_TEXTURE0 1 corresponds to the
// texture unit GL_TEXTURE1 2 corresponds to the texture unit GL_TEXTURE2
glUniform1i(impl->mTextureUniform, 0);
// Use the vertex array way to draw graphics
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
void OpenGLDisplayRGB::closeEvent(QCloseEvent *e)
{
emit closed();
e->accept();
}
simple_texture_shader.f.glsl
varying vec2 vTextureCoord;
uniform sampler2D uSampler;
void main(void)
{
gl_FragColor = texture2D(uSampler, vTextureCoord);
}
simple_vertex_shader.v.glsl
attribute vec4 vertexIn;
attribute vec2 textureIn;
varying vec2 vTextureCoord;
void main(void)
{
gl_Position = vertexIn;
vTextureCoord = textureIn;
}