How to use PBO with Qt OpenGL - c++

I'm trying to use QOpenGLBuffer as a Pixel Buffer Object. The goal would be to display a high-resolution video stream (4K, 60 FPS), so I need good performance.
Since I'm just starting with OpenGL, I'm first trying to display a simple 2D texture as well as possible. I've already brought in a VBO and a VAO; the next step (as I read it) would be to use a PBO for better performance.
There are tutorials out there for PBOs, but they use glGenBuffersARB(), not QOpenGLBuffer.
Here is my code:
glwidget.h
#ifndef GLWIDGET_H
#define GLWIDGET_H
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QOpenGLBuffer>
#include <QDebug>
#include <QOpenGLTexture>
#include <QOpenGLShader>
#include <QOpenGLShaderProgram>
#include <QOpenGLVertexArrayObject>
class GLWidget : public QOpenGLWidget, protected QOpenGLFunctions
{
Q_OBJECT
public:
explicit GLWidget(QWidget *parent = 0);
~GLWidget();
void initializeGL();
void paintGL();
void resizeGL(int w, int h);
void LoadGLTextures();
private :
QOpenGLShaderProgram *program;
QOpenGLBuffer vbo;
QOpenGLVertexArrayObject vao;
GLuint tex;
GLint vertexLocation;
GLint texcoordLocation;
int tailleVerticesBytes;
int tailleCoordTexturesBytes;
float vertices[8];
float coordTexture[8];
public slots:
private slots:
};
#endif // GLWIDGET_H
glwidget.cpp
#ifndef BUFFER_OFFSET
#define BUFFER_OFFSET(offset) ((char*)NULL + (offset))
#endif
#include "glwidget.h"
#include <QElapsedTimer>
GLWidget::GLWidget(QWidget *parent) :
QOpenGLWidget(parent)
{
tailleVerticesBytes = 8*sizeof(float);
tailleCoordTexturesBytes = 8*sizeof(float);
}
GLWidget::~GLWidget(){
vao.destroy();
vbo.destroy();
delete program;
glDeleteTextures(1, &tex);
}
void GLWidget::LoadGLTextures(){
QImage img;
if(!img.load("C:\\Users\\Adrien\\Desktop\\open3.bmp")){
qDebug()<<"Image loading failed";
}
QImage t = (img.convertToFormat(QImage::Format_RGBA8888)).mirrored();
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, t.width(), t.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, t.bits());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture( GL_TEXTURE_2D, 0);
}
void GLWidget::initializeGL(){
float verticesTmp[] = {-1.0,-1.0, 1.0,-1.0, 1.0,1.0, -1.0,1.0};
float coordTextureTmp[] = {0.0,0.0, 1.0,0.0, 1.0,1.0, 0.0,1.0};
for(int i = 0; i<8; i++){
vertices[i] = verticesTmp[i];
coordTexture[i] = coordTextureTmp[i];
}
initializeOpenGLFunctions();
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
LoadGLTextures();
//Shader setup
QOpenGLShader *vshader = new QOpenGLShader(QOpenGLShader::Vertex, this);
const char *vsrc =
"#version 150 core\n"
"in vec2 in_Vertex;\n"
"in vec2 vertTexCoord;\n"
"out vec2 fragTexCoord;\n"
"void main(void)\n"
"{\n"
" gl_Position = vec4(in_Vertex, 0.0, 1.0);\n"
" fragTexCoord = vertTexCoord;\n"
"}\n";
vshader->compileSourceCode(vsrc);
QOpenGLShader *fshader = new QOpenGLShader(QOpenGLShader::Fragment, this);
const char *fsrc =
"#version 150 core\n"
"uniform sampler2D tex;\n"
"in vec2 fragTexCoord;\n"
"out vec4 fragColor;\n"
"void main(void)\n"
"{\n"
" fragColor = texture(tex, fragTexCoord);\n"
"}\n";
fshader->compileSourceCode(fsrc);
program = new QOpenGLShaderProgram;
program->addShader(vshader);
program->addShader(fshader);
program->link();
program->bind();
glActiveTexture(GL_TEXTURE0);
program->setUniformValue("tex", 0);
vertexLocation = glGetAttribLocation(program->programId(), "in_Vertex");
texcoordLocation = glGetAttribLocation(program->programId(), "vertTexCoord");
//VAO setup
vao.create();
vao.bind();
//VBO setup
vbo.create();
vbo.setUsagePattern(QOpenGLBuffer::StaticDraw);
vbo.bind();
vbo.allocate(tailleVerticesBytes + tailleCoordTexturesBytes);
vbo.write(0, vertices, tailleVerticesBytes);
vbo.write(tailleVerticesBytes, coordTexture, tailleCoordTexturesBytes);
program->enableAttributeArray(vertexLocation);
program->setAttributeBuffer(vertexLocation, GL_FLOAT, 0, 2);
program->enableAttributeArray(texcoordLocation);
program->setAttributeBuffer(texcoordLocation, GL_FLOAT, tailleVerticesBytes, 2);
vbo.release();
vao.release();
program->release();
}
void GLWidget::paintGL(){
glClear(GL_COLOR_BUFFER_BIT);
program->bind();
{
vao.bind();
glBindTexture(GL_TEXTURE_2D, tex);
glDrawArrays(GL_TRIANGLE_FAN, 0, 4); // GL_QUADS does not exist in core profile contexts
glBindTexture(GL_TEXTURE_2D, 0);
vao.release();
}
program->release();
}
void GLWidget::resizeGL(int w, int h){
glViewport(0, 0, (GLint)w, (GLint)h);
}
So basically, how would I go about using a PBO in this code?
The first thing to do would be to create a QOpenGLBuffer object while specifying the type (QOpenGLBuffer::PixelUnpackBuffer); then I guess I would need to upload the pixels to the buffer and finally use it instead of glTexImage2D? That's just the general idea, and I have no idea how to actually do it.
Thanks.

The goal would be to display a high-resolution video stream (4K, 60 FPS), so I need good performance.
The one proper way to do this is to use an accelerated presentation API (which has nothing to do with OpenGL).
If you want to stick with OpenGL, you'd want at least to have the GPU do the video decoding and uploading into a texture. How to do so depends on your OS and GPU. For instance, under Linux and using NVIDIA, you can use VDPAU for accelerated decoding and NV_VDPAU_interop for getting textures populated with decoded frames.
If you still want to use Pixel Unpack Buffer Objects for this (PUBO; you're uploading into GL => it's an unpack), there's very little magic involved. Create one:
QOpenGLBuffer *pubo = new QOpenGLBuffer(QOpenGLBuffer::PixelUnpackBuffer);
pubo->create();
then populate it with your frame's data:
pubo->bind();
pubo->allocate(...); // or, if already allocated, also write/map
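Concretely, a minimal sketch of one frame's upload (frameData and frameBytes are hypothetical names for your decoded frame's pixels and byte size):
pubo->bind();
pubo->allocate(frameBytes);            // one-time allocation, sized for a full frame
pubo->write(0, frameData, frameBytes); // or map()/memcpy/unmap() for very large frames
pubo->release();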
Now the effect of a PUBO is that, if one is bound, certain calls will change semantics to read data not from user memory but from the PUBO. Notably, the calls that upload texture data. So if you have your texture around (and you should be using QOpenGLTexture, which uses immutable storage, not manual calls to glTexImage), you can do:
pubo->bind();
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D,
level,
internalFormat,
width,
height,
border,
externalFormat,
type,
data);
Since there's a PUBO bound, the very last argument (data) changes semantics: it's no longer a pointer into client memory, but it's a byte offset into the currently bound pixel unpack buffer object. So, if your texture data starts at offset 0 into the buffer, you need to pass 0 there (or, actually, (const GLvoid *)0). Otherwise you'll need to adjust it accordingly.
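Incidentally, that byte-offset convention is exactly what the BUFFER_OFFSET macro from your own code expresses. A sketch of a subsequent frame upload that respecifies only the pixel data (width and height as in your texture):
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                GL_RGBA, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0)); // offset 0 into the bound PUBO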
Now you can release the pubo:
pubo->release();
and then use the texture as usual in your shaders and everything will be just fine.
Except that if you use the texture straight away you won't get any performance improvement! The whole point of this complicated setup is to allow the GL to transfer the image data asynchronously, while you render the data uploaded in the previous frame(s). If you use the image data immediately, GL needs to sync the entire pipeline in order to wait for the image data to be uploaded to the GPU.
So the typical setup in this scenario is having a number of PUBOs used in a round-robin fashion. For instance, if you have two (in ping-pong), you upload data into one PBO and use the previous one to populate and draw the texture. This should buy "enough time" for the GL to actually transfer the current data across the bus, so at the next frame the texture upload and the draw will find the data immediately available.
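A minimal sketch of that double buffering, assuming two already-created PUBOs in a pubos[2] array plus the hypothetical frameData/frameBytes from above:
static int index = 0;
QOpenGLBuffer *uploadBuf  = pubos[index];     // receives this frame's pixels
QOpenGLBuffer *textureBuf = pubos[1 - index]; // was filled during the previous frame
index = 1 - index;
uploadBuf->bind();
uploadBuf->write(0, frameData, frameBytes);   // kicks off an asynchronous transfer
uploadBuf->release();
textureBuf->bind();
glBindTexture(GL_TEXTURE_2D, textureId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                GL_RGBA, GL_UNSIGNED_BYTE, BUFFER_OFFSET(0));
textureBuf->release();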
Ideally, you could also perform the upload of data in PUBOs from another thread using a shared OpenGL context, and use fences to signal the render thread when the upload is complete so that the texture can be populated. And you can build further on by using orphaning, pinned maps, unsynchronized writes, and even more.
A great in-depth explanation of all of this is available in chapters 28 and 29 of OpenGL Insights, which I can't reproduce in full here; it also comes with some code available here.
You can also find more info about buffer object streaming on the OpenGL wiki here.

Related

Can't load multiple texture on OpenGL

I'm trying to load multiple textures in OpenGL.
To validate this, I want to load two textures and mix them with the following fragment shader:
#version 330 core
out vec4 color;
in vec2 v_TexCoord;
uniform sampler2D u_Texture0;
uniform sampler2D u_Texture1;
void main()
{
color = mix(texture(u_Texture0, v_TexCoord), texture(u_Texture1, v_TexCoord), 0.5);
}
I've abstracted some of OpenGL's functionality into classes like Shader, Texture, UniformXX, etc.
Here's an attempt to load the two textures into the sampler units of the fragment shader:
Shader shader;
shader.Attach(GL_VERTEX_SHADER, "res/shaders/vs1.shader");
shader.Attach(GL_FRAGMENT_SHADER, "res/shaders/fs1.shader");
shader.Link();
shader.Bind();
Texture texture0("res/textures/container.jpg", GL_RGB, GL_RGB);
texture0.Bind(0);
Uniform1i textureUnit0Uniform("u_Texture0");
textureUnit0Uniform.SetValues({ 0 });
shader.SetUniform(textureUnit0Uniform);
Texture texture1("res/textures/awesomeface.png", GL_RGBA, GL_RGBA);
texture1.Bind(1);
Uniform1i textureUnit1Uniform("u_Texture1");
textureUnit1Uniform.SetValues({ 1 });
shader.SetUniform(textureUnit1Uniform);
Here's what the Texture implementation looks like:
#include "Texture.h"
#include "Renderer.h"
#include "stb_image/stb_image.h"
Texture::Texture(const std::string& path, unsigned int destinationFormat, unsigned int sourceFormat)
: m_Path(path)
{
stbi_set_flip_vertically_on_load(1);
m_Buffer = stbi_load(path.c_str(), &m_Width, &m_Height, &m_BPP, 0);
GLCALL(glGenTextures(1, &m_RendererID));
GLCALL(glBindTexture(GL_TEXTURE_2D, m_RendererID));
GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT));
GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT));
GLCALL(glTexImage2D(GL_TEXTURE_2D, 0, destinationFormat, m_Width, m_Height, 0, sourceFormat, GL_UNSIGNED_BYTE, m_Buffer));
glGenerateMipmap(GL_TEXTURE_2D);
GLCALL(glBindTexture(GL_TEXTURE_2D, 0));
if (m_Buffer)
stbi_image_free(m_Buffer);
}
Texture::~Texture()
{
GLCALL(glDeleteTextures(1, &m_RendererID));
}
void Texture::Bind(unsigned int unit) const
{
GLCALL(glActiveTexture(GL_TEXTURE0 + unit));
GLCALL(glBindTexture(GL_TEXTURE_2D, m_RendererID));
}
void Texture::Unbind() const
{
GLCALL(glBindTexture(GL_TEXTURE_2D, 0));
}
Now, instead of getting an even mix of both textures, I only get the second texture, blended with the background.
I've pinpointed the problem to the constructor of the Texture implementation: if I comment out the initialization of the second texture so that its constructor is never called, the first texture shows up.
Can anyone suggest what I'm doing wrong?
Took me a while to spot, but at the point where you call the constructor of the second texture, your active texture unit is still 0, so the constructor happily rebinds unit 0 to the new texture and you are left with two texture units bound to the same texture.
The solution should be simple enough: do not interleave texture creation and texture unit assignment, by creating the textures first and only then binding them explicitly.
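Using your own classes, that reordering would look like this (a sketch):
// Create both textures first; each constructor rebinds whatever unit is active...
Texture texture0("res/textures/container.jpg", GL_RGB, GL_RGB);
Texture texture1("res/textures/awesomeface.png", GL_RGBA, GL_RGBA);
// ...and only then assign them to their units explicitly.
texture0.Bind(0);
texture1.Bind(1);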
Better yet, look into using direct state access to avoid all this binding.
To highlight the problem for future viewers of this question, this is the problematic sequence of calls:
// constructor of texture 1
glGenTextures(1, &container)
glBindTexture(GL_TEXTURE_2D, container) // Texture Unit 0 is now bound to container
// explicit texture0.Bind call
glActiveTexture(GL_TEXTURE0) // noop
glBindTexture(GL_TEXTURE_2D, container) // Texture Unit 0 is now bound to container
// constructor of texture 2
glGenTextures(1, &awesomeface)
glBindTexture(GL_TEXTURE_2D, awesomeface) // Texture Unit 0 is now bound to awesomeface instead of container.
// explicit texture1.Bind call
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, awesomeface) // Texture Unit 0 and 1 are now bound to awesomeface.
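And for completeness, a rough sketch of the direct state access route (OpenGL 4.5) mentioned above, which edits textures by name and so sidesteps the bind-to-edit trap entirely (width, height and pixels are placeholders):
GLuint tex[2];
glCreateTextures(GL_TEXTURE_2D, 2, tex);  // created fully initialized, no bind needed
glTextureParameteri(tex[0], GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTextureStorage2D(tex[0], 1, GL_RGBA8, width, height);
glTextureSubImage2D(tex[0], 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glBindTextureUnit(0, tex[0]);             // no glActiveTexture involved
glBindTextureUnit(1, tex[1]);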

OpenGL crashes on a virtual machine (Ubuntu 16.04) set up on VMware Fusion version 10.1.3

I'm trying to draw some graphics using OpenGL.
I'm using VMware Fusion version 10.1.3 with Ubuntu 16.04 and Qt Creator 4.6.2.
My task is to play back a recording that includes some drawing in OpenGL.
For that, I need an FBO to accumulate the content of my drawing and produce an image at the end of the recording, and I also need a texture to display on screen while the recording plays.
My problem is a persistent crash while destroying the object in which I use OpenGL.
Here is my code:
void DrawingView::paint(QPainter *painter) {
painter->beginNativePainting();
if(_needsErase) {
QOpenGLContext *context = QOpenGLContext::currentContext();
_frameBuffer = new QOpenGLFramebufferObject(QSize(_rect.width(), _rect.height()), _format);
glBindTexture(GL_TEXTURE_2D, _frameBuffer->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_FALSE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _rect.width(), _rect.height(), 0, GL_RGBA , GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
_frameBuffer->bind();
{
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
_frameBuffer->release();
_needsErase = false;
}
glEnable(GL_BLEND);
_frameBuffer->bind();
{ /* here I'm drawing to my fbo; this part is irrelevant */ }
_frameBuffer->release();
_shaderProgram->release();
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glUniform1i(_tex0Uniform, 0);
_copyBuffer->bind();
glActiveTexture(GL_TEXTURE0);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, _frameBuffer->texture());
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glActiveTexture(GL_TEXTURE0);
glBindTexture( GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
_copyBuffer->release();
glDisable(GL_BLEND);
painter->endNativePainting();
}
The app crashes while my DrawingView object is being destroyed, after the line delete _frameBuffer;, with this log:
context mismatch in svga_surface_destroy
VMware: vmw_ioctl_command error Invalid argument.
Here is how I free my DrawingView object:
void DrawingView::cleanupGL() {
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
_shaderProgram->release();
_frameBuffer->release();
_copyBuffer->release();
delete _shaderProgram;
delete _copyBuffer;
delete _frameBuffer;
glDeleteBuffers(1, &_ebo);
glDeleteBuffers(1, &_vbo);
}
I noticed that app stops crashing when I erase line with glDrawArrays(...) command, but I need this command to display drawing during playing the recording.
I was using this code also on macOS and there everything was working PERFECTLY, but unfortunately, I need to use this code on a virtual machine with Ubuntu.
Here is also the code of my vertex and fragment shaders:
Vertex shader:
uniform vec2 screenSize;
attribute vec2 position;
varying vec2 coord;
void main(void)
{
coord = position;
vec2 halfScreenSize = screenSize * 0.5f;
vec2 pos = halfScreenSize * position + halfScreenSize;
gl_Position = gl_ModelViewProjectionMatrix * vec4(pos, 0.0, 1.0);
}
Fragment shader:
uniform sampler2D tex0;
varying vec2 coord;
void main(void)
{
vec2 coords = coord * 0.5 + 0.5;
gl_FragColor = texture2D(tex0, coords.xy);
}
Do anyone have any idea why this doesn't want to work on a virtual machine?
I've tried to be specific in describing my problem, but if you have any further questions, please ask.
You create a (new) QOpenGLFramebufferObject instance only if _needsErase is ever set to true.
if(_needsErase) {
QOpenGLContext *context = QOpenGLContext::currentContext();
_frameBuffer = new QOpenGLFramebufferObject(…
If that doesn't happen, it will be an uninitialized pointer, and calling member functions on it will invoke undefined behaviour. This particular code is wrong anyway, because it never deletes whatever instance may have been pointed to by _frameBuffer before overwriting the pointer.
Instead of manual memory management, I strongly advise making use of automatic constructs. I appreciate the need for dynamic instance creation in this particular case; the way to go about it is either std::shared_ptr or std::unique_ptr. I strongly suggest starting with std::unique_ptr and changing to a shared pointer only if absolutely needed.
In your DrawingView class
#include <memory>
class DrawingView : public ... {
...
protected:
std::unique_ptr<QOpenGLFramebufferObject> _frameBuffer;
...
};
in the DrawingView::paint method:
if(_needsErase) {
QOpenGLContext *context = QOpenGLContext::currentContext();
_frameBuffer = std::make_unique<QOpenGLFramebufferObject>(…
Do not use new or delete.
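As a follow-up sketch, here is what the cleanup side becomes under this scheme, assuming _copyBuffer gets the same std::unique_ptr treatment and that the context can be made current here (the "context mismatch in svga_surface_destroy" log suggests the FBO was being destroyed with the wrong context bound; makeCurrent() stands in for however your class reaches its context):
void DrawingView::cleanupGL() {
    makeCurrent();         // GL resources must be destroyed with their context current
    _frameBuffer.reset();  // deletes the QOpenGLFramebufferObject, if one was ever created
    _copyBuffer.reset();
    glDeleteBuffers(1, &_ebo);
    glDeleteBuffers(1, &_vbo);
    doneCurrent();
}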

Rendering text - FreeType blank screen

I am using FreeType, and the only thing I have left to do in order to render text is to convert an FT_Bitmap to something that can be rendered with OpenGL. Can someone explain how to do this? I am using GLFW. The way I have tried it just gives a blank screen. Here is the code that I am using:
#include <exception>
#include <iostream>
#include <string>
#include <glew.h>
#include <GL/glfw.h>
#include <iterator>
#include "../include/TextRenderer.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#include <stdexcept>
#include <freetype/ftglyph.h>
using std::runtime_error;
using std::cout;
TextRenderer::TextRenderer(int x, int y, FT_Face Face, std::string s)
{
FT_Set_Char_Size(
Face, /* handle to face object */
0, /* char_width in 1/64th of points */
16*64, /* char_height in 1/64th of points */
0, /* horizontal device resolution */
0 ); /* vertical device resolution */
slot= Face->glyph;
text = s;
setsx(x);
setsy(y);
penX = x;
penY = y;
face = Face;
//shaders
GLuint v = glCreateShader(GL_VERTEX_SHADER) ;
const char* vs = "void main(){ gl_Position = ftransform();}";
glShaderSource(v,1,&vs,NULL);
glCompileShader(v);
GLuint f = glCreateShader(GL_FRAGMENT_SHADER) ;
const char* fs = "uniform sampler2D texture1; void main() { gl_FragColor = texture2D(texture1, gl_TexCoord[0].st); //And that is all we need}";
glShaderSource(f,1,&fs,NULL);
glCompileShader(f);
Program= glCreateProgram();
glAttachShader(Program,v);
glAttachShader(Program,f);
glLinkProgram(Program);
}
void TextRenderer::render()
{
glUseProgram(Program);
FT_UInt glyph_index;
for ( int n = 0; n < text.size(); n++ )
{
/* retrieve glyph index from character code */
glyph_index = FT_Get_Char_Index( face, text[n] );
/* load glyph image into the slot (erase previous one) */
error = FT_Load_Glyph( face, glyph_index, FT_LOAD_RENDER );
draw(&face->glyph->bitmap,penX + slot->bitmap_left,penY - slot->bitmap_top );
penX += *(&face->glyph->bitmap.width)+3;
penY += slot->advance.y >> 6; /* not useful for now */
}
}
void TextRenderer::draw(FT_Bitmap * bitmap,float x,float y)
{
GLuint texture [0] ;
glGenTextures(1,texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RED , bitmap->width, bitmap->rows, 0, GL_RED , GL_UNSIGNED_BYTE, bitmap);
// int loc = glGetUniformLocation(Program, "texture1");
// glUniform1i(loc, 0);
glBindTexture(GL_TEXTURE_2D, texture[0]);
glEnable(GL_TEXTURE_2D);
int height=bitmap->rows/10;
int width=bitmap->width/10;
glBegin(GL_QUADS);
glTexCoord2f (0.0, 0.0);
glVertex2f(x,y);
glTexCoord2f (1.0, 0.0);
glVertex2f(x+width,y);
glTexCoord2f (1.0, 1.0);
glVertex2f(x+width,y+height);
glTexCoord2f (0.0, 1.0);
glVertex2f(x,y+height);
glEnd();
glDisable(GL_TEXTURE_2D);
}
What I am using to initialize the text renderer:
FT_Library library;
FT_Face arial;
FT_Error error = FT_Init_FreeType( &library );
if ( error )
{
throw std::runtime_error("Freetype failed");
}
error = FT_New_Face( library,
"C:/Windows/Fonts/Arial.ttf",
0,
&arial );
if ( error == FT_Err_Unknown_File_Format )
{
throw std::runtime_error("font format not available");
}
else if ( error )
{
throw std::runtime_error("Freetype font failed");
}
TextRenderer t(5,10,arial,"Hello");
t.render();
There are a lot of problems in your program that result from not understanding what each call you make to OpenGL or FreeType does. You should really read the libraries' documentation instead of stacking tutorials on top of each other.
Let's do this one by one
Fragment Shader
const char* fs = "uniform sampler2D texture1;
void main() {
gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);
//And that is all we need}";`
This shader doesn't compile (you should really check whether it compiles, with glGetShaderiv, and whether the program links, with glGetProgramiv). If you indent it correctly, you'll see that you commented out the final } because it's on the same line after the //. So you should either remove the comment or end it with a \n.
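For reference, a minimal sketch of such a check, placed right after glCompileShader(f) in your constructor (the log buffer size is an arbitrary choice):
GLint ok = GL_FALSE;
glGetShaderiv(f, GL_COMPILE_STATUS, &ok);
if (ok != GL_TRUE)
{
    char log[1024];
    glGetShaderInfoLog(f, sizeof(log), NULL, log);
    cout << "fragment shader failed to compile:\n" << log << "\n";
}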
Also, for newer versions of OpenGL using gl_TexCoord is deprecated but it works if you use a compatibility profile.
Vertex Shader
just like the fragment shaders there's deprecated functionality used, namely ftransform().
But the bigger problem is that you use gl_TexCoord[0] in the fragment shader without passing it through from the vertex shader. So, you need to add the line gl_TexCoord[0]=gl_MultiTexCoord0; in your vertex shader. (As you might have guessed that is also deprecated)
Texture passing
You are passing a pointer to bitmap to glTexImage2D, but bitmap is of type FT_Bitmap *; you need to pass bitmap->buffer instead.
You should not generate a new texture for each letter every frame (especially if you're never deleting them). You should call glGenTextures only once (you could put it in your TextRenderer constructor, since you put all the other initialization stuff there).
Then there's GLuint texture [0];, which should give you a compiler error. If you really want an array with one element, the syntax is GLuint texture [1];
So your final call would look something like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap->width, bitmap->rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer);
Miscellaneous
int height=bitmap->rows/10;
int width=bitmap->width/10;
this is an integer division, and if your values for bitmap->width get smaller than 10 you get 0 as the result, which makes the quad you're trying to draw invisible (a height or width of 0). If you have trouble getting the objects into view, you should just translate/scale them into view. The following is also deprecated, but if you keep using the other legacy functionality, it gives your window a coordinate system from [-100,-100] to [100,100] (lower-left to upper-right):
glLoadIdentity();
glScalef(0.01f, 0.01f, 1.0f);
You're also missing the coordinate conversion from FreeType to OpenGL: FreeType uses a coordinate system that starts at [0,0] in the top-left corner, where x is the offset to the right and y the offset toward the bottom. So if you use these coordinates in OpenGL unchanged, everything will be upside down.
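One way to compensate in your draw call (a sketch) is to flip the t texture coordinate, so FreeType's top-down rows land on OpenGL's bottom-up texture space:
glTexCoord2f(0.0, 1.0); glVertex2f(x,         y);
glTexCoord2f(1.0, 1.0); glVertex2f(x + width, y);
glTexCoord2f(1.0, 0.0); glVertex2f(x + width, y + height);
glTexCoord2f(0.0, 0.0); glVertex2f(x,         y + height);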
If you do all that your result should look something like this (grey background to highlight where the polygons begin and end):
As for your general approach, drawing letter by letter while re-using and overwriting the same texture is inefficient. It would be better to allocate one larger texture and then use glTexSubImage2D to write the glyphs into it. If FreeType's re-rendering of letters is a bottleneck, you could also write all the symbols you need into one texture at the beginning (for example the whole ASCII range) and then use that texture as a texture atlas.
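A sketch of that per-glyph upload into a pre-allocated atlas texture (atlasTexture, xoff and yoff are hypothetical bookkeeping for where the glyph goes):
glBindTexture(GL_TEXTURE_2D, atlasTexture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // FT_Bitmap rows are tightly packed
glTexSubImage2D(GL_TEXTURE_2D, 0, xoff, yoff,
                bitmap->width, bitmap->rows,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer);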
My general advice would also be that if you don't really want to learn OpenGL but just want to use some cross-platform rendering without bothering with the low-level stuff I'd recommend using a rendering framework instead.

issues with mixing glGetTexImage and imageStore on nvidia opengl

I wrote some code, too long to paste here, that renders into a 3D one-component float texture via a fragment shader that uses bindless imageLoad and imageStore.
That code is definitely working.
I then needed to work around some GLSL compiler bugs, so I wanted to read the 3D texture above back to the host via glGetTexImage. Yes, I did call glMemoryBarrierEXT(GL_ALL_BARRIER_BITS).
I checked the texture info via glGetTexLevelParameteriv() and everything I see matches. I checked for OpenGL errors and have none.
Sadly, though, glGetTexImage never seems to read what was written by the fragment shader. Instead, it only returns the fake values I put in when I called glTexImage3D() to create the texture.
Is that expected behavior? The documentation implies otherwise.
If glGetTexImage actually works that way, how can I read back the data in that 3D texture (resident on the device)? Clearly the driver can do it, as it does when the texture is made non-resident. Surely there's a simple way to do this simple thing...
I was asking if glGetTexImage was supposed to work that way or not. Here's the code:
void Bindless3DArray::dump_array(Array3D<float> &out)
{
bool was_mapped = m_image_mapped;
if (was_mapped)
unmap_array(); // unmap array so it's accessible to opengl
out.resize(m_depth, m_height, m_width);
glBindTexture(GL_TEXTURE_3D, m_textureid); // from glGenTextures()
#if 0
int w,h,d;
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_WIDTH, &w);
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_HEIGHT, &h);
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_DEPTH, &d);
int internal_format;
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internal_format);
int data_type_r, data_type_g;
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_RED_TYPE, &data_type_r);
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_GREEN_TYPE, &data_type_g);
int size_r, size_g;
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_RED_SIZE, &size_r);
glGetTexLevelParameteriv(GL_TEXTURE_3D, 0, GL_TEXTURE_GREEN_SIZE, &size_g);
#endif
glGetTexImage(GL_TEXTURE_3D, 0, GL_RED, GL_FLOAT, &out(0,0,0));
glBindTexture(GL_TEXTURE_3D, 0);
CHECK_GLERROR();
if (was_mapped)
map_array_to_cuda(); // restore state
}
Here's the code that creates the bindless array:
void Bindless3DArray::allocate(int w, int h, int d, ElementType t)
{
if (!m_textureid)
glGenTextures(1, &m_textureid);
m_type = t;
m_width = w;
m_height = h;
m_depth = d;
glBindTexture(GL_TEXTURE_3D, m_textureid);
CHECK_GLERROR();
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAX_LEVEL, 0); // ensure only 1 miplevel is allocated
CHECK_GLERROR();
Array3D<float> foo(d, h, w);
// DEBUG -- glGetTexImage returns THIS data, not what's on device
for (int z=0; z<m_depth; ++z)
for (int y=0; y<m_height; ++y)
for (int x=0; x<m_width; ++x)
foo(z,y,x) = 3.14159;
//-- Texture creation
if (t == ElementInteger)
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32UI, w, h, d, 0, GL_RED_INTEGER, GL_INT, 0);
else if (t == ElementFloat)
glTexImage3D(GL_TEXTURE_3D, 0, GL_R32F, w, h, d, 0, GL_RED, GL_FLOAT, &foo(0,0,0));
else
throw "Invalid type for Bindless3DArray";
CHECK_GLERROR();
m_handle = glGetImageHandleNV(m_textureid, 0, true, 0, (t == ElementInteger) ? GL_R32UI : GL_R32F);
glMakeImageHandleResidentNV(m_handle, GL_READ_WRITE);
CHECK_GLERROR();
#ifdef USE_CUDA
checkCuda(cudaGraphicsGLRegisterImage(&m_image_resource, m_textureid, GL_TEXTURE_3D, cudaGraphicsRegisterFlagsSurfaceLoadStore));
#endif
}
I allocate the array, render to it via an OpenGL fragment program, and then I call dump_array() to read the data back. Sadly, I only get what I loaded in the allocate call.
The render program looks like
void App::clear_deepz()
{
deepz_clear_program.bind();
deepz_clear_program.setUniformValue("sentinel", SENTINEL);
deepz_clear_program.setUniformValue("deepz", deepz_array.handle());
deepz_clear_program.setUniformValue("sem", semaphore_array.handle());
run_program();
glMemoryBarrierEXT(GL_ALL_BARRIER_BITS);
// glMemoryBarrierEXT(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
// glMemoryBarrierEXT(GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV);
deepz_clear_program.release();
}
and the fragment program is:
#version 420
in vec4 gl_FragCoord;
uniform float sentinel;
coherent uniform layout(size1x32) image3D deepz;
coherent uniform layout(size1x32) uimage3D sem;
void main(void)
{
ivec3 coords = ivec3(gl_FragCoord.x, gl_FragCoord.y, 0);
imageStore(deepz, coords, vec4(sentinel));
imageStore(sem, coords, ivec4(0));
discard; // don't write to FBO at all
}
discard; // don't write to FBO at all
That's not what discard means. Oh, it does mean that. But it also means that all Image Load/Store writes will be discarded too. Indeed, odds are, the compiler will see that statement and just do nothing for the entire fragment shader.
If you want to just execute the fragment shader, you can employ the GL 4.3 feature (available on your NVIDIA hardware) of having an empty framebuffer object. Or you could use a compute shader. If you can't use GL 4.3 yet, then use a write mask to turn off all color writes.
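A sketch of the write-mask variant, reusing the clear pass from the question (run_program is the question's own helper, and the discard; line is assumed removed from the fragment shader):
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); // keep only the imageStore side effects
run_program();
glMemoryBarrierEXT(GL_ALL_BARRIER_BITS);
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);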
As Nicol mentions above, if you want side effects only of image load and store, the proper way is to use an empty frame buffer object.
The bug of mixing glGetTexImage() and bindless textures was in fact a driver bug, and has been fixed as of driver version 335.23. I filed the bug and have confirmed my code is now working properly.
Note I am using empty frame buffer objects in the code, and don't use "discard" any more.

How to play YUV video in Qt4?

I want to play a YUV video sequence using Qt. Right now I am using QPixmap and drawing it pixel by pixel with DrawPixel. However, it can't play the video in real time. What can I do to improve the speed?
Did you try using the Phonon classes like VideoPlayer?
Take a look at this:
http://doc.qt.io/archives/4.6/phonon-videoplayer.html
Pixel by pixel is about the slowest method to create a picture. It would improve performance a lot if you processed the image data beforehand and used QPixmap's loadFromData() method.
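For instance, a sketch of wrapping an already-converted RGB frame without any per-pixel calls (rgbBuffer, width and height are placeholders for your decoded frame; note that this QImage constructor expects 32-bit-aligned scanlines, or pass a bytesPerLine argument):
QImage frame(rgbBuffer, width, height, QImage::Format_RGB888); // wraps the buffer, no copy
QPixmap pix = QPixmap::fromImage(frame);                       // one conversion per frame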
Well, DrawPixel is definitely the worst-performing solution.
Nowadays (Qt 5), QOpenGLWidget can be used to render video frames to a texture.
Depending on the video pixel format, that is either plain texture rendering or a pixel-format conversion via shaders followed by texture drawing.
The question is old, so I'll leave a sketchy solution, just because it took me some time to get to it myself once. The simplest (not best, because lots of optimizations are possible) solution is:
OpenGLDisplayRGB.h
#pragma once
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QScopedPointer>
#include <QException>
/*!
* \brief The OpenGLDisplay class
* Simple OpenGL display, that renders RGBA to texture
*/
class OpenGLDisplayRGB : public QOpenGLWidget, public QOpenGLFunctions
{
Q_OBJECT
public:
explicit OpenGLDisplayRGB(QWidget* parent = nullptr);
~OpenGLDisplayRGB() override;
protected:
void initializeGL() override;
void resizeGL(int w, int h) override;
void paintGL() override;
void closeEvent(QCloseEvent* e) override;
public:
void DisplayVideoFrame(unsigned char* data, int frameWidth, int frameHeight);
Q_SIGNAL void closed();
private:
struct OpenGLDisplayRGBImpl;
QScopedPointer<OpenGLDisplayRGBImpl> impl;
};
OpenGLDisplayRGB.cpp
#include "OpenGLDisplayRGB.h"
#include <QOpenGLShader>
#include <QOpenGLTexture>
#include <QCoreApplication>
#include <QResizeEvent>
#include <QTimer>
#include <QDebug>
#define ATTRIB_VERTEX 0
#define ATTRIB_TEXTURE 1
namespace
{
//Vertex matrix
static const GLfloat vertexVertices[] = {
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, 1.0f,
};
//Texture matrix
static const GLfloat textureVertices[] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f,
};
}
struct OpenGLDisplayRGB::OpenGLDisplayRGBImpl
{
OpenGLDisplayRGBImpl(QObject* ownerPtr)
: mBufRGB(nullptr)
//, mRepaintTimer(new QTimer(ownerPtr))
, mEnabled(true)
, mShaderProgram(new QOpenGLShaderProgram(ownerPtr))
, mTexture(new QOpenGLTexture(QOpenGLTexture::Target2D))
{ }
unsigned char* mBufRGB;
//QTimer* mRepaintTimer;
bool mEnabled;
QOpenGLShader* mVShader;
QOpenGLShader* mFShader;
QOpenGLShaderProgram* mShaderProgram;
QScopedPointer<QOpenGLTexture> mTexture;
int mTextureUniform;
GLsizei mVideoW, mVideoH;
};
/*************************************************************************/
OpenGLDisplayRGB::OpenGLDisplayRGB(QWidget* parent)
: QOpenGLWidget(parent)
, impl(new OpenGLDisplayRGBImpl(this))
{
setAttribute(Qt::WA_OpaquePaintEvent);
// setAttribute(Qt::WA_PaintOnScreen);
setAttribute(Qt::WA_NoSystemBackground);
/*
impl->mRepaintTimer->setInterval(50);
connect(impl->mRepaintTimer, SIGNAL(timeout()), this, SLOT(update()));
impl->mRepaintTimer->start();*/
}
OpenGLDisplayRGB::~OpenGLDisplayRGB()
{
makeCurrent();
}
void OpenGLDisplayRGB::DisplayVideoFrame(unsigned char *data, int frameWidth, int frameHeight)
{
impl->mVideoW = frameWidth;
impl->mVideoH = frameHeight;
impl->mBufRGB = data;
update();
}
void OpenGLDisplayRGB::initializeGL()
{
initializeOpenGLFunctions();
glEnable(GL_DEPTH_TEST);
/* Modern opengl rendering pipeline relies on shaders to handle incoming data.
* Shader: is a small function written in OpenGL Shading Language (GLSL).
* GLSL is the language that makes up all OpenGL shaders.
* The syntax of the specific GLSL language requires the reader to find relevant information. */
impl->mEnabled = impl->mShaderProgram->addShaderFromSourceFile(QOpenGLShader::Vertex, ":/OpenGL/simple_vertex_shader.v.glsl");
if(!impl->mEnabled)
qDebug() << QString("[Error] Vertex shader failed: %1").arg(impl->mShaderProgram->log());
impl->mEnabled = impl->mShaderProgram->addShaderFromSourceFile(QOpenGLShader::Fragment, ":/OpenGL/simple_texture_shader.f.glsl");
if(!impl->mEnabled)
qDebug() << QString("[Error] Fragment shader failed: %1").arg(impl->mShaderProgram->log());
// Bind the property vertexIn to the specified location ATTRIB_VERTEX, this property
// has a declaration in the vertex shader source
impl->mShaderProgram->bindAttributeLocation("vertexIn", ATTRIB_VERTEX);
// Bind the attribute textureIn to the specified location ATTRIB_TEXTURE, the attribute
// has a declaration in the vertex shader source
impl->mShaderProgram->bindAttributeLocation("textureIn", ATTRIB_TEXTURE);
//Link all the shader programs added to
impl->mShaderProgram->link();
//activate all links
impl->mShaderProgram->bind();
// Read the position of the data variable tex_rgb in the shader, the declaration
// of these variables can be seen in
// fragment shader source
impl->mTextureUniform = impl->mShaderProgram->uniformLocation("uSampler");
// Set the value of the vertex matrix of the attribute ATTRIB_VERTEX and format
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, vertexVertices);
// Set the texture matrix value and format of the attribute ATTRIB_TEXTURE
glVertexAttribPointer(ATTRIB_TEXTURE, 2, GL_FLOAT, 0, 0, textureVertices);
// Enable the ATTRIB_VERTEX attribute data, the default is off
glEnableVertexAttribArray(ATTRIB_VERTEX);
// Enable the ATTRIB_TEXTURE attribute data, the default is off
glEnableVertexAttribArray(ATTRIB_TEXTURE);
impl->mTexture->create();
impl->mTexture->setMinMagFilters(QOpenGLTexture::Linear, QOpenGLTexture::Linear);
impl->mTexture->setWrapMode(QOpenGLTexture::ClampToEdge);
glClearColor (1.0f, 0.0f, 1.0f, 1.0f); // set the background color
}
void OpenGLDisplayRGB::resizeGL(int w, int h)
{
if(h == 0)// prevents being divided by zero
h = 1;// set the height to 1
// Set the viewport
glViewport(0, 0, w, h);
}
void OpenGLDisplayRGB::paintGL()
{
if (!impl->mEnabled || !impl->mBufRGB)
return; //RET
// Load y data texture
// Activate the texture unit GL_TEXTURE0
glActiveTexture(GL_TEXTURE0);
// Use the texture generated from y to generate texture
glBindTexture(GL_TEXTURE_2D, impl->mTexture->textureId());
// Use the memory mBufYuv data to create a real y data texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, impl->mVideoW, impl->mVideoH, 0, GL_RGBA, GL_UNSIGNED_BYTE, impl->mBufRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Specify y texture to use the new value can only use 0, 1, 2, etc. to represent
// the index of the texture unit, this is the place where opengl is not humanized
//0 corresponds to the texture unit GL_TEXTURE0 1 corresponds to the
// texture unit GL_TEXTURE1 2 corresponds to the texture unit GL_TEXTURE2
glUniform1i(impl->mTextureUniform, 0);
// Use the vertex array way to draw graphics
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
void OpenGLDisplayRGB::closeEvent(QCloseEvent *e)
{
emit closed();
e->accept();
}
simple_texture_shader.f.glsl
varying vec2 vTextureCoord;
uniform sampler2D uSampler;
void main(void)
{
gl_FragColor = texture2D(uSampler, vTextureCoord);
}
simple_vertex_shader.v.glsl
attribute vec4 vertexIn;
attribute vec2 textureIn;
varying vec2 vTextureCoord;
void main(void)
{
gl_Position = vertexIn;
vTextureCoord = textureIn;
}