Rendering text - FreeType blank screen - C++

I am using FreeType, and the only thing left to do in order to render text is to convert an FT_Bitmap to something that can be rendered with OpenGL. Can someone explain how to do this? I am using GLFW. The way I have tried it just gives a blank screen. Here is the code I am using:
#include <exception>
#include <iostream>
#include <string>
#include <glew.h>
#include <GL/glfw.h>
#include <iterator>
#include "../include/TextRenderer.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#include <stdexcept>
#include <freetype/ftglyph.h>
using std::runtime_error;
using std::cout;
TextRenderer::TextRenderer(int x, int y, FT_Face Face, std::string s)
{
    FT_Set_Char_Size(
        Face,   /* handle to face object           */
        0,      /* char_width in 1/64th of points  */
        16*64,  /* char_height in 1/64th of points */
        0,      /* horizontal device resolution    */
        0 );    /* vertical device resolution      */
    slot = Face->glyph;
    text = s;
    setsx(x);
    setsy(y);
    penX = x;
    penY = y;
    face = Face;
    //shaders
    GLuint v = glCreateShader(GL_VERTEX_SHADER);
    const char* vs = "void main(){ gl_Position = ftransform();}";
    glShaderSource(v,1,&vs,NULL);
    glCompileShader(v);
    GLuint f = glCreateShader(GL_FRAGMENT_SHADER);
    const char* fs = "uniform sampler2D texture1; void main() { gl_FragColor = texture2D(texture1, gl_TexCoord[0].st); //And that is all we need}";
    glShaderSource(f,1,&fs,NULL);
    glCompileShader(f);
    Program = glCreateProgram();
    glAttachShader(Program,v);
    glAttachShader(Program,f);
    glLinkProgram(Program);
}
void TextRenderer::render()
{
    glUseProgram(Program);
    FT_UInt glyph_index;
    for ( int n = 0; n < text.size(); n++ )
    {
        /* retrieve glyph index from character code */
        glyph_index = FT_Get_Char_Index( face, text[n] );
        /* load glyph image into the slot (erase previous one) */
        error = FT_Load_Glyph( face, glyph_index, FT_LOAD_RENDER );
        draw(&face->glyph->bitmap, penX + slot->bitmap_left, penY - slot->bitmap_top );
        penX += *(&face->glyph->bitmap.width)+3;
        penY += slot->advance.y >> 6; /* not useful for now */
    }
}
void TextRenderer::draw(FT_Bitmap * bitmap, float x, float y)
{
    GLuint texture [0] ;
    glGenTextures(1,texture);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, bitmap->width, bitmap->rows, 0, GL_RED, GL_UNSIGNED_BYTE, bitmap);
    // int loc = glGetUniformLocation(Program, "texture1");
    // glUniform1i(loc, 0);
    glBindTexture(GL_TEXTURE_2D, texture[0]);
    glEnable(GL_TEXTURE_2D);
    int height = bitmap->rows/10;
    int width = bitmap->width/10;
    glBegin(GL_QUADS);
    glTexCoord2f(0.0, 0.0);
    glVertex2f(x,y);
    glTexCoord2f(1.0, 0.0);
    glVertex2f(x+width,y);
    glTexCoord2f(1.0, 1.0);
    glVertex2f(x+width,y+height);
    glTexCoord2f(0.0, 1.0);
    glVertex2f(x,y+height);
    glEnd();
    glDisable(GL_TEXTURE_2D);
}
What I am using to initialize the text renderer:
FT_Library library;
FT_Face arial;
FT_Error error = FT_Init_FreeType( &library );
if ( error )
{
    throw std::runtime_error("Freetype failed");
}
error = FT_New_Face( library,
                     "C:/Windows/Fonts/Arial.ttf",
                     0,
                     &arial );
if ( error == FT_Err_Unknown_File_Format )
{
    throw std::runtime_error("font format not available");
}
else if ( error )
{
    throw std::runtime_error("Freetype font failed");
}
TextRenderer t(5,10,arial,"Hello");
t.render();

There are a lot of problems in your program that result from not understanding what each call you make to OpenGL or FreeType does. You should really read the documentation for the libraries instead of stacking tutorials on top of each other.
Let's go through the issues one by one.
Fragment Shader
const char* fs = "uniform sampler2D texture1;
                  void main() {
                      gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);
                      //And that is all we need}";
This shader doesn't compile (you should really check whether each shader compiles with glGetShaderiv and whether the program links with glGetProgramiv). If you indent it correctly, you'll see that you commented out the final } because it sits on the same line, after the //. So remove the comment, or end it with a \n.
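For reference, a minimal sketch of such a check for the fragment shader and the linked program (the error logging is mine, not part of the original code):

GLint ok = GL_FALSE;
glGetShaderiv(f, GL_COMPILE_STATUS, &ok);
if (ok != GL_TRUE) {
    char log[1024];
    glGetShaderInfoLog(f, sizeof(log), NULL, log); // human-readable compiler error
    std::cerr << "fragment shader failed to compile: " << log << "\n";
}
glGetProgramiv(Program, GL_LINK_STATUS, &ok); // after glLinkProgram
if (ok != GL_TRUE) {
    char log[1024];
    glGetProgramInfoLog(Program, sizeof(log), NULL, log);
    std::cerr << "program failed to link: " << log << "\n";
}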
Also, for newer versions of OpenGL using gl_TexCoord is deprecated but it works if you use a compatibility profile.
Vertex Shader
Just like in the fragment shader, there's deprecated functionality used here, namely ftransform().
The bigger problem, though, is that you use gl_TexCoord[0] in the fragment shader without passing it through from the vertex shader. You need to add the line gl_TexCoord[0] = gl_MultiTexCoord0; to your vertex shader. (As you might have guessed, that is also deprecated.)
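Put together, a corrected (compatibility-profile) shader pair would look something like this; it is a sketch that only applies the two fixes above:

const char* vs =
    "void main() {"
    "    gl_TexCoord[0] = gl_MultiTexCoord0;" // pass the texture coordinate through
    "    gl_Position = ftransform();"
    "}";
const char* fs =
    "uniform sampler2D texture1;"
    "void main() {"
    "    gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);"
    "}";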
Texture passing
You are passing a pointer to bitmap to glTexImage2D, but bitmap is of type FT_Bitmap *; you need to pass bitmap->buffer instead.
You should also not generate a new texture for every letter every frame (especially not without ever deleting it). Call glGenTextures only once (you could put it in your TextRenderer constructor, since you put all the other initialization there).
Then there's GLuint texture [0];, which should give you a compiler error. If you really need an array with one element, the syntax is GLuint texture [1];
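A sketch of those two fixes together; here tex is assumed to be a GLuint member of TextRenderer. Note also (my observation) that glBindTexture has to come before glTexParameteri/glTexImage2D, otherwise you configure and upload to whatever texture happened to be bound previously:

// in the constructor, once:
glGenTextures(1, &tex);

// in draw(), per glyph:
glBindTexture(GL_TEXTURE_2D, tex);        // bind first...
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap->width, bitmap->rows,
             0, GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer); // ...then upload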
So your final call would look something like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap->width, bitmap->rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer);
Miscellaneous
int height=bitmap->rows/10;
int width=bitmap->width/10;
This is an integer division, and as soon as bitmap->width (or bitmap->rows) drops below 10 the result is 0, which makes the quad you're trying to draw invisible (a width or height of 0). If you have trouble getting objects into view, you should translate/scale them into view instead. The following is also deprecated, but if you keep using the other fixed-function calls it gives your window a coordinate system from [-100,-100] to [100,100] (lower-left to upper-right):
glLoadIdentity();
glScalef(0.01f, 0.01f, 1.0f);
You're also missing the coordinate conversion from FreeType to OpenGL. FreeType uses a coordinate system that starts at [0,0] in the top-left corner, with x growing to the right and y growing downward. If you use those coordinates unchanged in OpenGL, everything will be upside down.
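One simple way to compensate (a sketch that flips the t texture coordinate in the quad instead of flipping the bitmap itself):

glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex2f(x,         y);          // bottom of quad samples the bitmap's last row
glTexCoord2f(1.0, 1.0); glVertex2f(x + width, y);
glTexCoord2f(1.0, 0.0); glVertex2f(x + width, y + height); // top of quad samples the bitmap's first row
glTexCoord2f(0.0, 0.0); glVertex2f(x,         y + height);
glEnd();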
If you do all that your result should look something like this (grey background to highlight where the polygons begin and end):
As for your general approach: drawing letter by letter while re-using and overwriting the same texture is inefficient. It would be better to allocate one larger texture and write the glyphs into it with glTexSubImage2D. If FreeType re-rendering letters turns out to be a bottleneck, you could also render all the symbols you need (for example the whole ASCII range) into one texture at startup and then use it as a texture atlas.
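A rough sketch of the atlas idea (the 256x256 size and the pen position px, py are made-up values for illustration):

// once: allocate an empty atlas texture (NULL = storage only, no data)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 256, 0,
             GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);

// per glyph: copy the rendered bitmap into a free spot of the atlas
glTexSubImage2D(GL_TEXTURE_2D, 0, px, py, bitmap->width, bitmap->rows,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer);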
My general advice: if you don't really want to learn OpenGL but just want cross-platform text rendering without bothering with the low-level details, use a rendering framework instead.

Related

OpenGL C++, Textures Suddenly Black After Working For Days

OpenGL 3.3.
My textures suddenly became black after working for many days.
Pretty much all the posts with a similar issue were about incorrect or absent use of glTexParameteri, or about incorrect texture loading, but I seem to be doing everything correctly in that regard. The vector containing the data is 1024 bytes (16 pixels x 16 pixels x 4 bytes), so that's good. After the issue arose I made a test texture just to make sure everything about that was right.
I also saw that many posts were about incomplete textures, but here I'm using glTexImage2D and passing the data, so the texture has to be complete. I am also not creating mipmaps; I disabled them for testing, although they were on and working before this bug.
I'm also calling glGetError quite frequently and there are no errors.
Here is the texture creation code:
unsigned int testTexture;
unsigned long w, h;
std::vector<byte> data;
std::vector<byte> img;
loadFile(data, "./assets/textures/blocks/brick.png");
decodePNG(img, w, h, &data[0], data.size());
glGenTextures(1, &testTexture);
glBindTexture(GL_TEXTURE_2D, testTexture);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA8,w,h,0,GL_RGBA, GL_UNSIGNED_BYTE,&img[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
data.clear();
img.clear();
And here is where I set up my uniforms:
glUseProgram(worldShaderProgram);
glUniform1f(glGetUniformLocation(worldShaderProgram, "time"), gameTime);
glUniformMatrix4fv(glGetUniformLocation(worldShaderProgram, "MVP"), 1, GL_FALSE, &TheMatrix[0][0]);
glUniform1i(glGetUniformLocation(worldShaderProgram, "texAtlas"), testTexture);
glUniform1f(glGetUniformLocation(worldShaderProgram, "texMult"), 16.0f / 256.0f);
glUniform4f(glGetUniformLocation(worldShaderProgram, "fogColor"), fogColor.r, fogColor.g, fogColor.b, fogColor.a);
Also, here is the fragment shader:
#version 330
in vec4 tex_color;
in vec2 tex_coord;
layout(location = 0) out vec4 color;
uniform sampler2D texAtlas;
uniform mat4 MVP;
uniform vec4 fogColor;
const float fogStart = 0.999f;
const float fogEnd = 0.9991f;
const float fogMult = 1.0f / (fogEnd - fogStart);
void main() {
    if (gl_FragCoord.z >= fogEnd)
        discard;
    //color = vec4(tex_coord.x,tex_coord.y,0.0f,1.0f) * tex_color; // This line does what it's supposed to
    color = texture(texAtlas,tex_coord) * tex_color; // This one does not
    if (gl_FragCoord.z >= fogStart)
        color = mix(color, fogColor, (gl_FragCoord.z - fogStart) * fogMult);
}
If I use the line color = vec4(tex_coord.x,tex_coord.y,0.0f,1.0f) * tex_color; instead of color = texture(texAtlas,tex_coord) * tex_color; to visualize the coordinate from which each fragment would sample the texture, the result is what you would expect (currently only testing it with the top faces):
Image link, since I can't embed images -- please click.
That proves the vertex shader is working correctly. (The sampler2D is obtained from a uniform in the fragment shader.)
Main loop rendering code:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D, textures.textureId);
glUseProgram(worldShaderProgram);
wm.render();
// wm.render() calls lots of meshes to render themselves.
// Just wanted to point out that each one of them has its own
// vertex array buffer, vertex buffer, and index buffer.
// To render, I bind the vertex array buffer with glBindVertexArray(vertexArrayBuffer);
// then I call glDrawElements();
Also, here is the OpenGL initialization code:
if (!glfwInit()) // Initialize the library
    return -1;
window = glfwCreateWindow(wndSize.width, wndSize.height, "Minecraft", NULL, NULL);
if (!window)
{
    glfwTerminate();
    return -1;
}
glfwMakeContextCurrent(window); // Make the window's context current
glfwSetWindowSizeCallback(window, resiseEvent);
glfwSwapInterval(1);
if (glewInit() != GLEW_OK)
    return -1;
glClearColor(fogColor.r, fogColor.g, fogColor.b, fogColor.a);
glClearDepth(1.0f);
glEnable(GL_DEPTH_TEST); // Enable depth testing for z-culling
glEnable(GL_CULL_FACE); // Orientation Culling
glDepthFunc(GL_LEQUAL); // Set the type of depth-test (<=)
glShadeModel(GL_SMOOTH); // Enable smooth shading
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST); // Nice perspective corrections
glLineWidth(2.0f);
You are wrongly assigning the texture object's name to the texture sampler uniform. This is wrong:
glUniform1i(glGetUniformLocation(worldShaderProgram, "texAtlas"), testTexture);
The binding point between the texture object and the texture sampler uniform is the texture unit. When glBindTexture is invoked, the texture object is bound to the specified target of the current texture unit. The texture unit can be chosen with glActiveTexture; the default texture unit is GL_TEXTURE0.
Since your texture is bound to texture unit 0 (GL_TEXTURE0), you have to set the value 0 on the texture sampler uniform:
glUniform1i(glGetUniformLocation(worldShaderProgram, "texAtlas"), 0);
Note that your code worked before by chance. You had just one texture object, or testTexture was the first texture name created, so the value of testTexture happened to be 0. Now the value of testTexture is no longer 0, causing your code to fail.
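Spelled out, the complete binding sequence looks something like this (a sketch using the names from the question):

glActiveTexture(GL_TEXTURE0);              // select texture unit 0 (the default anyway)
glBindTexture(GL_TEXTURE_2D, testTexture); // bind the texture object to unit 0
glUseProgram(worldShaderProgram);
glUniform1i(glGetUniformLocation(worldShaderProgram, "texAtlas"), 0); // unit index, not texture name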

Can't load multiple texture on OpenGL

I'm trying to load multiple textures in OpenGL.
To validate this, I want to load two textures and mix them with the following fragment shader:
#version 330 core
out vec4 color;
in vec2 v_TexCoord;
uniform sampler2D u_Texture0;
uniform sampler2D u_Texture1;
void main()
{
    color = mix(texture(u_Texture0, v_TexCoord), texture(u_Texture1, v_TexCoord), 0.5);
}
I have abstracted some of OpenGL's functionality into classes like Shader, Texture, UniformXX, etc.
Here's an attempt to load the two textures into the sampler uniforms of the fragment shader:
Shader shader;
shader.Attach(GL_VERTEX_SHADER, "res/shaders/vs1.shader");
shader.Attach(GL_FRAGMENT_SHADER, "res/shaders/fs1.shader");
shader.Link();
shader.Bind();
Texture texture0("res/textures/container.jpg", GL_RGB, GL_RGB);
texture0.Bind(0);
Uniform1i textureUnit0Uniform("u_Texture0");
textureUnit0Uniform.SetValues({ 0 });
shader.SetUniform(textureUnit0Uniform);
Texture texture1("res/textures/awesomeface.png", GL_RGBA, GL_RGBA);
texture1.Bind(1);
Uniform1i textureUnit1Uniform("u_Texture1");
textureUnit1Uniform.SetValues({ 1 });
shader.SetUniform(textureUnit1Uniform);
Here's what the Texture implementation looks like:
#include "Texture.h"
#include "Renderer.h"
#include "stb_image/stb_image.h"
Texture::Texture(const std::string& path, unsigned int destinationFormat, unsigned int sourceFormat)
    : m_Path(path)
{
    stbi_set_flip_vertically_on_load(1);
    m_Buffer = stbi_load(path.c_str(), &m_Width, &m_Height, &m_BPP, 0);
    GLCALL(glGenTextures(1, &m_RendererID));
    GLCALL(glBindTexture(GL_TEXTURE_2D, m_RendererID));
    GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
    GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT));
    GLCALL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT));
    GLCALL(glTexImage2D(GL_TEXTURE_2D, 0, destinationFormat, m_Width, m_Height, 0, sourceFormat, GL_UNSIGNED_BYTE, m_Buffer));
    glGenerateMipmap(GL_TEXTURE_2D);
    GLCALL(glBindTexture(GL_TEXTURE_2D, 0));
    if (m_Buffer)
        stbi_image_free(m_Buffer);
}

Texture::~Texture()
{
    GLCALL(glDeleteTextures(1, &m_RendererID));
}

void Texture::Bind(unsigned int unit) const
{
    GLCALL(glActiveTexture(GL_TEXTURE0 + unit));
    GLCALL(glBindTexture(GL_TEXTURE_2D, m_RendererID));
}

void Texture::Unbind() const
{
    GLCALL(glBindTexture(GL_TEXTURE_2D, 0));
}
Now, instead of getting an even mix of color from both textures, I only get the second texture appearing and blending with the background:
I've pinpointed the problem to the constructor of the Texture implementation: if I comment out the initialization of the second texture so that its constructor is never called, the first texture shows up.
Can anyone suggest what I'm doing wrong?
Took me a while to spot, but at the point where you call the constructor of the second texture, your active texture unit is still 0, so the constructor happily repoints texture unit 0 and you are left with two texture units bound to the same texture.
The solution should be simple enough: do not interleave texture creation and texture unit assignment; create both textures first and only then bind them explicitly.
Better yet, look into using direct state access to avoid all this binding.
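In terms of the question's classes, the reordering could look like this (a sketch):

// create both textures first; each constructor temporarily binds on unit 0
Texture texture0("res/textures/container.jpg", GL_RGB, GL_RGB);
Texture texture1("res/textures/awesomeface.png", GL_RGBA, GL_RGBA);

// only then assign the units explicitly
texture0.Bind(0); // unit 0 -> container
texture1.Bind(1); // unit 1 -> awesomeface

With direct state access (OpenGL 4.5), glBindTextureUnit(unit, textureId) sets a unit's binding without touching the active-texture state at all.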
To highlight the problem for future viewers of this question, this is the problematic sequence of calls:
// constructor of texture 1
glGenTextures(1, &container)
glBindTexture(GL_TEXTURE_2D, container) // Texture Unit 0 is now bound to container
// explicit texture0.Bind call
glActiveTexture(GL_TEXTURE0) // noop
glBindTexture(GL_TEXTURE_2D, container) // Texture Unit 0 is now bound to container
// constructor of texture 2
glGenTextures(1, &awesomeface)
glBindTexture(GL_TEXTURE_2D, awesomeface) // Texture Unit 0 is now bound to awesomeface instead of container.
// explicit texture1.Bind call
glActiveTexture(GL_TEXTURE1)
glBindTexture(GL_TEXTURE_2D, awesomeface) // Texture Unit 0 and 1 are now bound to awesomeface.

OpenGL and QtQuick Texture Problems

I'm developing a simple QQuickItem implementation in C++ based on the "openglunderqml" example that came with Qt. I made some modifications to use different shaders and two textures that I load in. The idea is that the shaders will crossfade between the two textures (which are essentially just images I have loaded into the textures).
When I put this QQuickItem alone inside a QML file and run it, everything works fine: the images crossfade between each other (I've set up a property animation to keep them crossfading) and everything appears fine. However, if I add other elements such as text, the text doesn't render properly -- just little oddly shaped blocks. If I put an image in, things get really weird: instead of the QQuickItem rendering in the box it's supposed to render in, it renders full screen and upside down. As far as I can tell, the other image is never loaded.
I think I must not be doing something that I should be, but I've no idea what. Note that the first code block contains the shaders and rendering code, the second contains the function loadNewTexture() which loads a new image into a texture (only called once per texture, not every render), and the third contains the QtQuick .qml file.
Here's the OpenGL code (within the QQuickItem::Paint method):
// Builds the OpenGL shaders that handle the crossfade
if (!m_program) {
m_program = new QOpenGLShaderProgram();
// Shader loads coordinate positions
m_program->addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute vec2 position;"
"varying vec2 texcoord;"
"void main() {"
" gl_Position = vec4(position, 0.0, 1.0);"
" texcoord = position * vec2(0.5) + vec2(0.5);"
"}");
// Shader does the crossfade
m_program->addShaderFromSourceCode(QOpenGLShader::Fragment,
"uniform lowp float xfade;"
"uniform sampler2D textures[2];"
"varying vec2 texcoord;"
"void main() {"
" gl_FragColor = mix("
" texture2D(textures[0], texcoord),"
" texture2D(textures[1], texcoord),"
" xfade"
" );"
"}");
m_program->bindAttributeLocation("vertices", 0);
m_program->link();
connect(window()->openglContext(), SIGNAL(aboutToBeDestroyed()),
this, SLOT(cleanup()), Qt::DirectConnection);
}
m_program->bind();
// Loads corner vertices as triangle strip
m_program->enableAttributeArray(0);
float values[] = {
-1, -1,
1, -1,
-1, 1,
1, 1
};
m_program->setAttributeArray(0, GL_FLOAT, values, 2);
// Loads the fade value
m_program->setUniformValue("xfade", (float) m_thread_xfade);
glEnable(GL_TEXTURE_2D);
// Check if a new texture needs to be loaded
if (!new_source_loaded && !m_adSource.isEmpty())
new_source_loaded = loadNewTexture(m_adSource);
// Loads texture 0 into the shader
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[0]);
m_program->setUniformValue("textures[0]", 0);
// Loads texture 1 into the shader
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textures[1]);
m_program->setUniformValue("textures[1]", 1);
// Sets the OpenGL render area to the space given to this components
glViewport((GLint) this->x(), (GLint) this->y(), (GLint) this->width(), (GLint) this->height());
// Sets some parameters
glDisable(GL_DEPTH_TEST);
// Sets the clear color (backround color) to black
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
// Draws triangle strip
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisable(GL_BLEND);
glDisable(GL_TEXTURE_2D);
// Cleans up vertices
m_program->disableAttributeArray(0);
m_program->release();
The loadNewTexture() function:
bool AdRotator::loadNewTexture(QUrl source) {
// Load the image from source url
QImage image(source.path());
// Check that the image was loaded properly
if (image.isNull()) {
qDebug() << QString("AdRotator::loadTexture: Loading image from source: ") << source.toString() << QString(" failed.");
return false;
}
// Update this as the active texture
active_texture = !active_texture;
// Convert into GL-friendly format
QImage GL_formatted_image = QGLWidget::convertToGLFormat(image);
// Check that the image was converted properly
if (image.isNull()) {
qDebug() << QString("AdRotator::loadTexture: Converting image from source: ") << source.toString() << QString(" failed.");
return false;
}
// Generate the texture base
glGenTextures(1, &textures[active_texture]);
glBindTexture(GL_TEXTURE_2D, textures[active_texture]);
// Give texture parameters (scaling and edging options)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Load pixels from image into texture
glTexImage2D(
GL_TEXTURE_2D, 0, /* target, level of detail */
GL_RGBA, /* internal format */
GL_formatted_image.width(), GL_formatted_image.height(), 0, /* width, height, border */
GL_RGBA, GL_UNSIGNED_BYTE, /* external format, type */
GL_formatted_image.bits() /* pixels */
);
if (textures[active_texture] == 0) qDebug() << QString("New Texture post-load failed.");
return true;
}
The .qml file:
import QtQuick 2.0
Item {
width: 1920
height: 1080
/* Image{} element breaks things
Image {
id: image1
x: 0
y: 0
anchors.rightMargin: 0
anchors.bottomMargin: 0
anchors.leftMargin: 0
anchors.topMargin: 0
sourceSize.height: 1080
sourceSize.width: 1920
anchors.fill: parent
fillMode: Image.PreserveAspectCrop
source: "images/background.png"
}*/
/* The QQuickItem */
ImageCrossfader {
x: 753
y: 107
width: 1150
height: 865
}
}
I recently did pretty much the same exercise, and (not having actually run your code, and only having dealt with one texture) I think I might have an idea what you missed: you have to make sure the OpenGL state machine is left at the end of your paint function (more or less) exactly as you found it at the beginning. You did release the shader program and disable the array attribute, but you did not unbind the two textures in your two texture units. The following at the end of your paint member function should do the trick:
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, 0);
Other than that, two further comments:
Please note that your image1 Image from the QML file would hide your ImageCrossfader item completely (if I am not misinterpreting the anchors property). Everything you add to your QML scene gets painted over your OpenGL underlay (hence the name ;)).
You can safely remove all the glEnable(), glDisable() and glBlendFunc() calls. Actually, removing them should make your code safer: the less state you change, the fewer changes you have to remember to revert.

OpenGL renders texture all white

I'm attempting to render a .png image as a texture, but all that is being rendered is a white square.
I give my texture a unique int ID called texID, read the pixel data into a buffer 'image' (declared in the .h file), load the pixel buffer, do all of my OpenGL setup, and bind that pixel buffer to a texture for OpenGL. I then draw it all using glDrawElements.
I also initialize the texture with a size of 32x32 when its constructor is called, so I doubt it is related to a power-of-two size issue.
Can anybody see any mistakes in my OpenGL GL_TEXTURE_2D setup that might give me a blank white square?
#include "Texture.h"
Texture::Texture(int width, int height, string filename)
{
const char* fnPtr = filename.c_str(); //our image loader accepts a ptr to a char, not a string
printf(fnPtr);
w = width; //give our texture a width and height, the reason that we need to pass in the width and height values manually
h = height;//UPDATE, these MUST be P.O.T.
unsigned error = lodepng::decode(image,w,h,fnPtr);//lodepng's decode function will load the pixel data into image vector
//display any errors with the texture
if(error)
{
cout << "\ndecoder error " << error << ": " << lodepng_error_text(error) <<endl;
}
for(int i = 0; i<image.size(); i++)
{
printf("%i,", image.at(i));
}
printf("\nImage size is %i", image.size());
//image now contains our pixeldata. All ready for OpenGL to do its thing
//let's get this texture up in the video memory
texGLInit();
}
void Texture::texGLInit()
{
//WHERE YOU LEFT OFF: glGenTextures isn't assigning an ID to textures. it stays at zero the whole time
//i believe this is why it's been rendering white
glGenTextures(1, &textures);
printf("\ntexture = %u", textures);
glBindTexture(GL_TEXTURE_2D, textures);//evrything we're about to do is about this texture
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
//glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
//glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
//glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
//glDisable(GL_COLOR_MATERIAL);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8,w,h,0, GL_RGBA, GL_UNSIGNED_BYTE, &image);
//we COULD free the image vectors memory right about now.
}
void Texture::draw(point centerPoint, point dimensions)
{
glEnable(GL_TEXTURE_2D);
printf("\nDrawing block at (%f, %f)",centerPoint.x, centerPoint.y);
glBindTexture(GL_TEXTURE_2D, textures);//bind the texture
//create a quick vertex array for the primitive we're going to bind the texture to
printf("TexID = %u",textures);
GLfloat vArray[8] =
{
centerPoint.x-(dimensions.x/2), centerPoint.y-(dimensions.y/2),//bottom left i0
centerPoint.x-(dimensions.x/2), centerPoint.y+(dimensions.y/2),//top left i1
centerPoint.x+(dimensions.x/2), centerPoint.y+(dimensions.y/2),//top right i2
centerPoint.x+(dimensions.x/2), centerPoint.y-(dimensions.y/2)//bottom right i3
};
//create a quick texture array (we COULD create this on the heap rather than creating/destoying every cycle)
GLfloat tArray[8] =
{
0.0f,0.0f, //0
0.0f,1.0f, //1
1.0f,1.0f, //2
1.0f,0.0f //3
};
//and finally.. the index array...remember, we draw in triangles....(and we'll go CW)
GLubyte iArray[6] =
{
0,1,2,
0,2,3
};
//Activate arrays
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
//Give openGL a pointer to our vArray and tArray
glVertexPointer(2, GL_FLOAT, 0, &vArray[0]);
glTexCoordPointer(2, GL_FLOAT, 0, &tArray[0]);
//Draw it all
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, &iArray[0]);
//glDrawArrays(GL_TRIANGLES,0,6);
//Disable the vertex arrays
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisable(GL_TEXTURE_2D);
//done!
/*glBegin(GL_QUADS);
glTexCoord2f(0.0f,0.0f);
glVertex2f(centerPoint.x-(dimensions.x/2), centerPoint.y-(dimensions.y/2));
glTexCoord2f(0.0f,1.0f);
glVertex2f(centerPoint.x-(dimensions.x/2), centerPoint.y+(dimensions.y/2));
glTexCoord2f(1.0f,1.0f);
glVertex2f(centerPoint.x+(dimensions.x/2), centerPoint.y+(dimensions.y/2));
glTexCoord2f(1.0f,0.0f);
glVertex2f(centerPoint.x+(dimensions.x/2), centerPoint.y-(dimensions.y/2));
glEnd();*/
}
Texture::Texture(void)
{
}
Texture::~Texture(void)
{
}
I'll also include the main class' init, where I do a bit more OGL setup before this.
void init(void)
{
printf("\n......Hello Guy. \n....\nInitilising");
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,XSize,0,YSize);
glEnable(GL_TEXTURE_2D);
myBlock = new Block(0,0,offset);
glClearColor(0,0.4,0.7,1);
glLineWidth(2); // Width of the drawing line
glMatrixMode(GL_MODELVIEW);
glDisable(GL_DEPTH_TEST);
printf("\nInitialisation Complete");
}
Update: adding in the main function where I first setup my OpenGL window.
int main(int argc, char** argv)
{
glutInit(&argc, argv); // GLUT Initialization
glutInitDisplayMode(GLUT_RGBA|GLUT_DOUBLE); // Initializing the Display mode
glutInitWindowSize(800,600); // Define the window size
glutCreateWindow("Gem Miners"); // Create the window, with caption.
printf("\n========== McLeanTech Systems =========\nBecoming Sentient\n...\n...\n....\nKILL\nHUMAN\nRACE \n");
init(); // All OpenGL initialization
//-- Callback functions ---------------------
glutDisplayFunc(display);
glutKeyboardFunc(mykey);
glutSpecialFunc(processSpecialKeys);
glutSpecialUpFunc(processSpecialUpKeys);
//glutMouseFunc(mymouse);
glutMainLoop(); // Loop waiting for event
}
Here's the usual checklist for whenever textures come out white:
Is an OpenGL context created and bound to the current thread when attempting to load the texture?
Was a texture ID allocated using glGenTextures?
Are the format and internal format parameters to glTex[Sub]Image… valid OpenGL tokens allowed as input for this function?
Is mipmapping being used?
YES: Supply all mipmap layers -- optimally, set the glTexParameteri parameters GL_TEXTURE_BASE_LEVEL and GL_TEXTURE_MAX_LEVEL, as well as GL_TEXTURE_MIN_LOD and GL_TEXTURE_MAX_LOD.
NO: Turn off mipmap filtering by setting the glTexParameteri parameter GL_TEXTURE_MIN_FILTER to GL_NEAREST or GL_LINEAR.
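A minimal sketch covering the checklist for the no-mipmap case, using the names from the question. Note also (my observation, not part of the checklist) that the code above passes &image, the address of the std::vector object itself, to glTexImage2D, where it should pass the address of the pixel data:

glGenTextures(1, &textures);
glBindTexture(GL_TEXTURE_2D, textures);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // no mipmap filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, &image[0]); // pixel data, not &image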

Problems with GL_LUMINANCE and ATI

I'm trying to use luminance textures on my ATI graphics card.
The problem: I'm not able to correctly retrieve data from my GPU. Whenever I try to read it back (using glReadPixels), all I get is an 'all-ones' array (1.0, 1.0, 1.0, ...).
You can test it with this code:
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/glut.h>
static int arraySize = 64;
static int textureSize = 8;
//static GLenum textureTarget = GL_TEXTURE_2D;
//static GLenum textureFormat = GL_RGBA;
//static GLenum textureInternalFormat = GL_RGBA_FLOAT32_ATI;
static GLenum textureTarget = GL_TEXTURE_RECTANGLE_ARB;
static GLenum textureFormat = GL_LUMINANCE;
static GLenum textureInternalFormat = GL_LUMINANCE_FLOAT32_ATI;
int main(int argc, char** argv)
{
// create test data and fill arbitrarily
float* data = new float[arraySize];
float* result = new float[arraySize];
for (int i = 0; i < arraySize; i++)
{
data[i] = i + 1.0;
}
// set up glut to get valid GL context and
// get extension entry points
glutInit (&argc, argv);
glutCreateWindow("TEST1");
glewInit();
// viewport transform for 1:1 pixel=texel=data mapping
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, textureSize, 0.0, textureSize);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glViewport(0, 0, textureSize, textureSize);
// create FBO and bind it (that is, use offscreen render target)
GLuint fboId;
glGenFramebuffersEXT(1, &fboId);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
// create texture
GLuint textureId;
glGenTextures (1, &textureId);
glBindTexture(textureTarget, textureId);
// set texture parameters
glTexParameteri(textureTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(textureTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP);
// define texture with floating point format
glTexImage2D(textureTarget, 0, textureInternalFormat, textureSize, textureSize, 0, textureFormat, GL_FLOAT, 0);
// attach texture
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, textureTarget, textureId, 0);
// transfer data to texture
//glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
//glRasterPos2i(0, 0);
//glDrawPixels(textureSize, textureSize, textureFormat, GL_FLOAT, data);
glBindTexture(textureTarget, textureId);
glTexSubImage2D(textureTarget, 0, 0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, data);
// and read back
glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
glReadPixels(0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, result);
// print out results
printf("**********************\n");
printf("Data before roundtrip:\n");
printf("**********************\n");
for (int i = 0; i < arraySize; i++)
{
printf("%f, ", data[i]);
}
printf("\n\n\n");
printf("**********************\n");
printf("Data after roundtrip:\n");
printf("**********************\n");
for (int i = 0; i < arraySize; i++)
{
printf("%f, ", result[i]);
}
printf("\n");
// clean up
delete[] data;
delete[] result;
glDeleteFramebuffersEXT (1, &fboId);
glDeleteTextures (1, &textureId);
system("pause");
return 0;
}
I also read somewhere on the internet that ATI cards don't support luminance yet. Does anyone know if this is true?
This has nothing to do with luminance values; the problem is that you're reading back floating-point values.
In order to read floating-point data back properly via glReadPixels, you first need to set the color clamping mode. Since you're obviously not using OpenGL 3.0+, you should be looking at the ARB_color_buffer_float extension. That extension provides glClampColorARB, which works pretty much like the core 3.0 version.
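A minimal sketch of what that call could look like here (assuming GLEW has already been initialized, as in the test program):

if (GLEW_ARB_color_buffer_float)
{
    // disable clamping of values read back from the framebuffer
    glClampColorARB(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
}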
Here's what I found out:
1) If you use GL_LUMINANCE as the texture format (and GL_LUMINANCE_FLOAT32_ATI, GL_LUMINANCE32F_ARB or GL_RGBA_FLOAT32_ATI as the internal format), glClampColor(..) (or glClampColorARB(..)) doesn't seem to work at all.
I was only able to see the values getting actively clamped/not clamped if I set the texture format to GL_RGBA. I don't understand why this happens, since the only glClampColor(..) limitation I have heard of is that it works exclusively with floating-point buffers, which all the chosen internal formats seem to be.
2) If you use GL_LUMINANCE (again, with GL_LUMINANCE_FLOAT32_ATI, GL_LUMINANCE32F_ARB or GL_RGBA_FLOAT32_ATI as the internal format), it looks like you must "correct" your output buffer by dividing each of its elements by 3. I guess this happens because glTexImage2D(..) with GL_LUMINANCE internally replicates each array component three times, and when you read GL_LUMINANCE values back with glReadPixels(..) it computes each value as the sum of the RGB components (thus three times what you gave as input). But again, it still gives you clamped values.
3) Finally, if you use GL_RED as the texture format (instead of GL_LUMINANCE), you don't need to pack your input buffer and you get your output buffer back properly. The values are not clamped and you don't need to call glClampColor(..) at all.
So I guess I'll stick with GL_RED, because in the end what I wanted was an easy way to send and collect floating-point values from my "kernels" without having to worry about offsetting array indices or anything like that.
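For concreteness, the combination from point 3, expressed in the test program's variables (GL_R32F assumes ARB_texture_rg or OpenGL 3.0 and is my substitution; the answer above does not pin down the internal format):

static GLenum textureTarget = GL_TEXTURE_RECTANGLE_ARB;
static GLenum textureFormat = GL_RED;          // one float in, one float out per texel
static GLenum textureInternalFormat = GL_R32F; // unclamped single-channel float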