I'm using OpenGL ES 2.0.
I'm using a framebuffer linked to a texture to do an offscreen render (of some simplistic metaballs), and then I'm rendering that texture to the main back buffer.
Everything is looking great except that the texture appears clipped, i.e. it is not the full window dimensions (short by about 128 pixels on one axis). Here's a screenshot: http://tinypic.com/r/9telwg/7
Any ideas what could cause this? I read here that I should set glViewport to the size of the texture, but that gives me a different aspect ratio, since the texture metaballsTexture is square (1024x1024) and my window is 768x1024. It also still remains a bit clipped; it seems I can't get the framebuffer to be big enough, even though the texture is bigger than my window. Below is my code. I call PrepareToAddMetaballs() during the render when I'm ready, followed by successive calls to AddMetaball (now rendered onto my offscreen FBO), then FinishedAddingMetaballs() when I'm done. Later I call Render() to display the offscreen texture linked to the FBO onto the main back buffer.
#include "Metaballs.h"
#include "s3e.h"
#include "IwGL.h"
#include "Render.h"
#include "vsml.h"
#include <vector>
#include <string>
#include <iostream>
#include "1013Maths.h"
#define GL_RGBA8 0x8058
MetaBalls::MetaBalls() : metaballsTexture(NULL), metaballsShader(NULL) {
glGenFramebuffers(1, &myFBO);
metaballTexture[0] = NULL;
metaballTexture[1] = NULL;
metaballTexture[2] = NULL;
CRender::Instance()->CreateTexture("WaterCanvas.png", &metaballsTexture);
CRender::Instance()->CreateTexture("metaball.pvr", &metaballTexture[0]);
CRender::Instance()->CreateTexture("metaball-1.png", &metaballTexture[1]);
CRender::Instance()->CreateTexture("metaball-2.png", &metaballTexture[2]);
CRender::Instance()->CreateShader("Shaders/metaballs.fs", "Shaders/metaballs.vs", &metaballsShader);
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Attach texture to frame buffer
glBindTexture(GL_TEXTURE_2D, metaballsTexture->m_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, metaballsTexture->m_id, 0);
glClearColor(1,1,1,0);
glClear(GL_COLOR_BUFFER_BIT);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::string error = "Metaballs framebuffer incomplete";
std::cerr << error << std::endl;
throw error;
}
float w = PTM_DOWNSCALE(float(metaballsTexture->GetWidth()));
float h = PTM_DOWNSCALE(float(metaballsTexture->GetHeight()));
CRender::Instance()->BuildQuad(
tVertex( b2Vec3(0,0,0), b2Vec2(0,1) ),
tVertex( b2Vec3(w,0,0), b2Vec2(1,1) ),
tVertex( b2Vec3(w,h,0), b2Vec2(1,0) ),
tVertex( b2Vec3(0,h,0), b2Vec2(0,0) ),
buffer);
}
MetaBalls::~MetaBalls() {
CRender::Instance()->ReleaseShader(metaballsShader);
CRender::Instance()->ReleaseTexture(metaballsTexture);
CRender::Instance()->ReleaseTexture(metaballTexture[0]);
CRender::Instance()->ReleaseTexture(metaballTexture[1]);
CRender::Instance()->ReleaseTexture(metaballTexture[2]);
glDeleteFramebuffers(1, &myFBO);
}
void MetaBalls::PrepareToAddMetaballs(b2Vec3& paintColour) {
// bind render to texture
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Set our viewport so our texture isn't clipped (appears stretched and clipped)
// glViewport(0, 0, metaballsTexture->GetWidth(), metaballsTexture->GetHeight());
glClearColor(paintColour.x, paintColour.y, paintColour.z, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
void MetaBalls::FinishedAddingMetaballs() {
glBindFramebuffer(GL_FRAMEBUFFER, NULL);
// CRender::Instance()->SetWindowViewport();
}
void MetaBalls::AddMetaball(float x, float y, uint size) {
// render the metaball texture to larger texture
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(x);
pTransform[13] = PTM_DOWNSCALE(y+4); // the +4 is for a bit of overlap with land
float oldview[16];
float identity[16];
VSML::setIdentityMatrix(identity);
memcpy(oldview, CRender::Instance()->GetViewMatrix(), sizeof(float)*16);
memcpy(CRender::Instance()->GetViewMatrix(),identity, sizeof(float)*16);
CRender::Instance()->DrawSprite(metaballTexture[size], pTransform, 1.0f, true);
memcpy(CRender::Instance()->GetViewMatrix(),oldview, sizeof(float)*16);
}
void MetaBalls::Render() {
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(-128);
pTransform[13] = PTM_DOWNSCALE(-256);
// render our metaballs texture using alpha test shader
CRender::Instance()->BindShader(metaballsShader);
CRender::Instance()->BindTexture(0, metaballsTexture);
CRender::Instance()->SetMatrix(metaballsShader, "view", CRender::Instance()->GetViewMatrix());
CRender::Instance()->SetMatrix(metaballsShader, "world", pTransform);
CRender::Instance()->SetMatrix(metaballsShader, "proj", CRender::Instance()->GetProjMatrix());
CRender::Instance()->SetBlending(true);
CRender::Instance()->DrawPrimitives(buffer);
CRender::Instance()->SetBlending(false);
}
====================
EDIT
Aha! Got it. I haven't found this example anywhere, but I fixed it by adjusting the projection matrix. It was set to 1024x768 when it was working, but with a window size of 768x1024 both the projection matrix and the viewport were changing. By setting each to 1024x768 manually (I chose to use constants), the metaballs are rendered correctly offscreen with the proper aspect ratio. Their 1024x1024 texture is rendered as a billboard with that aspect ratio, nice and sharp. After I'm done I restore both to what the rest of the application uses. Below is the working code:
#include "Metaballs.h"
#include "s3e.h"
#include "IwGL.h"
#include "Render.h"
#include "vsml.h"
#include <vector>
#include <string>
#include <iostream>
#include "1013Maths.h"
MetaBalls::MetaBalls() : metaballsTexture(NULL), metaballsShader(NULL) {
glGenFramebuffers(1, &myFBO);
metaballTexture[0] = NULL;
metaballTexture[1] = NULL;
metaballTexture[2] = NULL;
CRender::Instance()->CreateTexture("WaterCanvas.png", &metaballsTexture);
CRender::Instance()->CreateTexture("metaball.pvr", &metaballTexture[0]);
CRender::Instance()->CreateTexture("metaball-1.png", &metaballTexture[1]);
CRender::Instance()->CreateTexture("metaball-2.png", &metaballTexture[2]);
CRender::Instance()->CreateShader("Shaders/metaballs.fs", "Shaders/metaballs.vs", &metaballsShader);
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Attach texture to frame buffer
glBindTexture(GL_TEXTURE_2D, metaballsTexture->m_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, metaballsTexture->m_id, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::string error = "Metaballs framebuffer incomplete";
std::cerr << error << std::endl;
throw error;
}
float w = PTM_DOWNSCALE(float(metaballsTexture->m_width));
float h = PTM_DOWNSCALE(float(metaballsTexture->m_height));
CRender::Instance()->BuildQuad(
tVertex( b2Vec3(0,0,0), b2Vec2(0,1) ),
tVertex( b2Vec3(w,0,0), b2Vec2(1,1) ),
tVertex( b2Vec3(w,h,0), b2Vec2(1,0) ),
tVertex( b2Vec3(0,h,0), b2Vec2(0,0) ),
buffer);
// return to default state
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
MetaBalls::~MetaBalls() {
CRender::Instance()->ReleaseShader(metaballsShader);
CRender::Instance()->ReleaseTexture(metaballsTexture);
CRender::Instance()->ReleaseTexture(metaballTexture[0]);
CRender::Instance()->ReleaseTexture(metaballTexture[1]);
CRender::Instance()->ReleaseTexture(metaballTexture[2]);
glDeleteFramebuffers(1, &myFBO);
}
void MetaBalls::PrepareToAddMetaballs(b2Vec3& paintColour) {
// bind render to texture
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Set orthographic projection
cfloat w = SCREEN_WIDTH / PTM_RATIO;
cfloat h = SCREEN_HEIGHT / PTM_RATIO;
VSML::ortho(-w, 0, -h, 0, 0.0f, -1.0f, CRender::Instance()->m_Proj);
// Set our viewport so our texture isn't clipped
glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
glClearColor(paintColour.x, paintColour.y, paintColour.z, 0.1f);
glClear(GL_COLOR_BUFFER_BIT);
}
void MetaBalls::FinishedAddingMetaballs() {
glBindFramebuffer(GL_FRAMEBUFFER, NULL);
CRender::Instance()->SetWindowViewport();
}
void MetaBalls::AddMetaball(float x, float y, uint size) {
// render the metaball texture to larger texture
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(x);
pTransform[13] = PTM_DOWNSCALE(y);
float oldview[16];
float identity[16];
VSML::setIdentityMatrix(identity);
memcpy(oldview, CRender::Instance()->GetViewMatrix(), sizeof(float)*16);
memcpy(CRender::Instance()->GetViewMatrix(),identity, sizeof(float)*16);
CRender::Instance()->DrawSprite(metaballTexture[size], pTransform, 1.0f, true);
memcpy(CRender::Instance()->GetViewMatrix(),oldview, sizeof(float)*16);
}
void MetaBalls::Render() {
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(0);
pTransform[13] = PTM_DOWNSCALE(-256);
// render our metaballs texture using alpha test shader
CRender::Instance()->BindShader(metaballsShader);
CRender::Instance()->BindTexture(0, metaballsTexture);
CRender::Instance()->SetMatrix(metaballsShader, "view", CRender::Instance()->GetViewMatrix());
CRender::Instance()->SetMatrix(metaballsShader, "world", pTransform);
CRender::Instance()->SetMatrix(metaballsShader, "proj", CRender::Instance()->GetProjMatrix());
CRender::Instance()->SetBlending(true);
CRender::Instance()->DrawPrimitives(buffer);
CRender::Instance()->SetBlending(false);
}
Are you setting your viewport according to the texture's size? I didn't find any viewport setting in your code...
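For reference, here is a minimal sketch of the pattern that comment is suggesting; the names myFBO, textureWidth/textureHeight and windowWidth/windowHeight are placeholders rather than values taken from the code above. The idea is to match the viewport to whatever render target is currently bound, then restore it for the back buffer.
// Hypothetical sketch: the viewport follows the bound render target
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
glViewport(0, 0, textureWidth, textureHeight);   // e.g. the 1024x1024 texture
// ... draw the metaballs into the texture ...
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, windowWidth, windowHeight);     // e.g. the 768x1024 window
// ... draw the textured quad to the back buffer ...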
Related
I am trying to render some polygons to a texture, and then render the texture to the screen.
I'm not sure how to debug my code, since that would require probing the internal state of OpenGL, so I would appreciate tips on how to debug it myself more than having my specific error pointed out.
Anyway, I commented the code I wrote explaining what I expect each line to do.
Here is a description of what the code is supposed to do.
Basically, I made a vertex shader that provides the position, UV and color to the fragment shader. The fragment shader has a uniform to activate texture sampling; otherwise it will just output the input color. In both cases, the color is multiplied by a uniform color. First I create a texture and fill it with red and green raw pixel data for testing. This texture is correctly rendered to the screen (I see the red and green parts exactly as I initialized them). Then I try to do the actual rendering to the texture. I try to render a small blue square in the middle of it (sampler disabled in the fragment shader, color uniform set to blue), but I can't get this blue square to appear on the rendered texture.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "utils.h"
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <iostream>
using namespace std;
#define numVAOs 1
#define numVBOs 1
GLuint shaderProgram;
GLuint unifUseTexture, unifInTexture, unifTMat, unifDrawColor;
GLuint texture;
GLuint textureFrameBuffer;
GLuint vao[numVAOs];
GLuint vbo[numVBOs];
void drawRectangle() {
}
void init() {
// Compile the shaderProgram
shaderProgram = createShaderProgram("vertex.glsl","fragment.glsl");
// Retrieve the uniform location
unifUseTexture = glGetUniformLocation(shaderProgram,"useTexture");
unifInTexture = glGetUniformLocation(shaderProgram,"inTexture");
unifTMat = glGetUniformLocation(shaderProgram,"tMat");
unifDrawColor = glGetUniformLocation(shaderProgram,"drawColor");
// Create vertex array object and vertex buffer object
glGenVertexArrays(numVAOs,vao);
glBindVertexArray(vao[0]);
float xyzuvrgbaSquare[54] = {
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* B */ 1.0,1.0,0.0, 1.0,1.0, 1.0,1.0,1.0,1.0,
/* A */ -1.0,1.0,0.0, 0.0,1.0, 1.0,1.0,1.0,1.0,
/* C */ 1.0,-1.0,0.0, 1.0,0.0, 1.0,1.0,1.0,1.0,
/* D */-1.0,-1.0,0.0, 0.0,0.0, 1.0,1.0,1.0,1.0
};
glGenBuffers(numVBOs,vbo);
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glBufferData(GL_ARRAY_BUFFER, 4*54,xyzuvrgbaSquare,GL_STATIC_DRAW);
// Associate vbo with the correct vertex attribute to display the rectangle
glBindBuffer(GL_ARRAY_BUFFER,vbo[0]);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,36,0); // inPosition
glVertexAttribPointer(1,4,GL_FLOAT,GL_FALSE,36,(void*)20); // inColor
glVertexAttribPointer(2,2,GL_FLOAT,GL_FALSE,36,(void*)12); // inUV
glEnableVertexAttribArray(0); // location=0 in the shader
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
// Generate a small 128x128 texture. I followed the tutorial
// over http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-14-render-to-texture/
// generate a frameBuffer to contain the texture
glGenFramebuffers(1,&textureFrameBuffer);
// Bind it, so when I will generate the texture it will be associated with it
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glGenTextures(1,&texture);
glBindTexture(GL_TEXTURE_2D,texture);
// Put some raw data inside of it for testing purposes. I will fill it
// half with green, half with red
unsigned char* imageRaw = new unsigned char[4*128*128];
for(int i=0; i<4*128*64; i+=4) {
imageRaw[i] = 255;
imageRaw[i+1] = 0;
imageRaw[i+2] = 0;
imageRaw[i+3] = 255;
imageRaw[4*128*64+i] = 0;
imageRaw[4*128*64+i+1] = 255;
imageRaw[4*128*64+i+2] = 0;
imageRaw[4*128*64+i+3] = 255;
}
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,128,128,0,GL_RGBA,GL_UNSIGNED_BYTE,imageRaw);
// Setup some required parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// Draw a small blue square on the texture
// So, activate the previously compiled shader program and setup the uniforms
glUseProgram(shaderProgram);
// First, create a transform matrix to make the square smaller (20% of texture)
glm::mat4 tMat = glm::scale(glm::mat4(1.0f),glm::vec3(0.2,0.2,0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// do not use a texture (ignore sampler2D in fragment shader)
glUniform1i(unifUseTexture,0);
// use the color BLUE for the rectangle
glUniform4f(unifDrawColor,0.0,0.0,1.0,1.0);
// Bind the textureFrameBuffer to render on the texture instead of the screen
glBindFramebuffer(GL_FRAMEBUFFER,textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,texture,0);
GLenum drawBuffers[1] = {GL_COLOR_ATTACHMENT0};
glDrawBuffers(1, drawBuffers);
GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
if( status != GL_FRAMEBUFFER_COMPLETE ) {
cout << "framebuffer status: " << status << endl;
}
// the vertex framebuffer and vertex attribute pointer have already been
// described, so I'll just do the draw call here
glDrawArrays(GL_TRIANGLES,0,6);
// Display the texture on screen
// Bind the screen framebuffer (0) so the following rendering will occur on screen
glBindFramebuffer(GL_FRAMEBUFFER,0);
// Put a white background color
glClearColor(1.0,1.0,1.0,1.0);
glClear(GL_COLOR_BUFFER_BIT);
// Change properly the shader uniforms
glUniform4f(unifDrawColor,1.0,1.0,1.0,1.0); // multiply by white, no changes
glUniform1i(unifUseTexture,1); // set useTexture to True
// Create a transform matrix to scale the rectangle so that it uses up only half screen
tMat = glm::scale(glm::mat4(1.0f),glm::vec3(.5,.5,.0));
glUniformMatrix4fv(unifTMat,1,GL_FALSE,glm::value_ptr(tMat));
// Put the sampler2D
glActiveTexture(GL_TEXTURE0); // Work on texture0
// 0 because of (binding = 0) on the fragment shader
glBindTexture(GL_TEXTURE_2D,texture);
glDrawArrays(GL_TRIANGLES,0,6); // 6 vertices
}
int main(int argc, char** argv) {
// Build the window
if (!glfwInit()) exit(EXIT_FAILURE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR,4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR,3);
GLFWwindow* window = glfwCreateWindow(600,600,"Dashboard",NULL,NULL);
glfwMakeContextCurrent(window);
if(glewInit() != GLEW_OK) exit(EXIT_FAILURE);
glfwSwapInterval(1);
init();
while(!glfwWindowShouldClose(window)) {
//display(window,glfwGetTime());
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Edit: I forgot to put the shader code here, though the problem is not within the shader, because it does work when used to render the texture to the screen.
vertex.glsl:
#version 430
layout (location=0) in vec3 inPosition;
layout (location=1) in vec4 inColor;
layout (location=2) in vec2 inUV;
uniform mat4 tMat;
uniform vec4 drawColor;
out vec4 varyingColor;
out vec2 varyingUV;
void main(void) {
gl_Position = tMat * vec4(inPosition,1.0);
varyingColor = inColor*drawColor;
varyingUV = inUV;
}
fragment.glsl:
#version 430
in vec4 varyingColor;
in vec2 varyingUV;
layout(location = 0) out vec4 color;
layout (binding=0) uniform sampler2D inTexture;
uniform bool useTexture;
void main(void) {
if( useTexture )
color = vec4(texture(inTexture,varyingUV).rgb,1.0) * varyingColor;
else
color = varyingColor;
}
The texture which is attached to the framebuffer has a different size than the window. Hence you have to adjust the viewport rectangle (glViewport) to the size of the currently bound framebuffer before drawing the geometry:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageRaw);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, textureFrameBuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture,0);
glViewport(0, 0, 128, 128);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);
// [...]
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, WIDTH, HEIGHT);
// [...]
glDrawArrays(GL_TRIANGLES, 0, 6);
I'm trying to display text using SDL2_ttf and OpenGL. A weird texture appears in the window; it's got the right size and the right position, but you can't see any letters.
I've tried using SDL_CreateRGBSurface(), thinking that it might be a cleaner way to retrieve the pixels, but it didn't work either. My surface is never NULL and always passes the validation test.
I use the get_font() function before the while() loop, and the displayMoney() function inside it, right after calling glClear(GL_COLOR_BUFFER_BIT).
SDL, TTF and OpenGL are initialized properly and I have created an OpenGL context. Here's the problematic code:
SDL_Surface* get_font()
{
TTF_Font *font;
font = TTF_OpenFont("lib/ariali.ttf", 35);
if (!font) cout << "problem loading font" << endl;
SDL_Color white = {150,200,200};
SDL_Color black = {0,100,0};
SDL_Surface* text = TTF_RenderText_Shaded(font, "MO", white, black);
if (!text) cout << "text not loaded" << endl;
return text;
}
void displayMoney(SDL_Surface* surface)
{
glEnable( GL_BLEND );
glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
glEnable(GL_TEXTURE_2D);
GLuint TextureID = 0;
glGenTextures(1, &TextureID);
glBindTexture(GL_TEXTURE_2D, TextureID);
int Mode = GL_RGB;
if(surface->format->BytesPerPixel == 4) {
Mode = GL_RGBA;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, Mode, 128, 64, 0, Mode, GL_UNSIGNED_BYTE, surface->pixels);
glPushMatrix();
glTranslated(100,100,0);
glScalef(100,100,0);
glBegin(GL_QUADS);
glTexCoord2f(0, 1); glVertex2f(-0.5f, -0.5f);
glTexCoord2f(1, 1); glVertex2f(0.5f, -0.5f);
glTexCoord2f(1, 0); glVertex2f(0.5f, 0.5f);
glTexCoord2f(0, 0); glVertex2f(-0.5f, 0.5f);
glEnd();
glPopMatrix();
glBindTexture(GL_TEXTURE_2D, 0);
}
#include <SDL2/SDL.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#include <GL/gl.h>
#include <GL/glu.h>
#include <stb_image/stb_image.h>
#include <SDL2_ttf/SDL_ttf.h>
#include "init.h"
int main(int argc, char **argv) {
SDL_Window* window = init();
if (window == nullptr) {
cout << "Error window init" << endl;
}
if (TTF_Init() < 0) {
cout << "Error TTF init" << endl;
}
SDL_Surface* text = get_font();
while (loop) {
glClear(GL_COLOR_BUFFER_BIT);
displayMoney(text);
...
SDL_GL_SwapWindow(window);
There aren't any error messages. Also, instead of using my surface, I tested my code with an image by using the stbi_load function and it worked perfectly well. The issue therefore seems to be with the SDL part.
EDIT : I've recently found out the surface I get from my text has the following properties: Rmask=Gmask=Bmask=Amask = 0. This is obviously a problem but I've no idea how to fix it...
As stated in SDL_ttf documentation at https://www.libsdl.org/projects/SDL_ttf/docs/SDL_ttf.html#SEC42 ,
Shaded: Create an 8-bit palettized surface and render the given text at high quality with the given font and colors. The 0 pixel value is background, while other pixels have varying degrees of the foreground color from the background color.
So your resulting surface is an indexed surface with an 8-bit palette, not RGBA (as also indicated by the missing colour masks in the surface format, as you've noted). An RGBA surface with an alpha channel is produced by e.g. TTF_RenderText_Blended; alternatively, use a different texture format or perform a format conversion. You also need to pass the surface's width/height to glTexImage2D instead of the 128/64 constants, as the surface size may vary.
You also have several resource leaks in the question's code: you create a new texture on each draw and never delete it (which is also unnecessary if the text isn't changing), and you never close the font with TTF_CloseFont.
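A minimal sketch of that approach, assuming SDL2 with SDL2_ttf; the variable names are illustrative, and it assumes the converted surface is tightly packed (pitch == w * 4):
// Hypothetical sketch: render once with TTF_RenderText_Blended (RGBA output),
// convert to a known byte order, upload using the surface's real size, and
// release the resources instead of leaking a texture every frame.
SDL_Color white = {255, 255, 255, 255};
SDL_Surface* text = TTF_RenderText_Blended(font, "MO", white);
SDL_Surface* rgba = SDL_ConvertSurfaceFormat(text, SDL_PIXELFORMAT_ABGR8888, 0);
SDL_FreeSurface(text);
GLuint textureID = 0;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, rgba->w, rgba->h, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, rgba->pixels);
SDL_FreeSurface(rgba);
// ... draw the quad ...
// when the text is no longer needed:
// glDeleteTextures(1, &textureID);
// TTF_CloseFont(font);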
Okay, so I am working on a toy 2D engine. I initially used regular SDL_Surfaces for rendering and the built-in SDL_Renderer. But I thought: why not use OpenGL and get some experience with that?
I am stuck now, though. I have a context and things are rendered to the screen, but it looks like the textures I am trying to display are way too big to fit on the screen, so I only see a couple of their pixels.
The texture class can be found here:
#include "texture.h"
Texture::Texture(std::string path, bool loadNow) {
//Initialize texture ID
mTextureID = 0;
//Initialize texture dimensions
width = 0;
height = 0;
this->path = path;
if(loadNow) {
loadTexture(path);
}
}
Texture::~Texture() {
freeTexture();
}
bool Texture::loadTexture(std::string path) {
//Texture loading success
loaded = false;
SDL_Surface *image = IMG_Load(path.c_str());
//Image loaded successfully
if(image != NULL) {
if((image->w & (image->w - 1)) != 0) {
printf("Warning: image width not power of 2 -> %s\n", path.c_str());
}
if((image->h & (image->h - 1)) != 0) {
printf("Warning: image height not power of 2 -> %s\n", path.c_str());
}
loaded = loadTextureFromPixels32(image, (GLuint)image->w, (GLuint)image->h);
}
//Report error
if(!loaded) {
printf( "Unable to load %s\n", path.c_str() );
}
return loaded;
}
bool Texture::loadTextureFromPixels32(SDL_Surface *image, GLuint width, GLuint height ) {
//Free texture if it exists
freeTexture();
//Get texture dimensions
this->width = width;
this->height = height;
//Generate texture ID
glGenTextures(1, &mTextureID);
//Bind texture ID
glBindTexture(GL_TEXTURE_2D, mTextureID);
//Generate texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image->pixels);
//Set texture parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
//Unbind texture
glBindTexture(GL_TEXTURE_2D, 0);
//Check for error
GLenum error = glGetError();
if(error != GL_NO_ERROR) {
printf("Error loading texture from %p pixels!\n", image->pixels);
return false;
}
return true;
}
void Texture::render(GLfloat x, GLfloat y) {
if(loaded) {
//If the texture exists
if(mTextureID != 0) {
GLfloat realX = x;// - (this->width / 2);
GLfloat realY = y;// - (this->height / 2);
//Remove any previous transformations
glLoadIdentity();
//Move to rendering point
glTranslatef(realX, realY, 0.f);
glClearDepth(1.0f);
//Set texture ID
glBindTexture(GL_TEXTURE_2D, mTextureID);
//Render textured quad
glBegin(GL_QUADS);
glTexCoord2f( 0.f, 0.f ); glVertex2f(0.f, 0.f);
glTexCoord2f( 1.f, 0.f ); glVertex2f(width, 0.f);
glTexCoord2f( 1.f, 1.f ); glVertex2f(width, height);
glTexCoord2f( 0.f, 1.f ); glVertex2f(0.f, height);
glEnd();
}
} else {
// do nothing
}
}
GLuint Texture::getWidth() {
return this->width;
}
GLuint Texture::getHeight() {
return this->height;
}
void Texture::freeTexture() {
//Delete texture
if(mTextureID != 0) {
glDeleteTextures(1, &mTextureID);
mTextureID = 0;
}
width = 0;
height = 0;
}
I am guessing the problem is here, but it could also be in how I initialize OpenGL so here is that:
void Main::initGL() {
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
/* Turn on double buffering with a 24bit Z buffer.
* You may need to change this to 16 or 32 for your system */
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32);
glContext = SDL_GL_CreateContext(this->window);
glViewport(0.0, 0.0, SCREEN_WIDTH, SCREEN_HEIGHT);
glOrtho( 0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0 );
SDL_GL_SetSwapInterval(0);
//Initialize clear color
glClearColor( 0.f, 0.f, 0.f, 1.f );
//Enable texturing
glEnable( GL_TEXTURE_2D );
//Check for error
GLenum error = glGetError();
if(error != GL_NO_ERROR) {
printf("Error initializing OpenGL!\n");
}
}
SDL is correctly initialized otherwise there wouldn't be anything on the screen. I am completely new to OpenGL so any help would be appreciated.
You mix ordinary GL_TEXTURE_2D stuff with GL_TEXTURE_RECTANGLE, and enabling both is a very bad idea. You are using texcoords in the range [0,1], so you actually seem to want GL_TEXTURE_2D. You should rewrite your texture code to use that and drop the rectangle textures entirely.
The next thing is that your projection setup is wrong. Your glOrtho call has no effect, since you completely overwrite it by loading the identity matrix a few lines later. You should make yourself familiar with the state machine approach the GL is using. As your matrices are set up currently, you draw a huge quad with most of it completely off the screen.
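A sketch of one way to set this up so the ortho projection survives, using the SCREEN_WIDTH/SCREEN_HEIGHT constants from the question's init code; per-sprite transforms then only touch the modelview matrix:
// in initGL(): put the ortho projection on the projection stack
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0, 1.0, -1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// in Texture::render(): glLoadIdentity()/glTranslatef() now only reset the
// modelview matrix, leaving the projection intact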
Now that part is completely strange:
/* Request opengl 3.2 context.
* SDL doesn't have the ability to choose which profile at this time of writing,
* but it should default to the core profile */
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
This code will never create a core profile, because core profiles didn't even exist in GL 2.1; they were introduced in GL 3.2. It is unclear what SDL version you are using, but modern SDL is capable of selecting the profile.
However, your code is using completely outdated and deprecated OpenGL, so there is no way this will work with a core profile. If you learn OpenGL in this decade, I'd strongly suggest that you forget about all that and start with some documentation/tutorial on modern GL, and actually use a core profile.
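For illustration only: with a reasonably recent SDL2, an explicit 3.2 core profile request looks roughly like this (set before creating the context):
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GLContext glContext = SDL_GL_CreateContext(window);
// note: none of the fixed-function calls used above (glOrtho, glBegin, ...)
// are available in a core profile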
I am using FreeType, and the only thing I have left to do in order to render text is convert an FT_Bitmap to something that can be rendered with OpenGL. Can someone explain how to do this? I am using GLFW. The way I have tried to do it just gives a blank screen. Here is the code that I am using:
#include <exception>
#include <iostream>
#include <string>
#include <glew.h>
#include <GL/glfw.h>
#include <iterator>
#include "../include/TextRenderer.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#include <stdexcept>
#include <freetype/ftglyph.h>
using std::runtime_error;
using std::cout;
TextRenderer::TextRenderer(int x, int y, FT_Face Face, std::string s)
{
FT_Set_Char_Size(
Face, /* handle to face object */
0, /* char_width in 1/64th of points */
16*64, /* char_height in 1/64th of points */
0, /* horizontal device resolution */
0 ); /* vertical device resolution */
slot= Face->glyph;
text = s;
setsx(x);
setsy(y);
penX = x;
penY = y;
face = Face;
//shaders
GLuint v = glCreateShader(GL_VERTEX_SHADER) ;
const char* vs = "void main(){ gl_Position = ftransform();}";
glShaderSource(v,1,&vs,NULL);
glCompileShader(v);
GLuint f = glCreateShader(GL_FRAGMENT_SHADER) ;
const char* fs = "uniform sampler2D texture1; void main() { gl_FragColor = texture2D(texture1, gl_TexCoord[0].st); //And that is all we need}";
glShaderSource(f,1,&fs,NULL);
glCompileShader(f);
Program= glCreateProgram();
glAttachShader(Program,v);
glAttachShader(Program,f);
glLinkProgram(Program);
}
void TextRenderer::render()
{
glUseProgram(Program);
FT_UInt glyph_index;
for ( int n = 0; n < text.size(); n++ )
{
/* retrieve glyph index from character code */
glyph_index = FT_Get_Char_Index( face, text[n] );
/* load glyph image into the slot (erase previous one) */
error = FT_Load_Glyph( face, glyph_index, FT_LOAD_RENDER );
draw(&face->glyph->bitmap,penX + slot->bitmap_left,penY - slot->bitmap_top );
penX += *(&face->glyph->bitmap.width)+3;
penY += slot->advance.y >> 6; /* not useful for now */
}
}
void TextRenderer::draw(FT_Bitmap * bitmap,float x,float y)
{
GLuint texture [0] ;
glGenTextures(1,texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RED , bitmap->width, bitmap->rows, 0, GL_RED , GL_UNSIGNED_BYTE, bitmap);
// int loc = glGetUniformLocation(Program, "texture1");
// glUniform1i(loc, 0);
glBindTexture(GL_TEXTURE_2D, texture[0]);
glEnable(GL_TEXTURE_2D);
int height=bitmap->rows/10;
int width=bitmap->width/10;
glBegin(GL_QUADS);
glTexCoord2f (0.0, 0.0);
glVertex2f(x,y);
glTexCoord2f (1.0, 0.0);
glVertex2f(x+width,y);
glTexCoord2f (1.0, 1.0);
glVertex2f(x+width,y+height);
glTexCoord2f (0.0, 1.0);
glVertex2f(x,y+height);
glEnd();
glDisable(GL_TEXTURE_2D);
}
What i am using to initialize text renderer:
FT_Library library;
FT_Face arial;
FT_Error error = FT_Init_FreeType( &library );
if ( error )
{
throw std::runtime_error("Freetype failed");
}
error = FT_New_Face( library,
"C:/Windows/Fonts/Arial.ttf",
0,
&arial );
if ( error == FT_Err_Unknown_File_Format )
{
throw std::runtime_error("font format not available");
}
else if ( error )
{
throw std::runtime_error("Freetype font failed");
}
TextRenderer t(5,10,arial,"Hello");
t.render();
There are a lot of problems in your program that result from not understanding what each call that you make to OpenGL or FreeType does. You should really read the documentation for the libraries instead of stacking tutorials on top of each other.
Let's do this one by one
Fragment Shader
const char* fs = "uniform sampler2D texture1;
void main() {
gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);
//And that is all we need}";
This shader doesn't compile (you should really check if it compiles with glGetShaderiv and if it links with glGetProgramiv). If you indent it correctly then you'll see that you commented out the final } because it's in the same line and after the //. So, you should remove the comment or use a \n to end the comment.
Also, for newer versions of OpenGL using gl_TexCoord is deprecated but it works if you use a compatibility profile.
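For completeness, a rough sketch of those status checks, using the v/f/Program handles from the question's constructor:
GLint ok = GL_FALSE;
glGetShaderiv(f, GL_COMPILE_STATUS, &ok);          // the same check applies to v
if (ok != GL_TRUE) {
    char log[1024];
    glGetShaderInfoLog(f, sizeof(log), NULL, log);
    std::cerr << "fragment shader: " << log << std::endl;
}
glGetProgramiv(Program, GL_LINK_STATUS, &ok);
if (ok != GL_TRUE) {
    char log[1024];
    glGetProgramInfoLog(Program, sizeof(log), NULL, log);
    std::cerr << "program link: " << log << std::endl;
}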
Vertex Shader
Just like in the fragment shader, there's deprecated functionality used here, namely ftransform().
But the bigger problem is that you use gl_TexCoord[0] in the fragment shader without passing it through from the vertex shader. So, you need to add the line gl_TexCoord[0]=gl_MultiTexCoord0; in your vertex shader. (As you might have guessed that is also deprecated)
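Putting both fixes together, one possible corrected pair of shader strings (still compatibility-profile GLSL, as in the question) might look like this:
const char* vs =
    "void main() {\n"
    "    gl_TexCoord[0] = gl_MultiTexCoord0;\n"   // pass the texcoord through
    "    gl_Position = ftransform();\n"
    "}\n";
const char* fs =
    "uniform sampler2D texture1;\n"
    "void main() {\n"
    "    gl_FragColor = texture2D(texture1, gl_TexCoord[0].st);\n"
    "}\n";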
Texture passing
You are passing a pointer to bitmap to glTexImage2D, but bitmap is of type FT_Bitmap*; you need to pass bitmap->buffer instead.
You should not generate a new texture for each letter every frame (especially not if you're never deleting it). You should call glGenTextures only once (you could put it in your TextRenderer constructor, since you put all the other initialization stuff there).
Then there's the GLuint texture [0]; which should give you a compiler error. If you really need an array with one element then the syntax is GLuint texture [1];
So your final call would look something like this:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmap->width, bitmap->rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, bitmap->buffer);
Miscellaneous
int height=bitmap->rows/10;
int width=bitmap->width/10;
this is an integer division and if your values for bitmap->width get smaller than 10 you would get 0 as the result, which would make the quad you're trying to draw invisible (height or width of 0). If you have trouble getting the objects into view you should just translate/scale it into view. This is also deprecated but if you keep using the other stuff this would make your window have a coordinate system from [-100,-100] to [100,100] (lower-left to upper-right).
glLoadIdentity();
glScalef(0.01f, 0.01f, 1.0f);
You're also missing the coordinate conversion from FreeType to OpenGL, Freetype uses a coordinate system which starts at [0,0] in the top left corner and x is the offset to the right while y is the offset to the bottom. So if you just use these coordinates in OpenGL everything will be upside-down.
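One way to handle that inside draw(), assuming a conventional y-up projection, is to flip the V texture coordinate so the top row of the FreeType bitmap lands at the top of the quad; a sketch:
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 1.0f); glVertex2f(x,         y);            // bitmap bottom row
glTexCoord2f(1.0f, 1.0f); glVertex2f(x + width, y);
glTexCoord2f(1.0f, 0.0f); glVertex2f(x + width, y + height);   // bitmap top row
glTexCoord2f(0.0f, 0.0f); glVertex2f(x,         y + height);
glEnd();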
If you do all that your result should look something like this (grey background to highlight where the polygons begin and end):
As for your general approach, repurposing one texture and drawing letter by letter re-using and overwriting the same texture seems like an inefficient approach. It would be better to just allocate one larger texture and then use glTexSubImage2D to write the glyphs to it. If freetype re-rendering letters is a bottleneck you could also just write all the symbols you need into one texture at the beginning (for example the whole ASCII range) and then use that texture as a texture-atlas.
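A rough sketch of that atlas idea; the 1024x1024 size and the penX packing cursor are illustrative assumptions, not part of the original code:
// allocate one empty atlas texture up front
GLuint atlas;
glGenTextures(1, &atlas);
glBindTexture(GL_TEXTURE_2D, atlas);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, 1024, 1024, 0,
             GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
// then, for each glyph after FT_Load_Glyph(..., FT_LOAD_RENDER):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);    // FreeType rows are tightly packed
glTexSubImage2D(GL_TEXTURE_2D, 0, penX, 0,
                face->glyph->bitmap.width, face->glyph->bitmap.rows,
                GL_LUMINANCE, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
penX += face->glyph->bitmap.width + 1;    // advance the packing cursor
Each character's quad then just selects that glyph's sub-rectangle via its texture coordinates.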
My general advice would also be that if you don't really want to learn OpenGL but just want to use some cross-platform rendering without bothering with the low-level stuff I'd recommend using a rendering framework instead.
I want to draw a 2D array of pixel data (RGB / grayscale values) on the screen as fast as possible, using OpenGL. The pixel data changes frequently.
I had hoped that I would find a simple function that would let me push in a pointer to an array representing the pixel data, since this is probably the fastest approach. Unfortunately, I have found no such function.
What is the best way to accomplish this task?
Maybe glDrawPixels is the function you are looking for? Though if the data is static it would be better to create a texture with it, and then draw that each frame.
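A minimal sketch of that usage, assuming legacy/compatibility OpenGL and a tightly packed width x height RGB byte array named pixels:
glWindowPos2i(0, 0);                     // window-space position of the lower-left corner
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);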
I recently had a similar problem, as I am trying to render a video to screen (i.e. repeatedly upload pixel data to the VRAM). My approach is:
- use glTexImage2D and glTexSubImage2D to upload the data to the texture, i.e. bind the texture (and texture unit, if applicable) before calling them (see the sketch after this list)
- in my case, as the video frame rate (usually about 24 fps) is lower than the framerate of my application (aimed at 60 fps), in order to avoid uploading the same data again I use a framebuffer object (check out glGenFramebuffers/glBindFramebuffer/glDeleteFramebuffers) and link my texture with the framebuffer (glFramebufferTexture2D). I then upload that texture once and draw the same frame multiple times (just normal texture access with glBindTexture)
- I don't know which platform you are using, but as I am targeting Mac I use some Apple extensions to ensure the data transfer to the VRAM happens through DMA (i.e. make glTexSubImage2D return immediately to let the CPU do other work) - please feel free to ask me for more info if you are using Mac too
- also, as you are using just grayscale, you might want to consider using a GL_LUMINANCE texture (i.e. 1 byte per pixel) rather than an RGB-based format to make the upload faster (but that depends on the size of your texture data; I was streaming HD 1920x1080 video, so I needed to keep it down)
- also be aware of the format your hardware is using to avoid unnecessary data conversions (i.e. it normally seems better to use BGRA data than, for example, just RGB)
- finally, in my code I replaced all the fixed-pipeline functionality with shaders (in particular the conversion of the data from grayscale or YUV format to RGB), but again all that depends on the size of your data and the workload of your CPU or GPU
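Here is a rough sketch of the per-frame update path described in the first point; videoTexture, frameWidth, frameHeight and framePixels are placeholders:
// once, at startup: allocate storage without uploading any data yet
glBindTexture(GL_TEXTURE_2D, videoTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, frameWidth, frameHeight, 0,
             GL_BGRA, GL_UNSIGNED_BYTE, NULL);
// every time a new video frame arrives: overwrite the existing storage
glBindTexture(GL_TEXTURE_2D, videoTexture);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, frameWidth, frameHeight,
                GL_BGRA, GL_UNSIGNED_BYTE, framePixels);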
Hope this helps, feel free to message me if you need further info
I would think the fastest way would be to draw a screen sized quad with ortho projection and use a pixel shader and Texture Buffer Object to draw directly to the texture in the pixel shader. Due to latency transferring to/from the TBO you may want to see if double buffering would help.
If speed isn't much of a concern (you just need fairly interactive framerates) glDrawPixels is easy to use and works well enough for many purposes.
My solution for getting dynamically changing image data to the screen in OpenGL:
#define WIN32_LEAN_AND_MEAN
#include "wx/wx.h"
#include "wx/sizer.h"
#include "wx/glcanvas.h"
#include "BasicGLPane.h"
// include OpenGL
#ifdef __WXMAC__
#include "OpenGL/glu.h"
#include "OpenGL/gl.h"
#else
#include <GL/glu.h>
#include <GL/gl.h>
#endif
#include "ORIScanMainFrame.h"
BEGIN_EVENT_TABLE(BasicGLPane, wxGLCanvas)
EVT_MOTION(BasicGLPane::mouseMoved)
EVT_LEFT_DOWN(BasicGLPane::mouseDown)
EVT_LEFT_UP(BasicGLPane::mouseReleased)
EVT_RIGHT_DOWN(BasicGLPane::rightClick)
EVT_LEAVE_WINDOW(BasicGLPane::mouseLeftWindow)
EVT_SIZE(BasicGLPane::resized)
EVT_KEY_DOWN(BasicGLPane::keyPressed)
EVT_KEY_UP(BasicGLPane::keyReleased)
EVT_MOUSEWHEEL(BasicGLPane::mouseWheelMoved)
EVT_PAINT(BasicGLPane::render)
END_EVENT_TABLE()
// Test data for image generation. floats range 0.0 to 1.0, in RGBRGBRGB... order.
// Array is 1024 * 3 long. Note that 32 * 32 is 1024 and is the largest image we can randomly generate.
float* randomFloatRGB;
float* randomFloatRGBGrey;
BasicGLPane::BasicGLPane(wxFrame* parent, int* args) :
wxGLCanvas(parent, wxID_ANY, args, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
{
m_context = new wxGLContext(this);
randomFloatRGB = new float[1024 * 3];
randomFloatRGBGrey = new float[1024 * 3];
// In GL images 0,0 is in the lower left corner so the draw routine does a vertical flip to get 'regular' images right side up.
for (int i = 0; i < 1024; i++) {
// Red
randomFloatRGB[i * 3] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Green
randomFloatRGB[i * 3 + 1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Blue
randomFloatRGB[i * 3 + 2] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Telltale 2 white pixels in 0,0 corner.
if (i < 2) {
randomFloatRGB[i * 3] = randomFloatRGB[i * 3 + 1] = randomFloatRGB[i * 3 + 2] = 1.0f;
}
randomFloatRGBGrey[i * 3] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 1] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 2] = randomFloatRGB[i * 3];
}
// To avoid flashing on MSW
SetBackgroundStyle(wxBG_STYLE_CUSTOM);
}
BasicGLPane::~BasicGLPane()
{
delete m_context;
}
void BasicGLPane::resized(wxSizeEvent& evt)
{
// wxGLCanvas::OnSize(evt);
Refresh();
}
int BasicGLPane::getWidth()
{
return GetSize().x;
}
int BasicGLPane::getHeight()
{
return GetSize().y;
}
void BasicGLPane::render(wxPaintEvent& evt)
{
assert(GetParent());
assert(GetParent()->GetParent());
ORIScanMainFrame* mf = dynamic_cast<ORIScanMainFrame*>(GetParent()->GetParent());
assert(mf);
switch (mf->currentMainView) {
case ORIViewSelection::ViewCamera:
renderCamera(evt);
break;
case ORIViewSelection::ViewDepth:
renderDepth(evt);
break;
case ORIViewSelection::ViewPointCloud:
renderPointCloud(evt);
break;
case ORIViewSelection::View3DModel:
render3DModel(evt);
break;
default:
renderNone(evt);
}
}
void BasicGLPane::renderNone(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopAttrib();
}
GLuint makeOpenGlTextureFromDataLuninanceFloats(int width, int height, float* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_FLOAT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
GLuint makeOpenGlTextureFromRGBInts(int width, int height, unsigned int* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
/// <summary>
/// Range of each float is 0.0f to 1.0f
/// </summary>
/// <param name="width"></param>
/// <param name="height"></param>
/// <param name="floatRGB"></param>
/// <returns></returns>
GLuint makeOpenGlTextureFromRGBFloats(int width, int height, float* floatRGB) {
GLuint textureID;
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, floatRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
void BasicGLPane::DrawTextureToScreenFloat(int w, int h, float* floatDataPtr, GLuint (*textureFactory)(int width, int height, float* floatRGB)) {
if (w <= 0 || h <= 0 || floatDataPtr == NULL || w > 5000 || h > 5000) {
assert(false);
return;
}
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS);
glClearColor(0.15f, 0.11f, 0.02f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
float onePixelW = (float)getWidth() / (float)w;
float onePixelH = (float)getHeight() / (float)h;
float orthoW = w;
float orthoH = h;
if (onePixelH > onePixelW) {
orthoH = h * onePixelH / onePixelW;
}
else {
orthoW = w * onePixelW / onePixelH;
}
// We want the image at the top of the window, not the bottom if the window is too tall.
int topOfScreen = (float)getHeight() / onePixelH;
// If the window resizes after creation you need to change the viewport.
glViewport(0, 0, getWidth(), getHeight());
gluOrtho2D(0.0, orthoW, (double)topOfScreen - (double)orthoH, topOfScreen);
GLuint myTextureName = textureFactory(w, h, floatDataPtr);
glBegin(GL_QUADS);
{
// This order of UV coords and vertices will do the vertical flip of the image to get the 'regular' image 0,0
// in the top left corner.
glTexCoord2f(0.0f, 1.0f); glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(0.0f + w, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(0.0f + w, 0.0f + h, 0.0f);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0.0f, 0.0f + h, 0.0f);
}
glEnd();
glDeleteTextures(1, &myTextureName);
glFlush();
SwapBuffers();
glPopClientAttrib();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::DrawTextureToScreenMat(wxPaintEvent& evt, cv::Mat m, float brightness) {
m.type();
if (m.empty()) {
renderNone(evt);
return;
}
if (m.type() == CV_32FC1) { // Grey scale.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromDataLuninanceFloats);
}
else if (m.type() == CV_32FC3) { // Color.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromRGBFloats);
}
else {
renderNone(evt);
}
}
void BasicGLPane::renderCamera(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->im_white);
}
void BasicGLPane::renderDepth(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->depth_map);
}
void BasicGLPane::render3DModel(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::renderPointCloud(wxPaintEvent& evt) {
if (!IsShown())
return;
boost::unique_lock<boost::mutex> lk(ORITopControl::Instance->pointCloudCacheMutex);
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, getWidth(), getHeight());
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (ORITopControl::Instance->pointCloudCache.size() > 0) {
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 500.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(100, 70, 200, // Eye
25, 25, 25, // Look at pt
0, 0, 1); // Up Vector
glPointSize(2.0);
glBegin(GL_POINTS);
// Use explicit for loop because pointCloudFragments can grow asynchronously.
for (int i = 0; i < ORITopControl::Instance->pointCloudCache.size(); i++) {
auto frag = ORITopControl::Instance->pointCloudCache[i];
auto current_point_cloud_ptr = frag->cloud;
glPushMatrix();
// glMultMatrixf(frag->xform.data());
for (size_t n = 0; n < current_point_cloud_ptr->size(); n++) {
glColor3ub(255, 255, 255);
glVertex3d(current_point_cloud_ptr->points[n].x, current_point_cloud_ptr->points[n].y, current_point_cloud_ptr->points[n].z);
}
glPopMatrix();
}
glEnd();
}
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}