Using CUDA, SFML, and OpenGL: Texture Refuses to Appear on Quad - c++

Using various tutorials, examples, documentation, and forum posts online, I have written code that lets CUDA manipulate an OpenGL texture so it can be output to the screen. My method of displaying is to use a PBO and a texture image allocated as a uchar4 array. Despite all my attempts at fixing the problem, the texture will not show up on the 2D surface, and I cannot pinpoint the cause.
These are all the things I have checked/done so far: I have created a PBO and registered it with CUDA; called cudaGraphicsResourceGetMappedPointer and its unmapping counterpart before and after the GPU calls; made sure glEnable is called for GL_TEXTURE_2D; called glDisable for anything unnecessary; and unbound textures/buffers when they are not needed. I have also reset SFML's OpenGL states in case SFML was the cause, and tried square textures. My OpenGL and CUDA versions support all the function calls I use.
Checking cudaError values and OpenGL errors did not report any problems within the program.
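For reference, the error checks are essentially of this form (a simplified sketch, not the exact code):
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
    std::printf("CUDA error: %s\n", cudaGetErrorString(err));
GLenum glErr = glGetError();
if (glErr != GL_NO_ERROR)
    std::printf("OpenGL error: 0x%x\n", glErr);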
I am not sure whether this is related, but when I call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
My quad does not seem to display.
I have mainly found inspiration from this website.
Thank you very much!
Here is my code:
Main.cpp
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <SFML/Window.hpp>
#include <SFML/OpenGL.hpp>
#include <SFML/System.hpp>
#include <SFML/Graphics/RenderWindow.hpp>
#include "GeneralTypedef.h"
#include "OpenGLTest.cuh"
int main()
{
// create the window
sf::RenderWindow window(sf::VideoMode(1024, 1024), "OpenGL");
//window.setVerticalSyncEnabled(true);
sf::Vector2u windowSize;
windowSize = sf::Vector2u(window.getSize());
bool running = true;
glewInit();
window.resetGLStates();
std::printf("OpenGL: %s:", glGetString(GL_VERSION));
// We will not be using SFML's gl states.
OpenGLTest* test = new OpenGLTest(window.getSize());
sf::Time time;
while (running)
{
// handle events
sf::Event event;
while (window.pollEvent(event))
{
if (event.type == sf::Event::Closed)
{
// end the program
running = false;
}
else if (event.type == sf::Event::Resized)
{
// adjust the viewport when the window is resized
glViewport(0, 0, event.size.width, event.size.height);
windowSize = window.getSize();
}
}
// clear the buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
test->createFrame(time.asMicroseconds());
test->drawFrame();
window.display();
}
// release resources...
delete test;
return 0;
}
OpenGLTest.cuh
#ifndef OPENGLTEST_CUH
#define OPENGLTEST_CUH
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "GeneralTypedef.h"
class OpenGLTest
{
public:
uchar4* image;
GLuint gltexture;
GLuint pbo;
cudaGraphicsResource_t cudaPBO;
uchar4* d_textureBufferData;
sf::Vector2u windowSize;
OpenGLTest(sf::Vector2u windowSize)
{
this->windowSize = sf::Vector2u(windowSize);
this->setupOpenGL();
};
~OpenGLTest()
{
delete[] image;
image = nullptr;
cudaFree(d_textureBufferData);
d_textureBufferData = nullptr;
glDeleteTextures(1, &gltexture);
}
void drawFrame();
void createFrame(float time);
private:
void setupOpenGL();
};
#endif //OPENGLTEST_CUH
OpenGLTest.cu
#include "OpenGLTest.cuh"
__global__ void createGPUTexture(uchar4* d_texture)
{
uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
d_texture[pixelID].x = 0;
d_texture[pixelID].y = 1;
d_texture[pixelID].z = 1;
d_texture[pixelID].w = 0;
}
__global__ void wow(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index / width;
if (index < width*height) {
unsigned char r = (x + (int)time) & 0xff;
unsigned char g = (y + (int)time) & 0xff;
unsigned char b = ((x + y) + (int)time) & 0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
void OpenGLTest::drawFrame()
{
glColor3f(1.0f,1.0f,1.0f);
glBindTexture(GL_TEXTURE_2D, gltexture);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, windowSize.x, windowSize.y, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(0.0f, float(windowSize.y));
glTexCoord2f(1.0f, 0.0f);
glVertex2f(float(windowSize.x), float(windowSize.y));
glTexCoord2f(1.0f, 1.0f);
glVertex2f(float(windowSize.x), 0.0f);
glTexCoord2f(0.0f,1.0f);
glVertex2f(0.0f, 0.0f);
glEnd();
glFlush();
// Release
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// Test Triangle
/*
glBegin(GL_TRIANGLES);
glColor3f(0.1, 0.2, 0.3);
glVertex2f(0, 0);
glVertex2f(10, 0);
glVertex2f(0, 100);
glEnd();
*/
}
void OpenGLTest::createFrame(float time)
{
cudaGraphicsMapResources(1, &cudaPBO, 0);
size_t numBytes;
cudaGraphicsResourceGetMappedPointer((void**)&d_textureBufferData, &numBytes, cudaPBO);
int totalThreads = windowSize.x * windowSize.y;
int nBlocks = totalThreads/ 256;
// Run code here.
createGPUTexture<<<nBlocks, 256>>>(d_textureBufferData);
//wow<<<nBlocks, 256>>>(d_textureBufferData, windowSize.x, windowSize.y, time);
// Unmap mapping to PBO so that OpenGL can access.
cudaGraphicsUnmapResources(1, &cudaPBO, 0);
}
void OpenGLTest::setupOpenGL()
{
image = new uchar4[1024*1024];
glViewport(0, 0, windowSize.x, windowSize.y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, windowSize.x, windowSize.y, 0.0, -1.0, 1.0);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
glDisable(GL_DEPTH_TEST);
// Unbind any textures from previous.
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// Create new textures.
glGenTextures(1, &gltexture);
glBindTexture(GL_TEXTURE_2D, gltexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Create image with same resolution as window.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, windowSize.x , windowSize.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
// Create pixel buffer object.
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, windowSize.x * windowSize.y * sizeof(uchar4), image, GL_STREAM_COPY);
cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsMapFlagsNone);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
GeneralTypedef.h
#ifndef GENERALTYPEDEF_CUH
#define GENERALTYPEDEF_CUH
typedef unsigned int uint;
#endif // GENERALTYPEDEF_CUH

After rewriting the entire code and understanding it better, I figured out the reason. The color components of the uchar4 in the kernel are mapped to the range 0-255, and the w component is the alpha (opacity), so it must be set to 255 for the image to show. I hope this helps anyone who runs into the same problem; some sites set the color values very low as well.
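For anyone who wants the concrete change, a sketch of the corrected kernel looks like this (the key point is w = 255; the 255s in the color channels are just example values):
__global__ void createGPUTexture(uchar4* d_texture)
{
    unsigned int pixelID = blockIdx.x * blockDim.x + threadIdx.x;
    // Channels are bytes in the range 0-255; w is the alpha and must be 255 to be fully opaque.
    d_texture[pixelID] = make_uchar4(0, 255, 255, 255);
}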

Related

Load jpg image as texture - freeimage, opengl

I tried to load a JPG image with the FreeImage library. I used the code below, but the result is only a white window. I want to use this image as a background and then load an object file.
Here is the code that I used:
#include <windows.h>
#include <GL/glut.h>
#include <iostream>
#include <FreeImage.h>
FIBITMAP *loadImage(const char *filename)
{
FIBITMAP *dib1 = NULL;
FREE_IMAGE_FORMAT fif = FreeImage_GetFIFFromFilename(filename);
dib1 = FreeImage_Load(fif, filename, JPEG_DEFAULT);
if (!dib1)
{
std::cerr << "Erreur ouverture d\'image" << std::endl;
exit (0);
}
std::cerr << "Success" << std::endl;
return dib1;
}
GLuint loadTexture (FIBITMAP * dib1)
{
GLuint tex_id = 0;
int x, y;
int height, width;
RGBQUAD rgbquad;
FREE_IMAGE_TYPE type;
BITMAPINFOHEADER *header;
type = FreeImage_GetImageType(dib1);
height = FreeImage_GetHeight(dib1);
width = FreeImage_GetWidth(dib1);
header = FreeImage_GetInfoHeader(dib1);
int scanLineWidh = ((3*width)%4 == 0) ? 3*width : ((3*width)/4)*4+4;
unsigned char * texels= (GLubyte*)calloc(height*scanLineWidh, sizeof(GLubyte));
for (x=0 ; x<width ; x++)
for (y=0 ; y<height; y++)
{
FreeImage_GetPixelColor(dib1,x,y,&rgbquad);
texels[(y*scanLineWidh+3*x)]=((GLubyte*)&rgbquad)[2];
texels[(y*scanLineWidh+3*x)+1]=((GLubyte*)&rgbquad)[1];
texels[(y*scanLineWidh+3*x)+2]=((GLubyte*)&rgbquad)[0];
}
glGenTextures (1, &tex_id);
glBindTexture (GL_TEXTURE_2D, tex_id);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0, GL_RGB,
GL_UNSIGNED_BYTE, texels);
free(texels);
return tex_id;
}
void display(void)
{
glClearColor (0.0,0.0,0.0,1.0);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glutSwapBuffers(); //swap the buffers
}
int main(int argc, char **argv) {
FIBITMAP *dib1 = loadImage("planina.jpg");
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);
glutInitWindowSize(800,450);
glutInitWindowPosition(20,20);
glutCreateWindow("Loader");
//glutReshapeFunc(reshape);
//glutDisplayFunc(display);
loadTexture(dib1);
FreeImage_Unload(dib1);
glutMainLoop();
return 0;
}
What am I doing wrong?
This is your display function:
void display(void)
{
glClearColor (0.0,0.0,0.0,1.0);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glutSwapBuffers(); //swap the buffers
}
And what's immediately apparent is that the only thing it does is:
set a clear color,
clear the window,
load an identity matrix,
and swap the buffers to display the result.
What's missing is any actual drawing. You have to draw some triangles or quads with the texture applied for the texture to show up at all.
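For instance, a minimal textured quad in the display function could look roughly like this (a sketch, assuming the handle returned by loadTexture() is stored in a variable such as tex_id and the default projection is used):
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, tex_id);
glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
    glTexCoord2f(1.0f, 0.0f); glVertex2f( 1.0f, -1.0f);
    glTexCoord2f(1.0f, 1.0f); glVertex2f( 1.0f,  1.0f);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f,  1.0f);
glEnd();
Note that in the posted main() the glutDisplayFunc(display) call is commented out and the handle returned by loadTexture() is discarded, so both would also need to be kept for anything to appear.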

Crash with CUDA/OGL interop

I am trying to set up a little CUDA/GL interop example. I have looked around on the internet and found some tutorials with helpful material.
All I want is to produce a texture in CUDA and draw it with OpenGL.
The source I have now crashes my MacBook Pro every time I run it, so if somebody could take a look at it, that would be really helpful.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
void initGLandCUDA() {
int argc = 0;
char** argv = NULL;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("CUDA GL Interop");
glewInit();
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
}
glBindTexture(GL_TEXTURE_2D, 0);
cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
__global__ void renderingKernel(cudaSurfaceObject_t image) {
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
uchar4 color = make_uchar4(0.f, 0.f, 0.f, .3f);
//if I write in 0, 0 and not x,y, the computer is not crashing, but there is no black pixel at 0,0
surf2Dwrite(color, image, x, y, cudaBoundaryModeClamp);
}
void callCUDAKernel(cudaSurfaceObject_t image) {
dim3 block(8, 1, 1);
dim3 grid(8, 1, 1);
renderingKernel<<< grid, block>>>(image);
}
void renderFrame() {
cudaGraphicsMapResources(1, &viewCudaResource);
{
cudaArray_t viewCudaArray;
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
cudaResourceDesc viewCudaArrayResourceDesc;
{
viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
}
cudaSurfaceObject_t viewCudaSurfaceObject;
checkCudaErrors(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
callCUDAKernel(viewCudaSurfaceObject);
checkCudaErrors(cudaDestroySurfaceObject(viewCudaSurfaceObject));
}
checkCudaErrors(cudaGraphicsUnmapResources(1, &viewCudaResource));
checkCudaErrors(cudaStreamSynchronize(0));
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
}
glEnd();
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
}
int main(int argc, char **argv)
{
initGLandCUDA();
glutDisplayFunc(renderFrame);
//glutKeyboardFunc(keyboard);
//glutMouseFunc(mouse);
glutMainLoop();
}
It seems like some kind of out-of-range error, but I am currently out of ideas (by the way, this is compute capability 3.0, running on an NVIDIA 650M).
Edit :
By crashing I mean: crashing. The computer freezes, I can't move my mouse, and I have to reboot.
Yes, I have looked at all the examples; they are not exactly what I want, and changing them to do what I want results in this problem. If there were any other help in the manual or anywhere else that I had found, I would not bother asking for help. Note: you need to link with the cuda_runtime and glut libraries.
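For reference, on Linux a build command along these lines should work, assuming the source file is named interop.cu (names and library paths may differ on your system):
nvcc interop.cu -o interop -lGLEW -lGL -lglut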
Below is a working version of your code. The issues in your code were:
Your kernel was depending on being launched with 512x512 threads, but you were only launching with 64x1 threads.
Your kernel was writing to unaligned addresses with surf2Dwrite().
You were setting up double buffering in OpenGL but you were not swapping the buffers. (glutSwapBuffers()).
You were initializing an uchar4 with floats.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
#define check(ans) { _check((ans), __FILE__, __LINE__); }
inline void _check(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
void initGLandCUDA() {
int argc = 0;
char** argv = NULL;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("CUDA GL Interop");
glewInit();
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, window_width, window_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, 0);
check(cudaGLSetGLDevice(0));
check(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard));
}
__global__ void renderingKernel(cudaSurfaceObject_t image) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 color = make_uchar4(x / 2, y / 2, 0, 127);
surf2Dwrite(color, image, x * sizeof(color), y, cudaBoundaryModeClamp);
}
void callCUDAKernel(cudaSurfaceObject_t image) {
dim3 block(256, 1, 1);
dim3 grid(2, 512, 1);
renderingKernel<<<grid, block>>>(image);
check(cudaPeekAtLastError());
check(cudaDeviceSynchronize());
}
void renderFrame() {
check(cudaGraphicsMapResources(1, &viewCudaResource));
cudaArray_t viewCudaArray;
check(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
cudaResourceDesc viewCudaArrayResourceDesc;
memset(&viewCudaArrayResourceDesc, 0, sizeof(viewCudaArrayResourceDesc));
viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
cudaSurfaceObject_t viewCudaSurfaceObject;
check(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
callCUDAKernel(viewCudaSurfaceObject);
check(cudaDestroySurfaceObject(viewCudaSurfaceObject));
check(cudaGraphicsUnmapResources(1, &viewCudaResource));
check(cudaStreamSynchronize(0));
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
}
glEnd();
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
}
int main(int argc, char **argv)
{
initGLandCUDA();
glutDisplayFunc(renderFrame);
//glutKeyboardFunc(keyboard);
//glutMouseFunc(mouse);
glutMainLoop();
}
Output:
[Image removed]

OpenGL repeated calls to glTexImage2D and alpha blending

This is more out of curiosity than for any practical purpose: is there anything in the OpenGL specification that suggests that calling glTexImage2D many times (e.g., once per frame) is illegal? I mean illegal as in 'it could produce wrong results', not just inefficient (suppose I don't care about the performance impact of not using glTexSubImage2D instead).
The reason I'm asking is that I noticed some very odd artifacts when drawing overlapping, texture-mapped primitives that use a partly transparent texture which is loaded once every frame using glTexImage2D (see the attached picture): after a few seconds (i.e., a few hundred frames), small rectangular black patches appear on the screen (they actually flip between black and normal between consecutive frames).
I'm attaching below the simplest example code I could write that exhibits the problem.
#include <stdio.h>
#ifndef __APPLE__
# include <SDL/SDL.h>
# include <SDL/SDL_opengl.h>
#else
# include <SDL.h>
# include <SDL_opengl.h>
#endif
/* some constants and variables that several functions use */
const int width = 640;
const int height = 480;
#define texSize 64
GLuint vbo;
GLuint tex;
/* forward declaration, creates a random texture; uses glTexSubImage2D if
update is non-zero (otherwise glTexImage2D) */
void createTexture(GLuint label, int update);
int init()
{
/* SDL initialization */
if (SDL_Init(SDL_INIT_VIDEO) < 0)
return 0;
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
if (!SDL_SetVideoMode(width, height, 0, SDL_OPENGL)) {
fprintf(stderr, "Couldn't initialize OpenGL");
return 0;
}
/* OpenGL initialization */
glClearColor(0, 0, 0, 0);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, height, 0, -1, 1);
glMatrixMode(GL_MODELVIEW);
/* creating the VBO and the textures */
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 1024, 0, GL_DYNAMIC_DRAW);
glGenTextures(1, &tex);
createTexture(tex, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
return 1;
}
/* draw a triangle at the specified point */
void drawTriangle(GLfloat x, GLfloat y)
{
GLfloat coords1[12] = {0, 0, 0, 0, /**/200, 0, 1, 0, /**/200, 150, 1, 1};
glLoadIdentity();
glTranslatef(x, y, 0);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords1), coords1);
glVertexPointer(2, GL_FLOAT, 4*sizeof(GLfloat), (void*)0);
glTexCoordPointer(2, GL_FLOAT, 4*sizeof(GLfloat),
(char*)0 + 2*sizeof(GLfloat));
glDrawArrays(GL_TRIANGLES, 0, 3);
}
void render()
{
glClear(GL_COLOR_BUFFER_BIT);
drawTriangle(250, 50);
createTexture(tex, 0);
drawTriangle(260, 120);
SDL_GL_SwapBuffers();
}
void cleanup()
{
glDeleteTextures(1, &tex);
glDeleteBuffers(1, &vbo);
SDL_Quit();
}
int main(int argc, char* argv[])
{
SDL_Event event;
if (!init()) return 1;
while (1) {
while (SDL_PollEvent(&event))
if (event.type == SDL_QUIT)
return 0;
render();
}
cleanup();
return 0;
}
void createTexture(GLuint label, int update)
{
GLubyte data[texSize*texSize*4];
GLubyte* p;
int i, j;
glBindTexture(GL_TEXTURE_2D, label);
for (i = 0; i < texSize; ++i) {
for (j = 0; j < texSize; ++j) {
p = data + (i + j*texSize)*4;
p[0] = ((i % 8) > 4?255:0);
p[1] = ((j % 8) > 4?255:0);
p[2] = ((i % 8) > 4?255:0);
p[3] = 255 - i*3;
}
}
if (!update)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
GL_UNSIGNED_BYTE, data);
else
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Notes:
I'm using SDL, but I've seen the same happening in wxWidgets, so it's not an SDL-related problem.
If I use glTexSubImage2D instead for every frame (use update = 1 in createTexture), the artifacts disappear.
If I disable blending, there are no more artifacts.
I've been testing this on a late 2010 MacBook Air, though I doubt that's particularly relevant.
This is clearly an OpenGL implementation bug; just calling glTexImage2D in a loop should not cause this to happen.
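As a practical workaround, the per-frame glTexSubImage2D path from the question (update = 1) is the safer pattern: allocate the texture storage once with glTexImage2D, then only replace its contents each frame, e.g.:
// once, at initialization
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
// every frame
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA, GL_UNSIGNED_BYTE, data);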

OpenGL texturing: texture coordinates are incorrect compared to what is rendered

I'm rendering a scene using OpenGL, with textures loaded using some sample code and the FreeImage API.
Here is a link to what I'm seeing:
[Image removed]
I can confirm that all texture coordinates passed to glTexCoord2f are between 0.0f and 1.0f, as required, along with the vertex coordinates.
Each rendered triangle appears to have the full texture pasted across it (and repeated), not the area of the texture specified by the coordinates.
The texture is 1024x1024.
This is the function for loading the texture
bool loadImageToTexture(const char* image_path, unsigned int &handle)
{
if(!image_path)
return false;
FREE_IMAGE_FORMAT fif = FIF_UNKNOWN;
FIBITMAP* dib(0);
BYTE* bits(0);
unsigned int width(0), height(0);
//open the file
fif = FreeImage_GetFileType(image_path, 0);
if(fif == FIF_UNKNOWN)
fif = FreeImage_GetFIFFromFilename(image_path);
if(fif == FIF_UNKNOWN)
return false;
if(FreeImage_FIFSupportsReading(fif))
dib = FreeImage_Load(fif, image_path);
if(!dib)
return false;
//is the file of the correct type
FREE_IMAGE_COLOR_TYPE type = FreeImage_GetColorType(dib);
if(FIC_RGBALPHA != type)
{
//convert to type
FIBITMAP* ndib = FreeImage_ConvertTo32Bits(dib);
dib = ndib;
}
//get data for glTexImage2D
bits = FreeImage_GetBits(dib);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
if((bits == 0) || (width == 0) || (height == 0))
return false;
//create the texture in open gl
glGenTextures(1, &handle);
glBindTexture(GL_TEXTURE_2D, handle);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height,
0, GL_RGBA, GL_UNSIGNED_BYTE, bits);
//unload the image now loaded
FreeImage_Unload(dib);
return true;
}
These are the functions for rendering
inline void glTexture(float textureSize, float t, float u)
{
glTexCoord2f(u/textureSize, (textureSize - t)/textureSize);
}
void processTriangle(const btVector3* triangle, const textureCoord* tc, const btVector3& normal,
const int partId, const int triangleIndex, const bool wireframe)
{
if(wireframe)
{
glBegin(GL_LINES);
glColor3f(1, 0, 0);
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glColor3f(0, 1, 0);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glColor3f(0, 0, 1);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glEnd();
}
else
{
glBegin(GL_TRIANGLES);
glColor3f(1, 1, 1);
//Normals are per triangle
glNormal3f(normal.getX(), normal.getY(), normal.getZ());
glTexture(1024.0f, tc[0].t, tc[0].u);
glVertex3d(triangle[0].getX(), triangle[0].getY(), triangle[0].getZ());
glTexture(1024.0f, tc[1].t, tc[1].u);
glVertex3d(triangle[1].getX(), triangle[1].getY(), triangle[1].getZ());
glTexture(1024.0f, tc[2].t, tc[2].u);
glVertex3d(triangle[2].getX(), triangle[2].getY(), triangle[2].getZ());
glEnd();
}
}
void processAllTriangles(const btVector3& worldBoundsMin, const btVector3& worldBoundsMax)
{
btVector3 triangle[3];
textureCoord tc[3];
//go through the index list build triangles and draw them
unsigned int k = 0;
for(unsigned int i = 0; i<dVertices.size(); i+=3, k++)
{
//first vertex
triangle[0] = dVertices[i];
tc[0] = dTexCoords[i];
//second vertex
triangle[1] = dVertices[i+1];
tc[1] = dTexCoords[i+1];
//third vertex
triangle[2] = dVertices[i+2];
tc[2] = dTexCoords[i+2];
processTriangle(triangle, tc, dNormals[k], 0, 0, false);
}
}
//draw the world given the players position
void render(btScalar* m, const btCollisionShape* shape, const btVector3& color, int debugMode,
const btVector3& worldBoundsMin, const btVector3& worldBoundsMax)
{
//render the world using the generated OpenGL lists
//enable and specify pointers to vertex arrays
glPushMatrix();
glMultMatrixf(m);
glMatrixMode(GL_TEXTURE);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glShadeModel(GL_SMOOTH);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBindTexture(GL_TEXTURE_2D, m_texturehandle);
processAllTriangles(worldBoundsMin, worldBoundsMax);
glPopMatrix();
}
I was working in a larger code base, and texturing is done differently in different areas, which meant that state from those other textured objects was not being disabled.
Before doing per-vertex texturing, make sure to turn off the other varieties of texture-coordinate generation:
glDisable(GL_TEXTURE_GEN_S);
glDisable(GL_TEXTURE_GEN_T);
glDisable(GL_TEXTURE_GEN_R);
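In the render() function above, that means disabling the texture-generation state before binding the texture and drawing, roughly like this:
glDisable(GL_TEXTURE_GEN_S);
glDisable(GL_TEXTURE_GEN_T);
glDisable(GL_TEXTURE_GEN_R);
glBindTexture(GL_TEXTURE_2D, m_texturehandle);
processAllTriangles(worldBoundsMin, worldBoundsMax);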

OpenGL - draw pixels to screen?

I want to draw a 2D array of pixel data (RGB / grayscale values) on the screen as fast as possible, using OpenGL. The pixel data changes frequently.
I had hoped that I would find a simple function that would let me push in a pointer to an array representing the pixel data, since this is probably the fastest approach. Unfortunately, I have found no such function.
What is the best way to accomplish this task?
Maybe glDrawPixels is the function you are looking for? Though if the data is static it would be better to create a texture with it, and then draw that each frame.
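A minimal glDrawPixels sketch, assuming pixels is a tightly packed width x height RGB byte array and an orthographic projection is already set up:
glRasterPos2i(0, 0);                      // where the lower-left corner of the image lands
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);    // rows are tightly packed
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, pixels);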
I recently had a similar problem, as I am trying to render video to the screen (i.e. repeatedly upload pixel data to VRAM). My approach is:
use glTexImage2D and glTexSubImage2D to upload the data to the texture (i.e. bind the texture, and the texture unit if applicable, before calling them); a rough sketch of this follows after this answer;
in my case, since the video frame rate (usually about 24 fps) is lower than the frame rate of my application (aimed at 60 fps), to avoid uploading the same data again I use a framebuffer object (see glGenFramebuffers/glBindFramebuffer/glDeleteFramebuffers) and attach my texture to the framebuffer (glFramebufferTexture2D); I then upload that texture once and draw the same frame multiple times (just normal texture access with glBindTexture);
I don't know which platform you are using, but as I am targeting Mac I use some Apple extensions to ensure the data transfer to VRAM happens through DMA (i.e. to make glTexSubImage2D return immediately and let the CPU do other work); feel free to ask me for more info if you are on Mac too;
also, as you are using just grayscale, you might want to consider a GL_LUMINANCE texture (i.e. 1 byte per pixel) rather than an RGB-based format to make the upload faster (but that depends on the size of your texture data; I was streaming 1920x1080 HD video, so I needed to keep it down);
also be aware of the format your hardware prefers, to avoid unnecessary data conversions (normally it seems better to use BGRA data than, for example, plain RGB);
finally, in my code I replaced all the fixed-pipeline functionality with shaders (in particular the conversion of the data from grayscale or YUV format to RGB), but again all of that depends on the size of your data and the workload of your CPU or GPU.
Hope this helps; feel free to message me if you need further info.
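As a rough sketch of the per-frame upload mentioned in the first point (all names here are placeholders): create the texture once, then replace only its contents whenever a new frame arrives.
// at startup: allocate the texture storage once
glBindTexture(GL_TEXTURE_2D, videoTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
// per new video frame: overwrite the contents only
glBindTexture(GL_TEXTURE_2D, videoTex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, frameData);
// then draw the textured quad as usual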
I would think the fastest way would be to draw a screen sized quad with ortho projection and use a pixel shader and Texture Buffer Object to draw directly to the texture in the pixel shader. Due to latency transferring to/from the TBO you may want to see if double buffering would help.
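Double buffering an upload like this is usually done with a pair of pixel buffer objects (PBOs) rather than a texture buffer object; a rough sketch (all names hypothetical, with pbo[0] and pbo[1] created earlier via glGenBuffers):
int drawIdx = frame % 2;        // PBO the texture update reads from this frame
int fillIdx = (frame + 1) % 2;  // PBO the CPU fills for the next frame
glBindTexture(GL_TEXTURE_2D, tex);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo[drawIdx]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_BGRA, GL_UNSIGNED_BYTE, 0); // source: bound PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo[fillIdx]);
glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4, 0, GL_STREAM_DRAW); // orphan old storage
void* dst = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
if (dst) {
    memcpy(dst, newPixels, width * height * 4); // fill next frame's data
    glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
++frame;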
If speed isn't much of a concern (you just need fairly interactive framerates) glDrawPixels is easy to use and works well enough for many purposes.
My solution for getting dynamically changing image data to the screen in OpenGL:
#define WIN32_LEAN_AND_MEAN
#include "wx/wx.h"
#include "wx/sizer.h"
#include "wx/glcanvas.h"
#include "BasicGLPane.h"
// include OpenGL
#ifdef __WXMAC__
#include "OpenGL/glu.h"
#include "OpenGL/gl.h"
#else
#include <GL/glu.h>
#include <GL/gl.h>
#endif
#include "ORIScanMainFrame.h"
BEGIN_EVENT_TABLE(BasicGLPane, wxGLCanvas)
EVT_MOTION(BasicGLPane::mouseMoved)
EVT_LEFT_DOWN(BasicGLPane::mouseDown)
EVT_LEFT_UP(BasicGLPane::mouseReleased)
EVT_RIGHT_DOWN(BasicGLPane::rightClick)
EVT_LEAVE_WINDOW(BasicGLPane::mouseLeftWindow)
EVT_SIZE(BasicGLPane::resized)
EVT_KEY_DOWN(BasicGLPane::keyPressed)
EVT_KEY_UP(BasicGLPane::keyReleased)
EVT_MOUSEWHEEL(BasicGLPane::mouseWheelMoved)
EVT_PAINT(BasicGLPane::render)
END_EVENT_TABLE()
// Test data for image generation. floats range 0.0 to 1.0, in RGBRGBRGB... order.
// Array is 1024 * 3 long. Note that 32 * 32 is 1024 and is the largest image we can randomly generate.
float* randomFloatRGB;
float* randomFloatRGBGrey;
BasicGLPane::BasicGLPane(wxFrame* parent, int* args) :
wxGLCanvas(parent, wxID_ANY, args, wxDefaultPosition, wxDefaultSize, wxFULL_REPAINT_ON_RESIZE)
{
m_context = new wxGLContext(this);
randomFloatRGB = new float[1024 * 3];
randomFloatRGBGrey = new float[1024 * 3];
// In GL images 0,0 is in the lower left corner so the draw routine does a vertical flip to get 'regular' images right side up.
for (int i = 0; i < 1024; i++) {
// Red
randomFloatRGB[i * 3] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Green
randomFloatRGB[i * 3 + 1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Blue
randomFloatRGB[i * 3 + 2] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Telltale 2 white pixels in 0,0 corner.
if (i < 2) {
randomFloatRGB[i * 3] = randomFloatRGB[i * 3 + 1] = randomFloatRGB[i * 3 + 2] = 1.0f;
}
randomFloatRGBGrey[i * 3] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 1] = randomFloatRGB[i * 3];
randomFloatRGBGrey[i * 3 + 2] = randomFloatRGB[i * 3];
}
// To avoid flashing on MSW
SetBackgroundStyle(wxBG_STYLE_CUSTOM);
}
BasicGLPane::~BasicGLPane()
{
delete m_context;
}
void BasicGLPane::resized(wxSizeEvent& evt)
{
// wxGLCanvas::OnSize(evt);
Refresh();
}
int BasicGLPane::getWidth()
{
return GetSize().x;
}
int BasicGLPane::getHeight()
{
return GetSize().y;
}
void BasicGLPane::render(wxPaintEvent& evt)
{
assert(GetParent());
assert(GetParent()->GetParent());
ORIScanMainFrame* mf = dynamic_cast<ORIScanMainFrame*>(GetParent()->GetParent());
assert(mf);
switch (mf->currentMainView) {
case ORIViewSelection::ViewCamera:
renderCamera(evt);
break;
case ORIViewSelection::ViewDepth:
renderDepth(evt);
break;
case ORIViewSelection::ViewPointCloud:
renderPointCloud(evt);
break;
case ORIViewSelection::View3DModel:
render3DModel(evt);
break;
default:
renderNone(evt);
}
}
void BasicGLPane::renderNone(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopAttrib();
}
GLuint makeOpenGlTextureFromDataLuninanceFloats(int width, int height, float* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE, GL_FLOAT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
GLuint makeOpenGlTextureFromRGBInts(int width, int height, unsigned int* f) {
GLuint textureID;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_INT, f);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
/// <summary>
/// Range of each float is 0.0f to 1.0f
/// </summary>
/// <param name="width"></param>
/// <param name="height"></param>
/// <param name="floatRGB"></param>
/// <returns></returns>
GLuint makeOpenGlTextureFromRGBFloats(int width, int height, float* floatRGB) {
GLuint textureID;
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
glGenTextures(1, &textureID);
// "Bind" the newly created texture : all future texture functions will modify this texture
glBindTexture(GL_TEXTURE_2D, textureID);
// Give the image to OpenGL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_FLOAT, floatRGB);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
return textureID;
}
void BasicGLPane::DrawTextureToScreenFloat(int w, int h, float* floatDataPtr, GLuint (*textureFactory)(int width, int height, float* floatRGB)) {
if (w <= 0 || h <= 0 || floatDataPtr == NULL || w > 5000 || h > 5000) {
assert(false);
return;
}
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS);
glClearColor(0.15f, 0.11f, 0.02f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// 4.6.0 NVIDIA 457.30 (R Keene machine, 11/25/2020)
// auto sss = glGetString(GL_VERSION);
float onePixelW = (float)getWidth() / (float)w;
float onePixelH = (float)getHeight() / (float)h;
float orthoW = w;
float orthoH = h;
if (onePixelH > onePixelW) {
orthoH = h * onePixelH / onePixelW;
}
else {
orthoW = w * onePixelW / onePixelH;
}
// We want the image at the top of the window, not the bottom if the window is too tall.
int topOfScreen = (float)getHeight() / onePixelH;
// If the window resizes after creation you need to change the viewport.
glViewport(0, 0, getWidth(), getHeight());
gluOrtho2D(0.0, orthoW, (double)topOfScreen - (double)orthoH, topOfScreen);
GLuint myTextureName = textureFactory(w, h, floatDataPtr);
glBegin(GL_QUADS);
{
// This order of UV coords and verticies will do the vertical flip of the image to get the 'regular' image 0,0
// in the top left corner.
glTexCoord2f(0.0f, 1.0f); glVertex3f(0.0f, 0.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f(0.0f + w, 0.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f(0.0f + w, 0.0f + h, 0.0f);
glTexCoord2f(0.0f, 0.0f); glVertex3f(0.0f, 0.0f + h, 0.0f);
}
glEnd();
glDeleteTextures(1, &myTextureName);
glFlush();
SwapBuffers();
glPopClientAttrib();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::DrawTextureToScreenMat(wxPaintEvent& evt, cv::Mat m, float brightness) {
m.type();
if (m.empty()) {
renderNone(evt);
return;
}
if (m.type() == CV_32FC1) { // Grey scale.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromDataLuninanceFloats);
}
else if (m.type() == CV_32FC3) { // Color.
DrawTextureToScreenFloat(m.cols, m.rows, (float*)m.data, makeOpenGlTextureFromRGBFloats);
}
else {
renderNone(evt);
}
}
void BasicGLPane::renderCamera(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->im_white);
}
void BasicGLPane::renderDepth(wxPaintEvent& evt) {
if (!IsShown())
return;
DrawTextureToScreenMat(evt, ORITopControl::Instance->depth_map);
}
void BasicGLPane::render3DModel(wxPaintEvent& evt) {
if (!IsShown())
return;
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}
void BasicGLPane::renderPointCloud(wxPaintEvent& evt) {
if (!IsShown())
return;
boost::unique_lock<boost::mutex> lk(ORITopControl::Instance->pointCloudCacheMutex);
SetCurrent(*(m_context));
glPushAttrib(GL_ALL_ATTRIB_BITS);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glViewport(0, 0, getWidth(), getHeight());
glClearColor(0.08f, 0.11f, 0.15f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (ORITopControl::Instance->pointCloudCache.size() > 0) {
glMatrixMode(GL_PROJECTION);
gluPerspective( /* field of view in degree */ 40.0,
/* aspect ratio */ 1.0,
/* Z near */ 1.0, /* Z far */ 500.0);
glMatrixMode(GL_MODELVIEW);
gluLookAt(100, 70, 200, // Eye
25, 25, 25, // Look at pt
0, 0, 1); // Up Vector
glPointSize(2.0);
glBegin(GL_POINTS);
// Use explicit for loop because pointCloudFragments can grow asynchronously.
for (int i = 0; i < ORITopControl::Instance->pointCloudCache.size(); i++) {
auto frag = ORITopControl::Instance->pointCloudCache[i];
auto current_point_cloud_ptr = frag->cloud;
glPushMatrix();
// glMultMatrixf(frag->xform.data());
for (size_t n = 0; n < current_point_cloud_ptr->size(); n++) {
glColor3ub(255, 255, 255);
glVertex3d(current_point_cloud_ptr->points[n].x, current_point_cloud_ptr->points[n].y, current_point_cloud_ptr->points[n].z);
}
glPopMatrix();
}
glEnd();
}
glFlush();
SwapBuffers();
glPopMatrix();
glPopAttrib();
}