Segfault when calling glGenFramebuffers()? [closed] - c++

I wanted to draw some 2D on top of 3D so I could make a decent GUI, so I created the textures and so on.
The code compiles without errors, but when I run the program everything goes fine until I call this:
glGenFramebuffers(1, &fb);
Then this appears:
error 139 segmentation fault (core dumped).
Does anyone know what's wrong with the code?
std::cout << "test1" << std::endl;
unsigned int fb;
glGenFramebuffers(1, &fb);
std::cout << "test2" << std::endl;
glBindRenderbuffer(GL_RENDERBUFFER, fb);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);
The "test1" text is displayed, but the "test2" is not.
CODE:
game.cpp
#include "game.h"
game::game(){
SDL_Init(SDL_INIT_EVERYTHING);
SDL_Surface* screen = SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE|SDL_OPENGL);
glClearColor(0.5, 0.5, 0.5, 1.0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45, 1000.0/600.0, 1.0, 500.0);
glMatrixMode(GL_MODELVIEW);
glEnable(GL_DEPTH_TEST);
glEnable(GL_TEXTURE_2D);
}
game::~game(){
SDL_Quit();
}
void game::start(){
Uint32 start;
SDL_Event event;
texture renderTexture = texture();
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1000, 600, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0);
std::cerr << "test1" << std::endl;
unsigned int fb;
glGenFramebuffers(1, &fb);
std::cerr << "test2" << std::endl;
glBindRenderbuffer(GL_RENDERBUFFER, fb);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);
bool running = true;
while (running){
start = SDL_GetTicks();
while (SDL_PollEvent(&event)){
switch (event.type){
case SDL_QUIT:
running = false;
break;
}
}
update();
show(fb);
showMenu();
SDL_GL_SwapBuffers();
if (1000/30 > (SDL_GetTicks()-start)){
SDL_Delay(1000/30 - (SDL_GetTicks()-start));
}
}
}
void game::update(){
}
void game::show(unsigned int fb){
glBindFramebuffer(GL_FRAMEBUFFER, fb);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
}
void game::showMenu(){
bindWindowAsRenderTarget();
glViewport(0, 0, 1000, 600);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1000.0, 0.0, 600.0, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void game::bindWindowAsRenderTarget(){
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glViewport(0, 0, 1000, 600);
}
game.h
#ifndef GAME_H_INCLUDED
#define GAME_H_INCLUDED
#include <iostream>
#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>
#include "texture.h"
class game{
void update();
void show(unsigned int fb);
void showMenu();
void bindWindowAsRenderTarget();
public:
game();
~game();
void start();
};
#endif // GAME_H_INCLUDED
main.cpp
#include "game.h"
int main(int argc, char** argv){
game g;
g.start();
return 0;
}
texture.h
#ifndef TEXTURE_H_INCLUDED
#define TEXTURE_H_INCLUDED
#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>
class texture{
unsigned int id;
public:
texture();
~texture();
void loadImage(const char* filename);
unsigned int getId();
};
#endif // TEXTURE_H_INCLUDED
texture.cpp
#include "texture.h"
texture::texture(){
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
}
texture::~texture(){
glDeleteTextures(1, &id);
}
void texture::loadImage(const char* filename){
SDL_Surface* img = SDL_LoadBMP(filename);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
SDL_FreeSurface(img);
}
unsigned int texture::getId(){
return id;
}

Dollars to donuts you're #include-ing a GL extension loader (GLEW, GLAD, etc.) and then:
1. trying to initialize it before you have a current GL context (leaving function pointers like glGenFramebuffers() NULL), or
2. forgetting to initialize it entirely, or
3. (unlikely, unless you're doing something silly like trying to use OpenGL over remote desktop) correctly initializing your extension loader but using a GL implementation that doesn't support core FBOs.
EDIT: Oh hey, it's #2.
You need to call glewInit() after SDL_SetVideoMode(). You should also use GLEW's version/extension checks to verify that the underlying GL implementation actually supports core FBOs (they went core in OpenGL 3.0).
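A minimal sketch of the fix, assuming GLEW and SDL 1.2 as in the question (the error handling and the GLEW_VERSION_3_0 / GLEW_ARB_framebuffer_object checks are illustrative additions, not part of the original code):

game::game(){
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Surface* screen = SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE|SDL_OPENGL);

    // glewInit() needs a current GL context, so it must come *after* SDL_SetVideoMode().
    GLenum err = glewInit();
    if (err != GLEW_OK){
        std::cerr << "glewInit failed: " << glewGetErrorString(err) << std::endl;
        return; // handle the failure however fits your program
    }
    // Make sure the driver actually exposes FBOs before using them.
    if (!GLEW_VERSION_3_0 && !GLEW_ARB_framebuffer_object){
        std::cerr << "Framebuffer objects are not supported here" << std::endl;
        return;
    }

    // ... the rest of the constructor (projection setup, etc.) stays as before ...
}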

Related

Load jpg image as texture - freeimage, opengl

I tried to load a JPEG image with the FreeImage library. I used the code below, but the result was only a white window. I want to use this image as a background and after that load an object file.
Here is the code that I used:
#include <windows.h>
#include <GL/glut.h>
#include <iostream>
#include <FreeImage.h>
FIBITMAP *loadImage(const char *filename)
{
FIBITMAP *dib1 = NULL;
FREE_IMAGE_FORMAT fif = FreeImage_GetFIFFromFilename(filename);
dib1 = FreeImage_Load(fif, filename, JPEG_DEFAULT);
if (!dib1)
{
std::cerr << "Erreur ouverture d\'image" << std::endl;
exit (0);
}
std::cerr << "Success" << std::endl;
return dib1;
}
GLuint loadTexture (FIBITMAP * dib1)
{
GLuint tex_id = 0;
int x, y;
int height, width;
RGBQUAD rgbquad;
FREE_IMAGE_TYPE type;
BITMAPINFOHEADER *header;
type = FreeImage_GetImageType(dib1);
height = FreeImage_GetHeight(dib1);
width = FreeImage_GetWidth(dib1);
header = FreeImage_GetInfoHeader(dib1);
int scanLineWidh = ((3*width)%4 == 0) ? 3*width : ((3*width)/4)*4+4;
unsigned char * texels= (GLubyte*)calloc(height*scanLineWidh, sizeof(GLubyte));
for (x=0 ; x<width ; x++)
for (y=0 ; y<height; y++)
{
FreeImage_GetPixelColor(dib1,x,y,&rgbquad);
texels[(y*scanLineWidh+3*x)]=((GLubyte*)&rgbquad)[2];
texels[(y*scanLineWidh+3*x)+1]=((GLubyte*)&rgbquad)[1];
texels[(y*scanLineWidh+3*x)+2]=((GLubyte*)&rgbquad)[0];
}
glGenTextures (1, &tex_id);
glBindTexture (GL_TEXTURE_2D, tex_id);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D (GL_TEXTURE_2D, 0, GL_RGB,
width, height, 0, GL_RGB,
GL_UNSIGNED_BYTE, texels);
free(texels);
return tex_id;
}
void display(void)
{
glClearColor (0.0,0.0,0.0,1.0);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glutSwapBuffers(); //swap the buffers
}
int main(int argc, char **argv) {
FIBITMAP *dib1 = loadImage("planina.jpg");
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);
glutInitWindowSize(800,450);
glutInitWindowPosition(20,20);
glutCreateWindow("Loader");
//glutReshapeFunc(reshape);
//glutDisplayFunc(display);
loadTexture(dib1);
FreeImage_Unload(dib1);
glutMainLoop();
return 0;
}
What am I doing wrong?
This is your display function:
void display(void)
{
glClearColor (0.0,0.0,0.0,1.0);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glutSwapBuffers(); //swap the buffers
}
What's immediately apparent is that the only things it does are:
set a clear color,
clear the window,
load an identity matrix, and
swap the buffers to display the result.
What's missing is any actual drawing. You have to draw some triangles or quads with the texture applied for the texture to show up at all.
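For illustration, a minimal sketch of what such a display callback could look like with the fixed-function pipeline used in the question (g_tex is a hypothetical global holding the id returned by loadTexture(); it is not in the original code, and glutDisplayFunc(display) must be uncommented in main()):

GLuint g_tex; // hypothetical: store the return value of loadTexture(dib1) here

void display(void)
{
    glClearColor(0.0, 0.0, 0.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();

    // Draw a full-window quad with the texture applied
    // (the default projection is identity, so -1..1 fills the window).
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, g_tex);
    glBegin(GL_QUADS);
        glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
        glTexCoord2f(1.0f, 0.0f); glVertex2f( 1.0f, -1.0f);
        glTexCoord2f(1.0f, 1.0f); glVertex2f( 1.0f,  1.0f);
        glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f,  1.0f);
    glEnd();

    glutSwapBuffers();
}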

Using CUDA, SFML, and OpenGL: Texture Refuses to Appear on Quad

Using various tutorials/examples/documentation/forums online, I have typed out code to let CUDA manipulate OpenGL textures so they can be output to the screen. My display method uses a PBO and an allocated texture image backed by a uchar4 array. Despite all my attempts at fixing the problem, the texture does not show up on the 2D surface, and I cannot seem to pinpoint why.
These are all the things I have checked/done so far: I created a PBO and registered it with CUDA, called cudaGraphicsResourceGetMappedPointer and its unmapping counterpart before and after the GPU function calls, made sure that glEnable is called for GL_TEXTURE_2D, called glDisable for anything unnecessary, and unbound textures/buffers when not in use. I have also reset SFML's OpenGL states in case SFML was the cause. Square textures have also been tried. My OpenGL version and CUDA version support all the function calls I use.
There did not seem to be any errors in the program when I checked CUDA errors and OpenGL errors.
I am not sure if this has something to do with it, but when I call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
My quad does not seem to display.
I have mainly found inspiration from this website.
Thank you very much!
Here is my code:
Main.cpp
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <SFML/Window.hpp>
#include <SFML/OpenGL.hpp>
#include <SFML/System.hpp>
#include <SFML/Graphics/RenderWindow.hpp>
#include "GeneralTypedef.h"
#include "OpenGLTest.cuh"
int main()
{
// create the window
sf::RenderWindow window(sf::VideoMode(1024, 1024), "OpenGL");
//window.setVerticalSyncEnabled(true);
sf::Vector2u windowSize;
windowSize = sf::Vector2u(window.getSize());
bool running = true;
glewInit();
window.resetGLStates();
std::printf("OpenGL: %s:", glGetString(GL_VERSION));
// We will not be using SFML's gl states.
OpenGLTest* test = new OpenGLTest(window.getSize());
sf::Time time;
while (running)
{
// handle events
sf::Event event;
while (window.pollEvent(event))
{
if (event.type == sf::Event::Closed)
{
// end the program
running = false;
}
else if (event.type == sf::Event::Resized)
{
// adjust the viewport when the window is resized
glViewport(0, 0, event.size.width, event.size.height);
windowSize = window.getSize();
}
}
// clear the buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
test->createFrame(time.asMicroseconds());
test->drawFrame();
window.display();
}
// release resources...
delete test;
return 0;
}
OpenGLTest.cuh
#ifndef OPENGLTEST_CUH
#define OPENGLTEST_CUH
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "GeneralTypedef.h"
class OpenGLTest
{
public:
uchar4* image;
GLuint gltexture;
GLuint pbo;
cudaGraphicsResource_t cudaPBO;
uchar4* d_textureBufferData;
sf::Vector2u windowSize;
OpenGLTest(sf::Vector2u windowSize)
{
this->windowSize = sf::Vector2u(windowSize);
this->setupOpenGL();
};
~OpenGLTest()
{
delete image;
image == nullptr;
cudaFree(d_textureBufferData);
d_textureBufferData == nullptr;
glDeleteTextures(1, &gltexture);
}
void drawFrame();
void createFrame(float time);
private:
void setupOpenGL();
};
#endif //OPENGLTEST_CUH
OpenGLTest.cu
#include "OpenGLTest.cuh"
__global__ void createGPUTexture(uchar4* d_texture)
{
uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
d_texture[pixelID].x = 0;
d_texture[pixelID].y = 1;
d_texture[pixelID].z = 1;
d_texture[pixelID].w = 0;
}
__global__ void wow(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index / width;
if (index < width*height) {
unsigned char r = (x + (int)time) & 0xff;
unsigned char g = (y + (int)time) & 0xff;
unsigned char b = ((x + y) + (int)time) & 0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
void OpenGLTest::drawFrame()
{
glColor3f(1.0f,1.0f,1.0f);
glBindTexture(GL_TEXTURE_2D, gltexture);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, windowSize.x, windowSize.y, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(0.0f, float(windowSize.y));
glTexCoord2f(1.0f, 0.0f);
glVertex2f(float(windowSize.x), float(windowSize.y));
glTexCoord2f(1.0f, 1.0f);
glVertex2f(float(windowSize.x), 0.0f);
glTexCoord2f(0.0f,1.0f);
glVertex2f(0.0f, 0.0f);
glEnd();
glFlush();
// Release
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
// Test Triangle
/*
glBegin(GL_TRIANGLES);
glColor3f(0.1, 0.2, 0.3);
glVertex2f(0, 0);
glVertex2f(10, 0);
glVertex2f(0, 100);
glEnd();
*/
}
void OpenGLTest::createFrame(float time)
{
cudaGraphicsMapResources(1, &cudaPBO, 0);
size_t numBytes;
cudaGraphicsResourceGetMappedPointer((void**)&d_textureBufferData, &numBytes, cudaPBO);
int totalThreads = windowSize.x * windowSize.y;
int nBlocks = totalThreads/ 256;
// Run code here.
createGPUTexture << <nBlocks, 256>> >(d_textureBufferData);
//wow << <nBlocks, 256 >> >(d_textureBufferData, windowSize.x, windowSize.y, time);
// Unmap mapping to PBO so that OpenGL can access.
cudaGraphicsUnmapResources(1, &cudaPBO, 0);
}
void OpenGLTest::setupOpenGL()
{
image = new uchar4[1024*1024];
glViewport(0, 0, windowSize.x, windowSize.y);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, windowSize.x, windowSize.y, 0.0, -1.0, 1.0);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
glDisable(GL_DEPTH_TEST);
// Unbind any textures from previous.
glBindTexture(GL_TEXTURE_2D, 0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// Create new textures.
glGenTextures(1, &gltexture);
glBindTexture(GL_TEXTURE_2D, gltexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Create image with same resolution as window.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, windowSize.x , windowSize.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
// Create pixel buffer boject.
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, windowSize.x * windowSize.y * sizeof(uchar4), image, GL_STREAM_COPY);
cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsMapFlagsNone);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
GeneralTypedef.h
#ifndef GENERALTYPEDEF_CUH
#define GENERALTYPEDEF_CUH
typedef unsigned int uint;
#endif // GENERALTYPEDEF_CUH
After rewriting the entire code and understanding it better, I figured out the reason: the color components of the uchar4 in the kernel are mapped to the 0-255 range, and the w component is the alpha (opacity), so it has to be set to 255 for the image to show. I hope this helps those who may have the same problem; some sites set this value very low as well.
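For example, the kernel from the code above only needs its color values scaled to the 0-255 range and its w (alpha) set to 255; a sketch of the corrected kernel:

__global__ void createGPUTexture(uchar4* d_texture)
{
    unsigned int pixelID = blockIdx.x * blockDim.x + threadIdx.x;
    d_texture[pixelID].x = 0;    // red
    d_texture[pixelID].y = 255;  // green (was 1, which is nearly black)
    d_texture[pixelID].z = 255;  // blue  (was 1)
    d_texture[pixelID].w = 255;  // alpha: 255 = fully opaque (was 0, i.e. invisible)
}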

Crash with CUDA/OGL interop

I am trying to set up a little CUDA/GL interop example. I have looked around on the internet and found some tutorials with helpful material.
All I want is to produce a texture in CUDA and draw it with OpenGL.
The source I have now crashes my MacBook Pro every time I run it, so it would be really helpful if somebody could take a look at it.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
void initGLandCUDA() {
int argc = 0;
char** argv = NULL;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("CUDA GL Interop");
glewInit();
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
}
glBindTexture(GL_TEXTURE_2D, 0);
cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
__global__ void renderingKernel(cudaSurfaceObject_t image) {
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
uchar4 color = make_uchar4(0.f, 0.f, 0.f, .3f);
//if I write in 0, 0 and not x,y, the computer is not crashing, but there is no black pixel at 0,0
surf2Dwrite(color, image, x, y, cudaBoundaryModeClamp);
}
void callCUDAKernel(cudaSurfaceObject_t image) {
dim3 block(8, 1, 1);
dim3 grid(8, 1, 1);
renderingKernel<<< grid, block>>>(image);
}
void renderFrame() {
cudaGraphicsMapResources(1, &viewCudaResource);
{
cudaArray_t viewCudaArray;
checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
cudaResourceDesc viewCudaArrayResourceDesc;
{
viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
}
cudaSurfaceObject_t viewCudaSurfaceObject;
checkCudaErrors(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
callCUDAKernel(viewCudaSurfaceObject);
checkCudaErrors(cudaDestroySurfaceObject(viewCudaSurfaceObject));
}
checkCudaErrors(cudaGraphicsUnmapResources(1, &viewCudaResource));
checkCudaErrors(cudaStreamSynchronize(0));
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
}
glEnd();
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
}
int main(int argc, char **argv)
{
initGLandCUDA();
glutDisplayFunc(renderFrame);
//glutKeyboardFunc(keyboard);
//glutMouseFunc(mouse);
glutMainLoop();
}
It seems like some kind of out-of-range error, but I am currently out of ideas (by the way, this is compute capability 3.0, running on an NVIDIA 650M).
Edit:
By crashing I mean: crashing. The computer freezes, I can't move my mouse, and I have to reboot.
Yes, I have looked at all the examples; they are not exactly what I want, and changing them to be what I want results in this problem. If there were any other help in the manual or anywhere else that I had found, I would not bother asking for help. You need to link with the cuda_runtime and glut libraries.
Below is a working version of your code. The issues in your code were:
Your kernel was depending on being launched with 512x512 threads, but you were only launching with 64x1 threads.
Your kernel was writing to unaligned addresses with surf2Dwrite().
You were setting up double buffering in OpenGL but you were not swapping the buffers (glutSwapBuffers()).
You were initializing a uchar4 with floats.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
#define check(ans) { _check((ans), __FILE__, __LINE__); }
inline void _check(cudaError_t code, char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
void initGLandCUDA() {
int argc = 0;
char** argv = NULL;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("CUDA GL Interop");
glewInit();
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &viewGLTexture);
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, window_width, window_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glBindTexture(GL_TEXTURE_2D, 0);
check(cudaGLSetGLDevice(0));
check(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard));
}
__global__ void renderingKernel(cudaSurfaceObject_t image) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 color = make_uchar4(x / 2, y / 2, 0, 127);
surf2Dwrite(color, image, x * sizeof(color), y, cudaBoundaryModeClamp);
}
void callCUDAKernel(cudaSurfaceObject_t image) {
dim3 block(256, 1, 1);
dim3 grid(2, 512, 1);
renderingKernel<<<grid, block>>>(image);
check(cudaPeekAtLastError());
check(cudaDeviceSynchronize());
}
void renderFrame() {
check(cudaGraphicsMapResources(1, &viewCudaResource));
cudaArray_t viewCudaArray;
check(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
cudaResourceDesc viewCudaArrayResourceDesc;
memset(&viewCudaArrayResourceDesc, 0, sizeof(viewCudaArrayResourceDesc));
viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
cudaSurfaceObject_t viewCudaSurfaceObject;
check(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
callCUDAKernel(viewCudaSurfaceObject);
check(cudaDestroySurfaceObject(viewCudaSurfaceObject));
check(cudaGraphicsUnmapResources(1, &viewCudaResource));
check(cudaStreamSynchronize(0));
glBindTexture(GL_TEXTURE_2D, viewGLTexture);
{
glBegin(GL_QUADS);
{
glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
}
glEnd();
}
glBindTexture(GL_TEXTURE_2D, 0);
glFinish();
}
int main(int argc, char **argv)
{
initGLandCUDA();
glutDisplayFunc(renderFrame);
//glutKeyboardFunc(keyboard);
//glutMouseFunc(mouse);
glutMainLoop();
}
Output:

GL_UNSIGNED_SHORT_5_6_5 undeclared?

I have a problem loading a texture using the SDL library.
I usually write my programs on Linux, but I am trying to create code that is also compatible with Visual Studio.
On Linux everything is OK, but on Visual Studio the build fails on "GL_UNSIGNED_SHORT_5_6_5" in the glTexImage2D(...) call.
Below is a general idea of what I want to do, inspired by this tutorial:
#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
//#include <GL/glext.h>
#include "SDL.h"
int brick;
float c=0.5;
float rx_min=0, ry_min=0;
float rx_max=1, ry_max=1;
unsigned int LoadTexture(const char* filename);
void DrawTexture(int object);
void setupmywindow();
void myDrawing();
void setupmywindow()
{
glClearColor(1.0,1.0,1.0,0);
glColor3f(0.0, 0.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
gluOrtho2D(rx_min,ry_min, rx_max, ry_max);
brick = LoadTexture("brick.bmp");
}
void DrawTexture(int object)
{
glBindTexture(GL_TEXTURE_2D, object);
glColor3f(c,c,c);
glBegin(GL_QUADS);
glTexCoord2f(0., 1. );
glVertex2f( rx_min , ry_min );
glTexCoord2f(0., 0. );
glVertex2f( rx_min, ry_max );
glTexCoord2f(1., 0. );
glVertex2f( rx_max , ry_max );
glTexCoord2f(1., 1. );
glVertex2f( rx_max , ry_min );
glEnd();
}
unsigned int LoadTexture(const char* filename)
{
SDL_Surface* img=SDL_LoadBMP(filename);
unsigned int id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D,id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
SDL_FreeSurface(img);
return id;
}
void myDrawing()
{
glClear(GL_COLOR_BUFFER_BIT);
DrawTexture(brick);
glFlush();
}
int main(int argc, char **argv)
{
printf("AUTH Computational Physics - Computer Graphics\n");
printf("Project >>TestTexture.cpp\n");
printf("--------------------------------------------------------\n");
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB);
glutInitWindowPosition(50,50);
glutCreateWindow("Texture Test");
setupmywindow();
glutDisplayFunc(myDrawing);
glutMainLoop();
return 0;
}
The error is:
error C2065: 'GL_UNSIGNED_SHORT_5_6_5' : undeclared identifier
Here is the image I am trying to load; it is saved as a bitmap (8bit 5 6 5) with GIMP 2.8.
NOTE: When I uncomment #include <GL/glext.h>, which is not needed on Linux, I instead get this message:
Unhandled exception at 0x00d1193f in testTesxture.exe: 0xC0000005: Access violation reading location 0x00000014.
Generally, if I save a bitmap image (for example with Paint), how can I tell which type I have to pass (GL_UNSIGNED_SHORT_5_6_5, GL_UNSIGNED_BYTE, etc.)?
The problem is likely that the OpenGL headers that ship with Windows are much older than the ones on Linux (Windows' gl.h only covers OpenGL 1.1), so that specific identifier (and others, I'm sure) is simply not declared. To get around this and any other version problems, I would use GLEW, which does the hard work for you.
On Windows, add these lines after the includes:
#ifndef GL_UNSIGNED_SHORT_5_6_5
#define GL_UNSIGNED_SHORT_5_6_5 0x8363
#endif
#ifndef GL_CLAMP_TO_EDGE
#define GL_CLAMP_TO_EDGE 0x812F
#endif
According to this video.
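As for the last part of the question (how to tell which type to pass), one rough approach is to look at the surface SDL actually gives you instead of hard-coding it. A sketch using the SDL 1.2 fields already used in the question; note that 24-bit BMPs may also need the channel order handled, which you can check via img->format->Rmask:

SDL_Surface* img = SDL_LoadBMP(filename);
GLenum type;
if (img->format->BytesPerPixel == 2)
    type = GL_UNSIGNED_SHORT_5_6_5;  // 16-bit surface, e.g. the 5-6-5 BMP exported by GIMP
else
    type = GL_UNSIGNED_BYTE;         // 24/32-bit surface: one byte per channel
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, type, img->pixels);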

OpenGL repeated calls to glTexImage2D and alpha blending

This is more out of curiosity than for any practical purpose: is there anything in the OpenGL specification that suggests that calling glTexImage2D many times (e.g., once per frame) is illegal? I mean illegal as in 'it could produce wrong results', not just inefficient (suppose I don't care about the performance impact of not using glTexSubImage2D instead).
The reason I'm asking is that I noticed some very odd artifacts when drawing overlapping, texture-mapped primitives that use a partly-transparent texture which is loaded once per every frame using glTexImage2D (see the attached picture): after a few seconds (i.e., a few hundred frames), small rectangular black patches appear on the screen (they're actually flipping between black and normal between consecutive frames).
I'm attaching below the simplest example code I could write that exhibits the problem.
#include <stdio.h>
#ifndef __APPLE__
# include <SDL/SDL.h>
# include <SDL/SDL_opengl.h>
#else
# include <SDL.h>
# include <SDL_opengl.h>
#endif
/* some constants and variables that several functions use */
const int width = 640;
const int height = 480;
#define texSize 64
GLuint vbo;
GLuint tex;
/* forward declaration, creates a random texture; uses glTexSubImage2D if
update is non-zero (otherwise glTexImage2D) */
void createTexture(GLuint label, int update);
int init()
{
/* SDL initialization */
if (SDL_Init(SDL_INIT_VIDEO) < 0)
return 0;
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
if (!SDL_SetVideoMode(width, height, 0, SDL_OPENGL)) {
fprintf(stderr, "Couldn't initialize OpenGL");
return 0;
}
/* OpenGL initialization */
glClearColor(0, 0, 0, 0);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, width, height, 0, -1, 1);
glMatrixMode(GL_MODELVIEW);
/* creating the VBO and the textures */
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 1024, 0, GL_DYNAMIC_DRAW);
glGenTextures(1, &tex);
createTexture(tex, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
return 1;
}
/* draw a triangle at the specified point */
void drawTriangle(GLfloat x, GLfloat y)
{
GLfloat coords1[12] = {0, 0, 0, 0, /**/200, 0, 1, 0, /**/200, 150, 1, 1};
glLoadIdentity();
glTranslatef(x, y, 0);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords1), coords1);
glVertexPointer(2, GL_FLOAT, 4*sizeof(GLfloat), (void*)0);
glTexCoordPointer(2, GL_FLOAT, 4*sizeof(GLfloat),
(char*)0 + 2*sizeof(GLfloat));
glDrawArrays(GL_TRIANGLES, 0, 3);
}
void render()
{
glClear(GL_COLOR_BUFFER_BIT);
drawTriangle(250, 50);
createTexture(tex, 0);
drawTriangle(260, 120);
SDL_GL_SwapBuffers();
}
void cleanup()
{
glDeleteTextures(1, &tex);
glDeleteBuffers(1, &vbo);
SDL_Quit();
}
int main(int argc, char* argv[])
{
SDL_Event event;
if (!init()) return 1;
while (1) {
while (SDL_PollEvent(&event))
if (event.type == SDL_QUIT)
return 0;
render();
}
cleanup();
return 0;
}
void createTexture(GLuint label, int update)
{
GLubyte data[texSize*texSize*4];
GLubyte* p;
int i, j;
glBindTexture(GL_TEXTURE_2D, label);
for (i = 0; i < texSize; ++i) {
for (j = 0; j < texSize; ++j) {
p = data + (i + j*texSize)*4;
p[0] = ((i % 8) > 4?255:0);
p[1] = ((j % 8) > 4?255:0);
p[2] = ((i % 8) > 4?255:0);
p[3] = 255 - i*3;
}
}
if (!update)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, texSize, texSize, 0, GL_RGBA,
GL_UNSIGNED_BYTE, data);
else
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, texSize, texSize, GL_RGBA,
GL_UNSIGNED_BYTE, data);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
Notes:
I'm using SDL, but I've seen the same happening in wxWidgets, so it's not an SDL-related problem.
If I use glTexSubImage2D instead for every frame (use update = 1 in createTexture), the artifacts disappear.
If I disable blending, there are no more artifacts.
I've been testing this on a late 2010 MacBook Air, though I doubt that's particularly relevant.
This is clearly an OpenGL implementation bug (just calling glTexImage2D in a loop should not cause this to happen).
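For completeness, the workaround already hinted at in the notes, sketched against the question's own code: keep the one-time glTexImage2D allocation in init() (update = 0) and only update the pixels per frame with glTexSubImage2D (update = 1):

void render()
{
    glClear(GL_COLOR_BUFFER_BIT);
    drawTriangle(250, 50);
    createTexture(tex, 1);   /* update = 1: glTexSubImage2D, no per-frame reallocation */
    drawTriangle(260, 120);
    SDL_GL_SwapBuffers();
}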