Crash with CUDA/OpenGL interop - C++

I am trying to set up a little CUDA/GL interop example. I have looked around on the internet and found some tutorials with helpful material.
All I want is to produce a texture in CUDA and draw it with OpenGL.
The source I have now crashes my MacBook Pro every time I run it, so I thought that if somebody could take a look at it, that would be really helpful.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
void initGLandCUDA() {
    int argc = 0;
    char** argv = NULL;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("CUDA GL Interop");
    glewInit();

    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}

__global__ void renderingKernel(cudaSurfaceObject_t image) {
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    uchar4 color = make_uchar4(0.f, 0.f, 0.f, .3f);
    // if I write to 0, 0 instead of x, y, the computer does not crash, but there is no black pixel at 0,0
    surf2Dwrite(color, image, x, y, cudaBoundaryModeClamp);
}

void callCUDAKernel(cudaSurfaceObject_t image) {
    dim3 block(8, 1, 1);
    dim3 grid(8, 1, 1);
    renderingKernel<<<grid, block>>>(image);
}

void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource);
    {
        cudaArray_t viewCudaArray;
        checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
        cudaResourceDesc viewCudaArrayResourceDesc;
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }
        cudaSurfaceObject_t viewCudaSurfaceObject;
        checkCudaErrors(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
        callCUDAKernel(viewCudaSurfaceObject);
        checkCudaErrors(cudaDestroySurfaceObject(viewCudaSurfaceObject));
    }
    checkCudaErrors(cudaGraphicsUnmapResources(1, &viewCudaResource));
    checkCudaErrors(cudaStreamSynchronize(0));

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    glFinish();
}

int main(int argc, char **argv)
{
    initGLandCUDA();

    glutDisplayFunc(renderFrame);
    //glutKeyboardFunc(keyboard);
    //glutMouseFunc(mouse);

    glutMainLoop();
}
It seems like some kind of out-of-range error, but I am currently out of ideas (by the way, this is compute capability 3.0, running on an NVIDIA GeForce 650M).
Edit:
By crashing I mean: crashing. The computer freezes; I can't move my mouse and I have to reboot.
Yes, I have looked at all the examples; they are not exactly what I want, and changing them to be what I want results in this problem. If there were any other help in the manual or anywhere else that I had found, I would not bother asking for help. Note that you need to link against the cuda_runtime and glut libs.

Below is a working version of your code. The issues in your code were:
Your kernel depended on being launched with 512x512 threads, but you were launching it with only 64x1.
Your kernel was writing to unaligned addresses with surf2Dwrite().
You were setting up double buffering in OpenGL but you were never swapping the buffers (glutSwapBuffers()).
You were initializing a uchar4 with floats.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
#define check(ans) { _check((ans), __FILE__, __LINE__); }
inline void _check(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}
void initGLandCUDA() {
    int argc = 0;
    char** argv = NULL;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("CUDA GL Interop");
    glewInit();

    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, window_width, window_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    glBindTexture(GL_TEXTURE_2D, 0);

    check(cudaGLSetGLDevice(0));
    check(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard));
}
__global__ void renderingKernel(cudaSurfaceObject_t image) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    uchar4 color = make_uchar4(x / 2, y / 2, 0, 127);
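    // surf2Dwrite() takes its x coordinate in bytes, hence the sizeof(color) scaling below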
    surf2Dwrite(color, image, x * sizeof(color), y, cudaBoundaryModeClamp);
}
void callCUDAKernel(cudaSurfaceObject_t image) {
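    // one thread per texel: 2 blocks of 256 threads cover 512 columns, and 512 blocks cover 512 rows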
    dim3 block(256, 1, 1);
    dim3 grid(2, 512, 1);
    renderingKernel<<<grid, block>>>(image);
    check(cudaPeekAtLastError());
    check(cudaDeviceSynchronize());
}
void renderFrame() {
    check(cudaGraphicsMapResources(1, &viewCudaResource));
    cudaArray_t viewCudaArray;
    check(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
    cudaResourceDesc viewCudaArrayResourceDesc;
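    // zero the descriptor so fields that are not set explicitly do not contain garbage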
    memset(&viewCudaArrayResourceDesc, 0, sizeof(viewCudaArrayResourceDesc));
    viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
    viewCudaArrayResourceDesc.res.array.array = viewCudaArray;

    cudaSurfaceObject_t viewCudaSurfaceObject;
    check(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
    callCUDAKernel(viewCudaSurfaceObject);
    check(cudaDestroySurfaceObject(viewCudaSurfaceObject));
    check(cudaGraphicsUnmapResources(1, &viewCudaResource));
    check(cudaStreamSynchronize(0));

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    glFinish();
}
int main(int argc, char **argv)
{
    initGLandCUDA();

    glutDisplayFunc(renderFrame);
    //glutKeyboardFunc(keyboard);
    //glutMouseFunc(mouse);

    glutMainLoop();
}
Output: (screenshot omitted)

Segfault when calling glGenFramebuffers()?

I wanted to do some 2D on top of 3D so I could do a decent GUI. So I created the textures and so on.
I can compile the code; it generates no errors. But when I run the program, all goes right until I call this:
glGenFramebuffers(1, &fb);
Then this appears:
error 139 segmentation fault (core dumped).
Does someone know what's wrong with the code?
std::cout << "test1" << std::endl;
unsigned int fb;
glGenFramebuffers(1, &fb);
std::cout << "test2" << std::endl;
glBindRenderbuffer(GL_RENDERBUFFER, fb);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);
The "test1" text is displayed, but the "test2" is not.
CODE:
game.cpp
#include "game.h"
game::game(){
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Surface* screen = SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE|SDL_OPENGL);
    glClearColor(0.5, 0.5, 0.5, 1.0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45, 1000.0/600.0, 1.0, 500.0);
    glMatrixMode(GL_MODELVIEW);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
}

game::~game(){
    SDL_Quit();
}

void game::start(){
    Uint32 start;
    SDL_Event event;
    texture renderTexture = texture();
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1000, 600, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0);
    std::cerr << "test1" << std::endl;
    unsigned int fb;
    glGenFramebuffers(1, &fb);
    std::cerr << "test2" << std::endl;
    glBindRenderbuffer(GL_RENDERBUFFER, fb);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);
    bool running = true;
    while (running){
        start = SDL_GetTicks();
        while (SDL_PollEvent(&event)){
            switch (event.type){
                case SDL_QUIT:
                    running = false;
                    break;
            }
        }
        update();
        show(fb);
        showMenu();
        SDL_GL_SwapBuffers();
        if (1000/30 > (SDL_GetTicks()-start)){
            SDL_Delay(1000/30 - (SDL_GetTicks()-start));
        }
    }
}

void game::update(){
}

void game::show(unsigned int fb){
    glBindFramebuffer(GL_FRAMEBUFFER, fb);
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
}

void game::showMenu(){
    bindWindowAsRenderTarget();
    glViewport(0, 0, 1000, 600);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, 1000.0, 0.0, 600.0, -1.0, 1.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

void game::bindWindowAsRenderTarget(){
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glViewport(0, 0, 1000, 600);
}
game.h
#ifndef GAME_H_INCLUDED
#define GAME_H_INCLUDED
#include <iostream>
#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>
#include "texture.h"
class game{
    void update();
    void show(unsigned int fb);
    void showMenu();
    void bindWindowAsRenderTarget();
public:
    game();
    ~game();
    void start();
};
#endif // GAME_H_INCLUDED
main.cpp
#include "game.h"
int main(int argc, char** argv){
    game g;
    g.start();
    return 0;
}
texture.h
#ifndef TEXTURE_H_INCLUDED
#define TEXTURE_H_INCLUDED
#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>
class texture{
    unsigned int id;
public:
    texture();
    ~texture();
    void loadImage(const char* filename);
    unsigned int getId();
};
#endif // TEXTURE_H_INCLUDED
texture.cpp
#include "texture.h"
texture::texture(){
    glGenTextures(1, &id);
    glBindTexture(GL_TEXTURE_2D, id);
}

texture::~texture(){
    glDeleteTextures(1, &id);
}

void texture::loadImage(const char* filename){
    SDL_Surface* img = SDL_LoadBMP(filename);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    SDL_FreeSurface(img);
}

unsigned int texture::getId(){
    return id;
}
Dollars to donuts you're #include-ing a GL extension loader (GLEW, GLAD, etc.) and then:
Trying to init it before you have a current GL context (leaving function pointers like glGenFramebuffers() NULL), or
Forgetting to init it entirely, or
(Unlikely, unless you're doing something silly like trying to use OpenGL via remote desktop) correctly initing your extension loader but using a GL implementation that doesn't support core FBOs.
EDIT: Oh hey, it's #2.
You need to call glewInit() after SDL_SetVideoMode(). You should also use the GLEW version-checking functions to verify that the underlying GL implementation supports core FBOs (they went core in OpenGL 3.0).
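For reference, here is a minimal sketch of that ordering using the question's SDL 1.2 setup (the function name initGraphics and the error handling are illustrative additions, not from the original code):

#include <GL/glew.h>
#include <SDL/SDL.h>
#include <cstdio>

bool initGraphics()
{
    SDL_Init(SDL_INIT_EVERYTHING);
    // 1. Create the window and GL context first...
    SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE | SDL_OPENGL);
    // 2. ...then initialize GLEW, so entry points such as glGenFramebuffers() get resolved.
    if (glewInit() != GLEW_OK) {
        std::fprintf(stderr, "glewInit failed\n");
        return false;
    }
    // 3. Verify that the implementation actually exposes FBOs before using them.
    if (!GLEW_VERSION_3_0 && !GLEW_ARB_framebuffer_object) {
        std::fprintf(stderr, "Framebuffer objects are not supported\n");
        return false;
    }
    return true;
}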

Initialize array of objects

If I were to write working code for what I want to accomplish, it would be this:
Sprite s1(img_path1);
Sprite s2(img_path2);
Sprite s[] = { s1, s2 };
I want to create the objects inside the {} of the array declaration instead of first putting the objects into variables and then into the array.
I checked out this post before asking this question and I will clarify a few things first:
I am compiling with -std=c++11
I have not defined a copy constructor of my own
I'm trying to create an array like this:
Sprite s[] =
{
    Sprite(img_path1),
    Sprite(img_path2)
};
After executing that statement, the contents of s are the same as if I created the array of objects using the default constructor:
Sprite s[2];
The problem seems not to be with the default copy constructor because the following executes perfectly fine:
Sprite sp = Sprite(img_path);
Sprite sp1(sp);
std::cout << sp1.getAspectRatio();
Relevant code:
/*
 * Sprite.h
 *
 */
#ifndef SPRITE_H
#define SPRITE_H

#include <GL/freeglut.h>
#include <FreeImage.h>
#include <iostream>
#include <string>
#include <stdio.h>

class Sprite
{
    GLuint texture;
    float aspectRatio;
public:
    Sprite();
    Sprite(std::string path);
    Sprite(GLuint texture, float aspectRatio);
    float getAspectRatio();
    void draw(float x, float y, float alpha, float size);
    virtual ~Sprite();
protected:
private:
};

#endif // SPRITE_H
/*
 * Sprite.cpp
 *
 */
#include "Sprite.h"

Sprite::Sprite()
{
    //ctor
}

Sprite::Sprite(std::string file) {
    GLuint texture = 0;
    int sWidth, sHeight;
    if (texture == 0)
    {
        FIBITMAP* bitmap = FreeImage_Load(
            FreeImage_GetFileType(file.c_str(), 0),
            file.c_str());
        if (bitmap == NULL) {
            printf("Could not load image file %s\n", file.c_str());
        }
        glGenTextures(1, &texture);
        printf("%d\n", texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        FIBITMAP *pImage = FreeImage_ConvertTo32Bits(bitmap);
        int nWidth = sWidth = FreeImage_GetWidth(pImage);
        int nHeight = sHeight = FreeImage_GetHeight(pImage);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nWidth, nHeight,
                     0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, (void*)FreeImage_GetBits(pImage));
        FreeImage_Unload(pImage);
        FreeImage_Unload(bitmap);
    }
    this->texture = texture;
    this->aspectRatio = (float)sWidth / sHeight;
    if (texture == 0) {
        std::cout << file << std::endl;
    }
}

Sprite::Sprite(GLuint t, float as) : texture(t), aspectRatio(as) {}

void Sprite::draw(float x, float y, float alpha, float size) {
    float h = size;
    float w = size * aspectRatio;
    glPushMatrix();
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexEnvf(GL_TEXTURE_2D, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    glDepthMask(GL_FALSE);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glColor4f(1.0, 1.0, 1.0, alpha);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f); glVertex2f(x, y);
    glTexCoord2f(0.0f, 1.0f); glVertex2f(x, y + h);
    glTexCoord2f(1.0f, 1.0f); glVertex2f(x + w, y + h);
    glTexCoord2f(1.0f, 0.0f); glVertex2f(x + w, y);
    glEnd();
    glDisable(GL_TEXTURE_2D);
    glDisable(GL_BLEND);
    glDepthMask(GL_TRUE);
    glPopMatrix();
}

float Sprite::getAspectRatio() { return aspectRatio; }

Sprite::~Sprite()
{
    //dtor
}
/*
* main.cpp (extract)
*
*/
#include "Globals.h"
#include <GL/freeglut.h>
#include "Sprite.h"
void render();
std::string img_path = "/home/saurabh/Dropbox/Code/C/OpenGL_Game/images/";
Sprite s[] =
{
    Sprite(img_path + "gait-right-1.gif"),
    Sprite(img_path + "gait-right-0.gif"),
    Sprite(img_path + "gait-left-1.gif"),
    Sprite(img_path + "gait-left-0.gif"),
    Sprite(img_path + "gait-top-1.gif"),
    Sprite(img_path + "gait-top-0.gif"),
    Sprite(img_path + "gait-bottom-1.gif"),
    Sprite(img_path + "gait-bottom-0.gif")
};

int main(int argn, char** argc) {
    FreeImage_Initialise();
    glutInit(&argn, argc);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(GV.windowWidth, GV.windowHeight);
    glutCreateWindow("Monogatari");
    glutDisplayFunc(render);
    glutIdleFunc(render);
    glutMainLoop();
    FreeImage_DeInitialise();
}

void render() {
    s[0].draw(0, 0, 1, 0.5);
}

Using CUDA, SFML, and OpenGL: Texture Refuses to Appear on Quad

Using various tutorials/examples/documentation/forums online, I have typed out code to allow CUDA to manipulate OpenGL textures so that they can be output to the screen. My method of displaying is to use a PBO and an allocated texture image backed by a uchar4 array. Despite all my attempts at fixing the problem, the texture will not show up on the 2D surface. I cannot seem to pinpoint the problem.
These are all the things I have checked/done thus far: I have created a PBO and registered it with CUDA, called cudaGraphicsResourceGetMappedPointer and the unmapping equivalent before and after the GPU function calls, made sure that glEnable is called for 2D_TEXTURE, glDisable called for any unnecessary values, and unbound textures/buffers when not needed. I have also reset SFML's OpenGL states in case SFML was the cause. Square textures have also been employed. My OpenGL version and CUDA version work for all the function calls I use.
There did not seem to be any errors within the program when I checked for cudaErrors and OpenGL errors.
I am not sure if this has something to do with it, but when I call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
My quad does not seem to display.
I have mainly found inspiration from this website.
Thank you very much!
Here is my code:
Main.cpp
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <SFML/Window.hpp>
#include <SFML/OpenGL.hpp>
#include <SFML/System.hpp>
#include <SFML/Graphics/RenderWindow.hpp>
#include "GeneralTypedef.h"
#include "OpenGLTest.cuh"
int main()
{
    // create the window
    sf::RenderWindow window(sf::VideoMode(1024, 1024), "OpenGL");
    //window.setVerticalSyncEnabled(true);
    sf::Vector2u windowSize;
    windowSize = sf::Vector2u(window.getSize());
    bool running = true;
    glewInit();
    window.resetGLStates();
    std::printf("OpenGL: %s:", glGetString(GL_VERSION));
    // We will not be using SFML's gl states.
    OpenGLTest* test = new OpenGLTest(window.getSize());
    sf::Time time;
    while (running)
    {
        // handle events
        sf::Event event;
        while (window.pollEvent(event))
        {
            if (event.type == sf::Event::Closed)
            {
                // end the program
                running = false;
            }
            else if (event.type == sf::Event::Resized)
            {
                // adjust the viewport when the window is resized
                glViewport(0, 0, event.size.width, event.size.height);
                windowSize = window.getSize();
            }
        }
        // clear the buffers
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        test->createFrame(time.asMicroseconds());
        test->drawFrame();
        window.display();
    }
    // release resources...
    delete test;
    return 0;
}
OpenGLTest.cuh
#ifndef OPENGLTEST_CUH
#define OPENGLTEST_CUH
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "GeneralTypedef.h"
class OpenGLTest
{
public:
    uchar4* image;
    GLuint gltexture;
    GLuint pbo;
    cudaGraphicsResource_t cudaPBO;
    uchar4* d_textureBufferData;
    sf::Vector2u windowSize;

    OpenGLTest(sf::Vector2u windowSize)
    {
        this->windowSize = sf::Vector2u(windowSize);
        this->setupOpenGL();
    };

    ~OpenGLTest()
    {
        delete[] image;
        image = nullptr;
        cudaFree(d_textureBufferData);
        d_textureBufferData = nullptr;
        glDeleteTextures(1, &gltexture);
    }

    void drawFrame();
    void createFrame(float time);
private:
    void setupOpenGL();
};
#endif //OPENGLTEST_CUH
OpenGLTest.cu
#include "OpenGLTest.cuh"
__global__ void createGPUTexture(uchar4* d_texture)
{
    uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
    d_texture[pixelID].x = 0;
    d_texture[pixelID].y = 1;
    d_texture[pixelID].z = 1;
    d_texture[pixelID].w = 0;
}

__global__ void wow(uchar4* pos, unsigned int width, unsigned int height,
                    float time)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int x = index % width;
    unsigned int y = index / width;

    if (index < width*height) {
        unsigned char r = (x + (int)time) & 0xff;
        unsigned char g = (y + (int)time) & 0xff;
        unsigned char b = ((x + y) + (int)time) & 0xff;
        // Each thread writes one pixel location in the texture (texel)
        pos[index].w = 0;
        pos[index].x = r;
        pos[index].y = g;
        pos[index].z = b;
    }
}
void OpenGLTest::drawFrame()
{
    glColor3f(1.0f, 1.0f, 1.0f);
    glBindTexture(GL_TEXTURE_2D, gltexture);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
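    // with a PBO bound to GL_PIXEL_UNPACK_BUFFER, the last argument to glTexSubImage2D is a byte offset into the buffer, not a host pointer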
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, windowSize.x, windowSize.y, GL_RGBA, GL_UNSIGNED_BYTE, 0);
    glBegin(GL_QUADS);
    glTexCoord2f(0.0f, 0.0f);
    glVertex2f(0.0f, float(windowSize.y));
    glTexCoord2f(1.0f, 0.0f);
    glVertex2f(float(windowSize.x), float(windowSize.y));
    glTexCoord2f(1.0f, 1.0f);
    glVertex2f(float(windowSize.x), 0.0f);
    glTexCoord2f(0.0f, 1.0f);
    glVertex2f(0.0f, 0.0f);
    glEnd();
    glFlush();

    // Release
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    // Test triangle
    /*
    glBegin(GL_TRIANGLES);
    glColor3f(0.1, 0.2, 0.3);
    glVertex2f(0, 0);
    glVertex2f(10, 0);
    glVertex2f(0, 100);
    glEnd();
    */
}
void OpenGLTest::createFrame(float time)
{
    cudaGraphicsMapResources(1, &cudaPBO, 0);
    size_t numBytes;
    cudaGraphicsResourceGetMappedPointer((void**)&d_textureBufferData, &numBytes, cudaPBO);

    int totalThreads = windowSize.x * windowSize.y;
    int nBlocks = totalThreads / 256;

    // Run code here.
    createGPUTexture<<<nBlocks, 256>>>(d_textureBufferData);
    //wow<<<nBlocks, 256>>>(d_textureBufferData, windowSize.x, windowSize.y, time);

    // Unmap mapping to PBO so that OpenGL can access.
    cudaGraphicsUnmapResources(1, &cudaPBO, 0);
}
void OpenGLTest::setupOpenGL()
{
    image = new uchar4[1024*1024];

    glViewport(0, 0, windowSize.x, windowSize.y);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, windowSize.x, windowSize.y, 0.0, -1.0, 1.0);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);

    // Unbind any textures from previous.
    glBindTexture(GL_TEXTURE_2D, 0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

    // Create new textures.
    glGenTextures(1, &gltexture);
    glBindTexture(GL_TEXTURE_2D, gltexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    // Create image with same resolution as window.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, windowSize.x, windowSize.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);

    // Create pixel buffer object.
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, windowSize.x * windowSize.y * sizeof(uchar4), image, GL_STREAM_COPY);
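    // register the PBO with CUDA once here; createFrame() then maps and unmaps it every frame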
    cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsMapFlagsNone);

    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
GeneralTypedef.h
#ifndef GENERALTYPEDEF_CUH
#define GENERALTYPEDEF_CUH
typedef unsigned int uint;
#endif // GENERALTYPEDEF_CUH
After rewriting the entire code and understanding it better, I figured out the reason: the color components of a uchar4 in the kernel are mapped to 0-255. The w component is the transparency (alpha), so it should be set to 255 for the image to show. I hope this helps those who may have the same problem; some sites set this value very low as well.
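As a minimal sketch of that fix, here is the kernel from above with the alpha set to opaque (the width/height parameters and bounds check are illustrative additions, not part of the original code):

__global__ void createGPUTexture(uchar4* d_texture, unsigned int width, unsigned int height)
{
    uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
    if (pixelID < width * height) {
        // Channels range over 0-255 with GL_RGBA/GL_UNSIGNED_BYTE;
        // w is the alpha channel and must be 255 for a fully opaque pixel.
        d_texture[pixelID] = make_uchar4(0, 255, 255, 255);
    }
}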

OpenGL texturing: PPM background

I am using a PPM loader to set an image as a background, but there is a problem with the colors. Here are the code and the image that I use:
http://imgur.com/w732d6j
http://imgur.com/mJr26Ik
Here is the code:
texture.h
#ifndef TEXTURE_H
#define TEXTURE_H
struct Image
{
    unsigned char* pixels;
    int width;
    int height;
    int numChannels;
};

class Texture
{
public:
    Texture();
    void Prepare(int texN);
    void ReadPPMImage(char *fn);

    GLuint texName;
    Image image;
};
#endif
texture.cpp
#include <fstream>
#include <glut.h>
#pragma warning (disable : 4996)
#include "Texture.h"
Texture::Texture()
{
}

void Texture::Prepare(int texN)
{
    texName = texN;
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glBindTexture(GL_TEXTURE_2D, texName);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, image.width,
                 image.height, 0, GL_RGB, GL_UNSIGNED_BYTE,
                 image.pixels);
}

void Texture::ReadPPMImage(char* fn)
{
    int tmpint;
    char str[100];
    FILE* inFile = fopen(fn, "rb");
    if (inFile == NULL)
    {
        printf("Can't open input file %s. Exiting.\n", fn);
        exit(1);
    }
    fscanf(inFile, "P%d\n", &tmpint);
    if (tmpint != 6)
    {
        printf("Input file is not ppm. Exiting.\n");
        exit(1);
    }
    // skip comments embedded in header
    fgets(str, 100, inFile);
    while (str[0] == '#')
        fgets(str, 100, inFile);
    // read image dimensions
    sscanf(str, "%d %d", &image.width, &image.height);
    fgets(str, 100, inFile);
    sscanf(str, "%d", &tmpint);
    if (tmpint != 255)
        printf("Warning: maxvalue is not 255 in ppm file\n");
    image.numChannels = 3;
    image.pixels = (unsigned char*) malloc(image.numChannels * image.width * image.height * sizeof(unsigned char));
    if (image.pixels == NULL)
    {
        printf("Can't allocate image of size %dx%d. Exiting\n", image.width, image.height);
        exit(1);
    }
    else
        printf("Reading image %s of size %dx%d\n", fn, image.width, image.height);
    fread(image.pixels, sizeof(unsigned char), image.numChannels * image.width * image.height, inFile);
    fclose(inFile);
}
Main.cpp
#include <glut.h>
#include "Texture.h"
#pragma warning (disable : 4996)
const float fMinX = -5.0, fMinY = -5.0, fNearZ = 1.0,
            fMaxX =  5.0, fMaxY =  5.0, fFarZ  = 10.0;

Texture ImageOne;

void Init()
{
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glEnable(GL_DEPTH_TEST);
    glGenTextures(1, &ImageOne.texName);
    ImageOne.ReadPPMImage("wood_1.ppm");
    ImageOne.Prepare(1);
}

void Reshape(int width, int height)
{
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(fMinX, fMaxX, fMinY, fMaxY, fNearZ, fFarZ);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

void Display()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_BLEND);
    glBindTexture(GL_TEXTURE_2D, ImageOne.texName);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 1);
    glVertex3f(-5.5, 5, -6);
    glTexCoord2f(0, 0);
    glVertex3f(-5.5, -5, -6);
    glTexCoord2f(1, 0);
    glVertex3f(5, -5, -6);
    glTexCoord2f(1, 1);
    glVertex3f(5, 5, -6);
    glEnd();
    glDisable(GL_TEXTURE_2D);
    glutSwapBuffers();
    glFlush();
}

void main(int argc, char **argv)
{
    // init GLUT and create window
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowPosition(100, 100);
    glutInitWindowSize(500, 500);
    glutCreateWindow("OpenGL - Rotating Cubes");
    Init();

    // register callbacks
    glutDisplayFunc(Display);
    glutReshapeFunc(Reshape);
    glutIdleFunc(Display); // used in animation

    // enter GLUT event processing cycle
    glutMainLoop();
}
Why are you using
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_BLEND);
?
It does not make sense for your use case (and perfectly explains the "inversion" of the color values). You probably want GL_REPLACE or GL_MODULATE.
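A one-line sketch of that change in Display(), assuming you want the texture modulated by the current color:

glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);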

GL_UNSIGNED_SHORT_5_6_5 undeclared?

I have a problem loading a texture using the SDL library.
Usually I write programs on Linux, but I am trying to create code that is also compatible with Visual Studio.
On Linux everything is OK, but on Visual Studio the build fails at "GL_UNSIGNED_SHORT_5_6_5" in the glTexImage2D(...) function.
Below is the general idea of what I want to do, which was inspired by this tutorial:
#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
//#include <GL/glext.h>
#include "SDL.h"
int brick;
float c=0.5;
float rx_min=0, ry_min=0;
float rx_max=1, ry_max=1;
unsigned int LoadTexture(const char* filename);
void DrawTexture(int object);
void setupmywindow();
void myDrawing();
void setupmywindow()
{
    glClearColor(1.0, 1.0, 1.0, 0);
    glColor3f(0.0, 0.0, 0.0);
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    gluOrtho2D(rx_min, ry_min, rx_max, ry_max);
    brick = LoadTexture("brick.bmp");
}

void DrawTexture(int object)
{
    glBindTexture(GL_TEXTURE_2D, object);
    glColor3f(c, c, c);
    glBegin(GL_QUADS);
    glTexCoord2f(0., 1.);
    glVertex2f(rx_min, ry_min);
    glTexCoord2f(0., 0.);
    glVertex2f(rx_min, ry_max);
    glTexCoord2f(1., 0.);
    glVertex2f(rx_max, ry_max);
    glTexCoord2f(1., 1.);
    glVertex2f(rx_max, ry_min);
    glEnd();
}

unsigned int LoadTexture(const char* filename)
{
    SDL_Surface* img = SDL_LoadBMP(filename);
    unsigned int id;
    glGenTextures(1, &id);
    glBindTexture(GL_TEXTURE_2D, id);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    SDL_FreeSurface(img);
    return id;
}

void myDrawing()
{
    glClear(GL_COLOR_BUFFER_BIT);
    DrawTexture(brick);
    glFlush();
}

int main(int argc, char **argv)
{
    printf("AUTH Computational Physics - Computer Graphics\n");
    printf("Project >>TestTexture.cpp\n");
    printf("--------------------------------------------------------\n");
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowPosition(50, 50);
    glutCreateWindow("Texture Test");
    setupmywindow();
    glutDisplayFunc(myDrawing);
    glutMainLoop();
    return 0;
}
The error is:
error C2065: 'GL_UNSIGNED_SHORT_5_6_5' : undeclared identifier
Here is the image that I try to load and it is configured as a bitmap (8bit 5 6 5) with GIMP 2.8
NOTE: When I uncomment #include <GL/glext.h>, which is not needed on Linux, I get the following message:
Unhandled exception at 0x00d1193f in testTesxture.exe: 0xC0000005: Access violation reading location 0x00000014.
Generally, if I save a bitmap image (for example with Paint), how can I tell which type I have to put (GL_UNSIGNED_SHORT_5_6_5, GL_UNSIGNED_BYTE, etc.)?
The problem is likely that Windows ships an older version of OpenGL than Linux, and this old OpenGL version does not have that specific identifier (and others, I'm sure). To get around this and any other possible version problems, I would use GLEW, which does the hard work for you.
On Windows, add these lines after the includes:
#ifndef GL_UNSIGNED_SHORT_5_6_5
#define GL_UNSIGNED_SHORT_5_6_5 0x8363
#endif
#ifndef GL_CLAMP_TO_EDGE
#define GL_CLAMP_TO_EDGE 0x812F
#endif
According to this video.
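On the last question (how to pick the type for a saved bitmap): one approach, sketched here under the assumption of SDL 1.2 where SDL_Surface carries its pixel format, is to inspect the loaded surface instead of guessing:

SDL_Surface* img = SDL_LoadBMP(filename);
GLenum type = GL_UNSIGNED_BYTE;          // common case: 24-bit BMP, 8 bits per channel
if (img->format->BitsPerPixel == 16) {
    type = GL_UNSIGNED_SHORT_5_6_5;      // 16-bit 5-6-5 files, e.g. as exported by GIMP
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, type, img->pixels);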