I have a problem loading a texture using the SDL library.

Usually I write my programs on Linux, but I am trying to create code that is also compatible with Visual Studio. On Linux everything is OK, but on Visual Studio the build fails on "GL_UNSIGNED_SHORT_5_6_5" in the glTexImage2D(...) call.

Below is a general idea of what I want to do, inspired by this tutorial:
#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
//#include <GL/glext.h>
#include "SDL.h"
int brick;
float c=0.5;
float rx_min=0, ry_min=0;
float rx_max=1, ry_max=1;
unsigned int LoadTexture(const char* filename);
void DrawTexture(int object);
void setupmywindow();
void myDrawing();
void setupmywindow()
{
glClearColor(1.0,1.0,1.0,0);
glColor3f(0.0, 0.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL);
gluOrtho2D(rx_min,ry_min, rx_max, ry_max);
brick = LoadTexture("brick.bmp");
}
void DrawTexture(int object)
{
glBindTexture(GL_TEXTURE_2D, object);
glColor3f(c,c,c);
glBegin(GL_QUADS);
glTexCoord2f(0., 1. );
glVertex2f( rx_min , ry_min );
glTexCoord2f(0., 0. );
glVertex2f( rx_min, ry_max );
glTexCoord2f(1., 0. );
glVertex2f( rx_max , ry_max );
glTexCoord2f(1., 1. );
glVertex2f( rx_max , ry_min );
glEnd();
}
unsigned int LoadTexture(const char* filename)
{
SDL_Surface* img=SDL_LoadBMP(filename);
unsigned int id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D,id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
SDL_FreeSurface(img);
return id;
}
void myDrawing()
{
glClear(GL_COLOR_BUFFER_BIT);
DrawTexture(brick);
glFlush();
}
int main(int argc, char **argv)
{
printf("AUTH Computational Physics - Computer Graphics\n");
printf("Project >>TestTexture.cpp\n");
printf("--------------------------------------------------------\n");
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB);
glutInitWindowPosition(50,50);
glutCreateWindow("Texture Test");
setupmywindow();
glutDisplayFunc(myDrawing);
glutMainLoop();
return 0;
}
The error is:
error C2065: 'GL_UNSIGNED_SHORT_5_6_5' : undeclared identifier
Here is the image that I am trying to load; it is saved as a bitmap (16-bit R5G6B5) with GIMP 2.8.

NOTE: When I uncomment #include <GL/glext.h>, which is not needed on Linux, I get the following message instead:
Unhandled exception at 0x00d1193f in testTesxture.exe: 0xC0000005: Access violation reading location 0x00000014.
Generally, if I save a bitmap image (for example with Paint), how can I tell which type I have to pass (GL_UNSIGNED_SHORT_5_6_5, GL_UNSIGNED_BYTE, etc.)?
The problem is likely that the OpenGL headers on Windows target an older version of OpenGL than those on Linux, and that older version does not declare this particular identifier (and surely others). To get around this and any other version problems, I would use GLEW, which does the hard work for you.
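For instance, a minimal sketch of pulling GLEW into the question's GLUT setup (the placement after glutCreateWindow is the important part; the error handling is my own addition):

#include <GL/glew.h>   /* must come before other GL headers */
#include <GL/glut.h>

/* ... */
glutCreateWindow("Texture Test");

/* GLEW needs a current GL context, so initialize it only after
   the window (and thus the context) exists. */
GLenum err = glewInit();
if (err != GLEW_OK) {
    fprintf(stderr, "glewInit failed: %s\n", glewGetErrorString(err));
    exit(1);
}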
Alternatively, on Windows, add these lines after the includes:
#ifndef GL_UNSIGNED_SHORT_5_6_5
#define GL_UNSIGNED_SHORT_5_6_5 0x8363
#endif
#ifndef GL_CLAMP_TO_EDGE
#define GL_CLAMP_TO_EDGE 0x812F
#endif
According to this video.
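As for telling, in general, which type to pass for a bitmap you saved: the loaded SDL_Surface describes its own pixel format, so you can derive the glTexImage2D arguments from it instead of hard-coding them. A minimal sketch (my own, not from the question's tutorial):

SDL_Surface* img = SDL_LoadBMP(filename);
if (!img) {
    /* A NULL surface (e.g. brick.bmp not found next to the .exe) would also
       explain an access violation near address 0 when img->w is read. */
    fprintf(stderr, "SDL_LoadBMP failed: %s\n", SDL_GetError());
    return 0;
}

GLenum format = GL_RGB;
GLenum type = GL_UNSIGNED_BYTE;
switch (img->format->BitsPerPixel) {
case 16: /* packed 5-6-5 */
    type = GL_UNSIGNED_SHORT_5_6_5;
    break;
case 24: /* 8 bits per channel; Rmask distinguishes RGB from BGR order */
    format = (img->format->Rmask == 0x000000ff) ? GL_RGB : GL_BGR;
    break;
case 32:
    format = (img->format->Rmask == 0x000000ff) ? GL_RGBA : GL_BGRA;
    break;
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, format, type, img->pixels);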
I wanted to draw some 2D on top of 3D so I could build a decent GUI. So I created the textures and so on.

The code compiles and generates no errors, but when I run the program everything goes right until I call this:
glGenFramebuffers(1, &fb);
Then this appears:
error 139 segmentation fault (core dumped).
Does someone know what's wrong with the code?
std::cout << "test1" << std::endl;
unsigned int fb;
glGenFramebuffers(1, &fb);
std::cout << "test2" << std::endl;
glBindRenderbuffer(GL_RENDERBUFFER, fb);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);
The "test1" text is displayed, but the "test2" is not.
CODE:
game.cpp
#include "game.h"
game::game(){
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Surface* screen = SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE|SDL_OPENGL);
    glClearColor(0.5, 0.5, 0.5, 1.0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45, 1000.0/600.0, 1.0, 500.0);
    glMatrixMode(GL_MODELVIEW);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
}

game::~game(){
    SDL_Quit();
}

void game::start(){
    Uint32 start;
    SDL_Event event;
    texture renderTexture = texture();
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1000, 600, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0);

    std::cerr << "test1" << std::endl;
    unsigned int fb;
    glGenFramebuffers(1, &fb);
    std::cerr << "test2" << std::endl;
    glBindRenderbuffer(GL_RENDERBUFFER, fb);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture.getId(), 0);

    bool running = true;
    while (running){
        start = SDL_GetTicks();
        while (SDL_PollEvent(&event)){
            switch (event.type){
            case SDL_QUIT:
                running = false;
                break;
            }
        }
        update();
        show(fb);
        showMenu();
        SDL_GL_SwapBuffers();
        if (1000/30 > (SDL_GetTicks()-start)){
            SDL_Delay(1000/30 - (SDL_GetTicks()-start));
        }
    }
}

void game::update(){
}

void game::show(unsigned int fb){
    glBindFramebuffer(GL_FRAMEBUFFER, fb);
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
}

void game::showMenu(){
    bindWindowAsRenderTarget();
    glViewport(0, 0, 1000, 600);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, 1000.0, 0.0, 600.0, -1.0, 1.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

void game::bindWindowAsRenderTarget(){
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glViewport(0, 0, 1000, 600);
}
game.h
#ifndef GAME_H_INCLUDED
#define GAME_H_INCLUDED

#include <iostream>
#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>
#include "texture.h"

class game{
    void update();
    void show(unsigned int fb);
    void showMenu();
    void bindWindowAsRenderTarget();
public:
    game();
    ~game();
    void start();
};

#endif // GAME_H_INCLUDED
main.cpp
#include "game.h"
int main(int argc, char** argv){
    game g;
    g.start();
    return 0;
}
texture.h
#ifndef TEXTURE_H_INCLUDED
#define TEXTURE_H_INCLUDED

#include <SDL/SDL.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GL/glu.h>

class texture{
    unsigned int id;
public:
    texture();
    ~texture();
    void loadImage(const char* filename);
    unsigned int getId();
};

#endif // TEXTURE_H_INCLUDED
texture.cpp
#include "texture.h"
texture::texture(){
    glGenTextures(1, &id);
    glBindTexture(GL_TEXTURE_2D, id);
}

texture::~texture(){
    glDeleteTextures(1, &id);
}

void texture::loadImage(const char* filename){
    SDL_Surface* img = SDL_LoadBMP(filename);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img->w, img->h, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, img->pixels);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    SDL_FreeSurface(img);
}

unsigned int texture::getId(){
    return id;
}
Dollars to donuts you're #include-ing a GL extension loader (GLEW, GLAD, etc.) and then:

1. Trying to init it before you have a current GL context (leaving function pointers like glGenFramebuffers() NULL), or
2. Forgetting to init it entirely, or
3. (Unlikely, unless you're doing something silly like trying to use OpenGL over Remote Desktop) correctly initing your extension loader but using a GL implementation that doesn't support core FBOs.
EDIT: Oh hey, it's #2.

You need to call glewInit() after SDL_SetVideoMode(). You should also use GLEW's version/extension checks to verify that the underlying GL implementation supports core FBOs (they went core in OpenGL 3.0, IIRC).
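A minimal sketch of that ordering in the question's game::game() constructor (the error handling and the extension check are my own additions):

game::game(){
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Surface* screen = SDL_SetVideoMode(1000, 600, 32, SDL_SWSURFACE|SDL_OPENGL);

    // GLEW can only resolve function pointers like glGenFramebuffers
    // once a GL context exists, i.e. after SDL_SetVideoMode() succeeds.
    GLenum err = glewInit();
    if (err != GLEW_OK){
        std::cerr << "glewInit failed: " << glewGetErrorString(err) << std::endl;
    }
    // Verify core FBO support before ever calling glGenFramebuffers().
    if (!GLEW_ARB_framebuffer_object){
        std::cerr << "Core framebuffer objects not supported" << std::endl;
    }

    // ... the rest of the GL setup from the question, unchanged ...
}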
Using various tutorials, examples, documentation, and forums online, I have typed out code that lets CUDA manipulate OpenGL textures so that they can be output to the screen. My method of displaying is to use a PBO and an allocated texture image of a uchar4 array. Despite all my attempts at fixing the problem, the texture will not show up on the 2D surface, and I cannot seem to pinpoint the problem.

These are all the things I have checked/done so far: I have created a PBO and registered it with CUDA, called cudaGraphicsResourceGetMappedPointer and the unmapping equivalent before and after the GPU function calls, made sure that glEnable is called for GL_TEXTURE_2D, called glDisable for any unnecessary state, and unbound textures/buffers when not in use. I have also reset SFML's OpenGL states in case SFML was the cause, and employed square textures. My OpenGL version and CUDA version support all the function calls I use.

There did not seem to be any errors within the program when I checked cudaError values and OpenGL errors.
I am not sure if this has something to do with it, but when I call:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
My quad does not seem to display.
I have mainly found inspiration from this website.
Thank you very much!
Here is my code:
Main.cpp
#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <SFML/Window.hpp>
#include <SFML/OpenGL.hpp>
#include <SFML/System.hpp>
#include <SFML/Graphics/RenderWindow.hpp>
#include "GeneralTypedef.h"
#include "OpenGLTest.cuh"
int main()
{
    // create the window
    sf::RenderWindow window(sf::VideoMode(1024, 1024), "OpenGL");
    //window.setVerticalSyncEnabled(true);
    sf::Vector2u windowSize;
    windowSize = sf::Vector2u(window.getSize());

    bool running = true;
    glewInit();
    window.resetGLStates();
    std::printf("OpenGL: %s:", glGetString(GL_VERSION));

    // We will not be using SFML's gl states.
    OpenGLTest* test = new OpenGLTest(window.getSize());
    sf::Time time;

    while (running)
    {
        // handle events
        sf::Event event;
        while (window.pollEvent(event))
        {
            if (event.type == sf::Event::Closed)
            {
                // end the program
                running = false;
            }
            else if (event.type == sf::Event::Resized)
            {
                // adjust the viewport when the window is resized
                glViewport(0, 0, event.size.width, event.size.height);
                windowSize = window.getSize();
            }
        }

        // clear the buffers
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        test->createFrame(time.asMicroseconds());
        test->drawFrame();
        window.display();
    }

    // release resources...
    delete test;

    return 0;
}
OpenGLTest.cuh
#ifndef OPENGLTEST_CUH
#define OPENGLTEST_CUH

#include <GL/glew.h>
#include <windows.h>
#include <GL/GL.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "GeneralTypedef.h"

class OpenGLTest
{
public:
    uchar4* image;
    GLuint gltexture;
    GLuint pbo;
    cudaGraphicsResource_t cudaPBO;
    uchar4* d_textureBufferData;
    sf::Vector2u windowSize;

    OpenGLTest(sf::Vector2u windowSize)
    {
        this->windowSize = sf::Vector2u(windowSize);
        this->setupOpenGL();
    };

    ~OpenGLTest()
    {
        delete image;
        image == nullptr;
        cudaFree(d_textureBufferData);
        d_textureBufferData == nullptr;
        glDeleteTextures(1, &gltexture);
    }

    void drawFrame();
    void createFrame(float time);

private:
    void setupOpenGL();
};

#endif //OPENGLTEST_CUH
OpenGLTest.cu
#include "OpenGLTest.cuh"
__global__ void createGPUTexture(uchar4* d_texture)
{
    uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
    d_texture[pixelID].x = 0;
    d_texture[pixelID].y = 1;
    d_texture[pixelID].z = 1;
    d_texture[pixelID].w = 0;
}

__global__ void wow(uchar4* pos, unsigned int width, unsigned int height, float time)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int x = index % width;
    unsigned int y = index / width;

    if (index < width*height) {
        unsigned char r = (x + (int)time) & 0xff;
        unsigned char g = (y + (int)time) & 0xff;
        unsigned char b = ((x + y) + (int)time) & 0xff;

        // Each thread writes one pixel location in the texture (texel)
        pos[index].w = 0;
        pos[index].x = r;
        pos[index].y = g;
        pos[index].z = b;
    }
}

void OpenGLTest::drawFrame()
{
    glColor3f(1.0f, 1.0f, 1.0f);
    glBindTexture(GL_TEXTURE_2D, gltexture);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, windowSize.x, windowSize.y, GL_RGBA, GL_UNSIGNED_BYTE, 0);

    glBegin(GL_QUADS);
        glTexCoord2f(0.0f, 0.0f);
        glVertex2f(0.0f, float(windowSize.y));
        glTexCoord2f(1.0f, 0.0f);
        glVertex2f(float(windowSize.x), float(windowSize.y));
        glTexCoord2f(1.0f, 1.0f);
        glVertex2f(float(windowSize.x), 0.0f);
        glTexCoord2f(0.0f, 1.0f);
        glVertex2f(0.0f, 0.0f);
    glEnd();
    glFlush();

    // Release
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    // Test Triangle
    /*
    glBegin(GL_TRIANGLES);
        glColor3f(0.1, 0.2, 0.3);
        glVertex2f(0, 0);
        glVertex2f(10, 0);
        glVertex2f(0, 100);
    glEnd();
    */
}

void OpenGLTest::createFrame(float time)
{
    cudaGraphicsMapResources(1, &cudaPBO, 0);
    size_t numBytes;
    cudaGraphicsResourceGetMappedPointer((void**)&d_textureBufferData, &numBytes, cudaPBO);

    int totalThreads = windowSize.x * windowSize.y;
    int nBlocks = totalThreads / 256;

    // Run code here.
    createGPUTexture<<<nBlocks, 256>>>(d_textureBufferData);
    //wow<<<nBlocks, 256>>>(d_textureBufferData, windowSize.x, windowSize.y, time);

    // Unmap mapping to PBO so that OpenGL can access.
    cudaGraphicsUnmapResources(1, &cudaPBO, 0);
}

void OpenGLTest::setupOpenGL()
{
    image = new uchar4[1024*1024];

    glViewport(0, 0, windowSize.x, windowSize.y);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0, windowSize.x, windowSize.y, 0.0, -1.0, 1.0);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);

    // Unbind any textures from previous.
    glBindTexture(GL_TEXTURE_2D, 0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);

    // Create new textures.
    glGenTextures(1, &gltexture);
    glBindTexture(GL_TEXTURE_2D, gltexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

    // Create image with same resolution as window.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, windowSize.x, windowSize.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);

    // Create pixel buffer object.
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, windowSize.x * windowSize.y * sizeof(uchar4), image, GL_STREAM_COPY);
    cudaGraphicsGLRegisterBuffer(&cudaPBO, pbo, cudaGraphicsMapFlagsNone);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
GeneralTypedef.h
#ifndef GENERALTYPEDEF_CUH
#define GENERALTYPEDEF_CUH
typedef unsigned int uint;
#endif // GENERALTYPEDEF_CUH
After rewriting the entire code and understanding it better, I figured out the reason: the color components of a uchar4 in the kernel are mapped from 0-255, and the w component is the alpha (transparency). As such, it should be set to 255 for the image to show. I hope this helps those who may have the same problem; some sites set this value very low as well.
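For illustration, a sketch of the corrected kernel along those lines (my reconstruction, not the poster's exact fix): the original wrote component values of 0 and 1, which are nearly black out of 255, and w = 0, which is fully transparent.

__global__ void createGPUTexture(uchar4* d_texture)
{
    uint pixelID = blockIdx.x*blockDim.x + threadIdx.x;
    // Components are bytes in [0, 255]; w is alpha and must be 255
    // for the pixel to show as fully opaque.
    d_texture[pixelID] = make_uchar4(0, 255, 255, 255);
}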
OK, before anyone marks this question as a duplicate: I have looked at What is an undefined reference/unresolved external symbol error and how do I fix it? and many other online posts. I've tried every solution I've come across, but I still can't fix these errors:
1>SOIL.lib(stb_image_aug.o) : error LNK2019: unresolved external symbol __alloca referenced in function _stbi_zlib_decode_noheader_buffer
1>SOIL.lib(image_helper.o) : error LNK2019: unresolved external symbol _sqrtf referenced in function _RGBE_to_RGBdivA2
I'm using Visual Studio 2012 on Windows 8. I've tried rebuilding the library and I have quintuple-checked all my includes and directories. Here are the SOIL includes/directories I have:
>Configuration Properties->VC++ Directories->Include Directories: "C:\SOIL\Simple OpenGL Image Library\src
>Configuration Properties->VC++ Directories->Library Directories: "C:\SOIL\Simple OpenGL Image Library\lib
>Congiguration Properties->Linker->Input->Additional Dependencies: "SOIL.lib"
And here's my code. The SOIL_load_image function is what's causing the errors:
#include <Windows.h>
#include <GL/glut.h>
#include "glext.h"
#include <SOIL.h>
void glEnable2D( void );
void display();
void glDisable2D( void );
GLuint tex;
/* Main function: GLUT runs as a console application starting at main() */
int main(int argc, char** argv)
{
    glutInit(&argc, argv);                 // Initialize GLUT
    glutCreateWindow("OpenGL Setup Test"); // Create a window with the given title
    glutInitWindowSize(320, 320);          // Set the window's initial width & height
    glutInitWindowPosition(50, 50);        // Position the window's initial top-left corner

    //Texture
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
    float color[] = { 1.0f, 0.0f, 0.0f, 1.0f };
    glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, color);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    int width, height;
    unsigned char* image = SOIL_load_image("Resources/Sprites/playerCharacter.png", &width, &height, 0, SOIL_LOAD_RGB);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
    SOIL_free_image_data(image);
    //Texture

    glutDisplayFunc(display); // Register display callback handler for window re-paint
    glutMainLoop();           // Enter the infinite event-processing loop
    return 0;
}
I've been working on this all day and haven't been able to figure out what I'm doing wrong, and it's been extremely frustrating. Note: I know the original name of SOIL.lib is "libSOIL.a". I tried working with it under that name and got the same errors.
You can build the VC8 or VC9 solution files yourself in VS2012/13. Just make sure you are using the Win32 configuration if you are building a 32-bit application; change the configuration to x64 if you are building a 64-bit application. Once the projects are built, link your OpenGL project against these generated SOIL.lib files and you will be good to go.
I am trying to set up a little CUDA/GL interop example. I have looked around on the internet and found some tutorials with helpful material.

All I want is to produce a texture in CUDA and draw it with OpenGL.

The source I have now crashes my MacBook Pro every time I run it, so I thought it would be really helpful if somebody could cast an eye over it.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// includes, cuda
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
#include <timer.h> // timing functions
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#include <helper_cuda_gl.h> // helper functions for CUDA/GL interop
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
void initGLandCUDA() {
    int argc = 0;
    char** argv = NULL;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("CUDA GL Interop");

    glewInit();

    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}

__global__ void renderingKernel(cudaSurfaceObject_t image) {
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    uchar4 color = make_uchar4(0.f, 0.f, 0.f, .3f);
    // if I write to 0,0 and not x,y, the computer does not crash, but there is no black pixel at 0,0
    surf2Dwrite(color, image, x, y, cudaBoundaryModeClamp);
}

void callCUDAKernel(cudaSurfaceObject_t image) {
    dim3 block(8, 1, 1);
    dim3 grid(8, 1, 1);
    renderingKernel<<<grid, block>>>(image);
}

void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource);
    {
        cudaArray_t viewCudaArray;
        checkCudaErrors(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));
        cudaResourceDesc viewCudaArrayResourceDesc;
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }
        cudaSurfaceObject_t viewCudaSurfaceObject;
        checkCudaErrors(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
        callCUDAKernel(viewCudaSurfaceObject);
        checkCudaErrors(cudaDestroySurfaceObject(viewCudaSurfaceObject));
    }
    checkCudaErrors(cudaGraphicsUnmapResources(1, &viewCudaResource));
    checkCudaErrors(cudaStreamSynchronize(0));

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    glFinish();
}

int main(int argc, char **argv)
{
    initGLandCUDA();

    glutDisplayFunc(renderFrame);
    //glutKeyboardFunc(keyboard);
    //glutMouseFunc(mouse);

    glutMainLoop();
}
It seems like some kind of out-of-range error, but I am currently out of ideas (by the way, this is compute capability 3.0, running on an NVIDIA 650M).

Edit:

By crashing I mean: crashing. The computer freezes; I can't move my mouse and I have to reboot.

Yes, I have looked at all the examples; they are not exactly what I want, and changing them to do what I want results in this problem. If there were any other help in the manual or anywhere else that I had found, I would not bother asking for help.

You need to link with the cuda_runtime and glut libs.
Below is a working version of your code. The issues in your code were:

1. Your kernel depended on being launched with 512x512 threads, but you were only launching it with 64x1.
2. Your kernel was writing to unaligned addresses with surf2Dwrite(): its x coordinate is given in bytes, not texels.
3. You were setting up double buffering in OpenGL but never swapping the buffers (glutSwapBuffers()).
4. You were initializing a uchar4 with floats.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// OpenGL Graphics includes
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
const unsigned int window_width = 512;
const unsigned int window_height = 512;
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;
#define check(ans) { _check((ans), __FILE__, __LINE__); }
inline void _check(cudaError_t code, char *file, int line)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}

void initGLandCUDA() {
    int argc = 0;
    char** argv = NULL;
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("CUDA GL Interop");

    glewInit();

    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, window_width, window_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    glBindTexture(GL_TEXTURE_2D, 0);

    check(cudaGLSetGLDevice(0));
    check(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard));
}

__global__ void renderingKernel(cudaSurfaceObject_t image) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    uchar4 color = make_uchar4(x / 2, y / 2, 0, 127);
    surf2Dwrite(color, image, x * sizeof(color), y, cudaBoundaryModeClamp);
}

void callCUDAKernel(cudaSurfaceObject_t image) {
    dim3 block(256, 1, 1);
    dim3 grid(2, 512, 1);
    renderingKernel<<<grid, block>>>(image);
    check(cudaPeekAtLastError());
    check(cudaDeviceSynchronize());
}

void renderFrame() {
    check(cudaGraphicsMapResources(1, &viewCudaResource));

    cudaArray_t viewCudaArray;
    check(cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0));

    cudaResourceDesc viewCudaArrayResourceDesc;
    memset(&viewCudaArrayResourceDesc, 0, sizeof(viewCudaArrayResourceDesc));
    viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
    viewCudaArrayResourceDesc.res.array.array = viewCudaArray;

    cudaSurfaceObject_t viewCudaSurfaceObject;
    check(cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc));
    callCUDAKernel(viewCudaSurfaceObject);
    check(cudaDestroySurfaceObject(viewCudaSurfaceObject));

    check(cudaGraphicsUnmapResources(1, &viewCudaResource));
    check(cudaStreamSynchronize(0));

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    glFinish();
}

int main(int argc, char **argv)
{
    initGLandCUDA();

    glutDisplayFunc(renderFrame);
    //glutKeyboardFunc(keyboard);
    //glutMouseFunc(mouse);

    glutMainLoop();
}
Output: (screenshot of the resulting image omitted)
When I use GL3W, I cannot load textures: they appear blank or messed up.

I wanted an OpenGL 4.2 context in SDL 1.3, so I decided to use GL3W, as GLEW used deprecated functions. Everything seems to work fine; however, when I try to load a texture, it either gets messed up (randomly colored lines) or simply ends up blank (black without alpha, transparent with). Everything else I've tried so far has worked (shaders, VAOs, VBOs, etc.).

I wrote the simplest example I could come up with to illustrate:
#include <SDL.h>
#include <SDL_image.h>
#include <GL3/gl3w.h>
#include <gl/gl.h>
#include <gl/glu.h>
int main(int argc, char* argv[]) {
    SDL_Init(SDL_INIT_EVERYTHING);

    SDL_WindowID mainWindow = SDL_CreateWindow("Test", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 800, 600, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
    SDL_GLContext mainContext = SDL_GL_CreateContext(mainWindow);
    SDL_GL_MakeCurrent(mainWindow, mainContext);

    gl3wInit();

    GLuint id;
    glGenTextures(1, &id);
    glBindTexture(GL_TEXTURE_2D, id);

    SDL_Surface* test2 = IMG_Load("test.png");
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, test2->w, test2->h, 0, GL_RGBA, GL_UNSIGNED_BYTE, test2->pixels);

    // Loop to keep the window open while debugging in gDEBugger
    while (1) {
        SDL_GL_SwapWindow(mainWindow);
    }

    return 0;
}
I don't know how relevant it is, but since gl3w is generated by a python script I'll include it (external links because of length):
gl3w.c: http://pastebin.com/T5GNdJL8
gl3w.h: http://pastebin.com/yU2EzBZP
gl3.h: http://pastebin.com/0uCgB8d1
If I remove #include <GL3/gl3w.h> and the gl3wInit(); call, the texture is successfully loaded.