Both my local computer and my EC2 server run Ubuntu 14.04. Suppose I am testing CUDA-OpenGL interop code like the one below.
Test.cu
#include <iostream>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <cuda_gl_interop.h>
__global__ static void CUDAKernelTEST(float *data){
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int mx = gridDim.x * blockDim.x;
    data[y * mx + x] = 0.5;
}

GLFWwindow *glfw_window_;

void Setup(){
    if (!glfwInit()) exit(EXIT_FAILURE);
    glfwWindowHint(GLFW_VISIBLE, GL_FALSE);
    glfw_window_ = glfwCreateWindow(10, 10, "", NULL, NULL);
    if (!glfw_window_) glfwTerminate();
    glfwMakeContextCurrent(glfw_window_);
    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK) exit(EXIT_FAILURE);
}

void TearDown(){
    glfwDestroyWindow(glfw_window_);
    glfwTerminate();
}

int main(){
    Setup();

    GLuint id;
    glGenBuffers(1, &id);
    glBindBuffer(GL_ARRAY_BUFFER, id);
    glBufferData(GL_ARRAY_BUFFER, 3 * 24 * sizeof(GLfloat), 0, GL_STATIC_DRAW);

    cudaGraphicsResource *vbo_res;
    cudaGraphicsGLRegisterBuffer(&vbo_res, id, cudaGraphicsMapFlagsWriteDiscard);
    cudaGraphicsMapResources(1, &vbo_res, 0);

    float *test;
    size_t size;
    cudaGraphicsResourceGetMappedPointer(
        reinterpret_cast<void **>(&test), &size, vbo_res);

    dim3 blks(1, 1);
    dim3 threads(72, 1);
    CUDAKernelTEST<<<blks, threads>>>(test);
    cudaDeviceSynchronize();

    cudaGraphicsUnmapResources(1, &vbo_res, 0);

    // do some more with OpenGL

    std::cout << "you passed the test" << std::endl;

    TearDown();
    return 0;
}
The current approach is to create a hidden window and a context. The code compiles and runs fine on my local machine. However, glfwInit() returns GL_FALSE when run on EC2. If I log the messages sent to the error callback, it shows "X11: The DISPLAY environment variable is missing", which suggests a display monitor needs to be connected for it to work.
I tried replacing the GLFW-based Setup and TearDown with SDL or GLX, and both return similar errors, seemingly also requiring an attached display monitor.
I also tried running the code with Xvfb and Xdummy, which supposedly fake a monitor, but with Xvfb I got the error "Xlib: extension "GLX" missing on display ":99"", and with Xdummy "Fatal server error: (EE) no screens found (EE)".
I can't be the first one attempting to unit test OpenGL-related code on EC2, but I can't find any solutions after googling around. Please advise, thank you so much.
The DISPLAY variable has nothing to do with connected monitors. This environment variable tells X11 client programs which X11 server to talk to. On Linux and Unix systems the X11 server is the de-facto standard graphics system and window multiplexer. It is also the host of the GPU driver.
With your program expecting to talk to an X11 server, you must provide it with a server that has the necessary capabilities. In your case that means an Xorg server with support for the GLX protocol (so that OpenGL can be used) and, because you're using CUDA, it should host the NVIDIA driver. The only X11 server that can do that is the full-blown Xorg server with the nvidia driver loaded. Xvfb and Xdummy can do neither.
So if you really want to talk X11, you'll have to set up an Xorg server with the nvidia driver. Never mind that there are no displays connected; you can coax the driver into headless operation just fine (it may take some convincing though).
However, there is now a better way: NVIDIA's recent driver releases include support for creating a fully headless, off-screen OpenGL context on the GPU, with full support for CUDA-OpenGL interop: http://devblogs.nvidia.com/parallelforall/egl-eye-opengl-visualization-without-x-server/
It boils down to creating the OpenGL context with EGL instead of X11/GLX, using a display device configured for headless operation by selecting the PBuffer surface type. The essential code outline looks like this (taken directly from the NVIDIA code example):
#include <EGL/egl.h>
static const EGLint configAttribs[] = {
    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,    // make this off-screen
    EGL_BLUE_SIZE, 8,
    EGL_GREEN_SIZE, 8,
    EGL_RED_SIZE, 8,
    EGL_DEPTH_SIZE, 8,
    EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
    EGL_NONE
};

static const int pbufferWidth = 9;
static const int pbufferHeight = 9;

static const EGLint pbufferAttribs[] = {
    EGL_WIDTH, pbufferWidth,
    EGL_HEIGHT, pbufferHeight,
    EGL_NONE,
};

int main(int argc, char *argv[])
{
    // 1. Initialize EGL
    EGLDisplay eglDpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);

    EGLint major, minor;
    eglInitialize(eglDpy, &major, &minor);

    // 2. Select an appropriate configuration
    EGLint numConfigs;
    EGLConfig eglCfg;
    eglChooseConfig(eglDpy, configAttribs, &eglCfg, 1, &numConfigs);

    // 3. Create a surface
    EGLSurface eglSurf = eglCreatePbufferSurface(eglDpy, eglCfg, pbufferAttribs);

    // 4. Bind the API
    eglBindAPI(EGL_OPENGL_API);

    // 5. Create a context and make it current
    EGLContext eglCtx = eglCreateContext(eglDpy, eglCfg, EGL_NO_CONTEXT, NULL);
    eglMakeCurrent(eglDpy, eglSurf, eglSurf, eglCtx);

    // from now on use your OpenGL context

    // 6. Terminate EGL when finished
    eglTerminate(eglDpy);
    return 0;
}
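For the original question, the GLFW-based Setup()/TearDown() pair can then be swapped for an EGL-based one. The following is an untested sketch that combines the EGL snippet above with the question's code (error checking is mostly omitted, and the variable names are new); note that GLEW may need to be built with EGL support, or replaced with another loader, for glewInit() to succeed without GLX:
#include <cstdlib>
#include <GL/glew.h>
#include <EGL/egl.h>

static EGLDisplay egl_display_;
static EGLSurface egl_surface_;
static EGLContext egl_context_;

void Setup() {
    static const EGLint config_attribs[] = {
        EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
        EGL_NONE
    };
    static const EGLint pbuffer_attribs[] = {EGL_WIDTH, 10, EGL_HEIGHT, 10, EGL_NONE};

    egl_display_ = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (egl_display_ == EGL_NO_DISPLAY) exit(EXIT_FAILURE);
    if (!eglInitialize(egl_display_, NULL, NULL)) exit(EXIT_FAILURE);

    EGLint num_configs;
    EGLConfig config;
    eglChooseConfig(egl_display_, config_attribs, &config, 1, &num_configs);
    egl_surface_ = eglCreatePbufferSurface(egl_display_, config, pbuffer_attribs);

    eglBindAPI(EGL_OPENGL_API);
    egl_context_ = eglCreateContext(egl_display_, config, EGL_NO_CONTEXT, NULL);
    eglMakeCurrent(egl_display_, egl_surface_, egl_surface_, egl_context_);

    // Load GL entry points as before.
    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK) exit(EXIT_FAILURE);
}

void TearDown() {
    eglMakeCurrent(egl_display_, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
    eglDestroyContext(egl_display_, egl_context_);
    eglDestroySurface(egl_display_, egl_surface_);
    eglTerminate(egl_display_);
}
The rest of the test (buffer creation, cudaGraphicsGLRegisterBuffer, kernel launch) would stay exactly as in the question.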
@datenwolf: unfortunately, the NVIDIA example you provide above won't run without an X11 server running. AFAIK, libEGL-nvidia (on either Linux or BSD) is linked against libX11:
$ ldd libEGL-NVIDIA.so.1
/usr/X11R6/lib/libEGL-NVIDIA.so.1:
libthr.so.3 => /lib/libthr.so.3 (0x801302000)
librt.so.1 => /usr/lib/librt.so.1 (0x80152a000)
libm.so.5 => /lib/libm.so.5 (0x80172f000)
libc.so.7 => /lib/libc.so.7 (0x800824000)
libnvidia-glsi.so.1 => /usr/X11R6/lib/libnvidia-glsi.so.1 (0x80195a000)
libX11.so.6 => /usr/X11R6/lib/libX11.so.6 (0x801bdf000)
libXext.so.6 => /usr/X11R6/lib/libXext.so.6 (0x801f1f000)
libxcb.so.1 => /usr/X11R6/lib/libxcb.so.1 (0x802130000)
libXau.so.6 => /usr/X11R6/lib/libXau.so.6 (0x802356000)
libXdmcp.so.6 => /usr/X11R6/lib/libXdmcp.so.6 (0x802559000)
and there's no way to change this (NVIDIA ships its drivers precompiled).
So if you compile NVIDIA's example like this (with either the ES or the GL API):
$ gcc egltest.c -o egltest -lEGL
you will get this (the same happens whether you use GLESx or GL):
egltest:
libEGL.so.1 => /usr/X11R6/lib/libEGL-NVIDIA.so.1 (0x800823000)
libc.so.7 => /lib/libc.so.7 (0x800b25000)
libthr.so.3 => /lib/libthr.so.3 (0x800edd000)
librt.so.1 => /usr/lib/librt.so.1 (0x801105000)
libm.so.5 => /lib/libm.so.5 (0x80130a000)
libnvidia-glsi.so.1 => /usr/X11R6/lib/libnvidia-glsi.so.1 (0x801535000)
libX11.so.6 => /usr/X11R6/lib/libX11.so.6 (0x8017ba000)
libXext.so.6 => /usr/X11R6/lib/libXext.so.6 (0x801afa000)
libxcb.so.1 => /usr/X11R6/lib/libxcb.so.1 (0x801d0b000)
libXau.so.6 => /usr/X11R6/lib/libXau.so.6 (0x801f31000)
libXdmcp.so.6 => /usr/X11R6/lib/libXdmcp.so.6 (0x802134000)
Perhaps it would be more accurate to name NVIDIA's EGL library "EGLX", because it uses X11 and cannot run without X.
Caveat: as your example shows, NVIDIA's EGL can bind to the GL API (see the EGL_OPENGL_BIT attribute) only from the v355 drivers onward. With earlier versions you can bind to GLES only (i.e. use EGL_OPENGL_ESx_BIT instead of EGL_OPENGL_BIT).
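For reference, here is a minimal, untested sketch of the GLES-only variant; the attribute and API names used are standard EGL, not NVIDIA-specific:
#include <EGL/egl.h>

/* Untested sketch: targeting GLES instead of desktop GL with the example above. */
static const EGLint configAttribsES[] = {
    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
    EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,   /* instead of EGL_OPENGL_BIT */
    EGL_NONE
};
static const EGLint contextAttribsES[] = {
    EGL_CONTEXT_CLIENT_VERSION, 2,             /* ask for a GLES 2 context */
    EGL_NONE
};

int main(void)
{
    EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(dpy, NULL, NULL);

    EGLint numConfigs;
    EGLConfig cfg;
    eglChooseConfig(dpy, configAttribsES, &cfg, 1, &numConfigs);

    eglBindAPI(EGL_OPENGL_ES_API);             /* instead of EGL_OPENGL_API */
    EGLContext ctx = eglCreateContext(dpy, cfg, EGL_NO_CONTEXT, contextAttribsES);
    (void)ctx;  /* create a pbuffer surface and make current as in the example above */

    eglTerminate(dpy);
    return 0;
}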
The only distro I know of that could run a native window/drawable straight on the Linux console - meaning without any X server or Wayland running - was Raspbian for the RPi B, which ships the 'dispmanx' library providing easy access to the GPU/framebuffer through EGL (GLES2 API only).
I am trying to run a simple example that sets up an EGL context in a Docker container. However, I keep getting this error message:
Detected 0 devices
terminate called after throwing an instance of 'std::runtime_error'
what(): EGL error 0x300c at eglGetDisplay
Aborted
Basically, eglQueryDevicesEXT returns 0 devices, and eglGetPlatformDisplayEXT returns error code 0x300c (EGL_BAD_PARAMETER).
I have tried on:
Ubuntu 16.04 docker on a Macbook Pro
Ubuntu 16.04 docker on a Ubuntu 16.04 server with Nvidia GPU
In these Docker environments, I installed OpenGL and EGL using apt-get install libgl1-mesa-dev and apt-get install libegl1-mesa-dev. CMake can find the components GL::GL and EGL::EGL.
I have tried linking to libEGL.so both manually and using find_package in CMake.
This is driving me crazy! I can't figure out why EGL can't detect any devices. How can I resolve this error?
Here is my full code. I have also tried EGLDisplay eglDpy = eglGetDisplay(EGL_DEFAULT_DISPLAY); which gives me the same error. Thanks!
#include <string>
#include <sstream>
#include <stdexcept>
#include <cstdio>

#include <EGL/egl.h>
#include <EGL/eglext.h>

void assertEGLError(const std::string& msg) {
    EGLint error = eglGetError();
    if (error != EGL_SUCCESS) {
        std::stringstream s;
        s << "EGL error 0x" << std::hex << error << " at " << msg;
        throw std::runtime_error(s.str());
    }
}

int main(int argc, char *argv[])
{
    // 1. Initialize EGL
    // EGLDisplay eglDpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    int deviceID = 0; // TODO hardcode
    EGLDisplay eglDpy;
    EGLConfig config;
    EGLContext context;
    EGLint num_config;

    static const int MAX_DEVICES = 16;
    EGLDeviceEXT eglDevs[MAX_DEVICES];
    EGLint numDevices;

    PFNEGLQUERYDEVICESEXTPROC eglQueryDevicesEXT =
        (PFNEGLQUERYDEVICESEXTPROC)eglGetProcAddress("eglQueryDevicesEXT");
    eglQueryDevicesEXT(MAX_DEVICES, eglDevs, &numDevices);
    printf("Detected %d devices\n", numDevices);

    PFNEGLGETPLATFORMDISPLAYEXTPROC eglGetPlatformDisplayEXT =
        (PFNEGLGETPLATFORMDISPLAYEXTPROC)eglGetProcAddress("eglGetPlatformDisplayEXT");

    // Choose device by deviceID
    eglDpy = eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, eglDevs[deviceID], nullptr);
    assertEGLError("eglGetDisplay");

    return 0;
}
OMG, I finally solved this problem. It turned out I was linking against the wrong library on the server.
I found the answer in this post: https://forums.developer.nvidia.com/t/problem-with-opengl-visualization-without-an-x-server/73204/15
The key is to link against the libraries in /usr/lib/nvidia-410/, not the system default /usr/local/lib/x86_64-linux-gnu/libEGL.so.
The new CMake that works:
target_link_libraries(sandbox PRIVATE /usr/lib/nvidia-410/libEGL.so)
target_link_libraries(sandbox PRIVATE /usr/lib/nvidia-410/libGLX.so)
target_link_libraries(sandbox PRIVATE /usr/lib/nvidia-410/libOpenGL.so)
Previously I used CMake's FindOpenGL module to search for the link libraries, which does not work:
find_package(OpenGL REQUIRED COMPONENTS OpenGL EGL GLX)
include_directories(${OPENGL_INCLUDE_DIRS})
if(OPENGL_FOUND)
message("Found OpenGL in the current environment!")
else()
message("Error: No OpenGL found.")
endif()
message("OpenGL include dirs" )
message("${OPENGL_INCLUDE_DIR}")
message("EGL include dirs" )
message("${OPENGL_EGL_INCLUDE_DIRS}")
if (OpenGL_EGL_FOUND)
message("EGL Found!")
else()
message("EGL Not Found!")
endif()
message(${OPENGL_egl_LIBRARY})
message(${OPENGL_glx_LIBRARY})
message(${OPENGL_opengl_LIBRARY})
Note again: this does not work!
/usr/local/lib/x86_64-linux-gnu/libEGL.so
/usr/local/lib/x86_64-linux-gnu/libGLX.so
/usr/local/lib/x86_64-linux-gnu/libOpenGL.so
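If you prefer to keep find_package, one possible (untested) workaround is to pre-seed the cache variables that FindOpenGL uses, so it resolves to the driver's GLVND libraries instead of the copies above. The paths below are an assumption based on the same /usr/lib/nvidia-410 install; adjust to your driver version:
# Untested sketch: steer FindOpenGL towards the driver's libraries by seeding
# its cache variables before the find_package call.
set(OpenGL_GL_PREFERENCE GLVND)
set(OPENGL_egl_LIBRARY    /usr/lib/nvidia-410/libEGL.so    CACHE FILEPATH "" FORCE)
set(OPENGL_glx_LIBRARY    /usr/lib/nvidia-410/libGLX.so    CACHE FILEPATH "" FORCE)
set(OPENGL_opengl_LIBRARY /usr/lib/nvidia-410/libOpenGL.so CACHE FILEPATH "" FORCE)

find_package(OpenGL REQUIRED COMPONENTS OpenGL EGL GLX)
target_link_libraries(sandbox PRIVATE OpenGL::OpenGL OpenGL::EGL)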
After my first successful attempt at a 3D engine using Java and OpenGL (LWJGL3), I have decided to try my hand at Vulkan, using C++.
I have barely any experience with C/C++ and I am aware of the steep learning curve of Vulkan. This is however not a problem.
I decided to follow this tutorial: https://vulkan-tutorial.com/Introduction
It showed me how to create a new Vulkan project using Xcode (as I am on macOS Mojave). I would, however, like to continue the rest of the tutorial using CLion, as I will be switching between multiple operating systems.
I tried my hand at creating a CLion project and succeeded in making my first CMakeLists file; however, something seems to be wrong. The file currently consists of the following:
cmake_minimum_required(VERSION 3.12)
project(VulkanTesting)
set(CMAKE_CXX_STANDARD 14)
add_executable(VulkanTesting main.cpp)
include_directories(/usr/local/include)
include_directories(/Users/[username]/Documents/Vulkan/SDK/vulkansdk-macos-1.1.92.1/macOS/include)
target_link_libraries(VulkanTesting /usr/local/lib/libglfw.3.3.dylib)
target_link_libraries(VulkanTesting /Users/[username]/Documents/Vulkan/SDK/vulkansdk-macos-1.1.92.1/macOS/lib/libvulkan.1.dylib)
target_link_libraries(VulkanTesting /Users/[username]/Documents/Vulkan/SDK/vulkansdk-macos-1.1.92.1/macOS/lib/libvulkan.1.1.92.dylib)
# Don't know if I need the next two lines
link_directories(/usr/local/lib)
link_directories(/Users/[username]/Documents/Vulkan/SDK/vulkansdk-macos-1.1.92.1/macOS/lib)
The reason I showed the above file will become apparent in the question.
The 'Program' so far is the following:
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
#include <iostream>
#include <stdexcept>
#include <functional>
#include <cstdlib>
#include <vector>
const int WIDTH = 800;
const int HEIGHT = 600;
class HelloTriangleApplication {
public:
    void run() {
        initWindow();
        initVulkan();
        mainLoop();
        cleanup();
    }

private:
    GLFWwindow* window;
    VkInstance instance;

    void initWindow(){
        glfwInit();
        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
        glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
        window = glfwCreateWindow(WIDTH, HEIGHT, "My first Vulkan window", nullptr, nullptr);
    }

    void initVulkan() {
        createInstance();
    }

    void createInstance(){
        // Instantiate Application Info
        VkApplicationInfo applicationInfo = {};
        applicationInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
        applicationInfo.pApplicationName = "Hello Triangle";
        applicationInfo.applicationVersion = VK_MAKE_VERSION(1,0,0);
        applicationInfo.pEngineName = "No Engine";
        applicationInfo.engineVersion = VK_MAKE_VERSION(1,0,0);
        applicationInfo.apiVersion = VK_API_VERSION_1_0;

        // Instantiate Instance Creation Info
        VkInstanceCreateInfo createInfo = {};
        createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
        createInfo.pApplicationInfo = &applicationInfo;

        // Get GLFW platform specific extensions
        uint32_t glfwExtensionCount = 0;
        const char** glfwExtensions;
        glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);

        // Fill in required extensions in Instance Creation Info
        createInfo.enabledExtensionCount = glfwExtensionCount;
        createInfo.ppEnabledExtensionNames = glfwExtensions;

        // For validation layers, this is a later step in the tutorial.
        createInfo.enabledLayerCount = 0;

        // Create the Vulkan instance, and check if it was successful.
        VkResult result = vkCreateInstance(&createInfo, nullptr, &instance);
        if(result != VK_SUCCESS){
            std::cout << "glfwExtensionCount: " << glfwExtensionCount << "\n";
            std::cout << "glfwExtensionNames: " << &glfwExtensions << "\n";
            std::cout << "result: " << result << "\n";
            throw std::runtime_error("Failed to create Vulkan Instance");
        }
    }

    void mainLoop() {
        while(!glfwWindowShouldClose(window)){
            glfwPollEvents();
        }
    }

    void cleanup() {
        glfwDestroyWindow(window);
        glfwTerminate();
    }
};

int main() {
    HelloTriangleApplication app;

    try {
        app.run();
    } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
The problem I am having is that when I try to run the program, it will not create a VkInstance; the function returns VK_ERROR_INCOMPATIBLE_DRIVER. I doubt that the driver is actually incompatible: for one, I have run the demo applications that came with the Vulkan SDK, and for another, I have been able to run the exact same 'program' in Xcode. When I investigated a bit further, I noticed that glfwGetRequiredInstanceExtensions returns no extensions when the program is run from CLion like this, but does return one in the Xcode equivalent.
This all leads me to believe that I have done something wrong in linking the libraries/frameworks in the CMake file, because I am aware that Vulkan is not directly supported on macOS but instead (somehow?) passes through a layer that communicates with Metal.
Do I need to specify a way for the program to pass its Vulkan functionality through a Metal layer, and is this done automagically in Xcode, or is there another problem with my approach?
Any help would be greatly appreciated!
You might want to look at the macOS Getting Started Guide on the LunarXchange website and in your SDK. There is a section at the end that shows how to use CMake to build a Vulkan app and run it on macOS. You may also want to use the FindVulkan CMake module instead of manually setting the include directories and the target link libraries.
But my first guess about your specific problem is that you may not be setting the VK_ICD_FILENAMES environment variable. You are correct in your observation that there is no direct support for Vulkan on macOS. Instead, the support is provided by the MoltenVK library, which is treated as a Vulkan driver. But this "driver" is not installed in any system directory by the SDK. The SDK is just unzipped into your home directory structure, so you must tell the Vulkan loader where to find it via this environment variable.
Again, the CMake section at the end of the Getting Started Guide demonstrates the use of this environment variable. And the entire guide goes into additional detail about how the various Vulkan and MoltenVK components work.
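For reference, the environment setup from that guide boils down to a few exports. This is only a sketch based on the SDK path shown in the question's CMakeLists; the exact sub-paths vary between SDK versions, so check your unpacked SDK:
# Sketch: tell the Vulkan loader where MoltenVK's ICD manifest and the layers live.
export VULKAN_SDK=/Users/[username]/Documents/Vulkan/SDK/vulkansdk-macos-1.1.92.1/macOS
export VK_ICD_FILENAMES=$VULKAN_SDK/etc/vulkan/icd.d/MoltenVK_icd.json
export VK_LAYER_PATH=$VULKAN_SDK/etc/vulkan/explicit_layer.d
export DYLD_LIBRARY_PATH=$VULKAN_SDK/lib:$DYLD_LIBRARY_PATH
When launching from CLion rather than a terminal, these variables typically have to be set in the run configuration's environment, since GUI-launched processes do not inherit shell profile exports.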
I was troubleshooting an OpenGL application on a new computer when I discovered that GLFW could not create a window with the specified version of OpenGL. I created a minimal version of the application to test the version of OpenGL created, and no matter what version I hint, the version I get is 0.0. Do I simply not have OpenGL? This seems impossible, since glxgears runs and glxinfo suggests that I have version 2.1.
#include <iostream>
#include <GLFW/glfw3.h>
int main(int argc, const char *argv[]) {
    if(!glfwInit()) {
        return 1;
    }
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
    auto win = glfwCreateWindow(640, 480, "", NULL, NULL);
    if(!win) {
        return 1;
    }

    int major = 0, minor = 0;
    glfwMakeContextCurrent(win);
    glGetIntegerv(GL_MAJOR_VERSION, &major);
    glGetIntegerv(GL_MINOR_VERSION, &minor);
    std::cout << "Initialized with OpenGL "
              << major << "." << minor << std::endl;

    glfwDestroyWindow(win);
    glfwTerminate();
}
The output of the application is "Initialized with OpenGL 0.0". A window briefly opens and closes and the application terminates without errors.
The GL_MAJOR_VERSION and GL_MINOR_VERSION queries were introduced in GL 3.0. Prior to that, they just generate a GL_INVALID_ENUM error during the glGetIntegerv call and leave your variables untouched.
You have to use glGetString(GL_VERSION) to reliably get the version number if you can't guarantee that you are on a >= 3.0 context. If you need the numbers, you'll have to parse the string manually.
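As an illustration, here is an untested sketch of that fallback, adapted from the question's code: it queries the version string, which is valid on every OpenGL version, and parses the two leading numbers. GLFW can also report the negotiated version via glfwGetWindowAttrib with GLFW_CONTEXT_VERSION_MAJOR and GLFW_CONTEXT_VERSION_MINOR.
#include <cstdio>
#include <GLFW/glfw3.h>

int main() {
    if (!glfwInit()) return 1;
    GLFWwindow* win = glfwCreateWindow(640, 480, "", NULL, NULL);
    if (!win) { glfwTerminate(); return 1; }
    glfwMakeContextCurrent(win);

    // GL_VERSION strings start with "major.minor", e.g. "2.1 Mesa ..." or
    // "4.6.0 NVIDIA ...", so reading the two leading integers is enough here.
    const char* version = reinterpret_cast<const char*>(glGetString(GL_VERSION));
    int major = 0, minor = 0;
    if (version) std::sscanf(version, "%d.%d", &major, &minor);
    std::printf("Initialized with OpenGL %d.%d (%s)\n", major, minor,
                version ? version : "unknown");

    glfwDestroyWindow(win);
    glfwTerminate();
    return 0;
}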
I am finding that QGLShaderProgram is consistently failing to compile any shader and providing no error log. Here are the symptoms:
QGLShaderProgram reports that it failed to compile but produces an empty error log. If I try to bind the shader an exception is thrown.
I can compile a shader using glCompileShader without a problem. However, the first time I try to compile this way after QGLShaderProgram has failed, it fails with this error log:
ERROR: error(#270) Internal error: Wrong symbol table level
ERROR: 0:2: error(#232) Function declarations cannot occur inside of functions:
main
ERROR: error(#273) 2 compilation errors. No code generated
Following that one failure, the next compile using glCompileShader works fine.
The problem has arisen only since upgrading from Qt 4.8 to 5.2. Nothing else has changed on this machine.
I have tested on two PCs, one with an ATI Radeon HD 5700, the other with an AMD FirePro V7900. The problem only appears on the Radeon PC.
Here is my test code demonstrating the problem:
main.cpp
#include <QApplication>
#include "Test.h"
int main(int argc, char* argv[])
{
    QApplication* app = new QApplication(argc, argv);
    Drawer* drawer = new Drawer;
    return app->exec();
}
Test.h
#pragma once
#include <qobject>
#include <QTimer>
#include <QWindow>
#include <QOpenGLContext>
#include <QOpenGLFunctions>
class Drawer : public QWindow, protected QOpenGLFunctions
{
    Q_OBJECT;

public:
    Drawer();

    QTimer* mTimer;
    QOpenGLContext* mContext;
    int frame;

public Q_SLOTS:
    void draw();
};
Test.cpp
#include "Test.h"
#include <QGLShaderProgram>
#include <iostream>
#include <ostream>
using namespace std;
Drawer::Drawer()
    : mTimer(new QTimer)
    , mContext(new QOpenGLContext)
    , frame(0)
{
    mContext->create();
    setSurfaceType(OpenGLSurface);

    mTimer->setInterval(40);
    connect(mTimer, SIGNAL(timeout()), this, SLOT(draw()));
    mTimer->start();

    show();
}

const char* vertex = "#version 110 \n void main() { gl_Position = gl_Vertex; }";
const char* fragment = "#version 110 \n void main() { gl_FragColor = vec4(0.0,0.0,0.0,0.0); }";

void Drawer::draw()
{
    mContext->makeCurrent(this);

    if (frame==0) {
        initializeOpenGLFunctions();
    }

    // Compile using QGLShaderProgram. This always fails
    if (frame < 5)
    {
        QGLShaderProgram* prog = new QGLShaderProgram;
        bool f = prog->addShaderFromSourceCode(QGLShader::Fragment, fragment);
        cout << "fragment " << f << endl;
        bool v = prog->addShaderFromSourceCode(QGLShader::Vertex, vertex);
        cout << "vertex " << v << endl;
        bool link = prog->link();
        cout << "link " << link << endl;
    }

    // Manual compile using OpenGL direct. This works except for the first time it
    // follows the above block
    {
        GLuint prog = glCreateShader(GL_FRAGMENT_SHADER);
        glShaderSource(prog, 1, &fragment, 0);
        glCompileShader(prog);

        GLint success = 0;
        glGetShaderiv(prog, GL_COMPILE_STATUS, &success);

        GLint logSize = 0;
        glGetShaderiv(prog, GL_INFO_LOG_LENGTH, &logSize);

        GLchar* log = new char[8192];
        glGetShaderInfoLog(prog, 8192, 0, log);
        cout << "manual compile " << success << endl << log << endl;
        delete[] log;
    }

    glClearColor(1,1,0,1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    mContext->swapBuffers(this);
    frame++;
}
Elsewhere, I have tested using QGLWidget, and on a project that uses GLEW instead of QOpenGLFunctions with exactly the same results.
The version of Qt I'm linking against was built with the following configuration:
configure -developer-build -opensource -nomake examples -nomake tests -mp -opengl desktop -icu -confirm-license
Any suggestions? Or shall I just send this in as a bug report?
Update
In response to peppe's comments:
1) What does QOpenGLDebugLogger say?
The only thing I can get from QOpenGLDebugLogger is
QWindowsGLContext::getProcAddress: Unable to resolve 'glGetPointerv'
This is printed when I initialize it (and not as a debug event firing, but just to console). It happens even though mContext->hasExtension(QByteArrayLiteral("GL_KHR_debug")) returns true and I'm initializing it within the first frame's draw() function.
2) Can you print the compile log of the QOGLShaders even if they compile successfully?
I cannot successfully compile QOpenGLShader or QGLShader at any point, so I'm not able to test this. However, when compiling successfully using plain GL functions, the log comes back blank.
3) Which GL version did you get from the context? (Check with QSurfaceFormat).
I've tried with versions 3.0, 3.2, 4.2, all with the same result.
4) Please set the same QSurfaceFormat on both the context and the window before creating them
5) Remember to create() the window
I've implemented both of these now and the result is the same.
I've just tested on a third PC, and that one has no issues. So it is this specific computer, which, incidentally, happens to be a Mac Pro running Windows in Boot Camp. It has had absolutely no trouble in any other context running the latest ATI drivers, but I can only conclude that there is a bug somewhere between the ATI drivers, this computer's graphics chip, and QOpenGLShaderProgram.
I think I'm unlikely to find a solution, so I'm giving up. Thank you for all your input!
I have recently migrated from Windows to Linux (Debian, 64-bit) and am trying to get a GPGPU development environment up and running, so I am testing a program which worked under Windows.
Compiling and linking goes fine, but when I run the program I get some odd errors. I am using glew and freeglut.
First snippet: OpenGL only
i = 1;
info = PROGRAM_NAME;
glutInitContextVersion(4,2);
glutInit(&i, &info);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(W_SIZEX, W_SIZEY);
glutInitWindowPosition(W_POSX, W_POSY);
glutCreateWindow(info);
glClearColor(1.0,1.0,1.0,0);
/**/
printf("Before glewInit: %i\n", glGetError());
/**/
printf("glewInit returns: %i\n", glewInit());
/**/
printf("After glewInit: %i\n", glGetError());
/**/
From which I get the following output:
Before glewInit: 0
glewInit returns: 0
After glewInit: 1280
This is a GL_INVALID_ENUM error. I don't know what's causing it, but I suspect it might be related to the next error I get later in the program's execution.
Second snippet: OpenCL-OpenGL interop
/* BUFFERS */
(*BFR).C[0] = clCreateBuffer(*CTX, CL_MEM_READ_WRITE, SD, 0, 0);
(*BFR).C[1] = clCreateBuffer(*CTX, CL_MEM_READ_WRITE, SD, 0, &i);
dcl(i);
glGenBuffers(2, (*BFR).G);
glBindBuffer(GL_ARRAY_BUFFER, (*BFR).G[0]);
glBufferData(GL_ARRAY_BUFFER, SI, 0, GL_DYNAMIC_DRAW);
(*BFR).D[0] = clCreateFromGLBuffer(*CTX, CL_MEM_WRITE_ONLY, (*BFR).G[0], &i);
dcl(i);
glBindBuffer(GL_ARRAY_BUFFER, 0);
Here, the dcl(int) method just decodes the CL error code. When I run this, I get a CL_INVALID_GL_OBJECT error from clCreateFromGLBuffer(). However, OpenGL has no issues generating, binding or unbinding the buffers in question. The OpenCL context is apparently valid, generating no errors on creation or query. Everything works in VS2010 on Windows 7 64-bit.
Compilation Details
Here are the relevant includes:
/* OPENGL */
#include "GL/glew.h"
#include "GL/freeglut.h"
/* OPENCL */
#include "CL/cl.h"
#include "CL/cl_gl.h"
I am using GCC and linking like so:
gcc -w -I./include CLGL.c -o ~/Templates/GOL-CLGL/run/a.out -lGLEW -lGLU -lglut -lGL -lOpenCL;
Compilation and linking produce no errors (plenty of warnings about pointer abuse, but I doubt that's the culprit).
I'm currently out of ideas on how to debug this. Can anyone suggest further steps?
I had this issue recently too, so here is the answer:
OpenGL: glGetError() returns invalid enum after call to glewInit()
So you can discard that error.
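In practice that means reading the error once right after glewInit() so it is not attributed to a later call. A sketch of the relevant lines (a drop-in for the glewInit part of the first snippet, assuming the same GLEW setup):
glewExperimental = GL_TRUE;   /* commonly recommended together with this workaround */
GLenum glew_status = glewInit();
if (glew_status != GLEW_OK) {
    printf("glewInit failed: %s\n", (const char*)glewGetErrorString(glew_status));
    return 1;
}
/* glewInit() queries GL_EXTENSIONS via glGetString(), which newer contexts reject
 * with GL_INVALID_ENUM; read the error once so later glGetError() calls start clean. */
(void)glGetError();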