Display FFMPEG decoded frame in a GLFW window - c++

I am implementing the client program of a game where the server sends encoded frames of the game to the client (via UDP), while the client decodes them (via FFMPEG) and displays them in a GLFW window.
My program has two threads:
Thread 1: renders the content of the uint8_t* variable dataToRender
Thread 2: keeps obtaining frames from the server, decodes them and updates dataToRender accordingly
Thread 1 does the typical rendering of a GLFW window in a while-loop. I have already tried to display some dummy frame data (a completely red frame) and it worked:
while (!glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
...
glBindTexture(GL_TEXTURE_2D, tex_handle);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, window_width, window_height, 0, GL_RGB, GL_UNSIGNED_BYTE, dataToRender);
...
glfwSwapBuffers(window);
}
Thread 2 is where I am having trouble. I am unable to properly store the decoded frame into my dataToRender variable. On top of that, the frame data is originally in YUV format and needs to be converted to RGB. I use FFMPEG's sws_scale for that, which also prints a bad dst image pointers error in the console. Here's the code snippet responsible for that part:
size_t data_size = frameBuffer.size(); // frameBuffer is a std::vector where I accumulate the frame data chunks
uint8_t* data = frameBuffer.data(); // convert the vector to a pointer
picture->format = AV_PIX_FMT_RGB24;
av_frame_get_buffer(picture, 1);
while (data_size > 0) {
int ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
fprintf(stderr, "Error while parsing\n");
exit(1);
}
data += ret;
data_size -= ret;
if (pkt->size) {
swsContext = sws_getContext(
c->width, c->height,
AV_PIX_FMT_YUV420P, c->width, c->height,
AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL
);
uint8_t* rgb24[1] = { data };
int rgb24_stride[1] = { 3 * c->width };
sws_scale(swsContext, rgb24, rgb24_stride, 0, c->height, picture->data, picture->linesize);
decode(c, picture, pkt, outname);
// TODO: copy content of picture->data[0] to "dataToRender" maybe?
}
}
I have already tried doing another sws_scale to copy the content to dataToRender and I cannot get rid of the bad dst image pointers error. Any advice or solution to the problem would be greatly appreciated as I have been stuck for days on this.
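For reference, sws_scale normally consumes the planes of an already-decoded frame rather than the raw bitstream buffer. Below is a minimal sketch of that flow, assuming the send/receive decode API, a decoded AVFrame* named frame, and a dataToRender buffer of width * height * 3 bytes (these names are placeholders, not the poster's exact variables):
// Sketch only: decode first, then convert the decoded YUV420P planes into the
// packed RGB24 buffer that the render thread reads.
if (avcodec_send_packet(c, pkt) == 0) {
    while (avcodec_receive_frame(c, frame) == 0) {
        SwsContext* sws = sws_getContext(
            c->width, c->height, AV_PIX_FMT_YUV420P,
            c->width, c->height, AV_PIX_FMT_RGB24,
            SWS_BILINEAR, NULL, NULL, NULL);
        uint8_t* dst[1]       = { dataToRender };     // destination: packed RGB24
        int      dstStride[1] = { 3 * c->width };
        sws_scale(sws, frame->data, frame->linesize,  // source: decoded planes
                  0, c->height, dst, dstStride);
        sws_freeContext(sws);
    }
}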

I think you should convert YUV to RGB using OpenGL. That is much more efficient and simpler. The fragment shader looks like below:
precision mediump float;
varying vec2 v_texPo;
uniform sampler2D sampler_y;
uniform sampler2D sampler_u;
uniform sampler2D sampler_v;
void main() {
float y, u, v;
vec3 rgb;
y = texture2D(sampler_y, v_texPo).r;
u = texture2D(sampler_u, v_texPo).r - 0.5;
v = texture2D(sampler_v, v_texPo).r - 0.5;
rgb.r = y + 1.403 * v;
rgb.g = y - 0.344 * u - 0.714 * v;
rgb.b = y + 1.770 * u;
gl_FragColor = vec4(rgb, 1);
}
And you should upload three textures (the Y, U and V planes) to OpenGL.
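For reference, a minimal sketch of uploading the three planes of a decoded AV_PIX_FMT_YUV420P frame as single-channel textures (tex_y, tex_u and tex_v are assumed, pre-created texture handles; the U and V planes are half the luma size, and each plane's linesize is assumed to equal its width):
// Sketch only: upload the Y, U and V planes as three single-channel textures
// (GL_RED on desktop GL 3+; use GL_LUMINANCE on OpenGL ES 2).
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex_y);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width, frame->height,
             0, GL_RED, GL_UNSIGNED_BYTE, frame->data[0]);

glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, tex_u);   // U plane: half width, half height for YUV420P
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width / 2, frame->height / 2,
             0, GL_RED, GL_UNSIGNED_BYTE, frame->data[1]);

glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, tex_v);   // V plane: half width, half height for YUV420P
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, frame->width / 2, frame->height / 2,
             0, GL_RED, GL_UNSIGNED_BYTE, frame->data[2]);

// The sampler_y, sampler_u and sampler_v uniforms should be set to texture units 0, 1 and 2.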

Related

OpenCL: Drawing to OpenGL Framebuffer - Framebuffer stays empty

I am trying to render to the OpenGL Framebuffer via an OpenGL Renderbuffer from an OpenCL kernel. The issue is: even though I can (probably) render/write to the Renderbuffer from an OpenCL kernel, the screen stays empty (-> black).
I am reaching the limits of what I can test in finite time, so I am asking someone with much more experience for a tip about what I am missing.
I personally suspect that I forgot to bind a buffer at the right point, but since I don't see which and where, this is practically impossible to check.
Now for some reduced code (so you don't have to look at all the error checking etc.). This is the function that is called during the render routine:
void TestBuffer(){
GLubyte *buffer = (GLubyte *) malloc(1000 * 1000 * 4);
glReadBuffer(GL_COLOR_ATTACHMENT0);
error = glGetError();
if(error != GL_NO_ERROR){
printf("error with readBuffer, %i\n", error);
}
glReadPixels(0, 0, 1000, 1000, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid *)buffer);
error = glGetError();
if(error != GL_NO_ERROR){
printf("error with readpixels\n");
}
for(int i = 0; i < 1000*100; i++){
if(buffer[i] != 0){
printf("buffer was not empty # %i: %u\n", i, buffer[i]);
free(buffer);
return;
}
}
printf("buffer was empty\n");
free(buffer);
}
void runShader(){
glFinish(); //Make sure, that OpenGL isn't using our objects
ret = clEnqueueAcquireGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);
// Execute the OpenCL kernel on the list
size_t global_item_size = 1000 * 1000; // Process the entire lists
size_t local_item_size = 1000; // Divide work items into groups of SceenWidth
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL);
ret = clEnqueueReleaseGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);
clFlush(command_queue);
clFinish(command_queue);
// We are going to blit into the window (default framebuffer)
glBindFramebuffer (GL_DRAW_FRAMEBUFFER, 0);
glDrawBuffer (GL_BACK); // Use backbuffer as color dst.
// Read from your FBO
glBindFramebuffer (GL_READ_FRAMEBUFFER, gl_frameBuffer);
glReadBuffer (GL_COLOR_ATTACHMENT0); // Use Color Attachment 0 as color src.
// Copy the color and depth buffer from your FBO to the default framebuffer
glBlitFramebuffer (0,0, 1000, 1000, 0,0, 1000, 1000, GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, GL_NEAREST);
TestBuffer();
}
My ideas were:
Blit the contents of the renderbuffer to the screenbuffer, in case I messed up with binding the new framebuffer object (created earlier), or attaching the renderbuffer (which you can see in the last few lines of the code)
Check if I messed up with the double buffering or something: this is the TestBuffer() function
Flushing before Finishing, just in case
The shader/kernel code is simple on purpose, to see if the other stuff actually works (.w should be alpha, which should be opaque so we can see the result; the rest is just a gray rainbow):
#pragma OPENCL EXTENSION all : enable
#define ScreenWidth 1000
#define ScreenHight 1000
const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;
__kernel void rainbow(__write_only image2d_t asd) {
int i = get_global_id(0);
unsigned int x = i%ScreenWidth;
unsigned int y = i/ScreenHight;
uint4 pixel; //I wish, I could access this as an array
pixel.x = i;
pixel.y = i;
pixel.z = i;
pixel.w = 255;
write_imageui(asd, (int2)(x, y), pixel);
}
Some further information:
I am only rendering stuff to COLOR_ATTACHMENT0, since I don't care about the depth or stencil buffer in my use case. This could be an issue though. (I didn't even generate buffers for them.)
I am compiling for Windows 10
The format of the renderbuffer is RGBA8, but I think the natural format is RGBA24. It once was just RGBA, as you can see in the TestBuffer routine, but I think this should be fine.
What could cause the screen to stay black/empty?

How to Process RGB Data from the CPU on an NVIDIA GPU and Visualize the Data with an OpenGL Texture

I'm hoping to create a simple computer vision library in C++/CUDA C++ that allows me to do the following:
Grab some RGB data from the host memory. This data will come in a BGR byte array, 8 bits per channel per pixel.
Process that data in a CUDA kernel.
Write the output of that kernel back into some host memory.
Render the output in an OpenGL texture for easy viewing.
These functions would go inside a class like so:
class Processor{
public:
void setInput(const byte* data, int imageWidth, int imageHeight);
void processData();
GLuint getInputTexture();
GLuint getOutputTexture();
void writeOutputTo(byte* destination);
};
setInput() is going to be called with every frame of a video (hundreds or thousands of images of the same dimensions).
How can I write the Processor class so that setInput() can efficiently update an instance's internal CUDA array and processData() can synchronize the CUDA array with the OpenGL texture?
Below is my attempt at implementing such a class, contained in one CUDA C++ file along with a simple test. (Requires GLFW and GLAD.) With this implementation, I can provide some input image data, run a CUDA kernel that produces an output image, and visualize both with OpenGL textures. But it's extremely inefficient because every time setInput() is called, two OpenGL textures and two CUDA surface objects need to be created. And if more than one image is processed, two OpenGL textures and two CUDA surface objects also have to be destroyed.
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include <cudaGL.h>
#include <cuda_gl_interop.h>
#include <iostream>
/** Macro for checking if CUDA has problems */
#define cudaCheckError() { \
cudaError_t err = cudaGetLastError(); \
if(err != cudaSuccess) { \
printf("Cuda error: %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
exit(1); \
} \
}
/*Window dimensions*/
const int windowWidth = 1280, windowHeight = 720;
/*Window address*/
GLFWwindow* currentGLFWWindow = 0;
/**
* A simple image processing kernel that copies the inverted data from the input surface to the output surface.
*/
__global__ void kernel(cudaSurfaceObject_t input, cudaSurfaceObject_t output, int width, int height) {
//Get the pixel index
unsigned int xPx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int yPx = threadIdx.y + blockIdx.y * blockDim.y;
//Don't do any computation if this thread is outside of the surface bounds.
if (xPx >= width || yPx >= height) return;
//Copy the contents of input to output.
uchar4 pixel = { 255,128,0,255 };
//Read a pixel from the input. Disable to default to the flat orange color above
surf2Dread<uchar4>(&pixel, input, xPx * sizeof(uchar4), yPx, cudaBoundaryModeClamp);
//Invert the color
pixel.x = ~pixel.x;
pixel.y = ~pixel.y;
pixel.z = ~pixel.z;
//Write the new pixel color to the
surf2Dwrite(pixel, output, xPx * sizeof(uchar4), yPx);
}
class Processor {
public:
void setInput( uint8_t* const data, int imageWidth, int imageHeight);
void processData();
GLuint getInputTexture();
GLuint getOutputTexture();
void writeOutputTo(uint8_t* destination);
private:
/**
* #brief True if the textures and surfaces are initialized.
*
* Prevents memory leaks
*/
bool surfacesInitialized = false;
/**
* #brief The width and height of a texture/surface pair.
*
*/
struct ImgDim { int width, height; };
/**
* #brief Creates a CUDA surface object, CUDA resource, and OpenGL texture from some data.
*/
void createTextureSurfacePair(const ImgDim& dimensions, uint8_t* const data, GLuint& textureOut, cudaGraphicsResource_t& graphicsResourceOut, cudaSurfaceObject_t& surfaceOut);
/**
* #brief Destroys every CUDA surface object, CUDA resource, and OpenGL texture created by this instance.
*/
void destroyEverything();
/**
* #brief The dimensions of an image and its corresponding texture.
*
*/
ImgDim imageInputDimensions, imageOutputDimensions;
/**
* #brief A CUDA surface that can be read to, written from, or synchronized with a Mat or
* OpenGL texture
*
*/
cudaSurfaceObject_t d_imageInputTexture = 0, d_imageOutputTexture = 0;
/**
* #brief A CUDA resource that's bound to an array in CUDA memory
*/
cudaGraphicsResource_t d_imageInputGraphicsResource, d_imageOutputGraphicsResource;
/**
* #brief A renderable OpenGL texture that is synchronized with the CUDA data
* #see d_imageInputTexture, d_imageOutputTexture
*/
GLuint imageInputTexture = 0, imageOutputTexture = 0;
/** Returns true if nothing can be rendered */
bool empty() { return imageInputTexture == 0; }
};
void Processor::setInput(uint8_t* const data, int imageWidth, int imageHeight)
{
//Same-size images don't need texture regeneration, so skip that.
if (imageHeight == imageInputDimensions.height && imageWidth == imageInputDimensions.width) {
/*
Possible shortcut: we know the input is the same size as the texture and CUDA surface object.
So instead of destroying the surface and texture, why not just overwrite them?
That's what I try to do in the following block, but because "data" is BGR and the texture
is RGBA, the channels get all messed up.
*/
/*
//Use the input surface's CUDAResourceDesc to gain access to the surface data array
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
cudaGetSurfaceObjectResourceDesc(&resDesc, d_imageInputTexture);
cudaCheckError();
//Copy the data from the input array to the surface
cudaMemcpyToArray(resDesc.res.array.array, 0, 0, input.data, imageInputDimensions.width * imageInputDimensions.height * 3, cudaMemcpyHostToDevice);
cudaCheckError();
//Set status flags
surfacesInitialized = true;
return;
*/
}
//Clear everything that originally existed in the texture/surface
destroyEverything();
//Get the size of the image and place it here.
imageInputDimensions.width = imageWidth;
imageInputDimensions.height = imageHeight;
imageOutputDimensions.width = imageWidth;
imageOutputDimensions.height = imageHeight;
//Create the input surface/texture pair
createTextureSurfacePair(imageInputDimensions, data, imageInputTexture, d_imageInputGraphicsResource, d_imageInputTexture);
//Create the output surface/texture pair
uint8_t* outData = new uint8_t[imageOutputDimensions.width * imageOutputDimensions.height * 3];
createTextureSurfacePair(imageOutputDimensions, outData, imageOutputTexture, d_imageOutputGraphicsResource, d_imageOutputTexture);
delete[] outData;
//Set status flags
surfacesInitialized = true;
}
void Processor::processData()
{
const int threadsPerBlock = 128;
//Call the algorithm
//Set the number of blocks to call the kernel with.
dim3 blocks((unsigned int)ceil((float)imageInputDimensions.width / threadsPerBlock), imageInputDimensions.height);
kernel <<<blocks, threadsPerBlock >>> (d_imageInputTexture, d_imageOutputTexture, imageInputDimensions.width, imageInputDimensions.height);
//Sync the surface with the texture
cudaDeviceSynchronize();
cudaCheckError();
}
GLuint Processor::getInputTexture()
{
return imageInputTexture;
}
GLuint Processor::getOutputTexture()
{
return imageOutputTexture;
}
void Processor::writeOutputTo(uint8_t* destination)
{
//Haven't figured this out yet
}
void Processor::createTextureSurfacePair(const Processor::ImgDim& dimensions, uint8_t* const data, GLuint& textureOut, cudaGraphicsResource_t& graphicsResourceOut, cudaSurfaceObject_t& surfaceOut) {
// Create the OpenGL texture that will be displayed with GLAD and GLFW
glGenTextures(1, &textureOut);
// Bind to our texture handle
glBindTexture(GL_TEXTURE_2D, textureOut);
// Set texture interpolation methods for minification and magnification
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Set texture clamping method
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// Create the texture and its attributes
glTexImage2D(GL_TEXTURE_2D, // Type of texture
0, // Pyramid level (for mip-mapping) - 0 is the top level
GL_RGBA, // Internal color format to convert to
dimensions.width, // Image width i.e. 640 for Kinect in standard mode
dimensions.height, // Image height i.e. 480 for Kinect in standard mode
0, // Border width in pixels (can either be 1 or 0)
GL_BGR, // Input image format (i.e. GL_RGB, GL_RGBA, GL_BGR etc.)
GL_UNSIGNED_BYTE, // Image data type.
data); // The actual image data itself
//Note that the type of this texture is an RGBA UNSIGNED_BYTE type. When CUDA surfaces
//are synchronized with OpenGL textures, the surfaces will be of the same type.
//They won't know or care about their data types though, for they are all just byte arrays
//at heart. So be careful to ensure that any CUDA kernel that handles a CUDA surface
//uses it as an appropriate type. You will see that the update_surface kernel (defined
//above) treats each pixel as four unsigned bytes along the X-axis: one for red, green, blue,
//and alpha respectively.
//Create the CUDA array and texture reference
cudaArray* bitmap_d;
//Register the GL texture with the CUDA graphics library. A new cudaGraphicsResource is created, and its address is placed in cudaTextureID.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__OPENGL.html#group__CUDART__OPENGL_1g80d12187ae7590807c7676697d9fe03d
cudaGraphicsGLRegisterImage(&graphicsResourceOut, textureOut, GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsNone);
cudaCheckError();
//Map graphics resources for access by CUDA.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__INTEROP.html#group__CUDART__INTEROP_1gad8fbe74d02adefb8e7efb4971ee6322
cudaGraphicsMapResources(1, &graphicsResourceOut, 0);
cudaCheckError();
//Get the location of the array of pixels that was mapped by the previous function and place that address in bitmap_d
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__INTEROP.html#group__CUDART__INTEROP_1g0dd6b5f024dfdcff5c28a08ef9958031
cudaGraphicsSubResourceGetMappedArray(&bitmap_d, graphicsResourceOut, 0, 0);
cudaCheckError();
//Create a CUDA resource descriptor. This is used to get and set attributes of CUDA resources.
//This one will tell CUDA how we want the bitmap_surface to be configured.
//Documentation for the struct: https://docs.nvidia.com/cuda/cuda-runtime-api/structcudaResourceDesc.html#structcudaResourceDesc
struct cudaResourceDesc resDesc;
//Clear it with 0s so that some flags aren't arbitrarily left at 1s
memset(&resDesc, 0, sizeof(resDesc));
//Set the resource type to be an array for convenient processing in the CUDA kernel.
//List of resTypes: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g067b774c0e639817a00a972c8e2c203c
resDesc.resType = cudaResourceTypeArray;
//Bind the new descriptor with the bitmap created earlier.
resDesc.res.array.array = bitmap_d;
//Create a new CUDA surface ID reference.
//This is really just an unsigned long long.
//Docuentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1gbe57cf2ccbe7f9d696f18808dd634c0a
surfaceOut = 0;
//Create the surface with the given description. That surface ID is placed in bitmap_surface.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__SURFACE__OBJECT.html#group__CUDART__SURFACE__OBJECT_1g958899474ab2c5f40d233b524d6c5a01
cudaCreateSurfaceObject(&surfaceOut, &resDesc);
cudaCheckError();
}
void Processor::destroyEverything()
{
if (surfacesInitialized) {
//Input image CUDA surface
cudaDestroySurfaceObject(d_imageInputTexture);
cudaGraphicsUnmapResources(1, &d_imageInputGraphicsResource);
cudaGraphicsUnregisterResource(d_imageInputGraphicsResource);
d_imageInputTexture = 0;
//Output image CUDA surface
cudaDestroySurfaceObject(d_imageOutputTexture);
cudaGraphicsUnmapResources(1, &d_imageOutputGraphicsResource);
cudaGraphicsUnregisterResource(d_imageOutputGraphicsResource);
d_imageOutputTexture = 0;
//Input image GL texture
glDeleteTextures(1, &imageInputTexture);
imageInputTexture = 0;
//Output image GL texture
glDeleteTextures(1, &imageOutputTexture);
imageOutputTexture = 0;
surfacesInitialized = false;
}
}
/** A way to initialize OpenGL with GLFW and GLAD */
void initGL() {
// Setup window
if (!glfwInit())
return;
// Decide GL+GLSL versions
#if __APPLE__
// GL 3.2 + GLSL 150
const char* glsl_version = "#version 150";
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // Required on Mac
#else
// GL 3.0 + GLSL 130
const char* glsl_version = "#version 130";
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only
//glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // 3.0+ only
#endif
// Create window with graphics context
currentGLFWWindow = glfwCreateWindow(windowWidth, windowHeight, "Output image (OpenGL + GLFW)", NULL, NULL);
if (currentGLFWWindow == NULL)
return;
glfwMakeContextCurrent(currentGLFWWindow);
glfwSwapInterval(3); // Enable vsync
if (!gladLoadGL()) {
// GLAD failed
printf( "GLAD failed to initialize :(" );
return;
}
//Change GL settings
glViewport(0, 0, windowWidth, windowHeight); // use a screen size of WIDTH x HEIGHT
glMatrixMode(GL_PROJECTION); // Make a simple 2D projection on the entire window
glLoadIdentity();
glOrtho(0.0, windowWidth, windowHeight, 0.0, 0.0, 100.0);
glMatrixMode(GL_MODELVIEW); // Set the matrix mode to object modeling
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClearDepth(0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear the window
}
/** Renders the textures on the GLFW window and requests GLFW to update */
void showTextures(GLuint top, GLuint bottom) {
// Clear color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW); // Operate on model-view matrix
glBindTexture(GL_TEXTURE_2D, top);
/* Draw top quad */
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(0, 0);
glTexCoord2i(0, 1); glVertex2i(0, windowHeight/2);
glTexCoord2i(1, 1); glVertex2i(windowWidth, windowHeight / 2);
glTexCoord2i(1, 0); glVertex2i(windowWidth, 0);
glEnd();
glDisable(GL_TEXTURE_2D);
/* Draw bottom quad */
glBindTexture(GL_TEXTURE_2D, bottom);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(0, windowHeight / 2);
glTexCoord2i(0, 1); glVertex2i(0, windowHeight);
glTexCoord2i(1, 1); glVertex2i(windowWidth, windowHeight);
glTexCoord2i(1, 0); glVertex2i(windowWidth, windowHeight / 2);
glEnd();
glDisable(GL_TEXTURE_2D);
glfwSwapBuffers(currentGLFWWindow);
glfwPollEvents();
}
int main() {
initGL();
int imageWidth = windowWidth;
int imageHeight = windowHeight / 2;
uint8_t* imageData = new uint8_t[imageWidth * imageHeight * 3];
Processor p;
while (!glfwWindowShouldClose(currentGLFWWindow))
{
//Process the image here
p.setInput(imageData, imageWidth, imageHeight);
p.processData();
showTextures(p.getInputTexture(), p.getOutputTexture());
}
}
TL;DR: I can see at least 2 ways forward here, either convert your data to 4 byte pixels (somehow) and use cudaMemcpy2DToArray, or allow the CUDA kernel to take in raw data (instead of using a surface as input). I'll try to demonstrate both, although I don't wish to put in a large effort at polishing this, so really just demonstrating ideas.
This answer is working off the code you provided in an edit which is not your latest. However in the subsequent edits, mainly you seem to be just ripping out OpenCV, which I would normally applaud. However, since I've worked off your edit that had OpenCV in it, I've elected to use an OpenCV "test case" of my own.
Using 4 byte-per-pixel data, and cudaMemcpy2DToArray: This seems to adhere most closely to what you have demonstrated, albeit commented-out. The idea is we will access the input data by copying it to the CUDA array (acquired from the interop mechanism) directly. As you had previously pointed out, cudaMemcpyToArray is deprecated, so we won't use that. Furthermore, our data format (bytes per pixel) has to match what is in the array. I think there are a number of ways to solve this, depending on your overall pipeline, but the approach I show here isn't efficient, it's just to demonstrate that the method is "workable". If there is a way to use 4 byte per pixel data in your pipeline, however, you may be able to get rid of the "inefficiency" here. To use this method, compile the code with the -DUSE_1 switch.
Input of the data through the kernel. We can skip the inefficiency of the first case by just allowing the kernel to do the 3-byte to 4-byte conversion of data on the fly. Either way, there is a copy of data from host to device, but this method doesn't require 4 byte per pixel input data.
Here is code demonstrating both options:
//nvcc -arch=sm_35 -o t19 glad/src/glad.c t19.cu -lGL -lGLU -I./glad/include -lglfw -std=c++11 -lopencv_core -lopencv_highgui -lopencv_imgcodecs -Wno-deprecated-gpu-targets
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include <cudaGL.h>
#include <cuda_gl_interop.h>
#include <iostream>
#include <opencv2/highgui.hpp>
/** Macro for checking if CUDA has problems */
#define cudaCheckError() { \
cudaError_t err = cudaGetLastError(); \
if(err != cudaSuccess) { \
printf("Cuda error: %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
exit(1); \
} \
}
/*Window dimensions*/
//const int windowWidth = 1280, windowHeight = 720;
/*Window address*/
GLFWwindow* currentGLFWWindow = 0;
/**
* A simple image processing kernel that copies the inverted data from the input surface to the output surface.
*/
__global__ void kernel(cudaSurfaceObject_t input, cudaSurfaceObject_t output, int width, int height, uint8_t *data) {
//Get the pixel index
unsigned int xPx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int yPx = threadIdx.y + blockIdx.y * blockDim.y;
//Don't do any computation if this thread is outside of the surface bounds.
if (xPx >= width || yPx >= height) return;
//Copy the contents of input to output.
#ifdef USE_1
uchar4 pixel = { 255,128,0,255 };
//Read a pixel from the input. Disable to default to the flat orange color above
surf2Dread<uchar4>(&pixel, input, xPx * sizeof(uchar4), yPx, cudaBoundaryModeClamp);
#else
uchar4 pixel;
pixel.x = data[(xPx+yPx*width)*3 + 0];
pixel.y = data[(xPx+yPx*width)*3 + 1];
pixel.z = data[(xPx+yPx*width)*3 + 2];
pixel.w = 255;
surf2Dwrite(pixel, input, xPx * sizeof(uchar4), yPx);
#endif
//Invert the color
pixel.x = ~pixel.x;
pixel.y = ~pixel.y;
pixel.z = ~pixel.z;
//Write the new pixel color to the
surf2Dwrite(pixel, output, xPx * sizeof(uchar4), yPx);
}
class Processor {
public:
void setInput( uint8_t* const data, int imageWidth, int imageHeight);
void processData(uint8_t *data, uint8_t *d_data);
GLuint getInputTexture();
GLuint getOutputTexture();
void writeOutputTo(uint8_t* destination);
private:
/**
* #brief True if the textures and surfaces are initialized.
*
* Prevents memory leaks
*/
bool surfacesInitialized = false;
/**
* #brief The width and height of a texture/surface pair.
*
*/
struct ImgDim { int width, height; };
/**
* #brief Creates a CUDA surface object, CUDA resource, and OpenGL texture from some data.
*/
void createTextureSurfacePair(const ImgDim& dimensions, uint8_t* const data, GLuint& textureOut, cudaGraphicsResource_t& graphicsResourceOut, cudaSurfaceObject_t& surfaceOut);
/**
* #brief Destroys every CUDA surface object, CUDA resource, and OpenGL texture created by this instance.
*/
void destroyEverything();
/**
* #brief The dimensions of an image and its corresponding texture.
*
*/
ImgDim imageInputDimensions, imageOutputDimensions;
/**
* #brief A CUDA surface that can be read to, written from, or synchronized with a Mat or
* OpenGL texture
*
*/
cudaSurfaceObject_t d_imageInputTexture = 0, d_imageOutputTexture = 0;
/**
* #brief A CUDA resource that's bound to an array in CUDA memory
*/
cudaGraphicsResource_t d_imageInputGraphicsResource, d_imageOutputGraphicsResource;
/**
* #brief A renderable OpenGL texture that is synchronized with the CUDA data
* #see d_imageInputTexture, d_imageOutputTexture
*/
GLuint imageInputTexture = 0, imageOutputTexture = 0;
/** Returns true if nothing can be rendered */
bool empty() { return imageInputTexture == 0; }
};
void Processor::setInput(uint8_t* const data, int imageWidth, int imageHeight)
{
//Same-size images don't need texture regeneration, so skip that.
if (imageHeight == imageInputDimensions.height && imageWidth == imageInputDimensions.width) {
/*
Possible shortcut: we know the input is the same size as the texture and CUDA surface object.
So instead of destroying the surface and texture, why not just overwrite them?
That's what I try to do in the following block, but because "data" is BGR and the texture
is RGBA, the channels get all messed up.
*/
//Use the input surface's CUDAResourceDesc to gain access to the surface data array
#ifdef USE_1
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
cudaGetSurfaceObjectResourceDesc(&resDesc, d_imageInputTexture);
cudaCheckError();
uint8_t *data4 = new uint8_t[imageInputDimensions.width*imageInputDimensions.height*4];
for (int i = 0; i < imageInputDimensions.width*imageInputDimensions.height; i++){
data4[i*4+0] = data[i*3+0];
data4[i*4+1] = data[i*3+1];
data4[i*4+2] = data[i*3+2];
data4[i*4+3] = 255;}
//Copy the data from the input array to the surface
// cudaMemcpyToArray(resDesc.res.array.array, 0, 0, data, imageInputDimensions.width * imageInputDimensions.height * 3, cudaMemcpyHostToDevice);
cudaMemcpy2DToArray(resDesc.res.array.array, 0, 0, data4, imageInputDimensions.width*4, imageInputDimensions.width*4, imageInputDimensions.height, cudaMemcpyHostToDevice);
cudaCheckError();
delete[] data4;
#endif
//Set status flags
surfacesInitialized = true;
return;
}
//Clear everything that originally existed in the texture/surface
destroyEverything();
//Get the size of the image and place it here.
imageInputDimensions.width = imageWidth;
imageInputDimensions.height = imageHeight;
imageOutputDimensions.width = imageWidth;
imageOutputDimensions.height = imageHeight;
//Create the input surface/texture pair
createTextureSurfacePair(imageInputDimensions, data, imageInputTexture, d_imageInputGraphicsResource, d_imageInputTexture);
//Create the output surface/texture pair
uint8_t* outData = new uint8_t[imageOutputDimensions.width * imageOutputDimensions.height * 3];
createTextureSurfacePair(imageOutputDimensions, outData, imageOutputTexture, d_imageOutputGraphicsResource, d_imageOutputTexture);
delete[] outData;
//Set status flags
surfacesInitialized = true;
}
void Processor::processData(uint8_t *data, uint8_t *d_data)
{
const int threadsPerBlock = 128;
//Call the algorithm
//Set the number of blocks to call the kernel with.
dim3 blocks((unsigned int)ceil((float)imageInputDimensions.width / threadsPerBlock), imageInputDimensions.height);
#ifndef USE_1
cudaMemcpy(d_data, data, imageInputDimensions.width*imageInputDimensions.height*3, cudaMemcpyHostToDevice);
#endif
kernel <<<blocks, threadsPerBlock >>> (d_imageInputTexture, d_imageOutputTexture, imageInputDimensions.width, imageInputDimensions.height, d_data);
//Sync the surface with the texture
cudaDeviceSynchronize();
cudaCheckError();
}
GLuint Processor::getInputTexture()
{
return imageInputTexture;
}
GLuint Processor::getOutputTexture()
{
return imageOutputTexture;
}
void Processor::writeOutputTo(uint8_t* destination)
{
//Haven't figured this out yet
}
void Processor::createTextureSurfacePair(const Processor::ImgDim& dimensions, uint8_t* const data, GLuint& textureOut, cudaGraphicsResource_t& graphicsResourceOut, cudaSurfaceObject_t& surfaceOut) {
// Create the OpenGL texture that will be displayed with GLAD and GLFW
glGenTextures(1, &textureOut);
// Bind to our texture handle
glBindTexture(GL_TEXTURE_2D, textureOut);
// Set texture interpolation methods for minification and magnification
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Set texture clamping method
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// Create the texture and its attributes
glTexImage2D(GL_TEXTURE_2D, // Type of texture
0, // Pyramid level (for mip-mapping) - 0 is the top level
GL_RGBA, // Internal color format to convert to
dimensions.width, // Image width i.e. 640 for Kinect in standard mode
dimensions.height, // Image height i.e. 480 for Kinect in standard mode
0, // Border width in pixels (can either be 1 or 0)
GL_BGR, // Input image format (i.e. GL_RGB, GL_RGBA, GL_BGR etc.)
GL_UNSIGNED_BYTE, // Image data type.
data); // The actual image data itself
//Note that the type of this texture is an RGBA UNSIGNED_BYTE type. When CUDA surfaces
//are synchronized with OpenGL textures, the surfaces will be of the same type.
//They won't know or care about their data types though, for they are all just byte arrays
//at heart. So be careful to ensure that any CUDA kernel that handles a CUDA surface
//uses it as an appropriate type. You will see that the update_surface kernel (defined
//above) treats each pixel as four unsigned bytes along the X-axis: one for red, green, blue,
//and alpha respectively.
//Create the CUDA array and texture reference
cudaArray* bitmap_d;
//Register the GL texture with the CUDA graphics library. A new cudaGraphicsResource is created, and its address is placed in cudaTextureID.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__OPENGL.html#group__CUDART__OPENGL_1g80d12187ae7590807c7676697d9fe03d
cudaGraphicsGLRegisterImage(&graphicsResourceOut, textureOut, GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsNone);
cudaCheckError();
//Map graphics resources for access by CUDA.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__INTEROP.html#group__CUDART__INTEROP_1gad8fbe74d02adefb8e7efb4971ee6322
cudaGraphicsMapResources(1, &graphicsResourceOut, 0);
cudaCheckError();
//Get the location of the array of pixels that was mapped by the previous function and place that address in bitmap_d
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__INTEROP.html#group__CUDART__INTEROP_1g0dd6b5f024dfdcff5c28a08ef9958031
cudaGraphicsSubResourceGetMappedArray(&bitmap_d, graphicsResourceOut, 0, 0);
cudaCheckError();
//Create a CUDA resource descriptor. This is used to get and set attributes of CUDA resources.
//This one will tell CUDA how we want the bitmap_surface to be configured.
//Documentation for the struct: https://docs.nvidia.com/cuda/cuda-runtime-api/structcudaResourceDesc.html#structcudaResourceDesc
struct cudaResourceDesc resDesc;
//Clear it with 0s so that some flags aren't arbitrarily left at 1s
memset(&resDesc, 0, sizeof(resDesc));
//Set the resource type to be an array for convenient processing in the CUDA kernel.
//List of resTypes: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1g067b774c0e639817a00a972c8e2c203c
resDesc.resType = cudaResourceTypeArray;
//Bind the new descriptor with the bitmap created earlier.
resDesc.res.array.array = bitmap_d;
//Create a new CUDA surface ID reference.
//This is really just an unsigned long long.
//Docuentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html#group__CUDART__TYPES_1gbe57cf2ccbe7f9d696f18808dd634c0a
surfaceOut = 0;
//Create the surface with the given description. That surface ID is placed in bitmap_surface.
//Documentation: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__SURFACE__OBJECT.html#group__CUDART__SURFACE__OBJECT_1g958899474ab2c5f40d233b524d6c5a01
cudaCreateSurfaceObject(&surfaceOut, &resDesc);
cudaCheckError();
}
void Processor::destroyEverything()
{
if (surfacesInitialized) {
//Input image CUDA surface
cudaDestroySurfaceObject(d_imageInputTexture);
cudaGraphicsUnmapResources(1, &d_imageInputGraphicsResource);
cudaGraphicsUnregisterResource(d_imageInputGraphicsResource);
d_imageInputTexture = 0;
//Output image CUDA surface
cudaDestroySurfaceObject(d_imageOutputTexture);
cudaGraphicsUnmapResources(1, &d_imageOutputGraphicsResource);
cudaGraphicsUnregisterResource(d_imageOutputGraphicsResource);
d_imageOutputTexture = 0;
//Input image GL texture
glDeleteTextures(1, &imageInputTexture);
imageInputTexture = 0;
//Output image GL texture
glDeleteTextures(1, &imageOutputTexture);
imageOutputTexture = 0;
surfacesInitialized = false;
}
}
/** A way to initialize OpenGL with GLFW and GLAD */
void initGL(int windowWidth, int windowHeight) {
// Setup window
if (!glfwInit())
return;
// Decide GL+GLSL versions
#if __APPLE__
// GL 3.2 + GLSL 150
const char* glsl_version = "#version 150";
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // Required on Mac
#else
// GL 3.0 + GLSL 130
//const char* glsl_version = "#version 130";
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // 3.2+ only
//glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // 3.0+ only
#endif
// Create window with graphics context
currentGLFWWindow = glfwCreateWindow(windowWidth, windowHeight, "Output image (OpenGL + GLFW)", NULL, NULL);
if (currentGLFWWindow == NULL)
return;
glfwMakeContextCurrent(currentGLFWWindow);
glfwSwapInterval(3); // Enable vsync
if (!gladLoadGL()) {
// GLAD failed
printf( "GLAD failed to initialize :(" );
return;
}
//Change GL settings
glViewport(0, 0, windowWidth, windowHeight); // use a screen size of WIDTH x HEIGHT
glMatrixMode(GL_PROJECTION); // Make a simple 2D projection on the entire window
glLoadIdentity();
glOrtho(0.0, windowWidth, windowHeight, 0.0, 0.0, 100.0);
glMatrixMode(GL_MODELVIEW); // Set the matrix mode to object modeling
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClearDepth(0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear the window
}
/** Renders the textures on the GLFW window and requests GLFW to update */
void showTextures(GLuint top, GLuint bottom, int windowWidth, int windowHeight) {
// Clear color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW); // Operate on model-view matrix
glBindTexture(GL_TEXTURE_2D, top);
/* Draw top quad */
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(0, 0);
glTexCoord2i(0, 1); glVertex2i(0, windowHeight/2);
glTexCoord2i(1, 1); glVertex2i(windowWidth, windowHeight / 2);
glTexCoord2i(1, 0); glVertex2i(windowWidth, 0);
glEnd();
glDisable(GL_TEXTURE_2D);
/* Draw bottom quad */
glBindTexture(GL_TEXTURE_2D, bottom);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2i(0, 0); glVertex2i(0, windowHeight / 2);
glTexCoord2i(0, 1); glVertex2i(0, windowHeight);
glTexCoord2i(1, 1); glVertex2i(windowWidth, windowHeight);
glTexCoord2i(1, 0); glVertex2i(windowWidth, windowHeight / 2);
glEnd();
glDisable(GL_TEXTURE_2D);
glfwSwapBuffers(currentGLFWWindow);
glfwPollEvents();
}
int main() {
using namespace cv;
using namespace std;
// initGL();
std::string filename = "./lena.pgm";
Mat image;
image = imread(filename, CV_LOAD_IMAGE_COLOR); // Read the file
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
int windoww = 1280;
int windowh = 720;
initGL(windoww,windowh);
uint8_t *d_data;
cudaMalloc(&d_data, image.cols*image.rows*3);
Processor p;
for (int i = 0; i < image.cols; i++)
{
image.data[i*3+0] = 0;
image.data[i*3+1] = 0;
image.data[i*3+2] = 0;
//Process the image here
p.setInput(image.data, image.cols, image.rows);
p.processData(image.data, d_data);
showTextures(p.getInputTexture(), p.getOutputTexture(), windoww, windowh);
}
}
Notes:
The compilation command is given in the comment in the first line
I created a "video" of sorts using a single image. The "video" will show the image with a black or white line moving horizontally from left to right in the top pixel row of the image. The input image is lena.pgm which can be found in the CUDA samples (for example, at /usr/local/cuda-10.1/samples/3_Imaging/SobelFilter/data/lena.pgm).
It looks to me like you are "sharing" resources between OpenGL and CUDA. This doesn't look like the right map/unmap sequence to me, but it seems to be working, and it doesn't seem to be the focus of your question. I haven't spent any time investigating. I may have missed something.
I'm not suggesting this code is defect free or suitable for any particular purpose. It is mostly your code. I've modified it slightly to demonstrate some ideas described in the text.
There shouldn't be any visual difference in the output whether you compile with -DUSE_1 or not.
This is a useful feature that I first came across in https://www.3dgep.com/opengl-interoperability-with-cuda/, and I have improved upon it to use the latest CUDA APIs and flow. You can refer to these 2 functions in cudammf.
https://github.com/prabindh/cudammf/blob/5f93358784fcbaae7eea0850424c59d2ed057dab/cuda_postproces.cu#L119
https://github.com/prabindh/cudammf/blob/5f93358784fcbaae7eea0850424c59d2ed057dab/decoder3.cpp#L507
Basic working is as below:
Create a regular GL texture (GLTextureId) and register it for CUDA access via cudaGraphicsGLRegisterImage
Do some CUDA processing; the result is in a CUDA buffer
Use cudaMemcpyToArray to transfer between the above two device memories
If your output is coming from a Nvidia codec output, you should also refer to the AppDecGL sample in the Nvidia Video SDK (https://developer.nvidia.com/nvidia-video-codec-sdk).
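A rough sketch of that flow using the runtime interop API and the non-deprecated cudaMemcpy2DToArray (glTex, d_result, width and height are assumed names, not taken from the linked code):
// Sketch only: copy a CUDA device buffer into the array backing a registered GL texture.
// glTex is an existing GL_TEXTURE_2D with width x height RGBA8 texels;
// d_result is a device buffer holding width*4-byte rows.
cudaGraphicsResource_t res;
cudaGraphicsGLRegisterImage(&res, glTex, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);

cudaGraphicsMapResources(1, &res, 0);                   // make the texture usable by CUDA
cudaArray_t arr;
cudaGraphicsSubResourceGetMappedArray(&arr, res, 0, 0);
cudaMemcpy2DToArray(arr, 0, 0, d_result,
                    width * 4,                          // source pitch in bytes
                    width * 4, height,                  // copied region: bytes per row x rows
                    cudaMemcpyDeviceToDevice);
cudaGraphicsUnmapResources(1, &res, 0);                 // hand the texture back to OpenGL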

Mapping depth to color in OpenGL

I have code that maps depth values to the color camera image.
I used a RealSense ZR300 to capture (x, y, z) information.
My difficulty now is that I can't map the depth to color information.
From nearest to furthest, the color should change from one color to another.
I made two subplots and would like to plot the depth-colored image in the first subplot and the normal color image in the second subplot.
My code is as follow.
int main() try
{
// Create a context object. This object owns the handles to all connected realsense devices.
rs::context ctx;
printf("There are %d connected RealSense devices.\n", ctx.get_device_count());
if(ctx.get_device_count() == 0) return EXIT_FAILURE;
// This tutorial will access only a single device, but it is trivial to extend to multiple devices
rs::device * dev = ctx.get_device(0);
printf("\nUsing device 0, an %s\n", dev->get_name());
printf(" Serial number: %s\n", dev->get_serial());
printf(" Firmware version: %s\n", dev->get_firmware_version());
// Configure all streams to run at VGA resolution at 60 frames per second
dev->enable_stream(rs::stream::depth, 640, 480, rs::format::z16, 60);
dev->enable_stream(rs::stream::color, 640, 480, rs::format::rgb8, 60);
dev->start();
// Open a GLFW window to display our output
glfwInit();
GLFWwindow * win = glfwCreateWindow(1280, 480, "Depth & Color images", nullptr, nullptr);
glfwMakeContextCurrent(win);
while(!glfwWindowShouldClose(win))
{
// Wait for new frame data
glfwPollEvents();
dev->wait_for_frames();
glClear(GL_COLOR_BUFFER_BIT);
glPixelZoom(1, -1);
// Display depth data by linearly mapping depth between 0 and 2 meters to the red channel
glRasterPos2f(-1, 1);
rs::intrinsics depth_intrin = dev->get_stream_intrinsics(rs::stream::depth);
const uint16_t * depth_image = (const uint16_t *)dev->get_frame_data(rs::stream::depth);
float scale = dev->get_depth_scale();
glBegin(GL_POINTS);
for(int dy=0; dy<depth_intrin.height; ++dy)
{
for(int dx=0; dx<depth_intrin.width; ++dx)
{
uint16_t depth_value = depth_image[dy * depth_intrin.width + dx];
float depth_in_meters = depth_value * scale;
if(depth_value == 0) continue;
rs::float2 depth_pixel = {(float)dx, (float)dy};
rs::float3 depth_point = depth_intrin.deproject(depth_pixel, depth_in_meters);
//////////////////Here I need to plot Depth to Color////////////////////////
////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
}
}
glEnd();
//glPixelTransferf(GL_RED_SCALE, 0xFFFF * dev->get_depth_scale() / 2.0f);
//glDrawPixels(640, 480, GL_RED, GL_UNSIGNED_SHORT, dev->get_frame_data(rs::stream::depth));
//glPixelTransferf(GL_RED_SCALE, 1.0f);
// Display color image as RGB triples
glRasterPos2f(0, 1);
glDrawPixels(640, 480, GL_RGB, GL_UNSIGNED_BYTE, dev->get_frame_data(rs::stream::color));
glfwSwapBuffers(win);
}
return EXIT_SUCCESS;
}
catch(const rs::error & e)
{
// Method calls against librealsense objects may throw exceptions of type rs::error
printf("rs::error was thrown when calling %s(%s):\n", e.get_failed_function().c_str(), e.get_failed_args().c_str());
printf(" %s\n", e.what());
return EXIT_FAILURE;
}
I did as follows; I have x, y, d information and can map depth to the red channel up to 4 meters.
while(!glfwWindowShouldClose(win))
{
// Wait for new frame data
glfwPollEvents();
dev->wait_for_frames();
//FPS
auto t1 = std::chrono::high_resolution_clock::now();
time += std::chrono::duration<float>(t1-t0).count();
t0 = t1;
++frames;
if(time > 0.5f)
{
fps = frames / time;
frames = 0;
time = 0;
}
//
glClear(GL_COLOR_BUFFER_BIT);
glPixelZoom(1, -1);
// Display depth data by linearly mapping depth between 0 and 2 meters to the red channel
glRasterPos2f(-1, 1);
rs::intrinsics depth_intrin = dev->get_stream_intrinsics(rs::stream::depth);
const uint16_t * depth_image = (const uint16_t *)dev->get_frame_data(rs::stream::depth);
float scale = dev->get_depth_scale();
for(int dy=0; dy<depth_intrin.height; ++dy)
{
for(int dx=0; dx<depth_intrin.width; ++dx)
{
uint16_t depth_value = depth_image[dy * depth_intrin.width + dx];
float depth_in_meters = depth_value * scale;
}
}
glPixelTransferf(GL_RED_SCALE, 0xFFFF * dev->get_depth_scale() / 4.0f);
glDrawPixels(640, 480, GL_RED, GL_UNSIGNED_SHORT, dev->get_frame_data(rs::stream::depth));
glPixelTransferf(GL_RED_SCALE, 1.0f);
// glDrawPixels( 640, 480, GL_RGB, GL_UNSIGNED_INT, data );
// Display color image as RGB triples
glRasterPos2f(0, 1);
glDrawPixels(640, 480, GL_RGB, GL_UNSIGNED_BYTE, dev->get_frame_data(rs::stream::color));
//ss.str(""); ss << fps << " FPS";
//printf("FPS %s\n", ss.str().c_str());
draw_text(0, 0, ss.str().c_str());
glfwSwapBuffers(win);
}
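One possible way to fill in the mapping step is to build an RGB buffer from the depth image on the CPU and draw that instead of relying on glPixelTransferf. A sketch, assuming the same depth_intrin, depth_image and scale variables as above (the blue-to-red ramp and the 4 m range are arbitrary choices):
// Sketch only: convert each depth sample to an RGB color (near = blue, far = red)
// and draw the resulting buffer instead of the raw 16-bit depth image.
// Needs <vector> and <algorithm>.
std::vector<uint8_t> depth_rgb(depth_intrin.width * depth_intrin.height * 3, 0);
for (int i = 0; i < depth_intrin.width * depth_intrin.height; ++i)
{
    float depth_in_meters = depth_image[i] * scale;
    if (depth_in_meters <= 0.0f) continue;                  // keep invalid pixels black
    float t = std::min(depth_in_meters / 4.0f, 1.0f);       // normalize over 0..4 m
    depth_rgb[i * 3 + 0] = (uint8_t)(255.0f * t);           // red grows with distance
    depth_rgb[i * 3 + 2] = (uint8_t)(255.0f * (1.0f - t));  // blue fades with distance
}
glRasterPos2f(-1, 1);
glDrawPixels(depth_intrin.width, depth_intrin.height,
             GL_RGB, GL_UNSIGNED_BYTE, depth_rgb.data());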

Texture Mapping a square image onto a circle OpenGl

I am trying to map a square image of a clock face onto a circle GL_POLYGON that I have created. I am currently using the following code:
float angle, radian, x, y, xcos, ysin, tx, ty;
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, an_face_texture1);
glBegin(GL_POLYGON);
for (angle=0.0; angle<360.0; angle+=2.0)
{
radian = angle * (pi/180.0f);
xcos = (float)cos(radian);
ysin = (float)sin(radian);
x = xcos * radius;
y = ysin * radius;
tx = (x/radius + 1)*0.5;
ty = (y/radius + 1)*0.5;
glTexCoord2f(tx, ty);
glVertex2f(x, y);
}
glEnd();
glDisable(GL_TEXTURE_2D);
However, when I do this I end up with a weird overlapping image effect, as shown here: [screenshot]. The original texture image is [image of the clock face]; however, the corners are cut out and it is in PNG format. This way of generating the texture coordinates was taken from a previous answer: HERE
Below is the code used to load the image:
#ifndef PNGLOAD_H
#include <png.h>
#include <stdlib.h>
int png_load(const char* file_name,
int* width,
int* height,
char** image_data_ptr)
{
png_byte header[8];
FILE* fp = fopen(file_name, "rb");
if (fp == 0)
{
fprintf(stderr, "error: could not open PNG file %s\n", file_name);
perror(file_name);
return 0;
}
// read the header
fread(header, 1, 8, fp);
if (png_sig_cmp(header, 0, 8))
{
fprintf(stderr, "error: %s is not a PNG.\n", file_name);
fclose(fp);
return 0;
}
png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
{
fprintf(stderr, "error: png_create_read_struct returned 0.\n");
fclose(fp);
return 0;
}
// create png info struct
png_infop info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr)
{
fprintf(stderr, "error: png_create_info_struct returned 0.\n");
png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL);
fclose(fp);
return 0;
}
// create png info struct
png_infop end_info = png_create_info_struct(png_ptr);
if (!end_info)
{
fprintf(stderr, "error: png_create_info_struct returned 0.\n");
png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp) NULL);
fclose(fp);
return 0;
}
// the code in this if statement gets called if libpng encounters an error
if (setjmp(png_jmpbuf(png_ptr))) {
fprintf(stderr, "error from libpng\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
fclose(fp);
return 0;
}
// init png reading
png_init_io(png_ptr, fp);
// let libpng know you already read the first 8 bytes
png_set_sig_bytes(png_ptr, 8);
// read all the info up to the image data
png_read_info(png_ptr, info_ptr);
// variables to pass to get info
int bit_depth, color_type;
png_uint_32 temp_width, temp_height;
// get info about png
png_get_IHDR(png_ptr, info_ptr, &temp_width, &temp_height, &bit_depth, &color_type,
NULL, NULL, NULL);
if (width) { *width = temp_width; }
if (height){ *height = temp_height; }
// Update the png info struct.
png_read_update_info(png_ptr, info_ptr);
// Row size in bytes.
int rowbytes = png_get_rowbytes(png_ptr, info_ptr);
// glTexImage2d requires rows to be 4-byte aligned
rowbytes += 3 - ((rowbytes-1) % 4);
// Allocate the image_data as a big block, to be given to opengl
png_byte* image_data;
image_data = (png_byte*)malloc(rowbytes * temp_height * sizeof(png_byte)+15);
if (image_data == NULL)
{
fprintf(stderr, "error: could not allocate memory for PNG image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
fclose(fp);
return 0;
}
// row_pointers is for pointing to image_data for reading the png with libpng
png_bytep* row_pointers = (png_bytep*)malloc(temp_height * sizeof(png_bytep));
if (row_pointers == NULL)
{
fprintf(stderr, "error: could not allocate memory for PNG row pointers\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
free(image_data);
fclose(fp);
return 0;
}
// set the individual row_pointers to point at the correct offsets of image_data
int i;
for (i = 0; i < temp_height; i++)
{
row_pointers[temp_height - 1 - i] = image_data + i * rowbytes;
}
// read the png into image_data through row_pointers
png_read_image(png_ptr, row_pointers);
// clean up
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
//free(image_data);
*image_data_ptr = (char*)image_data; // return data pointer
free(row_pointers);
fclose(fp);
fprintf(stderr, "\t texture image size is %d x %d\n", *width, *height);
return 1;
}
#endif
and:
unsigned int load_and_bind_texture(const char* filename)
{
char* image_buffer = NULL; // the image data
int width = 0;
int height = 0;
// read in the PNG image data into image_buffer
if (png_load(filename, &width, &height, &image_buffer)==0)
{
fprintf(stderr, "Failed to read image texture from %s\n", filename);
exit(1);
}
unsigned int tex_handle = 0;
// request one texture handle
glGenTextures(1, &tex_handle);
// create a new texture object and bind it to tex_handle
glBindTexture(GL_TEXTURE_2D, tex_handle);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0,
GL_RGB, width, height, 0,
GL_RGB, GL_UNSIGNED_BYTE, image_buffer);
free(image_buffer); // free the image buffer memory
return tex_handle;
}
these are then called from the init() method:
background_texture = load_and_bind_texture("images/office-wall.png");
an_face_texture1 = load_and_bind_texture("images/clock.png");
the image is loaded in the same way the background is loaded.
Yes, and that is almost certainly the problem. While both images are PNGs, they are almost certainly not the same format.
Let's actually debug what you see in the loaded texture. You see 2 overlapping with 10. 3 overlapped with 9. 8 overlapped with 4. All interlaced with each other. And this pattern repeats 3 times.
It's as if you took the original image, folded it over itself vertically, and then repeated it. 3 times.
The repetition of "3" in this strongly suggests a mismatch between what libPNG actually read and what you told OpenGL the texel data actually was. You told OpenGL that the texture was in the RGB format, 3 bytes per pixel.
But not every PNG is formatted that way. Some PNGs are greyscale; one byte per pixel. And because you used the low-level libPNG reading interface, you read the exact format of the pixel data from the PNG. Yes, it decompresses it. But you're reading exactly what the PNG stored conceptually.
So if the PNG is a greyscale PNG, your call to png_read_image can read data that isn't 3 bytes per pixel. But you told OpenGL that the data was 3 bytes per pixel. So if libPNG wrote 1 byte per pixel, you will be giving OpenGL the wrong texel data.
That's bad.
If you're going to use libPNG's low-level reading routines, then you must actually check the format of the PNG being read and adjust your OpenGL code to match.
It would be much easier to use the higher-level reading routines and explicitly tell libPNG to translate grayscale to RGB.
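A minimal sketch of that higher-level approach with libpng transforms (it assumes the same png_ptr/info_ptr setup, setjmp error handling, png_init_io and png_set_sig_bytes calls as in the question; the buffer handling here is simplified):
// Sketch only: let libpng expand palette/grayscale data to 8-bit RGB so the
// buffer always matches the GL_RGB / GL_UNSIGNED_BYTE parameters of glTexImage2D.
png_read_png(png_ptr, info_ptr,
             PNG_TRANSFORM_STRIP_16 |       // 16-bit channels -> 8-bit
             PNG_TRANSFORM_STRIP_ALPHA |    // drop alpha so rows are 3 bytes per pixel
             PNG_TRANSFORM_EXPAND |         // palette / sub-byte gray -> full bytes
             PNG_TRANSFORM_GRAY_TO_RGB,     // grayscale -> RGB
             NULL);

png_uint_32 width  = png_get_image_width(png_ptr, info_ptr);
png_uint_32 height = png_get_image_height(png_ptr, info_ptr);
size_t rowbytes    = png_get_rowbytes(png_ptr, info_ptr);
png_bytep* rows    = png_get_rows(png_ptr, info_ptr);

// Copy the rows into one contiguous, bottom-up buffer for OpenGL.
png_byte* image_data = (png_byte*)malloc(rowbytes * height);
for (png_uint_32 i = 0; i < height; i++)
    memcpy(image_data + (height - 1 - i) * rowbytes, rows[i], rowbytes);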

Saving the openGL context as a video output

I am currently trying to save the animation made in OpenGL to a video file. I have tried using OpenCV's videowriter but to no avail. I have successfully been able to generate a snapshot and save it as a BMP using the SDL library. If I save all snapshots and then generate the video using ffmpeg, that amounts to collecting 4 GB worth of images. Not practical.
How can I write video frames directly during rendering?
Here the code i use to take snapshots when I require:
void snapshot(){
SDL_Surface* snap = SDL_CreateRGBSurface(SDL_SWSURFACE,WIDTH,HEIGHT,24, 0x000000FF, 0x0000FF00, 0x00FF0000, 0);
char * pixels = new char [3 *WIDTH * HEIGHT];
glReadPixels(0, 0,WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, pixels);
for (int i = 0 ; i <HEIGHT ; i++)
std::memcpy( ((char *) snap->pixels) + snap->pitch * i, pixels + 3 * WIDTH * (HEIGHT-i - 1), WIDTH*3 );
delete [] pixels;
SDL_SaveBMP(snap, "snapshot.bmp");
SDL_FreeSurface(snap);
}
I need the video output. I have discovered that ffmpeg can be used to create videos from C++ code but have not been able to figure out the process. Please help!
EDIT: I have tried using OpenCV's CvVideoWriter class, but the program crashes ("segmentation fault") the moment it is declared. Compilation shows no errors, of course. Any suggestions?
SOLUTION FOR PYTHON USERS (requires Python 2.7, python-imaging, python-opengl, python-opencv, and codecs for the format you want to write to; I am on Ubuntu 14.04 64-bit):
def snap():
    pixels=[]
    screenshot = glReadPixels(0,0,W,H,GL_RGBA,GL_UNSIGNED_BYTE)
    snapshot = Image.frombuffer("RGBA",(W,H),screenshot,"raw","RGBA",0,0)
    snapshot.save(os.path.dirname(videoPath) + "/temp.jpg")
    load = cv2.cv.LoadImage(os.path.dirname(videoPath) + "/temp.jpg")
    cv2.cv.WriteFrame(videoWriter,load)
Here W and H are the window dimensions (width, height). What is happening is that I am using PIL to convert the raw pixels read by the glReadPixels command into a JPEG image. I am loading that JPEG into an OpenCV image and writing it to the videowriter. I was having certain issues with passing the PIL image directly to the videowriter (which would save millions of clock cycles of I/O), but right now I am not working on that. Image is a PIL module; cv2 is a python-opencv module.
It sounds as though you are using the command line utility: ffmpeg. Rather than using the command-line to encode video from a collection of still images, you should use libavcodec and libavformat. These are the libraries upon which ffmpeg is actually built, and will allow you to encode video and store it in a standard stream/interchange format (e.g. RIFF/AVI) without using a separate program.
You probably will not find a lot of tutorials on implementing this because it has traditionally been the case that people wanted to use ffmpeg to go the other way; that is, decode various video formats for display in OpenGL. I think this is going to change very soon with the introduction of gameplay video encoding on the PS4 and Xbox One consoles; demand for this functionality will suddenly skyrocket.
The general process is this, however:
Pick a container format and CODEC
Often one will decide the other, (e.g. MPEG-2 + MPEG Program Stream)
Start filling a buffer with your still frames
Periodically encode your buffer of still frames and write to your output (packet writing in MPEG terms)
You will do this either when the buffer becomes full, or every n-many ms; you might prefer one over the other depending on whether you want to stream your video live or not.
When your program terminates flush the buffer and close your stream
One nice thing about this is you do not actually need to write to a file. Since you are periodically encoding packets of data from your buffer of still frames, you can stream your encoded video over a network if you want - this is why codec and container (interchange) format are separate.
Another nice thing is you do not have to synchronize the CPU and GPU, you can setup a pixel buffer object and have OpenGL copy data into CPU memory a couple of frames behind the GPU. This makes real-time encoding of video much less demanding, you only have to encode and flush the video to disk or over the network periodically if video latency demands are not unreasonable. This works very well in real-time rendering, since you have a large enough pool of data to keep a CPU thread busy encoding at all times.
Encoding frames can even be done in real-time on the GPU provided enough storage for a large buffer of frames (since ultimately the encoded data has to be copied from GPU to CPU and you want to do this as infrequently as possible). Obviously this is not done using ffmpeg, there are specialized libraries using CUDA / OpenCL / compute shaders for this purpose. I have never used them, but they do exist.
For portability's sake, you should stick with libavcodec and Pixel Buffer Objects for the asynchronous GPU->CPU copy. CPUs these days have enough cores that you can probably get away without GPU-assisted encoding if you buffer enough frames and encode in multiple simultaneous threads (a rough sketch of that split follows below; it creates added synchronization overhead and increased latency when outputting encoded video) or simply drop frames / lower the resolution (the poor man's solution).
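A rough sketch of that producer/consumer split, assuming POSIX threads: the render thread pushes copies of the read-back frames into a small ring buffer and a dedicated encoder thread drains it. push_frame(), finish_pushing() and encode_frame_somehow() are made-up names, not part of any library:
/* Producer/consumer sketch: render thread -> ring buffer -> encoder thread. */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define FRAME_BYTES (800 * 600 * 4)
#define RING_SIZE   8

static uint8_t ring[RING_SIZE][FRAME_BYTES];
static int head = 0, tail = 0, count = 0, done = 0;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_empty = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  not_full  = PTHREAD_COND_INITIALIZER;

/* Hypothetical stand-in for the real libavcodec encode + packet write. */
static void encode_frame_somehow(const uint8_t *pixels) { (void)pixels; }

/* Called by the render thread once per frame (blocks if the encoder lags). */
void push_frame(const uint8_t *pixels) {
    pthread_mutex_lock(&lock);
    while (count == RING_SIZE)
        pthread_cond_wait(&not_full, &lock);
    memcpy(ring[head], pixels, FRAME_BYTES);
    head = (head + 1) % RING_SIZE;
    count++;
    pthread_cond_signal(&not_empty);
    pthread_mutex_unlock(&lock);
}

/* Called by the render thread when no more frames will be pushed. */
void finish_pushing(void) {
    pthread_mutex_lock(&lock);
    done = 1;
    pthread_cond_broadcast(&not_empty);
    pthread_mutex_unlock(&lock);
}

/* Encoder thread body: drains the ring until the render thread is finished. */
void *encoder_thread(void *arg) {
    static uint8_t scratch[FRAME_BYTES];  /* local copy so the slot can be reused */
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&lock);
        while (count == 0 && !done)
            pthread_cond_wait(&not_empty, &lock);
        if (count == 0 && done) {
            pthread_mutex_unlock(&lock);
            break;
        }
        memcpy(scratch, ring[tail], FRAME_BYTES);
        tail = (tail + 1) % RING_SIZE;
        count--;
        pthread_cond_signal(&not_full);
        pthread_mutex_unlock(&lock);
        encode_frame_somehow(scratch);   /* heavy work happens outside the lock */
    }
    return NULL;
}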
There are a lot of concepts covered here that go well beyond the scope of SDL, but you did ask how to do this with better performance than your current solution. In short, use OpenGL Pixel Buffer Objects to transfer data, and libavcodec for encoding. An example application that encodes video can be found on the ffmpeg libavcodec examples page.
For a quick test, something like the code below works (tested); resizable windows are not handled.
#include <stdio.h>
FILE *avconv = NULL;
...
/* initialize */
avconv = popen("avconv -y -f rawvideo -s 800x600 -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 1000k test.mp4", "w");
...
/* save */
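/* pixels is assumed to point to an 800*600*3-byte RGB buffer allocated elsewhere, e.g. static unsigned char pixels[800*600*3]; */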
glReadPixels(0, 0, 800, 600, GL_RGB, GL_UNSIGNED_BYTE, pixels);
if (avconv)
fwrite(pixels, 800*600*3, 1, avconv);
...
/* term */
if (avconv)
pclose(avconv);
Runnable mpg example with FFmpeg 2.7
Explanation and a superset example at: How to use GLUT/OpenGL to render to a file?
Consider https://github.com/FFmpeg/FFmpeg/blob/n3.0/doc/examples/muxing.c to generate a contained format.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <GL/glext.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
enum Constants { SCREENSHOT_MAX_FILENAME = 256 };
static GLubyte *pixels = NULL;
static GLuint fbo;
static GLuint rbo_color;
static GLuint rbo_depth;
static const unsigned int HEIGHT = 100;
static const unsigned int WIDTH = 100;
static int offscreen = 1;
static unsigned int max_nframes = 100;
static unsigned int nframes = 0;
static unsigned int time0;
/* Model. */
static double angle;
static double delta_angle;
/* Adapted from: https://github.com/cirosantilli/cpp-cheat/blob/19044698f91fefa9cb75328c44f7a487d336b541/ffmpeg/encode.c */
static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
static struct SwsContext *sws_context = NULL;
static uint8_t *rgb = NULL;
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
const int in_linesize[1] = { 4 * c->width };
sws_context = sws_getCachedContext(sws_context,
c->width, c->height, AV_PIX_FMT_RGB32,
c->width, c->height, AV_PIX_FMT_YUV420P,
0, NULL, NULL, NULL);
sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
c->height, frame->data, frame->linesize);
}
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
AVCodec *codec;
int ret;
avcodec_register_all();
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
c->bit_rate = 400000;
c->width = width;
c->height = height;
c->time_base.num = 1;
c->time_base.den = fps;
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
file = fopen(filename, "wb");
if (!file) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
}
void ffmpeg_encoder_finish(void) {
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
int got_output, ret;
do {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
} while (got_output);
fwrite(endcode, 1, sizeof(endcode), file);
fclose(file);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
}
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
int ret, got_output;
ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
}
void ffmpeg_encoder_glread_rgb(uint8_t **rgb, GLubyte **pixels, unsigned int width, unsigned int height) {
size_t i, j, k, cur_gl, cur_rgb, nvals;
const size_t format_nchannels = 4;
nvals = format_nchannels * width * height;
*pixels = realloc(*pixels, nvals * sizeof(GLubyte));
*rgb = realloc(*rgb, nvals * sizeof(uint8_t));
/* Get RGBA to align to 32 bits instead of just 24 for RGB. May be faster for FFmpeg. */
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, *pixels);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
cur_gl = format_nchannels * (width * (height - i - 1) + j);
cur_rgb = format_nchannels * (width * i + j);
for (k = 0; k < format_nchannels; k++)
(*rgb)[cur_rgb + k] = (*pixels)[cur_gl + k];
}
}
}
static int model_init(void) {
angle = 0;
delta_angle = 1;
return 0;
}
static int model_update(void) {
angle += delta_angle;
return 0;
}
static int model_finished(void) {
return nframes >= max_nframes;
}
static void init(void) {
int glget;
if (offscreen) {
/* Framebuffer */
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
/* Color renderbuffer. */
glGenRenderbuffers(1, &rbo_color);
glBindRenderbuffer(GL_RENDERBUFFER, rbo_color);
/* Storage must be one of: */
/* GL_RGBA4, GL_RGB565, GL_RGB5_A1, GL_DEPTH_COMPONENT16, GL_STENCIL_INDEX8. */
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB565, WIDTH, HEIGHT);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbo_color);
/* Depth renderbuffer. */
glGenRenderbuffers(1, &rbo_depth);
glBindRenderbuffer(GL_RENDERBUFFER, rbo_depth);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, WIDTH, HEIGHT);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rbo_depth);
glReadBuffer(GL_COLOR_ATTACHMENT0);
/* Sanity check. */
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
glGetIntegerv(GL_MAX_RENDERBUFFER_SIZE, &glget);
assert(WIDTH <= (unsigned int)glget && HEIGHT <= (unsigned int)glget);
} else {
glReadBuffer(GL_BACK);
}
glClearColor(0.0, 0.0, 0.0, 0.0);
glEnable(GL_DEPTH_TEST);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glViewport(0, 0, WIDTH, HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
time0 = glutGet(GLUT_ELAPSED_TIME);
model_init();
ffmpeg_encoder_start("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO, 25, WIDTH, HEIGHT);
}
static void deinit(void) {
printf("FPS = %f\n", 1000.0 * nframes / (double)(glutGet(GLUT_ELAPSED_TIME) - time0));
free(pixels);
ffmpeg_encoder_finish();
free(rgb);
if (offscreen) {
glDeleteFramebuffers(1, &fbo);
glDeleteRenderbuffers(1, &rbo_color);
glDeleteRenderbuffers(1, &rbo_depth);
}
}
static void draw_scene(void) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glRotatef(angle, 0.0f, 0.0f, -1.0f);
glBegin(GL_TRIANGLES);
glColor3f(1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.5f, 0.0f);
glColor3f(0.0f, 1.0f, 0.0f);
glVertex3f(-0.5f, -0.5f, 0.0f);
glColor3f(0.0f, 0.0f, 1.0f);
glVertex3f( 0.5f, -0.5f, 0.0f);
glEnd();
}
static void display(void) {
char extension[SCREENSHOT_MAX_FILENAME];
char filename[SCREENSHOT_MAX_FILENAME];
draw_scene();
if (offscreen) {
glFlush();
} else {
glutSwapBuffers();
}
frame->pts = nframes;
ffmpeg_encoder_glread_rgb(&rgb, &pixels, WIDTH, HEIGHT);
ffmpeg_encoder_encode_frame(rgb);
nframes++;
if (model_finished())
exit(EXIT_SUCCESS);
}
static void idle(void) {
while (model_update());
glutPostRedisplay();
}
int main(int argc, char **argv) {
GLint glut_display;
glutInit(&argc, argv);
if (argc > 1)
offscreen = 0;
if (offscreen) {
/* TODO: if we use anything smaller than the window, it only renders a smaller version of things. */
/*glutInitWindowSize(50, 50);*/
glutInitWindowSize(WIDTH, HEIGHT);
glut_display = GLUT_SINGLE;
} else {
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(100, 100);
glut_display = GLUT_DOUBLE;
}
glutInitDisplayMode(glut_display | GLUT_RGBA | GLUT_DEPTH);
glutCreateWindow(argv[0]);
if (offscreen) {
/* TODO: if we hide the window the program blocks. */
/*glutHideWindow();*/
}
init();
glutDisplayFunc(display);
glutIdleFunc(idle);
atexit(deinit);
glutMainLoop();
return EXIT_SUCCESS;
}
I solved writing a video file from Python OpenGL in the following way:
In the main section, setup the video file to write to:
#Set up video (cv2, numpy and PIL.Image are used below):
import cv2
import numpy as np
from PIL import Image

width=640
height=480
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#Open video output file:
out = cv2.VideoWriter('videoout.mp4',fourcc, 20.0, (width,height))
And in the DisplayFunction:
#Read frame:
screenshot = glReadPixels(0,0,width,height,GL_RGB,GL_UNSIGNED_BYTE)
#Convert from binary to cv2 numpy array:
snapshot = Image.frombuffer("RGB",(width,height),screenshot,"raw","RGB",0,0)
snapshot= np.array(snapshot)
snapshot=cv2.flip(snapshot,0)
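#Note: cv2.VideoWriter expects BGR channel order; if the colors come out swapped, convert first with snapshot = cv2.cvtColor(snapshot, cv2.COLOR_RGB2BGR)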
#write frame to video file:
out.write(snapshot)
if (...): #End movie
    glutLeaveMainLoop()
    out.release()
    print("Exit")
This writes to "videoout.mp4". Note that out.release() at the end is needed to get a proper mp4 file.