Texture mapping a square image onto a circle in OpenGL - opengl

I am trying to map a square image of a clock face onto a circular GL_POLYGON that I have created. I am currently using the following code:
float angle, radian, x, y, xcos, ysin, tx, ty;
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, an_face_texture1);
glBegin(GL_POLYGON);
for (angle = 0.0; angle < 360.0; angle += 2.0)
{
    radian = angle * (pi / 180.0f);   // degrees to radians
    xcos = (float)cos(radian);
    ysin = (float)sin(radian);
    x = xcos * radius;
    y = ysin * radius;
    // map the [-radius, radius] circle onto texture space [0, 1]
    tx = (x / radius + 1) * 0.5;
    ty = (y / radius + 1) * 0.5;
    glTexCoord2f(tx, ty);
    glVertex2f(x, y);
}
glEnd();
glDisable(GL_TEXTURE_2D);
However, when I do it I end up with a weird overlapping image effect (screenshot omitted). The original texture image is a square clock face, but the corners are cut out and it is in PNG format. This way of generating the texture coordinates is taken from a previous answer: HERE
Below is the code used to load the image:
#ifndef PNGLOAD_H
#include <png.h>
#include <stdlib.h>
int png_load(const char* file_name,
int* width,
int* height,
char** image_data_ptr)
{
png_byte header[8];
FILE* fp = fopen(file_name, "rb");
if (fp == 0)
{
fprintf(stderr, "erro: could not open PNG file %s\n", file_name);
perror(file_name);
return 0;
}
// read the header
fread(header, 1, 8, fp);
if (png_sig_cmp(header, 0, 8))
{
fprintf(stderr, "error: %s is not a PNG.\n", file_name);
fclose(fp);
return 0;
}
png_structp png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
{
fprintf(stderr, "error: png_create_read_struct returned 0.\n");
fclose(fp);
return 0;
}
// create png info struct
png_infop info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr)
{
fprintf(stderr, "error: png_create_info_struct returned 0.\n");
png_destroy_read_struct(&png_ptr, (png_infopp)NULL, (png_infopp)NULL);
fclose(fp);
return 0;
}
// create png info struct
png_infop end_info = png_create_info_struct(png_ptr);
if (!end_info)
{
fprintf(stderr, "error: png_create_info_struct returned 0.\n");
png_destroy_read_struct(&png_ptr, &info_ptr, (png_infopp) NULL);
fclose(fp);
return 0;
}
// the code in this if statement gets called if libpng encounters an error
if (setjmp(png_jmpbuf(png_ptr))) {
fprintf(stderr, "error from libpng\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
fclose(fp);
return 0;
}
// init png reading
png_init_io(png_ptr, fp);
// let libpng know you already read the first 8 bytes
png_set_sig_bytes(png_ptr, 8);
// read all the info up to the image data
png_read_info(png_ptr, info_ptr);
// variables to pass to get info
int bit_depth, color_type;
png_uint_32 temp_width, temp_height;
// get info about png
png_get_IHDR(png_ptr, info_ptr, &temp_width, &temp_height, &bit_depth, &color_type,
NULL, NULL, NULL);
if (width) { *width = temp_width; }
if (height){ *height = temp_height; }
// Update the png info struct.
png_read_update_info(png_ptr, info_ptr);
// Row size in bytes.
int rowbytes = png_get_rowbytes(png_ptr, info_ptr);
// glTexImage2d requires rows to be 4-byte aligned
rowbytes += 3 - ((rowbytes-1) % 4);
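// e.g. a 3-texel RGB row is 9 bytes and is rounded up to 12 here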
// Allocate the image_data as a big block, to be given to opengl
png_byte* image_data;
image_data = (png_byte*)malloc(rowbytes * temp_height * sizeof(png_byte)+15);
if (image_data == NULL)
{
fprintf(stderr, "error: could not allocate memory for PNG image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
fclose(fp);
return 0;
}
// row_pointers is for pointing to image_data for reading the png with libpng
png_bytep* row_pointers = (png_bytep*)malloc(temp_height * sizeof(png_bytep));
if (row_pointers == NULL)
{
fprintf(stderr, "error: could not allocate memory for PNG row pointers\n");
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
free(image_data);
fclose(fp);
return 0;
}
// set the individual row_pointers to point at the correct offsets of image_data
int i;
for (i = 0; i < temp_height; i++)
{
row_pointers[temp_height - 1 - i] = image_data + i * rowbytes;
}
// read the png into image_data through row_pointers
png_read_image(png_ptr, row_pointers);
// clean up
png_destroy_read_struct(&png_ptr, &info_ptr, &end_info);
//free(image_data);
*image_data_ptr = (char*)image_data; // return data pointer
free(row_pointers);
fclose(fp);
fprintf(stderr, "\t texture image size is %d x %d\n", *width, *height);
return 1;
}
#endif
and:
unsigned int load_and_bind_texture(const char* filename)
{
char* image_buffer = NULL; // the image data
int width = 0;
int height = 0;
// read in the PNG image data into image_buffer
if (png_load(filename, &width, &height, &image_buffer)==0)
{
fprintf(stderr, "Failed to read image texture from %s\n", filename);
exit(1);
}
unsigned int tex_handle = 0;
// request one texture handle
glGenTextures(1, &tex_handle);
// create a new texture object and bind it to tex_handle
glBindTexture(GL_TEXTURE_2D, tex_handle);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glTexImage2D(GL_TEXTURE_2D, 0,
GL_RGB, width, height, 0,
GL_RGB, GL_UNSIGNED_BYTE, image_buffer);
free(image_buffer); // free the image buffer memory
return tex_handle;
}
these are then called from the init() method:
background_texture = load_and_bind_texture("images/office-wall.png");
an_face_texture1 = load_and_bind_texture("images/clock.png");

the image is loaded in the same way the background is loaded.
Yes, and that is almost certainly the problem. While both images are PNGs, they are almost certainly not the same format.
Let's actually debug what you see in the loaded texture. You see 2 overlapping with 10, 3 overlapping with 9, 8 overlapping with 4, all interlaced with each other. And this pattern repeats 3 times.
It's as if you took the original image, folded it over itself vertically, and then repeated it 3 times.
The repetition of 3 here strongly suggests a mismatch between what libPNG actually read and what you told OpenGL the texel data was. You told OpenGL that the texture was in the RGB format: 3 bytes per pixel.
But not every PNG is formatted that way. Some PNGs are greyscale: one byte per pixel. And because you used the low-level libPNG reading interface, you read the exact format of the pixel data from the PNG. Yes, it decompresses it, but you're reading exactly what the PNG stored, conceptually.
So if the PNG is a greyscale PNG, your call to png_read_image can return data that isn't 3 bytes per pixel. But you told OpenGL that the data was 3 bytes per pixel. So if libPNG wrote 1 byte per pixel, you will be giving OpenGL the wrong texel data.
That's bad.
If you're going to use libPNG's low-level reading routines, then you must actually check the format of the PNG being read and adjust your OpenGL code to match.
It would be much easier to use the higher-level reading routines and explicitly tell libPNG to translate grayscale to RGB.
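If you do stay with the low-level interface, libPNG's standard transform calls can normalize everything to 8-bit RGB before you read. A minimal sketch, using the color_type and bit_depth values the loader above already gets from png_get_IHDR (the calls go after png_get_IHDR and before png_read_update_info):
// Ask libPNG to normalize everything to 8-bit RGB before reading.
if (color_type == PNG_COLOR_TYPE_PALETTE)
    png_set_palette_to_rgb(png_ptr);             // expand palette entries to RGB
if (color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8)
    png_set_expand_gray_1_2_4_to_8(png_ptr);     // widen sub-byte grey to 8 bits
if (color_type == PNG_COLOR_TYPE_GRAY ||
    color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
    png_set_gray_to_rgb(png_ptr);                // greyscale -> RGB
if (bit_depth == 16)
    png_set_strip_16(png_ptr);                   // 16-bit channels -> 8-bit
// After png_read_update_info(), png_get_rowbytes() reflects the converted
// layout, so the GL_RGB upload matches the data (use GL_RGBA if the file
// keeps an alpha channel).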

Related

Save image data to sqlite

I have a function which loads an image from file and successfully creates an opengl texture from it.
/**
 * @brief Loads a texture from file and generates an OpenGL texture from it.
 *
 * @param filename Path to the image file.
 * @param out_texture Texture id the results are bound to.
 * @param out_width Value pointer the resulting image width is written to.
 * @param out_height Value pointer the resulting image height is written to.
 * @param flip_image Stb indicator for flipping the image.
 * @return true Image has been successfully loaded.
 * @return false Failed loading the image.
 */
bool LoadTextureFromFile(const char *filename, GLuint *out_texture, int *out_width, int *out_height, bool flip_image = false)
{
// Load from file
int image_width = 0;
int image_height = 0;
stbi_set_flip_vertically_on_load(flip_image);
unsigned char *image_data = stbi_load(filename, &image_width, &image_height, NULL, 4);
if (image_data == NULL)
{
std::cout << "ERROR::Tools::GLHelper::LoadTextureFromFile - Failed to load image from file '" << filename << "'." << std::endl;
stbi_image_free(image_data);
return false;
}
// Create an OpenGL texture identifier
GLuint image_texture;
glGenTextures(1, &image_texture);
glBindTexture(GL_TEXTURE_2D, image_texture);
// Set texture wrapping parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// Set texture filtering parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image_width, image_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, image_data);
glGenerateMipmap(GL_TEXTURE_2D);
*out_texture = image_texture;
*out_width = image_width;
*out_height = image_height;
stbi_image_free(image_data);
return true;
}
What I am trying to do is to load an image via stbi_load like above and save it as BLOB to sqlite. Afterwards I want to be able to load the very same blob and create an opengl texture from it in a separate function.
In the first step I created a function which only loads the image:
unsigned char *ImageDataFromFile(const char *filename)
{
int image_width = 0;
int image_height = 0;
unsigned char *image_data = stbi_load(filename, &image_width, &image_height, NULL, 4);
if (image_data == NULL)
{
std::cout << "ERROR::Tools::GLHelper::LoadTextureFromFile - Failed to load image from file '" << filename << "'." << std::endl;
stbi_image_free(image_data);
}
return image_data;
}
In the next step I want to store this data into my sqlite database:
void DBConnector::AddImage(std::string name, unsigned char *data)
{
sqlite3_stmt *stmt;
int err = sqlite3_prepare_v2(db, "INSERT INTO images (name, img_data) VALUES (?, ?)", -1, &stmt, NULL);
if (err != SQLITE_OK)
{
std::cout << "ERROR::DATA::DBConnector - Failed to prepare sqlite3 statement: \n"
<< sqlite3_errmsg(db) << std::endl;
}
sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_TRANSIENT);
sqlite3_bind_blob(stmt, 2, data, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
return;
}
Finally I connect the pieces:
unsigned char *image_data = Tools::FileHelper::ImageDataFromFile(selected_filepath.c_str());
db->AddImage("Foo", image_data);
What happens is that seemingly arbitrary data ends up in the database, which is definitely not image data. Sometimes the entries are just empty.
I suspect that I am handling the return type of stbi_load incorrectly, forcing random memory data into the database. Extract from stbi documentation:
The return value from an image loader is an 'unsigned char *' which points to the pixel data..
As I understand it I am simply passing the array pointer to sqlite3_bind_blob which accepts const void * just like glTexImage2D does. So why is it working for the one but not for the other? Or could the error source be somewhere else?
Edit
I also tried something else. Normally I pass -1 for size when calling e.g. sqlite3_bind_text, because the call will then automatically search for a null terminator. So I thought that I might have to pass the correct size in bytes when calling sqlite3_bind_blob, because there might be no terminator there. So for an image of size 225 x 225 with 3 channels, I passed 225 * 225 * 3 as the size parameter. Unfortunately this did not work either.
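Passing -1 indeed does not work for blobs: pixel buffers legitimately contain zero bytes, so there is no terminator to search for, and the byte count must be supplied explicitly. A sketch of binding with an explicit size; the width/height parameters are hypothetical additions to AddImage, and note that stbi_load was asked for 4 channels above, so the count is width * height * 4, not * 3:
// Hypothetical variant of AddImage: the dimensions must travel with the
// pixel pointer, since the raw buffer carries no size information.
void DBConnector::AddImage(std::string name, unsigned char *data,
                           int width, int height)
{
    sqlite3_stmt *stmt;
    int err = sqlite3_prepare_v2(db,
        "INSERT INTO images (name, img_data) VALUES (?, ?)", -1, &stmt, NULL);
    if (err != SQLITE_OK)
        return;
    sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_TRANSIENT);
    // stbi_load(..., 4) always yields 4 bytes per pixel, so the blob is
    // width * height * 4 bytes (not * 3 as tried in the edit above).
    sqlite3_bind_blob(stmt, 2, data, width * height * 4, SQLITE_TRANSIENT);
    sqlite3_step(stmt);
    sqlite3_finalize(stmt);
}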

Display FFMPEG decoded frame in a GLFW window

I am implementing the client program of a game where the server sends encoded frames of the game to the client (via UDP), while the client decodes them (via FFMPEG) and displays them in a GLFW window.
My program has two threads:
Thread 1: renders the content of the uint8_t* variable dataToRender
Thread 2: keeps obtaining frames from the server, decodes them and updates dataToRender accordingly
Thread 1 does the typical rendering of a GLFW window in a while-loop. I have already tried to display some dummy frame data (a completely red frame) and it worked:
while (!glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
...
glBindTexture(GL_TEXTURE_2D, tex_handle);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, window_width, window_height, 0, GL_RGB, GL_UNSIGNED_BYTE, dataToRender);
...
glfwSwapBuffers(window);
}
Thread 2 is where I am having trouble. I am unable to properly store the decoded frame in my dataToRender variable. On top of it, the frame data is originally in YUV format and needs to be converted to RGB. I use FFMPEG's sws_scale for that, which also gives me a bad dst image pointers error output in the console. Here's the code snippet responsible for that part:
size_t data_size = frameBuffer.size(); // frameBuffer is a std::vector where I accumulate the frame data chunks
uint8_t* data = frameBuffer.data(); // convert the vector to a pointer
picture->format = AV_PIX_FMT_RGB24;
av_frame_get_buffer(picture, 1);
while (data_size > 0) {
int ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
fprintf(stderr, "Error while parsing\n");
exit(1);
}
data += ret;
data_size -= ret;
if (pkt->size) {
swsContext = sws_getContext(
c->width, c->height,
AV_PIX_FMT_YUV420P, c->width, c->height,
AV_PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL
);
uint8_t* rgb24[1] = { data };
int rgb24_stride[1] = { 3 * c->width };
sws_scale(swsContext, rgb24, rgb24_stride, 0, c->height, picture->data, picture->linesize);
decode(c, picture, pkt, outname);
// TODO: copy content of picture->data[0] to "dataToRender" maybe?
}
}
I have already tried doing another sws_scale to copy the content to dataToRender and I cannot get rid of the bad dst image pointers error. Any advice or solution to the problem would be greatly appreciated as I have been stuck for days on this.
I think you should convert YUV to RGB using OpenGL. That is much more efficient and simpler. The fragment shader looks like this:
precision mediump float;
varying vec2 v_texPo;
uniform sampler2D sampler_y;
uniform sampler2D sampler_u;
uniform sampler2D sampler_v;
void main() {
float y, u, v;
vec3 rgb;
y = texture2D(sampler_y, v_texPo).r;
u = texture2D(sampler_u, v_texPo).r - 0.5;
v = texture2D(sampler_v, v_texPo).r - 0.5;
rgb.r = y + 1.403 * v;
rgb.g = y - 0.344 * u - 0.714 * v;
rgb.b = y + 1.770 * u;
gl_FragColor = vec4(rgb, 1);
}
And you should upload three textures to OpenGL, one per plane.
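A sketch of the upload side under these assumptions: the decoded AVFrame (picture) is in YUV420P, program is the linked shader program using the fragment shader above, and legacy GL / ES 2.0 single-channel GL_LUMINANCE textures are acceptable:
// Upload the three YUV420P planes as single-channel textures.
// U and V are subsampled: half the width and height of Y.
GLuint tex[3];   // 0 = Y, 1 = U, 2 = V
glGenTextures(3, tex);
int w[3] = { c->width,  c->width / 2,  c->width / 2 };
int h[3] = { c->height, c->height / 2, c->height / 2 };
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);  // plane rows are 1-byte texels
for (int i = 0; i < 3; ++i) {
    glActiveTexture(GL_TEXTURE0 + i);
    glBindTexture(GL_TEXTURE_2D, tex[i]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    // NOTE: assumes picture->linesize[i] == w[i]; if the decoder pads rows,
    // copy row by row or use GL_UNPACK_ROW_LENGTH (desktop GL).
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w[i], h[i], 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, picture->data[i]);
}
glUniform1i(glGetUniformLocation(program, "sampler_y"), 0);
glUniform1i(glGetUniformLocation(program, "sampler_u"), 1);
glUniform1i(glGetUniformLocation(program, "sampler_v"), 2);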

OpenGL transparency doing weird things

I am trying to render a texture with an alpha channel in it.
This is what I used for texture loading:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
I enabled GL_BLEND just before I render the texture: glEnable(GL_BLEND);
I also did this at the beginning of the code(the initialization): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
This is the result (it should be a transparent texture of a first-person hand):
But when I load my texture like this(no alpha channel):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
This is the result:
Does anyone know what can cause this, or do I have to give more code?
Sorry for bad English, thanks in advance.
EDIT:
My texture loading code:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Data read from the header of the BMP file
unsigned char header[54];
unsigned int dataPos;
unsigned int imageSize;
unsigned int width, height;
// Actual RGB data
unsigned char * data;
// Open the file
FILE * file = fopen(imagepath, "rb");
if (!file) { printf("%s could not be opened. \n", imagepath); getchar(); exit(0); }
// Read the header, i.e. the 54 first bytes
// If less than 54 bytes are read, problem
if (fread(header, 1, 54, file) != 54) {
printf("Not a correct BMP file\n");
exit(0);
}
// A BMP file always begins with "BM"
if (header[0] != 'B' || header[1] != 'M') {
printf("Not a correct BMP file\n");
exit(0);
}
// Make sure this is a 24bpp file
if (*(int*)&(header[0x1E]) != 0) { printf("Not a correct BMP file\n");}
if (*(int*)&(header[0x1C]) != 24) { printf("Not a correct BMP file\n");}
// Read the information about the image
dataPos = *(int*)&(header[0x0A]);
imageSize = *(int*)&(header[0x22]);
width = *(int*)&(header[0x12]);
height = *(int*)&(header[0x16]);
// Some BMP files are misformatted, guess missing information
if (imageSize == 0) imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
if (dataPos == 0) dataPos = 54; // The BMP header is done that way
// Create a buffer
data = new unsigned char[imageSize];
// Read the actual data from the file into the buffer
fread(data, 1, imageSize, file);
// Everything is in memory now, the file can be closed
fclose(file);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (imagepath == "hand.bmp") {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}
As you can see it's not my own code; I got it from opengl-tutorial.org
My first comment stated:
The repeating, offset pattern looks like the data is treated as having a larger offset, when in reality it has smaller (or opposite).
And that was before I actually noticed what you did. Yes, this is precisely that. You can't treat 4-bytes-per-pixel data as 3-bytes-per-pixel data. The alpha channel gets interpreted as colour and that's why it all offsets this way.
If you want to disregard the alpha channel, you need to strip it off when loading so that it ends up having 3 bytes for each pixel value in the OpenGL texture memory. (That's what @RetoKoradi's answer is proposing, namely creating an RGB texture from RGBA data).
If it isn't actually supposed to look so blue-ish, maybe it's not actually in BGR layout?
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
^
\--- change to GL_RGBA as well
My wild guess is that human skin would have more red than blue light reflected by it.
It looks like you misunderstood how the arguments of glTexImage2D() work:
The 3rd argument (internalformat) defines what format you want to use for the data stored in the texture.
The 7th and 8th argument (format and type) define the format of the data you pass into the call as the last argument.
Based on this, if the format of the data you're passing as the last argument is BGRA, and you want to create an RGB texture from it, the correct call is:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
Note that the 7th argument is now GL_BGRA, matching your input data, while the 3rd argument is GL_RGB, specifying that you want to use an RGB texture.
Seems you chose the wrong texture pixel alignment. To find the right one, try experimenting with the values 1, 2 and 4 for glPixelStorei with GL_UNPACK_ALIGNMENT.
Specification:
void glPixelStorei( GLenum pname,
GLint param);
pname Specifies the symbolic name of the parameter to be set. One value affects the packing of pixel data into memory: GL_PACK_ALIGNMENT. The other affects the unpacking of pixel data from memory: GL_UNPACK_ALIGNMENT.
param Specifies the value that pname is set to.
glPixelStorei sets pixel storage modes that affect the operation of subsequent glReadPixels as well as the unpacking of texture patterns (see glTexImage2D and glTexSubImage2D).
pname is a symbolic constant indicating the parameter to be set, and param is the new value. One storage parameter affects how pixel data is returned to client memory:
GL_PACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
The other storage parameter affects how pixel data is read from client memory:
GL_UNPACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
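For instance (a sketch, not the asker's exact upload), byte alignment is the safe choice for tightly packed 24-bpp rows:
// 24-bpp rows are only 4-byte aligned when (width * 3) % 4 == 0;
// byte alignment always works for tightly packed data and avoids the
// shear/skew artifacts seen when the alignments mismatch.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0,
             GL_BGR, GL_UNSIGNED_BYTE, data);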
The BMP format does not support transparency, at least not in its 3 most common versions (only the GL_BGR mode and its masked modifications work). Use PNG, DDS, TIFF or TGA (simplest) instead.
Secondly, your total image data size computation formula is wrong:
imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
The right formula is:
imageSize = 4 * ((width * bitsPerPel + 31) / 32) * height;
where bitsPerPel is the picture's bits per pixel (8, 16 or 24).
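To see what the rounding does, a quick check with example numbers (the 101-pixel width is just an illustration): each BMP row is padded to a 4-byte boundary, so the stride can exceed width * 3.
/* Row stride example for a 24-bpp BMP. */
int width = 101, height = 64, bitsPerPel = 24;
int stride = 4 * ((width * bitsPerPel + 31) / 32);  /* = 304, not 101 * 3 = 303 */
unsigned long imageSize = (unsigned long)stride * height;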
Here is the code of a function used to load simple TGA files with transparency support:
// Define targa header.
#pragma pack(1)
typedef struct
{
GLbyte identsize; // Size of ID field that follows header (0)
GLbyte colorMapType; // 0 = None, 1 = paletted
GLbyte imageType; // 0 = none, 1 = indexed, 2 = rgb, 3 = grey, +8=rle
unsigned short colorMapStart; // First colour map entry
unsigned short colorMapLength; // Number of colors
unsigned char colorMapBits; // bits per palette entry
unsigned short xstart; // image x origin
unsigned short ystart; // image y origin
unsigned short width; // width in pixels
unsigned short height; // height in pixels
GLbyte bits; // bits per pixel (8, 16, 24, 32)
GLbyte descriptor; // image descriptor
} TGAHEADER;
#pragma pack(8)
GLbyte *gltLoadTGA(const char *szFileName, GLint *iWidth, GLint *iHeight, GLint *iComponents, GLenum *eFormat)
{
FILE *pFile; // File pointer
TGAHEADER tgaHeader; // TGA file header
unsigned long lImageSize; // Size in bytes of image
short sDepth; // Pixel depth;
GLbyte *pBits = NULL; // Pointer to bits
// Default/Failed values
*iWidth = 0;
*iHeight = 0;
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
// Attempt to open the file
pFile = fopen(szFileName, "rb");
if(pFile == NULL)
return NULL;
// Read in header (binary)
fread(&tgaHeader, 18/* sizeof(TGAHEADER)*/, 1, pFile);
// Do byte swap for big vs little endian
#ifdef __APPLE__
BYTE_SWAP(tgaHeader.colorMapStart);
BYTE_SWAP(tgaHeader.colorMapLength);
BYTE_SWAP(tgaHeader.xstart);
BYTE_SWAP(tgaHeader.ystart);
BYTE_SWAP(tgaHeader.width);
BYTE_SWAP(tgaHeader.height);
#endif
// Get width, height, and depth of texture
*iWidth = tgaHeader.width;
*iHeight = tgaHeader.height;
sDepth = tgaHeader.bits / 8;
// Put some validity checks here. Very simply, I only understand
// or care about 8, 24, or 32 bit targa's.
if(tgaHeader.bits != 8 && tgaHeader.bits != 24 && tgaHeader.bits != 32)
return NULL;
// Calculate size of image buffer
lImageSize = tgaHeader.width * tgaHeader.height * sDepth;
// Allocate memory and check for success
pBits = new GLbyte[lImageSize];
if(pBits == NULL)
return NULL;
// Read in the bits
// Check for read error. This should catch RLE or other
// weird formats that I don't want to recognize
if(fread(pBits, lImageSize, 1, pFile) != 1)
{
delete [] pBits; // pBits came from new[], so delete[], not free()
return NULL;
}
// Set OpenGL format expected
switch(sDepth)
{
case 3: // Most likely case
*eFormat = GL_BGR_EXT;
*iComponents = GL_RGB8;
break;
case 4:
*eFormat = GL_BGRA_EXT;
*iComponents = GL_RGBA8;
break;
case 1:
*eFormat = GL_LUMINANCE;
*iComponents = GL_LUMINANCE8;
break;
};
// Done with File
fclose(pFile);
// Return pointer to image data
return pBits;
}
iWidth and iHeight return the texture dimensions, and eFormat and iComponents the external and internal image formats; the actual function return value is a pointer to the texture data.
So your function must look like:
GLuint Texture::loadTexture(const char * imagepath) {
printf("Reading image %s\n", imagepath);
// Data read from the header of the BMP file
int width, height;
int component;
GLenum eFormat;
// Actual RGB data
GLbyte * data = gltLoadTGA(imagepath, &width, &height, &component, &eFormat);
// Create one OpenGL texture
GLuint textureID;
glGenTextures(1, &textureID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindTexture(GL_TEXTURE_2D, textureID);
if (!strcmp(imagepath, "hand.tga")) { // important: this compares strings, not pointers
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}else {
glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
delete[] data;
return textureID;
}

Opengl wrong colors when applying textures to objects

So I am having a rough go of things when it comes to applying ppm textures to a quad shape. My issue is the color is not "correct."
Here is a few planets from mercury towards jupiter.
The only color that is correct is the earth.
I downloaded the textures online (jpegs) and then converted them from the linux commandline
jpegtopnm Mercury.jpg > Mercury.ppm
I did this for all of the images that you see above.
At first the colors were all inverted so I changed
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ImgWidth, ImgHeight, 0, GL_RGB,
GL_UNSIGNED_BYTE, TexBits);
to
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ImgWidth, ImgHeight, 0, GL_BGR,
GL_UNSIGNED_BYTE, TexBits);
and that fixed the inverted colors (r instead of b, etc.). Now I am stuck here: how do I correct the textures to show the correct colors for the planets?
here is the mercury texture file
and here is the code that handles the textures
void read_file(const char * filename)
{
FILE *infile;
char buf[80];
char string[256];
unsigned char *texImage;
int i, temp;
GLubyte *sp;
if ( (infile = fopen(filename, "rb")) == NULL)
{
printf("File open error\n");
exit(1);
}
fgets(string, 256, infile);
fgets(string, 256, infile);
while (string[0] == '#')
fgets(string, 256, infile);
sscanf(string, "%d %d", &ImgWidth, &ImgHeight);
if (TexBits != 0)
free(TexBits);
TexBits = (GLubyte *) calloc(ImgWidth * ImgHeight * 3, sizeof(GLubyte));
for (i = ImgHeight - 1; i >= 0; i--)
{
sp = TexBits + i * ImgWidth * 3;
fread (sp, sizeof(GLubyte), ImgWidth * 3, infile);
}
fclose(infile);
}
void bindTexture()
{
glGenTextures(10, texName);
glBindTexture(GL_TEXTURE_2D, texName[1]);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
read_file("Mercury.ppm");
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ImgWidth, ImgHeight, 0, GL_BGR,
GL_UNSIGNED_BYTE, TexBits);
///// rest of planets in order.. ( same code for each of them )
}
void drawCircle(GLfloat size, GLfloat offset, GLint r)
{
quadratic=gluNewQuadric();
gluQuadricDrawStyle(quadratic, GLU_FILL);
gluQuadricTexture(quadratic, GL_TRUE);
gluSphere(quadratic, size, r, r);
}
void display(void)
{
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
/* mercury */
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, texName[1]);
glColor4f(1.0, 1.0, 1.0, 1.0);
DrawPlanet(-17.0f, -4.0f, -9.0f, zAxis, .7f, .3f, 25);
glBindTexture(GL_TEXTURE_2D, 0);
glDisable(GL_TEXTURE_2D);
// rest of code is similar to the one above
}
I am finding it hard to figure out why the colors are a bright green instead of the actual color of each planet. It seems that the program is having a problem with brown. Any ideas?
P6 PPM files have in their header 3 numbers: A width, a height and a maxval. The width and height give the dimensions of the image, and the maxval gives the dynamic range of the image.
In your code above, you scan for the image dimensions, but not the maxval. The maxval is not guaranteed to be on the same line as the image dimensions.
As a result, at the point where you start reading the image, the P6's maxval gets read in as part of the image information, shifting all the bytes, and rotating the apparent R, G and B values.
The exact definition of the P6 header and payload is as follows (courtesy of man ppm):
A "magic number" for identifying the file type. A ppm image's magic number is the two characters "P6".
Whitespace (blanks, TABs, CRs, LFs).
A width, formatted as ASCII characters in decimal.
Whitespace.
A height, again in ASCII decimal.
Whitespace.
The maximum color value (Maxval), again in ASCII decimal. Must be less than 65536.
Newline or other single whitespace character.
A raster of Width * Height pixels, proceeding through the image in normal English reading order. Each pixel is a triplet of red, green, and blue samples, in that order. Each sample is represented in pure binary by either 1 or 2 bytes. If the Maxval is less than 256, it is 1 byte. Otherwise, it is 2 bytes. The most significant byte is first.
Characters from a "#" to the next end-of-line, before the maxval line, are comments and are ignored.
This suggests that rather than using fgets and sscanf (which, honestly, are often the better idea for parsing genuinely line-oriented input, which the PPM header is not), you should consider using a loop with fgetc() and a small state machine to ensure you're fully robust against any P6 file you might encounter.
Something like this may work:
int get_pnm_header(FILE *f)
{
int ch;
if ((ch = fgetc(f)) != 'P')
return -1; // not a PNM file
if ((ch = fgetc(f)) < '1' || ch > '6')
return -1; // not a PNM file
return ch - '0';
}
int get_ppm_integer(FILE *f)
{
int in_comment = 0;
int in_value = 0;
int value = 0;
int ch;
while ((ch = fgetc(f)) != EOF)
{
if (ch == '#')
in_comment = 1;
if (in_comment)
{
in_comment = ch != '\n';
continue;
}
if (isdigit(ch))
in_value = 1;
if (in_value)
{
if (!isdigit(ch))
{
if (!isspace(ch)) /* consume first WS after value */
ungetc(ch, f); /* If not WS, put it back (might be '#' for a comment) */
return value;
}
value = (value * 10) + ch - '0';
continue;
}
if (!isspace(ch))
{
fprintf(stderr, "Warning: unexpected character '%c' in P6 header\n", ch);
}
}
fprintf(stderr, "Warning: EOF encountered reading P6 header\n");
return -1;
}
and then in your code further down:
int pnm_type = get_pnm_header(infile);
if (pnm_type != 6)
// report an error about unexpected file type
ImgWidth = get_ppm_integer(infile);
ImgHeight = get_ppm_integer(infile);
ImgMaxval = get_ppm_integer(infile);
if (ImgMaxval != 255)
// report an error about unsupported maxval
int r = fread((void *)TexBits, 3, ImgHeight * ImgWidth, infile);
if (r != ImgHeight * ImgWidth)
// report an error about a short file.
If you modify get_ppm_integer to just return value on EOF, I believe that function will also work correctly for reading the bytes in the body of P3 files, if you need to support those.
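A sketch of that EOF tweak, replacing the final warning at the bottom of get_ppm_integer above:
/* end of get_ppm_integer: EOF right after a value means the value is complete */
if (in_value)
    return value;
fprintf(stderr, "Warning: EOF encountered reading P6 header\n");
return -1;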

Saving the openGL context as a video output

I am currently trying to save the animation made in openGL to a video file. I have tried using openCV's videowriter but to no avail. I have successfully been able to generate a snapshot and save it as bmp using the SDL library. But if I save all snapshots and then generate the video using ffmpeg, that is like collecting 4 GB worth of images. Not practical.
How can I write video frames directly during rendering?
Here the code i use to take snapshots when I require:
void snapshot(){
SDL_Surface* snap = SDL_CreateRGBSurface(SDL_SWSURFACE,WIDTH,HEIGHT,24, 0x000000FF, 0x0000FF00, 0x00FF0000, 0);
char * pixels = new char [3 *WIDTH * HEIGHT];
glReadPixels(0, 0,WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, pixels);
for (int i = 0 ; i <HEIGHT ; i++)
std::memcpy( ((char *) snap->pixels) + snap->pitch * i, pixels + 3 * WIDTH * (HEIGHT-i - 1), WIDTH*3 );
delete [] pixels;
SDL_SaveBMP(snap, "snapshot.bmp");
SDL_FreeSurface(snap);
}
I need the video output. I have discovered that ffmpeg can be used to create videos from C++ code but have not been able to figure out the process. Please help!
EDIT: I have tried using openCV's CvVideoWriter class but the program crashes ("segmentation fault") the moment it is declared. Compilation shows no errors, of course. Any suggestions?
SOLUTION FOR PYTHON USERS (Requires Python2.7,python-imaging,python-opengl,python-opencv, codecs of format you want to write to, I am on Ubuntu 14.04 64-bit):
def snap():
pixels=[]
screenshot = glReadPixels(0,0,W,H,GL_RGBA,GL_UNSIGNED_BYTE)
snapshot = Image.frombuffer("RGBA",(W,H),screenshot,"raw","RGBA",0,0)
snapshot.save(os.path.dirname(videoPath) + "/temp.jpg")
load = cv2.cv.LoadImage(os.path.dirname(videoPath) + "/temp.jpg")
cv2.cv.WriteFrame(videoWriter,load)
Here W and H are the window dimensions (width, height). What is happening is that I am using PIL to convert the raw pixels read by the glReadPixels command into a JPEG image. I am loading that JPEG into an openCV image and writing it to the videowriter. I was having certain issues passing the PIL image directly to the videowriter (which would save millions of clock cycles of I/O), but right now I am not working on that. Image is a PIL module; cv2 is a python-opencv module.
It sounds as though you are using the command line utility: ffmpeg. Rather than using the command-line to encode video from a collection of still images, you should use libavcodec and libavformat. These are the libraries upon which ffmpeg is actually built, and will allow you to encode video and store it in a standard stream/interchange format (e.g. RIFF/AVI) without using a separate program.
You probably will not find a lot of tutorials on implementing this, because it has traditionally been the case that people wanted to use ffmpeg to go the other way: that is, decode various video formats for display in OpenGL. I think this is going to change very soon with the introduction of gameplay video encoding on the PS4 and Xbox One consoles; demand for this functionality will suddenly skyrocket.
The general process is this, however:
Pick a container format and CODEC
Often one will decide the other, (e.g. MPEG-2 + MPEG Program Stream)
Start filling a buffer with your still frames
Periodically encode your buffer of still frames and write to your output (packet writing in MPEG terms)
You will do this either when the buffer becomes full, or every n-many ms; you might prefer one over the other depending on whether you want to stream your video live or not.
When your program terminates flush the buffer and close your stream
One nice thing about this is you do not actually need to write to a file. Since you are periodically encoding packets of data from your buffer of still frames, you can stream your encoded video over a network if you want - this is why codec and container (interchange) format are separate.
Another nice thing is you do not have to synchronize the CPU and GPU, you can setup a pixel buffer object and have OpenGL copy data into CPU memory a couple of frames behind the GPU. This makes real-time encoding of video much less demanding, you only have to encode and flush the video to disk or over the network periodically if video latency demands are not unreasonable. This works very well in real-time rendering, since you have a large enough pool of data to keep a CPU thread busy encoding at all times.
Encoding frames can even be done in real-time on the GPU provided enough storage for a large buffer of frames (since ultimately the encoded data has to be copied from GPU to CPU and you want to do this as infrequently as possible). Obviously this is not done using ffmpeg, there are specialized libraries using CUDA / OpenCL / compute shaders for this purpose. I have never used them, but they do exist.
For portability sake, you should stick with libavcodec and Pixel Buffer Objects for asynchronous GPU->CPU copy. CPUs these days have enough cores that you can probably get away without GPU-assisted encoding if you buffer enough frames and encode in multiple simultaneous threads (this creates added synchronization overhead and increased latency when outputting encoded video) or simply drop frames / lower resolution (poor man's solution).
There are a lot of concepts covered here that go well beyond the scope of SDL, but you did ask how to do this with better performance than your current solution. In short, use OpenGL Pixel Buffer Objects to transfer data, and libavcodec for encoding. An example application that encodes video can be found on the ffmpeg libavcodec examples page.
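To make the PBO idea concrete, here is a minimal sketch of double-buffered asynchronous readback (WIDTH/HEIGHT, frame_index and encode_frame are hypothetical placeholders, and error handling is omitted):
// Double-buffered PBO readback: read frame N into one PBO while mapping the
// PBO that frame N-1 landed in, so glReadPixels can return immediately.
GLuint pbo[2];
glGenBuffers(2, pbo);
for (int i = 0; i < 2; ++i) {
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[i]);
    glBufferData(GL_PIXEL_PACK_BUFFER, WIDTH * HEIGHT * 4, NULL, GL_STREAM_READ);
}
// per frame:
int cur = frame_index % 2, prev = (frame_index + 1) % 2;
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[cur]);
glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, 0); // async into PBO
glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[prev]);
void *ptr = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
if (ptr) {
    encode_frame(ptr);   // hand last frame's pixels to the encoder (hypothetical)
    glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
}
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);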
For a quick test, something like the code below works (tested); resizable windows are unhandled.
#include <stdio.h>
FILE *avconv = NULL;
...
/* initialize */
avconv = popen("avconv -y -f rawvideo -s 800x600 -pix_fmt rgb24 -r 25 -i - -vf vflip -an -b:v 1000k test.mp4", "w");
...
/* save */
glReadPixels(0, 0, 800, 600, GL_RGB, GL_UNSIGNED_BYTE, pixels);
if (avconv)
fwrite(pixels ,800*600*3 , 1, avconv);
...
/* term */
if (avconv)
pclose(avconv);
Runnable mpg example with FFmpeg 2.7
Explanation and a superset example at: How to use GLUT/OpenGL to render to a file?
Consider https://github.com/FFmpeg/FFmpeg/blob/n3.0/doc/examples/muxing.c to generate a contained format.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <GL/glext.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
enum Constants { SCREENSHOT_MAX_FILENAME = 256 };
static GLubyte *pixels = NULL;
static GLuint fbo;
static GLuint rbo_color;
static GLuint rbo_depth;
static const unsigned int HEIGHT = 100;
static const unsigned int WIDTH = 100;
static int offscreen = 1;
static unsigned int max_nframes = 100;
static unsigned int nframes = 0;
static unsigned int time0;
/* Model. */
static double angle;
static double delta_angle;
/* Adapted from: https://github.com/cirosantilli/cpp-cheat/blob/19044698f91fefa9cb75328c44f7a487d336b541/ffmpeg/encode.c */
static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
static struct SwsContext *sws_context = NULL;
static uint8_t *rgb = NULL;
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
const int in_linesize[1] = { 4 * c->width };
sws_context = sws_getCachedContext(sws_context,
c->width, c->height, AV_PIX_FMT_RGB32,
c->width, c->height, AV_PIX_FMT_YUV420P,
0, NULL, NULL, NULL);
sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
c->height, frame->data, frame->linesize);
}
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
AVCodec *codec;
int ret;
avcodec_register_all();
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
c->bit_rate = 400000;
c->width = width;
c->height = height;
c->time_base.num = 1;
c->time_base.den = fps;
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
file = fopen(filename, "wb");
if (!file) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
}
void ffmpeg_encoder_finish(void) {
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
int got_output, ret;
do {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
} while (got_output);
fwrite(endcode, 1, sizeof(endcode), file);
fclose(file);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
}
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
int ret, got_output;
ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_packet_unref(&pkt);
}
}
void ffmpeg_encoder_glread_rgb(uint8_t **rgb, GLubyte **pixels, unsigned int width, unsigned int height) {
size_t i, j, k, cur_gl, cur_rgb, nvals;
const size_t format_nchannels = 4;
nvals = format_nchannels * width * height;
*pixels = realloc(*pixels, nvals * sizeof(GLubyte));
*rgb = realloc(*rgb, nvals * sizeof(uint8_t));
/* Get RGBA to align to 32 bits instead of just 24 for RGB. May be faster for FFmpeg. */
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, *pixels);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
cur_gl = format_nchannels * (width * (height - i - 1) + j);
cur_rgb = format_nchannels * (width * i + j);
for (k = 0; k < format_nchannels; k++)
(*rgb)[cur_rgb + k] = (*pixels)[cur_gl + k];
}
}
}
static int model_init(void) {
angle = 0;
delta_angle = 1;
return 0;
}
static int model_update(void) {
angle += delta_angle;
return 0;
}
static int model_finished(void) {
return nframes >= max_nframes;
}
static void init(void) {
int glget;
if (offscreen) {
/* Framebuffer */
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
/* Color renderbuffer. */
glGenRenderbuffers(1, &rbo_color);
glBindRenderbuffer(GL_RENDERBUFFER, rbo_color);
/* Storage must be one of: */
/* GL_RGBA4, GL_RGB565, GL_RGB5_A1, GL_DEPTH_COMPONENT16, GL_STENCIL_INDEX8. */
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB565, WIDTH, HEIGHT);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbo_color);
/* Depth renderbuffer. */
glGenRenderbuffers(1, &rbo_depth);
glBindRenderbuffer(GL_RENDERBUFFER, rbo_depth);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, WIDTH, HEIGHT);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rbo_depth);
glReadBuffer(GL_COLOR_ATTACHMENT0);
/* Sanity check. */
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER));
glGetIntegerv(GL_MAX_RENDERBUFFER_SIZE, &glget);
assert(WIDTH * HEIGHT < (unsigned int)glget);
} else {
glReadBuffer(GL_BACK);
}
glClearColor(0.0, 0.0, 0.0, 0.0);
glEnable(GL_DEPTH_TEST);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glViewport(0, 0, WIDTH, HEIGHT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
time0 = glutGet(GLUT_ELAPSED_TIME);
model_init();
ffmpeg_encoder_start("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO, 25, WIDTH, HEIGHT);
}
static void deinit(void) {
printf("FPS = %f\n", 1000.0 * nframes / (double)(glutGet(GLUT_ELAPSED_TIME) - time0));
free(pixels);
ffmpeg_encoder_finish();
free(rgb);
if (offscreen) {
glDeleteFramebuffers(1, &fbo);
glDeleteRenderbuffers(1, &rbo_color);
glDeleteRenderbuffers(1, &rbo_depth);
}
}
static void draw_scene(void) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
glRotatef(angle, 0.0f, 0.0f, -1.0f);
glBegin(GL_TRIANGLES);
glColor3f(1.0f, 0.0f, 0.0f);
glVertex3f( 0.0f, 0.5f, 0.0f);
glColor3f(0.0f, 1.0f, 0.0f);
glVertex3f(-0.5f, -0.5f, 0.0f);
glColor3f(0.0f, 0.0f, 1.0f);
glVertex3f( 0.5f, -0.5f, 0.0f);
glEnd();
}
static void display(void) {
char extension[SCREENSHOT_MAX_FILENAME];
char filename[SCREENSHOT_MAX_FILENAME];
draw_scene();
if (offscreen) {
glFlush();
} else {
glutSwapBuffers();
}
frame->pts = nframes;
ffmpeg_encoder_glread_rgb(&rgb, &pixels, WIDTH, HEIGHT);
ffmpeg_encoder_encode_frame(rgb);
nframes++;
if (model_finished())
exit(EXIT_SUCCESS);
}
static void idle(void) {
while (model_update());
glutPostRedisplay();
}
int main(int argc, char **argv) {
GLint glut_display;
glutInit(&argc, argv);
if (argc > 1)
offscreen = 0;
if (offscreen) {
/* TODO: if we use anything smaller than the window, it only renders a smaller version of things. */
/*glutInitWindowSize(50, 50);*/
glutInitWindowSize(WIDTH, HEIGHT);
glut_display = GLUT_SINGLE;
} else {
glutInitWindowSize(WIDTH, HEIGHT);
glutInitWindowPosition(100, 100);
glut_display = GLUT_DOUBLE;
}
glutInitDisplayMode(glut_display | GLUT_RGBA | GLUT_DEPTH);
glutCreateWindow(argv[0]);
if (offscreen) {
/* TODO: if we hide the window the program blocks. */
/*glutHideWindow();*/
}
init();
glutDisplayFunc(display);
glutIdleFunc(idle);
atexit(deinit);
glutMainLoop();
return EXIT_SUCCESS;
}
I solved the writing of a video file in Python from Python OpenGL the following way:
In the main section, setup the video file to write to:
#Set up video:
width=640
height=480
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#Open video output file:
out = cv2.VideoWriter('videoout.mp4',fourcc, 20.0, (width,height))
And in the DisplayFunction:
#Read frame:
screenshot = glReadPixels(0,0,width,height,GL_RGB,GL_UNSIGNED_BYTE)
#Convert from binary to cv2 numpy array:
snapshot = Image.frombuffer("RGB",(width,height),screenshot,"raw","RGB",0,0)
snapshot= np.array(snapshot)
snapshot=cv2.flip(snapshot,0)
#write frame to video file:
out.write(snapshot)
if (...): #End movie
glutLeaveMainLoop()
out.release()
print("Exit")
This writes to "videoout.mp4". Observe that it needs the "out.release()" at the end to get a proper mp4 file.