I am trying to render to the OpenGL framebuffer via an OpenGL renderbuffer from an OpenCL kernel. The issue is: even though I can (probably) render/write to the renderbuffer from an OpenCL kernel, the screen stays empty (i.e. black).
I am getting to the limits of what I can test in finite time, so I am asking someone with much more experience for a tip about what I am missing.
I personally suspect that I forgot to bind a buffer at the right point, but since I don't see which one or where, this is practically impossible to check.
Now for some reduced code, so you don't have to look at all the error checking etc. (this is the code that is called during the render routine):
void TestBuffer(){
    GLubyte *buffer = (GLubyte *) malloc(1000 * 1000 * 4);

    glReadBuffer(GL_COLOR_ATTACHMENT0);
    error = glGetError();
    if(error != GL_NO_ERROR){
        printf("error with readBuffer, %i\n", error);
    }

    glReadPixels(0, 0, 1000, 1000, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid *)buffer);
    error = glGetError();
    if(error != GL_NO_ERROR){
        printf("error with readpixels\n");
    }
    for(int i = 0; i < 1000 * 1000 * 4; i++){ // check the whole RGBA buffer
        if(buffer[i] != 0){
            printf("buffer was not empty # %i: %u\n", i, buffer[i]);
            free(buffer);
            return;
        }
    }
    printf("buffer was empty\n");
    free(buffer);
}
void runShader(){
    glFinish(); // Make sure that OpenGL isn't using our objects
    ret = clEnqueueAcquireGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);

    // Execute the OpenCL kernel
    size_t global_item_size = 1000 * 1000; // Process the entire screen
    size_t local_item_size = 1000;         // Divide work items into groups of ScreenWidth
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL);

    ret = clEnqueueReleaseGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);
    clFlush(command_queue);
    clFinish(command_queue);

    // We are going to blit into the window (default framebuffer)
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glDrawBuffer(GL_BACK); // Use backbuffer as color dst.

    // Read from your FBO
    glBindFramebuffer(GL_READ_FRAMEBUFFER, gl_frameBuffer);
    glReadBuffer(GL_COLOR_ATTACHMENT0); // Use Color Attachment 0 as color src.

    // Copy the color and depth buffer from your FBO to the default framebuffer
    glBlitFramebuffer(0, 0, 1000, 1000, 0, 0, 1000, 1000, GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, GL_NEAREST);

    TestBuffer();
}
My ideas were:
Blit the contents of the renderbuffer to the screen buffer, in case I messed up with binding the new framebuffer object (created earlier) or with attaching the renderbuffer (which you can see in the last few lines of the code); see the completeness check sketched right after this list.
Check whether I messed up with the double buffering or something similar: this is what the TestBuffer() function is for.
Flush before finishing, just in case.
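In case it really is a missing bind or attachment, a minimal check I could run right before the blit would be something like this (a sketch, assuming gl_frameBuffer is the framebuffer object created during initialization):
glBindFramebuffer(GL_READ_FRAMEBUFFER, gl_frameBuffer);
GLenum status = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE){
    printf("read framebuffer is not complete: 0x%x\n", status);
}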
The shader/kernel code is simple on purpose, to see whether the other stuff actually works (.w should be alpha, which should be opaque so we can see the result; the rest is just a gray rainbow):
#pragma OPENCL EXTENSION all : enable

#define ScreenWidth 1000
#define ScreenHight 1000

const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;

__kernel void rainbow(__write_only image2d_t asd) {
    int i = get_global_id(0);
    unsigned int x = i % ScreenWidth;
    unsigned int y = i / ScreenHight;

    uint4 pixel; // I wish I could access this as an array
    pixel.x = i;
    pixel.y = i;
    pixel.z = i;
    pixel.w = 255;

    write_imageui(asd, (int2)(x, y), pixel);
}
Some further information:
I am only rendering to COLOR_ATTACHMENT0, since I don't care about the depth or stencil buffer in my use case. This could be an issue, though (I didn't even generate buffers for them).
I am compiling for Windows 10
The format of the renderbuffer is RGBA8, but I think the native format is RGBA24. It once was just RGBA, as you can see in the TestBuffer routine, but I think this should be fine.
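One more thing I could still check is which image format OpenCL actually reports for the acquired renderbuffer (a sketch, assuming cl_renderBuffer is the cl_mem I created from the renderbuffer with clCreateFromGLRenderbuffer):
cl_image_format fmt;
ret = clGetImageInfo(cl_renderBuffer, CL_IMAGE_FORMAT, sizeof(fmt), &fmt, NULL);
printf("channel order: 0x%x, channel data type: 0x%x\n",
       fmt.image_channel_order, fmt.image_channel_data_type);
I am also not sure whether write_imageui is even the right call for a normalized channel data type like CL_UNORM_INT8 (which is what I would expect for RGBA8), or whether it would have to be write_imagef instead.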
What could cause the screen to stay black/empty?
I am trying to render a texture with an alpha channel in it.
This is what I used for texture loading:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
I enabled GL_BLEND just before I render the texture: glEnable(GL_BLEND);
I also did this at the beginning of the code (during initialization): glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
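For reference, the overall order of the blending setup around the draw looks roughly like this (a sketch; drawHand() just stands in for my actual draw call):
// once, during initialization:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

// in the render loop, just before the textured quad is drawn:
glEnable(GL_BLEND);
drawHand(); // placeholder for the actual rendering of the hand texture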
This is the result (it should be a transparent texture of a first-person hand):
But when I load my texture like this (no alpha channel):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
This is the result:
Does anyone know what can cause this, or do I have to give more code?
Sorry for bad English, thanks in advance.
EDIT:
My texture loading code:
GLuint Texture::loadTexture(const char * imagepath) {
    printf("Reading image %s\n", imagepath);

    // Data read from the header of the BMP file
    unsigned char header[54];
    unsigned int dataPos;
    unsigned int imageSize;
    unsigned int width, height;
    // Actual RGB data
    unsigned char * data;

    // Open the file
    FILE * file = fopen(imagepath, "rb");
    if (!file) { printf("%s could not be opened.\n", imagepath); getchar(); exit(0); }

    // Read the header, i.e. the first 54 bytes
    // If less than 54 bytes are read, there is a problem
    if (fread(header, 1, 54, file) != 54) {
        printf("Not a correct BMP file\n");
        exit(0);
    }
    // A BMP file always begins with "BM"
    if (header[0] != 'B' || header[1] != 'M') {
        printf("Not a correct BMP file\n");
        exit(0);
    }
    // Make sure this is a 24bpp file
    if (*(int*)&(header[0x1E]) != 0)  { printf("Not a correct BMP file\n"); }
    if (*(int*)&(header[0x1C]) != 24) { printf("Not a correct BMP file\n"); }

    // Read the information about the image
    dataPos   = *(int*)&(header[0x0A]);
    imageSize = *(int*)&(header[0x22]);
    width     = *(int*)&(header[0x12]);
    height    = *(int*)&(header[0x16]);

    // Some BMP files are misformatted, guess missing information
    if (imageSize == 0) imageSize = width * height * 3; // 3 : one byte for each Red, Green and Blue component
    if (dataPos == 0)   dataPos = 54;                   // The BMP header is done that way

    // Create a buffer
    data = new unsigned char[imageSize];
    // Read the actual data from the file into the buffer
    fread(data, 1, imageSize, file);
    // Everything is in memory now, the file can be closed
    fclose(file);

    // Create one OpenGL texture
    GLuint textureID;
    glGenTextures(1, &textureID);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glBindTexture(GL_TEXTURE_2D, textureID);

    if (imagepath == "hand.bmp") {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
    } else {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);
    }

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glGenerateMipmap(GL_TEXTURE_2D);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);

    delete[] data;
    return textureID;
}
As you can see, it's not my own code; I got it from opengl-tutorial.org.
My first comment stated:
The repeating, offset pattern looks like the data is treated as having a larger offset than it actually has (or the other way around).
And that was before I actually noticed what you did. Yes, this is precisely that: you can't treat 4-bytes-per-pixel data as 3-bytes-per-pixel data. The alpha channel gets interpreted as colour, and that's why everything is offset this way.
If you want to disregard the alpha channel, you need to strip it off when loading, so that the data ends up having 3 bytes per pixel in the OpenGL texture memory. (That's what @RetoKoradi's answer is proposing, namely creating an RGB texture from RGBA data.)
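If you do want a 3-bytes-per-pixel texture, one way (a sketch, assuming data holds width*height tightly packed BGRA pixels) is to drop the alpha byte on the CPU before uploading; alternatively you can let GL do the conversion, as @RetoKoradi's answer shows:
unsigned char *rgbData = new unsigned char[width * height * 3];
for (unsigned int i = 0; i < width * height; i++) {
    rgbData[i * 3 + 0] = data[i * 4 + 0]; // B
    rgbData[i * 3 + 1] = data[i * 4 + 1]; // G
    rgbData[i * 3 + 2] = data[i * 4 + 2]; // R
    // data[i * 4 + 3] (alpha) is simply dropped
}
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, rgbData);
delete[] rgbData;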
If it isn't actually supposed to look so blue-ish, maybe it's not actually in BGR layout?
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
                                                          ^
                                                          \--- change to GL_RGBA as well
My wild guess is that human skin would have more red than blue light reflected by it.
It looks like you misunderstood how the arguments of glTexImage2D() work:
The 3rd argument (internalformat) defines what format you want to use for the data stored in the texture.
The 7th and 8th argument (format and type) define the format of the data you pass into the call as the last argument.
Based on this, if the format of the data you're passing as the last argument is BGRA, and you want to create an RGB texture from it, the correct call is:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, data);
Note that the 7th argument is now GL_BGRA, matching your input data, while the 3rd argument is GL_RGB, specifying that you want to use an RGB texture.
Seems you chose the wrong texture pixel alignment. To find the right one, try experimenting with the values 1, 2 and 4 for glPixelStorei with GL_UNPACK_ALIGNMENT.
Specification:
void glPixelStorei(GLenum pname, GLint param);
pname: Specifies the symbolic name of the parameter to be set. One value affects the packing of pixel data into memory: GL_PACK_ALIGNMENT. The other affects the unpacking of pixel data from memory: GL_UNPACK_ALIGNMENT.
param: Specifies the value that pname is set to.
glPixelStorei sets pixel storage modes that affect the operation of subsequent glReadPixels as well as the unpacking of texture patterns (see glTexImage2D and glTexSubImage2D).
pname is a symbolic constant indicating the parameter to be set, and param is the new value. One storage parameter affects how pixel data is returned to client memory:
GL_PACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
The other storage parameter affects how pixel data is read from client memory:
GL_UNPACK_ALIGNMENT
Specifies the alignment requirements for the start of each pixel row in memory. The allowable values are 1 (byte-alignment), 2 (rows aligned to even-numbered bytes), 4 (word-alignment), and 8 (rows start on double-word boundaries).
The OpenGL reference pages also list the type, initial value, and range of valid values for each storage parameter that can be set with glPixelStorei.
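In this case the experiment boils down to something like the following just before the upload (a sketch using the variables from the question's loader):
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // try 1, 2 and 4 here
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);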
The BMP format does not support transparency, at least not in the three most common versions (only GL_BGR mode and its masked variants work). Use PNG, DDS, TIFF, or TGA (the simplest) instead.
Secondly, your total image data size computation formula is wrong:
imageSize = width*height * 3; // 3 : one byte for each Red, Green and Blue component
The right formula is:
imageSize = 4 * ((width * bitsPerPel + 31) / 32) * height;
where bitsPerPel is the picture's bits per pixel (8, 16 or 24).
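As a quick sketch in C (bitsPerPel is whatever the header reports; 24 for the files this loader accepts):
// each BMP row is padded to a multiple of 4 bytes
unsigned int bytesPerRow = 4 * ((width * bitsPerPel + 31) / 32);
unsigned int imageSize   = bytesPerRow * height;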
Here is the code of a function used to load simple TGA files with transparency support:
// Define targa header.
#pragma pack(1)
typedef struct
{
    GLbyte identsize;              // Size of ID field that follows header (0)
    GLbyte colorMapType;           // 0 = None, 1 = paletted
    GLbyte imageType;              // 0 = none, 1 = indexed, 2 = rgb, 3 = grey, +8=rle
    unsigned short colorMapStart;  // First colour map entry
    unsigned short colorMapLength; // Number of colors
    unsigned char colorMapBits;    // bits per palette entry
    unsigned short xstart;         // image x origin
    unsigned short ystart;         // image y origin
    unsigned short width;          // width in pixels
    unsigned short height;         // height in pixels
    GLbyte bits;                   // bits per pixel (8, 16, 24, 32)
    GLbyte descriptor;             // image descriptor
} TGAHEADER;
#pragma pack(8)

GLbyte *gltLoadTGA(const char *szFileName, GLint *iWidth, GLint *iHeight, GLint *iComponents, GLenum *eFormat)
{
    FILE *pFile;              // File pointer
    TGAHEADER tgaHeader;      // TGA file header
    unsigned long lImageSize; // Size in bytes of image
    short sDepth;             // Pixel depth
    GLbyte *pBits = NULL;     // Pointer to bits

    // Default/failed values
    *iWidth = 0;
    *iHeight = 0;
    *eFormat = GL_BGR_EXT;
    *iComponents = GL_RGB8;

    // Attempt to open the file
    pFile = fopen(szFileName, "rb");
    if(pFile == NULL)
        return NULL;

    // Read in header (binary)
    fread(&tgaHeader, 18/* sizeof(TGAHEADER)*/, 1, pFile);

    // Do byte swap for big vs little endian
#ifdef __APPLE__
    BYTE_SWAP(tgaHeader.colorMapStart);
    BYTE_SWAP(tgaHeader.colorMapLength);
    BYTE_SWAP(tgaHeader.xstart);
    BYTE_SWAP(tgaHeader.ystart);
    BYTE_SWAP(tgaHeader.width);
    BYTE_SWAP(tgaHeader.height);
#endif

    // Get width, height, and depth of texture
    *iWidth = tgaHeader.width;
    *iHeight = tgaHeader.height;
    sDepth = tgaHeader.bits / 8;

    // Put some validity checks here. Very simply, I only understand
    // or care about 8-, 24-, or 32-bit targas.
    if(tgaHeader.bits != 8 && tgaHeader.bits != 24 && tgaHeader.bits != 32)
        return NULL;

    // Calculate size of image buffer
    lImageSize = tgaHeader.width * tgaHeader.height * sDepth;

    // Allocate memory and check for success
    pBits = new GLbyte[lImageSize];
    if(pBits == NULL)
        return NULL;

    // Read in the bits
    // Check for read error. This should catch RLE or other
    // weird formats that I don't want to recognize
    if(fread(pBits, lImageSize, 1, pFile) != 1)
    {
        delete[] pBits; // allocated with new[], so release with delete[]
        return NULL;
    }

    // Set OpenGL format expected
    switch(sDepth)
    {
        case 3: // Most likely case
            *eFormat = GL_BGR_EXT;
            *iComponents = GL_RGB8;
            break;
        case 4:
            *eFormat = GL_BGRA_EXT;
            *iComponents = GL_RGBA8;
            break;
        case 1:
            *eFormat = GL_LUMINANCE;
            *iComponents = GL_LUMINANCE8;
            break;
    };

    // Done with file
    fclose(pFile);

    // Return pointer to image data
    return pBits;
}
iWidth and iHeight return the texture dimensions, and eFormat and iComponents the external and internal image formats; the function's actual return value is a pointer to the texture data.
So your function should look like this:
GLuint Texture::loadTexture(const char * imagepath) {
    printf("Reading image %s\n", imagepath);

    // Data read from the header of the TGA file
    GLint width, height;
    GLint component;
    GLenum eFormat;

    // Actual image data
    GLbyte * data = gltLoadTGA(imagepath, &width, &height, &component, &eFormat);

    // Create one OpenGL texture
    GLuint textureID;
    glGenTextures(1, &textureID);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glBindTexture(GL_TEXTURE_2D, textureID);

    if (!strcmp(imagepath, "hand.tga")) { // important: compare strings, not pointers
        glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
    } else {
        glTexImage2D(GL_TEXTURE_2D, 0, component, width, height, 0, eFormat, GL_UNSIGNED_BYTE, data);
    }

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glGenerateMipmap(GL_TEXTURE_2D);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);

    delete[] data;
    return textureID;
}
I have created a 2D texture array like this:
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,                   // No mipmaps
             GL_RGBA8,            // Internal format
             width, height, 100,  // width, height, layer count
             0,                   // border?
             GL_RGBA,             // format
             GL_UNSIGNED_BYTE,    // type
             0);                  // pointer to data
How do I increase its size from 100 to 200, for example? I guess I would have to create a new 2D array with 200 layers and copy the images over with glCopyTexSubImage3D?
glBindTexture(GL_TEXTURE_2D_ARRAY, texture_id);
glCopyTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                    0,
                    0, 0, 0,
                    0, 0,
                    width, height);
glDeleteTextures(1, &texture_id);

GLuint new_tex_id;
glGenTextures(1, &new_tex_id);
glBindTexture(GL_TEXTURE_2D_ARRAY, new_tex_id);
glTexImage3D(GL_TEXTURE_2D_ARRAY,
             0,
             GL_RGBA8,
             width, height, 200,
             0,
             GL_RGBA,
             GL_UNSIGNED_BYTE,
             0);
// How do I get the data in `GL_READ_BUFFER` into my newly bound texture?
texture_id = new_tex_id;
But how do I actually get the data out of the GL_READ_BUFFER?
glCopyTexSubImage copies data from the framebuffer, not from a texture. That's why it doesn't take two texture objects to copy with.
Copying from a texture into another texture requires glCopyImageSubData. This is an OpenGL 4.3 function, from ARB_copy_image. A similar function can also be found in NV_copy_image, which may be more widely supported.
BTW, you should generally avoid doing this operation at all. If you needed a 200 element array texture, you should have allocated that the first time.
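If you do end up needing the copy, a minimal sketch on a 4.3 context (assuming texture_id is the old 100-layer array and new_tex_id the freshly allocated 200-layer one) could look like this:
glCopyImageSubData(texture_id, GL_TEXTURE_2D_ARRAY, 0, 0, 0, 0,   // source, mip level 0, origin
                   new_tex_id, GL_TEXTURE_2D_ARRAY, 0, 0, 0, 0,   // destination, mip level 0, origin
                   width, height, 100);                           // copy all 100 existing layers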
The glCopyImageSubData() function that @NicolBolas pointed out is the easiest solution if you're OK with requiring OpenGL 4.3 or later.
You can use glCopyTexSubImage3D() for this purpose. But since the source for this function is the current read framebuffer, you need to bind your original texture as a framebuffer attachment. The code could roughly look like this:
GLuint fbo = 0;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glBindTexture(GL_TEXTURE_2D_ARRAY, new_tex_id);

for (int layer = 0; layer < 100; ++layer) {
    glFramebufferTextureLayer(GL_READ_FRAMEBUFFER,
                              GL_COLOR_ATTACHMENT0, tex_id, 0, layer);
    glCopyTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                        0, 0, 0, layer, 0, 0, width, height);
}
You can also use glBlitFramebuffer() instead:
GLuint fbos[2] = {0, 0};
glGenFramebuffers(2, fbos);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbos[0]);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbos[1]);

for (int layer = 0; layer < 100; ++layer) {
    glFramebufferTextureLayer(GL_READ_FRAMEBUFFER,
                              GL_COLOR_ATTACHMENT0, tex_id, 0, layer);
    glFramebufferTextureLayer(GL_DRAW_FRAMEBUFFER,
                              GL_COLOR_ATTACHMENT0, new_tex_id, 0, layer);
    glBlitFramebuffer(
        0, 0, width, height, 0, 0, width, height,
        GL_COLOR_BUFFER_BIT, GL_NEAREST);
}
The two options should be more or less equivalent. I would probably go with glBlitFramebuffer() since it's a newer function (introduced in 3.0), and it might be much more commonly used. So it might be more optimized. But if this is performance critical in your application, you should try both, and compare.
I'm trying to use luminance textures on my ATI graphics card.
The problem: I'm not able to correctly retrieve data from my GPU. Whenever I try to read it (using glReadPixels), all I get is an 'all-ones' array (1.0, 1.0, 1.0, ...).
You can test it with this code:
#include <stdio.h>
#include <stdlib.h>
#include <GL/glew.h>
#include <GL/glut.h>

static int arraySize = 64;
static int textureSize = 8;

//static GLenum textureTarget = GL_TEXTURE_2D;
//static GLenum textureFormat = GL_RGBA;
//static GLenum textureInternalFormat = GL_RGBA_FLOAT32_ATI;

static GLenum textureTarget = GL_TEXTURE_RECTANGLE_ARB;
static GLenum textureFormat = GL_LUMINANCE;
static GLenum textureInternalFormat = GL_LUMINANCE_FLOAT32_ATI;

int main(int argc, char** argv)
{
    // create test data and fill arbitrarily
    float* data = new float[arraySize];
    float* result = new float[arraySize];

    for (int i = 0; i < arraySize; i++)
    {
        data[i] = i + 1.0;
    }

    // set up glut to get valid GL context and
    // get extension entry points
    glutInit(&argc, argv);
    glutCreateWindow("TEST1");
    glewInit();

    // viewport transform for 1:1 pixel=texel=data mapping
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0.0, textureSize, 0.0, textureSize);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glViewport(0, 0, textureSize, textureSize);

    // create FBO and bind it (that is, use offscreen render target)
    GLuint fboId;
    glGenFramebuffersEXT(1, &fboId);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);

    // create texture
    GLuint textureId;
    glGenTextures(1, &textureId);
    glBindTexture(textureTarget, textureId);

    // set texture parameters
    glTexParameteri(textureTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(textureTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(textureTarget, GL_TEXTURE_WRAP_S, GL_CLAMP);
    glTexParameteri(textureTarget, GL_TEXTURE_WRAP_T, GL_CLAMP);

    // define texture with floating point format
    glTexImage2D(textureTarget, 0, textureInternalFormat, textureSize, textureSize, 0, textureFormat, GL_FLOAT, 0);

    // attach texture
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, textureTarget, textureId, 0);

    // transfer data to texture
    //glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
    //glRasterPos2i(0, 0);
    //glDrawPixels(textureSize, textureSize, textureFormat, GL_FLOAT, data);
    glBindTexture(textureTarget, textureId);
    glTexSubImage2D(textureTarget, 0, 0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, data);

    // and read back
    glReadBuffer(GL_COLOR_ATTACHMENT0_EXT);
    glReadPixels(0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, result);

    // print out results
    printf("**********************\n");
    printf("Data before roundtrip:\n");
    printf("**********************\n");
    for (int i = 0; i < arraySize; i++)
    {
        printf("%f, ", data[i]);
    }
    printf("\n\n\n");

    printf("**********************\n");
    printf("Data after roundtrip:\n");
    printf("**********************\n");
    for (int i = 0; i < arraySize; i++)
    {
        printf("%f, ", result[i]);
    }
    printf("\n");

    // clean up
    delete[] data;
    delete[] result;
    glDeleteFramebuffersEXT(1, &fboId);
    glDeleteTextures(1, &textureId);

    system("pause");
    return 0;
}
I also read somewhere on the internet that ATI cards don't support luminance yet. Does anyone know if this is true?
This has nothing to do with luminance values; the problem is with you reading floating point values.
In order to read floating-point data back properly via glReadPixels, you first need to set the color clamping mode. Since you're obviously not using OpenGL 3.0+, you should be looking at the ARB_color_buffer_float extension. In that extension is glClampColorARB, which works pretty much like the core 3.0 version.
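Something along these lines, right before the read-back (a sketch, assuming the extension is present and its entry point is loaded, e.g. via GLEW):
// do not clamp colours returned by glReadPixels to [0, 1]
glClampColorARB(GL_CLAMP_READ_COLOR_ARB, GL_FALSE);
glReadPixels(0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, result);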
Here's what I found out:
1) If you use GL_LUMINANCE as the texture format (and GL_LUMINANCE_FLOAT32_ATI, GL_LUMINANCE32F_ARB or GL_RGBA_FLOAT32_ATI as the internal format), glClampColor(..) (or glClampColorARB(..)) doesn't seem to work at all.
I was only able to see the values being actively clamped/not clamped if I set the texture format to GL_RGBA. I don't understand why this happens, since the only glClampColor(..) limitation I've heard of is that it works exclusively with floating-point buffers, which all of the chosen internal formats appear to be.
2) If you use GL_LUMINANCE (again, with GL_LUMINANCE_FLOAT32_ATI, GL_LUMINANCE32F_ARB or GL_RGBA_FLOAT32_ATI as the internal format), it looks like you must "correct" your output buffer by dividing each of its elements by 3. I guess this happens because when you use glTexImage2D(..) with GL_LUMINANCE, it internally replicates each array component three times, and when you read GL_LUMINANCE values with glReadPixels(..) it computes each value as the sum of the RGB components (thus, three times what you gave as input). But again, it still gives you clamped values.
3) Finally, if you use GL_RED as the texture format (instead of GL_LUMINANCE), you don't need to pack your input buffer and you get your output buffer back properly. The values are not clamped and you don't need to call glClampColor(..) at all.
So I guess I'll stick with GL_RED, because in the end what I wanted was an easy way to send floating-point values to my "kernels" and collect them back, without having to worry about offsetting array indexes or anything like that.
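For reference, a minimal version of that setup could look roughly like this (a sketch; GL_R32F is my choice of internal format here and needs GL 3.0 or ARB_texture_rg, whereas the test program above used the vendor-specific float formats):
static GLenum textureTarget         = GL_TEXTURE_RECTANGLE_ARB;
static GLenum textureFormat         = GL_RED;
static GLenum textureInternalFormat = GL_R32F;   // one unclamped float per texel

// creation, upload and read-back stay the same as in the test program:
glTexImage2D(textureTarget, 0, textureInternalFormat, textureSize, textureSize, 0, textureFormat, GL_FLOAT, 0);
glTexSubImage2D(textureTarget, 0, 0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, data);
glReadPixels(0, 0, textureSize, textureSize, textureFormat, GL_FLOAT, result);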
I'm writing an app for Mac OS X with OpenGL 2.1
I have a CVOpenGLTextureRef which holds the texture that I render with GL_QUADS and everything works fine.
I now need to determine which pixels of the texture are black, so I have written this code to read the raw data of the texture:
//"image" is the CVOpenGLTextureRef
GLenum textureTarget = CVOpenGLTextureGetTarget(image);
GLuint textureName = CVOpenGLTextureGetName(image);
glEnable(textureTarget);
glBindTexture(textureTarget, textureName);
GLint textureWidth, textureHeight;
int bytes;
glGetTexLevelParameteriv(textureTarget, 0, GL_TEXTURE_WIDTH, &textureWidth);
glGetTexLevelParameteriv(textureTarget, 0, GL_TEXTURE_HEIGHT, &textureHeight);
bytes = textureWidth*textureHeight;
GLfloat buffer[bytes];
glGetTexImage(textureTarget, 0, GL_LUMINANCE, GL_FLOAT, &buffer);
GLenum error = glGetError();
glGetError() reports GL_NO_ERROR, but buffer is unchanged after the call to glGetTexImage(); it's still blank.
Am I doing something wrong?
Note that I can't use glReadPixels() because I modify the texture before rendering it and I need to get raw data of the unmodified texture.
EDIT: I even tried the following approach, but the output buffer is still all zeros:
unsigned char *buffer = (unsigned char *)malloc(textureWidth * textureHeight * sizeof(unsigned char));
glGetTexImage(textureTarget, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, buffer);
EDIT2: The same problem is reported here and here.
Try this:
glGetTexImage(textureTarget, 0, GL_LUMINANCE, GL_FLOAT, buffer);
Perhaps you were thinking of this idiom:
vector< GLfloat > buffer( bytes );
glGetTexImage(textureTarget, 0, GL_LUMINANCE, GL_FLOAT, &buffer[0]);
EDIT: Setting your pack alignment before readback may also be worthwhile:
glPixelStorei(GL_PACK_ALIGNMENT, 1);
I have discovered that this is allowed behavior of glGetTexImage() with respect to a CVOpenGLTextureRef. The only sure way is to draw the texture into an FBO and then read from that with glReadPixels().
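A minimal sketch of that workaround, reusing the variables from the question (I'm using the core FBO names here; on a 2.1 context the EXT variants like glGenFramebuffersEXT may be what is actually available):
GLuint fbo = 0;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, textureTarget, textureName, 0);

if (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE) {
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadPixels(0, 0, textureWidth, textureHeight, GL_LUMINANCE, GL_FLOAT, buffer);
}

glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);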