Generating mipmaps with a PBO is too slow? - opengl

I'm developing a video-like application.
To speed up texture uploading, I use a PBO (pixel buffer object).
To deal with aliasing artifacts, I use mipmaps.
Let me show you the major code:
// Step 1: Build the texture.
// Generate the texture object.
GLuint tex;
glGenTextures(1, &tex);
// Generate the PBO and allocate its storage.
GLuint pbo;
glGenBuffers(1, &pbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER, width_ * height_ * channel_, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
// Set texture parameters.
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Allocate memory for the texture.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width_, height_, 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
// Generate the mipmap chain.
glGenerateMipmap(GL_TEXTURE_2D);
// Step 2: For each frame, upload the texture data and regenerate the mipmap.
while (true) {
    // Orphan the PBO and map it to client memory.
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER, width_ * height_ * channel_, 0, GL_DYNAMIC_DRAW);
    mapped_buffer_ = (GLubyte*) glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, width_ * height_ * channel_, GL_MAP_WRITE_BIT);
    // Copy the image data into the mapped buffer, then unmap it.
    memcpy(mapped_buffer_, pointer, width_ * height_ * channel_);
    glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
    // Passing 0 as the last parameter means the data is sourced from the bound PBO, so the upload runs asynchronously.
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, GL_RGB, GL_UNSIGNED_BYTE, 0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    // glGenerateMipmap is a synchronous call, so it blocks until the texture upload is done.
    glGenerateMipmap(GL_TEXTURE_2D);
}
The result is that the frame rate drops significantly.
I think the reason is that generating the mipmap defeats the benefit of uploading the texture asynchronously through the PBO.
As far as I know, there is no way to have the mipmap generated automatically once the texture upload is done.
Is there any way to improve this?

Related

Separate stencil with FBO (NVidia)

I'm trying to have an FBO with separate depth and stencil buffers.
I know that NVIDIA GPUs have historically only supported packed depth/stencil.
However, I stumbled on the ARB_texture_stencil8 extension and wonder how to use it with an FBO.
This code gives GL error 1159 on glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_TEXTURE_2D, fboStencilTexture, 0):
static GLuint fboId, rboDepth, rboStencil, TEXTURE_WIDTH, TEXTURE_HEIGHT;
if (!fboColorTexture) {
    fboDepthTexture = fboId = rboDepth = rboStencil = TEXTURE_WIDTH = TEXTURE_HEIGHT = 0; // vid restart?
    glGenTextures(1, &fboColorTexture);
    glBindTexture(GL_TEXTURE_2D, fboColorTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
if (!fboDepthTexture) {
    glGenTextures(1, &fboDepthTexture);
    glBindTexture(GL_TEXTURE_2D, fboDepthTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
if (!fboStencilTexture) {
    glGenTextures(1, &fboStencilTexture);
    glBindTexture(GL_TEXTURE_2D, fboStencilTexture);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    qglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
GLuint curWidth = r_virtualResolution.GetFloat() * glConfig.vidWidth, curHeight = r_virtualResolution.GetFloat() * glConfig.vidHeight;
if (curWidth != TEXTURE_WIDTH || curHeight != TEXTURE_HEIGHT) {
    TEXTURE_WIDTH = curWidth;
    TEXTURE_HEIGHT = curHeight;
    glBindTexture(GL_TEXTURE_2D, fboColorTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB5_A1, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); // NULL means reserve texture memory, but texels are undefined
    glBindTexture(GL_TEXTURE_2D, fboDepthTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
    glBindTexture(GL_TEXTURE_2D, fboStencilTexture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_STENCIL_INDEX8, TEXTURE_WIDTH, TEXTURE_HEIGHT, 0, GL_STENCIL_INDEX, GL_FLOAT, 0);
}
//-------------------------
if (!fboId) {
    // Create a framebuffer object; it needs to be deleted when the program exits.
    glGenFramebuffersEXT(1, &fboId);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
    // Attach a texture to the FBO color attachment point.
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, fboColorTexture, 0);
    // Attach textures to the depth and stencil attachment points.
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, fboDepthTexture, 0);
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT, GL_TEXTURE_2D, fboStencilTexture, 0);
    glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
}
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboId);
The purpose of ARB_texture_stencil8 is to permit you to use stencil-only formats as textures. That is, reading from them in shaders.
This does not mean that you can separate your stencil and depth buffers. The idea behind stencil-8 textures is that you would generate their data by either copying the stencil portion of a depth/stencil texture, or you would render without a depth buffer entirely.
So you cannot use this extension to guarantee that you can render to separate depth and stencil images. That's still hardware dependent.
Also, if you're going to use new features like ARB_texture_stencil8, you shouldn't be combining them with old EXT_framebuffer_object APIs. So stop using glFramebufferTexture2DEXT and start using glFramebufferTexture.
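For illustration, a minimal sketch of the core-profile attachment calls, reusing the texture names from the question (whether a separate GL_STENCIL_INDEX8 attachment is actually accepted remains hardware/driver dependent, so treat this as an assumption to verify):
// Hypothetical core-profile setup; glFramebufferTexture needs GL 3.2+.
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, fboColorTexture, 0);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, fboDepthTexture, 0);
glFramebufferTexture(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, fboStencilTexture, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    // The separate depth/stencil combination is not supported by this driver.
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);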

Creating and reading 1D textures in OpenGL 4.x

I have problems using 1D textures in OpenGL 4.x.
I create my 1D texture this way (BTW: I removed my error checks to make the code clearer and shorter - usually each gl call is followed by a BLUE_ASSERTEx(glGetError() == GL_NO_ERROR, "glGetError failed.");):
glGenTextures(1, &textureId_);
// bind texture
glBindTexture(GL_TEXTURE_1D, textureId_);
// tells OpenGL how the data that is going to be uploaded is aligned
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
BLUE_ASSERTEx(description.data, "Invalid data provided");
glTexImage1D(
    GL_TEXTURE_1D,      // Specifies the target texture. Must be GL_TEXTURE_1D or GL_PROXY_TEXTURE_1D.
    0,                  // Specifies the level-of-detail number. Level 0 is the base image level. Level n is the nth mipmap reduction image.
    GL_RGBA32F,
    description.width,
    0,                  // border: This value must be 0.
    GL_RGBA,
    GL_FLOAT,
    description.data);
BLUE_ASSERTEx(glGetError() == GL_NO_ERROR, "glGetError failed.");
// texture sampling/filtering operation.
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_1D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_1D, 0);
After the creation I try to read the pixel data of the created texture this way:
const int width = width_;
const int height = 1;
// Allocate memory for the read-back pixel data
float* pixels = new float[width*height*sizeof(buw::vector4f)];
// bind texture
glBindTexture(GL_TEXTURE_1D, textureId_);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glReadPixels(0, 0, width, height, GL_RGBA, GL_FLOAT, pixels);
glBindTexture(GL_TEXTURE_1D, 0);
buw::Image_4f::Ptr img(new buw::Image_4f(width, height, pixels));
buw::storeImageAsFile(filename.toCString(), img.get());
delete[] pixels;
But the returned pixel data is different from the input pixel data (input: a color ramp, output: a black image).
Any ideas how to solve the issue? Maybe I am using the wrong API call.
Replacing glReadPixels with glGetTexImage fixes the issue.
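For reference, a minimal sketch of the read-back via glGetTexImage, reusing textureId_ and width_ from the question (the buw image helpers are omitted):
// Read level 0 of the texture object directly instead of reading from the framebuffer.
float* pixels = new float[width_ * 4]; // RGBA floats for one 1D row
glBindTexture(GL_TEXTURE_1D, textureId_);
glPixelStorei(GL_PACK_ALIGNMENT, 1);
glGetTexImage(GL_TEXTURE_1D, 0, GL_RGBA, GL_FLOAT, pixels);
glBindTexture(GL_TEXTURE_1D, 0);
// ... store or inspect pixels ...
delete[] pixels;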

How is glDrawBuffers associated with drawing to a depth texture

You can specify which buffers to draw to using glDrawBuffers(), for example:
GLenum buffers[] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers( 1, buffers );
OK, so that makes sense.
But how about the depth texture?
So here is our creation of a framebuffer:
glGenFramebuffers(1, &m_uifboHandle);
// Initialize FBO
glBindFramebuffer(GL_FRAMEBUFFER, m_uifboHandle);
unsigned int m_uiTextureHandle[2];
glGenTextures( 2, m_uiTextureHandle );
glBindTexture( GL_TEXTURE_2D, m_uiTextureHandle[0]);
// Reserve space for our 2D render target
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, iInternalFormat, uiWidth, uiHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_uiTextureHandle[0], 0);
//now the depth
glBindTexture(GL_TEXTURE_2D, m_uiTextureHandle[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, uiWidth, uiHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_uiTextureHandle[1], 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
So these steps basically:
1) make a framebuffer to render to
2) give us a location for attachment 0 to write to
3) create our depth attachment
Now how can I write to it? I've seen examples where rendering automatically renders to it, but I want to understand how binding it for writing works.
glDrawBuffers is not related to the depth buffer at all. It allows selecting the color buffers to render into, and there can be more than one color buffer per framebuffer. But there can be at most one depth buffer. So either there is no depth buffer at all, or there is one and the depth value is written to the depth buffer (assuming the depth test is enabled and the depth mask is set to GL_TRUE).
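To make that concrete, a minimal sketch (not taken from the question's code): glDrawBuffers only lists color attachments, while depth writes are controlled by the depth-test state:
// Select the color attachment(s) to render into; the depth attachment is never listed here.
GLenum buffers[] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, buffers);
// Depth values are written as a side effect of rendering, provided:
glEnable(GL_DEPTH_TEST); // depth testing is enabled
glDepthMask(GL_TRUE);    // and depth writes are not masked off
// ... draw calls now write both the color attachment and the depth attachment ...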

What might be the issue with my pipeline for rendering the depth buffer to a texture?

The main steps for depth testing, from my understanding:
1) enable depth testing and specify how we want to depth test
2) create the framebuffer object and make sure it has a depth attachment
3) bind our framebuffer object (making sure to clear it before rendering)
4) draw stuff
And that should be it, no? Our framebuffer's depth attachment should have depth data. But I always get straight 1's, the default depth clear value.
step 1:
glEnable(GL_DEPTH_TEST);
glDepthFunc( GL_LEQUAL );
step 2:
//create the frame buffer object
glGenFramebuffers(1, &m_uifboHandle);
// Initialize FBO
glBindFramebuffer(GL_FRAMEBUFFER, m_uifboHandle);
//create 2 texture handles 1 for diffuse, 1 for depth
unsigned int m_uiTextureHandle[2];
glGenTextures( 2, m_uiTextureHandle );
//create the diffuse texture
glBindTexture( GL_TEXTURE_2D, m_uiTextureHandle[0]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_uiTextureHandle[0], 0);
//create the depth buffer
glBindTexture(GL_TEXTURE_2D, m_uiTextureHandle[1]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP );
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, uiWidth, uiHeight, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_uiTextureHandle[1], 0);
//go back to default binding
glBindFramebuffer(GL_FRAMEBUFFER, 0);
step 3:
//bind the frame buffer object
glBindFramebuffer( GL_FRAMEBUFFER, m_uifboHandle );
//clear it
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
step 4:
//draw things
Are these not the steps?
Am I missing something?
I've tried a few different tutorials.
I can't get any depth to render to a texture.
I keep getting straight 1's over and over.
The framebuffer probably is not complete. Try checking it for completeness. Moreover, your code was:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
However, it should be (note the GL_RGB vs. GL_RGBA):
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, uiWidth, uiHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
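A minimal completeness check might look like this (a sketch using the m_uifboHandle name from the question; the exact status values you can get depend on the driver):
glBindFramebuffer(GL_FRAMEBUFFER, m_uifboHandle);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
    // status tells you why, e.g. GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT or GL_FRAMEBUFFER_UNSUPPORTED
}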

glBlitFramebuffer Invalid Operation

I've been messing around with framebuffers and render-to-texture, and I came across the need to blit them. Again, on some machines I get a GL_INVALID_OPERATION right after the glBlitFramebuffer call. Each texture bound to the framebuffer is set up the exact same way, all the same size and parameters. Also, when I try to blit one entire texture (previously successfully rendered to) to another framebuffer, where only the destination 'rectangle' to write to is smaller than the rectangle to read from (e.g. when I want to blit it to a quarter of the screen), it throws a GL_INVALID_OPERATION too.
EDIT: Actually it always throws the error whenever the rectangles to read from and draw to have different sizes, so I can't blit to a texture of a different size, or to one of the same size but with a different-sized 'render to' area...?
Every time I blit to a manually generated framebuffer, the status is checked through glCheckFramebufferStatus and it always returns GL_FRAMEBUFFER_COMPLETE.
-BIGGEST SNIP EVER-, see below for shorter 'source code'; obviously it has a couple of C++ errors and isn't complete, but it's only for the GL calls.
The OpenGL error occurs when I call the last method of the viewport (Viewport::blit) with the screen framebuffer as target (by passing NULL). It first sets the read buffer of its own framebuffer (the draw buffers were already set) and then it calls RenderTarget::blit, which calls glBlitFramebuffer. In the blit method it binds both buffers, and you can see it calls glCheckFramebufferStatus there, which does not return an error.
I've been reading this over and over but I can't seem to find the error that causes it. When I blit the color buffer I use GL_LINEAR, otherwise I use GL_NEAREST. All color buffers use GL_RGB32F as internal format and the depth buffer (which I never blit) uses GL_DEPTH_COMPONENT32F.
EDIT: a shorter example; I just took all the GL calls and filled in the parameters I used.
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
glReadBuffer(GL_COLOR_ATTACHMENT0 + index);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
// OpenGL error check, does not return an error
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
GLenum status = glCheckFramebufferStatus(GL_READ_FRAMEBUFFER);
if(status != GL_FRAMEBUFFER_COMPLETE)
{
    // Some error checking, fortunately status always turns out to be complete
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, screenWidth, screenHeight, 0, 0, screenWidth, screenHeight, target, (target == GL_COLOR_BUFFER_BIT) ? GL_LINEAR : GL_NEAREST);
// If the source/destination read/draw rectangles differ in size, GL_INVALID_OPERATION is caught here
And the Framebuffer/Texture creation:
glGenFramebuffers(1, &_GL_Framebuffer);
glGenTextures(1, &_GL_ZBuffer);
glBindTexture(GL_TEXTURE_2D, _GL_ZBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, screenWidth, screenHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_2D, 0);
glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT + 0, _GL_ZBuffer, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
int writeIndices[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
    writeIndices[i] = i;
    glGenTextures(1, &_GL_Texture);
    glBindTexture(GL_TEXTURE_2D, _GL_Texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, screenWidth, screenHeight, 0, GL_RGB, GL_FLOAT, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glBindTexture(GL_TEXTURE_2D, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, _GL_Framebuffer);
    glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, _GL_Texture, 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    // In the actual code each texture is obviously saved in an object
}
GLenum *enums = new GLenum[BUFFER_COUNT];
for(unsigned int i = 0; i < BUFFER_COUNT; ++i)
{
    // Get index and validate
    int index = *(indices + i); // indices = writeIndices
    if(index < 0 || index >= maxAttachments)
    {
        delete[] enums;
        return false;
    }
    // Set index
    enums[i] = GL_COLOR_ATTACHMENT0 + index;
}
// Set indices
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _GL_Framebuffer);
glDrawBuffers(BUFFER_COUNT, enums);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
delete[] enums;
// OpenGL error check, no errors
After some careful reading I found out that a difference in multisampling was the problem. The 'main' FBO was set up by SFML, so by simply setting the anti-aliasing level to 0 on startup the problem was partially solved.
It now blits when the draw/read rectangles are unequal in size, but it keeps crashing on some machines where it is supposed to work.
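For what it's worth, when the multisampled buffer is the one being read from, a common workaround (just a sketch, assuming a spare single-sampled FBO named resolveFbo of the same size, which is not part of the code above) is to resolve with a 1:1 blit first and only then do the scaled blit, because a multisample resolve and a scaling blit cannot be combined in one glBlitFramebuffer call:
// Step 1: resolve the multisampled FBO into a single-sampled one (rectangles must match exactly).
glBindFramebuffer(GL_READ_FRAMEBUFFER, _GL_Framebuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, resolveFbo);
glBlitFramebuffer(0, 0, screenWidth, screenHeight,
                  0, 0, screenWidth, screenHeight,
                  GL_COLOR_BUFFER_BIT, GL_NEAREST);
// Step 2: blit from the resolved FBO with scaling (different rectangle sizes are allowed now).
glBindFramebuffer(GL_READ_FRAMEBUFFER, resolveFbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, screenWidth, screenHeight,
                  0, 0, screenWidth / 2, screenHeight / 2,
                  GL_COLOR_BUFFER_BIT, GL_LINEAR);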