OpenGL cannot delete buffer

I draw objects using glBufferData, and I want to replace the drawing data repeatedly: delete the old buffer data, then upload new data. But I can't get it to work, because the memory is never released, and after a few iterations I get System.OutOfMemory errors. How can I delete the data? GL.DeleteBuffers doesn't seem to do anything.
My code is below.
public void ReDraw()
{
    for (int xx = 0; xx < 500; xx++)
    {
        this.DeleteBuffers(0);
        this.LoadDrawDatas(0, Obje);
        this.openGLControl.Invalidate();
    }
}
private void DeleteBuffers(int data)
{
    if (this.texturesBuffer[data] != null)
    {
        GL.DeleteBuffers(this.texturesBuffer[data].Length, this.texturesBuffer[data]);
        GL.DeleteTextures(1, this.textures[data]);
    }
    if (this.vertex != null)
    {
        GL.DeleteBuffers(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteVertexArrays(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteFramebuffers(this.vertex[data].Length, this.vertex[data]);
        GL.DeleteRenderbuffers(this.vertex[data].Length, this.vertex[data]);
    }
    if (this.normal != null)
    {
        GL.DeleteBuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteFramebuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteRenderbuffers(this.normal[data].Length, this.normal[data]);
        GL.DeleteVertexArrays(this.normal[data].Length, this.normal[data]);
    }
}
private void LoadDrawDatas(int data, UcBoyutluNesneBilgisi obje)
{
    Rectangle r1 = new Rectangle(0, 0, obje.Texture.Width, obje.Texture.Height);
    GL.GenTextures(1, this.textures[data]);
    GL.ActiveTexture(TextureUnit.Texture0);
    GL.Enable(EnableCap.Texture2D);
    GL.BindTexture(TextureTarget.Texture2D, this.textures[data][0]);
    System.Drawing.Imaging.BitmapData bitmapData21 = obje.Texture.LockBits(r1, System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
    GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, obje.Texture.Width, obje.Texture.Height, 0, PixelFormat.Bgra, PixelType.UnsignedByte, bitmapData21.Scan0);
    GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
    obje.Texture.UnlockBits(bitmapData21);
    GL.GenBuffers(1, this.texturesBuffer[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.texturesBuffer[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.texturesCoord.Count * sizeof(float)), obje.texturesCoord.ToArray(), BufferUsageHint.StreamDraw);
    GL.GenBuffers(1, this.normal[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.normal[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.Normals.Count * sizeof(float)), obje.Normals.ToArray(), BufferUsageHint.StreamDraw);
    GL.GenBuffers(1, this.vertex[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.vertex[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(obje.VertexKoordinatlari.Count * sizeof(float)), IntPtr.Zero, BufferUsageHint.StreamDraw);
    GL.BufferSubData(BufferTarget.PixelPackBuffer, IntPtr.Zero, (IntPtr)(obje.Vertexs.Count * sizeof(float)), obje.VertexsToArray());
    GL.GenBuffers(1, this.indeks[data]);
    GL.BindBuffer(BufferTarget.PixelPackBuffer, this.indeks[data][0]);
    GL.BufferData(BufferTarget.PixelPackBuffer, (IntPtr)(this.indexsData[data].Length * sizeof(int)), this.indexsData[data], BufferUsageHint.StreamDraw);
}
private void openGLControl_Paint(object sender, PaintEventArgs e)
{
    GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
    GL.MatrixMode(MatrixMode.Modelview);
    GL.LoadIdentity();
    GL.MatrixMode(MatrixMode.Projection);
    GL.LoadIdentity();
    GL.Rotate(X, 1, 0, 0);
    GL.Rotate(Y, 0, 1, 0);
    GL.Rotate(Z, 0, 0, 1);
    GL.Scale(this.zoom * 0.01f, this.zoom * 0.01f, this.zoom * 0.01f);
    GL.PushMatrix();
    GL.EnableClientState(ArrayCap.NormalArray);
    GL.BindBuffer(BufferTarget.ArrayBuffer, normals[0]);
    GL.NormalPointer(NormalPointerType.Float, 0, 0);
    GL.EnableClientState(ArrayCap.VertexArray);
    GL.BindBuffer(BufferTarget.ArrayBuffer, vertex[0]);
    GL.VertexPointer(3, VertexPointerType.Float, 0, IntPtr.Zero);
    if (texturesBuffer != null)
    {
        GL.EnableClientState(ArrayCap.TextureCoordArray);
        GL.BindBuffer(BufferTarget.ArrayBuffer, texturesBuffer[0]);
        GL.TexCoordPointer(2, TexCoordPointerType.Float, 0, IntPtr.Zero);
    }
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indeks[0]);
    GL.DrawElements(PrimitiveType.Triangles, indexsData.Length, DrawElementsType.UnsignedInt, IntPtr.Zero);
    GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, 0);
    GL.DisableClientState(ArrayCap.NormalArray);
    GL.DisableClientState(ArrayCap.VertexArray);
    GL.DisableClientState(ArrayCap.TextureCoordArray);
    GL.PopMatrix();
}

You don't need to call glGenBuffers every time you want to upload data to a VBO (the buffers created by glGenBuffers are called VBOs).
Once a VBO is created, you can simply overwrite its data with glBufferData; OpenGL reallocates the memory on the GPU by itself in the background.
Also, for better performance, if your VBOs keep the same size from one frame to the next, you can use glBufferSubData to update their contents without the performance penalty of OpenGL reallocating the memory on the GPU.
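As a minimal sketch of that reuse pattern, in C-style GL calls (the OpenTK GL.* wrappers map to these one-to-one; vbo, vertexCount, newCount, newData, and sizeChanged are hypothetical names):

// Create the buffer once, at startup.
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, vertexCount * sizeof(float), NULL, GL_STREAM_DRAW);

// Each time the geometry changes: no glDeleteBuffers/glGenBuffers needed.
glBindBuffer(GL_ARRAY_BUFFER, vbo);
if (sizeChanged)
    glBufferData(GL_ARRAY_BUFFER, newCount * sizeof(float), newData, GL_STREAM_DRAW); // reallocates in place
else
    glBufferSubData(GL_ARRAY_BUFFER, 0, vertexCount * sizeof(float), newData); // reuses the existing storage

// Delete the buffer once, at shutdown.
glDeleteBuffers(1, &vbo);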

Related

OpenGL simple quad rendering

Do you have any idea why this isn't working?
The old immediate mode works, but I want to use a VAO and VBOs.
(PS: I know the VAO should only be created once; I build it all in this method just for the test, and I will move those lines out after testing.)
private void allocateIndexBuffer(GL2 graphics, int[] indices) {
    int[] id = new int[1];
    graphics.glGenBuffers(1, id, 0);
    int vboId = id[0];
    graphics.glBindBuffer(GL2.GL_ELEMENT_ARRAY_BUFFER, vboId);
    IntBuffer buffer = IntBuffer.allocate(indices.length);
    buffer.put(0, indices);
    buffer.flip();
    graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, indices.length, buffer, GL2.GL_DYNAMIC_DRAW);
    //graphics.glDeleteBuffers(vboId, buffer); TODO: clean up on closing
}
private void allocateAttributeBuffer(GL2 graphics, int attribute, float[] data) {
    int[] id = new int[1];
    graphics.glGenBuffers(1, id, 0);
    int vboId = id[0];
    graphics.glBindBuffer(GL2.GL_ARRAY_BUFFER, vboId); // just fill vboId, or replace it every time?
    FloatBuffer buffer = FloatBuffer.allocate(data.length);
    buffer.put(0, data);
    buffer.flip();
    graphics.glBufferData(GL2.GL_ARRAY_BUFFER, data.length, buffer, GL2.GL_DYNAMIC_DRAW);
    graphics.glVertexAttribPointer(0, 2, GL2.GL_FLOAT, false, 0, 0); // once the buffer is bound
    graphics.glEnableVertexAttribArray(0);
    graphics.glBindBuffer(GL2.GL_ARRAY_BUFFER, 0);
    //graphics.glDeleteBuffers(vboId, buffer); TODO: clean up on closing
    //graphics.glDeleteVertexArrays(vboId, null); TODO: clean up VAOs
}
@Override
protected void draw(GL2 graphics) {
    String mode = "new";
    if (mode.equals("new")) {
        float[] vertices = {
            bounds.getX(), bounds.getY(),
            bounds.getX(), bounds.getY() + bounds.getHeight(),
            bounds.getX() + bounds.getWidth(), bounds.getY() + bounds.getHeight(),
            bounds.getX() + bounds.getWidth(), bounds.getY(),
        };
        int[] indices = { 0, 1, 2, 2, 1, 3 };
        // VAO creation
        int[] id = new int[1];
        graphics.glGenVertexArrays(1, id, 0);
        int vaoId = id[0];
        graphics.glBindVertexArray(vaoId);
        allocateIndexBuffer(graphics, indices);
        allocateAttributeBuffer(graphics, 0, vertices);
        graphics.glBindVertexArray(0);
        // render
        graphics.glBindVertexArray(vaoId);
        graphics.glEnableVertexAttribArray(0);
        graphics.glDrawElements(GL2.GL_TRIANGLES, indices.length, GL2.GL_UNSIGNED_INT, 0);
        graphics.glDisableVertexAttribArray(0);
        graphics.glBindVertexArray(0);
        graphics.glFlush();
    } else if (mode.equals("old")) {
        graphics.glColor3f(255, 0, 0);
        graphics.glBegin(GL2.GL_QUADS);
        graphics.glVertex2f(bounds.getX(), bounds.getY());
        graphics.glVertex2f(bounds.getX() + bounds.getWidth(), bounds.getY());
        graphics.glVertex2f(bounds.getX() + bounds.getWidth(), bounds.getY() + bounds.getHeight());
        graphics.glVertex2f(bounds.getX(), bounds.getY() + bounds.getHeight());
        graphics.glEnd();
    }
}
The size of the buffer has to be specified in bytes (see glBufferData). So change

graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, indices.length, buffer, GL2.GL_DYNAMIC_DRAW);

to

graphics.glBufferData(GL2.GL_ELEMENT_ARRAY_BUFFER, indices.capacity() * 4, buffer, GL2.GL_DYNAMIC_DRAW);

and change

graphics.glBufferData(GL2.GL_ARRAY_BUFFER, data.length, buffer, GL2.GL_DYNAMIC_DRAW);

to

graphics.glBufferData(GL2.GL_ARRAY_BUFFER, data.capacity() * 4, buffer, GL2.GL_DYNAMIC_DRAW);
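In C-style GL the same rule reads as follows (a hedged sketch; the std::vector-based names are hypothetical): the size argument always counts bytes, not elements.

std::vector<GLuint> indices = { 0, 1, 2, 2, 1, 3 };
// 6 elements * sizeof(GLuint) = 24 bytes; passing plain indices.size() would upload only 6 bytes.
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(GLuint), indices.data(), GL_DYNAMIC_DRAW);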

DirectX11 drawing to wrong render target

I'm attempting to render to a texture for the purpose of shadow mapping in DirectX11. I've set up and bound a separate render target to draw to. Problem is, after calling OMSetRenderTargets it's still rendering to the previously bound render target.
The graphics diagnostics event list shows that OMSetRenderTargets is being called, setting "obj:30" as the render target view. However, the following DrawIndexed call shows the render target as "obj:17", which is the previously bound render target.
(Screenshots: graphics event list, draw call.)
I have the DirectX debug layer enabled, however it does not show any errors or warning messages. I've also ensured that the texture is not bound as a shader resource when the draw call happens but no luck there either.
These are both called by the following function
void GraphicsHandler::DrawSceneToRenderTarget(ID3D11RenderTargetView* RenderTarget, ID3D11VertexShader* WithVertexShader, ID3D11PixelShader* WithPixelShader)
{
    const unsigned int VertexSize = sizeof(Vertex);
    const unsigned int Offset = 0;
    DeviceContext->ClearDepthStencilView(DepthStencilView, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0.0f);
    DeviceContext->VSSetShader(WithVertexShader, nullptr, 0);
    DeviceContext->PSSetShader(WithPixelShader, nullptr, 0);
    DeviceContext->OMSetRenderTargets(1, &RenderTarget, DepthStencilView); // Render target set here
    for (auto& Obj : ActiveScene.Objects)
    {
        ObjectInfo ObjectData;
        ObjectData.ObjectTransform = XMMatrixIdentity();
        ObjectData.ObjectTransform *= XMMatrixRotationRollPitchYaw(Obj->Rotator.X, Obj->Rotator.Y, Obj->Rotator.Z);
        ObjectData.ObjectTransform *= XMMatrixTranslation(Obj->Position.X, Obj->Position.Y, Obj->Position.Z);
        ObjectData.ObjectTransform *= XMMatrixScaling(Obj->Scale.X, Obj->Scale.Y, Obj->Scale.Z);
        ObjectData.NormalMatrix = XMMatrixTranspose(XMMatrixInverse(nullptr, ObjectData.ObjectTransform));
        DeviceContext->UpdateSubresource(ObjectBuffer, 0, nullptr, &ObjectData, 0, 0);
        DeviceContext->UpdateSubresource(MaterialBuffer, 0, nullptr, &Obj->Mat, 0, 0);
        DeviceContext->IASetVertexBuffers(0, 1, &Obj->VertexBuffer, &VertexSize, &Offset);
        DeviceContext->IASetIndexBuffer(Obj->IndexBuffer, DXGI_FORMAT_R16_UINT, 0);
        DeviceContext->VSSetConstantBuffers(0, 1, &ObjectBuffer);
        //DeviceContext->PSSetConstantBuffers(0, 1, &MaterialBuffer);
        DeviceContext->DrawIndexed(Obj->Indices.size(), 0, 0); // Draw called here
    }
}
The problematic calls to it come from the following two functions:
void GraphicsHandler::RenderSceneDepth()
{
    DeviceContext->RSSetState(RasterizerState);
    DeviceContext->PSSetShaderResources(0, 1, &SceneDepthSRV);
    DeviceContext->UpdateSubresource(CameraBuffer, 0, nullptr, &ActiveScene.SceneCamera.GetCameraVSInfo(), 0, 0);
    DeviceContext->VSSetConstantBuffers(1, 1, &CameraBuffer);
    DeviceContext->ClearRenderTargetView(SceneDepthRTV, Colors::Black);
    DrawSceneToRenderTarget(SceneDepthRTV, VertexShader, DepthShader);
}
void GraphicsHandler::RenderShadowMap(ShadowMap& SM)
{
    // Clear shader resources, as the texture can't be bound as input and output
    ID3D11ShaderResourceView* NullResources[2] = { nullptr, nullptr };
    DeviceContext->PSSetShaderResources(0, 2, NullResources);
    DeviceContext->RSSetState(SMRasterizerState); // Need to render back faces only
    ID3D11SamplerState* Samplers[2] = { SamplerState, ShadowSamplerState };
    DeviceContext->PSSetSamplers(0, 2, Samplers);
    // If the light is a directional source, render a directional shadow map
    DirectionalLight* DirLight = nullptr;
    DirLight = dynamic_cast<DirectionalLight*>(SM.ParentLight);
    if (DirLight)
    {
        ID3D11RenderTargetView* RTV = SM.RTVs[0];
        SM.LightPovCamera.ForwardDirection = DirLight->Direction;
        DeviceContext->ClearRenderTargetView(RTV, Colors::Black);
        DeviceContext->UpdateSubresource(LightPovBuffer, 0, nullptr, &SM.LightPovCamera.GetCameraVSInfo(), 0, 0);
        DeviceContext->VSSetConstantBuffers(1, 1, &LightPovBuffer);
        DrawSceneToRenderTarget(RTV, VertexShader, DepthShader);
    }
    // Otherwise, render to each face of the texture cube
    else
    {
        for (int N = 0; N < 6; N++)
        {
            DeviceContext->ClearRenderTargetView(SM.RTVs[N], Colors::Black);
            Camera POVCam = SM.GetCameraForCubemapFace(N);
            DeviceContext->UpdateSubresource(LightPovBuffer, 0, nullptr, &POVCam.GetCameraVSInfo(), 0, 0);
            DeviceContext->VSSetConstantBuffers(1, 1, &LightPovBuffer);
            DrawSceneToRenderTarget(SM.RTVs[N], VertexShader, DepthShader);
        }
    }
}
Whoops, my mistake: the debug layer actually wasn't enabled. The error was caused by the render target having different dimensions than the depth stencil view. Apologies!
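For anyone else who lands here: a minimal sketch of enabling the debug layer at device creation (assuming the usual D3D11CreateDevice setup), which reports this kind of RTV/DSV dimension mismatch in the debugger output:

UINT Flags = 0;
#if defined(_DEBUG)
Flags |= D3D11_CREATE_DEVICE_DEBUG; // enables validation messages in the debugger output
#endif
ID3D11Device* Device = nullptr;
ID3D11DeviceContext* Context = nullptr;
HRESULT hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, Flags,
                               nullptr, 0, D3D11_SDK_VERSION, &Device, nullptr, &Context);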

Setting up vertex attributes with OpenTK

I am having trouble setting the position, normal, and texture coordinate attributes in my shader. I am using meshomatic to load .obj files; here is how the attributes are packed into a single VBO:
void LoadBuffers(MeshData m)
{
    float[] verts, norms, texcoords;
    uint[] indices;
    m.OpenGLArrays(out verts, out norms, out texcoords, out indices);
    GL.GenBuffers(1, out dataBuffer);
    GL.GenBuffers(1, out indexBuffer);
    // Set up data for VBO.
    // We're going to use one VBO for all geometry, and stick it in
    // in (VVVVNNNNCCCC) order. Non interleaved.
    int buffersize = (verts.Length + norms.Length + texcoords.Length);
    float[] bufferdata = new float[buffersize];
    vertOffset = 0;
    normOffset = verts.Length;
    texcoordOffset = (verts.Length + norms.Length);
    verts.CopyTo(bufferdata, vertOffset);
    norms.CopyTo(bufferdata, normOffset);
    texcoords.CopyTo(bufferdata, texcoordOffset);
    // Flip the V coordinate of every (u, v) pair.
    bool v = false;
    for (int i = texcoordOffset; i < bufferdata.Length; i++)
    {
        if (v)
        {
            bufferdata[i] = 1 - bufferdata[i];
            v = false;
        }
        else
        {
            v = true;
        }
    }
    // Load geometry data
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.BufferData<float>(BufferTarget.ArrayBuffer, (IntPtr)(buffersize * sizeof(float)), bufferdata, BufferUsageHint.StaticDraw);
    // Load index data
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indexBuffer);
    GL.BufferData<uint>(BufferTarget.ElementArrayBuffer, (IntPtr)(indices.Length * sizeof(uint)), indices, BufferUsageHint.StaticDraw);
}
And here is how I am drawing:
void DrawBuffer()
{
    // Push current Array Buffer state so we can restore it later
    GL.PushClientAttrib(ClientAttribMask.ClientVertexArrayBit);
    GL.ClientActiveTexture(TextureUnit.Texture0);
    GL.BindTexture(TextureTarget.Texture2D, diff);
    GL.EnableVertexAttribArray(positionIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(positionIndex, 3, VertexAttribPointerType.Float, false, 0, vertOffset);
    GL.EnableVertexAttribArray(texcoordIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(texcoordIndex, 2, VertexAttribPointerType.Float, false, 0, texcoordOffset);
    GL.EnableVertexAttribArray(normalIndex);
    GL.BindBuffer(BufferTarget.ArrayBuffer, dataBuffer);
    GL.VertexAttribPointer(normalIndex, 3, VertexAttribPointerType.Float, false, 0, normOffset);
    // Index array
    GL.BindBuffer(BufferTarget.ElementArrayBuffer, indexBuffer);
    GL.DrawElements(PrimitiveType.Triangles, m.Tris.Length * 3, DrawElementsType.UnsignedInt, IntPtr.Zero);
    // Restore the state
    GL.PopClientAttrib();
}
However, my texture coordinates are wrong: it seems that only a single pixel of my texture is stretched over the entire model. I think I am using GL.VertexAttribPointer(...) incorrectly. What is the second arg, int size?

glDrawElements Fails to draw

I'm working on what will eventually be a terrain drawer; at the moment, however, I am just trying to get a basic pair of triangles to draw with glDrawElements. The code looks like this:
public class TerrainFlat extends AbstractTerrain {
    IntBuffer ib = BufferUtils.createIntBuffer(3);
    int vHandle = ib.get(0);
    int cHandle = ib.get(1);
    int iHandle = ib.get(2);
    FloatBuffer vBuffer = BufferUtils.createFloatBuffer(19);
    FloatBuffer cBuffer = BufferUtils.createFloatBuffer(18);
    ShortBuffer iBuffer = BufferUtils.createShortBuffer(6);

    @Override
    public void initilize() {
        float[] vertexData = {50, 20, 100, 50, -20, 100, 10, -20, 100, -10, -20, 100, -50, -20, 100, -50, 20, 100};
        float[] colorData = {1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0};
        short[] indexData = {0, 1, 2, 3, 4, 5};
        vBuffer.put(vertexData);
        vBuffer.flip();
        cBuffer.put(colorData);
        cBuffer.flip();
        iBuffer.put(indexData);
        iBuffer.flip();
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
        glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iBuffer, GL_STATIC_DRAW_ARB);
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
        glBufferDataARB(GL_ARRAY_BUFFER_ARB, vBuffer, GL_STATIC_DRAW_ARB);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
        glBufferDataARB(GL_ARRAY_BUFFER_ARB, cBuffer, GL_STATIC_DRAW_ARB);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
    }
    @Override
    public void setUp(float posX, float posY, float posZ) {
    }

    @Override
    public void draw() {
        glClear(GL_COLOR_BUFFER_BIT);
        glGenBuffersARB(ib);
        vHandle = ib.get(0);
        cHandle = ib.get(1);
        iHandle = ib.get(2);
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
        glVertexPointer(3, GL_FLOAT, 3 << 2, 0L);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
        glColorPointer(3, GL_FLOAT, 3 << 2, 0L);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0L);
        glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
        glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
    }

    @Override
    public void destroy() {
        ib.put(0, vHandle);
        ib.put(1, cHandle);
        ib.put(2, iHandle);
        glDeleteBuffersARB(ib);
    }
}
This does not draw anything; the screen is entirely black. GL_CULL_FACE is not enabled, and there is no lighting (or lack thereof) to cause this. Moving the data upload into draw() makes it work (I can add that code here if you want), but that means I recreate my buffers every frame (i.e. bad). I'm wondering what I'm doing wrong here, because I've moved it around and slapped it silly for four days now.
You're setting vHandle, cHandle and iHandle to 0: the fields read from a freshly created (zero-filled) IntBuffer before glGenBuffersARB has ever run. You never need to create the IntBuffer in line 1 anyway. Replace
int vHandle = ib.get(0);
int cHandle = ib.get(1);
int iHandle = ib.get(2);
with
int vHandle = glGenBuffers();
int cHandle = glGenBuffers();
int iHandle = glGenBuffers();
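Put differently, the intended lifecycle is: generate the handles and upload the data once at initialization, then only bind and draw each frame. A rough sketch in C-style GL (the LWJGL calls map directly; upload details elided):

GLuint vHandle, cHandle, iHandle;

void initialize(void)
{
    // Generate the handles once, before the first draw; do not regenerate them in draw().
    glGenBuffers(1, &vHandle);
    glGenBuffers(1, &cHandle);
    glGenBuffers(1, &iHandle);
    /* ...bind each buffer and upload its data with glBufferData, as in initilize() above... */
}

void draw(void)
{
    glClear(GL_COLOR_BUFFER_BIT);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, vHandle);
    glVertexPointer(3, GL_FLOAT, 0, 0);
    glBindBuffer(GL_ARRAY_BUFFER, cHandle);
    glColorPointer(3, GL_FLOAT, 0, 0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iHandle);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0);
}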

Does anyone have an example of PBO + SDL2.0 + OpenGL

Does anyone have a working example of PBOs (Pixel Buffer Objects) + SDL2.0 (Simple DirectMedia Layer) + OpenGL?
The reason is to get asynchronous GPU -> CPU downloads with glReadPixels and thus a performance boost.
Here's my attempt. There's no measurable difference at all between use_pbo = false and true, and I've used PBOs with GLUT before on the same machine, so I know my hardware supports them.
I've looked a lot at the http://www.songho.ca/opengl/gl_pbo.html tutorial but don't see that I'm doing anything wrong.
bool use_pbo = true; // remember to wait a bit for fps to pick up speed

if (SDL_Init(SDL_INIT_EVENTS) != 0) throw "SDL_Init";
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
sdl_window = SDL_CreateWindow("", 10, 20, window_width, window_height, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
sdl_gl_context = SDL_GL_CreateContext(sdl_window);
SDL_GL_MakeCurrent(sdl_window, sdl_gl_context);
vertical_sync(false);

{
    glGenBuffers(2, pbos);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[0]);
    glBufferData(GL_PIXEL_PACK_BUFFER, pbo_width * pbo_height * 4, 0, GL_STREAM_READ);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[1]);
    glBufferData(GL_PIXEL_PACK_BUFFER, pbo_width * pbo_height * 4, 0, GL_STREAM_READ);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}

// render to default frame buffer
{
    glDrawBuffer(GL_BACK);
    glClearColor(0.1, 0.2, 0.3, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_BLEND);
    glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    glUseProgram(simple_shader);
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, transparent_texture);
    glBegin(GL_TRIANGLE_STRIP);
    // texcoord, color, position
    glVertexAttrib2f(2, 0, 1); glVertexAttrib4f(1, 1, 0, 0, 1); glVertexAttrib2f(0, -1, -1); // bottom left
    glVertexAttrib2f(2, 1, 1); glVertexAttrib4f(1, 0, 1, 0, 1); glVertexAttrib2f(0, +1, -1); // bottom right
    glVertexAttrib2f(2, 0, 0); glVertexAttrib4f(1, 0, 0, 1, 1); glVertexAttrib2f(0, -1, +1); // top left
    glVertexAttrib2f(2, 1, 0); glVertexAttrib4f(1, 1, 1, 0, 1); glVertexAttrib2f(0, +1, +1); // top right
    glEnd();
    glBindTexture(GL_TEXTURE_2D, 0);
    glUseProgram(0);
    glDisable(GL_BLEND);

    while (true)
    {
        if (use_pbo)
        {
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[1]);
            GLvoid* map_buffer_data = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
            if (map_buffer_data)
            {
                //glRasterPos2i(-1, -1); // default, left bottom
                glRasterPos2i(0, -1);
                glDrawPixels(pbo_width, pbo_height, GL_BGRA, GL_UNSIGNED_BYTE, map_buffer_data);
                glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
            }
            glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
        }
        else
        {
            glRasterPos2i(0, -1); // right bottom
            glDrawPixels(pbo_width, pbo_height, GL_BGRA, GL_UNSIGNED_BYTE, t.c);
        }
    }
}

SDL_GL_SwapWindow(sdl_window);

// cycle buffers
{
    GLuint t = pbos[1];
    pbos[1] = pbos[0];
    pbos[0] = t;
}
Try binding the PBO to your texture:
GL_PIXEL_UNPACK_BUFFER is for pixel data being passed into OpenGL; GL_PIXEL_PACK_BUFFER is for pixel data being retrieved from OpenGL.
Also, you should define the texture image with glTexImage2D. From its documentation:
If a non-zero named buffer object is bound to the GL_PIXEL_UNPACK_BUFFER target (see glBindBuffer) while a texture image is specified, data is treated as a byte offset into the buffer object's data store.