Only one texture gets rendered to my FrameBuffer - OpenGL

I have three images that I'm trying to render with my SpriteBatch and FrameBuffer.
These are the steps I'm doing (sketched in code right after this list):
1. Clear the screen's color, depth, and stencil buffers.
2. Bind the FrameBuffer. See FrameBuffer.bind().
3. Use my SpriteBatch to render three solid quads with different sizes and positions.
4. Unbind my FrameBuffer. See FrameBuffer.unbind().
5. Bind my FrameBuffer's colorTexture.
6. Render my FrameBuffer's texture with my SpriteBatch.
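Roughly, a frame looks like this in code (a simplified sketch; the texture variables, positions, and the exact SpriteBatch.draw overload are placeholders here, not my real calls):
// 1. Clear the window's color, depth and stencil buffers.
GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT | GL11.GL_STENCIL_BUFFER_BIT);
// 2. Bind the FrameBuffer.
frameBuffer.begin();
// 3. Render three solid quads with my SpriteBatch.
spriteBatch.begin();
spriteBatch.draw(textureA, 0, 0, 100, 100);
spriteBatch.draw(textureB, 150, 50, 200, 150);
spriteBatch.draw(textureC, 400, 200, 50, 300);
spriteBatch.end();
// 4. Unbind the FrameBuffer.
frameBuffer.end();
// 5./6. Draw the FrameBuffer's colorTexture to the screen.
spriteBatch.begin();
spriteBatch.draw(frameBuffer.getColorTexture(), 0, 0, RenderingEngine.getWidth(), RenderingEngine.getHeight());
spriteBatch.end();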
Here are some facts I know about this problem:
- The textures show the right colors; they are not white.
- The first texture I render is shown, while the other two aren't. If I change the order, it is still the first texture drawn that gets rendered.
- By three textures, I mean three separate texture IDs.
- I have depth testing enabled.
- No OpenGL error or FrameBuffer error is thrown.
- When rendering with my SpriteBatch directly to the screen, all three quads are shown.
- The size of my FrameBuffer is the size of my screen.
Here's how it's supposed to look:
Here's how it looks with my FrameBuffer:
Here's how it looks with my FrameBuffer when drawing the first texture two times. The other two textures are still missing, so position is not the problem.
Here's the code for my FrameBuffer:
// Stencil support is currently broken in my example; I'm not using any stencil buffers.
public FrameBuffer(int width, int height, boolean hasDepth, boolean hasStencil) {
this.width = width;
this.height = height;
frameBufferID = GL30.glGenFramebuffers();
GL30.glBindFramebuffer(GL30.GL_FRAMEBUFFER, frameBufferID);
if (hasDepth && hasStencil) {
int depthAndStencilBufferID = GL30.glGenRenderbuffers();
GL30.glBindRenderbuffer(GL30.GL_RENDERBUFFER, depthAndStencilBufferID);
GL30.glRenderbufferStorage(GL30.GL_RENDERBUFFER, GL30.GL_DEPTH24_STENCIL8, width, height);
GL30.glFramebufferRenderbuffer(GL30.GL_FRAMEBUFFER, GL30.GL_DEPTH_STENCIL_ATTACHMENT, GL30.GL_RENDERBUFFER, depthAndStencilBufferID);
}
else if (hasDepth) {
int depthBufferID = GL30.glGenRenderbuffers();
GL30.glBindRenderbuffer(GL30.GL_RENDERBUFFER, depthBufferID);
GL30.glRenderbufferStorage(GL30.GL_RENDERBUFFER, GL30.GL_DEPTH_COMPONENT32F, width, height);
GL30.glFramebufferRenderbuffer(GL30.GL_FRAMEBUFFER, GL30.GL_DEPTH_ATTACHMENT, GL30.GL_RENDERBUFFER, depthBufferID);
}
else if (hasStencil) {
int stencilBufferID = GL30.glGenRenderbuffers();
GL30.glBindRenderbuffer(GL30.GL_RENDERBUFFER, stencilBufferID);
GL30.glRenderbufferStorage(GL30.GL_RENDERBUFFER, GL30.GL_STENCIL_INDEX16, width, height);
GL30.glFramebufferRenderbuffer(GL30.GL_FRAMEBUFFER, GL30.GL_STENCIL_ATTACHMENT, GL30.GL_RENDERBUFFER, stencilBufferID);
}
colorTexture = new Texture(width, height);
colorTexture.bind();
GL30.glFramebufferTexture2D(GL30.GL_FRAMEBUFFER, GL30.GL_COLOR_ATTACHMENT0, GL11.GL_TEXTURE_2D, colorTexture.getTextureID(), 0);
int result = GL30.glCheckFramebufferStatus(GL30.GL_FRAMEBUFFER);
//error handling
RenderUtil.unbindFrameBuffer();
}
private Texture colorTexture;
private int width, height;
private int frameBufferID, depthBufferID, stencilBufferID;
public void begin () {
GL30.glBindFramebuffer(GL30.GL_FRAMEBUFFER, frameBufferID);
GL11.glViewport(0, 0, width, height);
}
public void end () {
GL11.glViewport(0, 0, RenderingEngine.getWidth(), RenderingEngine.getHeight());
RenderUtil.unbindFrameBuffer();
RenderUtil.unbindTextures();
}
public Texture getColorTexture () {
return colorTexture;
}
public int getWidth () {
return width;
}
public int getHeight () {
return height;
}
Here's how I create a Texture, in case it's of any use:
private void loadTexture (ByteBuffer buffer, int textureID, int width, int height) {
GL13.glActiveTexture(GL13.GL_TEXTURE0);
GL11.glBindTexture(GL11.GL_TEXTURE_2D, textureID);
GL11.glPixelStorei(GL11.GL_UNPACK_ALIGNMENT, 1);
GL11.glTexImage2D(GL11.GL_TEXTURE_2D, 0, GL11.GL_RGBA8, width, height, 0, GL11.GL_RGBA, GL11.GL_UNSIGNED_BYTE, buffer);
GL30.glGenerateMipmap(GL11.GL_TEXTURE_2D);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_WRAP_S, GL11.GL_REPEAT);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_WRAP_T, GL11.GL_REPEAT);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_MAG_FILTER, GL11.GL_NEAREST);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_MIN_FILTER, GL11.GL_LINEAR_MIPMAP_LINEAR);
}
There seems to be a problem in my SpriteBatch when switching textures. Here's how the switching works.
As a reminder: using my SpriteBatch to render directly to the screen gives me the wanted result, but rendering to a FrameBuffer doesn't.
public void flush () {
flushes++;
buffer.flip();
MESH.setVertices(buffer);
if (lastTexture != null) {
lastTexture.bind();
}
MESH.draw();
buffer.clear();
}
/** Begins the SpriteBatch. */
public void begin () {
if (rendering) {
new IllegalStateException("You have to SpriteBatch.end() before calling begin again!").printStackTrace();
}
flushes = 0;
rendering = true;
shader.bind();
shader.setUniformMat4f("projection", camera.getCombined());
shader.setUniformVec4("color", color);
}
/** Ends the SpriteBatch, also flushes it. */
public void end () {
if (!rendering) {
new IllegalStateException("You've to SpriteBatch.begin before ending the spritebatch").printStackTrace();
}
rendering = false;
flush();
RenderUtil.unbindShaders();
RenderUtil.unbindTextures();
}
/** Switches the textures and flushes the old one.
* @param t */
private void switchTextures (Texture t) {
if (lastTexture != null) {
flush();
}
lastTexture = t;
}
Going to create a small example. Should be up soon.
EDIT:
The problem wasn't my FrameBuffer; it was my SpriteBatch.

You need to clear your buffers while your FBO is bound; otherwise you're only clearing the color/depth/stencil buffers associated with your window (the default framebuffer). That is not the behavior you want, because the depth test in this scenario uses your FBO's depth buffer rather than the window's.
public void begin () is the most logical place to do this:
public void begin () {
GL30.glBindFramebuffer(GL30.GL_FRAMEBUFFER, frameBufferID);
GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT | GL11.GL_STENCIL_BUFFER_BIT);
GL11.glViewport(0, 0, width, height);
}

Related

OpenGL: reading color buffer

I attached 4 color buffers to a framebuffer and render into each of them. Each color buffer has the size of the window. I'm trying to read the color of the pixels of one of these color buffers using the coordinates of the mouse pointer.
Mouse move event handler:
void mouseMoveEvent(QMouseEvent *event)
{
int x = event->pos().x();
int y = event->pos().y();
makeCurrent();
glBindFramebuffer(GL_READ_FRAMEBUFFER, FBOIndex::GEOMETRY);
{
// The values I'm interested in are saved in the GL_COLOR_ATTACHMENT3 attachment,
// but I always get 0 from it and from any other attachment I try
glReadBuffer(GL_COLOR_ATTACHMENT3);
QVector<GLubyte> pixel(3);
glReadPixels(x, geometry().height() - y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &(pixel[0]));
QString PixelColor = QColor(pixel[0], pixel[1], pixel[2]).name();
qDebug() << PixelColor; // => always 0
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
doneCurrent();
}
But for every color buffer I always read the value 0.
The color buffers are written correctly during the rendering phase; I tested each of them by displaying the texture it is attached to. I also tested reading the pixel selected by the mouse pointer from the default framebuffer, and that works correctly.
Where am I going wrong?
Thanks!
EDIT
The seemingly strange thing is that, if I use a "dedicated" framebuffer, I can correctly read the values stored in the texture.
void mouseMoveEvent(QMouseEvent *event)
{
int x = event->pos().x();
int y = event->pos().y();
GLuint fbo;
makeCurrent();
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
{
GLuint texture = textures[TextureIndex::COLOUR];
glBindTexture(GL_TEXTURE_2D, texture);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
QVector<GLubyte> pixel(3);
glReadPixels(x, geometry().height() - y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &(pixel[0]));
QString PixelColor = QColor(pixel[0], pixel[1], pixel[2]).name();
qDebug() << PixelColor; // => correct value
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
glDeleteFramebuffers(1, &fbo);
doneCurrent();
}
But it clearly seems pointless to use another framebuffer when I already have one containing exactly the information I need.
I also tried reading the values directly from the texture (as suggested by @Spektre), but in this case too I always get 0.
void mouseMoveEvent(QMouseEvent *event)
{
int x = event->pos().x();
int y = event->pos().y();
makeCurrent();
{
GLuint texture = textures[TextureIndex::COLOUR];
glBindTexture(GL_TEXTURE_2D, texture);
QVector<GLubyte> pixel(3);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, geometry().height() - y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &(pixel[0]));
QString PixelColor = QColor(pixel[0], pixel[1], pixel[2]).name();
qDebug() << PixelColor; // => always 0
}
doneCurrent();
}
My approach was correct, but I was not binding the correct framebuffer.
FBOIndex::GEOMETRY is an enum value that I use to index an array of FBOs, where I store all the framebuffer object names, so in general it is not itself a valid framebuffer object name.
I have defined a method addFBO(index) that creates a framebuffer and stores it at position index in the FBO array. The method returns the framebuffer object name of the generated framebuffer. If a framebuffer already exists at position index, the method simply returns the associated framebuffer object name.
So, by changing the code in the following way, I finally get the desired result.
void mouseMoveEvent(QMouseEvent *event)
{
int x = event->pos().x();
int y = event->pos().y();
makeCurrent();
glBindFramebuffer(GL_READ_FRAMEBUFFER, addFBO(FBOIndex::GEOMETRY));
{
glReadBuffer(GL_COLOR_ATTACHMENT3);
QVector<GLubyte> pixel(3);
glReadPixels(x, geometry().height() - y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE, &(pixel[0]));
QString PixelColor = QColor(pixel[0], pixel[1], pixel[2]).name();
qDebug() << PixelColor; // => correct value
}
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
doneCurrent();
}

How can I store a previously rendered frame from an OpenGL fragment shader for later use?

I'm writing an FFGL video effect plugin (using this Shadertoy port). I want to store a previously rendered frame to use in a future calculation. Specifically, I am trying to make the equivalent of a video delay.
Is it possible to write to an external buffer from a fragment shader and then use that stored value at a later time (say, 30 frames later)?
You create an FBO and bind it; whatever you render next goes into it. So if you're using a shader, that still applies, just like with the default framebuffer.
Here's some code to help you out:
import java.nio.ByteBuffer;
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL14.*;
import static org.lwjgl.opengl.GL30.*;
public class FBO {
public int fbo,tex,depth;
public void delete() {glDeleteTextures(tex);glDeleteRenderbuffers(depth);glDeleteFramebuffers(fbo);}
public void bindTexture() {glBindTexture(GL_TEXTURE_2D,tex);}
public void bind() {glBindFramebuffer(GL_FRAMEBUFFER,fbo);}
public static void unbind() {glBindFramebuffer(GL_FRAMEBUFFER,0);}
public FBO(){ // create the fbo
glBindTexture(GL_TEXTURE_2D,tex=glGenTextures()); // create texture, set correct filters
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP);
glBindRenderbuffer(GL_RENDERBUFFER,depth=glGenRenderbuffers()); // create buffer for depth
glBindFramebuffer(GL_FRAMEBUFFER,fbo=glGenFramebuffers()); // create framebuffer
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_TEXTURE_2D,tex,0); // attach texture
glFramebufferRenderbuffer(GL_FRAMEBUFFER,GL_DEPTH_ATTACHMENT,GL_RENDERBUFFER,depth); // attach depth
unbind(); // in case it's not used immediately
}
public void resize(int w,int h) {
glBindTexture(GL_TEXTURE_2D,tex); // update texture size
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA8,w,h,0,GL_RGBA,GL_UNSIGNED_BYTE,(ByteBuffer)null);
glBindRenderbuffer(GL_RENDERBUFFER,depth); // update depth size
glRenderbufferStorage(GL_RENDERBUFFER,GL_DEPTH_COMPONENT24,w,h);
}
public void render() {
// glColor4f(1,1,1,1); // you may want to do these customly
// glDisable(GL_BLEND);
glBindTexture(GL_TEXTURE_2D, tex);
Tess.begin();
Tess.vertex( 0, height, 0, 0, 0);
Tess.vertex(width, height, 0, 1, 0);
Tess.vertex(width, 0, 0, 1, 1);
Tess.vertex( 0, 0, 0, 0, 1);
Tess.draw();
// Tess btw, just manually handles verts and tex coords
// that could be pure pipeline, or by vbo / vao / interleaved vbo
// width / height are just the 2d size. could render for post processing
}
}
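If you want an actual N-frame delay (say 30 frames), one way to build on the class above, just a sketch and not the only approach, is to keep a ring of FBOs: each frame you render into the newest slot and sample the oldest one. The FrameDelay class, the Runnable callback, and the delay length below are my own placeholder names, not part of FFGL or the Shadertoy port:
public class FrameDelay {
    private final FBO[] ring;       // delayFrames + 1 slots, so the oldest readable frame is exactly delayFrames old
    private int head = 0;           // slot we will render into this frame
    private boolean filled = false; // true once every slot has been written at least once

    public FrameDelay(int delayFrames, int width, int height) {
        ring = new FBO[delayFrames + 1];
        for (int i = 0; i < ring.length; i++) {
            ring[i] = new FBO();
            ring[i].resize(width, height); // allocate color + depth storage
        }
    }

    /** Renders the current frame into the newest slot, then returns the FBO holding
     *  the frame from delayFrames ago (or null while the ring is still filling).
     *  The caller is responsible for viewport/projection while the FBO is bound. */
    public FBO submitAndGetDelayed(Runnable drawScene) {
        FBO target = ring[head];
        target.bind();      // everything drawScene draws goes into this FBO's texture
        drawScene.run();
        FBO.unbind();

        head = (head + 1) % ring.length;
        if (head == 0) filled = true;

        // The slot we will overwrite next holds the oldest stored frame.
        return filled ? ring[head] : null;
    }

    public void delete() {
        for (FBO f : ring) f.delete();
    }
}
Bind the returned FBO's texture (bindTexture()) and draw it the same way you would draw the live frame; until the ring has filled you can simply show the current frame instead.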

FrameBuffer in libGDX: using glClearColor() clears the whole screen

I'm calling glClearColor between my FrameBuffer's begin() and end(),
but it is clearing the whole screen's color. Am I doing something wrong?
public class FrameBufferTest implements ApplicationListener{
OrthographicCamera camera;
SpriteBatch batcher;
FrameBuffer fbo;
Texture tex;
@Override
public void create() {
batcher = new SpriteBatch();
camera = new OrthographicCamera(800, 480);
camera.position.set(camera.viewportWidth/2f, camera.viewportHeight/2f, 0);
fbo = new FrameBuffer(Format.RGBA8888, 100, 100,false);
camera.position.set(camera.viewportWidth/2f, camera.viewportHeight/2f, 0);
tex = new Texture("data/bg.png");
}
@Override
public void render() {
GL20 gl = Gdx.graphics.getGL20();
gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
gl.glClearColor(0.5f,0.5f,0.5f,0.5f); // grey color
camera.update();
batcher.setProjectionMatrix(camera.combined);
batcher.enableBlending();
batcher.begin();
batcher.draw(tex, 0, 0, 256, 256);
batcher.end();
fbo.begin();
gl.glClearColor(1f,0,0,1f);
gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
fbo.end();
batcher.begin();
batcher.draw(fbo.getColorBufferTexture(), 512, 0, 100, 100);
batcher.end();
}
Swap these two lines at the beginning of your render method:
gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
gl.glClearColor(0.5f,0.5f,0.5f,0.5f); // grey color
In the order you have them, it is clearing the color using the most recently set clear color, which is the red that you set farther down in your render method (since this is a loop).
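That is, the start of render() should be:
gl.glClearColor(0.5f, 0.5f, 0.5f, 0.5f); // set the grey clear color first (glClearColor is sticky state)
gl.glClear(GL20.GL_COLOR_BUFFER_BIT);    // now the screen is cleared with grey instead of last frame's red
The red glClearColor you set inside fbo.begin()/fbo.end() then only ever affects the clear that happens while the FBO is bound.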

OpenGL C++: texture code textures all models with the same texture

I created a class to hold my models' information. I have two models rendering correctly and the textures wrapping properly, but for some reason, when I have multiple models, all of them get textured with just one texture, as you can see in this image: http://imgur.com/d0glIwF
Any ideas why this might be happening?
This is my code:
struct BitMapFile
{
int sizeX;
int sizeY;
unsigned char *data;
};
// Routine to read a bitmap file.
// Works only for uncompressed bmp files of 24-bit color.
BitMapFile *getBMPData(string filename)
{
BitMapFile *bmp = new BitMapFile;
unsigned int size, offset, headerSize;
// Read input file name.
ifstream infile(filename.c_str(), ios::binary);
// Get the starting point of the image data.
infile.seekg(10);
infile.read((char *) &offset, 4);
// Get the header size of the bitmap.
infile.read((char *) &headerSize,4);
// Get width and height values in the bitmap header.
infile.seekg(18);
infile.read( (char *) &bmp->sizeX, 4);
infile.read( (char *) &bmp->sizeY, 4);
// Allocate buffer for the image.
size = bmp->sizeX * bmp->sizeY * 24;
bmp->data = new unsigned char[size];
// Read bitmap data.
infile.seekg(offset);
infile.read((char *) bmp->data , size);
// Reverse color from bgr to rgb.
int temp;
for (int i = 0; i < size; i += 3)
{
temp = bmp->data[i];
bmp->data[i] = bmp->data[i+2];
bmp->data[i+2] = temp;
}
return bmp;
}
class Model
{
public:
Model(string modelFilename, string textureFilename);
float getCenterX() { return m_CenterX; }
float getCenterY() { return m_CenterY; }
float getCenterZ() { return m_CenterZ; }
void SetCenterX(float x) { m_CenterX = x; }
void SetCenterY(float y) { m_CenterY = y; }
void SetCenterZ(float z) { m_CenterZ = z; }
void LoadTexture(string fileName);
//load model function
void Draw();
private:
float m_CenterX, m_CenterY, m_CenterZ, m_Width, m_Height, m_Depth;
string m_ModelFilename;
int m_Texture;
string m_TextureName;
};
Model::Model(string modelFilename, string textureFilename)
{
m_ModelFilename = modelFilename;
m_TextureName = textureFilename;
//load model function//
LoadTexture(m_TextureName);
}
void Model::LoadTexture(string TextureName)
{
// Local storage for bmp image data.
BitMapFile *image[1];
string filename = TextureName;
filename.append(".bmp");
// Load the texture.
image[0] = getBMPData(filename);
// Bind grass image to texture index[i].
glBindTexture(GL_TEXTURE_2D, m_Texture); //makes room for our texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, //always GL_TEXTURE_2D
0, //0 for now
GL_RGB, //format opengl uses to read textures
image[0]->sizeX, //width
image[0]->sizeY, //height
0, //the border of the image
GL_RGB, //GL_RGB because pixels are stored in RGB format
GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE because pixels are stored as unsigned numbers
image[0]->data); //actual pixel data
}
void Model::Draw()
{
glPushMatrix();
glTranslatef(m_CenterX, m_CenterY, m_CenterZ);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, m_Texture);
glBegin(GL_TRIANGLES);
//my code for drawing the model to the screen. it isn't the problem so i removed it
glEnd();
glDisable(GL_TEXTURE_2D);
glPopMatrix();
}
Model model;
Model model1;
// Drawing routine.
void drawScene(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
model.SetCenterX(0);
model.SetCenterY(0);
model.SetCenterZ(12);
model.Draw();
model1.SetCenterX(12);
model1.SetCenterY(10);
model1.SetCenterZ(0);
model1.Draw();
glutSwapBuffers();
}
void setup(void)
{
glClearColor(0.0, 0.0, 0.0, 0.0);
//model = Model("monkey.obj", "launch");
model = Model("cube.obj", "launch");
model1 = Model("cube.obj", "grass");
// Specify how texture values combine with current surface color values.
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
}
Thank you in advance.
The problem is that you're not creating a texture id. You can do that using the glGenTextures function. In your case I would put it at the beginning of the LoadTexture method - just ask it for 1 texture id and save what it gives you back into m_Texture.
Remember that, like everything you create using glGen*, it should also be deleted when you're done with it using glDelete* (glDeleteTextures in this case).
Also, consider moving to more modern OpenGL with shaders and vertex arrays. This is a very broad topic, unfortunately. There are lots of tutorials and books available, I learned from OpenGL Superbible, though I hear that some people don't like it very much...

OpenGL - framebuffer texture clipped smaller than I set it?

I'm using OpenGL ES 2.0.
I'm using a framebuffer linked to a texture to build an offscreen render (of some simplistic metaballs), and then I'm rendering that texture to the main back buffer.
Everything is looking great except that the texture appears clipped, i.e. it does not cover the full window dimensions (it falls short by about 128 pixels on one axis). Here's a screenshot: http://tinypic.com/r/9telwg/7
Any ideas what could cause this? I read here that I should set glViewport to the size of the texture, but that gives me a different aspect ratio, since the texture metaballsTexture is square (1024x1024) and my window is 768x1024. It also still remains a bit clipped, as I can't seem to get the framebuffer to be big enough, even though the texture is bigger than my window size. Below is my code. I call PrepareToAddMetaballs() during the render when I'm ready, then make successive calls to AddMetaball(), which now render onto my offscreen FBO, then call FinishedAddingMetaballs() when I'm done. Later I call Render() to display the offscreen texture linked to the FBO on the main back buffer.
#include "Metaballs.h"
#include "s3e.h"
#include "IwGL.h"
#include "Render.h"
#include "vsml.h"
#include <vector>
#include <string>
#include <iostream>
#include "1013Maths.h"
#define GL_RGBA8 0x8058
MetaBalls::MetaBalls() : metaballsTexture(NULL), metaballsShader(NULL) {
glGenFramebuffers(1, &myFBO);
metaballTexture[0] = NULL;
metaballTexture[1] = NULL;
metaballTexture[2] = NULL;
CRender::Instance()->CreateTexture("WaterCanvas.png", &metaballsTexture);
CRender::Instance()->CreateTexture("metaball.pvr", &metaballTexture[0]);
CRender::Instance()->CreateTexture("metaball-1.png", &metaballTexture[1]);
CRender::Instance()->CreateTexture("metaball-2.png", &metaballTexture[2]);
CRender::Instance()->CreateShader("Shaders/metaballs.fs", "Shaders/metaballs.vs", &metaballsShader);
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Attach texture to frame buffer
glBindTexture(GL_TEXTURE_2D, metaballsTexture->m_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, metaballsTexture->m_id, 0);
glClearColor(1,1,1,0);
glClear(GL_COLOR_BUFFER_BIT);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::string error = "Metaballs framebuffer incomplete";
std::cerr << error << std::endl;
throw error;
}
float w = PTM_DOWNSCALE(float(metaballsTexture->GetWidth()));
float h = PTM_DOWNSCALE(float(metaballsTexture->GetHeight()));
CRender::Instance()->BuildQuad(
tVertex( b2Vec3(0,0,0), b2Vec2(0,1) ),
tVertex( b2Vec3(w,0,0), b2Vec2(1,1) ),
tVertex( b2Vec3(w,h,0), b2Vec2(1,0) ),
tVertex( b2Vec3(0,h,0), b2Vec2(0,0) ),
buffer);
}
MetaBalls::~MetaBalls() {
CRender::Instance()->ReleaseShader(metaballsShader);
CRender::Instance()->ReleaseTexture(metaballsTexture);
CRender::Instance()->ReleaseTexture(metaballTexture[0]);
CRender::Instance()->ReleaseTexture(metaballTexture[1]);
CRender::Instance()->ReleaseTexture(metaballTexture[2]);
glDeleteFramebuffers(1, &myFBO);
}
void MetaBalls::PrepareToAddMetaballs(b2Vec3& paintColour) {
// bind render to texture
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Set our viewport so our texture isn't clipped (appears stretched and clipped)
// glViewport(0, 0, metaballsTexture->GetWidth(), metaballsTexture->GetHeight());
glClearColor(paintColour.x, paintColour.y, paintColour.z, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
void MetaBalls::FinishedAddingMetaballs() {
glBindFramebuffer(GL_FRAMEBUFFER, NULL);
// CRender::Instance()->SetWindowViewport();
}
void MetaBalls::AddMetaball(float x, float y, uint size) {
// render the metaball texture to larger texture
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(x);
pTransform[13] = PTM_DOWNSCALE(y+4); // the +4 is for a bit of overlap with land
float oldview[16];
float identity[16];
VSML::setIdentityMatrix(identity);
memcpy(oldview, CRender::Instance()->GetViewMatrix(), sizeof(float)*16);
memcpy(CRender::Instance()->GetViewMatrix(),identity, sizeof(float)*16);
CRender::Instance()->DrawSprite(metaballTexture[size], pTransform, 1.0f, true);
memcpy(CRender::Instance()->GetViewMatrix(),oldview, sizeof(float)*16);
}
void MetaBalls::Render() {
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(-128);
pTransform[13] = PTM_DOWNSCALE(-256);
// render our metaballs texture using alpha test shader
CRender::Instance()->BindShader(metaballsShader);
CRender::Instance()->BindTexture(0, metaballsTexture);
CRender::Instance()->SetMatrix(metaballsShader, "view", CRender::Instance()->GetViewMatrix());
CRender::Instance()->SetMatrix(metaballsShader, "world", pTransform);
CRender::Instance()->SetMatrix(metaballsShader, "proj", CRender::Instance()->GetProjMatrix());
CRender::Instance()->SetBlending(true);
CRender::Instance()->DrawPrimitives(buffer);
CRender::Instance()->SetBlending(false);
}
====================
EDIT
Aha! Got it. I haven't found this example anywhere, but I fixed it by adjusting the projection matrix. It was set to 1024x768 when it was working, but with a window size of 768x1024 the projection matrix was changing, as well as the viewport. By setting each to 1024x768 manually (I chose to use constants), the metaballs are rendered correctly offscreen with the proper aspect ratio. Their 1024x1024 texture is rendered as a billboard with that aspect ratio, nice and sharp. After I'm done, I restore both to what the rest of the application uses. Below is the working code:
#include "Metaballs.h"
#include "s3e.h"
#include "IwGL.h"
#include "Render.h"
#include "vsml.h"
#include <vector>
#include <string>
#include <iostream>
#include "1013Maths.h"
MetaBalls::MetaBalls() : metaballsTexture(NULL), metaballsShader(NULL) {
glGenFramebuffers(1, &myFBO);
metaballTexture[0] = NULL;
metaballTexture[1] = NULL;
metaballTexture[2] = NULL;
CRender::Instance()->CreateTexture("WaterCanvas.png", &metaballsTexture);
CRender::Instance()->CreateTexture("metaball.pvr", &metaballTexture[0]);
CRender::Instance()->CreateTexture("metaball-1.png", &metaballTexture[1]);
CRender::Instance()->CreateTexture("metaball-2.png", &metaballTexture[2]);
CRender::Instance()->CreateShader("Shaders/metaballs.fs", "Shaders/metaballs.vs", &metaballsShader);
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Attach texture to frame buffer
glBindTexture(GL_TEXTURE_2D, metaballsTexture->m_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, metaballsTexture->m_id, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::string error = "Metaballs framebuffer incomplete";
std::cerr << error << std::endl;
throw error;
}
float w = PTM_DOWNSCALE(float(metaballsTexture->m_width));
float h = PTM_DOWNSCALE(float(metaballsTexture->m_height));
CRender::Instance()->BuildQuad(
tVertex( b2Vec3(0,0,0), b2Vec2(0,1) ),
tVertex( b2Vec3(w,0,0), b2Vec2(1,1) ),
tVertex( b2Vec3(w,h,0), b2Vec2(1,0) ),
tVertex( b2Vec3(0,h,0), b2Vec2(0,0) ),
buffer);
// return to default state
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
MetaBalls::~MetaBalls() {
CRender::Instance()->ReleaseShader(metaballsShader);
CRender::Instance()->ReleaseTexture(metaballsTexture);
CRender::Instance()->ReleaseTexture(metaballTexture[0]);
CRender::Instance()->ReleaseTexture(metaballTexture[1]);
CRender::Instance()->ReleaseTexture(metaballTexture[2]);
glDeleteFramebuffers(1, &myFBO);
}
void MetaBalls::PrepareToAddMetaballs(b2Vec3& paintColour) {
// bind render to texture
glBindFramebuffer(GL_FRAMEBUFFER, myFBO);
// Set orthographic projection
cfloat w = SCREEN_WIDTH / PTM_RATIO;
cfloat h = SCREEN_HEIGHT / PTM_RATIO;
VSML::ortho(-w, 0, -h, 0, 0.0f, -1.0f, CRender::Instance()->m_Proj);
// Set our viewport so our texture isn't clipped
glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
glClearColor(paintColour.x, paintColour.y, paintColour.z, 0.1f);
glClear(GL_COLOR_BUFFER_BIT);
}
void MetaBalls::FinishedAddingMetaballs() {
glBindFramebuffer(GL_FRAMEBUFFER, NULL);
CRender::Instance()->SetWindowViewport();
}
void MetaBalls::AddMetaball(float x, float y, uint size) {
// render the metaball texture to larger texture
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(x);
pTransform[13] = PTM_DOWNSCALE(y);
float oldview[16];
float identity[16];
VSML::setIdentityMatrix(identity);
memcpy(oldview, CRender::Instance()->GetViewMatrix(), sizeof(float)*16);
memcpy(CRender::Instance()->GetViewMatrix(),identity, sizeof(float)*16);
CRender::Instance()->DrawSprite(metaballTexture[size], pTransform, 1.0f, true);
memcpy(CRender::Instance()->GetViewMatrix(),oldview, sizeof(float)*16);
}
void MetaBalls::Render() {
VSML::setIdentityMatrix(pTransform);
pTransform[12] = PTM_DOWNSCALE(0);
pTransform[13] = PTM_DOWNSCALE(-256);
// render our metaballs texture using alpha test shader
CRender::Instance()->BindShader(metaballsShader);
CRender::Instance()->BindTexture(0, metaballsTexture);
CRender::Instance()->SetMatrix(metaballsShader, "view", CRender::Instance()->GetViewMatrix());
CRender::Instance()->SetMatrix(metaballsShader, "world", pTransform);
CRender::Instance()->SetMatrix(metaballsShader, "proj", CRender::Instance()->GetProjMatrix());
CRender::Instance()->SetBlending(true);
CRender::Instance()->DrawPrimitives(buffer);
CRender::Instance()->SetBlending(false);
}
Are you setting your viewport according to the texture's size? I didn't find any viewport setting in your code...