LibGDX SpriteBatch draw method producing results that make no sense - opengl

I've read the docs: https://libgdx.badlogicgames.com/ci/nightlies/docs/api/com/badlogic/gdx/graphics/g2d/SpriteBatch.html#draw-com.badlogic.gdx.graphics.Texture-float-float-float-float-float-float-float-float-float-int-int-int-int-boolean-boolean-
I thought I understood the behavior of the x, y, originX, and originY parameters, but apparently not. I am playing around with a simple tutorial where a texture is drawn onto the screen with a SpriteBatch. The game uses an OrthographicCamera and a FitViewport. The following code draws the texture in the middle of the screen:
float halfW = width * 0.5f;
float halfH = height * 0.5f;
batch.draw(cavemanTexture,                // Texture
        -halfW, -halfH,                   // x, y
        halfW, halfH,                     // originX, originY
        width, height,                    // width, height
        WORLD_TO_SCREEN, WORLD_TO_SCREEN, // scaleX, scaleY
        0.0f,                             // rotation
        0, 0,                             // srcX, srcY
        width, height,                    // srcWidth, srcHeight
        false, false);                    // flipX, flipY
I guess I do not understand what is meant when the docs say "offset" in the sentence, "The rectangle is offset by originX, originY relative to the origin."
To me, if you wanted to put the texture in the center of the screen, you could simply do the following:
batch.draw(cavemanTexture,                // Texture
        -halfW, -halfH,                   // x, y
        0, 0,                             // originX, originY
        width, height,                    // width, height
        WORLD_TO_SCREEN, WORLD_TO_SCREEN, // scaleX, scaleY
        0.0f,                             // rotation
        0, 0,                             // srcX, srcY
        width, height,                    // srcWidth, srcHeight
        false, false);                    // flipX, flipY
That would move the bottom left corner down and to the left, enough to center the texture. But if I try that, the image actually disappears off screen.
Here is the full original code:
package com.cookbook.samples;

import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.OrthographicCamera;
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.Texture.TextureFilter;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.utils.viewport.FitViewport;
import com.badlogic.gdx.utils.viewport.Viewport;

public class SpriteBatchSample extends GdxSample {
    private static final Color BACKGROUND_COLOR = new Color(0.39f, 0.58f, 0.92f, 1.0f);
    private static final float WORLD_TO_SCREEN = 1.0f / 100.0f;
    private static final float SCENE_WIDTH = 12.80f;
    private static final float SCENE_HEIGHT = 7.20f;

    private OrthographicCamera camera;
    private Viewport viewport;
    private SpriteBatch batch;
    private Texture cavemanTexture;
    private Color oldColor;

    @Override
    public void create() {
        camera = new OrthographicCamera();
        viewport = new FitViewport(SCENE_WIDTH, SCENE_HEIGHT, camera);
        batch = new SpriteBatch();
        oldColor = new Color();
        cavemanTexture = new Texture(Gdx.files.internal("data/caveman.png"));
        cavemanTexture.setFilter(TextureFilter.Nearest, TextureFilter.Nearest);
    }

    @Override
    public void dispose() {
        batch.dispose();
        cavemanTexture.dispose();
    }

    @Override
    public void render() {
        Gdx.gl.glClearColor(BACKGROUND_COLOR.r,
                BACKGROUND_COLOR.g,
                BACKGROUND_COLOR.b,
                BACKGROUND_COLOR.a);
        Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);

        batch.setProjectionMatrix(camera.combined);
        batch.begin();

        int width = cavemanTexture.getWidth();
        int height = cavemanTexture.getHeight();
        float originX = width * 0.5f;
        float originY = height * 0.5f;

        // Render caveman centered on the screen
        batch.draw(cavemanTexture,                // Texture
                -originX, -originY,               // x, y
                originX, originY,                 // originX, originY
                width, height,                    // width, height
                WORLD_TO_SCREEN, WORLD_TO_SCREEN, // scaleX, scaleY
                0.0f,                             // rotation
                0, 0,                             // srcX, srcY
                width, height,                    // srcWidth, srcHeight
                false, false);                    // flipX, flipY

        // Render caveman on the top left corner at 2x size
        batch.draw(cavemanTexture,
                -4.0f - originX, 1.5f - originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN * 2.0f, WORLD_TO_SCREEN * 2.0f,
                0.0f,
                0, 0,
                width, height,
                false, false);

        // Render caveman on the bottom left corner at 0.5x size
        batch.draw(cavemanTexture,
                -4.0f - originX, -1.5f - originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN * 0.5f, WORLD_TO_SCREEN * 0.5f,
                0.0f,
                0, 0,
                width, height,
                false, false);

        // Render caveman on top right corner at 2x size and rotated 45 degrees
        batch.draw(cavemanTexture,
                4.0f - originX, 1.5f - originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN * 2.0f, WORLD_TO_SCREEN * 2.0f,
                45.0f,
                0, 0,
                width, height,
                false, false);

        // Render caveman on bottom right corner at 1.5x size and flipped around X and Y
        batch.draw(cavemanTexture,
                4.0f - originX, -1.5f - originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN * 1.5f, WORLD_TO_SCREEN * 1.5f,
                0.0f,
                0, 0,
                width, height,
                true, true);

        // Save batch color
        oldColor.set(batch.getColor());

        // Render blue caveman
        batch.setColor(Color.CYAN);
        batch.draw(cavemanTexture,
                -2.0f - originX, -originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                0.0f,
                0, 0,
                width, height,
                false, false);

        // Render red caveman
        batch.setColor(Color.RED);
        batch.draw(cavemanTexture,
                -originX, -originY + 2.0f,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                0.0f,
                0, 0,
                width, height,
                false, false);

        // Render green caveman
        batch.setColor(Color.GREEN);
        batch.draw(cavemanTexture,
                2.0f - originX, -originY,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                0.0f,
                0, 0,
                width, height,
                false, false);

        // Render yellow caveman
        batch.setColor(Color.YELLOW);
        batch.draw(cavemanTexture,
                -originX, -originY - 2.0f,
                originX, originY,
                width, height,
                WORLD_TO_SCREEN, WORLD_TO_SCREEN,
                0.0f,
                0, 0,
                width, height,
                false, false);

        batch.setColor(oldColor);
        batch.end();
    }

    @Override
    public void resize(int width, int height) {
        viewport.update(width, height, false);
    }
}

The name of your scale factor (WORLD_TO_SCREEN) makes me suspicious that you misunderstand how Viewports work. The whole point of using a Viewport at all is so you don't have to think about the screen size when you are placing stuff in your game world. The size you pass to the Viewport constructor is how much of the game world you want to be visible. When you draw something using a Viewport's camera, its size should not be related to screen dimensions, because the Viewport has already abstracted that away. You should be drawing it at the size it should be in terms of world units.
If the origin is left at (0, 0), the texture is drawn with its bottom left corner at (x, y). The origin is an offset from that bottom left corner, about which the scale and rotation are applied. If you use a scale of 1 and a rotation of 0, the origin x and y have no effect whatsoever.
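For example, here is a minimal sketch (the 1x1 world-unit size and the (2, 3) position are made up for illustration) showing that the origin only kicks in once rotation or scale do:
batch.draw(texture, 2f, 3f,    // x, y: bottom left corner at (2, 3)
        0.5f, 0.5f,            // originX, originY: middle of the 1x1 rectangle
        1f, 1f,                // width, height in world units
        1f, 1f,                // scaleX, scaleY: at 1, the origin is irrelevant
        0f,                    // rotation: at 0, the origin is irrelevant
        0, 0, texture.getWidth(), texture.getHeight(),
        false, false);
// With rotation 45f instead of 0f, the sprite spins around the world point
// (2.5, 3.5), i.e. the corner (2, 3) plus the origin offset (0.5, 0.5).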
If you are not planning to draw sprites with rotation, I would avoid the complicated draw methods and just use SpriteBatch.draw(textureRegion, x, y, width, height). In a typical game, you will have a TextureAtlas of TextureRegions, so you'll never be passing a Texture object to SpriteBatch.
You will either want to think of your world in meter (or similar) units, or if you're doing retro pixel art you might want to use retro pixel (not screen pixel) units. So your caveman might be 1m tall and you want to see a 16m x 9m view of your game world. In this case, you would instantiate your viewport with FitViewport(16f, 9f). And when you drew your caveman, you'd draw it with something like
batch.draw(cavemanRegion, x, y, 1f * cavemanRegion.getRegionWidth() / cavemanRegion.getRegionHeight(), 1f)
where x and y are where the bottom left corner should be in the game world.
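Putting that together, a minimal sketch of the whole setup (the atlas file name "game.atlas" and the region name "caveman" are made-up placeholders):
// Assumed setup: a TextureAtlas containing a region named "caveman".
viewport = new FitViewport(16f, 9f, camera);   // see a 16m x 9m slice of the world
TextureAtlas atlas = new TextureAtlas(Gdx.files.internal("game.atlas"));
TextureRegion cavemanRegion = atlas.findRegion("caveman");

// Draw the caveman 1m tall, deriving the width from the region's aspect ratio.
float worldHeight = 1f;
float worldWidth = worldHeight * cavemanRegion.getRegionWidth() / cavemanRegion.getRegionHeight();
batch.setProjectionMatrix(camera.combined);
batch.begin();
batch.draw(cavemanRegion, x, y, worldWidth, worldHeight);
batch.end();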

Here the object is scaled and rotated around (x, y):
public class Enemy {
    static final float scale = .4f;
    int type_ship;
    int x = 0;
    int y = 0;
    int angle = 0;
    Rectangle rect;
    TextureRegion region;

    public Enemy(int type_ship, int x, int y) {
        this.type_ship = type_ship;
        this.x = x;
        this.y = y;
        rect = SpriteMaps.get_sprite_rect(SpriteMaps.ENEMY_RED);
        region = new TextureRegion(Textures.getShipsTexture(),
                (int)rect.x, (int)rect.y, (int)rect.width, (int)rect.height);
    }

    public void update() {
    }

    public void draw(SpriteBatch batch) {
        float width = rect.width;
        float height = rect.height;
        float draw_x = this.x - width / 2;
        float draw_y = this.y - height / 2;
        batch.draw(region, draw_x, draw_y, width / 2, height / 2,
                rect.width, rect.height, scale, scale, angle);
    }
}
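For instance, a hypothetical caller could spin the ship in place, since the origin (width / 2, height / 2) puts the pivot exactly at (x, y):
// Hypothetical usage; the constructor arguments are placeholders.
Enemy enemy = new Enemy(SpriteMaps.ENEMY_RED, 200, 300);
// each frame:
enemy.angle = (enemy.angle + 2) % 360;  // spins in place around (200, 300)
batch.begin();
enemy.draw(batch);
batch.end();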

Related

How to stop stretching in opengl

I created lines, and when I rotate a line it stretches. How can I stop the stretching during rotation? When I change the height in glOrtho, it does not display properly. As a line rotates to the left or right it starts stretching, but when it comes back to the center it returns to its real proportions.
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include <glut.h>

using namespace std;

float yr = 0;

void introscreen();
void screen();
void screen1();
void PitchLadder();

int width = 1268;
int height = 720;
float translate = 0.0f;
GLfloat angle = 0.0f;

void display(void) {
    glClearColor(0, 0, 0, 0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(-300, 300, -10, 25, 0, 1);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    static int center_x = 0;
    static int center_y = 0;
}

void specialKey(int key, int x, int y) {
    switch (key) {
    case GLUT_KEY_UP:
        translate += 1.0f;
        break;
    case GLUT_KEY_DOWN:
        translate -= 1.0f;
        break;
    case GLUT_KEY_LEFT:
        angle += 1.0f;
        break;
    case GLUT_KEY_RIGHT:
        angle -= 1.0f;
        break;
    }
    glutPostRedisplay();
}

void Rolling(void) {
    glClear(GL_COLOR_BUFFER_BIT);
    glColor3f(0, 1, 0);
    glPushMatrix();
    glRotatef(-angle, 0, 0, 1);
    glTranslatef(-10, translate, 0);
    PitchLadder();
    glPopMatrix();
    glFlush();
}

void PitchLadder() {
    GLfloat y;
    GLfloat y2;
    GLfloat fSize[5];
    GLfloat fCurrSize;
    fCurrSize = fSize[2];
    for (y2 = -90.0f; y2 <= 90.0f; y2 += 10.0f) {
        glLineWidth(fCurrSize);
        glBegin(GL_LINES);
        glVertex3f(-50.0f, y2, 0);
        glVertex3f(50.0f, y2, 0);
        glEnd();
        fCurrSize += 1.0f;
        screen();
        screen1();
    }
}

void renderbitmap1(float x3, float y3, void *font1, char *string1) {
    char *c1;
    glRasterPos2f(x3, y3);
    for (c1 = string1; *c1 != '\0'; c1++) {
        glutBitmapCharacter(font1, *c1);
    }
}

void screen(void) {
    glColor3f(0, 1, 0);
    char buf1[20] = { '\0' };
    for (int row1 = -90.0f; row1 <= 90 + yr; row1 += 10.0f) {
        sprintf_s(buf1, "%i", row1);
        renderbitmap1(70, (yr + row1), GLUT_BITMAP_TIMES_ROMAN_24, buf1);
    }
}

void renderbitmap2(float x4, float y4, void *font2, char *string2) {
    char *c1;
    glRasterPos2f(x4, y4);
    for (c1 = string2; *c1 != '\0'; c1++) {
        glutBitmapCharacter(font2, *c1);
    }
}

void screen1(void) {
    glColor3f(0, 1, 0);
    char buf1[20] = { '\0' };
    for (int row1 = -90.0f; row1 <= 90 + yr; row1 += 10.0f) {
        sprintf_s(buf1, "%i", row1);
        renderbitmap2(-70, (yr + row1), GLUT_BITMAP_TIMES_ROMAN_24, buf1);
    }
}

int main(int arg, char** argv) {
    glutInit(&arg, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
    glutInitWindowSize(width, height);
    glutInitWindowPosition(50, 100);
    glutCreateWindow("HUD Lines");
    display();
    glutDisplayFunc(Rolling);
    glutSpecialFunc(specialKey);
    glutMainLoop();
    return 0;
}
With an orthographic projection, the view space coordinates are mapped linearly to the clip space coordinates, which here are the same as the normalized device coordinates. Normalized device space is a cube with a minimum of (-1, -1, -1) and a maximum of (1, 1, 1).
Finally, the coordinates in normalized device space are mapped to the rectangular viewport.
If the viewport is a non-square rectangle, the aspect ratio has to be considered when the view space coordinates are transformed to clip space.
The mapping of the normalized device coordinates to the viewport distorts the geometry by the reciprocal of the viewport's aspect ratio. This distortion has to be compensated by the orthographic projection.
When the orthographic projection is set by glOrtho(left, right, bottom, top, near, far), a cuboid volume is defined which maps (left, bottom, near) to (-1, -1, -1) and (right, top, far) to (1, 1, 1).
The x and y range of the orthographic projection does not have to equal the viewport rectangle, but the ratio (right - left) / (top - bottom) has to equal the ratio of the viewport rectangle, or the geometry will be distorted.
double size = 200.0f;
double aspect = (double)width / (double)height;
glOrtho(-aspect*size/2.0, aspect*size/2.0, -size/2.0, size/2.0, -1.0, 1.0);
Your window size and orthographic "view" do not have the same aspect ratio:
// This creates a window that's 1268 x 720 (a wide rectangle)
int width = 1268;
int height = 720;
glutInitWindowSize(width, height);
// This creates a "view" that's 300 x 300 (a square)
glOrtho(-300, 300, -10, 25, 0, 1);
The "view" will be stretched to fill the viewport (window). You are seeing a 300 x 300 image being stretched to 1268x720, which definitely makes horizontal lines appear longer than vertical lines even though they're the same length in the code.
You should call glOrtho using the width and height variables of your window:
glOrtho(0, width, 0, height, 0, 1);
Notice that I have changed the arguments to (left = 0, right = width, bottom = 0, top = height, ...). This allows you to work with a screen coordinate space that is similar to 2D rendering but the bottom-left corner is (0,0) and the top-right is (width,height).

OpenGL video frame fitting

I am working on a project where I have to project the data from a camera with a resolution of 640x480 on a 4K screen in portrait mode.
The camera is the Kinect V1 but I will switch to version 2 with a better resolution (1920x1080).
My question is how to scale the texture for display in order to get a correct result.
For the moment, I have managed to display the image on the entire screen, but it is squashed horizontally. Ideally I would keep the aspect ratio and crop some width on each side of the image.
I am using SDL with OpenGL, here is the concerned part of the code:
// window creation
auto window = SDL_CreateWindow("Imagine",
        x,
        y,
        0,
        0,
        SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN | SDL_WINDOW_FULLSCREEN_DESKTOP | SDL_WINDOW_ALLOW_HIGHDPI | SDL_WINDOW_BORDERLESS);

// GL initialization and texture creation
void SdlNuitrackRenderHandler::initTexture(int width, int height)
{
    glEnable(GL_TEXTURE_2D);
    glEnableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
    glOrtho(0, _width, _height, 0, -1.0, 1.0);
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glGenTextures(1, &_textureID);
    width = power2(width);
    height = power2(height);
    if (_textureBuffer != 0)
        delete[] _textureBuffer;
    _textureBuffer = new uint8_t[width * height * 3];
    memset(_textureBuffer, 0, sizeof(uint8_t) * width * height * 3);
    glBindTexture(GL_TEXTURE_2D, _textureID);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    // Set texture coordinates [0, 1] and vertexes position
    _textureCoords[0] = (float) _width / width;
    _textureCoords[1] = (float) _height / height;
    _textureCoords[2] = (float) _width / width;
    _textureCoords[3] = 0.0;
    _textureCoords[4] = 0.0;
    _textureCoords[5] = 0.0;
    _textureCoords[6] = 0.0;
    _textureCoords[7] = (float) _height / height;

    _vertexes[0] = _width;
    _vertexes[1] = _height;
    _vertexes[2] = _width;
    _vertexes[3] = 0.0;
    _vertexes[4] = 0.0;
    _vertexes[5] = 0.0;
    _vertexes[6] = 0.0;
    _vertexes[7] = _height;
}

// Texture rendering
// Render prepared background texture
void SdlNuitrackRenderHandler::renderTexture()
{
    glClearColor(1, 1, 1, 1);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnable(GL_TEXTURE_2D);
    glColor4f(1, 1, 1, 1);
    glBindTexture(GL_TEXTURE_2D, _textureID);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, _width, _height, GL_RGB, GL_UNSIGNED_BYTE, _textureBuffer);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, _vertexes);
    glTexCoordPointer(2, GL_FLOAT, 0, _textureCoords);
    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_TEXTURE_2D);
}
While I agree with what was written in the comments about this being out-dated OpenGL code, the issue has nothing to do with OpenGL at its heart. You want to draw 1 rectangle with the correct aspect ratio inside another rectangle that has a different aspect ratio. You simply need to know where to place the vertices.
Typically with the TEXTURE_2D texture target, you want your texture coordinates to be 0-1 in both directions, unless you plan to crop the input image. There was a time when textures had to have a width and height that were a power of 2. That hasn't been the case in a very long time. So remove these 2 lines:
width = power2(width);
height = power2(height);
So the first thing is to set those properly:
_textureCoords[0] = 1.0;
_textureCoords[1] = 1.0;
_textureCoords[2] = 1.0;
_textureCoords[3] = 0.0;
_textureCoords[4] = 0.0;
_textureCoords[5] = 0.0;
_textureCoords[6] = 0.0;
_textureCoords[7] = 1.0;
(Incidentally, that code is really hard to read and will be a pain to maintain. You should make the texture coordinates (and vertex coordinates) a struct with an x and y value so it makes sense. Right now it's not obvious that it's 4 sets of 2D coordinates that are (max, max), (max, min), (min, min), (min, max). But I digress.)
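For instance, a sketch of that struct idea (Vec2 is a made-up name; the layout is still two tightly packed floats, so the gl*Pointer calls are unchanged):
struct Vec2 { float x, y; };

Vec2 texCoords[4] = {
    { 1.0f, 1.0f },  // (max, max)
    { 1.0f, 0.0f },  // (max, min)
    { 0.0f, 0.0f },  // (min, min)
    { 0.0f, 1.0f },  // (min, max)
};

// Still 4 sets of 2 floats, tightly packed, so this call is identical:
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);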
Next, to figure out the vertex positions, you need to know whether the video is going to be scaled to fit the width or the height. To do this, compute both candidate scale ratios:
double widthScaleRatio = displayWidth / imageWidth;    // <- using this scale will guarantee the width of the new image is the same as the display's width, but might crop the height
double heightScaleRatio = displayHeight / imageHeight; // <- using this scale will guarantee the height of the new image is the same as the display's height, but might crop the width
double scale = 1.0;
// If scaling by the widthScaleRatio makes the height too big, use the heightScaleRatio.
// Otherwise use the widthScaleRatio.
if (imageHeight * widthScaleRatio > displayHeight)
{
    scale = heightScaleRatio;
}
else
{
    scale = widthScaleRatio;
}
Now scale your width and height by that factor:
double newWidth = imageWidth * scale;
double newHeight = imageHeight * scale;
and set your vertices based on that:
_vertexes[0] = newWidth;
_vertexes[1] = newHeight;
_vertexes[2] = newWidth;
_vertexes[3] = 0.0;
_vertexes[4] = 0.0;
_vertexes[5] = 0.0;
_vertexes[6] = 0.0;
_vertexes[7] = newHeight;
And the same caveat applies to making this code easier to read as with the texture coordinates.
EDIT: Here's a simple program to show how it works:
#include <iostream>

int main() {
    double displayWidth = 2160;
    double displayHeight = 4096;
    double imageWidth = 640;
    double imageHeight = 480;
    double widthScaleRatio = displayWidth / imageWidth;    // fit the display's width, might crop the height
    double heightScaleRatio = displayHeight / imageHeight; // fit the display's height, might crop the width
    double scale = 1.0;
    // If scaling by the widthScaleRatio makes the height too big, use the heightScaleRatio.
    // Otherwise use the widthScaleRatio.
    if (imageHeight * widthScaleRatio > displayHeight)
    {
        scale = heightScaleRatio;
    }
    else
    {
        scale = widthScaleRatio;
    }
    double newWidth = imageWidth * scale;
    double newHeight = imageHeight * scale;
    std::cout << "New size = (" << newWidth << ", " << newHeight << ")\n";
}
When I run it, I get:
New size = (2160, 1620)

opengl: Trouble setting up viewport and glOrtho

I am trying to setup my OpenGL views for some texture rendering. Following some advice on the forum, I set up my viewport and ortho matrix as follows:
First I try to compute the screen width and height that I can use while maintaining the aspect ratio of my image:
void resize(int w, int h)
{
    float target_aspect_ratio = image_width / image_height;
    width = w;
    height = (int)(width / target_aspect_ratio + 0.5f);
    if (height > h) {
        height = h;
        width = (int)(height * target_aspect_ratio + 0.5f);
    }
    // I want to center my image, so I have these offsets:
    off_x = (w - width) / 2.f;
    off_y = (h - height) / 2;
    glViewport(off_x, off_y, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, width, 0, height, 0.0f, 1.0f);
}
Now when I want to render my texture I do:
void paint()
{
    // texture binding etc.
    glBegin(GL_QUADS);  // assuming a quad, since four corners are emitted
    glTexCoord2f(0, 0); glVertex2f(0, 0);
    glTexCoord2f(1, 0); glVertex2f(width, 0);
    glTexCoord2f(1, 1); glVertex2f(width, height);
    glTexCoord2f(0, 1); glVertex2f(0, height);
    glEnd();
}
However, this does not show the image as expected. It does not maintain the aspect ratio as I resize the screen. It is almost as if the glViewport call has no effect, and I can verify this function gets called every time my window is resized.
Update:
It is strange, almost as if these calls have no effect. I even tried something like:
_off_x = _off_y = 0;
_width = 500;
_height = 500;
So I expected the viewport to be the lower left box of my screen, but the image is drawn as before, basically using the whole screen as the viewport.
Update 2:
Ok, so if I call
glViewport(_off_x, _off_y, _width, _height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, _width, 0, _height, 0, 1);
in my paint events, it works as expected! However, I thought it was enough to put this in the resize event handler.
Before you start drawing, you need to switch your matrix mode to GL_MODELVIEW. You don't need to set your projection matrix inside your render function at each frame.
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
Here is a detailed analysis that I wrote about glMatrixMode() function modes :
OpenGL glMatrixMode(GL_PROJECTION) vs glMatrixMode(GL_MODELVIEW)
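In other words, a sketch of the intended split, reusing the question's function names (and assuming nothing else resets the viewport between the two calls):
void resize(int w, int h)
{
    // ... compute off_x, off_y, width, height as in the question ...
    glViewport(off_x, off_y, width, height);
    glMatrixMode(GL_PROJECTION);   // projection: once per resize
    glLoadIdentity();
    glOrtho(0, width, 0, height, 0.0f, 1.0f);
    glMatrixMode(GL_MODELVIEW);    // switch back so drawing uses the model-view matrix
    glLoadIdentity();
}

void paint()
{
    glMatrixMode(GL_MODELVIEW);    // per frame: only model-view work
    glLoadIdentity();
    // texture binding and the textured quad as before
}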

glViewport size not changing

I have two viewports that currently display 4 points in each of their corners. One viewport (left side) has gray corners, the other (right) has red corners. The two viewports are placed next to each other, with the red viewport intentionally made smaller. The viewport with red corners is also drawing a GL_TRIANGLES.
You can see them here:
The points in the triangle should be in the bottom left corner, top left corner and bottom right corner of the red viewport, but the bottom right corner falls significantly short.
I think the second (red) glViewport is still using measurements from a previous object, probably the other viewport.
Am I setting up the second viewport correctly?
I want the second viewport to be 100 pixels across, not 900 pixels squeezed into 100 pixels.
Here is my code.
I am using the latest version of glut and currently still learning how to use OpenGL/glut.
// windows/panels
int winWidth = 900;  // window width
int winHeight = 600; // window height
int pannelToolsWidth = 100;
int pannelToolsHeight = winHeight;
int pannelToolsX = winWidth - pannelToolsWidth;
int pannelToolsY = 0;
int drawSpaceWidth = winWidth - pannelToolsWidth;
int drawSpaceHeight = winHeight;

// drawing
float red = 0.0f;
float green = 0.0f;
float blue = 0.0f;
float lineSize = 1.0f;
float pointSize = 1.0f;

// ...

// draws a panel. Make sure to define a colour before calling.
void drawPannel(int x, int y, int width, int height)
{
    glBegin(GL_TRIANGLES);
    glVertex2f(float(x), float(y));
    glVertex2f(float(x), float(height));
    glVertex2f(float(width), 0.0);
    glEnd();
    //glBegin(GL_TRIANGLES);
    //  glVertex2f(, );
    //  glVertex2f(, );
    //  glVertex2f(, );
    //glEnd();
}

// Starts a new glViewport and visualises the boundaries with coloured points.
// scene: id of the viewport (viewports draw different things)
// x/y: viewports draw from the bottom left corner
void drawViewport(int x, int y, int width, int height, float red, float green, float blue, int scene)
{
    // draw viewport (colours used to visualise limits)
    glViewport(x, y, width, height);
    glColor3f(red, green, blue);
    glBegin(GL_POINTS);
    glVertex2f(0.0f, float(winHeight));
    glVertex2f(float(winWidth), float(winHeight));
    glVertex2f(0.0f, 0.0f);
    glVertex2f(float(winWidth), 0.0f);
    glEnd();

    // draw viewport content. Text methods should be called last because glRasterPos2i() affects the viewport.
    switch (scene)
    {
    // add more cases for other viewports here.
    case 0:
        // colour
        // drawing
        // text
        break;
    case 1:
        glColor3f(0.8, 0.8, 0.7);
        drawPannel(0, 0, pannelToolsWidth, pannelToolsHeight);
        break;
    default:
        // do nothing
        break;
    }
}

void init()
{
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);  // set clear background colour
    glPointSize(10.0);                     // set point size
    glColor3f(1.0f, 1.0f, 1.0f);           // set draw colour
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, winWidth, 0, winHeight); // setup 2D projection
}

void collectScene()
{
    glClear(GL_COLOR_BUFFER_BIT);
    // viewport(s)
    drawViewport(0, 0, drawSpaceWidth, drawSpaceHeight, 0.5, 0.5, 0.5, 0); // paint area
    drawViewport(pannelToolsX, pannelToolsY, pannelToolsWidth, pannelToolsHeight, 1, 0, 0, 1); // GUI area
    glFlush();
    glutSwapBuffers(); // swap front and back buffers
}

// called when no event in queue
void idle()
{
    glutPostRedisplay();
}

int main(int argc, char** argv)
{
    // create the window
    glutInit(&argc, argv); // GLUT must be initialized before creating a window
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); // double buffer with RGBA
    glutInitWindowSize(winWidth, winHeight);
    glutInitWindowPosition(100, 100); // from top left
    glutCreateWindow("Paint");
    init();
    glutDisplayFunc(collectScene);
    glutIdleFunc(idle);
    glutMainLoop();
    return 0;
}
Thank you
The projection matrix maps your coordinates into normalized device coordinates, and those are then stretched onto whatever the current viewport is; that is why the projection is usually set after the glViewport call. In your case the projection never changes: the same 0..900 x 0..600 range from init() gets squeezed into the 100-pixel-wide viewport.
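A sketch of what that means for the question's code: reset the projection inside drawViewport, so each viewport gets coordinates in its own pixels instead of reusing the 900x600 projection from init():
void drawViewport(int x, int y, int width, int height, float red, float green, float blue, int scene)
{
    glViewport(x, y, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0, width, 0, height);  // (0,0)-(width,height) now spans this viewport only
    glMatrixMode(GL_MODELVIEW);
    // ... draw the corner points with width/height instead of winWidth/winHeight ...
}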

Renderbuffers larger than window size - OpenGL

I'm trying to draw to a renderbuffer (512x512) that's larger than the screen size (i.e., 320x480).
After doing a glReadPixels, the image looks correct, except once the dimensions of the image exceed that of the screen size- in this example, past 320 horizontal and 480 vertical. What causes this anomaly? Is there something I'm missing?
When the window size is >= the size of the renderbuffer, this code works absolutely fine.
Example image that was rendered to the buffer & glReadPixel'd:
http://img593.imageshack.us/img593/3220/rendertobroke.png
unsigned int canvasFrameBuffer;
bglGenFramebuffers(1, &canvasFrameBuffer);
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
// Attach renderbuffer
unsigned int canvasRenderBuffer;
bglGenRenderbuffers(1, &canvasRenderBuffer);
bglBindRenderbuffer(BGL_RENDERBUFFER, canvasRenderBuffer);
bglRenderbufferStorage(BGL_RENDERBUFFER, BGL_RGBA4, width, height);
bglFramebufferRenderbuffer(BGL_FRAMEBUFFER, BGL_COLOR_ATTACHMENT0, BGL_RENDERBUFFER, canvasRenderBuffer);
bglViewport(0, 0, width, height);
Matrix::matrix_t identity, colorMatrix;
Matrix::LoadIdentity(&identity);
Matrix::LoadIdentity(&colorMatrix);
bglClearColor(1.0f, 1.0f, 1.0f, 1.0f);
bglClear(BGL_COLOR_BUFFER_BIT);
Vector::vector_t oldPos, oldScale;
Vector::Copy(&oldPos, &pos);
Vector::Mul(&pos, 0.0f);
Vector::Copy(&oldScale, &scale);
Vector::Load(&scale, 1, 1, 1);
int oldHAlign = halignment;
int oldVAlign = valignment;
halignment = Font::HALIGN_LEFT;
valignment = Font::VALIGN_BOTTOM;
float oldXRatio = vid.xratio;
float oldYRatio = vid.yratio;
vid.xratio = 1;
vid.yratio = 1;
Drawing::Set2D(this->size.x, this->size.y); // glOrtho and setup projection/modelview matrices
Draw(&identity, &colorMatrix);
Vector::Copy(&pos, &oldPos);
Vector::Copy(&scale, &oldScale);
halignment = oldHAlign;
valignment = oldVAlign;
vid.xratio = oldXRatio;
vid.yratio = oldYRatio;
byte *buffer = (byte*)Z_Malloc(width * height * 3, ZT_STATIC);
bglPixelStorei(BGL_PACK_ALIGNMENT, 1);
bglReadPixels(0, 0, width, height, BGL_RGB, BGL_UNSIGNED_BYTE, buffer);
byte *final = RGBtoLuminance(buffer, width, height);
SaveTGA("canvas.tga", final, width, height, 1);
Z_Free(buffer);
// unbind frame buffer
bglBindRenderbuffer(BGL_RENDERBUFFER, 0);
bglBindFramebuffer(BGL_FRAMEBUFFER, 0);
bglDeleteRenderbuffers(1, &canvasRenderBuffer);
bglDeleteFramebuffers(1, &canvasFrameBuffer);
bglViewport(0, 0, vid.width, vid.height);
Here's the answer.
Change this line:
bglBindFramebuffer(BGL_RENDERBUFFER, canvasFrameBuffer);
to this:
bglBindFramebuffer(BGL_FRAMEBUFFER, canvasFrameBuffer);
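As an aside, a mistake like this is easier to catch if you validate the FBO after setting it up. A sketch using the standard GL names (assuming the bgl*/BGL_* wrappers map onto them directly):
#include <stdio.h>

glBindFramebuffer(GL_FRAMEBUFFER, canvasFrameBuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                          GL_RENDERBUFFER, canvasRenderBuffer);
// GL_FRAMEBUFFER_COMPLETE means the FBO is actually usable as a render target;
// anything else means rendering to it will fail.
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    fprintf(stderr, "framebuffer incomplete\n");
}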