glReadPixels moving coordinates with zoom - c++

I'm trying to read a rectangle of pixels, selected by two points (opposite corners), using glReadPixels in C++. The problem shows up in the Y-axis value.
At the moment glReadPixels works correctly until I zoom. As soon as I zoom (in or out), the tool reads pixels from the wrong Y values (the X values are always correct).
What am I doing wrong?
GLORTHO:
IZDA = mCameraPosition.x - ((double)(rectangleDim.x/SCRARatio)) * mZoomFactor / 2;
DCHA = mCameraPosition.x + ((double)(rectangleDim.x/SCRARatio)) * mZoomFactor / 2;
ABAJO = mCameraPosition.y - ((double)rectangleDim.y) * mZoomFactor / 2;
ARRIBA = mCameraPosition.y + ((double)rectangleDim.y) * mZoomFactor / 2;
glOrtho(IZDA, DCHA, ABAJO, ARRIBA, -1.0f, 1.0f);
GETSCREENCOORDS:
realDim.x = dimension.x * (dimension.x * mZoomFactor / 2);
realDim.y = dimension.y * (dimension.y * mZoomFactor / 2);
GSC.x = (dimension.x*(x-mCameraPosition.x)) / realDim.x;
GSC.y = (dimension.y/realDim.y) * ((pow(dimension.y, 2)*mZoomFactor/2)+mCameraPosition.y + y - imgDim.y);
GETWORLDCOORDS:
realDim.x = dimension.x * (dimension.x * mZoomFactor / 2);
realDim.y = dimension.y * (dimension.y * mZoomFactor / 2);
GWC.x =
((x * realDim.x) / dimension.x) + mCameraPosition.x;
GWC.y =
((y * realDim.y) / dimension.y)
-((dimension.y * (dimension.y * mZoomFactor/2)) + mCameraPosition.y)
+ imgDim.y;
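For reference, the generic window-to-world mapping implied by a glOrtho(left, right, bottom, top, ...) projection looks like the sketch below. This is only a minimal reference, not the code above; it assumes the viewport covers the whole window (winW x winH pixels) and that the mouse y coordinate is measured from the top edge.
// Minimal sketch: map a window pixel to world units under glOrtho(left, right, bottom, top, ...).
struct Vec2 { double x, y; };

Vec2 windowToWorld(double px, double py, double winW, double winH,
                   double left, double right, double bottom, double top)
{
    Vec2 w;
    w.x = left + (px / winW) * (right - left);
    w.y = bottom + ((winH - py) / winH) * (top - bottom); // flip y: GL's window origin is bottom-left
    return w;
}
The inverse (world to window) just solves these two equations for px and py.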
CHECKFILLVALUE (Using glReadPixels):
ComplexData::Point a, b;
// Set points screen coordinates
getScreenCoords(FIP.x, FIP.y);
a.x = GSC.x;
a.y = GSC.y;
getScreenCoords(FEP.x, FEP.y);
b.x = GSC.x;
b.y = GSC.y;
// Set length and width to read (and write)
double RX_Length, RY_Length;
if (b.x < a.x) RX_Length = a.x - b.x;
else RX_Length = b.x - a.x;
if (b.y < a.y) RY_Length = a.y - b.y;
else RY_Length = b.y - a.y;
// Pixel data vector
float *tdata = new float[(int)(3*RX_Length*RY_Length)]; // cast needed: RX_Length/RY_Length are doubles
double desp = (mZoomFactor*dvcamera)/dvzoom;
getScreenCoords(min(FIP.x, FEP.x), gHeight-FIP.y-2*(mCameraPosition.y-desp));
// Reading from the GET SCREEN COORDINATES global values (GSC)
glReadPixels(GSC.x, GSC.y, RX_Length, RY_Length, GL_RGB, GL_FLOAT, tdata); // <--- Works great until zooming
glRasterPos2d(min(FIP.x, FEP.x), gHeight-max(FIP.y, FEP.y));
glDrawPixels(RX_Length, RY_Length, GL_RGB, GL_FLOAT, tdata); // <--- Works great
delete[] tdata;
MOUSEWHEEL (Zoom):
Drawing::Point pt = e->Location;
getWorldCoords(pt.X, pt.Y);
Initial_Zoom.X = rectangleDim.x - GWC.x;
Initial_Zoom.Y = GWC.y;
// ZOOM IN
if (e->Delta > 0){
if (mZoomFactor > float::Epsilon * 1000.0f){
mZoomFactor /= 1.15f;
}
}
// ZOOM OUT
else {
if (mZoomFactor < float::MaxValue / 1000.0f){
mZoomFactor *= 1.15f;
}
}

I managed to find the solution.
Replace:
getScreenCoords(min(FIP.x, FEP.x), gHeight-FIP.y-2*(mCameraPosition.y-desp));
glReadPixels(GSC.x, GSC.y, RX_Length, RY_Length, GL_RGB, GL_FLOAT, tdata);
With:
getScreenCoords(std::min(FIP.x, FEP.x), FIP.y);
glReadPixels(GSC.x, rectangleDim.y - GSC.y, RX_Length, RY_Length, GL_RGB, GL_FLOAT, tdata);
And voilà, it works. ^^
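The underlying reason is that glReadPixels expects window coordinates with the origin at the bottom-left, while mouse/screen positions are usually measured from the top-left, so the y of the rectangle's lower edge has to be flipped against the viewport height. A minimal sketch of that flip, with hypothetical names rather than the variables above:
// y of the rectangle's lower-left corner in GL window coordinates,
// given a top-left-origin mouseY and the rectangle height.
int readPixelsY(int mouseY, int viewportHeight, int rectHeight)
{
    return viewportHeight - mouseY - rectHeight;
}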

Related

waves from selected pixel (openGL C++)

I've created a program that draws a waving flag, and I want to add functionality that creates a new wave at the selected pixel. However, I can't make the wave start where I want it to, and the attempt even makes the flag stop waving (probably because of the synced sin).
Here's my display func.
const int W = 800;
const int H = 600;
// simulates Frame Buffer
unsigned char pixels[H][W][3] = { 0 }; // 3 is for RGB
void display()
{
glClear(GL_COLOR_BUFFER_BIT); // clean frame buffer
createFlag();
int i, j;
double dist;
offset += 0.25;
for (i = 0; i < H; i++)
for (j = 0; j < W; j++)
{
dist = sqrt(pow(i + H / 2.0, 2) + pow(j + W / 2.0, 2));
pixels[i][j][0] += 135 + 55 * (1 + 1 * sin(dist / 25 - offset)) / 2; // red
pixels[i][j][1] += 135 + 85 * (1 + 1 * sin(dist / 25 - offset)) / 2; // green
pixels[i][j][2] += 135 + 105 * (1 + 1 * sin(dist / 25 - offset)) / 2; // blue
}
// draws the matrix pixels
glDrawPixels(W, H, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glutSwapBuffers(); // show all
}
And here is my mouse func.
void mouse(int button, int state, int x, int y)
{
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN)
{
double dist;
offset += 0.1;
for (y = 0; y < H; y++)
for (x = 0; x < W; x++)
{
dist = sqrt(pow(H/2.0 -(H - y), 2) + pow(W/2.0 -x, 2)); //problem is prob. here
pixels[y][x][0] += 135+ 55 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // red
pixels[y][x][1] += 135+ 85 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // green
pixels[y][x][2] += 135+105 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // blue
if (offset < 0.3)
offset += 0.05;
}
}
}
Here are some points that I see:
In your mouse function you don't use the arguments x and y, which hold the position of your mouse click; instead you reuse them as loop counters. If you want to start a wave from the pixel you clicked on, you have to center dist on that point, so an idea of the code could be (inspired by GLUT mouse button down):
void mouse(int button, int state, int x, int y)
{
// save the left button state
if (button == GLUT_LEFT_BUTTON)
{
leftMouseButtonDown = (state == GLUT_DOWN);
}
// save the mouse position
mouseXPos = x;
mouseYPos = y;
}
void display()
{
glClear(GL_COLOR_BUFFER_BIT); // clean frame buffer
createFlag();
double dist;
offset += 0.25;
for (int i = 0; i < H; i++)
for (int j = 0; j < W; j++)
{
dist = sqrt(pow(i + H / 2.0, 2) + pow(j + W / 2.0, 2));
pixels[i][j][0] += 135 + 55 * (1 + 1 * sin(dist / 25 - offset)) / 2; // red
pixels[i][j][1] += 135 + 85 * (1 + 1 * sin(dist / 25 - offset)) / 2; // green
pixels[i][j][2] += 135 + 105 * (1 + 1 * sin(dist / 25 - offset)) / 2; // blue
}
if (leftMouseButtonDown) // the way you can get leftMouseButtonDown depends on your
// configuration of your different files
// (main.cpp, scene.cpp, ...)
new_wave() ; //see below
// draws the matrix pixels
glDrawPixels(W, H, GL_RGB, GL_UNSIGNED_BYTE, pixels);
glutSwapBuffers(); // show all
}
void new_wave()
{
int x = mouseXPos; // same remark as leftMouseButtonDown
int y = mouseYPos;
double dist;
offset = 0.1;
for (int i = 0; i < H; i++) // i and not y
for (int j = 0; j < W; j++)
{
dist = sqrt(pow(i - y, 2) + pow(j - x, 2)); // change here: centered on the clicked pixel
pixels[i][j][0] += 135+ 55 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // red
pixels[i][j][1] += 135+ 85 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // green
pixels[i][j][2] += 135+105 * (1 + 1 * sin(dist / 50.0 - offset)) / 2; // blue
if (offset < 0.3)
offset += 0.05;
// I don't really get what you do here with the offset, but another parameter to
// play with could be the amplitude of this wave that starts from the point
// you clicked, like when a drop falls on water
}
}
If you want a real-time waving flag, you have to take time into account as a parameter of your pixel color values.
I haven't tested it, but I'm not sure how OpenGL reacts to RGB values over 255, which happens in your case; keep that in mind if you still have bugs (see the clamping sketch at the end of this answer).
dist = sqrt(pow(i + H / 2.0, 2) + pow(j + W / 2.0, 2)); is centered on (-W/2.0, -H/2.0). Is that what you want? (Maybe yes, I just want to make sure; if you want to simulate some wind you can set the wind's origin wherever you want, which is what you do here.)
The int i, j; declaration in your display isn't needed if you declare the counters inside the for loops (just to clean up the code).
So these are the remarks I would make; this piece of code probably won't be your final one.
Let me know if I misunderstood what you are aiming to do, or if something I wrote is unclear.
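One more note on the over-255 point above (my addition, not part of the original answer): the computed channel values can exceed 255 before they are stored back into the unsigned char pixels buffer, so OpenGL itself never sees anything over 255; clamping before the store keeps the channels in range. A minimal sketch with a hypothetical helper:
// Clamp a computed channel value into [0, 255] before storing it in the
// unsigned char pixel buffer.
unsigned char clampChannel(double value)
{
    if (value < 0.0) return 0;
    if (value > 255.0) return 255;
    return static_cast<unsigned char>(value);
}
// Possible use inside the loops, keeping the original accumulation:
// pixels[i][j][0] = clampChannel(pixels[i][j][0] + 135 + 55 * (1 + sin(dist / 25 - offset)) / 2);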

How to have text centered inside each slice of a pie chart?

I would like the text labels (percentages) to be centered within each pie slice. At the moment it only works, roughly, for two of the quadrants:
What am I doing wrong?
void PieChartWidget::paintEvent(QPaintEvent *) {
QPainter painter(this);
QRectF size;
painter.setPen(QPen(Qt::black, 2));
if (this->height() > this->width()) {
size = QRectF(5, 5, this->width() - 10, this->width() - 10);
} else {
size = QRectF(5, 5, this->height() - 5, this->height() - 10);
}
double sum = 0.0, startAng = 0.0;
double angle, endAng;
double percent;
for (int i = 0; i < qvValues.size(); i++) {
sum += qvValues[i];
}
for (int i = 0; i < qvValues.size(); i++) {
percent = qvValues[i] / sum;
angle = percent * 360.0;
endAng = startAng + angle;
painter.setBrush(qvColors[i]);
painter.drawPie(size, static_cast<int>(startAng * 16),
static_cast<int>(angle * 16));
startAng = endAng;
if (percent != 0) {
double draw_x = width() / 2 +
cos(PI * (endAng / 180.0 - angle / 360.0)) * this->width() / 4.0;
double draw_y = height() / 2 +
sin(PI * (endAng / 180.0 - angle / 360.0)) * this->width() / 4.0;
painter.drawText(draw_x, draw_y, QString::number(percent * 100) + "%");
}
}
}
On this line:
painter.drawText(this->width()/4,this->height(), QString::number(percent*100)+"%");
You do successfully draw the percentage for each section; they're just all being drawn in the same place every time. Try changing it to this:
painter.drawText(double(i + 1) * this->width()/4,this->height(), QString::number(percent*100)+"%");
And you'll see what I mean: by multiplying the x value by a changing factor, the x position of each drawn text changes, so you can see the different percentages being drawn.
If you want it to draw in each quadrant, then your code might look something like this:
# define PI 3.14159265358979323846
...
double draw_x = this->width() / 2.0 + cos(PI * (end_angle / 180.0 - angle / 360.0)) * this->width() / 4.0;
double draw_y = this->height() / 2.0 - sin(PI * (end_angle / 180.0 - angle / 360.0)) * this->width() / 4.0;
painter.drawText(draw_x, draw_y, QString::number(percent*100)+"%");
Basically, what's happening in the above code is that I'm calculating the x and y coordinates of the middle of each slice: PI * (end_angle / 180.0 - angle / 360.0) is the slice's mid-angle (end_angle - angle / 2) converted to radians, and the point is placed halfway out along that direction. (The minus sign on sin accounts for Qt's y axis pointing down.) Then I'm drawing the percentages at those positions.
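If the label should also be visually centered on that point (QPainter::drawText with a plain x, y uses the point as the baseline origin of the text), one option is to let QPainter align the string inside a small rectangle centered on the mid-slice point. A minimal sketch, my addition rather than part of the answer above, with an arbitrary box size:
#include <QPainter>
#include <QRectF>
#include <QString>

// Draw `label` centered on (cx, cy) instead of starting its baseline there.
static void drawCenteredLabel(QPainter &painter, double cx, double cy,
                              const QString &label)
{
    const QRectF box(cx - 40.0, cy - 10.0, 80.0, 20.0); // generous box around the point
    painter.drawText(box, Qt::AlignCenter, label);
}
It would be called as drawCenteredLabel(painter, draw_x, draw_y, QString::number(percent * 100) + "%") inside the loop above.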

Manipulating sfml Vertex Array

I am doing research on the SFML vertex array functions. Based on this tutorial I've been introduced to a basic implementation and want to add to it. Unfortunately I am relatively new to OOP and would appreciate any help.
The output generates a checkerboard-like pattern using a sprite grid.
My goal is to connect the grid-floor tiles using a pathfinding algorithm (recursive backtracker) to generate a path.
The rest of this part is instantiated in main.cpp:
//load the texture for our background vertex array
Texture textureBackground;
textureBackground.loadFromFile("graphics/background_sheet.png");
Then once in the game loop:
//pass the vertex array by reference to the createBackground function
int tileSize = createBackground(background, arena);
And finally, in the draw scene:
window.draw(background, &textureBackground);
#include "stdafx.h"
#include <SFML/Graphics.hpp>
#include "zArena.h"
int createBackground(VertexArray& rVA, IntRect arena)
{
// Anything we do to rVA we are actually doing to background (in the main function)
// How big is each tile/texture
const int TILE_SIZE = 50;
const int TILE_TYPES = 3;
const int VERTS_IN_QUAD = 4;
int worldWidth = arena.width / TILE_SIZE;
int worldHeight = arena.height / TILE_SIZE;
// What type of primitive are we using?
rVA.setPrimitiveType(Quads);
// Set the size of the vertex array
rVA.resize(worldWidth * worldHeight * VERTS_IN_QUAD);
// Start at the beginning of the vertex array
int currentVertex = 0;
for (int w = 0; w < worldWidth; w++)
{
for (int h = 0; h < worldHeight; h++)
{
// Position each vertex in the current quad
rVA[currentVertex + 0].position = Vector2f(w * TILE_SIZE, h * TILE_SIZE);
rVA[currentVertex + 1].position = Vector2f((w * TILE_SIZE) + TILE_SIZE, h * TILE_SIZE);
rVA[currentVertex + 2].position = Vector2f((w * TILE_SIZE) + TILE_SIZE, (h * TILE_SIZE) + TILE_SIZE);
rVA[currentVertex + 3].position = Vector2f((w * TILE_SIZE), (h * TILE_SIZE) + TILE_SIZE);
// Define the position in the Texture to draw for current quad
// Either mud, stone, grass or wall
//if (h == 0 || h == worldHeight - 1 || w == 0 || w == worldWidth - 1)
if ((h % 2 !=0)&& (w % 2 != 0))
{
// Use the wall texture
rVA[currentVertex + 0].texCoords = Vector2f(0, 0 + TILE_TYPES * TILE_SIZE);
rVA[currentVertex + 1].texCoords = Vector2f(TILE_SIZE, 0 + TILE_TYPES * TILE_SIZE);
rVA[currentVertex + 2].texCoords = Vector2f(TILE_SIZE, TILE_SIZE + TILE_TYPES * TILE_SIZE);
rVA[currentVertex + 3].texCoords = Vector2f(0, TILE_SIZE + TILE_TYPES * TILE_SIZE);
}
else
{
// Use a random floor texture
srand((int)time(0) + h * w - h);
int mOrG = (rand() % TILE_TYPES);
int verticalOffset = mOrG * TILE_SIZE;
//int verticalOffset = 0;
rVA[currentVertex + 0].texCoords = Vector2f(0, 0 + verticalOffset);
rVA[currentVertex + 1].texCoords = Vector2f(TILE_SIZE, 0 + verticalOffset);
rVA[currentVertex + 2].texCoords = Vector2f(TILE_SIZE, TILE_SIZE + verticalOffset);
rVA[currentVertex + 3].texCoords = Vector2f(0, TILE_SIZE + verticalOffset);
}
// Position ready for the next four vertices
currentVertex = currentVertex + VERTS_IN_QUAD;
}
}
return TILE_SIZE;
}
As far as I can see, you're generating your tiles on the fly. If you want to create something like a walkable space, you should generate your tile map first, and then draw it based on the generated content.
This may be overkill for your question, but there are several ways to generate random maps satisfying specific constraints.
Once that choice is made, you can draw just as you do now, but instead of
// Use a random floor texture
srand((int)time(0) + h * w - h);
int mOrG = (rand() % TILE_TYPES);
int verticalOffset = mOrG * TILE_SIZE;
you should have something like
// Select texture rect based on generated tilemap
int mOrG = tilemap[w][h]; // Or tilemap[h * worldWidth + w] if you do it as unidimensional array
int verticalOffset = mOrG * TILE_SIZE;
With this approach you must pass the tilemap to your render method or, even better, create a TileMap class that overrides the draw() method. A sketch of the generation step is below.
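A minimal sketch of what "generate your tile map first" could look like. The wall/floor layout mirrors the check in createBackground, but the function name and the std::vector storage are my assumptions, not part of the tutorial code:
#include <cstdlib>
#include <vector>

// Build the tile choices up front so rendering (and later the pathfinding)
// read from the same data instead of rolling textures on the fly.
// Wall tiles get the index TILE_TYPES (the wall row in the texture sheet),
// floor tiles get a random index in [0, TILE_TYPES).
std::vector<std::vector<int>> generateTileMap(int worldWidth, int worldHeight,
                                              int tileTypes)
{
    std::vector<std::vector<int>> tilemap(worldWidth,
                                          std::vector<int>(worldHeight, 0));
    for (int w = 0; w < worldWidth; ++w)
    {
        for (int h = 0; h < worldHeight; ++h)
        {
            if ((h % 2 != 0) && (w % 2 != 0))
                tilemap[w][h] = tileTypes;               // wall
            else
                tilemap[w][h] = std::rand() % tileTypes; // random floor (seed std::srand once at startup)
        }
    }
    return tilemap;
}
createBackground would then take the tilemap as an extra parameter and read tilemap[w][h] where it currently calls rand().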

Calculating iso tile co-ordinates for a TMX map when zoomed on a CCLayerPanZoom control

I'm working on some code to place an isometric CCTMXTiledMap onto a CCLayerPanZoom control and then convert a touch location into iso tile map co-ordinates. This all works perfectly well for me as long as the scale of the CCLayerPanZoom is 1 (i.e. if I don't zoom in or out). I can pan the map around and still calculate the correct iso tile co-ordinates. However, as soon as I zoom the tiled map in or out, the iso co-ordinates returned by my code are completely wrong. Please see below for my code to calculate the iso co-ordinates from the touch location.
-(CGPoint) tilePosFromLocation:(CGPoint)location tileMap:(CCTMXTiledMap*)thisTileMap panZoom:(CCLayerPanZoom*)thisPanZoom
{
float midScale = (thisPanZoom.minScale + thisPanZoom.maxScale) / 2.0;
float newScale = (thisPanZoom.scale <= midScale) ? thisPanZoom.maxScale : thisPanZoom.minScale;
if (thisPanZoom.scale < 1)
{
newScale = newScale + thisPanZoom.scale;
}
else
{
newScale = newScale - thisPanZoom.scale;
}
CGFloat deltaX = (location.x - thisPanZoom.anchorPoint.x * (thisPanZoom.contentSize.width / CC_CONTENT_SCALE_FACTOR()) ) * (newScale);
CGFloat deltaY = (location.y - thisPanZoom.anchorPoint.y * (thisPanZoom.contentSize.height / CC_CONTENT_SCALE_FACTOR()) ) * (newScale);
CGPoint position = ccp((thisPanZoom.position.x - deltaX) , (thisPanZoom.position.y - deltaY) );
float halfMapWidth = thisTileMap.mapSize.width * 0.5f;
float mapHeight = thisTileMap.mapSize.height;
float tileWidth = thisTileMap.tileSize.width / CC_CONTENT_SCALE_FACTOR() * newScale;
float tileHeight = thisTileMap.tileSize.height / CC_CONTENT_SCALE_FACTOR() * newScale;
CGPoint tilePosDiv = CGPointMake(position.x / tileWidth, position.y / tileHeight );
float inverseTileY = tilePosDiv.y - (mapHeight * CC_CONTENT_SCALE_FACTOR()) * newScale; //mapHeight + tilePosDiv.y;
float posX = (int)(tilePosDiv.y - tilePosDiv.x + halfMapWidth);
float posY = (int)(inverseTileY + tilePosDiv.x - halfMapWidth + mapHeight);
// make sure coordinates are within isomap bounds
posX = MAX(0, posX);
posX = MIN(thisTileMap.mapSize.width - 1, posX);
posY = MAX(0, posY);
posY = MIN(thisTileMap.mapSize.height - 1, posY);
return CGPointMake(posX, posY);
}
Can anyone offer any insight into where I'm going wrong with this?
Thanks,
Alan

Using glColorPointer with glDrawElements results in nothing being drawn

I'm working on making uniformly colored spheres for a project and I'm running into an issue. The spheres render fine, but when I try to color them with glColorPointer they stop appearing. OpenGL isn't reporting any errors when I call glGetError, so I'm at a loss as to why this happens.
The code to generate the vertices, colors etc:
void SphereObject::setupVertices()
{
//determine the array sizes
//vertices per row (+1 for the repeated one at the end) * three for each coordinate
//times the number of rows
int arraySize = myNumVertices * 3;
myNumIndices = (myVerticesPerRow + 1) * myRows * 2;
myVertices = new GLdouble[arraySize];
myIndices = new GLuint[myNumIndices];
myNormals = new GLdouble[arraySize];
myColors = new GLint[myNumVertices * 4];
//use spherical coordinates to calculate the vertices
double phiIncrement = 360 / myVerticesPerRow;
double thetaIncrement = 180 / (double)myRows;
int arrayIndex = 0;
int colorArrayIndex = 0;
int indicesIndex = 0;
double x, y, z = 0;
for(double theta = 0; theta <= 180; theta += thetaIncrement)
{
//loop including the repeat for the last vertex
for(double phi = 0; phi <= 360; phi += phiIncrement)
{
//make sure that the last vertex is repeated
if(360 - phi < phiIncrement)
{
x = myRadius * sin(radians(theta)) * cos(radians(0));
y = myRadius * sin(radians(theta)) * sin(radians(0));
z = myRadius * cos(radians(theta));
}
else
{
x = myRadius * sin(radians(theta)) * cos(radians(phi));
y = myRadius * sin(radians(theta)) * sin(radians(phi));
z = myRadius * cos(radians(theta));
}
myColors[colorArrayIndex] = myColor.getX();
myColors[colorArrayIndex + 1] = myColor.getY();
myColors[colorArrayIndex + 2] = myColor.getZ();
myColors[colorArrayIndex + 3] = 1;
myVertices[arrayIndex] = x;
myVertices[arrayIndex + 1] = y;
myVertices[arrayIndex + 2] = z;
if(theta <= 180 - thetaIncrement)
{
myIndices[indicesIndex] = arrayIndex / 3;
myIndices[indicesIndex + 1] = (arrayIndex / 3) + myVerticesPerRow + 1;
indicesIndex += 2;
}
arrayIndex += 3;
colorArrayIndex += 4;
}
}
}
And the code to actually render the thing
void SphereObject::render()
{
glPushMatrix();
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_INT, 0, myColors);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_DOUBLE, 0, myVertices);
glDrawElements(GL_QUAD_STRIP, myNumIndices, GL_UNSIGNED_INT, myIndices);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glPopClientAttrib();
glPopMatrix();
}
Any and all help would be appreciated. I'm really having a hard time for some reason.
When you use GL_INT (or any integer type) for the color pointer, OpenGL linearly maps the largest possible integer value to 1.0f (maximum color) and 0 to 0.0f (minimum color).
Therefore, unless your RGB and A values are in the billions, the colors will appear essentially black (or transparent, if that's enabled). I see that you've got alpha = 1, which will be essentially zero after conversion to float. A sketch of the usual fix is below.
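A minimal sketch of the usual fix, assuming the color components are meant to be in the 0-255 range: store them as unsigned bytes (or as floats in 0.0-1.0), which the fixed-function pipeline normalizes to the 0.0-1.0 colors you expect. The names below are illustrative, not the asker's:
#include <GL/gl.h>
#include <cstddef>
#include <vector>

// Fill a per-vertex RGBA color array with GLubyte values; glColorPointer
// with GL_UNSIGNED_BYTE normalizes 0-255 to 0.0-1.0.
static void fillColorArray(std::vector<GLubyte> &colors, std::size_t vertexCount,
                           GLubyte r, GLubyte g, GLubyte b)
{
    colors.assign(vertexCount * 4, 255); // alpha defaults to 255 (fully opaque)
    for (std::size_t i = 0; i < vertexCount; ++i)
    {
        colors[i * 4 + 0] = r;
        colors[i * 4 + 1] = g;
        colors[i * 4 + 2] = b;
    }
}

// In render():
// glEnableClientState(GL_COLOR_ARRAY);
// glColorPointer(4, GL_UNSIGNED_BYTE, 0, colors.data());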