C++ OpenGL: Ray Trace Shading Isn't Properly Shading

I'm a CS student, and for our final we were told to construct reflections on multiple spheres via ray tracing. That's almost literally all we got for directions, aside from a picture of how it should look when finished. So I need spheres with their reflections (computed by ray tracing) mapped onto them, with proper shading from a light.
Well, I have most of it working, except for handling multiple spheres and the fact that it doesn't look like the picture he gave us as a rubric.
I'm not too sure how to do the multiple spheres, but I'd say I need to store them in a 2D array and modify a few sections of code.
What I thought was to modify sphere_intersect and find_reflect so they know which sphere is being analyzed. Next, modify find_reflect so that when the new vector u is calculated, its starting point (P0) is also updated. Then, if the ray hits a sphere, count how many times the ray has been reflected, terminate at some point (after 10 bounces, maybe), and then just draw the pixel. For an added touch I'd like to give the spheres solid colors, which I believe would call for finding each sphere's normal.
Anyways I'm going to attach a picture of his, a picture of mine, and the source code. Hopefully someone can help me out on this one.
Thanks in advance!
Professor's spheres
My spheres
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <math.h>
#include <string>
#define screen_width 750
#define screen_height 750
#define true 1
#define false 0
#define perpendicular 0
int gridXsize = 20;
int gridZsize = 20;
float plane[] = {0.0, 1.0, 0.0, -50.0,};
float sphere[] = {250.0, 270.0, -100.0, 100.0};
float eye[] = {0.0, 400.0, 550.0};
float light[] = {250.0, 550.0, -200.0};
float dot(float *u, float *v)
{
return u[0]*v[0] + u[1]*v[1] + u[2]*v[2];
}
void norm(float *u)
{
float norm = sqrt(abs(dot(u,u)));
for (int i =0; i <3; i++)
{
u[i] = u[i]/norm;
}
}
float plane_intersect(float *u, float *pO)
{
float normt[3] = {plane[0], plane[1], plane[2]};
float s;
if (dot(u,normt) == 0)
{
s = -10;
}
else
{
s = (plane[3]-(dot(pO,normt)))/(dot(u,normt));
}
return s;
}
float sphere_intersect(float *u, float *pO)
{
float deltaP[3] = {sphere[0]-pO[0],sphere[1]-pO[1],sphere[2]-pO[2]};
float deltLen = sqrt(abs(dot(deltaP,deltaP)));
float t=0;
float answer;
float det;
if ((det =(abs(dot(u,deltaP)*dot(u,deltaP))- (deltLen*deltLen)+sphere[3]*sphere[3])) < 0)
{
answer = -10;
}
else
{
t =-1*dot(u,deltaP)- sqrt(det) ;
if (t>0)
{
answer = t;
}
else
{
answer = -10;
}
}
return answer;
}
void find_reflect(float *u, float s, float *pO)
{
float n[3] = {pO[0]+s *u[0]-sphere[0],pO[1]+s *u[1]-sphere[1],pO[2]+s *u[2]- sphere[2]};
float l[3] = {s *u[0],s *u[1],s *u[2]};
u[0] =(2*dot(l,n)*n[0])-l[0];
u[1] = (2*dot(l,n)*n[1])-l[1];
u[2] = (2*dot(l,n)*n[2])-l[2];
}
float find_shade(float *u,float s, float *pO)
{
float answer;
float lightVec[3] = {light[0]-(pO[0]+s *u[0]), light[1]-(pO[1]+s *u[1]), light[2]-(pO[2]+s *u[2])};
float n[3] = {pO[0]+s *u[0]-sphere[0],pO[1]+s *u[1]-sphere[1],pO[2]+s *u[2]-sphere[2]};
answer = -1*dot(lightVec,n)/(sqrt(abs(dot(lightVec,lightVec)))*sqrt(abs(dot(n,n))));
return answer;
}
void init()
{
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,screen_width,0,screen_height);
}
void display()
{
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
for (int i=0; i < screen_width; i++)
{
for (int j=0; j < screen_height; j++)
{
float ray[3] = {1*(eye[0]-i),-1*(eye[1]-j),1*eye[2]};
float point[3] = {i,j,0};
norm(ray);
int plotted = false;
while (!plotted)
{
float s_plane = plane_intersect(ray, point);
float s_sphere = sphere_intersect(ray, point);
if (s_plane <= 0 && s_sphere <=0)
{
glColor3f(0,0,0);
glBegin(GL_POINTS);
glVertex3f(i,j,0);
glEnd();
plotted = true;
}
else if (s_sphere >= 0 && (s_plane <=0 || s_sphere <= s_plane))
{
find_reflect(ray, s_sphere, point);
}
else if (s_plane >=0 && (s_sphere <=0 ||s_plane <= s_sphere))
{
float shade = find_shade(ray, s_plane, point);
float xx = s_plane*ray[0] + eye[0];
float z = s_plane*ray[2] + eye[2];
if (abs((int)xx/gridXsize)%2 == abs((int)z/gridZsize)%2)
{
glColor3f(shade,0,0);
}
else
{
glColor3f(shade,shade,shade);
}
glBegin(GL_POINTS);
glVertex3f(i,j,0);
glEnd();
plotted = true;
}
}
}
}
glFlush();
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutCreateWindow("Ray Trace with Sphere.");
glutInitWindowSize(screen_width,screen_height);
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB);
glutDisplayFunc(display);
init();
glutMainLoop();
return 0;
}

The professor did not tell you much, because this topic is covered thousands of times over the web; just check out "Whitted Raytracing" ;) It's homework, and 5 minutes of googling would solve the issue... Some clues to help without doing your homework for you:
Do it step by step, don't try to reproduce the picture in one step
Get one sphere working first: if the ray hits the plane, plot a green pixel; the sphere, a red pixel; nothing, black. That's enough to get the intersection computations right. From your picture, it looks like you don't have the intersections right, for a start
Same as the previous step, but with several spheres: check the intersection for all objects and keep the closest intersection to the point of view (see the sketch after this list)
Same as the previous step, but also compute the amount of light received at each intersection found, to get shades of red for the spheres and shades of green for the plane (hint: dot product ^^)
Texture for the plane
Reflection for the spheres. Protip: a mirror doesn't reflect 100% of the light, just a fraction of it.
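Not part of the original answer, but here is a minimal sketch of the "several spheres, keep the closest hit" step, assuming the spheres are stored in a small array. The Sphere struct, the second sphere's values, and the helper names are made up for illustration; the intersection itself is the standard geometric ray/sphere test, so its sign conventions may differ from the posted sphere_intersect.

struct Sphere { float cx, cy, cz, r; };

Sphere spheres[2] = { {250.0f, 270.0f, -100.0f, 100.0f},   // the existing sphere
                      { 50.0f, 150.0f, -300.0f,  80.0f} }; // invented second sphere

// ray/sphere test parameterized by the sphere; returns the ray parameter t,
// or -10 for "no hit", following the convention of the posted code
float sphere_intersect_one(float *u, float *pO, const Sphere &sp)
{
    float deltaP[3] = {sp.cx - pO[0], sp.cy - pO[1], sp.cz - pO[2]};
    float b = dot(u, deltaP);                          // u must be normalized
    float det = b*b - dot(deltaP, deltaP) + sp.r*sp.r;
    if (det < 0) return -10;
    float t = b - sqrt(det);                           // nearer of the two roots
    return (t > 0) ? t : -10;
}

// test every sphere and report the closest positive hit (and which sphere it was)
float closest_sphere_hit(float *u, float *pO, int &hitIndex)
{
    float best = -10;
    hitIndex = -1;
    for (int k = 0; k < 2; k++)
    {
        float t = sphere_intersect_one(u, pO, spheres[k]);
        if (t > 0 && (best < 0 || t < best))
        {
            best = t;
            hitIndex = k;
        }
    }
    return best;
}

display() would then compare this closest sphere hit against s_plane exactly as it already does, reflect off spheres[hitIndex] when a sphere wins, and stop after a fixed number of bounces (the 10 the question suggests) instead of looping until something is hit.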

Related

How to aim the camera at the z-index of the cell in front of my character?

I've got a 3D terrain environment like so:
I'm trying to get the character (camera) to look up when climbing hills, and look down when descending, like climbing in real life.
This is what it's currently doing:
Right now the camera moves up and down the hills just fine, but I can't get the camera angle to work correctly. The only way I can think of to aim up or down depending on the terrain is getting the z-index of the cell my character is currently facing and setting that as the focus, but I really have no idea how to do that.
This is admittedly for an assignment, and we're intentionally not using objects so things are organized a little strangely.
Here's how I'm currently doing things:
const int M = 100; // width
const int N = 100; // height
double zHeights[M+1][N+1]; // 2D array containing the z-indexes of terrain cells
double gRX = 1.5; // x position of character
double gRY = 2.5; // y position of character
double gDirection = 45; // direction of character
double gRSpeed = 0.05; // move speed of character
double getZ(double x, double y) // returns the height of the current cell
{
double z = .5*sin(x*.25) + .4*sin(y*.15-.43);
z += sin(x*.45-.7) * cos(y*.315-.31)+.5;
z += sin(x*.15-.97) * sin(y*.35-8.31);
double amplitude = 5;
z *= amplitude;
return z;
}
void generateTerrain()
{
glBegin(GL_QUADS);
for (int i = 0; i <= M; i++)
{
for (int j = 0; j <= N; j++)
{
zHeights[i][j] = getZ(i,j);
}
}
}
void drawTerrain()
{
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
{
glColor3ub( (i*34525+j*5245)%256, (i*3456345+j*6757)%256, (i*98776+j*6554544)%256);
glVertex3d(i, j, getZ(i,j));
glVertex3d(i, j+1, getZ(i,j+1));
glVertex3d(i+1, j+1, getZ(i+1,j+1));
glVertex3d(i+1, j, getZ(i+1,j));
}
}
}
void display() // callback to glutDisplayFunc
{
glEnable(GL_DEPTH_TEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
double radians = gDirection /180.*3.141592654; // converts direction to radians
double z = getZ((int)gRX, (int)gRY); // casts as int to find z-index in zHeights[][]
double dx = cos(radians)*gRSpeed;
double dy = sin(radians)*gRSpeed;
double at_x = gRX + dx;
double at_y = gRY + dy;
double at_z = z; // source of problem, no idea what to do
gluLookAt(gRX, gRY, z + 2, // eye position
at_x, at_y, at_z + 2, // point to look at, also wrong
0, 0, 1); // up vector
drawTerrain();
glEnd();
}
void init()
{
generateTerrain();
}
Firstly, I don't see any reason to cast to int here:
double z = getZ((int)gRX, (int)gRY);
Just use the double values to get a smooth behavior.
Your basic approach is already pretty good. You take the current position (gRX, gRY), walk a bit in the viewing direction (dx, dy) and use that as the point to look at. There are just two small things that need adaptation:
double dx = cos(radians)*gRSpeed;
double dy = sin(radians)*gRSpeed;
Although multiplying by gRSpeed might seem like a good idea, in my opinion this factor should not be tied to the character's kinematics. Instead, it represents the smoothness of your view direction: small values make the direction stick very closely to the terrain geometry, larger values smooth it out.
And finally, you need to evaluate the height at your look-at point:
double at_z = getZ(at_x, at_y);
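Putting both changes together, the relevant part of display() might look like the sketch below; the 0.5 look-ahead distance is an illustrative value for the smoothing factor, not something prescribed by the answer.

double lookAhead = 0.5;                        // view-direction smoothing, decoupled from gRSpeed
double radians = gDirection / 180. * 3.141592654;
double dx = cos(radians) * lookAhead;
double dy = sin(radians) * lookAhead;
double at_x = gRX + dx;
double at_y = gRY + dy;
double z    = getZ(gRX, gRY);                  // no int cast: smooth eye height
double at_z = getZ(at_x, at_y);                // sample the terrain at the look-at point
gluLookAt(gRX, gRY, z + 2,                     // eye position
          at_x, at_y, at_z + 2,                // look-at point, now following the terrain
          0, 0, 1);                            // up vector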

Implementation of feature detection algorithm

I'm fairly new to programming and would like to know how to start implementing the following algorithm in C++,
Given a binary image where pixels with intensity 255 show edges and pixels with intensity 0 show the background, find line segments longer than n pixels in the image. t is a counter showing the number of iterations without finding a line, and tm is the maximum number of iterations allowed before exiting the program.
Step 1: Let t = 0.
Step 2: Take two edge points randomly from the image and find the equation of the line passing through them.
Step 3: Find m, the number of other edge points in the image that are within distance d pixels of the line.
Step 4: If m > n, go to Step 5. Otherwise (m ≤ n), increment t by 1; if t < tm go to Step 2, and if t ≥ tm exit the program.
Step 5: Draw the line and remove the edge points falling within distance d pixels of it from the image. Then go to Step 1.
Basically, I just want to randomly pick two points from the image, find the distance between them, and if that distance is too small, I would detect a line between them.
I would appreciate if a small code snippet is provided, to get me started.
This is more like RANSAC parametric line detection. I'll keep this post updated if I get it done.
/* Display Routine */
#include "define.h"
ByteImage bimg; //A copy of the image to be viewed
int width, height; //Window dimensions
GLfloat zoomx = 1.0, zoomy = 1.0; //Pixel zoom
int win; //Window index
void resetViewer();
void reshape(int w, int h) {
glViewport(0, 0, (GLsizei)w, (GLsizei)h);
if ((w!=width) || (h!=height)) {
zoomx=(GLfloat)w/(GLfloat)bimg.nc;
zoomy=(GLfloat)h/(GLfloat)bimg.nr;
glPixelZoom(zoomx,zoomy);
}
width=w; height=h;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, (GLdouble)w, 0.0, (GLdouble)h);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void mouse(int button, int state, int x, int y) {
glutPostRedisplay();
if((button == GLUT_LEFT_BUTTON) && (state == GLUT_DOWN) &&
(zoomx==1.0) && (zoomy==1.0)){
printf(" row=%d, col=%d, int=%d.\n", y,x, (int)bimg.image[(bimg.nr-1-y)*bimg.nc+x]);
glutPostRedisplay();
}
}
void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos2i(0, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glDrawPixels((GLsizei)bimg.nc,(GLsizei)bimg.nr, GL_LUMINANCE,GL_UNSIGNED_BYTE, bimg.image);
glutSwapBuffers();
}
Let us assume you have an int image[XDIMENSION][YDIMENSION].
Let t=0.
int t = 0; // ;-)
Take two edge points randomly from the image and find equation of the line passing through them.
Brute force: you could randomly search the image for points and re-search when they are not edge points
struct Point {
int x;
int y;
};
bool is_edge(Point a) {
return image[a.x][a.y] == 255;
}
int randomUpto(int upto) {
int r = rand() % upto;
return r;
}
This needs the pseudo-random number generator to be initialized once, e.g. via
srand(time(NULL));
To find edge points
Point a;
do {
a.x = randomUpto(XDIMENSION);
a.y = randomUpto(YDIMENSION);
} while ( ! is_edge(a) );
Find m, the number of other edge points in the image that are within distance d pixels of the line.
You need the line between the points. Some searching yields this fine answer, which leads to
std::vector<Point> getLineBetween(Point a, Point b) {
double dx = b.x - a.x;
double dy = b.y - a.y;
double dist = sqrt(dx * dx + dy * dy);
dx /= dist;
dy /= dist;
std::vector<Point> points;
points.push_back(a);
for ( int i = 0 ; i < 2*dist; i++ ) {
Point tmp;
tmp.x = a.x + (int)(i * dx /2.0);
tmp.y = a.y + (int)(i * dy /2.0);
if ( tmp.x != points.back().x
|| tmp.y != points.back().y ) {
points.push_back(tmp);
}
}
return points;
}
Do you see a pattern here? Isolate the steps into substeps, ask google, look at the documentation, try out stuff until it works.
Your next steps might be to
create a distance function (Euclidean should suffice)
find all points near the line (or near a point, which is easier) based on the distance function (a sketch follows below)
Try out some and come back if you still need help.
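For those two follow-up steps, a rough sketch that reuses the Point struct, is_edge and the image dimensions already introduced above (d is the pixel distance from the task description; sqrt/fabs need <math.h>):

// perpendicular distance from p to the infinite line through a and b
double distance_to_line(Point p, Point a, Point b) {
    double dx = b.x - a.x;
    double dy = b.y - a.y;
    double len = sqrt(dx * dx + dy * dy);
    return fabs(dy * (p.x - a.x) - dx * (p.y - a.y)) / len;
}

// count the edge points lying within distance d of the line through a and b
int count_edge_points_near_line(Point a, Point b, double d) {
    int m = 0;
    for (int x = 0; x < XDIMENSION; x++) {
        for (int y = 0; y < YDIMENSION; y++) {
            Point p = {x, y};
            if (is_edge(p) && distance_to_line(p, a, b) <= d)
                m++;   // a and b themselves are counted too; subtract 2 if only "other" points matter
        }
    }
    return m;          // compare against n to decide whether this line is kept
}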

Weird Raytracing Artifacts

I am trying to create a ray tracer using Qt, but I have some really weird artifacts going on.
Before I implemented shading, I just had 4 spheres, 3 triangles and 2 bounded planes in my scene. They all showed up as expected and in the expected color; however, on my planes I would see dots the same color as the background. These dots stayed static relative to my view position, so if I moved the camera around, the dots moved around as well. However, they only affected the planes and triangles and never appeared on the spheres.
Once I implemented shading, the issue got worse. The dots now also appear on the parts of the spheres facing the light source, i.e. any part affected by the diffuse term.
Also, my one plane of pure blue (RGB 0,0,255) has gone straight black. Since I have two planes I switched their colors and again the blue one went black, so it's a color issue and not a plane issue.
If anyone has any suggestions as to what the problem could be or wants to see any particular code let me know.
#include "plane.h"
#include "intersection.h"
#include <math.h>
#include <iostream>
Plane::Plane(QVector3D bottomLeftVertex, QVector3D topRightVertex, QVector3D normal, QVector3D point, Material *material)
{
minCoords_.setX(qMin(bottomLeftVertex.x(),topRightVertex.x()));
minCoords_.setY(qMin(bottomLeftVertex.y(),topRightVertex.y()));
minCoords_.setZ(qMin(bottomLeftVertex.z(),topRightVertex.z()));
maxCoords_.setX(qMax(bottomLeftVertex.x(),topRightVertex.x()));
maxCoords_.setY(qMax(bottomLeftVertex.y(),topRightVertex.y()));
maxCoords_.setZ(qMax(bottomLeftVertex.z(),topRightVertex.z()));
normal_ = normal;
normal_.normalize();
point_ = point;
material_ = material;
}
Plane::~Plane()
{
}
void Plane::intersect(QVector3D rayOrigin, QVector3D rayDirection, Intersection* result)
{
if(normal_ == QVector3D(0,0,0)) //plane is degenerate
{
cout << "degenerate plane" << endl;
return;
}
float minT;
//t = -Normal*(Origin-Point) / Normal*direction
float numerator = (-1)*QVector3D::dotProduct(normal_, (rayOrigin - point_));
float denominator = QVector3D::dotProduct(normal_, rayDirection);
if (fabs(denominator) < 0.0000001) //plane orthogonal to view
{
return;
}
minT = numerator / denominator;
if (minT < 0.0)
{
return;
}
QVector3D intersectPoint = rayOrigin + (rayDirection * minT);
//check inside plane dimensions
if(intersectPoint.x() < minCoords_.x() || intersectPoint.x() > maxCoords_.x() ||
intersectPoint.y() < minCoords_.y() || intersectPoint.y() > maxCoords_.y() ||
intersectPoint.z() < minCoords_.z() || intersectPoint.z() > maxCoords_.z())
{
return;
}
//only update if closest object
if(result->distance_ > minT)
{
result->hit_ = true;
result->intersectPoint_ = intersectPoint;
result->normalAtIntersect_ = normal_;
result->distance_ = minT;
result->material_ = material_;
}
}
QVector3D MainWindow::traceRay(QVector3D rayOrigin, QVector3D rayDirection, int depth)
{
if(depth > maxDepth)
{
return backgroundColour;
}
Intersection* rayResult = new Intersection();
foreach (Shape* shape, shapeList)
{
shape->intersect(rayOrigin, rayDirection, rayResult);
}
if(rayResult->hit_ == false)
{
return backgroundColour;
}
else
{
QVector3D intensity = QVector3D(0,0,0);
QVector3D shadowRay = pointLight - rayResult->intersectPoint_;
shadowRay.normalize();
Intersection* shadowResult = new Intersection();
foreach (Shape* shape, shapeList)
{
shape->intersect(rayResult->intersectPoint_, shadowRay, shadowResult);
}
if(shadowResult->hit_ == true)
{
intensity += shadowResult->material_->diffuse_ * intensityAmbient;
}
else
{
intensity += rayResult->material_->ambient_ * intensityAmbient;
// Diffuse
intensity += rayResult->material_->diffuse_ * intensityLight * qMax(QVector3D::dotProduct(rayResult->normalAtIntersect_,shadowRay), 0.0f);
// Specular
QVector3D R = ((2*(QVector3D::dotProduct(rayResult->normalAtIntersect_,shadowRay))* rayResult->normalAtIntersect_) - shadowRay);
R.normalize();
QVector3D V = rayOrigin - rayResult->intersectPoint_;
V.normalize();
intensity += rayResult->material_->specular_ * intensityLight * pow(qMax(QVector3D::dotProduct(R,V), 0.0f), rayResult->material_->specularExponent_);
}
return intensity;
}
}
So I figured out my issues. They were due to float precision: any check against < 0.0 would intermittently fail. I had to add an offset to all my checks so that I was checking against < 0.001 instead.
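For anyone hitting the same artifacts, a minimal sketch of what those checks look like with the poster's 0.001 offset; the shadow-ray nudge at the end is a common companion fix for the same self-intersection problem, not something stated in the post.

const float EPSILON = 0.001f;

// in Plane::intersect (and the sphere/triangle versions alike):
if (minT < EPSILON)        // was: if (minT < 0.0)
{
    return;
}

// in traceRay, start the shadow ray slightly off the surface before testing occluders:
QVector3D shadowOrigin = rayResult->intersectPoint_
                         + rayResult->normalAtIntersect_ * EPSILON;
shape->intersect(shadowOrigin, shadowRay, shadowResult);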

my satellite circumnavigates, but along a non-circular path (correcting spherical coord math)

This is how I position my torus (satellite) upon a sphere, and then rotate it around the sphere:
int satellite_1_1_step = 0;
int &r_satellite_1_1_step = satellite_1_1_step;
float satellite_1_1_divider = 300;
float satellite_1_1_theta = 6.5;
float satellite_1_1_phi = 1;
float satellite_1_1_theta_increment = 20/satellite_1_1_divider;
float satellite_1_1_phi_increment = 20/satellite_1_1_divider;
void satellite_1_1 ()
{
float satellite_1_1_theta_math = (satellite_1_1_theta-(satellite_1_1_theta_increment * r_satellite_1_1_step))/10.0*M_PI;
float satellite_1_1_phi_math = (satellite_1_1_phi-(satellite_1_1_phi_increment * r_satellite_1_1_step))/10.0*2*M_PI;
r_satellite_1_1_x = radius_exodus_pos * sin(satellite_1_1_theta_math) * cos(satellite_1_1_phi_math);
r_satellite_1_1_y = radius_exodus_pos * sin(satellite_1_1_theta_math) * sin(satellite_1_1_phi_math);
r_satellite_1_1_z = radius_exodus_pos * cos(satellite_1_1_theta_math);
glPushMatrix();
glTranslatef(r_satellite_1_1_x,r_satellite_1_1_y,r_satellite_1_1_z);
glColor3f(1,0,0);
glutSolidTorus(0.04, 0.2, 10, 100);
glEnd();
glPopMatrix();
}
This is how I update and increment its position:
void satellite_1_1_increment()
{
if (r_satellite_1_1_step < satellite_1_1_divider)
{
++(r_satellite_1_1_step);
}
if (r_satellite_1_1_step >= satellite_1_1_divider)
{
r_satellite_1_1_step = 1;
}
}
So, my torus (satellite) moves around the sphere, ending back up in its starting position, and continues over again - which is great. However, the path it takes wobbles around the poles (I think) along the way - rather than simply circumnavigating the sphere.
Is there an improvement that can be made to my math which will cause the satellite to circumnavigate the sphere in a more circular path?
The first issue I see is this:
void satellite_1_1_increment()
{
if (r_satellite_1_1_step < satellite_1_1_divider)
{
++(r_satellite_1_1_step);
}
if (r_satellite_1_1_step >= satellite_1_1_divider)
{
r_satellite_1_1_step = 1;
}
}
What happens at the edge case when the step is incremented by the first test such that it satisfies the second test? It is immediately reset, thus missing the value. I think you want it written like this to avoid that problem:
void satellite_1_1_increment()
{
if (r_satellite_1_1_step >= satellite_1_1_divider)
r_satellite_1_1_step = 1;
else ++r_satellite_1_1_step;
}
Is 1 the correct reset value? Maybe it should be 0?
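As for the wobble itself: it comes from advancing theta and phi together, which traces a wandering path over the sphere rather than a circle. A sketch (not from the answer above, names follow the question) of a cleanly circular orbit is to hold the polar angle fixed, sweep only phi through a full revolution, and tilt the whole orbit with a rotation if a different orbital plane is wanted; the 30-degree tilt is purely illustrative.

float t     = (float)r_satellite_1_1_step / satellite_1_1_divider;  // 0..1 over one orbit
float phi   = t * 2.0f * M_PI;                                      // full revolution
float theta = 0.5f * M_PI;                                          // stay on the equator

r_satellite_1_1_x = radius_exodus_pos * sin(theta) * cos(phi);
r_satellite_1_1_y = radius_exodus_pos * sin(theta) * sin(phi);
r_satellite_1_1_z = radius_exodus_pos * cos(theta);

glPushMatrix();
glRotatef(30.0f, 1.0f, 0.0f, 0.0f);   // optional: tilt the orbital plane
glTranslatef(r_satellite_1_1_x, r_satellite_1_1_y, r_satellite_1_1_z);
glColor3f(1, 0, 0);
glutSolidTorus(0.04, 0.2, 10, 100);
glPopMatrix();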
I changed the first two lines of void satellite_1_1() to:
float satellite_1_1_theta_math = (satellite_1_1_theta+(satellite_1_1_theta_increment* r_satellite_1_1_step))*M_PI;
float satellite_1_1_phi_math = (satellite_1_1_phi-(satellite_1_1_phi_increment* r_satellite_1_1_step))*M_PI/360;
Now the satellite orbits 360 degrees along the equator. Adding a glRotatef after my glPushMatrix lets me fine tune its axis.
Thanks again wallyk. - kropcke

Zooming the Mandelbrot set a second time does not zoom in the desired place

I am using OpenGL/C++ to draw the Mandelbrot set and am trying to zoom into it. The first zoom works and lands where I want (by clicking), but when I try to zoom the next time it does not zoom where I intended; instead it shifts and zooms a little bit away from the place I want.
I use
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
double dividecubesby = 700;
double left = -2.0;
double right = 2.0;
double bottom = -2.0;
double top = 2.0;
int maxiteration = 128;
int zoomlevel = 3;
double baseSize = 4.0;
double Size = 0.0;
double xco=0.0;
double yco=0.0;
void SetXYpos(int px,int py)
{
xco = left+(right-left)*px/dividecubesby;
yco = top-(top-bottom)*py/dividecubesby;
}
void keyPressed(unsigned char key, int x, int y)
{
int xx= x;
int yy= y;
SetXYpos(xx,yy);
Size = 0.5*(pow(2.0, (-zoomlevel)));
switch(key){
case 'z':
left = xco - Size;
right = xco + Size;
bottom = yco - Size;
top = yco + Size;
dividecubesby = dividecubesby+100;
maxiteration = maxiteration+100;
zoomlevel=zoomlevel+1;
glutPostRedisplay();
break;
}
}
int mandtest(double Cr, double Ci)
{
double Zr = 0.0;
double Zi = 0.0;
int times = 0;
double temp;
Zr = Zr+Cr;
Zi = Zi+Ci;
while ((((Zr*Zr)+(Zi*Zi))<=4) && (times < maxiteration)){
temp = (Zr*Zr)-(Zi*Zi);
Zi = 2*Zr*Zi;
Zr = temp+Cr;
Zi = Zi+Ci;
times = times+1;
}
return times;
}
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glColor3f(1.0f,1.0f,1.0f);
double deltax = ((right - left)/(dividecubesby-1));
double deltay = ((top- bottom)/(dividecubesby-1));
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(left,right,bottom,top);
glBegin(GL_POINTS);
for(double x= left;x<=right;x += deltax ){
for(double y= bottom; y<=top;y += deltay ){
if((mandtest(x,y))==maxiteration){
glColor3f(1.0f,1.0f,1.0f);
glVertex2f(x,y);
}
else {
glColor3f((float)mandtest(x,y)/10,0.0f,(float)mandtest(x,y)/30);
glVertex2f(x,y);
}
}
}
glEnd();
glFlush();
}
SetXYpos calculates where the mouse was clicked in terms of the Cartesian coordinates [-2,2]; px and py are pixel coordinates.
You have too many variables. What defines the width of your image? (right - left)? baseSize + f(zoomLevel)? SizeReal? It's not clear whose job it is to set what, and what is used by what, so you cannot hope to update everything consistently.
Also, why does dividecubesby increase by a flat 100 while the image size halves with every zoom? And where is the width/height of your window-system window, which defines the limits of the clicked coordinates?
My suggestion is to start from scratch and maybe draw a graph of who updates whom (left/right -> imageWidth). Make sure that you get the correct clicked coordinates independent of what your drawing window (left/right/top/bottom) is, and go on from there. As it is, I think your first zoom works correctly by accident.
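A sketch of that idea with illustrative names (centerX, centerY, size and windowPixels are not the poster's variables): keep a single center and size as the source of truth, derive the ortho bounds from them, and map clicks with the window's fixed pixel size rather than with dividecubesby.

double centerX = 0.0, centerY = 0.0;
double size    = 4.0;                    // current view width/height in complex units
const int windowPixels = 700;            // the GLUT window's fixed pixel size

void zoomAt(int px, int py)              // call this from the key/mouse handler
{
    // convert the click to complex coordinates using the *current* view
    double viewLeft = centerX - size / 2.0;
    double viewTop  = centerY + size / 2.0;
    centerX = viewLeft + size * px / (double)windowPixels;
    centerY = viewTop  - size * py / (double)windowPixels;
    size   *= 0.5;                       // halve the view on every zoom
}

// display() then derives its bounds from the same two values:
//   gluOrtho2D(centerX - size/2, centerX + size/2,
//              centerY - size/2, centerY + size/2);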