C++ OpenGL terrain generation

I'm trying to make a terrain from a grid of vertices, and I have a bug I just can't find. I've been stuck on it for 3 hours. I'm using C++ and OpenGL, and I plan to use a blend map for texturing and a height map later. Anyway, here's the code:
Here's how it should look: http://postimg.org/image/9431kcvy7/
Here's how it looks:
http://postimg.org/image/xxsoesqkp/
As you can see, the triangles are separated by a 1-unit rectangle, and it looks like all the bottom points form a triangle with the point at coordinates (0,0,0).
I know this problem might seem easy to solve, but I've already lost 3 hours trying. Please help :)
Map.h
#ifndef MAP_H
#define MAP_H
#include <string>
#include <vector>
#include <iostream>
#include <fstream>
#include <SFML/OpenGL.hpp>
#include <SFML/Graphics.hpp>
#include <windows.h>
using namespace std;
struct coordinate{
float x,y,z;
};
struct face{
int v[3];
int n[3];
};
struct uv{
float x;
float y;
};
class Map
{
private:
int mapX,mapY;
vector<coordinate> vertex;
vector<uv>textureCoordinates;
vector<coordinate>normals;
vector< vector<face> > faces;
string fileNameString;
sf::Image image[5];
sf::Color faceColor,blendPixel,p0,p1,p2;
sf::Image texture;
sf::Uint8 pixels[256*256*4];
unsigned int imageID[3],textureID;
public:
void load(const char *fileName);
void draw();
};
#endif // MAP_H
And Map.cpp
#include "Map.h"
#define blendMap 3
#define heightMap 4
void Map::load(const char *fileName)
{
int i,j;
sf::Color color; // local used below when sampling the height map
fileNameString=fileName;
vector<face> F;
coordinate v;
face f;
image[0].loadFromFile(fileNameString+"/0.png");
image[1].loadFromFile(fileNameString+"/1.png");
image[2].loadFromFile(fileNameString+"/2.png");
image[blendMap].loadFromFile(fileNameString+"/blendMap.png");
image[heightMap].loadFromFile(fileNameString+"/heightMap.png");
mapX=image[blendMap].getSize().x;
mapY=image[blendMap].getSize().y;
for(i=-mapY/2;i<mapY/2;i++)
for(j=-mapX/2;j<mapX/2;j++)
{
v.x=j*0.5;
v.z=i*0.5;
vertex.push_back(v);
}
for(i=0;i<mapY-1;i++)
{
for(j=0;j<2*(mapX-1);j++)
F.push_back(f);
faces.push_back(F);
}
for(i=0;i<mapY-1;i++)
for(j=0;j<(mapX-1)*2;j+=2)
{
faces[i][j].v[0]=i*mapX+j;
faces[i][j].v[1]=i*mapX+j+1;
faces[i][j].v[2]=(i+1)*mapX+j;
faces[i][j+1].v[0]=i*mapX+j+1;
faces[i][j+1].v[1]=(i+1)*mapX+j+1;
faces[i][j+1].v[2]=(i+1)*mapX+j;
}
for(i=0;i<mapX*mapY;i++)
{
color=image[heightMap].getPixel(i/mapX,i%mapX);
vertex[i].y=0;//(float)color.r/25.5-10;
}
}
void Map::draw()
{
unsigned int i,j;
for(i=0;i<mapY-1;i++)
for(j=0;j<(mapX-1)*2;j+=2)
{
glBindTexture(GL_TEXTURE_2D,imageID[0]);
glBegin(GL_TRIANGLES);
glTexCoord2f (0,0);
glVertex3f(vertex[faces[i][j].v[0]].x , vertex[faces[i][j].v[0]].y , vertex[faces[i][j].v[0]].z);
glTexCoord2f (1,0);
glVertex3f(vertex[faces[i][j].v[1]].x , vertex[faces[i][j].v[1]].y , vertex[faces[i][j].v[1]].z);
glTexCoord2f (0,1);
glVertex3f(vertex[faces[i][j].v[2]].x , vertex[faces[i][j].v[2]].y , vertex[faces[i][j].v[2]].z);
glTexCoord2f (0,0);
glVertex3f(vertex[faces[i][j+1].v[0]].x , vertex[faces[i][j+1].v[0]].y , vertex[faces[i][j+1].v[0]].z);
glTexCoord2f (1,0);
glVertex3f(vertex[faces[i][j+1].v[1]].x , vertex[faces[i][j+1].v[1]].y , vertex[faces[i][j+1].v[1]].z);
glTexCoord2f (0,1);
glVertex3f(vertex[faces[i][j+1].v[2]].x , vertex[faces[i][j+1].v[2]].y , vertex[faces[i][j+1].v[2]].z);
glEnd();
}
}

A few things:
for(i=-mapY/2;i<mapY/2;i++)
This is dangerous and probably not the intention of the loop, anyway. You want to loop mapY times. However, if mapY is odd, you will loop only mapY - 1 times. E.g. if mapY = 3, then -mapY / 2 = -1; mapY / 2 = 1. So you will loop with the values -1 and 0. That's a first problem, which results in too few vertices in your buffer (this is probably the main problem). Instead do the shifting on the coordinate level:
for(i = 0; i < mapY; i++)
for(j = 0; j < mapX; j++)
{
v.x = (j - mapX / 2.0) * 0.5;
v.z = (i - mapY / 2.0) * 0.5;
vertex.push_back(v);
}
Is there a reason why you use a vector<vector<...>> for the faces? It will give you all kinds of problems regarding indexing as you already noticed. Just use a vector<Face> and put all your faces in there. Usually, you create this structure once and never touch it again. So the 2D indexing is probably not necessary. If you want to stay with the 2D indexing, this loop has wrong bounds:
for(j=0;j<(mapX-1)*2;j+=2)
The bounds themselves are fine (each row holds 2 * (mapX - 1) faces), but note that j counts faces, two per grid cell, while the vertex indices above use it directly as a column offset. That makes the triangles skip every other column and eventually run past the end of the vertex array. The column is j / 2, e.g.
faces[i][j].v[0] = i * mapX + j / 2;
and likewise for the other five indices.
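Going back to the flat-vector suggestion, a minimal sketch using the question's face struct could look like this (the two-triangles-per-cell ordering is mine, not taken from the original code):
vector<face> flatFaces;                    // one flat list instead of vector<vector<face>>
for (int i = 0; i < mapY - 1; i++)         // rows of grid cells
    for (int j = 0; j < mapX - 1; j++)     // columns of grid cells
    {
        face a, b;                         // two triangles per cell (normals in n[] left unset here)
        a.v[0] = i * mapX + j;             // top-left
        a.v[1] = i * mapX + j + 1;         // top-right
        a.v[2] = (i + 1) * mapX + j;       // bottom-left
        b.v[0] = i * mapX + j + 1;         // top-right
        b.v[1] = (i + 1) * mapX + j + 1;   // bottom-right
        b.v[2] = (i + 1) * mapX + j;       // bottom-left
        flatFaces.push_back(a);
        flatFaces.push_back(b);
    }
// Drawing then becomes a single loop over flatFaces.size() with no 2D indexing.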

Related

C++ / SFML: Printing convex shapes to the screen using two recursive calls only displays the shapes from the first recursive call and not the second

I am using SFML and coding in C++. The program I am writing must be a recursive implementation.
My goal is to create a function that recursively draws a square to the screen in different positions and rotations dependent upon the previously drawn square.
Each subsequent square should be smaller than the previous one and rotated 45 degrees to the left (from the left corner of the previous square) or 45 degrees to the right of the previous square.
Each new square spawns two more squares etc..
My idea is to pass the upper left point and the upper right point of a square to two different recursive function calls and use these points as starting points for the subsequent squares.
The squares generated then pass their own upper left and right corners to further recursive calls, and so on.
The code I have developed is not displaying both squares that should have been generated from the recursive function calls. Only one side is being shown.
I have developed the following code (please forgive my code; I haven't been coding in C++ for very long):
DRIVER of PROGRAM ( main.cpp )
#include <SFML/System.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/Window.hpp>
#include "PTree.hpp"
using namespace std;
using namespace sf;
int main( int argc, char* argv[ ] )
{
double L = 0.0; // Length of square sides
int N = 0; // Number of times to call recursive function
L = atol( argv[ 1 ] );
N = atoi( argv[ 2 ] );
Vector2f vPoint;
vPoint.x = 0;
vPoint.y = 0;
// Create and Display Window
PTree tree( L, N );
return 0;
}
( PTree.hpp )
#ifndef PTREE_H
#define PTREE_H
using namespace std;
using namespace sf;
class PTree /*:public sf::Drawable, public sf::Transformable*/{
public:
// Constructor
PTree( double L, int N );
// Destructor
~PTree();
// Recursive function to draw Pythagorias Tree
void pTree( double L, int N, Vector2f vPoint, Vector2f vOrigin, float rotation );
private:
float width = 0;
float height = 0;
int originX = 0;
int originY = 0;
float rotation = 0;
RenderWindow window;
int angle1 = 0;
int angle2 = 0;
};
#endif // PTREE_H included
( PTree.cpp )
#include <SFML/System.hpp>
#include <SFML/Graphics.hpp>
#include <SFML/Window.hpp>
#include <math.h>
#include "PTree.hpp"
#include <iostream>
using namespace std;
using namespace sf;
// Constructor
PTree::PTree( double L, int N )
{
width = ( 6 * L );
height = ( 4 * L );
Vector2f vPoint = { width/2, height - 1 };
Vector2f vOrigin;
vOrigin.x = L/2;
vOrigin.y = L;
/* vPoint.x = width/2;
vPoint.y = height - 1;
*/
window.create( VideoMode( width, height ), "Pythagoras Fractal Tree" );
pTree( L, N, vPoint, vOrigin, 0 );
}
// Destructor
PTree::~PTree(){}
/*###########################################################################*/
// Recursive function to draw Pythagorias Tree
void PTree::pTree( double L, int N, Vector2f vPoint, Vector2f vOrigin, float rotation )
{
Vector2f vPointR;
if( N < 1 )
{
return;
}
// Define a convex shape called convexSquare
ConvexShape convexSquare( 4 );
convexSquare.setPoint( 0, Vector2f( 0, 0 ));
convexSquare.setPoint( 1, Vector2f( 0, L ));
convexSquare.setPoint( 2, Vector2f( L, L ));
convexSquare.setPoint( 3, Vector2f( L, 0 ));
convexSquare.setOutlineThickness( 1.f );
convexSquare.setFillColor( Color::Black );
convexSquare.setOutlineColor( Color::White );
convexSquare.setPosition( vPoint );
convexSquare.setOrigin( vOrigin );
convexSquare.setRotation( rotation );
while( window.isOpen( ))
{
Event event;
while( window.pollEvent( event ))
{
if( event.type == Event::Closed )
{
window.close( );
}
}
if( N >= 0 )
{
window.draw( convexSquare );
window.display( );
L = ( L * ( sqrt(2)/2 ));
N = N - 1;
rotation = rotation - 135;
cout << "LOOPS:" << N << endl;
//left
vPoint = convexSquare.getTransform( ).transformPoint( convexSquare.getPoint( 0 ));
vOrigin = convexSquare.getPoint( (angle1) );
pTree( L, N, vPoint, vOrigin, rotation );
angle1 = (( angle1 + 1 ) % 4 );
//right
vPointR = convexSquare.getTransform( ).transformPoint( convexSquare.getPoint( 3 ));
vOrigin = convexSquare.getPoint( 2 );
pTree( L, N, vPointR, vOrigin, rotation-90 );
}
}
cout << "X value =" << vPoint.x << " Y value = " << vPoint.y << endl;
So far I have tried to return various points of the convex shapes for the second recursive call to the function pTree. This did not display anything either.
Initially I was only using Vector2f vPoint and modifying it prior to each recursive call but after exhausting my knowledge base for a solution I created a new variable specifically for the right side squares called Vector2f vPointR.
The SFML documentation does not provide sufficient examples for noobs like myself; the API is essentially a list of options with minimal examples, if any, for each function. I've searched the internet to the best of my ability to see if I am passing the wrong points, but could not find an answer.
The one thing that did work (although not entirely correctly) was switching the recursive calls, meaning I moved the call for the right-side squares before the call for the left-side squares, but then the left-side squares were not displaying.
At this point I am also trying to work out the proper rotation for each square but this is the least of my problems.
Is there an issue with the way I am trying to display these squares recursively?
I am not sure where to go from here other than Stack Overflow for help.
Thanks for your time and expertise.
Don't recursively call the entire while loop; only recursively call the drawing part:
// Initialize window...
while (window.isOpen())
{
sf::Event event;
// Handle events...
window.clear();
// call the recursive function here
window.display();
}
Also, you may want to use sf::RectangleShape instead of sf::ConvexShape for drawing.
Here's a working "example":
#include <SFML/Graphics.hpp>
#include <cmath>
void drawPythagoreanTree(sf::RenderTarget&, const float, const int);
int main()
{
const float L = 150;
const int N = 14;
const unsigned width = static_cast<unsigned>(6 * L);
const unsigned height = static_cast<unsigned>(4 * L);
sf::RenderWindow window{{width, height}, "Pythagorean Tree"};
while (window.isOpen())
{
for (sf::Event event; window.pollEvent(event);)
{
if (event.type == sf::Event::Closed)
window.close();
}
window.clear(sf::Color::White);
drawPythagoreanTree(window, L, N);
window.display();
}
}
void drawPythagoreanTree(sf::RenderTarget& target, const int N,
const sf::RectangleShape& parent)
{
static const float halfSqrt2 = sqrt(2.f) / 2;
if (N < 1) return;
target.draw(parent);
auto const& sz = parent.getSize();
auto const& tf = parent.getTransform();
auto childL = parent; // copy parent's color and rotation
childL.setSize(sz * halfSqrt2); // resize
childL.setOrigin(0, childL.getSize().y); // bottom left corner
childL.setPosition(tf.transformPoint({0, 0})); // reposition
childL.rotate(-45);
drawPythagoreanTree(target, N - 1, childL);
auto childR = parent; // copy parent's color and rotation
childR.setSize(sz * halfSqrt2); // resize
childR.setOrigin(childR.getSize()); // bottom right corner
childR.setPosition(tf.transformPoint({sz.x, 0})); // reposition
childR.rotate(45);
drawPythagoreanTree(target, N - 1, childR);
}
void drawPythagoreanTree(sf::RenderTarget& target, const float L, const int N)
{
sf::RectangleShape rect{{L, L}};
// set origin to center of the rect, easier to center position on screen
rect.setOrigin(rect.getSize() / 2.f);
rect.setPosition(target.getSize().x / 2.f, target.getSize().y - L / 2.f);
rect.setFillColor(sf::Color::Black);
drawPythagoreanTree(target, N, rect);
}

Tiles being drawn in the wrong location

I've finally managed to get my tiles drawn on the screen somewhat correctly, although the location is a bit off and I can't seem to figure out why...
I'm using SFML for drawing.
Tile.hpp:
#ifndef TILE_HPP
#define TILE_HPP
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"
class Tile {
public:
Tile();
Tile(sf::Vector2i coord, int biome);
~Tile();
sf::Vector2i getCoord() const { return coord; };
int getBiome() const { return biome; };
void setCoord(sf::Vector2i coord) { this->coord = coord; };
void setBiome(int biome) { this->biome = biome; };
void draw(int x, int y, sf::RenderWindow* rw);
void update(sf::Texture& texture);
private:
sf::Vector2i coord;
int biome;
sf::Sprite sprite;
};
#endif
Tile.cpp
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include "textureManager.hpp"
#include "tile.hpp"
Tile::Tile()
{}
Tile::Tile(sf::Vector2i coord, int biome) {
this->biome = biome;
this->coord = coord;
}
Tile::~Tile(){}
void Tile::draw(int x, int y, sf::RenderWindow* rw)
{
sprite.setPosition(x, y);
rw->draw(sprite);
}
void Tile::update(sf::Texture& texture)
{
switch (biome)
{
// Not important here
}
}
Now the more relevant part: the drawing
void StatePlay::draw(const float dt)
{
game->window.setView(view);
game->window.clear(sf::Color::Black);
sf::Vector2f offset = camera.getLocation();
int newX = (offset.x / map.getTileSize()) - (map.chunkSize / 2);
int newY = (offset.y / map.getTileSize()) - (map.chunkSize / 2);
for (int x = 0; x < map.chunkSize; x++)
{
for (int y = 0; y < map.chunkSize; y++)
{
Tile tile = map.getTile(newX + x, newY + y);
tile.draw((newX + x) * map.getTileSize(), (newY + y) * map.getTileSize(), &game->window);
}
}
return;
}
StatePlay::StatePlay(Game* game)
{
this->game = game;
sf::Vector2f pos = sf::Vector2f(game->window.getSize()); // 1366x768
view.setSize(pos);
pos *= 0.5f; // 688x384
view.setCenter(pos);
// Initialize map
map.init(game->gameTime, game->textureManager.getImage("tileset.png"));
float w = (float) map.getWidth(); // 500
float h = (float) map.getHeight(); // 500
w *= 0.5f; // 250
h *= 0.5f; // 250
w *= map.getTileSize(); // 250 * 32 = 8000
h *= map.getTileSize(); // 250 * 32 = 8000
// Move camera
// Uses view::move from sfml to move the view with w and h
// Also sets camera private to w and h values, return with camera::getLocation()
camera.setLocation(&view, sf::Vector2f(w, h));
}
The result is that I only see roughly 10x10 tiles, in the bottom left corner of my screen, covering about 3/4 of it.
The correct tiles are chosen, but the draw location is wrong. It should draw the center 64x64 tiles (32 px each), or as many as fit on the screen.
I have fixed the problem. It was a very stupid mistake...
At first, without drawing anything, it is normal to center the view on 0.5f * sf::View::getSize() to get the view centered in your window, so the center was already at half of my window size. When using Camera::setLocation(), I used sf::View::move() to move the view accordingly. So when trying to center the view on the map, it added the x and y correctly, but also half of my window size, which resulted in an incorrect offset. Subtracting those values, or leaving them out, fixed this stupid problem.
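For reference, a minimal sketch of what a corrected Camera::setLocation could look like; the Camera internals aren't shown in the post, so the member name m_location here is an assumption:
// Hypothetical sketch: set the view's center to an absolute map position
// instead of move()-ing it, so the initial window-centering offset is not
// added a second time.
void Camera::setLocation(sf::View* view, sf::Vector2f target)
{
    m_location = target;        // assumed member returned by getLocation()
    view->setCenter(target);    // absolute position, not relative to the old center
}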
Thank you for the help.

Importing and Displaying .fbx files in OpenGl

I have been trying to import and display an .fbx file using the FBX SDK. I managed to load the file, but I got stuck at the part where I have to display it.
The questions:
What exactly are those indices?
How should I display the vertices?
Here is the class that I made:
3dModelBasicStructs.h
struct vertex
{
float x,y,z;
};
struct texturecoords
{
float a,b;
};
struct poligon
{
int a,b,c;
};
Model.h
#ifndef MODEL_H
#define MODEL_H
#define FBXSDK_NEW_API
#define MAX_VERTICES 80000
#define MAX_POLIGONS 80000
#include <fbxsdk.h>
#include "3dModelBasicStructs.h"
#include <iostream>
#include <GL/glut.h>
using namespace std;
class Model
{
public:
Model(char*);
~Model();
void ShowDetails();
char* GetModelName();
void SetModelName( char* );
void GetFbxInfo( FbxNode* );
void RenderModel();
void InitializeVertexBuffer( vertex* );
private:
char Name[25];
vertex vertices[MAX_VERTICES];
poligon poligons[MAX_POLIGONS];
int *indices;
int numIndices;
int numVertices;
};
#endif
Model.cpp
#include "Model.h"
Model::Model(char *filename)
{
cout<<"\nA model has been built!";
numVertices=0;
numIndices=0;
FbxManager *manager = FbxManager::Create();
FbxIOSettings *ioSettings = FbxIOSettings::Create(manager, IOSROOT);
manager->SetIOSettings(ioSettings);
FbxImporter *importer=FbxImporter::Create(manager,"");
importer->Initialize(filename,-1,manager->GetIOSettings());
FbxScene *scene = FbxScene::Create(manager,"tempName");
importer->Import(scene);
importer->Destroy();
FbxNode* rootNode = scene->GetRootNode();
this->SetModelName(filename);
if(rootNode) { this->GetFbxInfo(rootNode); }
}
Model::~Model()
{
cout<<"\nA model has been destroied!";
}
void Model::ShowDetails()
{
cout<<"\nName:"<<Name;
cout<<"\nVertices Number:"<<numVertices;
cout<<"\nIndices which i never get:"<<indices;
}
char* Model::GetModelName()
{
return Name;
}
void Model::SetModelName(char *x)
{
strcpy(Name,x);
}
void Model::GetFbxInfo( FbxNode* Node )
{
int numKids = Node->GetChildCount();
FbxNode *childNode = 0;
for ( int i=0 ; i<numKids ; i++)
{
childNode = Node->GetChild(i);
FbxMesh *mesh = childNode->GetMesh();
if ( mesh != NULL)
{
//================= Get Vertices ====================================
int numVerts = mesh->GetControlPointsCount();
for ( int j=0; j<numVerts; j++)
{
FbxVector4 vert = mesh->GetControlPointAt(j);
vertices[numVertices].x=(float)vert.mData[0];
vertices[numVertices].y=(float)vert.mData[1];
vertices[numVertices++].z=(float)vert.mData[2];
cout<<"\n"<<vertices[numVertices-1].x<<" "<<vertices[numVertices- 1].y<<" "<<vertices[numVertices-1].z;
this->InitializeVertexBuffer(vertices);
}
//================= Get Indices ====================================
int *indices = mesh->GetPolygonVertices();
numIndices+=mesh->GetPolygonVertexCount();
}
this->GetFbxInfo(childNode);
}
}
void Model::RenderModel()
{
glDrawElements(GL_TRIANGLES,36,GL_INT,indices);
}
void Model::InitializeVertexBuffer(vertex *vertices)
{
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3,GL_FLOAT,0,vertices);
//glDrawArrays(GL_TRIANGLES,0,36);
}
Sadly, when I try to use glDrawElements I get this error:
Unhandled exception at 0x77e215de in A new begging.exe: 0xC0000005: Access violation reading location 0xcdcdcdcd.
2) How should I display the vertices?
Questions like these indicate that you should work through some OpenGL tutorials; those are the basics and you need to know them.
This is a good start regarding your problem, but you'll need to work through the whole tutorial:
http://opengl.datenwolf.net/gltut/html/Basics/Tut01%20Following%20the%20Data.html
1) What exactly are those indices?
You have a list of vertices. The index of a vertex is the position at which it sits in that list. You can draw vertex arrays by their indices using glDrawElements.
Update due to comment
Say you have a cube with shared vertices (uncommon in OpenGL, but I'm too lazy to write down 24 vertices).
I have them in my program in an array that forms a list of their positions. You load them from a file; I'm writing them out as a C array:
GLfloat vertices[][3] = {
{-1,-1, 1},
{ 1,-1, 1},
{ 1, 1, 1},
{-1, 1, 1},
{-1,-1,-1},
{ 1,-1,-1},
{ 1, 1,-1},
{-1, 1,-1},
};
This gives the vertices indices (their position in the array); in the picture it looks like this:
To draw a cube we have to tell OpenGL which vertices, in which order, make up a face. So let's have a look at the faces:
We're going to build that cube out of triangles. 3 consecutive indices make up a triangle. For the cube this is
GLuint face_indices[][3] = {
{0,1,2},{2,3,0},
{1,5,6},{6,2,1},
{5,4,7},{7,6,5},
{4,0,3},{3,7,4},
{3,2,6},{6,7,2},
{4,5,0},{1,0,5}
};
You can draw this then by pointing OpenGL to the vertex array
glVertexPointer(3, GL_FLOAT, 0, &vertices[0][0]);
and issuing a batched draw call on the index array. There are 6*2 = 12 triangles, each triangle consisting of 3 vertices, which makes a list of 36 indices.
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, &face_indices[0][0]);
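Applied to the Model class from the question, a minimal sketch could look like the following. It fills the member indices instead of a shadowing local, keeps the count in numIndices, and passes GL_UNSIGNED_INT to glDrawElements (the type must be GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, or GL_UNSIGNED_INT, not GL_INT). This is an assumption about how to wire it up, not the poster's code:
// Inside GetFbxInfo(), after reading the control points:
int *meshIndices = mesh->GetPolygonVertices();      // indices into the control points
int meshIndexCount = mesh->GetPolygonVertexCount();
indices = new int[meshIndexCount];                  // member pointer, not a new local
for (int k = 0; k < meshIndexCount; k++)
    indices[k] = meshIndices[k];
numIndices = meshIndexCount;

// Drawing with the stored data:
void Model::RenderModel()
{
    glEnableClientState(GL_VERTEX_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, vertices);
    glDrawElements(GL_TRIANGLES, numIndices, GL_UNSIGNED_INT, indices);
    glDisableClientState(GL_VERTEX_ARRAY);
}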

std::vector memory, vector of unwanted 0's

My code works in my pure GLUT implementation, but I am trying to get it to work in Qt.
I have a vector of masspoints for a wire mesh system
std::vector<masspoint> m_particles;
The problem is that in my Qt version none of what I write really sticks, and I am left with an array of zeros. Basically, I am confused why the GLUT version has correct values but the Qt one does not, given that it is basically identical code. What is wrong with the Qt code?
Yes, I only see zeros when using qDebug. When I call my drawing function in the Qt version, all vertex points turn out to be 0 in all components, so nothing is seen.
int myboog = 1;
int county = 0;
// Constructors
Cloth::Cloth(float width, float height, int particles_in_width, int particles_in_height):
m_width(particles_in_width),
m_height(particles_in_height),
m_dimensionWidth(width),
m_dimensionHeight(height),
m_distanceX(width/(float)particles_in_width),
m_distanceY(height/(float)particles_in_height)
{
//Set the particle array to the given size
//Height by width
//mparticles is the name of our vector
m_particles.resize(m_width*m_height);
qDebug() << m_particles.size();
// Create the point masses to simulate the cloth
for (int x = 0; x < m_width; ++x)
{
for (int y=0; y < m_height; ++y)
{
// Place the pointmass of the cloth, lift the edges to give the wind more effect as the cloth falls
Vector3f position = Vector3f(m_dimensionWidth * (x / (float)m_width),
((x==0)||(x==m_width-1)||(y==0)||(y==m_height-1)) ? m_distanceY/2.0f:0,
m_dimensionHeight * (y / (float)m_height));
// The gravity effect is applied to new pmasspoints
m_particles[y * m_width + x] = masspoint(position,Vector3f(0,-0.06,0));
}
}
int num = (int)m_particles.size();
for (int i=0; i<num; ++i)
{
masspoint* p = &m_particles[i];
if(myboog)
{
qDebug() << "test " << *p->getPosition().getXLocation() << county;
county++;
}
}
myboog = 0;
// Calculate the normals for the first time so the initial draw is correctly lit
calculateClothNormals();
}
Code for masspoint involved in the constructor for Cloth:
#ifndef MASSPOINT_H
#define MASSPOINT_H
#include <QGLWidget>
#include "vector3f.h"
class masspoint
{
private:
Vector3f m_position; // Current Location of the pointmass
Vector3f m_velocity; // Direction and speed the pointmass is traveling in
Vector3f m_acceleration; // Speed at which the pointmass is accelerating (used for gravity)
Vector3f m_forceAccumulated; // Force that has been accumulated since the last update
Vector3f m_normal; // Normal of this pointmass, used to light the cloth when drawing
float m_damping; // Amount of velocity lost per update
bool m_stationary; // Whether this pointmass is currently capible of movement
public:
masspoint& operator= (const masspoint& particle);
//Some constructors
masspoint();
masspoint(const masspoint& particle);
masspoint(Vector3f position, Vector3f acceleration);
//Like eulur integration
void integrate(float duration);
// Accessor functions
//Get the position of the point mass
inline Vector3f getPosition() const {return m_position;}
Vector stuff involved in the constructor for Cloth:
#ifndef VECTOR3F_H
#define VECTOR3F_H
#include <math.h>
// Vector library to be used
class Vector3f
{
private:
float m_x, m_y, m_z;
public:
const float* getXLocation() const { return &m_x; }

C++ OpenGL: Ray Trace Shading Isn't Properly Shading

I'm a CS student, and for our final we were told to construct the reflections on multiple spheres via ray tracing. That's almost literally all we got for directions, except for a picture of how it should look when finished. So I need spheres, with their reflections (using ray tracing) mapped on them, with proper shading from a light.
Well I have all of it working, except having multiple spheres and the fact that it doesn't look like the picture he gave us for a rubric.
The multiple spheres thing I'm not too sure how to do, but I'd say I need to store them in a 2D array and modify a few sections of code.
What I thought was modifying sphere_intersect and find_reflect to include which sphere is being analyzed. Next, modify find_reflect so that when the new vector u is calculated, its starting point (P0) is also updated. Then, if the ray hits a sphere, it will have to count how many times the ray has been reflected and terminate at some point (after 10 times maybe), and then I'll just draw the pixel. For an added touch, I'd like to add solid colors to the spheres, which would call for finding the normal of a sphere, I believe.
Anyways I'm going to attach a picture of his, a picture of mine, and the source code. Hopefully someone can help me out on this one.
Thanks in advance!
Professor's spheres
My spheres
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
#include <math.h>
#include <string>
#define screen_width 750
#define screen_height 750
#define true 1
#define false 0
#define perpendicular 0
int gridXsize = 20;
int gridZsize = 20;
float plane[] = {0.0, 1.0, 0.0, -50.0,};
float sphere[] = {250.0, 270.0, -100.0, 100.0};
float eye[] = {0.0, 400.0, 550.0};
float light[] = {250.0, 550.0, -200.0};
float dot(float *u, float *v)
{
return u[0]*v[0] + u[1]*v[1] + u[2]*v[2];
}
void norm(float *u)
{
float norm = sqrt(abs(dot(u,u)));
for (int i =0; i <3; i++)
{
u[i] = u[i]/norm;
}
}
float plane_intersect(float *u, float *pO)
{
float normt[3] = {plane[0], plane[1], plane[2]};
float s;
if (dot(u,normt) == 0)
{
s = -10;
}
else
{
s = (plane[3]-(dot(pO,normt)))/(dot(u,normt));
}
return s;
}
float sphere_intersect(float *u, float *pO)
{
float deltaP[3] = {sphere[0]-pO[0],sphere[1]-pO[1],sphere[2]-pO[2]};
float deltLen = sqrt(abs(dot(deltaP,deltaP)));
float t=0;
float answer;
float det;
if ((det =(abs(dot(u,deltaP)*dot(u,deltaP))- (deltLen*deltLen)+sphere[3]*sphere[3])) < 0)
{
answer = -10;
}
else
{
t =-1*dot(u,deltaP)- sqrt(det) ;
if (t>0)
{
answer = t;
}
else
{
answer = -10;
}
}
return answer;
}
void find_reflect(float *u, float s, float *pO)
{
float n[3] = {pO[0]+s *u[0]-sphere[0],pO[1]+s *u[1]-sphere[1],pO[2]+s *u[2]- sphere[2]};
float l[3] = {s *u[0],s *u[1],s *u[2]};
u[0] =(2*dot(l,n)*n[0])-l[0];
u[1] = (2*dot(l,n)*n[1])-l[1];
u[2] = (2*dot(l,n)*n[2])-l[2];
}
float find_shade(float *u,float s, float *pO)
{
float answer;
float lightVec[3] = {light[0]-(pO[0]+s *u[0]), light[1]-(pO[1]+s *u[1]), light[2]-(pO[2]+s *u[2])};
float n[3] = {pO[0]+s *u[0]-sphere[0],pO[1]+s *u[1]-sphere[1],pO[2]+s *u[2]-sphere[2]};
answer = -1*dot(lightVec,n)/(sqrt(abs(dot(lightVec,lightVec)))*sqrt(abs(dot(n,n))));
return answer;
}
void init()
{
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0,screen_width,0,screen_height);
}
void display()
{
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
for (int i=0; i < screen_width; i++)
{
for (int j=0; j < screen_height; j++)
{
float ray[3] = {1*(eye[0]-i),-1*(eye[1]-j),1*eye[2]};
float point[3] = {i,j,0};
norm(ray);
int plotted = false;
while (!plotted)
{
float s_plane = plane_intersect(ray, point);
float s_sphere = sphere_intersect(ray, point);
if (s_plane <= 0 && s_sphere <=0)
{
glColor3f(0,0,0);
glBegin(GL_POINTS);
glVertex3f(i,j,0);
glEnd();
plotted = true;
}
else if (s_sphere >= 0 && (s_plane <=0 || s_sphere <= s_plane))
{
find_reflect(ray, s_sphere, point);
}
else if (s_plane >=0 && (s_sphere <=0 ||s_plane <= s_sphere))
{
float shade = find_shade(ray, s_plane, point);
float xx = s_plane*ray[0] + eye[0];
float z = s_plane*ray[2] + eye[2];
if (abs((int)xx/gridXsize)%2 == abs((int)z/gridZsize)%2)
{
glColor3f(shade,0,0);
}
else
{
glColor3f(shade,shade,shade);
}
glBegin(GL_POINTS);
glVertex3f(i,j,0);
glEnd();
plotted = true;
}
}
}
}
glFlush();
}
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutCreateWindow("Ray Trace with Sphere.");
glutInitWindowSize(screen_width,screen_height);
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB);
glutDisplayFunc(display);
init();
glutMainLoop();
return 0;
}
The professor did not tell you too much, because such a topic is covered thousands of times over the web; just check out "Whitted Raytracing" ;) It's homework, and 5 minutes of googling around would solve the issue... Some clues to help without doing your homework for you:
Do it step by step; don't try to reproduce the picture in one go.
Get one sphere working: if the ray hits the plane, draw a green pixel; if it hits the sphere, a red pixel; if nothing, black. That's enough to get the intersection computation right. From your picture, it looks like you don't have the intersections right, for a start.
Same as the previous step, but with several spheres: check the intersection for all objects and keep the closest intersection from the point of view (see the sketch after this list).
Same as the previous step, but also compute the amount of light received at each intersection found, to get shades of red for the spheres and shades of green for the plane (hint: dot product ^^).
Texture for the plane.
Reflection for the spheres. Pro tip: a mirror doesn't reflect 100% of the light, just a fraction of it.
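To illustrate the "several spheres" and "dot product" steps above, here is a minimal sketch. The spheres array, sphere_intersect_one(), and lambert_shade() are hypothetical generalizations of the poster's sphere[], sphere_intersect(), dot(), norm(), and light[], not code from the assignment:
#define NUM_SPHERES 3
float spheres[NUM_SPHERES][4] = {   // x, y, z, radius for each sphere (values made up)
    {250.0, 270.0, -100.0, 100.0},
    {450.0, 200.0, -150.0,  80.0},
    {100.0, 180.0,  -50.0,  60.0},
};

// "Several spheres" step: test every sphere and keep the closest positive hit.
// sphere_intersect_one(u, pO, k) is assumed to be sphere_intersect() with the
// sphere index k passed in.
int closest_sphere(float *u, float *pO, float *sOut)
{
    int best = -1;
    float bestS = 1e30f;
    for (int k = 0; k < NUM_SPHERES; k++)
    {
        float s = sphere_intersect_one(u, pO, k);
        if (s > 0 && s < bestS) { bestS = s; best = k; }
    }
    *sOut = bestS;
    return best;                    // -1 means no sphere was hit
}

// "Amount of light" step: Lambert shading is just the dot product between the
// unit surface normal and the unit direction towards the light, clamped at 0.
float lambert_shade(float *hit, int k)
{
    float n[3] = {hit[0]-spheres[k][0], hit[1]-spheres[k][1], hit[2]-spheres[k][2]};
    float l[3] = {light[0]-hit[0], light[1]-hit[1], light[2]-hit[2]};
    norm(n);                        // norm(), dot() and light[] are the poster's
    norm(l);
    float d = dot(n, l);
    return d > 0 ? d : 0;           // surfaces facing away from the light are dark
}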