Raylib: unexpected number of collisions in elastic collision simulation - C++

How can I fix the ridiculous number of collisions I get when I increase the mass or speed?
#include "raylib.h"
#include <cstdio>
#define screenWidth 800
#define screenHeight 450
Vector2 GetNewSpeeds(float mass1, float speed1, float mass2, float speed2)
{
Vector2 newSpeeds;
newSpeeds.x = (speed1 * (mass1 - mass2) + 2 * mass2 * speed2) / (mass1 + mass2);
newSpeeds.y = (speed2 * (mass2 - mass1) + 2 * mass1 * speed1) / (mass1 + mass2);
return newSpeeds;
}
int main()
{
InitWindow(screenWidth, screenHeight, "1D Elastic Collision Simulation");
Rectangle square1 = { 90.0f, screenHeight / 2, 50.0f, 50.0f };
Vector2 speed1 = { 0.0f, 0.0f };
float mass1 = 1.0f;
Rectangle square2 = { 700.0f, screenHeight / 2, 50.0f, 50.0f };
Vector2 speed2 = { -200.0f, 0.0f };
float mass2 = 1000000.0f;
Rectangle wall = { 0, 0, 20, screenHeight };
Color wallColor = LIGHTGRAY;
int collisionCount = 0;
bool touching = false;
while (!WindowShouldClose())
{
ClearBackground(RAYWHITE);
if (CheckCollisionRecs(square1, wall)) {
speed1.x = -speed1.x;
collisionCount++;
}
if (CheckCollisionRecs(square1, square2))
{
if (!touching) {
Vector2 newSpeeds = GetNewSpeeds(mass1, speed1.x, mass2, speed2.x);
speed1.x = newSpeeds.x;
speed2.x = newSpeeds.y;
collisionCount++;
touching = true;
}
}
else {
touching = false;
}
square1.x += speed1.x * GetFrameTime();
square2.x += speed2.x * GetFrameTime();
DrawRectangleRec(square1, BLUE);
DrawRectangleRec(square2, RED);
DrawRectangleRec(wall, wallColor);
char text[32];
sprintf_s(text, "Collision Count: %d", collisionCount);
DrawText(text, 10, 10, 20, BLACK);
EndDrawing();
}
CloseWindow();
return 0;
}
I was expecting the blocks to collide smoothly and produce the physically correct number of collisions, but it seems to depend on the frame time or something. When I increase the mass or speed, the simulation loses its smoothness and starts giving unexpected results.
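What usually causes this is that the physics is advanced exactly once per rendered frame with GetFrameTime(): at high speeds or extreme mass ratios the squares move many pixels per frame, so they overlap for several frames or tunnel straight past each other, and the touching flag then misses or double-counts impacts. A common fix is to decouple the physics from the frame rate and advance it in small fixed sub-steps, with a positional correction so the bodies separate after a hit. Below is a minimal sketch of that idea that slots into the existing while loop; the 1 ms step size and the two separation lines are illustrative choices, not part of the original code.

// Inside while (!WindowShouldClose()), instead of one update per frame:
const float fixedStep = 1.0f / 1000.0f;            // illustrative 1 ms physics step
float remaining = GetFrameTime();
while (remaining > 0.0f)
{
    float dt = (remaining < fixedStep) ? remaining : fixedStep;

    if (CheckCollisionRecs(square1, wall)) {
        speed1.x = -speed1.x;
        square1.x = wall.x + wall.width;           // push the square out of the wall
        collisionCount++;
    }
    if (CheckCollisionRecs(square1, square2)) {
        if (!touching) {
            Vector2 newSpeeds = GetNewSpeeds(mass1, speed1.x, mass2, speed2.x);
            speed1.x = newSpeeds.x;
            speed2.x = newSpeeds.y;
            square1.x = square2.x - square1.width; // separate: push the light square out of the heavy one
            collisionCount++;
            touching = true;
        }
    }
    else {
        touching = false;
    }

    square1.x += speed1.x * dt;
    square2.x += speed2.x * dt;
    remaining -= dt;
}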

Related

OpenGL is rendering only one layer on the z axis

I'm writing an FPS game. I'm using SFML for the window, sounds, etc. and OpenGL for the 3D graphics, but I've found a problem: OpenGL is rendering only the vertices that have 0 on the z-axis. What have I done wrong? I have a struct containing topLeft, topRight, bottomLeft and bottomRight, and they're sf::Vector3f.
//engine
using namespace DlsE;
DlsE::Engine::Engine()
{
quad = Quad(
Vector3x3<float>(
sf::Vector3f(200 - 200, 200 - 200, 50),
sf::Vector3f(200 + 200, 200 - 200, 50),
sf::Vector3f(200 - 200, 200 + 200, -50),
sf::Vector3f(200 + 200, 200 + 200, -50)
)
);
}
DlsE::Engine::~Engine()
{
}
void DlsE::Engine::Draw(sf::RenderWindow * okno)
{
okno->popGLStates();
glColor3f(1, 0, 0);
quad.Draw();
quad.RotateByDeg(2.f*manager::deltaTime.asSeconds(), 0, 0,quad.MakeCenterContext());
glFlush();
okno->pushGLStates();
}
//Quad.cpp
Quad::Quad()
{
}
DlsE::Quad::Quad(Vector3x3<float> p) : p(p)
{
}
Quad::~Quad()
{
}
void DlsE::Quad::Draw()
{
glBegin(GL_TRIANGLES);
glVertex3f(p.topLeft.x, p.topLeft.y, p.topLeft.z);
glVertex3f(p.topRight.x, p.topRight.y, p.topRight.z);
glVertex3f(p.bottomRight.x, p.bottomRight.y, p.bottomRight.z);
glVertex3f(p.topLeft.x, p.topLeft.y, p.topLeft.z);
glVertex3f(p.bottomRight.x, p.bottomRight.y, p.bottomRight.z);
glVertex3f(p.bottomLeft.x, p.bottomLeft.y, p.bottomLeft.z);
glEnd();
}
void DlsE::Quad::RotateByDeg(float x, float y, float z, sf::Vector3f context)
{
p = RotateX(x*PI / 180.f, p, context);
p = RotateY(y*PI / 180.f, p, context);
p = RotateZ(z*PI / 180.f, p, context);
}
sf::Vector3f DlsE::Quad::MakeCenterContext()
{
return sf::Vector3f(
(p.bottomLeft.x + p.bottomRight.x + p.topLeft.x + p.topRight.x) / 4,
(p.bottomLeft.y + p.bottomRight.y + p.topLeft.y + p.topRight.y) / 4,
(p.bottomLeft.z + p.bottomRight.z + p.topLeft.z + p.topRight.z) / 4);
}
//Game.cpp
std::string Game::Update(sf::RenderWindow * okno)
{
return "GAME";
}
void Game::Draw(sf::RenderWindow * okno)
{
glClearColor(0, 1, 1, 0);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT );
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
engine.Draw(okno);
}
Game::Game()
{
}
Game::~Game()
{
}
(screenshot)

Billboarding C++

I got code from my teacher that currently shows a 3D globe and a 2D particle system. The camera moves around in circles. The particle system is supposed to face the camera.
According to my lecture notes, I have to multiply the billboard with the inverse of the camera's view matrix. I would love to try that but I have trouble using the variable for the view matrix.
#include "pch.h"
#include <Kore/Application.h>
#include <Kore/IO/FileReader.h>
#include <Kore/Math/Core.h>
#include <Kore/Math/Random.h>
#include <Kore/System.h>
#include <Kore/Input/Keyboard.h>
#include <Kore/Input/Mouse.h>
#include <Kore/Audio/Mixer.h>
#include <Kore/Graphics/Image.h>
#include <Kore/Graphics/Graphics.h>
#include <Kore/Log.h>
#include "ObjLoader.h"
#include "Collision.h"
#include "PhysicsWorld.h"
#include "PhysicsObject.h"
using namespace Kore;
// A simple particle implementation
class Particle {
public:
VertexBuffer* vb;
IndexBuffer* ib;
mat4 M;
// The current position
vec3 position;
// The current velocity
vec3 velocity;
// The remaining time to live
float timeToLive;
// The total time time to live
float totalTimeToLive;
// Is the particle dead (= ready to be re-spawned?)
bool dead;
void init(const VertexStructure& structure) {
vb = new VertexBuffer(4, structure,0);
float* vertices = vb->lock();
SetVertex(vertices, 0, -1, -1, 0, 0, 0);
SetVertex(vertices, 1, -1, 1, 0, 0, 1);
SetVertex(vertices, 2, 1, 1, 0, 1, 1);
SetVertex(vertices, 3, 1, -1, 0, 1, 0);
vb->unlock();
// Set index buffer
ib = new IndexBuffer(6);
int* indices = ib->lock();
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 0;
indices[4] = 2;
indices[5] = 3;
ib->unlock();
dead = true;
}
void Emit(vec3 pos, vec3 velocity, float timeToLive) {
position = pos;
this->velocity = velocity;
dead = false;
this->timeToLive = timeToLive;
totalTimeToLive = timeToLive;
}
Particle() {
}
void SetVertex(float* vertices, int index, float x, float y, float z, float u, float v) {
vertices[index* 8 + 0] = x;
vertices[index*8 + 1] = y;
vertices[index*8 + 2] = z;
vertices[index*8 + 3] = u;
vertices[index*8 + 4] = v;
vertices[index*8 + 5] = 0.0f;
vertices[index*8 + 6] = 0.0f;
vertices[index*8 + 7] = -1.0f;
}
void render(TextureUnit tex, Texture* image) {
Graphics::setTexture(tex, image);
Graphics::setVertexBuffer(*vb);
Graphics::setIndexBuffer(*ib);
Graphics::drawIndexedVertices();
}
void Integrate(float deltaTime) {
timeToLive -= deltaTime;
if (timeToLive < 0.0f) {
dead = true;
}
// Note: We are using no forces or gravity at the moment.
position += velocity * deltaTime;
// Build the matrix
M = mat4::Translation(position.x(), position.y(), position.z()) * mat4::Scale(0.2f, 0.2f, 0.2f);
}
};
class ParticleSystem {
public:
// The center of the particle system
vec3 position;
// The minimum coordinates of the emitter box
vec3 emitMin;
// The maximal coordinates of the emitter box
vec3 emitMax;
// The list of particles
Particle* particles;
// The number of particles
int numParticles;
// The spawn rate
float spawnRate;
// When should the next particle be spawned?
float nextSpawn;
ParticleSystem(int maxParticles, const VertexStructure& structure ) {
particles = new Particle[maxParticles];
numParticles = maxParticles;
for (int i = 0; i < maxParticles; i++) {
particles[i].init(structure);
}
spawnRate = 0.05f;
nextSpawn = spawnRate;
position = vec3(0.5f, 1.3f, 0.5f);
float b = 0.1f;
emitMin = position + vec3(-b, -b, -b);
emitMax = position + vec3(b, b, b);
}
void update(float deltaTime) {
// Do we need to spawn a particle?
nextSpawn -= deltaTime;
bool spawnParticle = false;
if (nextSpawn < 0) {
spawnParticle = true;
nextSpawn = spawnRate;
}
for (int i = 0; i < numParticles; i++) {
if (particles[i].dead) {
if (spawnParticle) {
EmitParticle(i);
spawnParticle = false;
}
}
particles[i].Integrate(deltaTime);
}
}
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V) {
Graphics::setBlendingMode(BlendingOperation::SourceAlpha, BlendingOperation::InverseSourceAlpha);
Graphics::setRenderState(RenderState::DepthWrite, false);
/************************************************************************/
/* Exercise 7 1.1 */
/************************************************************************/
/* Change the matrix V in such a way that the billboards are oriented towards the camera */
/************************************************************************/
/* Exercise 7 1.2 */
/************************************************************************/
/* Animate using at least one new control parameter */
for (int i = 0; i < numParticles; i++) {
// Skip dead particles
if (particles[i].dead) continue;
Graphics::setMatrix(mLocation, particles[i].M * V);
particles[i].render(tex, image);
}
Graphics::setRenderState(RenderState::DepthWrite, true);
}
float getRandom(float minValue, float maxValue) {
int randMax = 1000000;
int randInt = Random::get(0, randMax);
float r = (float) randInt / (float) randMax;
return minValue + r * (maxValue - minValue);
}
void EmitParticle(int index) {
// Calculate a random position inside the box
float x = getRandom(emitMin.x(), emitMax.x());
float y = getRandom(emitMin.y(), emitMax.y());
float z = getRandom(emitMin.z(), emitMax.z());
vec3 pos;
pos.set(x, y, z);
vec3 velocity(0, 0.3f, 0);
particles[index].Emit(pos, velocity, 3.0f);
}
};
namespace {
const int width = 1024;
const int height = 768;
double startTime;
Shader* vertexShader;
Shader* fragmentShader;
Program* program;
float angle = 0.0f;
// null terminated array of MeshObject pointers
MeshObject* objects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// null terminated array of PhysicsObject pointers
PhysicsObject* physicsObjects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// The view projection matrix aka the camera
mat4 P;
mat4 View;
mat4 PV;
vec3 cameraPosition;
MeshObject* sphere;
PhysicsObject* po;
PhysicsWorld physics;
// uniform locations - add more as you see fit
TextureUnit tex;
ConstantLocation pvLocation;
ConstantLocation mLocation;
ConstantLocation tintLocation;
Texture* particleImage;
ParticleSystem* particleSystem;
double lastTime;
void update() {
double t = System::time() - startTime;
double deltaT = t - lastTime;
//Kore::log(Info, "%f\n", deltaT);
lastTime = t;
Kore::Audio::update();
Graphics::begin();
Graphics::clear(Graphics::ClearColorFlag | Graphics::ClearDepthFlag, 0xff9999FF, 1000.0f);
Graphics::setFloat4(tintLocation, vec4(1, 1, 1, 1));
program->set();
angle += 0.3f * deltaT;
float x = 0 + 3 * Kore::cos(angle);
float z = 0 + 3 * Kore::sin(angle);
cameraPosition.set(x, 2, z);
//PV = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100) * mat4::lookAt(vec3(0, 2, -3), vec3(0, 2, 0), vec3(0, 1, 0));
P = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100);
View = mat4::lookAt(vec3(x, 2, z), vec3(0, 2, 0), vec3(0, 1, 0));
PV = P * View;
Graphics::setMatrix(pvLocation, PV);
// iterate the MeshObjects
MeshObject** current = &objects[0];
while (*current != nullptr) {
// set the model matrix
Graphics::setMatrix(mLocation, (*current)->M);
(*current)->render(tex);
++current;
}
// Update the physics
physics.Update(deltaT);
PhysicsObject** currentP = &physics.physicsObjects[0];
while (*currentP != nullptr) {
(*currentP)->UpdateMatrix();
Graphics::setMatrix(mLocation, (*currentP)->Mesh->M);
(*currentP)->Mesh->render(tex);
++currentP;
}
particleSystem->update(deltaT);
particleSystem->render(tex, particleImage, mLocation, View);
Graphics::end();
Graphics::swapBuffers();
}
void SpawnSphere(vec3 Position, vec3 Velocity) {
PhysicsObject* po = new PhysicsObject();
po->SetPosition(Position);
po->Velocity = Velocity;
po->Collider.radius = 0.2f;
po->Mass = 5;
po->Mesh = sphere;
// The impulse should carry the object forward
// Use the inverse of the view matrix
po->ApplyImpulse(Velocity);
physics.AddObject(po);
}
void keyDown(KeyCode code, wchar_t character) {
if (code == Key_Space) {
// The impulse should carry the object forward
// Use the inverse of the view matrix
vec4 impulse(0, 0.4, 2, 0);
mat4 viewI = View;
viewI.Invert();
impulse = viewI * impulse;
vec3 impulse3(impulse.x(), impulse.y(), impulse.z());
SpawnSphere(cameraPosition + impulse3 *0.2f, impulse3);
}
}
void keyUp(KeyCode code, wchar_t character) {
if (code == Key_Left) {
// ...
}
}
void mouseMove(int x, int y, int movementX, int movementY) {
}
void mousePress(int button, int x, int y) {
}
void mouseRelease(int button, int x, int y) {
}
void init() {
FileReader vs("shader.vert");
FileReader fs("shader.frag");
vertexShader = new Shader(vs.readAll(), vs.size(), VertexShader);
fragmentShader = new Shader(fs.readAll(), fs.size(), FragmentShader);
// This defines the structure of your Vertex Buffer
VertexStructure structure;
structure.add("pos", Float3VertexData);
structure.add("tex", Float2VertexData);
structure.add("nor", Float3VertexData);
program = new Program;
program->setVertexShader(vertexShader);
program->setFragmentShader(fragmentShader);
program->link(structure);
tex = program->getTextureUnit("tex");
pvLocation = program->getConstantLocation("PV");
mLocation = program->getConstantLocation("M");
tintLocation = program->getConstantLocation("tint");
objects[0] = new MeshObject("Base.obj", "Level/basicTiles6x6.png", structure);
objects[0]->M = mat4::Translation(0.0f, 1.0f, 0.0f);
sphere = new MeshObject("ball_at_origin.obj", "Level/unshaded.png", structure);
SpawnSphere(vec3(0, 2, 0), vec3(0, 0, 0));
Graphics::setRenderState(DepthTest, true);
Graphics::setRenderState(DepthTestCompare, ZCompareLess);
Graphics::setTextureAddressing(tex, U, Repeat);
Graphics::setTextureAddressing(tex, V, Repeat);
particleImage = new Texture("SuperParticle.png", true);
particleSystem = new ParticleSystem(100, structure);
}
}
int kore(int argc, char** argv) {
Application* app = new Application(argc, argv, width, height, 0, false, "Exercise7");
init();
app->setCallback(update);
startTime = System::time();
lastTime = 0.0f;
Kore::Mixer::init();
Kore::Audio::init();
Keyboard::the()->KeyDown = keyDown;
Keyboard::the()->KeyUp = keyUp;
Mouse::the()->Move = mouseMove;
Mouse::the()->Press = mousePress;
Mouse::the()->Release = mouseRelease;
app->start();
delete app;
return 0;
}
There's a comment where the teacher wants us to add the code.
The variable for the view matrix, "View", is declared in a namespace. I've only ever used namespaces as libraries, but this one doesn't have a name, so how do I use it?
The comment says that we should use the matrix V. So do I just add V = inverse view matrix * model matrix to the code, and that removes the rotation?
I'm sorry for the stupid questions; it's supposed to be a class for beginners, but it's really anything but. The lecture notes aren't very helpful when it comes to the programming part, and I've only found tutorials for OpenGL, Unity or DirectX, and we're not using any of those.
Please help me, I need to hand this in by Saturday morning, and I've already spent the last two days trying out code and I've got nothing so far!
You can find the whole thing here: https://github.com/TUDGameTechnology/Exercise7
You don't have to do anything special to access an unnamed namespace. This thread explains more.
You are most probably trying to reference View within methods that cannot see your namespace because of the order in which they are defined in your file.
This line in your update method:
particleSystem->render(tex, particleImage, mLocation, View);
is already passing View into the render method.
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V)
That means that in this case mat4 V is your camera view matrix.
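Not a full solution to the exercise, but as a sketch of one way to use it: you can strip the camera translation from the inverted view matrix and pass only the inverse rotation into render, relying only on calls that already appear in the file. This assumes mat4::lookAt builds View as a rotation followed by a translation by -eye, the way gluLookAt does.

// In update(), just before the particleSystem->render(...) call:
mat4 invView = View;
invView.Invert();                          // inverse(View) = Translation(eye) * R^T
mat4 billboardRot =
    mat4::Translation(-cameraPosition.x(), -cameraPosition.y(), -cameraPosition.z())
    * invView;                             // cancels the translation, leaving R^T
// Pass the pure rotation instead of the raw view matrix:
particleSystem->render(tex, particleImage, mLocation, billboardRot);
// render() then sets particles[i].M * billboardRot, so each quad is rotated by the
// inverse of the camera rotation and always faces the camera.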

How can I rotate a triangle in OpenGL?

I want to rotate my triangle in OpenGL; the program runs on a Raspberry Pi.
I can draw the triangle and move it,
but I have no idea how to rotate it.
Nothing rotates.
#include <cstdio>
#include <ctime>
#include <cmath>
#include <string>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES/gl.h>
#include <bcm_host.h>
EGLDisplay Disp;
EGLSurface Surface;
EGLContext Context;
int ScrWidth, ScrHeight;
float MVPMatrix[16];
float ProjectionMatrix[16];
float ViewMatrix[16];
using namespace std;
class Shader
{
private:
string VertexShaderFile;
string FragmentShaderFile;
GLuint Load(GLenum type, string FileName)
{
(Compile shader)
}
GLuint Program;
bool Linked;
public:
Shader(string FileNameV, string FileNameF)
{
Linked = false;
VertexShaderFile = FileNameV;
FragmentShaderFile = FileNameF;
}
bool Load()
{
(Link vertex/fragment shader)
}
void Use()
{
glUseProgram(Program);
}
int GetAttrLoc(const char *Name)
{
return glGetAttribLocation(Program, Name);
}
int GetUniformLoc(const char *Name)
{
return glGetUniformLocation(Program, Name);
}
~Shader()
{
if(Linked)
{
Linked = false;
glDeleteProgram(Program);
}
}
};
class Triangle
{
private:
const int COORDS_PER_VERTEX = 3;
const int vertexCount = 9 / COORDS_PER_VERTEX; //9: Length of triangleCoords
const int vertexStride = COORDS_PER_VERTEX * 4; // 4 bytes per vertex
static float TriangleCoords [];
float Color[4];
float XOff;
float YOff;
float ZOff;
Shader *S;
public:
Triangle()
{
XOff = YOff = ZOff = 0;
S = new Shader("Shaders/test.vsh", "Shaders/test.fsh");
if (!S->Load())
{
delete S;
S = NULL;
}
}
void SetColor(int R, int G, int B, int A)
{
Color[0] = R / 255.0;
Color[1] = G / 255.0;
Color[2] = B / 255.0;
Color[3] = A / 255.0;
}
void SetXYZ(int X, int Y, int Z)
{
(Sets position)
}
bool Draw()
{
float TriangleCoords[] = { // in counterclockwise order:
-0.0 + XOff, 0.622008459 + YOff, 0.0 + ZOff, // top
-0.5 + XOff, -0.311004243 + YOff, 0.0 + ZOff, // bottom left
0.5 + XOff, -0.311004243 + YOff, 0.0 + ZOff // bottom right
};
printf("%f\n", TriangleCoords[1]);
//glMatrixMode(GL_PROJECTION);
if (S == NULL)
return false;
S->Use();
// Load the vertex data
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, TriangleCoords);
// get handle to shape's transformation matrix
// Pass the projection and view transformation to the shader
//UniformMatrix4fv(S->GetUniformLoc("uMVPMatrix"), 1, false, MVPMatrix);
glUniform4fv(S->GetUniformLoc("vColor"), 1, Color);
glEnableVertexAttribArray(0);
//glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
float X = LocalTime->tm_hour / 23.0;
float Y = LocalTime->tm_min / 59.0;
float Z = LocalTime->tm_sec / 59.0;
glTranslatef(0, 0, 1);
glRotatef(60, 1.f, 0.f, 0.f);
glRotatef(30, 0.f, 1.f, 0.f);
glRotatef(30, 0.f, 0.f, 1.f);
glDrawArrays(GL_TRIANGLES, 0, 3);
//glPopMatrix();
return true;
}
};
bool InitDisplay()
{
bcm_host_init();
Disp = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if(eglInitialize(Disp, NULL, NULL) != EGL_TRUE)
{
printf("Display initialize error.\n");
return false;
}
printf("Display initialized.\n");
static const EGLint AttrList[] =
{
EGL_RED_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_BLUE_SIZE, 8,
EGL_ALPHA_SIZE, 8,
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_NONE
};
EGLConfig Config;
int ConfigCount;
if(eglChooseConfig(Disp, AttrList, &Config, 1, &ConfigCount) != EGL_TRUE)
{
printf("Display choose config error.\n");
return false;
}
printf("Display config chosen. %d configs.\n", ConfigCount);
//if(eglBindAPI(EGL_OPENGL_ES_API) != EGL_TRUE)
//{
// printf("Bind API error.\n");
// return false;
//}
//printf("API bound.\n");
static const EGLint ContextAttr[] =
{
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE
};
if((Context = eglCreateContext(Disp, Config, EGL_NO_CONTEXT, ContextAttr)) == EGL_NO_CONTEXT)
{
printf("Create context error.\n");
return false;
}
printf("Context created.\n");
if(graphics_get_display_size(0 /* LCD */, &ScrWidth, &ScrHeight) < 0)
{
printf("Get screen size error.\n");
return false;
}
printf("Got screen size. %dx%d\n", ScrWidth, ScrHeight);
DISPMANX_DISPLAY_HANDLE_T DispmanDisp;
DispmanDisp = vc_dispmanx_display_open(0 /* LCD */);
printf("Dispmanx - Display opened.\n");
DISPMANX_UPDATE_HANDLE_T DispmanUpdate;
DispmanUpdate = vc_dispmanx_update_start(0);
printf("Dispmanx - Update started.\n");
DISPMANX_ELEMENT_HANDLE_T DispmanElement;
VC_RECT_T DestRect;
VC_RECT_T SrcRect;
DestRect.x = 0;
DestRect.y = 0;
DestRect.width = ScrWidth;
DestRect.height = ScrHeight;
SrcRect.x = 0;
SrcRect.y = 0;
SrcRect.width = ScrWidth << 16;
SrcRect.height = ScrHeight << 16;
DispmanElement= vc_dispmanx_element_add(
DispmanUpdate,
DispmanDisp,
0/*layer*/,
&DestRect,
0/*src*/,
&SrcRect,
DISPMANX_PROTECTION_NONE,
0 /*alpha*/,
0/*clamp*/,
0/*transform*/
);
printf("Dispmanx - Element added.\n");
static EGL_DISPMANX_WINDOW_T NativeWindow;
NativeWindow.element = DispmanElement;
NativeWindow.width = ScrWidth;
NativeWindow.height = ScrHeight;
vc_dispmanx_update_submit_sync(DispmanUpdate);
printf("Dispmanx - Sync submited.\n");
if((Surface = eglCreateWindowSurface(Disp, Config, &NativeWindow, NULL)) == EGL_NO_SURFACE)
{
printf("Create surface error.\n");
return false;
}
printf("Surface created\n");
if(eglMakeCurrent(Disp, Surface, Surface, Context) != EGL_TRUE)
{
printf("Make onnection between context and surface error.\n");
return false;
}
printf("Connection made between context and surface.\n");
glEnable(GL_CULL_FACE);
glMatrixMode(GL_MODELVIEW);
printf("Graphics system ready.\n");
return true;
}
void makeFrustum(float fovY, float aspectRatio, float front, float back)
{
const float DEG2RAD = 3.14159265 / 180;
float tangent = tan(fovY / 2 * DEG2RAD); // tangent of half fovY
float height = front * tangent; // half height of near plane
float width = height * aspectRatio; // half width of near plane
// params: left, right, bottom, top, near, far
glFrustumf(-width, width, -height, height, front, back);
}
void DrawLoop()
{
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glViewport(0, 0, ScrWidth, ScrHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
makeFrustum(45.0, ScrWidth / (float)ScrHeight, 1, 500);
glEnableClientState(GL_VERTEX_ARRAY);
Triangle T1;
Triangle T2;
Triangle T3;
Triangle T4;
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.f, 0.f, -50.f);
while (1)
{
time_t Time;
time(&Time);
tm *LocalTime = localtime(&Time);
printf("%d:%d:%d\n", LocalTime->tm_hour, LocalTime->tm_min, LocalTime->tm_sec);
float R = LocalTime->tm_hour / 23.0;
float G = LocalTime->tm_min / 59.0;
float B = LocalTime->tm_sec / 59.0;
T1.SetColor(255, 0, 0, 255);
T1.SetXYZ(B * ScrWidth, B * ScrHeight, 0);
//glClearColor(0, 0, 0, 1.0);
//glClear(GL_COLOR_BUFFER_BIT);
if (!T1.Draw() || !T2.Draw() || !T3.Draw() || !T4.Draw())
{
return;
}
glFlush();
eglSwapBuffers(Disp, Surface);
}
}
int main()
{
if(!InitDisplay())
{
printf("Display initialize error.\n");
return false;
}
DrawLoop();
return 0;
}
What should I do to rotate the triangle?
I referenced working code, but it still doesn't rotate.
You're trying to use OpenGL ES 1.1 functions (glLoadIdentity, glMatrixMode, glTranslatef, glRotatef, etc) in an OpenGL ES 2.0 context - this won't work. You can use either OpenGL ES 1.1 OR OpenGL ES 2.0, but you can't use both at the same time from the same context.
I would suggest sticking with OpenGL ES 2.0 using shaders, and learning the OpenGL ES 2.0 way of doing things, as this is how all of the newer APIs work.
To do translation and rotation you need to encode it into an MVP matrix and pass this as a uniform to the vertex shader which you use when calculating gl_Position. Some examples here:
https://open.gl/transformations
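For example, a minimal version of that might look like the sketch below; uMVPMatrix and aPosition are illustrative names rather than the ones in your shaders, and it assumes the program has already been made current (S->Use()).

// GLSL ES 2.0 vertex shader (illustrative):
//   attribute vec4 aPosition;
//   uniform mat4 uMVPMatrix;
//   void main() { gl_Position = uMVPMatrix * aPosition; }

#include <GLES2/gl2.h>
#include <cmath>

// Builds a rotation about the Z axis (column-major, as OpenGL expects),
// uploads it as the MVP uniform and draws one triangle.
void DrawRotated(GLuint program, float angleRadians, const GLfloat* triangleCoords)
{
    GLfloat c = std::cos(angleRadians), s = std::sin(angleRadians);
    GLfloat mvp[16] = {
        c,  s, 0, 0,     // column 0
       -s,  c, 0, 0,     // column 1
        0,  0, 1, 0,     // column 2
        0,  0, 0, 1      // column 3
    };
    glUniformMatrix4fv(glGetUniformLocation(program, "uMVPMatrix"), 1, GL_FALSE, mvp);

    GLint posLoc = glGetAttribLocation(program, "aPosition");
    glVertexAttribPointer((GLuint)posLoc, 3, GL_FLOAT, GL_FALSE, 0, triangleCoords);
    glEnableVertexAttribArray((GLuint)posLoc);
    glDrawArrays(GL_TRIANGLES, 0, 3);
}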

C++ Raytracer with OpenGL display skew at specific resolutions

I have a ray tracer (from www.scratchapixel.com) that I use to write an image to memory, which I then display all at once using OpenGL (GLUT). I use the width and height to divide the screen and get an OpenGL point for every pixel. It kind of works.
My problem is that my width has to be between 500 and 799. It cannot be <= 499 or >= 800, which doesn't make sense to me. The image becomes skewed. I have tried it on two computers with the same result.
799x480
800x480
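The mapping I'm aiming for is one GL_POINTS vertex per pixel, i.e. roughly the following (this is the intended mapping, not the exact loop in the code below):

// Centre of pixel (x, y) in a width x height window, in normalized device coordinates.
void EmitPixelAsPoint(int x, int y, int width, int height)
{
    float ndcX = -1.0f + (x + 0.5f) * (2.0f / width);
    float ndcY =  1.0f - (y + 0.5f) * (2.0f / height);
    glVertex2f(ndcX, ndcY);   // call between glBegin(GL_POINTS) and glEnd()
}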
Here's the full code:
#define _USE_MATH_DEFINES
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 799, height = 480;
GLdouble width_step = 2.0f / width, height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
const double INFINITY = HUGE_VAL;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is a vector length in the direction of the normalised raydir.
// its length is stretched using dot until it forms a perfect right angle triangle with the l vector.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the l vector length to the power of 2. Then we use Pythagoras' theorem.
// remove the length tca to the power of two (tca * tca) and we get a distance from the center of the sphere to the power of 2 (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse and d2 is one of the sides. Subtraction gives the third side to the power of 2.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// remove thc to tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 variables of type T.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so take the invert.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (no need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all sphere to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void init(void)
{
/* Select clearing (background) color */
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_FLAT);
/* Initialize viewing values */
//glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,width,0,height);
}
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void reshape(int w, int h)
{
glLoadIdentity();
gluOrtho2D(0,width,0,height);
glLoadIdentity();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMouseFunc(mouse);
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
This is where the image is written to memory:
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
This is where I read it back and write it to each point of Opengl:
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
I have no idea what is causing this. Is it a bad design? An OpenGL display mode issue? I don't know.
Is it a bad design?
Yes! Upload your rendered scene to a texture and then render a quad with it:
// g++ -O3 main.cpp -lglut -lGL -lGLU
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 800, height = 480;
GLdouble width_step = 2.0f / width;
GLdouble height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is a vector length in the direction of the normalised raydir.
// its length is stretched using dot until it forms a perfect right angle triangle with the l vector.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the l vector length to the power of 2. Then we use Pythagoras' theorem.
// remove the length tca to the power of two (tca * tca) and we get a distance from the center of the sphere to the power of 2 (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse and d2 is one of the sides. Subtraction gives the third side to the power of 2.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// remove thc to tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 variables of type T.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so take the invert.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (no need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all sphere to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
GLuint tex = 0;
void display(void)
{
int i;
float x, y;
render<double>(spheres); // Creates the image and put it to memory in image[].
std::vector< unsigned char > buf;
buf.reserve( width * height * 3 );
for( size_t y = 0; y < height; ++y )
{
for( size_t x = 0; x < width; ++x )
{
// flip vertically (height-y) because the OpenGL texture origin is in the lower-left corner
// flip horizontally (width-x) because...the original code did so
size_t i = (height-y) * width + (width-x);
buf.push_back( (unsigned char)( std::min(double(1), image[i].x) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].y) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].z) * 255.0 ) );
}
}
/* clear all pixels */
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, tex );
glTexSubImage2D
(
GL_TEXTURE_2D, 0,
0, 0,
width, height,
GL_RGB,
GL_UNSIGNED_BYTE,
&buf[0]
);
glBegin( GL_QUADS );
glTexCoord2i( 0, 0 );
glVertex2i( -1, -1 );
glTexCoord2i( 1, 0 );
glVertex2i( 1, -1 );
glTexCoord2i( 1, 1 );
glVertex2i( 1, 1 );
glTexCoord2i( 0, 1 );
glVertex2i( -1, 1 );
glEnd();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
glutDisplayFunc(display);
glutMouseFunc(mouse);
glGenTextures( 1, &tex );
glBindTexture( GL_TEXTURE_2D, tex );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glTexImage2D( GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL );
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
How to load and display images is also explained on www.scratchapixel.com. It's strange you didn't see this lesson:
http://www.scratchapixel.com/lessons/3d-basic-lessons/lesson-5-colors-and-digital-images/source-code/
It's all in there, and they explain how to display images using GL textures.

environment mapping using OpenGL and Cg

I wrote a program to implement environment mapping using OpenGL and the Cg shading language, but the result is not quite right. When calculating the color of the model, we blend the reflection with a decal texture. A uniform parameter called reflectivity allows the application to control how reflective the material is.
First, here is my Cg fragment code:
void main_f(float2 texCoord : TEXCOORD0,
float3 R : TEXCOORD1,
out float4 color : COLOR,
uniform float reflectivity,
uniform sampler2D decalMap,
uniform samplerCUBE environmentMap)
{
//fetch reflected environment color
float3 reflectedColor = texCUBE(environmentMap,R);
//fetch the decal base color
float3 decalColor = tex2D(decalMap,texCoord);
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
color.w = 1;
}
I set the uniform parameter reflectivity to 0.6, and the result is:
As you can see, the color information from the decal texture is lost; there is only color from the environment cube texture. And if I set reflectivity to 0, the model is dark.
But if I change color.xyz in the Cg fragment code to:
color.xyz = decalColor;
I get the correct result (only the color from the decal texture):
And if I change color.xyz in the Cg fragment code to:
color.xyz = reflectedColor;
I also get the correct result (only the color from the environment cube texture):
And my question is:
Why does it not work when I blend the color information from the decal texture with the color from the environment cube texture using the Cg lerp function?
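For reference, Cg's lerp(a, b, w) returns a * (1 - w) + b * w, so with the arguments in the order used above the shader should behave like this:

// lerp(reflectedColor, decalColor, reflectivity)
//   == reflectedColor * (1 - reflectivity) + decalColor * reflectivity
// reflectivity == 0.0  -> pure reflectedColor
// reflectivity == 1.0  -> pure decalColor
// reflectivity == 0.6  -> 40% reflection, 60% decal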
Finally, here are my Cg vertex shader and cpp file:
vertex.cg:
void main_v(float4 position : POSITION,
float2 texCoord : TEXCOORD0,//decal texture
float3 normal : NORMAL,
out float4 oPosition : POSITION,
out float2 oTexCoord : TEXCOORD0,//out decal texture
out float3 R : TEXCOORD1,//reflective vector
uniform float3 eyePositionW,//eye position in world space
uniform float4x4 modelViewProj,
uniform float4x4 modelToWorld
)
{
modelViewProj = glstate.matrix.mvp;
oPosition = mul(modelViewProj,position);
oTexCoord = texCoord;
float3 positionW = mul(modelToWorld,position).xyz;
float3 N = mul((float3x3)modelToWorld,normal);
N = normalize(N);
float3 I = positionW - eyePositionW;//incident vector
R = reflect(I,N);
}
main.cpp:
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"GLAUX.LIB")
#pragma comment(lib,"cg.lib")
#pragma comment(lib,"cgGL.lib")
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/glaux.h>
#include <CG/cg.h>
#include <CG/cgGL.h>
#include "MonkeyHead.h"
#include <iostream>
#include <cmath>
using namespace std;
int loop;
/* Use enum to assign unique symbolic OpenGL texture names. */
enum {
TO_BOGUS = 0,
TO_DECAL,
TO_ENVIRONMENT,
};
const double myPi = 3.14159;
//for Cg shader
static CGcontext myCgContext;
static CGprofile myCgVertexProfile,myCgFragmentProfile;
static CGprogram myCgVertexProgram,myCgFragmentProgram;
static const char *myProgramName = "CgTest18CubeMapReflective",
*myVertexProgramFileName = "vertex.cg",
*myVertexProgramName = "main_v",
*myFragmentProgramFileName = "fragment.cg",
*myFragmentProgramName = "main_f";
static CGparameter myCgVertexParam_modelToWorld;
//bmp files for cube map
const char *bmpFile[6] = {"Data/1.bmp","Data/2.bmp","Data/3.bmp",
"Data/4.bmp","Data/5.bmp","Data/6.bmp"};
const char *decalBmpFile = "Data/decal.bmp";
static float eyeAngle = 0.53;
static float eyeHeight = 0.0f;
static float headSpain = 0.0f;
static const GLfloat vertex[4*6][3] = {
/* Positive X face. */
{ 1, -1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { 1, -1, 1 },
/* Negative X face. */
{ -1, -1, -1 }, { -1, 1, -1 }, { -1, 1, 1 }, { -1, -1, 1 },
/* Positive Y face. */
{ -1, 1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Y face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, -1, 1 }, { -1, -1, 1 },
/* Positive Z face. */
{ -1, -1, 1 }, { 1, -1, 1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Z face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, 1, -1 }, { -1, 1, -1 },
};
static float reflectivity = 0.6;
GLuint decalTexture;
bool animating = false;//enable animating or not
static void drawMonkeyHead()
{
static GLfloat *texCoords = NULL;
const int numVertices = sizeof(MonkeyHead_vertices)
/ (3 * sizeof(MonkeyHead_vertices[0]));
const float scaleFactor = 1.5;
//generate texcoords only the first time; the static pointer caches them across frames
if (!texCoords)
{
texCoords = (GLfloat*)malloc(2 * numVertices * sizeof(GLfloat));
if (!texCoords)
{
cerr << "ERROR : Monkey head texcoords memory malloc failed !" << endl;
exit(1);
}
for (loop = 0;loop < numVertices;++loop)
{
texCoords[loop * 2] = scaleFactor * MonkeyHead_vertices[3 * loop];
texCoords[loop * 2 + 1] = scaleFactor * MonkeyHead_vertices[3 * loop + 1];
}
}
//use vertex array
//enable array
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
//assign array data
glVertexPointer(3,GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_vertices);
glNormalPointer(GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_normals);
glTexCoordPointer(2,GL_FLOAT,2 * sizeof(GLfloat),texCoords);
glDrawElements(GL_TRIANGLES,3 * MonkeyHead_num_of_triangles,
GL_UNSIGNED_SHORT,MonkeyHead_triangles);
}
//read bmp image file
AUX_RGBImageRec *LoadBMP(const char *FileName)
{
FILE *File = NULL;
if(!FileName)
return NULL;
File = fopen(FileName,"r");
if (File)
{
fclose(File);
return auxDIBImageLoad(FileName);
}
return NULL;
}
//load decal texture from a bmp file
int loadDecalTexture()
{
int status = 1;
AUX_RGBImageRec *TextureImage = NULL;
if ((TextureImage = LoadBMP(decalBmpFile)))
{
glGenTextures(1,&decalTexture);
glBindTexture(GL_TEXTURE_2D,decalTexture);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,TextureImage->sizeX,
TextureImage->sizeY,0,GL_RGB,GL_UNSIGNED_BYTE,
TextureImage->data);//specify the texture image
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);//set the filtering mode
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
}
else
status = 0;
if (TextureImage)
{
if (TextureImage->data)
free(TextureImage->data);
free(TextureImage);
}
return status;
}
//load cube map from 6 bmp files
int loadCubeMap()
{
int status = 1;
AUX_RGBImageRec *TextureImage[6] = {NULL,NULL,NULL,NULL,NULL,NULL};
for (loop = 0;loop < 6;++loop)
{
if (!(TextureImage[loop] = LoadBMP(bmpFile[loop])))
{
cout << "ERROR :load bmp file " << loop << " failed !" << endl;
status = 0;
}
}
//upload the faces only if all six images loaded (avoids dereferencing NULL on failure)
if (status)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGB, TextureImage[0] ->sizeX, TextureImage[0] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[0] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGB, TextureImage[1] ->sizeX, TextureImage[1] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[1] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGB, TextureImage[2] ->sizeX, TextureImage[2] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[2] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGB, TextureImage[3] ->sizeX, TextureImage[3] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[3] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGB, TextureImage[4] ->sizeX, TextureImage[4] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[4] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGB, TextureImage[5] ->sizeX, TextureImage[5] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[5] ->data);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
//free memory
for (loop = 0;loop < 6;++loop)
{
if (TextureImage[loop])
{
if (TextureImage[loop] ->data)
{
free(TextureImage[loop] ->data);
}
free(TextureImage[loop]);
}
}
return status;
}
//draw the surroundings as a cube with each face of
//the cube environment map applied.
void drawSurroundings(const GLfloat *eyePosition)
{
const float surroundingsDistance = 8;
glLoadIdentity();
gluLookAt(eyePosition[0],eyePosition[1],eyePosition[2],
0,0,0,0,1,0);
glScalef(surroundingsDistance,
surroundingsDistance,
surroundingsDistance);
glEnable(GL_TEXTURE_CUBE_MAP);
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);
glBegin(GL_QUADS);
for (loop = 0;loop < 4 * 6;++loop)
{
glTexCoord3fv(vertex[loop]);
glVertex3fv(vertex[loop]);
}
glEnd();
}
static void checkForCgError(const char *situation)
{
CGerror error;
const char *string = cgGetLastErrorString(&error);
if (error != CG_NO_ERROR) {
cout << "ERROR : " << myProgramName << situation << string << endl;
if (error == CG_COMPILER_ERROR) {
cout << cgGetLastListing(myCgContext) << endl;
}
exit(1);
}
}
//init Cg shaders
void initCg()
{
myCgContext = cgCreateContext();
myCgVertexProfile = cgGLGetLatestProfile(CG_GL_VERTEX);
cgGLSetOptimalOptions(myCgVertexProfile);
checkForCgError("selecting vertex profile");
myCgVertexProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myVertexProgramFileName,
myCgVertexProfile,
myVertexProgramName,
NULL);
checkForCgError("Creating vertex Cg program from file");
cgGLLoadProgram(myCgVertexProgram);
checkForCgError("loading vertex program");
myCgFragmentProfile = cgGLGetLatestProfile(CG_GL_FRAGMENT);
cgGLSetOptimalOptions(myCgFragmentProfile);
checkForCgError("selecting fragment profile");
myCgFragmentProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myFragmentProgramFileName,
myCgFragmentProfile,
myFragmentProgramName,
NULL);
checkForCgError("Creating fragment Cg program from file");
cgGLLoadProgram(myCgFragmentProgram);
checkForCgError("loading fragment program");
}
//compute rotate transformation matrix
void makeRotateMatrix(float angle,
float ax,float ay,float az,
float m[16])
{
float radians, sine, cosine, ab, bc, ca, tx, ty, tz;
float axis[3];
float mag;
axis[0] = ax;
axis[1] = ay;
axis[2] = az;
mag = sqrt(axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2]);
if (mag) {
axis[0] /= mag;
axis[1] /= mag;
axis[2] /= mag;
}
radians = angle * myPi / 180.0;
sine = sin(radians);
cosine = cos(radians);
ab = axis[0] * axis[1] * (1 - cosine);
bc = axis[1] * axis[2] * (1 - cosine);
ca = axis[2] * axis[0] * (1 - cosine);
tx = axis[0] * axis[0];
ty = axis[1] * axis[1];
tz = axis[2] * axis[2];
m[0] = tx + cosine * (1 - tx);
m[1] = ab + axis[2] * sine;
m[2] = ca - axis[1] * sine;
m[3] = 0.0f;
m[4] = ab - axis[2] * sine;
m[5] = ty + cosine * (1 - ty);
m[6] = bc + axis[0] * sine;
m[7] = 0.0f;
m[8] = ca + axis[1] * sine;
m[9] = bc - axis[0] * sine;
m[10] = tz + cosine * (1 - tz);
m[11] = 0;
m[12] = 0;
m[13] = 0;
m[14] = 0;
m[15] = 1;
}
//compute translation transformation matrix
static void makeTranslateMatrix(float x, float y, float z, float m[16])
{
m[0] = 1; m[1] = 0; m[2] = 0; m[3] = x;
m[4] = 0; m[5] = 1; m[6] = 0; m[7] = y;
m[8] = 0; m[9] = 0; m[10] = 1; m[11] = z;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
}
//multiply a float4x4 matrix by another float4x4 matrix
static void multMatrix(float dst[16],const float src1[16],const float src2[16])
{
for (int i = 0;i < 4;++i)
{
for (int j = 0;j < 4;++j)
{
dst[i * 4 + j] = src1[i * 4 + 0] * src2[0 * 4 + j] +
src1[i * 4 + 1] * src2[1 * 4 + j] +
src1[i * 4 + 2] * src2[2 * 4 + j] +
src1[i * 4 + 3] * src2[3 * 4 + j];
}
}
}
void init()
{
glewInit();
glClearColor(0.0,0.0,0.0,1.0);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
if (!loadDecalTexture())
{
cout << "ERROR : load decal texture from bmp file failed !" << endl;
exit(1);
}
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
if (!loadCubeMap())
{
cout << "ERROR : load cube map from bmp file failed !" << endl;
exit(1);
}
initCg();
}
void display()
{
const GLfloat eyePosition[4] = {6 * sin(eyeAngle),
eyeHeight,
6 * cos(eyeAngle),
1};
float tranlateMatrix[16],rotateMatrix[16],modelMatrix[16];
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
cgGLEnableProfile(myCgVertexProfile);
checkForCgError("enabling vertex profile");
cgGLEnableProfile(myCgFragmentProfile);
checkForCgError("enabling fragment profile");
cgGLBindProgram(myCgVertexProgram);
checkForCgError("binding vertex program");
cgGLBindProgram(myCgFragmentProgram);
checkForCgError("binding fragment program");
glLoadIdentity();
glTranslatef(0.0,0.0,-5.0);
glRotatef(headSpain,0,1,0);
//set some uniform parameters in Cg shader
cgGLSetParameter3fv(
cgGetNamedParameter(myCgVertexProgram,"eyePositionW"),
eyePosition);
checkForCgError("setting eyePositionW parameter");
makeRotateMatrix(headSpain,0,1,0,rotateMatrix);
makeTranslateMatrix(0.0,0.0,-5.0,tranlateMatrix);
multMatrix(modelMatrix,tranlateMatrix,rotateMatrix);
//set the Cg matrix parameter : modelToWorld
cgSetMatrixParameterfr(
cgGetNamedParameter(myCgVertexProgram,"modelToWorld"),
modelMatrix);
checkForCgError("setting modelToWorld parameter");
cgGLSetParameter1f(
cgGetNamedParameter(myCgFragmentProgram,"reflectivity"),
reflectivity);
checkForCgError("setting reflectivity parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"decalMap"),
decalTexture);
checkForCgError("setting decalTexture parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"environmentMap"),
TO_ENVIRONMENT);
checkForCgError("setting environmentMap parameter");
drawMonkeyHead();
cgGLDisableProfile(myCgVertexProfile);
checkForCgError("disabling vertex profile");
cgGLDisableProfile(myCgFragmentProfile);
checkForCgError("disabling fragment profile");
drawSurroundings(eyePosition);
glutSwapBuffers();
}
static void idle()
{
headSpain += 0.5;
if (headSpain > 360)
{
headSpain -= 360;
}
glutPostRedisplay();
}
static void keyboard(unsigned char key,int x,int y)
{
switch(key)
{
case ' ':
animating = !animating;
if (animating)
{
glutIdleFunc(idle);
}
else
glutIdleFunc(NULL);
break;
case 'r':
reflectivity += 0.1;
if (reflectivity > 1.0)
{
reflectivity = 1.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 'R':
reflectivity -= 0.1;
if (reflectivity < 0.0)
{
reflectivity = 0.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 27:
cgDestroyProgram(myCgVertexProgram);
cgDestroyProgram(myCgFragmentProgram);
cgDestroyContext(myCgContext);
exit(0);
break;
}
}
void reshape(int w,int h)
{
glViewport(0,0,(GLsizei)w,(GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0,(h == 0) ? 1.0 : (double)w / h,1.0,20.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
int main(int argc,char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowPosition(0,0);
glutInitWindowSize(600,600);
glutCreateWindow("CubeMapReflection");
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutMainLoop();
return 0;
}
The first thing I see is that the lerp statement needs to have its values reversed.
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
should be
color.xyz = lerp(decalColor, reflectedColor, reflectivity);
because the lerp documentation says:
lerp(a, b, w) returns a when w = 0 and b when w = 1, and you want the full decal color when reflectivity = 0 and the full reflected color when reflectivity = 1.
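To make the argument order concrete, here is a small self-contained Cg helper (purely illustrative; the function name is made up and is not part of the original shaders):
//lerp(a, b, w) is just a + w * (b - a)
float3 blendDecalWithReflection(float3 decalColor, float3 reflectedColor, float reflectivity)
{
    //reflectivity = 0 -> pure decalColor, reflectivity = 1 -> pure reflectedColor
    return lerp(decalColor, reflectedColor, reflectivity);
}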
I see that the effect you're trying to achieve is akin to GL_MODULATE. You will need to multiply the values together, not lerp between them. Try this; it should work and give you the effect you want.
color.xyz = (reflectedColor.xyz * reflectivity) * decalColor;
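If you prefer that modulate-style combine, a standalone Cg helper (again illustrative, with an assumed name) would be:
float3 modulateCombine(float3 decalColor, float3 reflectedColor, float reflectivity)
{
    //the reflection is scaled by reflectivity and then tinted by the decal color,
    //similar in spirit to fixed-function GL_MODULATE; note that the result goes
    //toward black as reflectivity approaches 0
    return (reflectedColor * reflectivity) * decalColor;
}
Which combine to use is a design choice: the lerp blend keeps the decal fully visible at low reflectivity, while the modulate version darkens everything that is not reflective.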