How to get a bit from a float using GLSL on WebGL

On a web app with three.js I have a Float32Array that I pass to my vertex shader as a BufferAttribute named "state" on my geometry.
How can I get the nth bit of each float in the vertex shader?
I need something like:
vertexShader: `
attribute float state;
varying float active;
void main() {
active = getnthbit(state, 4); // get the fourth bit of the float value of state
...
}`,...
I checked Tommy's answer here: https://stackoverflow.com/a/41946046/3548345
It uses this formula
float bit = step(0.5, mod(float(value) / 8.0, 1.0));
to get the third bit, but it doesn't work for me and I don't understand it.

In WebGL2 you can pass integer data and do integer math (signed and unsigned) in shaders.
In WebGL1 you really only get floats, and even things marked as an int may not actually be ints. A float32 has a 23-bit mantissa, so AFAIK you can only reliably get the first 23 bits using the mod method.
You could implement getNthBit(float v, int bit) as
float getNthBit(float v, int bit) {
// shift right by `bit` (divide by 2^bit), then take the low bit with mod 2;
// the +0.5 guards against a float that rounded to just below an integer
return mod(floor((v + 0.5) / pow(2.0, float(bit))), 2.0);
}
or something like that.
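Before the demo below, here's the same math in plain JavaScript so you can sanity-check it in a console (a quick sketch, not shader code):
// shift right by `bit` (divide by 2^bit), then take the low bit
function getNthBit(v, bit) {
  return Math.floor((v + 0.5) / Math.pow(2, bit)) % 2;
}
console.log(getNthBit(13, 0), getNthBit(13, 1), getNthBit(13, 2), getNthBit(13, 3));
// 13 = 0b1101 -> prints 1 0 1 1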
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
void main() {
gl_PointSize = 150.0;
gl_Position = vec4(0, 0, 0, 1);
}
`;
const fs = `
precision highp float;
uniform float offset;
float getNthBit(float v, int bit) {
return mod(floor((v + 0.5) / pow(2.0, float(bit))), 2.0);
}
void main() {
float v = gl_FragCoord.x + offset;
int bit = int(gl_FragCoord.y);
float nthBit = getNthBit(v, bit);
gl_FragColor = vec4(0, nthBit > 0.0 ? 1.0 : 0.0, 0, 1);
}
`;
const prg = twgl.createProgram(gl, [vs, fs]);
const offLoc = gl.getUniformLocation(prg, 'offset');
gl.useProgram(prg);
let offset = 0;
function render(time) {
offset += 150;
gl.uniform1f(offLoc, offset);
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
requestAnimationFrame(render);
}
requestAnimationFrame(render);
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas
width="150"
height="32"
style="
width: 450px;
height: 192px;
image-rendering: pixelated;
border: 1px solid black;
"></canvas>
Note that if you just want to pull out every bit as though it's binary data, you can declare the attribute as a vec2 of UNSIGNED_SHORT: even though you put float32s into the buffer, WebGL will read each float as two 16-bit unsigned integers,
and then use something like
float getNthBit(vec2 v, int bit) {
float t = bit < 16 ? v[0] : v[1]; // low 16 bits in v[0], high 16 in v[1]
int b = bit < 16 ? bit : bit - 16;
return mod(floor((t + 0.5) / pow(2.0, float(b))), 2.0);
}
Example
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec2 v;
varying vec2 v_v;
void main() {
gl_PointSize = 150.0;
gl_Position = vec4(0, 0, 0, 1);
v_v = v;
}
`;
const fs = `
precision highp float;
varying vec2 v_v;
float getNthBit(vec2 v, int bit) {
float t = bit < 16 ? v[0] : v[1];
int b = bit < 16 ? bit : bit - 16;
return mod(floor((t + 0.5) / pow(2.0, float(b))), 2.0);
}
void main() {
int bit = 31 - int(gl_FragCoord.x / 4.0);
float nthBit = getNthBit(v_v, bit);
gl_FragColor = vec4(0, nthBit > 0.0 ? 1.0 : 0.0, 0, 1);
}
`;
const prg = twgl.createProgram(gl, [vs, fs]);
const vLoc = gl.getAttribLocation(prg, 'v');
const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf); // bindBuffer takes no usage argument; that belongs to bufferData
gl.enableVertexAttribArray(vLoc);
gl.vertexAttribPointer(vLoc, 2, gl.UNSIGNED_SHORT, false, 0, 0);
gl.useProgram(prg);
const data = new Float32Array(1);
const bits = new Uint32Array(data.buffer);
const inputElem = document.querySelector('input');
const codeElem = document.querySelector('pre');
function render() {
data[0] = inputElem.value;
codeElem.textContent = bits[0].toString(2).padStart(32, '0');
gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW);
gl.drawArrays(gl.POINTS, 0, 1); // draw 1 point
}
render();
inputElem.addEventListener('input', render);
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas
width="128"
height="4"
style="
width: 256px;
height: 8px;
image-rendering: pixelated;
border: 1px solid black;
display: block;
"></canvas>
<pre></pre>
<input type="number" value="13445">
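Back in three.js terms (the question's setup), one way to get that vec2-of-UNSIGNED_SHORT view is a sketch like the following; floatData and geometry are placeholders for your own objects, and setAttribute is addAttribute in older three.js releases:
// floatData: your original Float32Array of per-vertex states (hypothetical name).
// A Uint16Array view shares the same underlying bytes, so each float32 arrives
// in the shader as two 16-bit halves, matching `attribute vec2 state;`.
const asShorts = new Uint16Array(floatData.buffer);
geometry.setAttribute('state', new THREE.BufferAttribute(asShorts, 2));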

Related

Ray tracing: making a plane for sphere

I am working on an assignment where I need to ray trace a sphere and a plane (floor). I have the sphere working but I am having trouble with the plane. I use the ray-plane intersection formula:
t = -(o - p)·n / (d·n). I have this in Plane.h; however, when I run my code I get errors from Ray.h. Could someone explain what I'm doing wrong? Any help would be appreciated.
Plane.h
#include "..\..\raytrace\Ray.h"
class Plane
{
using Colour = cv::Vec3b; // RGB Value
private:
Vec3 normal_;
Vec3 distance_;
Colour color_;
public:
Plane();
Plane(Vec3 norm, Vec3 dis, Colour color) : normal_(norm), distance_(dis), color_(color) {}
Vec3 norm() const {
return normal_;
}
Vec3 dis() const {
return distance_;
}
Colour color() const {
return color_;
}
float findIntersection(Ray ray) {
Vec3 rayDirection = ray.mPosition();
float denominator = rayDirection.dot(normal_);
if (denominator == 0) {
return false; // ray is parallel to the plane (false converts to 0.0f)
}
else {
//mPosition() is origin in Ray.h
float t = -(((ray.mPosition() - distance_)).dot(normal_)) / denominator;
return t; // this return was missing: the function fell off the end
}
}
};
Ray.h
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
#include "Image.h"
// Data types
typedef float Scalar; //**custom datatype: Scalar is float
typedef Eigen::Matrix<Scalar, 3, 1> Vec3; //***Vec3 is a custom datatype (specific kind)
typedef Eigen::Matrix<Scalar, 2, 1> Vec2;
typedef unsigned char uchar;
class Ray
{
private:
Vec3 mPosition_; //point
public:
Ray() {}
//constructor: when we construct a ray we get mPosition_
Ray(Vec3 mPosition) : mPosition_(mPosition) {
//
}
float t;
Vec3 mPosition() const {
return mPosition_;
}
public:
inline Vec3 generateRay(Vec3 const& pt) {
Vec3 origin = mPosition_;
Vec3 direction = pt - mPosition_; // d = s -e, pt is pixel Position
direction.normalize();
return pt + t * direction;
}
};
main.cpp
#include <cmath>
#include "Image.h"
#include "Ray.h"
#include "../build/raytrace/Plane.h"
//Color functions
using Colour = cv::Vec3b; // RGB Value
//Colour is a Vec3b datatype; use Colour instead of Vec3b. It holds 3 values between 0 and 255.
Colour red() { return Colour(255, 0, 0); }
Colour green() { return Colour(0, 255,0); }
Colour blue() { return Colour(0, 0, 255); }
Colour white() { return Colour(255, 255, 255); }
Colour black() { return Colour(0, 0, 0); }
//bounding the channel wise pixel color between 0 to 255
//bounding the color value, if a value is beyond 255 clamp it to 255, and any value below 0 clamp to 0.
uchar Clamp(int color)
{
if (color < 0) return 0;
if (color >= 255) return 255;
return color;
}
int main(int, char**){
//Create an image object with 500 x 500 resolution.
Image image = Image(500, 500);
//Coordinates of image rectangle
Vec3 llc = Vec3(-1, -1, -1); //**llc - lower left corner
Vec3 urc = Vec3(1, 1, -1); //**urc - upper right corner
int width = urc(0) - llc(0);
int height = urc(1) - llc(1);
Vec2 pixelUV = Vec2((float)width / image.cols, (float)height / image.rows);
/// TODO: define camera position (view point), sphere center, sphere radius (Weightage: 5%)
Vec3 CameraPoint = Vec3(0, 0, 0); //**it is the origin
Vec3 SphereCenter = Vec3(0, 0, -5); //**it is the Sphere Position
float SphereRadius = 2.0;
Vec3 LightSource = Vec3(2.0, 0.0, 3.0); //**
Vec3 ambient = Vec3(0, 0, 0.5); //**
Vec3 diffuse = Vec3(224, 180, 255); //** 0, 255, 100 - green
Vec3 Origin = CameraPoint;
//end
for (int row = 0; row < image.rows; ++row) {
for (int col = 0; col < image.cols; ++col) {
//TODO: Build primary rays
//Find the pixel position (PixelPos) for each row and col and then construct the vector PixelPos-Origin
Vec3 pixelPos = Vec3(llc(0) + pixelUV(0) * (col + 0.5), llc(1) + pixelUV(1) * (row + 0.5), -1);
//create a ray object
Ray r; //**
//Vec3 rayDir = pixelPos - Origin; //**direction of the ray
Vec3 rayDir = r.generateRay(pixelPos); //**pixelPos-Origin
rayDir.normalize(); //**normalize the ray direction vector
//Ray-sphere intersection...(refer to the lecture slides and Section 4.4.1 of the textbook)
float a = rayDir.dot(rayDir);
Vec3 s0_r0 = Origin - SphereCenter; //***s0_r0 - sphere center - ray origin
float b = 2.0 * rayDir.dot(s0_r0);
float c = s0_r0.dot(s0_r0) - pow(SphereRadius, 2);
//compute the discriminant
float discriminant = pow(b, 2) - 4 * a * c;
//if the discriminant is greater than zero
if(discriminant > 0){
//find roots t1 and t2
float t1 = (-b - sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
float t2 = (-b + sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
//determine which one is the real intersection point
float t;
//Sphere s;
if (t1 < t2 && (t1 > 0 && t2 > 0)) {
t = t1;
//} //should this be after the if-statement below, so that it uses t = t1 and not just float t.????
if (t > 0) {
//Shade the pixel, normal is Intersection - SphereCenter, LightVector is LightSource- Intersection, make sure to normalize the vectors
Vec3 Intersection = Origin + (t * rayDir);
Vec3 Normal = Intersection - SphereCenter; //** normalize
Normal.normalize(); //**
Vec3 LightVector = LightSource - Intersection; //**normalize
LightVector.normalize(); //**
float diffuseTerm = LightVector.dot(Normal);
if (diffuseTerm < 0) diffuseTerm = 0;
Colour colour(0, 0, 0); //The ambient base
colour[0] = Clamp(ambient[0] + diffuse[0] * diffuseTerm);
colour[1] = Clamp(ambient[1] + diffuse[1] * diffuseTerm);
colour[2] = Clamp(ambient[2] + diffuse[2] * diffuseTerm);
image(row, col) = colour;
}
}//
else {
image(row, col) = black();
}
} else {
//No intersection, discriminant < 0
image(row, col) = red(); //**makes blue background colour
}
////**Plane intersection
//create a plane object
Plane plane(Vec3(-5, 0, -4), Vec3(0, 0, -1), black());
//Plane plane;
////if ray hits plane -> color black
//if (plane.findIntersection(rayDir) == 1) {
// image(row, col) = black();
//}
//else {
// image(row, col) = white();
//}
}
}
/// Required outputs: (1) Ray traced image of a sphere (2) Ray traced image when the camera is placed inside the sphere (complete black)
image.save("./result.png");
image.display();
return EXIT_SUCCESS;
}
Errors
[screenshot of compiler errors complaining about a "class redefinition" in Ray.h]
#include is a shockingly simple directive. It literally just copy-pastes the content of the file.
main.cpp includes both Ray.h and Plane.h, and Plane.h includes Ray.h, so Ray.h ends up being included twice. That's why the compiler is complaining about a "class redefinition".
You can add #pragma once at the top of all your header files to let the compiler know to skip the file if it was included already.
N.B. #pragma once is not officially part of the language, but it is supported by all compilers and has a few small advantages over the alternative.
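For example, a minimal sketch of Ray.h with the directive added (only the guard line is new):
// Ray.h
#pragma once   // compiler skips the rest of the file on repeat inclusion
#include <Eigen/Dense>
// ... rest of Ray.h unchanged ...
The portable alternative is a classic include guard, which works on any compiler:
// Ray.h
#ifndef RAY_H
#define RAY_H
// ... contents of Ray.h ...
#endif // RAY_H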

iterate over large amount of data in fragment shader

I'm trying to iterate over a large amount of data in my fragment shader in WebGL. I want to pass a lot of data to it and then iterate over it on each pass of the fragment shader. I'm having some issues doing that, though. My ideas were the following:
1. Pass the data in uniforms to the frag shader, but I can't send very much data that way.
2. Use a buffer to send data, as I do verts to the vert shader, and then use a varying to send the data on to the frag shader. Unfortunately this seems to involve some issues: (a) varyings interpolate between vertices, and I think that will cause issues with my code (although perhaps this is unavoidable), and (b) more importantly, I don't know how to iterate over the data I pass to my fragment shader. I'm already using a buffer for my 3D point coordinates, but how does WebGL handle a second buffer and the data coming through it? In what order is data fetched from each buffer (my first buffer containing 3D coordinates and the second buffer I'm trying to add)? And lastly, as stated above, if I want to iterate over all the data passed in on every pass of the fragment shader, how can I do that?
I've already tried using a uniform array and iterating over that in my fragment shader, but I believe I ran into limitations, since there is a relatively small size limit for uniforms. I'm currently trying the second method mentioned above.
//pseudo code
vertexCode = `
attribute vec4 3dcoords;
varying vec4 3dcoords;
??? ??? my_special_data;
void main(){...}
`
fragCode = `
varying vec4 3dcoords;
void main(){
...
// perform math operation on 3dcoords for all values in my_special_data variable and store in variable my_results
if( my_results ... ){
gl_FragColor += ...;
}
}
`
Textures in WebGL are random-access 2D arrays of data, so you can use them to read lots of data.
Example:
const width = 256;
const height = 256;
const vs = `
attribute vec4 position;
void main() {
gl_Position = position;
}
`;
const fs = `
precision highp float;
uniform sampler2D tex;
const int width = ${width};
const int height = ${height};
void main() {
vec4 sums = vec4(0);
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
vec2 xy = (vec2(x, y) + 0.5) / vec2(width, height);
sums += texture2D(tex, xy);
}
}
gl_FragColor = sums;
}
`;
function main() {
const gl = document.createElement('canvas').getContext('webgl');
// check if we can make floating point textures
const ext1 = gl.getExtension('OES_texture_float');
if (!ext1) {
return alert('need OES_texture_float');
}
// check if we can render to floating point textures
const ext2 = gl.getExtension('WEBGL_color_buffer_float');
if (!ext2) {
return alert('need WEBGL_color_buffer_float');
}
// make a 1x1 pixel floating point RGBA texture and attach it to a framebuffer
const framebufferInfo = twgl.createFramebufferInfo(gl, [
{ type: gl.FLOAT, },
], 1, 1);
// make random 256x256 texture
const data = new Uint8Array(width * height * 4);
for (let i = 0; i < data.length; ++i) {
data[i] = Math.random() * 256;
}
const tex = twgl.createTexture(gl, {
src: data,
minMag: gl.NEAREST,
wrap: gl.CLAMP_TO_EDGE,
});
// compile shaders, link, lookup locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create a buffer and put a 2 unit
// clip space quad in it using 2 triangles
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1,
1, -1,
-1, 1,
-1, 1,
1, -1,
1, 1,
],
},
});
// render to the 1 pixel texture
gl.bindFramebuffer(gl.FRAMEBUFFER, framebufferInfo.framebuffer);
// set the viewport for 1x1 pixels
gl.viewport(0, 0, 1, 1);
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.activeTexture, gl.bindTexture, gl.uniformXXX
twgl.setUniforms(programInfo, {
tex,
});
const offset = 0;
const count = 6;
gl.drawArrays(gl.TRIANGLES, offset, count);
// read the result
const pixels = new Float32Array(4);
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.FLOAT, pixels);
console.log('webgl sums:', pixels);
const sums = new Float32Array(4);
for (let i = 0; i < data.length; i += 4) {
for (let j = 0; j < 4; ++j) {
sums[j] += data[i + j] / 255;
}
}
console.log('js sums:', sums);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
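If your data is conceptually a 1D array, you can still store it in a 2D texture and turn a 1D index into a texture coordinate inside the shader. A small GLSL sketch (width/height are the texture dimensions; names are illustrative):
// convert a 1D index into a uv coordinate for a width x height texture
vec2 uvFromIndex(float i, float width, float height) {
  float x = mod(i, width);
  float y = floor(i / width);
  return (vec2(x, y) + 0.5) / vec2(width, height);  // +0.5 hits texel centers
}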

Shaders: How to draw 3D point verts without generating geometry?

I have a 3D Webgl scene. I am using Regl http://regl.party/ . Which is WebGL. So I am essentially writing straight GLSL.
This is a game project. I have an array of 3D positions [[x,y,z] ...] which are bullets, or projectiles. I want to draw these bullets as a simple cube, sphere, or particle. No requirement on the appearance.
How can I make shaders and a draw call for this without having to create a repeated duplicate set of geometry for the bullets?
Preferring an answer with a vert and frag shader example that demonstrates the expected data input and can be reverse engineered to handle the CPU binding layer
You create a regl command which encapsulates a bunch of data. You can then call it with an object.
Each uniform can take an optional function to supply its value. That function is passed a regl context as the first argument and the object you passed in as the second argument, so you can call the command multiple times with a different object to draw the same thing (same vertices, same shader) somewhere else.
var regl = createREGL()
const objects = [];
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
objects.push({
x: rand(-1, 1),
y: rand(-1, 1),
speed: rand(.5, 1.5),
direction: rand(0, Math.PI * 2),
color: [rand(0, 1), rand(0, 1), rand(0, 1), 1],
});
}
function rand(min, max) {
return Math.random() * (max - min) + min;
}
const starPositions = [[0, 0, 0]];
const starElements = [];
const numPoints = 5;
for (let i = 0; i < numPoints; ++i) {
for (let j = 0; j < 2; ++j) {
const a = (i * 2 + j) / (numPoints * 2) * Math.PI * 2;
const r = 0.5 + j * 0.5;
starPositions.push([
Math.sin(a) * r,
Math.cos(a) * r,
0,
]);
}
starElements.push([
0, 1 + i * 2, 1 + i * 2 + 1,
]);
}
const drawStar = regl({
frag: `
precision mediump float;
uniform vec4 color;
void main () {
gl_FragColor = color;
}`,
vert: `
precision mediump float;
attribute vec3 position;
uniform mat4 mat;
void main() {
gl_Position = mat * vec4(position, 1);
}`,
attributes: {
position: starPositions,
},
elements: starElements,
uniforms: {
mat: (ctx, props) => {
const {viewportWidth, viewportHeight} = ctx;
const {x, y} = props;
const aspect = viewportWidth / viewportHeight;
return [.1 / aspect, 0, 0, 0,
0, .1, 0, 0,
0, 0, 0, 0,
x, y, 0, 1];
},
color: (ctx, props) => props.color,
}
})
regl.frame(function () {
regl.clear({
color: [0, 0, 0, 1]
});
objects.forEach((o) => {
o.direction += rand(-0.1, 0.1);
o.x += Math.cos(o.direction) * o.speed * 0.01;
o.y += Math.sin(o.direction) * o.speed * 0.01;
o.x = (o.x + 3) % 2 - 1;
o.y = (o.y + 3) % 2 - 1;
drawStar(o);
});
})
<script src="https://cdnjs.cloudflare.com/ajax/libs/regl/1.3.11/regl.min.js"></script>
You can draw all of the bullets as point sprites, in which case you just need to provide the position and size of each bullet and draw them as GL_POINTS. Each “point” is rasterized to a square based on the output of your vertex shader (which runs once per point). Your fragment shader is called for each fragment in that square, and can color the fragment however it wants—with a flat color, by sampling a texture, or however else you want.
Or you can provide a single model for all bullets, a separate transform for each bullet, and draw them as instanced GL_TRIANGLES or GL_TRIANGLE_STRIP or whatever. Read about instancing on the OpenGL wiki.
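In WebGL1, instancing comes from the ANGLE_instanced_arrays extension (WebGL2 has it built in as gl.vertexAttribDivisor / gl.drawArraysInstanced). A minimal sketch of the relevant calls; offsetBuffer, offsetLoc, and numBullets are placeholders for your own setup:
const ext = gl.getExtension('ANGLE_instanced_arrays');
// make `offset` advance once per instance instead of once per vertex
gl.bindBuffer(gl.ARRAY_BUFFER, offsetBuffer);   // one vec3 per bullet
gl.enableVertexAttribArray(offsetLoc);
gl.vertexAttribPointer(offsetLoc, 3, gl.FLOAT, false, 0, 0);
ext.vertexAttribDivisorANGLE(offsetLoc, 1);
// draw numBullets copies of a 36-vertex cube in a single call
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 36, numBullets);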
Not a WebGL coder so read with prejudice...
1. Encode the vertexes in a texture. Beware of clamping: use a texture format that does not clamp to <0.0, +1.0>, like GL_LUMINANCE32F_ARB, or use vertexes in that range only. To check for clamping see: GLSL debug prints
2. Render a single rectangle covering the whole screen, and use the texture from #1 as input. This will ensure that the fragment shader is called exactly once for each pixel of the screen/view.
3. Inside the fragment shader, read the texture and check the distance of the fragment to your vertexes. Based on it, render your stuff or discard the fragment (a sketch follows after this list). Spheres are easy, but boxes and other shapes might be complicated to render based on vertex distance, especially if they can be arbitrarily oriented (which needs additional info in the input texture). To ease this you can prerender them into some texture and use the distance as texture coordinates.
This answer of mine is using this technique: raytrace through 3D mesh
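A minimal fragment-shader sketch of step 3, assuming the positions are stored one per texel (in pixel units) in a NUM_VERTS x 1 texture; all names and constants here are illustrative:
precision highp float;
uniform sampler2D verts;      // one vertex position per texel, in pixels
const int NUM_VERTS = 16;     // WebGL1 loop bounds must be compile-time constants
const float RADIUS = 20.0;
void main() {
  float minDist = 1e9;
  for (int i = 0; i < NUM_VERTS; ++i) {
    vec2 uv = (vec2(float(i), 0.0) + 0.5) / vec2(float(NUM_VERTS), 1.0);
    vec2 p = texture2D(verts, uv).xy;
    minDist = min(minDist, distance(gl_FragCoord.xy, p));
  }
  if (minDist > RADIUS) discard;  // not near any vertex
  gl_FragColor = vec4(1.0);       // near one: draw a flat white disc
}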
You can sometimes get away with using GL_POINTS with a large gl_PointSize and a customized fragment shader.
An example is shown here, using the distance to the point center for the fragment alpha. (You could just as well sample a texture.)
Support for large point sizes might be limited though, so check that before deciding on this route.
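You can query the supported range up front; a one-liner sketch:
// [min, max] point size in pixels; the max varies a lot between GPUs
const range = gl.getParameter(gl.ALIASED_POINT_SIZE_RANGE);
console.log('point size range:', range[0], range[1]);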
var canvas = document.getElementById('cvs');
gl = canvas.getContext('webgl');
var vertices = [
-0.5, 0.75,0.0,
0.0, 0.5, 0.0,
-0.75,0.25,0.0,
];
var vertex_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
var vertCode =
`attribute vec3 coord;
void main(void) {
gl_Position = vec4(coord, 1.0);
gl_PointSize = 50.0;
}`;
var vertShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertShader, vertCode);
gl.compileShader(vertShader);
var fragCode =
`void main(void) {
mediump float ds = distance(gl_PointCoord.xy, vec2(0.5,0.5))*2.0;
mediump vec4 fg_color=vec4(0.0, 0.0, 0.0,1.0- ds);
gl_FragColor = fg_color;
}`;
var fragShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragShader, fragCode);
gl.compileShader(fragShader);
var shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertShader);
gl.attachShader(shaderProgram, fragShader);
gl.linkProgram(shaderProgram);
gl.useProgram(shaderProgram);
gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
var coord = gl.getAttribLocation(shaderProgram, "coord");
gl.vertexAttribPointer(coord, 3, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(coord);
gl.viewport(0,0,canvas.width,canvas.height);
gl.drawArrays(gl.POINTS, 0, 3);
<!doctype html>
<html>
<body>
<canvas width = "400" height = "400" id = "cvs"></canvas>
</body>
</html>

Billboarding C++

I got some code from my teacher that currently shows a 3D globe and a 2D particle system. The camera moves around in circles. The particle system is supposed to face the camera.
According to my lecture notes, I have to multiply the billboard by the inverse of the camera's view matrix. I would love to try that, but I have trouble using the variable that holds the view matrix.
#include "pch.h"
#include <Kore/Application.h>
#include <Kore/IO/FileReader.h>
#include <Kore/Math/Core.h>
#include <Kore/Math/Random.h>
#include <Kore/System.h>
#include <Kore/Input/Keyboard.h>
#include <Kore/Input/Mouse.h>
#include <Kore/Audio/Mixer.h>
#include <Kore/Graphics/Image.h>
#include <Kore/Graphics/Graphics.h>
#include <Kore/Log.h>
#include "ObjLoader.h"
#include "Collision.h"
#include "PhysicsWorld.h"
#include "PhysicsObject.h"
using namespace Kore;
// A simple particle implementation
class Particle {
public:
VertexBuffer* vb;
IndexBuffer* ib;
mat4 M;
// The current position
vec3 position;
// The current velocity
vec3 velocity;
// The remaining time to live
float timeToLive;
// The total time time to live
float totalTimeToLive;
// Is the particle dead (= ready to be re-spawned?)
bool dead;
void init(const VertexStructure& structure) {
vb = new VertexBuffer(4, structure,0);
float* vertices = vb->lock();
SetVertex(vertices, 0, -1, -1, 0, 0, 0);
SetVertex(vertices, 1, -1, 1, 0, 0, 1);
SetVertex(vertices, 2, 1, 1, 0, 1, 1);
SetVertex(vertices, 3, 1, -1, 0, 1, 0);
vb->unlock();
// Set index buffer
ib = new IndexBuffer(6);
int* indices = ib->lock();
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 0;
indices[4] = 2;
indices[5] = 3;
ib->unlock();
dead = true;
}
void Emit(vec3 pos, vec3 velocity, float timeToLive) {
position = pos;
this->velocity = velocity;
dead = false;
this->timeToLive = timeToLive;
totalTimeToLive = timeToLive;
}
Particle() {
}
void SetVertex(float* vertices, int index, float x, float y, float z, float u, float v) {
vertices[index* 8 + 0] = x;
vertices[index*8 + 1] = y;
vertices[index*8 + 2] = z;
vertices[index*8 + 3] = u;
vertices[index*8 + 4] = v;
vertices[index*8 + 5] = 0.0f;
vertices[index*8 + 6] = 0.0f;
vertices[index*8 + 7] = -1.0f;
}
void render(TextureUnit tex, Texture* image) {
Graphics::setTexture(tex, image);
Graphics::setVertexBuffer(*vb);
Graphics::setIndexBuffer(*ib);
Graphics::drawIndexedVertices();
}
void Integrate(float deltaTime) {
timeToLive -= deltaTime;
if (timeToLive < 0.0f) {
dead = true;
}
// Note: We are using no forces or gravity at the moment.
position += velocity * deltaTime;
// Build the matrix
M = mat4::Translation(position.x(), position.y(), position.z()) * mat4::Scale(0.2f, 0.2f, 0.2f);
}
};
class ParticleSystem {
public:
// The center of the particle system
vec3 position;
// The minimum coordinates of the emitter box
vec3 emitMin;
// The maximal coordinates of the emitter box
vec3 emitMax;
// The list of particles
Particle* particles;
// The number of particles
int numParticles;
// The spawn rate
float spawnRate;
// When should the next particle be spawned?
float nextSpawn;
ParticleSystem(int maxParticles, const VertexStructure& structure ) {
particles = new Particle[maxParticles];
numParticles = maxParticles;
for (int i = 0; i < maxParticles; i++) {
particles[i].init(structure);
}
spawnRate = 0.05f;
nextSpawn = spawnRate;
position = vec3(0.5f, 1.3f, 0.5f);
float b = 0.1f;
emitMin = position + vec3(-b, -b, -b);
emitMax = position + vec3(b, b, b);
}
void update(float deltaTime) {
// Do we need to spawn a particle?
nextSpawn -= deltaTime;
bool spawnParticle = false;
if (nextSpawn < 0) {
spawnParticle = true;
nextSpawn = spawnRate;
}
for (int i = 0; i < numParticles; i++) {
if (particles[i].dead) {
if (spawnParticle) {
EmitParticle(i);
spawnParticle = false;
}
}
particles[i].Integrate(deltaTime);
}
}
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V) {
Graphics::setBlendingMode(BlendingOperation::SourceAlpha, BlendingOperation::InverseSourceAlpha);
Graphics::setRenderState(RenderState::DepthWrite, false);
/************************************************************************/
/* Exercise 7 1.1 */
/************************************************************************/
/* Change the matrix V in such a way that the billboards are oriented towards the camera */
/************************************************************************/
/* Exercise 7 1.2 */
/************************************************************************/
/* Animate using at least one new control parameter */
for (int i = 0; i < numParticles; i++) {
// Skip dead particles
if (particles[i].dead) continue;
Graphics::setMatrix(mLocation, particles[i].M * V);
particles[i].render(tex, image);
}
Graphics::setRenderState(RenderState::DepthWrite, true);
}
float getRandom(float minValue, float maxValue) {
int randMax = 1000000;
int randInt = Random::get(0, randMax);
float r = (float) randInt / (float) randMax;
return minValue + r * (maxValue - minValue);
}
void EmitParticle(int index) {
// Calculate a random position inside the box
float x = getRandom(emitMin.x(), emitMax.x());
float y = getRandom(emitMin.y(), emitMax.y());
float z = getRandom(emitMin.z(), emitMax.z());
vec3 pos;
pos.set(x, y, z);
vec3 velocity(0, 0.3f, 0);
particles[index].Emit(pos, velocity, 3.0f);
}
};
namespace {
const int width = 1024;
const int height = 768;
double startTime;
Shader* vertexShader;
Shader* fragmentShader;
Program* program;
float angle = 0.0f;
// null terminated array of MeshObject pointers
MeshObject* objects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// null terminated array of PhysicsObject pointers
PhysicsObject* physicsObjects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// The view projection matrix aka the camera
mat4 P;
mat4 View;
mat4 PV;
vec3 cameraPosition;
MeshObject* sphere;
PhysicsObject* po;
PhysicsWorld physics;
// uniform locations - add more as you see fit
TextureUnit tex;
ConstantLocation pvLocation;
ConstantLocation mLocation;
ConstantLocation tintLocation;
Texture* particleImage;
ParticleSystem* particleSystem;
double lastTime;
void update() {
double t = System::time() - startTime;
double deltaT = t - lastTime;
//Kore::log(Info, "%f\n", deltaT);
lastTime = t;
Kore::Audio::update();
Graphics::begin();
Graphics::clear(Graphics::ClearColorFlag | Graphics::ClearDepthFlag, 0xff9999FF, 1000.0f);
Graphics::setFloat4(tintLocation, vec4(1, 1, 1, 1));
program->set();
angle += 0.3f * deltaT;
float x = 0 + 3 * Kore::cos(angle);
float z = 0 + 3 * Kore::sin(angle);
cameraPosition.set(x, 2, z);
//PV = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100) * mat4::lookAt(vec3(0, 2, -3), vec3(0, 2, 0), vec3(0, 1, 0));
P = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100);
View = mat4::lookAt(vec3(x, 2, z), vec3(0, 2, 0), vec3(0, 1, 0));
PV = P * View;
Graphics::setMatrix(pvLocation, PV);
// iterate the MeshObjects
MeshObject** current = &objects[0];
while (*current != nullptr) {
// set the model matrix
Graphics::setMatrix(mLocation, (*current)->M);
(*current)->render(tex);
++current;
}
// Update the physics
physics.Update(deltaT);
PhysicsObject** currentP = &physics.physicsObjects[0];
while (*currentP != nullptr) {
(*currentP)->UpdateMatrix();
Graphics::setMatrix(mLocation, (*currentP)->Mesh->M);
(*currentP)->Mesh->render(tex);
++currentP;
}
particleSystem->update(deltaT);
particleSystem->render(tex, particleImage, mLocation, View);
Graphics::end();
Graphics::swapBuffers();
}
void SpawnSphere(vec3 Position, vec3 Velocity) {
PhysicsObject* po = new PhysicsObject();
po->SetPosition(Position);
po->Velocity = Velocity;
po->Collider.radius = 0.2f;
po->Mass = 5;
po->Mesh = sphere;
// The impulse should carry the object forward
// Use the inverse of the view matrix
po->ApplyImpulse(Velocity);
physics.AddObject(po);
}
void keyDown(KeyCode code, wchar_t character) {
if (code == Key_Space) {
// The impulse should carry the object forward
// Use the inverse of the view matrix
vec4 impulse(0, 0.4, 2, 0);
mat4 viewI = View;
viewI.Invert();
impulse = viewI * impulse;
vec3 impulse3(impulse.x(), impulse.y(), impulse.z());
SpawnSphere(cameraPosition + impulse3 *0.2f, impulse3);
}
}
void keyUp(KeyCode code, wchar_t character) {
if (code == Key_Left) {
// ...
}
}
void mouseMove(int x, int y, int movementX, int movementY) {
}
void mousePress(int button, int x, int y) {
}
void mouseRelease(int button, int x, int y) {
}
void init() {
FileReader vs("shader.vert");
FileReader fs("shader.frag");
vertexShader = new Shader(vs.readAll(), vs.size(), VertexShader);
fragmentShader = new Shader(fs.readAll(), fs.size(), FragmentShader);
// This defines the structure of your Vertex Buffer
VertexStructure structure;
structure.add("pos", Float3VertexData);
structure.add("tex", Float2VertexData);
structure.add("nor", Float3VertexData);
program = new Program;
program->setVertexShader(vertexShader);
program->setFragmentShader(fragmentShader);
program->link(structure);
tex = program->getTextureUnit("tex");
pvLocation = program->getConstantLocation("PV");
mLocation = program->getConstantLocation("M");
tintLocation = program->getConstantLocation("tint");
objects[0] = new MeshObject("Base.obj", "Level/basicTiles6x6.png", structure);
objects[0]->M = mat4::Translation(0.0f, 1.0f, 0.0f);
sphere = new MeshObject("ball_at_origin.obj", "Level/unshaded.png", structure);
SpawnSphere(vec3(0, 2, 0), vec3(0, 0, 0));
Graphics::setRenderState(DepthTest, true);
Graphics::setRenderState(DepthTestCompare, ZCompareLess);
Graphics::setTextureAddressing(tex, U, Repeat);
Graphics::setTextureAddressing(tex, V, Repeat);
particleImage = new Texture("SuperParticle.png", true);
particleSystem = new ParticleSystem(100, structure);
}
}
int kore(int argc, char** argv) {
Application* app = new Application(argc, argv, width, height, 0, false, "Exercise7");
init();
app->setCallback(update);
startTime = System::time();
lastTime = 0.0f;
Kore::Mixer::init();
Kore::Audio::init();
Keyboard::the()->KeyDown = keyDown;
Keyboard::the()->KeyUp = keyUp;
Mouse::the()->Move = mouseMove;
Mouse::the()->Press = mousePress;
Mouse::the()->Release = mouseRelease;
app->start();
delete app;
return 0;
}
There's a comment where the teacher wants us to add the code.
The variable for the view matrix, "View", is in "namespace". I've only ever used a namespace as a library, but this one doesn't have a name. So how do I use it?
The comment says that we should use the matrix V. So do I just add V = inverse view matrix * model matrix to the code, and that removes the rotation?
I'm sorry for the stupid questions; it's supposed to be a class for beginners but it's really anything but. The lecture notes aren't very helpful when it comes to the programming part, and I've only found tutorials for OpenGL, Unity, or DirectX, and we're not using any of those.
Please help me, I need to hand this in by Saturday morning and I've already spent the last two days trying out code and I've got nothing so far!
You can find the whole thing here: https://github.com/TUDGameTechnology/Exercise7
You don't have to do anything special to access an unnamed namespace. This thread explains more.
You are most probably trying to reference View within methods that cannot see your namespace because of the order in which they are defined in your file.
This line in your update method:
particleSystem->render(tex, particleImage, mLocation, View);
is already passing View into the render method.
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V)
That means that in this case the mat4 V parameter is your camera's view matrix.
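For the exercise itself, the usual approach is to invert the view matrix and keep only its rotation part, then bake that into each particle's matrix. A sketch for ParticleSystem::render, assuming Kore's mat4 as used elsewhere in this file (Invert() appears in keyDown); Set(row, col, value) is a hypothetical element accessor, so substitute whatever your mat4 actually provides, and adjust the (0,3)/(1,3)/(2,3) translation slots to your matrix convention:
mat4 Vi = V;
Vi.Invert();          // inverse view = camera-to-world transform
Vi.Set(0, 3, 0.0f);   // hypothetical accessor: zero the translation
Vi.Set(1, 3, 0.0f);   // so only the inverse rotation remains,
Vi.Set(2, 3, 0.0f);   // otherwise the particles follow the camera
// then in the existing loop, orient each particle with Vi:
// Graphics::setMatrix(mLocation, particles[i].M * Vi);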

environment mapping using OpenGL and Cg

I wrote a program to implement environment mapping using OpenGL and the Cg shader language, but the result is not quite right. When calculating the color of the model, we blend the reflection with a decal texture. A uniform parameter called reflectivity allows the application to control how reflective the material is.
Firstly I list my fragment Cg code:
void main_f(float2 texCoord : TEXCOORD0,
float3 R : TEXCOORD1,
out float4 color : COLOR,
uniform float reflectivity,
uniform sampler2D decalMap,
uniform samplerCUBE environmentMap)
{
//fetch reflected environment color
float3 reflectedColor = texCUBE(environmentMap,R);
//fetch the decal base color
float3 decalColor = tex2D(decalMap,texCoord);
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
color.w = 1;
}
I set the uniform parameter reflectivity to 0.6. The result is:
As we can see, the color information from the decal texture is lost. There is only color information from the environment cube texture. And if I set reflectivity to 0, the model goes dark.
But if I change color.xyz in the fragment Cg code to:
color.xyz = decalColor;
I get the right result (only color from the decal texture):
And if I change color.xyz in the fragment Cg code to:
color.xyz = reflectedColor;
I also get the right result (only color from the environment cube texture):
So my question is:
why does it not work when I blend the color information from the decal texture with the color information from the environment cube texture using the Cg function lerp?
Lastly, I list my Cg vertex shader and cpp file:
vertex.cg:
void main_v(float4 position : POSITION,
float2 texCoord : TEXCOORD0,//decal texture
float3 normal : NORMAL,
out float4 oPosition : POSITION,
out float2 oTexCoord : TEXCOORD0,//out decal texture
out float3 R : TEXCOORD1,//reflective vector
uniform float3 eyePositionW,//eye position in world space
uniform float4x4 modelViewProj,
uniform float4x4 modelToWorld
)
{
modelViewProj = glstate.matrix.mvp;
oPosition = mul(modelViewProj,position);
oTexCoord = texCoord;
float3 positionW = mul(modelToWorld,position).xyz;
float3 N = mul((float3x3)modelToWorld,normal);
N = normalize(N);
float3 I = positionW - eyePositionW;//incident vector
R = reflect(I,N);
}
main.cpp:
#pragma comment(lib,"glew32.lib")
#pragma comment(lib,"GLAUX.LIB")
#pragma comment(lib,"cg.lib")
#pragma comment(lib,"cgGL.lib")
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/glaux.h>
#include <CG/cg.h>
#include <CG/cgGL.h>
#include "MonkeyHead.h"
#include <iostream>
#include <cmath>
using namespace std;
int loop;
/* Use enum to assign unique symbolic OpenGL texture names. */
enum {
TO_BOGUS = 0,
TO_DECAL,
TO_ENVIRONMENT,
};
const double myPi = 3.14159;
//for Cg shader
static CGcontext myCgContext;
static CGprofile myCgVertexProfile,myCgFragmentProfile;
static CGprogram myCgVertexProgram,myCgFragmentProgram;
static const char *myProgramName = "CgTest18CubeMapReflective",
*myVertexProgramFileName = "vertex.cg",
*myVertexProgramName = "main_v",
*myFragmentProgramFileName = "fragment.cg",
*myFragmentProgramName = "main_f";
static CGparameter myCgVertexParam_modelToWorld;
//bmp files for cube map
const char *bmpFile[6] = {"Data/1.bmp","Data/2.bmp","Data/3.bmp",
"Data/4.bmp","Data/5.bmp","Data/6.bmp"};
const char *decalBmpFile = "Data/decal.bmp";
static float eyeAngle = 0.53;
static float eyeHeight = 0.0f;
static float headSpain = 0.0f;
static const GLfloat vertex[4*6][3] = {
/* Positive X face. */
{ 1, -1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { 1, -1, 1 },
/* Negative X face. */
{ -1, -1, -1 }, { -1, 1, -1 }, { -1, 1, 1 }, { -1, -1, 1 },
/* Positive Y face. */
{ -1, 1, -1 }, { 1, 1, -1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Y face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, -1, 1 }, { -1, -1, 1 },
/* Positive Z face. */
{ -1, -1, 1 }, { 1, -1, 1 }, { 1, 1, 1 }, { -1, 1, 1 },
/* Negative Z face. */
{ -1, -1, -1 }, { 1, -1, -1 }, { 1, 1, -1 }, { -1, 1, -1 },
};
static float reflectivity = 0.6;
GLuint decalTexture;
bool animating = false;//enable animating or not
static void drawMonkeyHead()
{
static GLfloat *texCoords = NULL;
const int numVertices = sizeof(MonkeyHead_vertices)
/ (3 * sizeof(MonkeyHead_vertices[0]));
const float scaleFactor = 1.5;
//generate texcoords
texCoords = (GLfloat*)malloc(2 * numVertices * sizeof(GLfloat));
if (!texCoords)
{
cerr << "ERROR : Monkey head texcoords memory malloc failed !" << endl;
exit(1);
}
for (loop = 0;loop < numVertices;++loop)
{
texCoords[loop * 2] = scaleFactor * MonkeyHead_vertices[3 * loop];
texCoords[loop * 2 + 1] = scaleFactor * MonkeyHead_vertices[3 * loop + 1];
}
//use vertex array
//enable array
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
//assign array data
glVertexPointer(3,GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_vertices);
glNormalPointer(GL_FLOAT,3 * sizeof(GLfloat),MonkeyHead_normals);
glTexCoordPointer(2,GL_FLOAT,2 * sizeof(GLfloat),texCoords);
glDrawElements(GL_TRIANGLES,3 * MonkeyHead_num_of_triangles,
GL_UNSIGNED_SHORT,MonkeyHead_triangles);
}
//read bmp image file
AUX_RGBImageRec *LoadBMP(const char *FileName)
{
FILE *File = NULL;
if(!FileName)
return NULL;
File = fopen(FileName,"r");
if (File)
{
fclose(File);
return auxDIBImageLoad(FileName);
}
return NULL;
}
//load decal texture from a bmp file
int loadDecalTexture()
{
int status = 1;
AUX_RGBImageRec *TextureImage = NULL;
if ((TextureImage = LoadBMP(decalBmpFile)))
{
glGenTextures(1,&decalTexture);
glBindTexture(GL_TEXTURE_2D,decalTexture);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGB,TextureImage->sizeX,
TextureImage->sizeY,0,GL_RGB,GL_UNSIGNED_BYTE,
TextureImage->data);//specify the texture image
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);//set the filtering mode
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
}
else
status = 0;
if (TextureImage)
{
if (TextureImage->data)
free(TextureImage->data);
free(TextureImage);
}
return status;
}
//load cube map from 6 bmp files
int loadCubeMap()
{
int status = 1;
AUX_RGBImageRec *TextureImage[6] = {NULL,NULL,NULL,NULL,NULL,NULL};
for (loop = 0;loop < 6;++loop)
{
if (!(TextureImage[loop] = LoadBMP(bmpFile[loop])))
{
cout << "ERROR :load bmp file " << loop << " failed !" << endl;
status = 0;
}
}
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, 0, GL_RGB, TextureImage[0] ->sizeX, TextureImage[0] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[0] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, 0, GL_RGB, TextureImage[1] ->sizeX, TextureImage[1] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[1] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, 0, GL_RGB, TextureImage[2] ->sizeX, TextureImage[2] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[2] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, 0, GL_RGB, TextureImage[3] ->sizeX, TextureImage[3] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[3] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, 0, GL_RGB, TextureImage[4] ->sizeX, TextureImage[4] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[4] ->data);
glTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, 0, GL_RGB, TextureImage[5] ->sizeX, TextureImage[5] ->sizeY,
0, GL_RGB, GL_UNSIGNED_BYTE, TextureImage[5] ->data);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//free memory
for (loop = 0;loop < 6;++loop)
{
if (TextureImage[loop])
{
if (TextureImage[loop] ->data)
{
free(TextureImage[loop] ->data);
}
free(TextureImage[loop]);
}
}
return status;
}
//draw the surroundings as a cube with each face of
//the cube environment map applied.
void drawSurroundings(const GLfloat *eyePosition)
{
const float surroundingsDistance = 8;
glLoadIdentity();
gluLookAt(eyePosition[0],eyePosition[1],eyePosition[2],
0,0,0,0,1,0);
glScalef(surroundingsDistance,
surroundingsDistance,
surroundingsDistance);
glEnable(GL_TEXTURE_CUBE_MAP);
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
glTexEnvi(GL_TEXTURE_ENV,GL_TEXTURE_ENV_MODE,GL_REPLACE);
glBegin(GL_QUADS);
for (loop = 0;loop < 4 * 6;++loop)
{
glTexCoord3fv(vertex[loop]);
glVertex3fv(vertex[loop]);
}
glEnd();
}
static void checkForCgError(const char *situation)
{
CGerror error;
const char *string = cgGetLastErrorString(&error);
if (error != CG_NO_ERROR) {
cout << "ERROR : " << myProgramName << situation << string << endl;
if (error == CG_COMPILER_ERROR) {
cout << cgGetLastListing(myCgContext) << endl;
}
exit(1);
}
}
//init Cg shaders
void initCg()
{
myCgContext = cgCreateContext();
myCgVertexProfile = cgGLGetLatestProfile(CG_GL_VERTEX);
cgGLSetOptimalOptions(myCgVertexProfile);
checkForCgError("selecting vertex profile");
myCgVertexProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myVertexProgramFileName,
myCgVertexProfile,
myVertexProgramName,
NULL);
checkForCgError("Creating vertex Cg program from file");
cgGLLoadProgram(myCgVertexProgram);
checkForCgError("loading vertex program");
myCgFragmentProfile = cgGLGetLatestProfile(CG_GL_FRAGMENT);
cgGLSetOptimalOptions(myCgFragmentProfile);
checkForCgError("selecting fragment profile");
myCgFragmentProgram = cgCreateProgramFromFile(
myCgContext,
CG_SOURCE,
myFragmentProgramFileName,
myCgFragmentProfile,
myFragmentProgramName,
NULL);
checkForCgError("Creating fragment Cg program from file");
cgGLLoadProgram(myCgFragmentProgram);
checkForCgError("loading fragment program");
}
//compute rotate transformation matrix
void makeRotateMatrix(float angle,
float ax,float ay,float az,
float m[16])
{
float radians, sine, cosine, ab, bc, ca, tx, ty, tz;
float axis[3];
float mag;
axis[0] = ax;
axis[1] = ay;
axis[2] = az;
mag = sqrt(axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2]);
if (mag) {
axis[0] /= mag;
axis[1] /= mag;
axis[2] /= mag;
}
radians = angle * myPi / 180.0;
sine = sin(radians);
cosine = cos(radians);
ab = axis[0] * axis[1] * (1 - cosine);
bc = axis[1] * axis[2] * (1 - cosine);
ca = axis[2] * axis[0] * (1 - cosine);
tx = axis[0] * axis[0];
ty = axis[1] * axis[1];
tz = axis[2] * axis[2];
m[0] = tx + cosine * (1 - tx);
m[1] = ab + axis[2] * sine;
m[2] = ca - axis[1] * sine;
m[3] = 0.0f;
m[4] = ab - axis[2] * sine;
m[5] = ty + cosine * (1 - ty);
m[6] = bc + axis[0] * sine;
m[7] = 0.0f;
m[8] = ca + axis[1] * sine;
m[9] = bc - axis[0] * sine;
m[10] = tz + cosine * (1 - tz);
m[11] = 0;
m[12] = 0;
m[13] = 0;
m[14] = 0;
m[15] = 1;
}
//compute translation transformation matrix
static void makeTranslateMatrix(float x, float y, float z, float m[16])
{
m[0] = 1; m[1] = 0; m[2] = 0; m[3] = x;
m[4] = 0; m[5] = 1; m[6] = 0; m[7] = y;
m[8] = 0; m[9] = 0; m[10] = 1; m[11] = z;
m[12] = 0; m[13] = 0; m[14] = 0; m[15] = 1;
}
//multiply a float4x4 matrix by another float4x4 matrix
static void multMatrix(float dst[16],const float src1[16],const float src2[16])
{
for (int i = 0;i < 4;++i)
{
for (int j = 0;j < 4;++j)
{
dst[i * 4 + j] = src1[i * 4 + 0] * src2[0 * 4 + j] +
src1[i * 4 + 1] * src2[1 * 4 + j] +
src1[i * 4 + 2] * src2[2 * 4 + j] +
src1[i * 4 + 3] * src2[3 * 4 + j];
}
}
}
void init()
{
glewInit();
glClearColor(0.0,0.0,0.0,1.0);
glShadeModel(GL_SMOOTH);
glEnable(GL_DEPTH_TEST);
if (!loadDecalTexture())
{
cout << "ERROR : load decal texture from bmp file failed !" << endl;
exit(1);
}
glBindTexture(GL_TEXTURE_CUBE_MAP,TO_ENVIRONMENT);
if (!loadCubeMap())
{
cout << "ERROR : load cube map from bmp file failed !" << endl;
exit(1);
}
initCg();
}
void display()
{
const GLfloat eyePosition[4] = {6 * sin(eyeAngle),
eyeHeight,
6 * cos(eyeAngle),
1};
float tranlateMatrix[16],rotateMatrix[16],modelMatrix[16];
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
cgGLEnableProfile(myCgVertexProfile);
checkForCgError("enabling vertex profile");
cgGLEnableProfile(myCgFragmentProfile);
checkForCgError("enabling fragment profile");
cgGLBindProgram(myCgVertexProgram);
checkForCgError("binding vertex program");
cgGLBindProgram(myCgFragmentProgram);
checkForCgError("binding fragment program");
glLoadIdentity();
glTranslatef(0.0,0.0,-5.0);
glRotatef(headSpain,0,1,0);
//set some uniform parameters in Cg shader
cgGLSetParameter3fv(
cgGetNamedParameter(myCgVertexProgram,"eyePositionW"),
eyePosition);
checkForCgError("setting eyePositionW parameter");
makeRotateMatrix(headSpain,0,1,0,rotateMatrix);
makeTranslateMatrix(0.0,0.0,-5.0,tranlateMatrix);
multMatrix(modelMatrix,tranlateMatrix,rotateMatrix);
//set the Cg matrix parameter : modelToWorld
cgSetMatrixParameterfr(
cgGetNamedParameter(myCgVertexProgram,"modelToWorld"),
modelMatrix);
checkForCgError("setting modelToWorld parameter");
cgGLSetParameter1f(
cgGetNamedParameter(myCgFragmentProgram,"reflectivity"),
reflectivity);
checkForCgError("setting reflectivity parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"decalMap"),
decalTexture);
checkForCgError("setting decalTexture parameter");
cgGLSetTextureParameter(
cgGetNamedParameter(myCgFragmentProgram,"environmentMap"),
TO_ENVIRONMENT);
checkForCgError("setting environmentMap parameter");
drawMonkeyHead();
cgGLDisableProfile(myCgVertexProfile);
checkForCgError("disabling vertex profile");
cgGLDisableProfile(myCgFragmentProfile);
checkForCgError("disabling fragment profile");
drawSurroundings(eyePosition);
glutSwapBuffers();
}
static void idle()
{
headSpain += 0.5;
if (headSpain > 360)
{
headSpain -= 360;
}
glutPostRedisplay();
}
static void keyboard(unsigned char key,int x,int y)
{
switch(key)
{
case ' ':
animating = !animating;
if (animating)
{
glutIdleFunc(idle);
}
else
glutIdleFunc(NULL);
break;
case 'r':
reflectivity += 0.1;
if (reflectivity > 1.0)
{
reflectivity = 1.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 'R':
reflectivity -= 0.1;
if (reflectivity < 0.0)
{
reflectivity = 0.0;
}
cout << "reflectivity : " << reflectivity << endl;
glutPostRedisplay();
break;
case 27:
cgDestroyProgram(myCgVertexProgram);
cgDestroyContext(myCgContext);
exit(0);
break;
}
}
void reshape(int w,int h)
{
glViewport(0,0,(GLsizei)w,(GLsizei)h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0,1,1.0,20.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
int main(int argc,char** argv)
{
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowPosition(0,0);
glutInitWindowSize(600,600);
glutCreateWindow("CubeMapReflection");
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutKeyboardFunc(keyboard);
glutMainLoop();
return 0;
}
The first thing I see is that the lerp call needs to have its arguments reversed.
color.xyz = lerp(reflectedColor,decalColor,reflectivity);//change !!!!!!!!
should be
color.xyz = lerp(decalColor, reflectedColor, reflectivity);
because the lerp documentation says:
lerp(a, b, w) returns a when w = 0 and b when w = 1, and you want full decal when reflectivity = 0 and fully reflected when reflectivity = 1.
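For reference, lerp(a, b, w) computes a * (1 - w) + b * w, a straight linear blend from a to b as w goes from 0 to 1.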
I see that the effect you're trying to achieve is akin to GL_MODULATE. You will need to multiply the values together, not lerp between them. Try this; it should work and give you the effect you want.
color.xyz = (reflectedColor.xyz * reflectivity) * decalColor;