GLSL strange behavior with frag shader and player movement - C++

I'm creating a 2D top-down game in SFML where I'd like the player to only be able to see things within their 45-degree FOV. Currently my fragment shader looks as follows:
uniform sampler2D texture;
uniform vec2 pos;
uniform vec2 screenSize;
uniform float in_angle;

void main()
{
    vec2 fc = gl_FragCoord.xy / screenSize;
    float fov = radians(45.0);
    // move the player position into 0..1 screen space and flip y
    vec2 ndcCoords = (pos + (screenSize / 2.0)) / screenSize;
    ndcCoords.y = abs(ndcCoords.y - 1.0);
    // rotate the adjusted position by the player's angle
    float angle = radians(-in_angle + 90.0 + 45.0);
    float coT = cos(angle);
    float siT = sin(angle);
    vec2 adj;
    adj.x = coT * ndcCoords.x - siT * ndcCoords.y;
    adj.y = siT * ndcCoords.x + coT * ndcCoords.y;
    // angle between the fragment direction and the rotated position
    vec2 diff = normalize(ndcCoords - fc);
    float dist = acos(dot(diff, normalize(adj)));
    vec3 color = vec3(0.0);
    if (dist < fov / 2.0)
    {
        color = vec3(1.0);
    }
    gl_FragColor = vec4(color, 1.0) * texture2D(texture, gl_TexCoord[0].xy);
}
What this does is adjust the playerPos vec2 and rotate it so I can determine which fragcoords are within the player's FOV. However, when I move down without moving the mouse from directly above the player, the FOV shifts to the left/right without the player rotating at all. I've tried every solution I can think of, but I can't seem to stop it, nor can I find a solution to this online. Any suggestions would be appreciated.

A solution has arisen: instead of trying to rotate the object to get its normalized vec2 direction, a simpler method is to calculate the angle between a fragment and the player position, then form a difference by subtracting the player's rotation in radians, as shown below. This can then be adjusted for the coordinate space and compared against the player's FOV.
uniform sampler2D texture;
uniform vec2 pos;
uniform vec2 screenSize;
uniform float angle;

void main()
{
    float fov = radians(45.0);
    float pi = radians(180.0);
    vec3 color = vec3(0.2);
    vec2 st = gl_FragCoord.xy / screenSize.xy;
    // player position in 0..1 screen space (uniforms cannot be
    // assigned to, so use a local copy)
    vec2 p = (pos + (screenSize / 2.0)) / screenSize;
    // GLSL has no atan2; the two-argument atan is the equivalent
    float angleToObj = atan(st.y - p.y, st.x - p.x);
    float angleDiff = angleToObj - radians(-angle);
    // wrap the difference into [-pi, pi]
    if (angleDiff > pi)
        angleDiff -= 2.0 * pi;
    if (angleDiff < -pi)
        angleDiff += 2.0 * pi;
    if (abs(angleDiff) < fov / 2.0)
        color = vec3(1.0);
    gl_FragColor = vec4(color, 1.0) * texture2D(texture, gl_TexCoord[0].xy);
}

Related

Problem with normals when drawing instanced mesh

I'm rendering a sphere with instanced drawing while rotating the model-view matrix around the Y axis. It looks OK at the beginning, but at other angles things get worse. It looks to me like a problem with normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
Vertex shader:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;

uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;

varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;

void main() {
    vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
    v_position = transformedPosition;
    v_normal = u_normal_matrix * a_normal;
    v_color = a_color;
    v_coord = a_coord;
    gl_Position = u_projection_matrix * transformedPosition;
}
Fragment shader:
uniform sampler2D u_sampler;

varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;

void main() {
    vec3 lightPosition = vec3(0.0); // XXX
    // set diffuse and specular colors
    vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
    vec3 cSpecular = vec3(0.3);
    // lighting calculations
    vec3 N = normalize(v_normal);
    vec3 L = normalize(lightPosition - v_position.xyz);
    vec3 E = normalize(-v_position.xyz);
    vec3 H = normalize(L + E);
    // Calculate coefficients.
    float phong = max(dot(N, L), 0.0);
    const float kMaterialShininess = 20.0;
    const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
    float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
    // diffuse contribution
    vec3 diffuse = phong * cDiffuse;
    // specular contribution
    vec3 specular = blinn * cSpecular;
    gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL in the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
    float sectorStep = TWO_PI / sectorCount;
    float stackStep = PI / stackCount;

    for (int i = 0; i <= stackCount; ++i) {
        float stackAngle = HALF_PI - i * stackStep;
        float xy = radius * cosf(stackAngle);
        float z = radius * sinf(stackAngle);

        for (int j = 0; j <= sectorCount; ++j) {
            float sectorAngle = j * sectorStep;
            float x = xy * cosf(sectorAngle);
            float y = xy * sinf(sectorAngle);

            // on a sphere, the normal is simply the normalized position
            float nx = x / radius;
            float ny = y / radius;
            float nz = z / radius;

            float s = (float)j / sectorCount;
            float t = (float)i / stackCount;

            batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
        }
    }

    for (int i = 0; i < stackCount; ++i) {
        int k1 = i * (sectorCount + 1); // indices, so int rather than float
        int k2 = k1 + sectorCount + 1;

        for (int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
            if (i != 0) {
                if (frontFace == CCW) {
                    batch.addIndices(k1, k1 + 1, k2);
                } else {
                    batch.addIndices(k1, k2, k1 + 1);
                }
            }
            if (i != (stackCount - 1)) {
                if (frontFace == CCW) {
                    batch.addIndices(k1 + 1, k2 + 1, k2);
                } else {
                    batch.addIndices(k1 + 1, k2, k2 + 1);
                }
            }
        }
    }
}
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
    .setIdentity()
    .translate(0, -150, -600)
    .rotateY(clock()->getTime() * 0.5f);

State()
    .setShader(shader)
    .setShaderMatrix<MV>(camera.getMVMatrix())
    .setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
    .setShaderMatrix<NORMAL>(camera.getNormalMatrix())
    .apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework which provides among other things high level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source-code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it does the job of solving the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source-code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal-matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
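This works here precisely because the matrices involve no non-uniform scaling (as noted above). If non-uniform scaling were ever introduced, a sketch of the classic fix would be the inverse-transpose, assuming desktop GLSL 1.40+ where inverse() is available (WebGL 1 has no inverse(), so there the matrix would have to be computed on the CPU):

// sketch only: inverse() per vertex is expensive, so in practice this
// matrix is usually precomputed on the CPU once per instance
mat4 m = u_mv_matrix * a_matrix;
v_normal = mat3(transpose(inverse(m))) * a_normal;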

Fish-eye warping about mouse position - fragment shader

I'm trying to create a fish-eye effect, but only within a small radius around the mouse position. I've been able to modify this code to work around the mouse position (demo), but I can't figure out where the zooming is coming from. I'm expecting the output to warp the image similarly to this (ignore the color inversion for the sake of this question):
Relevant code:
// Check if within given radius of the mouse
vec2 diff = myUV - u_mouse - 0.5;
float distance = dot(diff, diff); // square of distance, saves a square-root

// Add fish-eye
if (distance <= u_radius_squared) {
    vec2 xy = 2.0 * (myUV - u_mouse) - 1.0;
    float d = length(xy * maxFactor);
    float z = sqrt(1.0 - d * d);
    float r = atan(d, z) / PI;
    float phi = atan(xy.y, xy.x);
    myUV.x = d * r * cos(phi) + 0.5 + u_mouse.x;
    myUV.y = d * r * sin(phi) + 0.5 + u_mouse.y;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor.rgb = tex;
This is my first shader, so other improvements besides fixing this issue are also welcome.
Compute the vector from the current fragment to the mouse and the length of the vector:
vec2 diff = myUV - u_mouse;
float distance = length(diff);
The new texture coordinate is the sum of the mouse position and the scaled direction vector:
myUV = u_mouse + normalize(diff) * u_radius * f(distance/u_radius);
For instance:
uniform float u_radius;
uniform vec2 u_mouse;

// myUV (a local copy of the interpolated texture coordinate), tMap and PI
// are assumed to be defined elsewhere in the shader
void main()
{
    vec2 diff = myUV - u_mouse;
    float distance = length(diff);
    if (distance <= u_radius)
    {
        // smooth ramp: 0 at the mouse position, 1 at the edge of the radius
        float scale = (1.0 - cos(distance / u_radius * PI * 0.5));
        myUV = u_mouse + normalize(diff) * u_radius * scale;
    }
    vec3 tex = texture2D(tMap, myUV).rgb;
    gl_FragColor = vec4(tex, 1.0);
}

Billboarding using Qt3D 2.0

I am looking for the best way to create a billboard in Qt3D. I would like a plane which faces the camera wherever it is and does not change size when the camera dollies forward or back. I have read how to do this using GLSL vertex and geometry shaders, but I am looking for the Qt3D way, unless custom shaders are the most efficient and best way of billboarding.
I have looked, and it appears I can set the matrix on a QTransform via properties, but it isn't clear to me how I would manipulate the matrix, or perhaps there is a better way? I am using the C++ API, but a QML answer would do; I could port it to C++.
If you want to draw just one billboard, you can add a plane and rotate it whenever the camera moves (a vertex-shader sketch of this one-plane case follows below).
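As a minimal sketch of that simple case, assuming a quad whose vertices carry a corner-offset attribute; the attribute and uniform names here are assumptions for illustration, not anything Qt3D provides by default:

#version 330
in vec2 corner;                // assumed per-vertex corner offset, (-1,-1)..(1,1)
uniform vec3 center;           // assumed: billboard position in world space
uniform float size;            // assumed: half-size; scale it by camera distance
                               // to keep a constant on-screen size
uniform mat4 viewMatrix;
uniform mat4 projectionMatrix;

void main() {
    // the camera's right and up axes in world space are the first two
    // rows of the view matrix's rotation part
    vec3 right = vec3(viewMatrix[0][0], viewMatrix[1][0], viewMatrix[2][0]);
    vec3 up    = vec3(viewMatrix[0][1], viewMatrix[1][1], viewMatrix[2][1]);
    vec3 worldPos = center + (right * corner.x + up * corner.y) * size;
    gl_Position = projectionMatrix * viewMatrix * vec4(worldPos, 1.0);
}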
However, if you want to do this efficiently with thousands or millions of billboards, I recommend using custom shaders. We did this to draw impostor spheres in Qt3D. We didn't use a geometry shader, though, because we were targeting systems that didn't support geometry shaders. Instead, we used only the vertex shader, placing four vertices at the origin and moving them in the shader. To create many copies, we used instanced drawing, offsetting each set of four vertices according to the position of its sphere. Finally, we moved each of the four vertices of each sphere so that they form a billboard that always faces the camera.
Start out by subclassing QGeometry and creating a buffer functor that creates four points, all at the origin (see spherespointgeometry.cpp). Give each point an ID that we can use later. If you use geometry shaders, the ID is not needed and you can get away with creating only one vertex.
class SpheresPointVertexDataFunctor : public Qt3DRender::QBufferDataGenerator
{
public:
    SpheresPointVertexDataFunctor()
    {
    }

    QByteArray operator ()() Q_DECL_OVERRIDE
    {
        const int verticesCount = 4;
        // vec3 pos + float vertexId
        const quint32 vertexSize = (3 + 1) * sizeof(float);

        QByteArray verticesData;
        verticesData.resize(vertexSize * verticesCount);
        float *verticesPtr = reinterpret_cast<float*>(verticesData.data());

        // Vertex 1
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 1
        *verticesPtr++ = 0.0;

        // Vertex 2
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 2
        *verticesPtr++ = 1.0;

        // Vertex 3
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 3
        *verticesPtr++ = 2.0;

        // Vertex 4
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        *verticesPtr++ = 0.0;
        // VertexID 4
        *verticesPtr++ = 3.0;

        return verticesData;
    }

    bool operator ==(const QBufferDataGenerator &other) const Q_DECL_OVERRIDE
    {
        Q_UNUSED(other);
        return true;
    }

    QT3D_FUNCTOR(SpheresPointVertexDataFunctor)
};
For the real positions, we used a separate QBuffer. We also set color and scale, but I have omitted those here (see spheredata.cpp):
void SphereData::setPositions(QVector<QVector3D> positions, QVector3D color, float scale)
{
    QByteArray ba;
    ba.resize(positions.size() * sizeof(QVector3D));
    QVector3D *vboData = reinterpret_cast<QVector3D *>(ba.data());
    for (int i = 0; i < positions.size(); i++) {
        vboData[i] = positions[i];
    }
    m_buffer->setData(ba);
    m_count = positions.count();
}
Then, in QML, we connected the geometry with the buffer in a QGeometryRenderer. This can also be done in C++, if you prefer (see Spheres.qml):
GeometryRenderer {
    id: spheresMeshInstanced
    primitiveType: GeometryRenderer.TriangleStrip
    enabled: instanceCount != 0
    instanceCount: sphereData.count
    geometry: SpheresPointGeometry {
        attributes: [
            Attribute {
                name: "pos"
                attributeType: Attribute.VertexAttribute
                vertexBaseType: Attribute.Float
                vertexSize: 3
                byteOffset: 0
                byteStride: (3 + 3 + 1) * 4
                divisor: 1
                buffer: sphereData ? sphereData.buffer : null
            }
        ]
    }
}
Finally, we created custom shaders to draw the billboards. Note that because we were drawing impostor spheres, the billboard size was increased to handle raytracing in the fragment shader from awkward angles. You likely do not need the 2.0*0.6 factor in general.
Vertex shader:
#version 330

in vec3 vertexPosition;
in float vertexId;
in vec3 pos;
in vec3 col;
in float scale;

uniform vec3 eyePosition = vec3(0.0, 0.0, 0.0);
uniform mat4 modelMatrix;
uniform mat4 mvp;

out vec3 modelSpherePosition;
out vec3 modelPosition;
out vec3 color;
out vec2 planePosition;
out float radius;

vec3 makePerpendicular(vec3 v) {
    if (v.x == 0.0 && v.y == 0.0) {
        if (v.z == 0.0) {
            return vec3(0.0, 0.0, 0.0);
        }
        return vec3(0.0, 1.0, 0.0);
    }
    return vec3(-v.y, v.x, 0.0);
}

void main() {
    vec3 position = vertexPosition + pos;
    color = col;
    radius = scale;
    modelSpherePosition = (modelMatrix * vec4(position, 1.0)).xyz;

    // build a basis perpendicular to the view direction
    vec3 view = normalize(position - eyePosition);
    vec3 right = normalize(makePerpendicular(view));
    vec3 up = cross(right, view);

    float texCoordX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
    float texCoordY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
    planePosition = vec2(texCoordX, texCoordY);

    // push each of the four vertices to its corner of the billboard
    position += 2.0*0.6*(-up - right)*(scale*float(vertexId==0.0));
    position += 2.0*0.6*(-up + right)*(scale*float(vertexId==1.0));
    position += 2.0*0.6*( up - right)*(scale*float(vertexId==2.0));
    position += 2.0*0.6*( up + right)*(scale*float(vertexId==3.0));

    vec4 modelPositionTmp = modelMatrix * vec4(position, 1.0);
    modelPosition = modelPositionTmp.xyz;
    gl_Position = mvp*vec4(position, 1.0);
}
Fragment shader:
#version 330

in vec3 modelPosition;
in vec3 modelSpherePosition;
in vec3 color;
in vec2 planePosition;
in float radius;

out vec4 fragColor;

uniform mat4 modelView;
uniform mat4 inverseModelView;
uniform mat4 inverseViewMatrix;
uniform vec3 eyePosition;
uniform vec3 viewVector;

void main(void) {
    vec3 rayDirection = eyePosition - modelPosition;
    vec3 rayOrigin = modelPosition - modelSpherePosition;

    vec3 E = rayOrigin;
    vec3 D = rayDirection;

    // Sphere equation
    //     x^2 + y^2 + z^2 = r^2
    // Ray equation is
    //     P(t) = E + t*D
    // We substitute the ray into the sphere equation to get
    //     (Ex + Dx*t)^2 + (Ey + Dy*t)^2 + (Ez + Dz*t)^2 = r^2
    float r2 = radius*radius;
    float a = D.x*D.x + D.y*D.y + D.z*D.z;
    float b = 2.0*E.x*D.x + 2.0*E.y*D.y + 2.0*E.z*D.z;
    float c = E.x*E.x + E.y*E.y + E.z*E.z - r2;

    // discriminant of the quadratic
    float d = b*b - 4.0*a*c;
    if (d < 0.0) {
        discard; // ray misses the sphere: not part of the impostor
    }

    float t = (-b + sqrt(d))/(2.0*a);
    vec3 sphereIntersection = rayOrigin + t * rayDirection;
    vec3 normal = normalize(sphereIntersection);
    vec3 normalDotCamera = color*dot(normal, normalize(rayDirection));

    float pi = 3.1415926535897932384626433832795;
    vec3 position = modelSpherePosition + sphereIntersection;

    // flat red
    fragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
It has been some time since we first implemented this, and there might be easier ways to do it now, but this should give you an idea of the pieces you need.

Oren-Nayar lighting in OpenGL (how to calculate view direction in fragment shader)

I'm trying to implement Oren-Nayar lighting in the fragment shader as shown here.
However, I'm getting some strange lighting effects on the terrain as shown below.
I am currently sending the shader the 'view direction' uniform as the camera's 'front' vector. I am not sure if this is correct, as moving the camera around changes the artifacts.
Multiplying the 'front' vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from some angles. It is particularly noticeable in dark areas and around the edges of the screen.
What could be causing this effect?
Artifact example
How the scene should look
Vertex Shader
#version 450

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;

out VS_OUT {
    vec3 normal;
} vert_out;

void main() {
    vert_out.normal = normal;
    gl_Position = vec4(position, 1.0);
}
Tessellation Control Shader
#version 450

layout(vertices = 3) out;

in VS_OUT {
    vec3 normal;
} tesc_in[];

out TESC_OUT {
    vec3 normal;
} tesc_out[];

void main() {
    if (gl_InvocationID == 0) {
        gl_TessLevelInner[0] = 1.0;
        gl_TessLevelInner[1] = 1.0;
        gl_TessLevelOuter[0] = 1.0;
        gl_TessLevelOuter[1] = 1.0;
        gl_TessLevelOuter[2] = 1.0;
        gl_TessLevelOuter[3] = 1.0;
    }
    tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation Evaluation Shader
#version 450

layout(triangles, equal_spacing) in;

in TESC_OUT {
    vec3 normal;
} tesc_in[];

out TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} tesc_out;

uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;

// barycentric interpolation across the patch
vec3 lerp(vec3 v0, vec3 v1, vec3 v2) {
    return (vec3(gl_TessCoord.x) * v0) +
           (vec3(gl_TessCoord.y) * v1) +
           (vec3(gl_TessCoord.z) * v2);
}

vec4 lerp(vec4 v0, vec4 v1, vec4 v2) {
    return (vec4(gl_TessCoord.x) * v0) +
           (vec4(gl_TessCoord.y) * v1) +
           (vec4(gl_TessCoord.z) * v2);
}

void main() {
    gl_Position = lerp(
        gl_in[0].gl_Position,
        gl_in[1].gl_Position,
        gl_in[2].gl_Position
    );
    tesc_out.normal = normal_matrix * lerp(
        tesc_in[0].normal,
        tesc_in[1].normal,
        tesc_in[2].normal
    );
    tesc_out.height = gl_Position.y;
    tesc_out.shadow_position = depth_matrix * gl_Position;
    gl_Position = model_view_perspective * gl_Position;
}
Fragment Shader
#version 450

in TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} frag_in;

out vec4 colour;

uniform vec3 view_direction;
uniform vec3 light_position;

#define PI 3.141592653589793

void main() {
    const vec3 ambient = vec3(0.1, 0.1, 0.1);
    const float roughness = 0.8;

    const vec4 water = vec4(0.0, 0.0, 0.8, 1.0);
    const vec4 sand = vec4(0.93, 0.87, 0.51, 1.0);
    const vec4 grass = vec4(0.0, 0.8, 0.0, 1.0);
    const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
    const vec4 snow = vec4(0.9, 0.9, 0.9, 1.0);

    // pick the base colour from the terrain height
    if (frag_in.height == 0.0) {
        colour = water;
    } else if (frag_in.height < 0.2) {
        colour = sand;
    } else if (frag_in.height < 0.575) {
        colour = grass;
    } else if (frag_in.height < 0.8) {
        colour = ground;
    } else {
        colour = snow;
    }

    vec3 normal = normalize(frag_in.normal);
    vec3 view_dir = normalize(view_direction);
    vec3 light_dir = normalize(light_position);

    float NdotL = dot(normal, light_dir);
    float NdotV = dot(normal, view_dir);
    float angleVN = acos(NdotV);
    float angleLN = acos(NdotL);

    float alpha = max(angleVN, angleLN);
    float beta = min(angleVN, angleLN);
    float gamma = dot(view_dir - normal * dot(view_dir, normal), light_dir - normal * dot(light_dir, normal));

    float roughnessSquared = roughness * roughness;
    float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));

    // calculate C1, C2 and C3
    float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
    float C2 = 0.45 * roughnessSquared9;
    if (gamma >= 0.0) {
        C2 *= sin(alpha);
    } else {
        C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
    }
    float powValue = (4.0 * alpha * beta) / (PI * PI);
    float C3 = 0.125 * roughnessSquared9 * powValue * powValue;

    // now calculate both main parts of the formula
    float A = gamma * C2 * tan(beta);
    float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);

    // put it all together
    float L1 = max(0.0, NdotL) * (C1 + A + B);

    // also calculate interreflection
    float twoBetaPi = 2.0 * beta / PI;
    float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) * (1.0 - gamma * twoBetaPi * twoBetaPi);

    colour = vec4(colour.xyz * (L1 + L2), 1.0);
}
First, I plugged your fragment shader into my renderer with my own view/normal/light vectors, and it works perfectly. So the problem has to be in the way you calculate those vectors.
Next, you say that you set view_dir to your camera's front vector. I assume you meant the camera's front vector in world space, which would be incorrect. Since you calculate the dot products with vectors in camera space, view_dir must be in camera space too. Trying vec3(0, 0, 1) would be an easy way to check that: if it works, we've found your problem.
However, using (0,0,1) for the view direction is not strictly correct when you do perspective projection, because the direction from the fragment to the camera then depends on the location of the fragment on the screen. The correct formula would be view_dir = normalize(-pos), where pos is the fragment's position in camera space (that is, with the model-view matrix applied but not the projection). Further, this quantity depends only on the fragment's location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
flen is the focal length of your camera, which you can calculate as flen = cot(fovx/2).
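Alternatively, a sketch of the same per-fragment idea using an interpolated camera-space position instead of gl_FragCoord (the position member here is an assumed addition to the TESE_OUT/frag_in blocks, not something in the original shaders):

// in the tessellation evaluation shader, before projection is applied:
//     tesc_out.position = (model_view * gl_Position).xyz;

// in the fragment shader: the camera sits at the origin of camera space,
// so the fragment-to-camera direction is just the negated position
vec3 view_dir = normalize(-frag_in.position);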
I know this is a long-dead thread, but I've been having the same problem (for several years), and finally found the solution...
It can be partially solved by fixing the orientation of the surface normals to match the polygon winding direction, but you can also get rid of the artifacts in the shader by changing the following two lines...
float angleVN = acos(NdotV);
float angleLN = acos(NdotL);
to this...
float angleVN = acos(clamp(NdotV, -1.0, 1.0));
float angleLN = acos(clamp(NdotL, -1.0, 1.0));
Tada! (Interpolation and floating-point error can push those dot products slightly outside [-1, 1], where acos returns NaN; the clamp prevents that.)

Is it possible to draw simple geometrical shapes in a Pixel Shader?

I'm currently learning about shaders and graphics pipelines and I was wondering if a pixel shader could be used to create, for example, a triangle or a more complex shape like a zigzag.
Could this be done without the use of a vertex shader?
The answer is yes! You can draw anything you want in a pixel shader by implementing a ray tracer. Here is some sample code:
uniform vec3 lightposition;
uniform vec3 cameraposition;
uniform float motion;

struct Ray
{
    vec3 org;
    vec3 dir;
};

struct Sphere
{
    vec3 Center;
    float Radius;
    vec4 Color;
    float MatID;
    float id;
};

struct Intersection
{
    float t;
    vec3 normal;
    vec3 hitpos;
    vec4 color;
    float objectid;
    float materialID;
};
bool sphereIntersect(Ray eyeray, Sphere sp, inout Intersection intersection)
{
    float t1 = 0.0;
    eyeray.dir = normalize(eyeray.dir);

    // quadratic coefficients for |org + t*dir - center|^2 = radius^2 (A = 1)
    float B = 2.0 * ((eyeray.dir.x * (eyeray.org.x - sp.Center.x)) + (eyeray.dir.y * (eyeray.org.y - sp.Center.y)) + (eyeray.dir.z * (eyeray.org.z - sp.Center.z)));
    float C = pow((eyeray.org.x - sp.Center.x), 2.0) + pow((eyeray.org.y - sp.Center.y), 2.0) + pow((eyeray.org.z - sp.Center.z), 2.0) - pow(sp.Radius, 2.0);
    float D = B*B - 4.0*C;

    if (D >= 0.0)
    {
        // take the nearer root; fall back to the farther one if it is behind us
        t1 = (-B - sqrt(D)) / 2.0;
        if (t1 < 0.0)
        {
            t1 = (-B + sqrt(D)) / 2.0;
            if (t1 < 0.0)
                return false;
        }
        if (t1 > 1e-2 && t1 < intersection.t)
        {
            intersection.t = t1;
            intersection.materialID = sp.MatID;
            intersection.hitpos = eyeray.org + t1 * eyeray.dir;
            intersection.normal = normalize(intersection.hitpos - sp.Center);
            intersection.color = sp.Color;
            intersection.objectid = sp.id;
            return true;
        }
    }
    // ray misses the sphere, or the hit was out of range
    // (the original code had no return on these paths, which is undefined behavior)
    return false;
}
void findIntersection(Ray ray, inout Intersection intersection)
{
    intersection.t = 1e10;
    intersection.materialID = 0.0;

    Sphere sp1 = Sphere(vec3(-2.0, 0.0, -5.0), 1.5, vec4(0.5, 0.1, 0.5, 1.0), 1.0, 1.0);
    Sphere sp2 = Sphere(vec3( 2.0, 0.0, -5.0), 1.5, vec4(0.5, 0.5, 0.1, 1.0), 1.0, 2.0);
    Sphere sp3 = Sphere(vec3( 0.0, 3.0, -5.0), 1.5, vec4(0.1, 0.5, 0.5, 1.0), 1.0, 3.0);

    sphereIntersect(ray, sp1, intersection);
    sphereIntersect(ray, sp2, intersection);
    sphereIntersect(ray, sp3, intersection);
}
vec4 CalculateColor(vec4 ambient, float shiness, vec3 intersection, vec3 normal);
Ray ReflectedRay(vec3 Normal, Ray EyeRay, vec3 intersection);

vec4 GetColor(Ray ray)
{
    Ray currentRay = ray;
    vec4 finalColor = vec4(0.0);

    for (int bounce = 1; bounce < 4; bounce++)
    {
        Intersection intersection;
        intersection.objectid = 0.0;
        findIntersection(currentRay, intersection);

        if (intersection.materialID == 0.0) // no object hit: return the background color
            return finalColor;
        else if (intersection.materialID == 1.0)
        {
            vec3 lv = lightposition - intersection.hitpos;
            vec3 nlv = normalize(lv);

            Intersection shadowIntersection;
            Ray shadowRay = Ray(intersection.hitpos, nlv);
            shadowIntersection.objectid = intersection.objectid;
            findIntersection(shadowRay, shadowIntersection);

            if (shadowIntersection.t > length(lv) || shadowIntersection.t < 1.0)
            {
                finalColor = finalColor + (1.0 / float(bounce)) * CalculateColor(intersection.color, 100.0, intersection.hitpos, intersection.normal);
            }
            else
            {
                finalColor = finalColor + (1.0 / float(bounce)) * intersection.color;
            }
            //currentRay = Ray(intersection.hitpos, reflect(ray.dir, intersection.normal));
            currentRay = ReflectedRay(intersection.normal, ray, intersection.hitpos);
        }
    }
    return finalColor;
}
Ray createRay(float ScreenWidth, float ScreenHeight)
{
    Ray toret;
    toret.org = cameraposition;

    // map the fragment to a point on a virtual screen plane at z = -3
    float left = -3.0;
    float bottom = -3.0;
    float screenZ = -3.0;
    float su = -3.0 + gl_FragCoord.x / ScreenWidth * 6.0; // gl_FragCoord gives the current pixel's x and y
    float sv = -3.0 + gl_FragCoord.y / ScreenHeight * 6.0;
    float sz = screenZ - cameraposition.z;

    toret.dir = normalize(vec3(su, sv, sz));
    //vec2 p = (gl_FragCoord.xy / resolution) * 2.0;
    //toret.dir = normalize(vec3(p, -1.0));
    return toret;
}

Ray ReflectedRay(vec3 Normal, Ray EyeRay, vec3 intersection)
{
    Ray reflection;
    reflection.dir = EyeRay.dir - 2.0 * Normal * dot(EyeRay.dir, Normal);
    reflection.org = intersection + reflection.dir * 0.01; // offset to avoid self-intersection
    return reflection;
}
vec4 CalculateColor(vec4 ambient, float shiness, vec3 intersection, vec3 normal)
{
    // light intensities
    vec3 Idifuse = vec3(1.0, 1.0, 1.0);
    vec3 Iambient = vec3(0.8, 0.8, 0.8);
    vec3 Ispecular = vec3(1.0, 1.0, 1.0);

    // material coefficients
    vec3 kDifuse = vec3(0.5, 0.5, 0.5);
    vec3 kSpecular = vec3(0.75, 0.6, 0.3);
    vec3 kAmbient = vec3(0.1, 0.2, 0.3);

    // vector calculations
    vec3 l = normalize(lightposition - intersection); // light vector
    vec3 n = normalize(normal);                       // normal at the hit point
    vec3 v = normalize(cameraposition - intersection); // view vector
    vec3 h = normalize(v + l);                        // half vector

    vec3 difuse = kDifuse * Idifuse * max(0.0, dot(n, l));
    vec3 specular = kSpecular * Ispecular * pow(max(0.0, dot(n, h)), shiness);
    vec3 color = ambient.xyz + difuse + specular;
    return vec4(color, 1.0);
}
void main()
{
    if (lightposition == vec3(0.0, 0.0, 0.0))
    {
        gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0); // debug: light uniform was not set
        return;
    }
    Ray eyeray = createRay(600.0, 600.0);
    gl_FragColor = GetColor(eyeray);
}
A useful technique is to use a fragment shader (I'm an OpenGL guy) with point sprites. Point sprites in OpenGL 3+ get rendered as squares of pixels, with the size of the square (gl_PointSize) set by the vertex shader.
In the fragment shader, gl_PointCoord has the x and y coords of this particular pixel within the square, from 0.0 to 1.0. So you can draw a circle by testing whether gl_PointCoord.x and gl_PointCoord.y are both within the radius and discarding if not, a framed square by checking that .x and .y are within some distance of the edge, and so on. It's classic maths: define a function(x, y) which returns true for points within the shape you want, false if not. A minimal sketch of the circle case follows below.
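For instance, a sketch of the circle case (GLSL 3.30 assumed; the output name and color are illustrative):

#version 330
out vec4 fragColor;

void main() {
    // gl_PointCoord runs from 0.0 to 1.0 across the sprite; recenter it
    vec2 p = gl_PointCoord - vec2(0.5);
    if (dot(p, p) > 0.25) // outside a disc of radius 0.5
        discard;
    fragColor = vec4(1.0, 0.5, 0.0, 1.0); // solid orange disc
}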
The Orange book, OpenGL Shading Language 3rd edition, has some examples (which in turn come from RenderMan) of how to draw such shapes.
Hope this helps.
What you want is called procedural texturing or procedural shading.
You can draw many different shapes with simple (and not so simple) math; a small sketch of a zigzag follows below.
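For example, a sketch of a zigzag line drawn purely in the fragment shader (the resolution uniform for the viewport size is an assumption):

uniform vec2 resolution; // assumed: viewport size in pixels

void main() {
    vec2 uv = gl_FragCoord.xy / resolution;
    float zig = abs(fract(uv.x * 5.0) * 2.0 - 1.0); // triangle wave across x
    float d = abs(uv.y - 0.5 - 0.1 * (zig - 0.5));  // distance to the zigzag curve
    gl_FragColor = vec4(vec3(step(d, 0.02)), 1.0);  // white zigzag on black
}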
Take a look at some examples here:
http://glslsandbox.com/
More can be found on Google.