Oren-Nayar lighting in OpenGL (how to calculate view direction in fragment shader) - c++

I'm trying to implement Oren-Nayar lighting in the fragment shader as shown here.
However, I'm getting some strange lighting effects on the terrain as shown below.
I am currently sending the shader the 'view direction' uniform as the camera's 'front' vector. I am not sure if this is correct, as moving the camera around changes the artifacts.
Multiplying the 'front' vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from some angles. It is particularly noticeable in dark areas and around the edges of the screen.
What could be causing this effect?
Artifact example
How the scene should look
Vertex Shader
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
out VS_OUT {
vec3 normal;
} vert_out;
void main() {
vert_out.normal = normal;
gl_Position = vec4(position, 1.0);
}
Tessellation Control Shader
#version 450
layout(vertices = 3) out;
in VS_OUT {
vec3 normal;
} tesc_in[];
out TESC_OUT {
vec3 normal;
} tesc_out[];
void main() {
if(gl_InvocationID == 0) {
gl_TessLevelInner[0] = 1.0;
gl_TessLevelInner[1] = 1.0;
gl_TessLevelOuter[0] = 1.0;
gl_TessLevelOuter[1] = 1.0;
gl_TessLevelOuter[2] = 1.0;
gl_TessLevelOuter[3] = 1.0;
}
tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation Evaluation Shader
#version 450
layout(triangles, equal_spacing) in;
in TESC_OUT {
vec3 normal;
} tesc_in[];
out TESE_OUT {
vec3 normal;
float height;
vec4 shadow_position;
} tesc_out;
uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;
vec3 lerp(vec3 v0, vec3 v1, vec3 v2) {
return (
(vec3(gl_TessCoord.x) * v0) +
(vec3(gl_TessCoord.y) * v1) +
(vec3(gl_TessCoord.z) * v2)
);
}
vec4 lerp(vec4 v0, vec4 v1, vec4 v2) {
return (
(vec4(gl_TessCoord.x) * v0) +
(vec4(gl_TessCoord.y) * v1) +
(vec4(gl_TessCoord.z) * v2)
);
}
void main() {
gl_Position = lerp(
gl_in[0].gl_Position,
gl_in[1].gl_Position,
gl_in[2].gl_Position
);
tesc_out.normal = normal_matrix * lerp(
tesc_in[0].normal,
tesc_in[1].normal,
tesc_in[2].normal
);
tesc_out.height = gl_Position.y;
tesc_out.shadow_position = depth_matrix * gl_Position;
gl_Position = model_view_perspective * gl_Position;
}
Fragment Shader
#version 450
in TESE_OUT {
vec3 normal;
float height;
vec4 shadow_position;
} frag_in;
out vec4 colour;
uniform vec3 view_direction;
uniform vec3 light_position;
#define PI 3.141592653589793
void main() {
const vec3 ambient = vec3(0.1, 0.1, 0.1);
const float roughness = 0.8;
const vec4 water = vec4(0.0, 0.0, 0.8, 1.0);
const vec4 sand = vec4(0.93, 0.87, 0.51, 1.0);
const vec4 grass = vec4(0.0, 0.8, 0.0, 1.0);
const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
const vec4 snow = vec4(0.9, 0.9, 0.9, 1.0);
if(frag_in.height == 0.0) {
colour = water;
} else if(frag_in.height < 0.2) {
colour = sand;
} else if(frag_in.height < 0.575) {
colour = grass;
} else if(frag_in.height < 0.8) {
colour = ground;
} else {
colour = snow;
}
vec3 normal = normalize(frag_in.normal);
vec3 view_dir = normalize(view_direction);
vec3 light_dir = normalize(light_position);
float NdotL = dot(normal, light_dir);
float NdotV = dot(normal, view_dir);
float angleVN = acos(NdotV);
float angleLN = acos(NdotL);
float alpha = max(angleVN, angleLN);
float beta = min(angleVN, angleLN);
float gamma = dot(view_dir - normal * dot(view_dir, normal), light_dir - normal * dot(light_dir, normal));
float roughnessSquared = roughness * roughness;
float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));
// calculate C1, C2 and C3
float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
float C2 = 0.45 * roughnessSquared9;
if(gamma >= 0.0) {
C2 *= sin(alpha);
} else {
C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
}
float powValue = (4.0 * alpha * beta) / (PI * PI);
float C3 = 0.125 * roughnessSquared9 * powValue * powValue;
// now calculate both main parts of the formula
float A = gamma * C2 * tan(beta);
float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);
// put it all together
float L1 = max(0.0, NdotL) * (C1 + A + B);
// also calculate interreflection
float twoBetaPi = 2.0 * beta / PI;
float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) * (1.0 - gamma * twoBetaPi * twoBetaPi);
colour = vec4(colour.xyz * (L1 + L2), 1.0);
}

First, I plugged your fragment shader into my renderer with my own view/normal/light vectors and it works perfectly, so the problem has to be in the way you calculate those vectors.
Next, you say that you set view_dir to your camera's front vector. I assume you meant the camera's front vector in world space, which would be incorrect. Since you calculate the dot products with vectors in camera space, view_dir must be in camera space too; vec3(0, 0, 1) would be an easy way to check that. If it works, we've found your problem.
However, using (0,0,1) for the view direction is not strictly correct when you do perspective projection, because the direction from the fragment to the camera then depends on the location of the fragment on the screen. The correct formula then would be view_dir = normalize(-pos) where pos is the fragment's position in camera space (that is with model-view matrix applied without the projection). Further, this quantity now depends only on the fragment location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
flen is the focal length of your camera, which you can calculate as flen = cot(fovx/2).
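Put together, a minimal sketch of the per-fragment version (assuming you also pass the camera-space position out of the tessellation evaluation shader as a new output, e.g. frag_in.view_position, which is not in your current code):
// Tessellation evaluation shader: assumed extra output, written before the final
// model_view_perspective multiply, e.g.
//     tesc_out.view_position = (model_view * gl_Position).xyz;
// Fragment shader: direction from the fragment towards the camera, in camera space.
vec3 view_dir = normalize(-frag_in.view_position);
With that in place, the view_direction uniform is no longer needed.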

I know this is a long-dead thread, but I've been having the same problem (for several years) and finally found the solution...
It can be partially solved by fixing the orientation of the surface normals to match the polygon winding direction, but you can also get rid of the artifacts in the shader by changing the following two lines...
float angleVN = acos(NdotV);
float angleLN = acos(NdotL);
to this...
float angleVN = acos(clamp(NdotV, -1.0, 1.0));
float angleLN = acos(clamp(NdotL, -1.0, 1.0));
The dot products can drift slightly outside [-1, 1] due to floating-point error, and acos of such a value is undefined (typically NaN), which shows up as the artifacts. Clamping prevents it. Tada!

Related

Problem with normals when drawing instanced mesh

I'm rendering a sphere with instanced drawing, while rotating the model-view-matrix around the Y axis.
It looks ok at the beginning:
But at another angle, things get worse:
It looks to me like a problem with normals. Currently, I'm calculating the normal-matrix from my model-view-matrix and then pass it to the shader, which is doing phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
v_position = transformedPosition;
v_normal = u_normal_matrix * a_normal;
v_color = a_color;
v_coord = a_coord;
gl_Position = u_projection_matrix * transformedPosition;
}
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
vec3 lightPosition = vec3(0.0); // XXX
// set diffuse and specular colors
vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
vec3 cSpecular = vec3(0.3);
// lighting calculations
vec3 N = normalize(v_normal);
vec3 L = normalize(lightPosition - v_position.xyz);
vec3 E = normalize(-v_position.xyz);
vec3 H = normalize(L + E);
// Calculate coefficients.
float phong = max(dot(N, L), 0.0);
const float kMaterialShininess = 20.0;
const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
// diffuse coefficient
vec3 diffuse = phong * cDiffuse;
// specular coefficient
vec3 specular = blinn * cSpecular;
gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL on the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
float sectorStep = TWO_PI / sectorCount;
float stackStep = PI / stackCount;
for(int i = 0; i <= stackCount; ++i) {
float stackAngle = HALF_PI - i * stackStep;
float xy = radius * cosf(stackAngle);
float z = radius * sinf(stackAngle);
for(int j = 0; j <= sectorCount; ++j) {
float sectorAngle = j * sectorStep;
float x = xy * cosf(sectorAngle);
float y = xy * sinf(sectorAngle);
float nx = x / radius;
float ny = y / radius;
float nz = z / radius;
float s = (float)j / sectorCount;
float t = (float)i / stackCount;
batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
}
}
for(int i = 0; i < stackCount; ++i) {
float k1 = i * (sectorCount + 1);
float k2 = k1 + sectorCount + 1;
for(int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
if (i != 0) {
if (frontFace == CCW) {
batch.addIndices(k1, k1 + 1, k2);
} else {
batch.addIndices(k1, k2, k1 + 1);
}
}
if (i != (stackCount - 1)) {
if (frontFace == CCW) {
batch.addIndices(k1 + 1, k2 + 1, k2);
} else {
batch.addIndices(k1 + 1, k2, k2 + 1);
}
}
}
}
}
Regarding the transformation matrices, it works as follow:
camera.getMVMatrix()
.setIdentity()
.translate(0, -150, -600)
.rotateY(clock()->getTime() * 0.5f);
State()
.setShader(shader)
.setShaderMatrix<MV>(camera.getMVMatrix())
.setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
.setShaderMatrix<NORMAL>(camera.getNormalMatrix())
.apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework which provides among other things high level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source-code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method.
But in essence, the solution I proposed in my answer is still valid (or at least it solves the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source-code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal-matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
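If non-uniform scaling ever does creep in, transforming the normal as a direction like this will skew it. A hedged sketch of the usual alternative (note that matrix inverse() only exists in newer GLSL, roughly GLSL 1.40+ / WebGL 2, so on OpenGL 2.1 / WebGL 1 you would compute this matrix on the CPU per instance instead):
// Normal matrix derived per vertex; only valid where inverse() is available in the shader.
v_normal = mat3(transpose(inverse(u_mv_matrix * a_matrix))) * a_normal;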

Billboarding using Qt3D 2.0

I am looking for the best way to create a billboard in Qt3D. I would like a plane which faces the camera wherever it is and does not change size when the camera dollies forward or back. I have read how to do this using GLSL vertex and geometry shaders, but I am looking for the Qt3D way, unless custom shaders are the most efficient and best way of billboarding.
I have looked, and it appears I can set the matrix on a QTransform via properties, but it isn't clear to me how I would manipulate the matrix, or perhaps there is a better way? I am using the C++ API, but a QML answer would do; I could port it to C++.
If you want to draw just one billboard, you can add a plane and rotate it whenever the camera moves. However, if you want to do this efficiently with thousands or millions of billboards, I recommend using custom shaders. We did this to draw impostor spheres in Qt3D.
However, we didn't use a geometry shader because we were targeting systems that didn't support geometry shaders. Instead, we used only the vertex shader by placing four vertices in the origin and moved these on the shader. To create many copies, we used instanced drawing. We moved each set of four vertices according to the positions of the spheres. Finally, we moved each of the four vertices of each sphere such that they result in a billboard that is always facing the camera.
Start out by subclassing QGeometry and create a buffer functor that creates four points, all at the origin (see spherespointgeometry.cpp). Give each point an ID that we can use later. If you use geometry shaders, the ID is not needed and you can get away with creating only one vertex.
class SpheresPointVertexDataFunctor : public Qt3DRender::QBufferDataGenerator
{
public:
SpheresPointVertexDataFunctor()
{
}
QByteArray operator ()() Q_DECL_OVERRIDE
{
const int verticesCount = 4;
// vec3 pos + float vertexId
const quint32 vertexSize = (3+1) * sizeof(float);
QByteArray verticesData;
verticesData.resize(vertexSize*verticesCount);
float *verticesPtr = reinterpret_cast<float*>(verticesData.data());
// Vertex 1
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 1
*verticesPtr++ = 0.0;
// Vertex 2
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 2
*verticesPtr++ = 1.0;
// Vertex 3
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID3
*verticesPtr++ = 2.0;
// Vertex 4
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
*verticesPtr++ = 0.0;
// VertexID 4
*verticesPtr++ = 3.0;
return verticesData;
}
bool operator ==(const QBufferDataGenerator &other) const Q_DECL_OVERRIDE
{
Q_UNUSED(other);
return true;
}
QT3D_FUNCTOR(SpheresPointVertexDataFunctor)
};
For the real positions, we used a separate QBuffer. We also set color and scale, but I have omitted those here (see spheredata.cpp):
void SphereData::setPositions(QVector<QVector3D> positions, QVector3D color, float scale)
{
QByteArray ba;
ba.resize(positions.size() * sizeof(QVector3D));
QVector3D *vboData = reinterpret_cast<QVector3D *>(ba.data());
for(int i=0; i<positions.size(); i++) {
QVector3D &position = vboData[i];
position = positions[i];
}
m_buffer->setData(ba);
m_count = positions.count();
}
Then, in QML, we connected the geometry with the buffer in a QGeometryRenderer. This can also be done in C++, if you prefer (see Spheres.qml):
GeometryRenderer {
id: spheresMeshInstanced
primitiveType: GeometryRenderer.TriangleStrip
enabled: instanceCount != 0
instanceCount: sphereData.count
geometry: SpheresPointGeometry {
attributes: [
Attribute {
name: "pos"
attributeType: Attribute.VertexAttribute
vertexBaseType: Attribute.Float
vertexSize: 3
byteOffset: 0
byteStride: (3 + 3 + 1) * 4
divisor: 1
buffer: sphereData ? sphereData.buffer : null
}
]
}
}
Finally, we created custom shaders to draw the billboards. Note that because we were drawing impostor spheres, the billboard size was increased to handle raytracing in the fragment shader from awkward angles. You likely do not need the 2.0*0.6 factor in general.
Vertex shader:
#version 330
in vec3 vertexPosition;
in float vertexId;
in vec3 pos;
in vec3 col;
in float scale;
uniform vec3 eyePosition = vec3(0.0, 0.0, 0.0);
uniform mat4 modelMatrix;
uniform mat4 mvp;
out vec3 modelSpherePosition;
out vec3 modelPosition;
out vec3 color;
out vec2 planePosition;
out float radius;
vec3 makePerpendicular(vec3 v) {
if(v.x == 0.0 && v.y == 0.0) {
if(v.z == 0.0) {
return vec3(0.0, 0.0, 0.0);
}
return vec3(0.0, 1.0, 0.0);
}
return vec3(-v.y, v.x, 0.0);
}
void main() {
vec3 position = vertexPosition + pos;
color = col;
radius = scale;
modelSpherePosition = (modelMatrix * vec4(position, 1.0)).xyz;
vec3 view = normalize(position - eyePosition);
vec3 right = normalize(makePerpendicular(view));
vec3 up = cross(right, view);
float texCoordX = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==2.0));
float texCoordY = 1.0 - 2.0*(float(vertexId==0.0) + float(vertexId==1.0));
planePosition = vec2(texCoordX, texCoordY);
position += 2*0.6*(-up - right)*(scale*float(vertexId==0.0));
position += 2*0.6*(-up + right)*(scale*float(vertexId==1.0));
position += 2*0.6*(up - right)*(scale*float(vertexId==2.0));
position += 2*0.6*(up + right)*(scale*float(vertexId==3.0));
vec4 modelPositionTmp = modelMatrix * vec4(position, 1.0);
modelPosition = modelPositionTmp.xyz;
gl_Position = mvp*vec4(position, 1.0);
}
Fragment shader:
#version 330
in vec3 modelPosition;
in vec3 modelSpherePosition;
in vec3 color;
in vec2 planePosition;
in float radius;
out vec4 fragColor;
uniform mat4 modelView;
uniform mat4 inverseModelView;
uniform mat4 inverseViewMatrix;
uniform vec3 eyePosition;
uniform vec3 viewVector;
void main(void) {
vec3 rayDirection = eyePosition - modelPosition;
vec3 rayOrigin = modelPosition - modelSpherePosition;
vec3 E = rayOrigin;
vec3 D = rayDirection;
// Sphere equation
// x^2 + y^2 + z^2 = r^2
// Ray equation is
// P(t) = E + t*D
// We substitute ray into sphere equation to get
// (Ex + Dx * t)^2 + (Ey + Dy * t)^2 + (Ez + Dz * t)^2 = r^2
float r2 = radius*radius;
float a = D.x*D.x + D.y*D.y + D.z*D.z;
float b = 2.0*E.x*D.x + 2.0*E.y*D.y + 2.0*E.z*D.z;
float c = E.x*E.x + E.y*E.y + E.z*E.z - r2;
// discriminant of sphere equation
float d = b*b - 4.0*a*c;
if(d < 0.0) {
discard;
}
float t = (-b + sqrt(d))/(2.0*a);
vec3 sphereIntersection = rayOrigin + t * rayDirection;
vec3 normal = normalize(sphereIntersection);
vec3 normalDotCamera = color*dot(normal, normalize(rayDirection));
float pi = 3.1415926535897932384626433832795;
vec3 position = modelSpherePosition + sphereIntersection;
// flat red
fragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
It has been some time since we first implemented this, and there might be easier ways to do it now, but this should give you an idea of the pieces you need.

Is it possible to draw simple geometrical shapes in a Pixel Shader?

I'm currently learning about shaders and graphics pipelines and I was wondering if a pixel shader could be used to create, for example, a triangle or a more complex shape like a zigzag.
Could this be done without the use of a vertex shader?
The answer is yes! You can draw anything you want in a pixel shader by implementing a ray tracer. Here is some sample code:
uniform vec3 lightposition;
uniform vec3 cameraposition;
uniform float motion;
struct Ray
{
vec3 org;
vec3 dir;
};
struct Sphere
{
vec3 Center;
float Radius;
vec4 Color;
float MatID;
float id;
};
struct Intersection
{
float t;
vec3 normal;
vec3 hitpos;
vec4 color;
float objectid;
float materialID;
};
bool sphereIntersect(Ray eyeray, Sphere sp, inout Intersection intersection)
{
float t1=0.0;
eyeray.dir = normalize(eyeray.dir);
float B = 2.0 *( ( eyeray.dir.x * (eyeray.org.x - sp.Center.x ) )+ ( eyeray.dir.y *(eyeray.org.y - sp.Center.y )) + ( eyeray.dir.z * (eyeray.org.z - sp.Center.z ) ));
float C = pow((eyeray.org.x - sp.Center.x),2.0) + pow((eyeray.org.y - sp.Center.y),2.0) + pow((eyeray.org.z - sp.Center.z),2.0) - pow(sp.Radius,2.0);
float D = B*B - 4.0*C ;
if(D>=0.0)
{
t1= (-B - pow(D, .5)) / 2.0;
if (t1 < 0.0)
{
t1 = (-B + pow(D, .5)) / 2.0;
if( t1 < 0.0)
return false;
else
{
if (t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
{
if(t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
return false;
}
void findIntersection(Ray ray, inout Intersection intersection)
{
intersection.t = 1e10;
intersection.materialID = 0.0;
Sphere sp1 = Sphere(vec3(-2.0,0.0,-5.0),1.5,vec4(0.5, 0.1, 0.5, 1.0),1.0,1.0);
Sphere sp2 = Sphere(vec3( 2.0,0.0,-5.0),1.5,vec4(0.5,0.5,0.1,1.0),1.0,2.0);
Sphere sp3 = Sphere(vec3( 0.0,3.0,-5.0),1.5,vec4(0.1,0.5,0.5,1.0),1.0,3.0);
sphereIntersect(ray, sp1, intersection);
sphereIntersect(ray, sp2, intersection);
sphereIntersect(ray, sp3, intersection);
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal);
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection);
vec4 GetColor(Ray ray)
{
Ray currentRay = ray;
vec4 finalColor = vec4(0.0);
for(int bounce = 1 ; bounce < 4 ; bounce++)
{
Intersection intersection;
intersection.objectid = 0.0;
findIntersection(currentRay, intersection);
if (intersection.materialID == 0.0) // We could not find any object. We return the background color
return finalColor;
else if (intersection.materialID == 1.0)
{
vec3 lv = lightposition - intersection.hitpos;
vec3 nlv = normalize(lv);
Intersection shadowIntersection;
Ray shadowRay = Ray(intersection.hitpos, nlv);
shadowIntersection.objectid = intersection.objectid;
findIntersection(shadowRay, shadowIntersection);
if (shadowIntersection.t > length(lv) || shadowIntersection.t < 1)
{
finalColor = finalColor + float(1.0f/bounce) * CalculateColor(intersection.color, 100.0, intersection.hitpos, intersection.normal);
}
else
{
finalColor = finalColor + float(1.0f/bounce) * intersection.color;
}
//currentRay = Ray(intersection.hitpos, reflect(ray.dir, intersection.normal));
currentRay = ReflectedRay(intersection.normal,ray,intersection.hitpos);
}
}
return finalColor;
}
Ray createRay(float ScreenWidth,float ScreenHeight)
{
Ray toret;
toret.org = cameraposition;
float left = -3.0;
float bottom = -3.0;
float screenZ = -3.0;
float su = -3.0 + gl_FragCoord.x/ScreenWidth * 6; //gl_FragCoord gives you the current x and y component of your current pixel
float sv = -3.0 + gl_FragCoord.y/ScreenHeight * 6;
float sz = screenZ - cameraposition.z;
toret.dir = normalize(vec3(su,sv,sz));
//vec2 p = (gl_FragCoord.xy/resolution) * 2 ;
//toret.dir = normalize(vec3(p, -1.0));
return toret;
}
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection)
{
Ray reflection;
reflection.dir = EyeRay.dir - 2 * Normal * dot(EyeRay.dir,Normal);
reflection.org = intersection + reflection.dir * 0.01;
return reflection;
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal)
{
//intensities
vec3 Idifuse = vec3(1, 1, 1);
vec3 Iambient = vec3(0.8, 0.8, 0.8);
vec3 Ispecular = vec3(1,1,1);
vec3 kDifuse = vec3(0.5,0.5,0.5); //for difuse
vec3 kSpecular = vec3(0.75, 0.6, 0.3); //for specular
vec3 kAmbient = vec3(0.1, 0.2, 0.3); //for ambient
//vec4 kSpecular = vec4(0.5,0.5,0.5,1.0);
//vec4 kDifuse = vec4(0.5,0.5,0.5,1.0);
vec3 ColorDifuse = max(dot(normal,lightposition),0.0) * kDifuse; // note: unused below
//vector calculations
vec3 l = normalize(lightposition - intersection); //light vector
vec3 n = normalize(normal); // normalVector of point in the sea
vec3 v = normalize(cameraposition - intersection); // view Vector
vec3 h = normalize(v + l); // half Vector
vec3 difuse = kDifuse * Idifuse * max(0.0, dot(n, l));
vec3 specular = kSpecular * Ispecular * pow(max(0.0, dot(n, h)), shiness);
vec3 color = ambient.xyz + difuse + specular;
return vec4(color,1.0);
}
void main()
{
if(lightposition == vec3(0.0,0.0,0.0))
gl_FragColor = vec4(0.0,1.0,0.0,1.0);
Ray eyeray = createRay(600.0,600.0);
gl_FragColor = GetColor(eyeray);
}
A useful technique is to use a fragment shader (I'm an OpenGL guy) with point sprites. Point sprites in OpenGL 3+ get rendered as squares of pixels, with the size of the square (gl_PointSize) set by the vertex shader.
In the fragment shader, gl_PointCoord has the x and y coords of this particular pixel within the square, from 0.0 to 1.0. So you can draw a circle by testing whether gl_PointCoord.x and gl_PointCoord.y are both within the radius and discarding if not, a framed square by checking that .x and .y are within some distance of the edge, and so on. It's classic maths: define a function f(x, y) that returns true for points within the shape you want and false otherwise.
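For instance, a minimal fragment-shader sketch of the circle case (illustrative only; it assumes GL_PROGRAM_POINT_SIZE is enabled and the vertex shader sets gl_PointSize):
#version 330
out vec4 fragColor;
void main() {
// gl_PointCoord runs from 0.0 to 1.0 across the sprite; re-centre so the middle is (0, 0).
vec2 p = gl_PointCoord - vec2(0.5);
// Keep only pixels inside a circle of radius 0.5.
if (dot(p, p) > 0.25)
discard;
fragColor = vec4(1.0, 0.5, 0.0, 1.0); // a solid orange disc
}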
The Orange book, OpenGL Shading Language 3rd edition, has some examples (which in turn come from RenderMan) of how to draw such shapes.
Hope this helps.
What you want is called procedural textures or procedural shading.
You can draw different shapes with simple (and not so simple) math.
Take a look for some examples here:
http://glslsandbox.com/
You can find more on Google.
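To make this concrete, here is a minimal full-screen fragment-shader sketch that draws a zigzag band (illustrative only; it assumes the host draws a full-screen quad and supplies a resolution uniform, as glslsandbox does):
#version 330
uniform vec2 resolution;
out vec4 fragColor;
void main() {
vec2 uv = gl_FragCoord.xy / resolution; // 0..1 across the screen
float zig = abs(fract(uv.x * 8.0) - 0.5); // triangle wave along x
// White inside a thin band that follows the zigzag, black elsewhere.
float band = step(abs(uv.y - (0.4 + 0.2 * zig)), 0.02);
fragColor = vec4(vec3(band), 1.0);
}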

Shadowmapping always produces shadows beyond far plane

I am working on the beginnings of omnidirectional shadow mapping in my engine. For now I am only producing one shadowmap as a test. I am getting an odd result when using my current shaders. Here is a screenshot which shows the problem:
I am using a near value of 0.5 and a far value of 5.0 in the projection matrix for the shadowmap render. As near as I can tell, any value with a light-space z larger than my far plane distance is being computed by my fragment shader as in shadow.
This is my fragment shader:
in vec2 st;
uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;
uniform sampler2D shadowmapTexture;
uniform mat4 invProj;
uniform mat4 lightProj;
uniform vec3 lightPosition;
out vec3 color;
void main () {
vec3 clipSpaceCoords;
clipSpaceCoords.xy = st.xy * 2.0 - 1.0;
clipSpaceCoords.z = texture(depthTexture, st).x * 2.0 - 1.0;
vec4 position = invProj * vec4(clipSpaceCoords,1.0);
position.xyz /= position.w;
vec4 lightSpace = lightProj * vec4(position.xyz,1.0);
lightSpace.xyz /= lightSpace.w;
lightSpace.xyz = lightSpace.xyz * 0.5 + 0.5;
float lightDepth = texture(shadowmapTexture, lightSpace.xy).x;
vec3 normal = texture(normalTexture, st).xyz;
vec3 diffuse;
float shadowFactor = 1.0;
if(lightSpace.w > 0.0 && lightSpace.z > lightDepth+0.0042) {
shadowFactor = 0.2;
}
else {
float k = 0.00001;
vec3 distanceToLight = lightPosition - position.xyz;
float distanceLength = length(distanceToLight);
float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;
}
vec3 gamma = vec3(1.0/2.2);
color = pow(texture(colorTexture, st).xyz*shadowFactor+diffuse, gamma);
}
How can I fix this issue (Other than increasing my far plane distance)?
One other question, as this is the first time I have attempted shadowmapping: am I doing the lighting in relation to the shadows correctly?

Atmospheric scattering OpenGL 3.3

I'm currently trying to convert a shader by Sean O'Neil to version 330 so I can try it out in an application I'm writing. I had some issues with deprecated functions, so I replaced them, but I'm almost completely new to GLSL, so I probably made a mistake somewhere.
Original shaders can be found here:
http://www.gamedev.net/topic/592043-solved-trying-to-use-atmospheric-scattering-oneill-2004-but-get-black-sphere/
My horrible attempt at converting them:
Vertex shader:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 2) in vec3 vertexNormal_modelspace;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 2;
const float fSamples = 2.0;
invariant out vec3 v3Direction;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
out vec4 dgl_SecondaryColor;
out vec4 dgl_Color;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main(void)
{
//gg_FrontColor = vec3(1.0, 0.0, 0.0);
//gg_FrontSecondaryColor = vec3(0.0, 1.0, 0.0);
// Get the ray from the camera to the vertex, and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = vertexPosition_modelspace;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos;
float fHeight = length(v3Start);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fCameraHeight));
float fStartAngle = dot(v3Ray, v3Start) / fHeight;
float fStartOffset = fDepth*scale(fStartAngle);
// Initialize the scattering loop variables
gl_FrontColor = vec4(0.0, 0.0, 0.0, 0.0);
gl_FrontSecondaryColor = vec4(0.0, 0.0, 0.0, 0.0);
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.2, 0.1, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
float fScatter = (fStartOffset + fDepth*(scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
gl_FrontSecondaryColor.rgb = v3FrontColor * fKmESun;
gl_FrontColor.rgb = v3FrontColor * (v3InvWavelength * fKrESun);
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
v3Direction = v3CameraPos - v3Pos;
dgl_SecondaryColor = gl_FrontSecondaryColor;
dgl_Color = gl_FrontColor;
}
Fragment shader:
#version 330 core
out vec4 dgl_FragColor;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
invariant in vec3 v3Direction;
in vec4 dgl_SecondaryColor;
in vec4 dgl_Color;
uniform mat4 MV;
void main (void)
{
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
dgl_FragColor = dgl_Color + fMiePhase * dgl_SecondaryColor;
dgl_FragColor.a = dgl_FragColor.b;
}
I wrote a function to render a sphere, and I'm trying to render this shader onto an inverted version of it; the sphere itself works completely fine, with normals and all. My problem is that the sphere gets rendered all black, so the shader is not working.
Edit: I got the sun to draw, but the sky is still all black.
This is how I'm trying to render the atmosphere inside my main rendering loop.
glUseProgram(programAtmosphere);
glBindTexture(GL_TEXTURE_2D, 0);
//######################
glUniform3f(v3CameraPos, getPlayerPos().x, getPlayerPos().y, getPlayerPos().z);
glm::vec3 lightDirection = lightPos/length(lightPos);
glUniform3f(v3LightPos, lightDirection.x , lightDirection.y, lightDirection.z);
glUniform3f(v3InvWavelength, 1.0f / pow(0.650f, 4.0f), 1.0f / pow(0.570f, 4.0f), 1.0f / pow(0.475f, 4.0f));
glUniform1fARB(fCameraHeight, 10.0f+length(getPlayerPos()));
glUniform1fARB(fCameraHeight2, (10.0f+length(getPlayerPos()))*(10.0f+length(getPlayerPos())));
glUniform1fARB(fInnerRadius, 10.0f);
glUniform1fARB(fInnerRadius2, 100.0f);
glUniform1fARB(fOuterRadius, 10.25f);
glUniform1fARB(fOuterRadius2, 10.25f*10.25f);
glUniform1fARB(fKrESun, 0.0025f * 20.0f);
glUniform1fARB(fKmESun, 0.0015f * 20.0f);
glUniform1fARB(fKr4PI, 0.0025f * 4.0f * 3.141592653f);
glUniform1fARB(fKm4PI, 0.0015f * 4.0f * 3.141592653f);
glUniform1fARB(fScale, 1.0f / 0.25f);
glUniform1fARB(fScaleDepth, 0.25f);
glUniform1fARB(fScaleOverScaleDepth, 4.0f / 0.25f );
glUniform1fARB(g, -0.990f);
glUniform1f(g2, -0.990f * -0.990f);
Any ideas?
Edit: updated the code, and added a picture.
I think the problem is that you write to 'FragColor', which may be a 'dead end' output variable in the fragment shader, since one must explicitly bind it to a color number before linking the program:
glBindFragDataLocation(programAtmosphere,0,"FragColor");
or using this in a shader:
layout(location = 0) out vec4 FragColor
You may try to use the built-in output variables instead: gl_FragColor, which is an alias for gl_FragData[0] and therefore the same as the binding above.
EDIT: Forgot to say, when using the deprecated built-ins, you must have a compatibility declaration:
#version 330 compatibility
EDIT 2: To test the binding, I'd write a constant color to it to rule out possible calculation errors, since those may not yield the expected result because of mistakes or zero input.
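For example, a sketch of that sanity check using the question's dgl_FragColor output and the explicit location qualifier from above:
#version 330 core
layout(location = 0) out vec4 dgl_FragColor;
void main(void)
{
// Constant magenta: if the sphere shows up magenta, the output binding works
// and the remaining problem is in the scattering maths or its inputs.
dgl_FragColor = vec4(1.0, 0.0, 1.0, 1.0);
}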