Related
(This Image is What I want to implement)
I am attempting Post Processing using Compute Shader to implement Light Shaft for multiple Spot Lights in the DX12 framework.
The first thing I tried was the method at the following link:https://gitlab.com/tomasoh/100_procent_more_volume/-/blob/master/shaders/volumetric.frag
It's a very complicated and hard-to-understand kind of shader, but it's basically built on the premise of using multiple lights, so it's a kind of example for the purpose.
However, since the game I'm making has 32 light source limitations, considering that excessive amount of Frame Drop will occur in the part of calculating Visibility by making Shadow Map for all light sources, I decided to implement Visibility as 1.0 Constant and did not get the desired result. (Of course it's a result.)
Down below is how I did this thing:
#include "lighting.hlsl"
Texture2D<float4> inputTexture : register(t0);
Texture2D<float> depthTexture : register(t1);
RWTexture2D<float4> outputTexture : register(u0);
#define PI 3.141592653589793238f
cbuffer VolumetricCB : register(b1)
{
float absorptionTau : packoffset(c0);
float3 absorptionColor : packoffset(c0.y);
int scatteringSamples : packoffset(c1.x);
float scatteringTau : packoffset(c1.y);
float scatteringZFar : packoffset(c1.z);
float3 scatteringColor : packoffset(c2);
matrix gInvProj : packoffset(c3);
matrix gInvView : packoffset(c7);
float3 gCameraPos : packoffset(c11);
Light gLights[NUM_LIGHTS] : packoffset(c12);
}
float random(float2 co)
{
return frac(sin(dot(co.xy, float2(12.9898, 78.233))) * 43758.5453123);
}
float3 PixelWorldPos(float depthValue, int2 pixel)
{
uint width, height;
inputTexture.GetDimensions(width, height);
float2 fPixel = float2(pixel.x, pixel.y);
float x = (fPixel.x / width * 2) - 1;
float y = (fPixel.y / height * (-2)) + 1;
float z = depthValue;
float4 ndcCoords = float4(x, y, z, 1.0f);
float4 p = mul(ndcCoords, gInvProj);
p /= p.w;
float4 worldCoords = mul(p, gInvView);
return worldCoords.xyz;
}
float3 absorptionTransmittance(float dist)
{
return absorptionColor * exp(-dist * (absorptionTau + scatteringTau));
}
float phaseFunction(float3 inDir, float3 outDir)
{
float cosAngle = dot(inDir, outDir) / (length(inDir) * length(outDir));
float x = (1.0 + cosAngle) / 2.0;
float x2 = x * x;
float x4 = x2 * x2;
float x8 = x4 * x4;
float x16 = x8 * x8;
float x32 = x16 * x16;
float nom = 0.5 + 16.5 * x32;
float factor = 1.0 / (4.0 * PI);
return nom * factor;
}
float3 volumetricScattering(float3 worldPosition, Light light)
{
float3 result = float3(0.0, 0.0, 0.0);
float3 camToFrag = worldPosition - gCameraPos;
if (length(camToFrag) > scatteringZFar)
{
camToFrag = normalize(camToFrag) * scatteringZFar;
}
float3 deltaStep = camToFrag / (scatteringSamples + 1);
float3 fragToCamNorm = normalize(gCameraPos - worldPosition);
float3 x = gCameraPos;
float rand = random(worldPosition.xy + worldPosition.z);
x += (deltaStep * rand);
for (int i = 0; i < scatteringSamples; ++i)
{
float visibility = 1.0;
float3 lightToX = x - light.Position;
float lightDist = length(lightToX);
float omega = 4 * PI * lightDist * lightDist;
float3 Lin = absorptionTransmittance(lightDist) * visibility * light.Diffuse * light.SpotPower / omega;
float3 Li = Lin * scatteringTau * scatteringColor * phaseFunction(normalize(lightToX), fragToCamNorm);
result += Li * absorptionTransmittance(distance(x, gCameraPos)) * length(deltaStep);
x += deltaStep;
}
return result;
}
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
int2 pixel = int2(dispatchID.x, dispatchID.y);
float4 volumetricColor = float4(0.0, 0.0, 0.0, 1.0);
float depthValue = depthTexture[pixel].r;
float3 worldPosition = PixelWorldPos(depthValue, pixel);
float fragCamDist = distance(worldPosition, gCameraPos);
for (int i = 0; i < NUM_LIGHTS; ++i)
{
if (gLights[i].Type == SPOT_LIGHT && gLights[i].FalloffEnd > length(gLights[i].Position - worldPosition))
volumetricColor += float4(volumetricScattering(worldPosition, gLights[i]), 0.0);
}
outputTexture[pixel] = volumetricColor + inputTexture[pixel];
}
(AbsorptionTau = -0.061f, ScatteringTau = 0.059f)
All these Codes for that Tiny Spot...
The second method was shown in Chapter 13 of GPU GEM3.
It was a method of drawing only Light Source on a separate Render Target, processing the Render Target using Post Processing Shder to create light scattering, and then merging it with a back buffer. (At least that's how I understand it.)
However, this method was designed only for one very strong light, and to fix it, I modified the code as below, but it didn't work well.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
uint2 pixel = dispatchID.xy;
uint width, height;
inputTexture.GetDimensions(width, height);
float4 result = inputTexture[pixel];
for (int i = 0; i < NUM_LIGHTS; ++i)
{
if(gLights[i].Type == SPOT_LIGHT)
{
float2 texCoord = float2(pixel.x / width, pixel.y / height);
float2 deltaTexCoord = (texCoord - mul(mul(float4(gLights[i].Position, 1.0f), gView), gProj).xy);
deltaTexCoord *= 1.0f / NUM_SAMPLES * Density;
float3 color = inputTexture[pixel].rgb;
float illuminationDecay = 1.0f;
for (int j = 0; j < NUM_SAMPLES; j++)
{
texCoord -= deltaTexCoord;
uint2 modifiedPixel = uint2(texCoord.x * width, texCoord.y * height);
float3 sample = inputTexture[modifiedPixel].rgb;
sample *= illuminationDecay * Weight;
color += sample;
illuminationDecay *= Decay;
}
result += float4(color * Exposure, 1);
}
}
outputTexture[pixel] = result;
}
this just 'Blur' these light source map, and surely it's not what I wanted.
Is there a similar kind of example to the implementation that I want, or is there a simpler way to do this? I've spent a week on this issue, but I haven't achieved much.
edit :
I did it! but there's some error about direction of light volume.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
float4 result = { 0.0f, 0.0f, 0.0f, 0.0f };
uint2 pixel = dispatchID.xy;
uint width, height;
inputTexture.GetDimensions(width, height);
float2 texCoord = (float2(pixel) + 0.5f) / float2(width, height);
float depth = depthTexture[pixel].r;
float3 screenPos = GetPositionVS(texCoord, depth);
float3 rayEnd = float3(0.0f, 0.0f, 0.0f);
const uint sampleCount = 16;
const float stepSize = length(screenPos - rayEnd) / sampleCount;
// Perform ray marching to integrate light volume along view ray:
[loop]
for (uint i = 0; i < NUM_LIGHTS; ++i)
{
[branch]
if (gLights[i].Type == SPOT_LIGHT)
{
float3 V = float3(0.0f, 0.0f, 0.0f) - screenPos;
float cameraDistance = length(V);
V /= cameraDistance;
float marchedDistance = 0;
float accumulation = 0;
float3 P = screenPos + V * stepSize * dither(pixel.xy);
for (uint j = 0; j < sampleCount; ++j)
{
float3 L = mul(float4(gLights[i].Position, 1.0f), gView).xyz - P;
const float dist2 = dot(L, L);
const float dist = sqrt(dist2);
L /= dist;
//float3 viewDir = mul(float4(gLights[i].Direction, 1.0f), gView).xyz;
float3 viewDir = gLights[i].Direction;
float SpotFactor = dot(L, normalize(-viewDir));
float spotCutOff = gLights[i].outerCosine;
[branch]
if (SpotFactor > spotCutOff)
{
float attenuation = DoAttenuation(dist, gLights[i].Range);
float conAtt = saturate((SpotFactor - gLights[i].outerCosine) / (gLights[i].innerCosine - gLights[i].outerCosine));
conAtt *= conAtt;
attenuation *= conAtt;
attenuation *= ExponentialFog(cameraDistance - marchedDistance);
accumulation += attenuation;
}
marchedDistance += stepSize;
P = P + V * stepSize;
}
accumulation /= sampleCount;
result += max(0, float4(accumulation * gLights[i].Color * gLights[i].VolumetricStrength, 1));
}
}
outputTexture[pixel] = inputTexture[pixel] + result;
}
this is my compute shader, but when I doesn't multiply view matrix to direction, it goes wrong like this :
as you can see, street lamp's volume direction is good, but vehicle's headlight's volume direction is different from it's spot light direction.
and when I multiply view matrix to direction :
head lights gone wrong AND street lamp goes wrong too.
I still finding where's wrong in my cpu codes, but I haven't find anything.
this might be helpful. here's my shader code about spot lighting.
float CalcAttenuation(float d, float falloffStart, float falloffEnd)
{
return saturate((falloffEnd - d) / (falloffEnd - falloffStart));
}
float3 BlinnPhongModelLighting(float3 lightDiff, float3 lightVec, float3 normal, float3 view, Material mat)
{
const float m = mat.Exponent;
const float f = ((mat.IOR - 1) * (mat.IOR - 1)) / ((mat.IOR + 1) * (mat.IOR + 1));
const float3 fresnel0 = float3(f, f, f);
float3 halfVec = normalize(view + lightVec);
float roughness = (m + 8.0f) * pow(saturate(dot(halfVec, normal)), m) / 8.0f;
float3 fresnel = CalcReflectPercent(fresnel0, halfVec, lightVec);
float3 specular = fresnel * roughness;
specular = specular / (specular + 1.0f);
return (mat.Diffuse.rgb + specular * mat.Specular) * lightDiff;
}
float3 ComputeSpotLight(Light light, Material mat, float3 pos, float3 normal, float3 view)
{
float3 result = float3(0.0f, 0.0f, 0.0f);
bool bCompute = true;
float3 lightVec = light.Position - pos;
float d = length(lightVec);
if (d > light.FalloffEnd)
bCompute = false;
if (bCompute)
{
lightVec /= d;
float ndotl = max(dot(lightVec, normal), 0.0f);
float3 lightDiffuse = light.Diffuse * ndotl;
float att = CalcAttenuation(d, light.FalloffStart, light.FalloffEnd);
lightDiffuse *= att;
float spotFactor = pow(max(dot(-lightVec, light.Direction), 0.0f), light.SpotPower);
lightDiffuse *= spotFactor;
result = BlinnPhongModelLighting(lightDiffuse, lightVec, normal, view, mat);
}
return result;
}
I am writing a ray tracer, so far with only spheres, in C++ and after implementing Phong's reflection model, shadows and reflections, everything seemed to work fine. When I implemented refractions and fresnel I can't seem to get things to look right. I have been thinking whether or not it could be because of how I move the rayOrigin when I am inside/outside the sphere object but after trying and googling I still can't get it right.
Below is an image. The gray background is a large diffuse sphere and the smaller blue sphere behind the red sphere is also diffuse. The others are reflective and refractive with ior 1.5-1.6. There are two point lights, on slightly to left and one slighly to the right.
As seen in the image, the spheres don't appear transparent at all. There are also noticeable circular color differences on the spheres. Maybe this can be because of the way I combine the colors for each pixel in my trace function:
Vec3 trace(Vec3& rayOrigin, Vec3& rayDirection, unsigned recursiveDepth, std::vector<Sphere>& spheres, std::vector<Light>& lights, RenderOption& options) {
//Finding nearest intersecting object
float nearestDepth = 1e8;
Sphere nearestObject;
unsigned id = 0;
Vec3 origin = rayOrigin + rayDirection * BIAS;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i].intersect(origin, rayDirection)) {
if (spheres[i].depth < nearestDepth) {
nearestDepth = spheres[i].depth;
nearestObject = spheres[i];
id = i;
}
}
}
Vec3 backgroundColor = Vec3(0.0f, 0.0f, 0.0f);
if (!nearestObject.exists) {
//No intersecting object -> background cooler
return backgroundColor;
} else {
Vec3 totalColor;
Vec3 lightDirection;
//Ambient color
totalColor += options.ambientColor * nearestObject.ambientColor; //Ambient color set to 0
//Calculate fresnel, update fresnelReflection & fresnelRefraction of nearestObject sent in
fresnel(rayDirection, nearestObject);
//Recursive reflection and refraction
if ((nearestObject.reflectivity > 0.0f || nearestObject.transparency > 0.0f) && recursiveDepth < options.recursionDepth) {
//Reflection case
if (nearestObject.fresnelReflection > 0.0f) {
Vec3 reflection = computeReflection(rayDirection, nearestObject.normal);
Vec3 reflectedColor = trace(nearestObject.intersection, reflection, ++recursiveDepth, spheres, lights, options);
totalColor += reflectedColor * nearestObject.fresnelReflection;
}
//Refraction case
if (nearestObject.fresnelRefraction > 0.0f) {
Vec3 refractionDirection = computeRefraction(rayDirection, nearestObject.normal, nearestObject.indexOfRefraction, nearestObject.intersection);
Vec3 refractedColor = trace(nearestObject.intersection, refractionDirection, ++recursiveDepth, spheres, lights, options);
totalColor += refractedColor * nearestObject.fresnelRefraction;
}
}
//Phong reflection model and shadows
for (unsigned i = 0; i < lights.size(); ++i) {
//Shadow ray
Vec3 intersectionPointBias = nearestObject.intersection + nearestObject.normal * BIAS;
Vec3 shadowRayDirection = lights[i].position - intersectionPointBias; //normalized in intersect function
for (unsigned k = 0; k < spheres.size(); ++k) //kolla inte nearestObject mot sig själv
{
if (!spheres[k].intersect(intersectionPointBias, shadowRayDirection))
{
//Diffuse
lightDirection = lights[i].position - nearestObject.normal;
lightDirection.normalize();
totalColor += lights[i].diffuse * std::max(0.0f, nearestObject.normal.dot(lightDirection)) * nearestObject.diffuseColor;
//Specular
Vec3 viewDirection = nearestObject.intersection - options.cameraOrigin;
viewDirection.normalize();
Vec3 reflection = lightDirection - nearestObject.normal * 2 * (nearestObject.normal.dot(lightDirection));
reflection.normalize();
totalColor += lights[i].specular * nearestObject.specularColor * std::max(0.0f, pow(reflection.dot(viewDirection), nearestObject.shininessCoefficient));
}
}
}
return totalColor;
}
}
Here are the other relevant functions:
computeRefraction:
Vec3 computeRefraction(const Vec3& I, const Vec3& N, const float &ior, Vec3& intersection) {
Vec3 normal = N; normal.normalize();
normal = normal;
Vec3 incident = I; incident.normalize();
float cosi = incident.dot(normal);
float n1, n2;
if (cosi > 0.0f) {
//Incident and normal have same direction, INSIDE sphere
n1 = ior;
n2 = 1.0f;
normal = -normal;
} else {
//Incident and normal have opposite direction, OUTSIDE sphere
n1 = 1.0f;
n2 = ior;
cosi = -cosi;
}
float eta = n1 / n2;
float k = 1.0f - (eta * eta) * (1.0f - cosi * cosi);
if (k < 0.0f) {
//internal reflection
Vec3 reflectionRay = computeReflection(incident, normal);
intersection = intersection + (normal * BIAS);
return reflectionRay;
} else {
Vec3 refractionVector = incident * eta + normal * (eta * cosi - sqrt(k));
refractionVector.normalize();
intersection = intersection - (normal * BIAS);
return refractionVector;
}
}
fresnel:
void fresnel(const Vec3& I, Sphere& obj) {
Vec3 normal = obj.normal;
Vec3 incident = I;
float cosi = clamp(-1.0f, 1.0f, incident.dot(normal));
float etai = 1.0f, etat = obj.indexOfRefraction;
if (cosi > 0) {
std::swap(etai, etat);
}
float sint = etai / etat * sqrt(std::max(0.0f, 1 - cosi * cosi));
if (sint >= 1) {
obj.fresnelReflection = 1.0f;
obj.fresnelRefraction = 0.0f;
} else {
float cost = sqrt(std::max(0.0f, 1 - sint * sint));
cosi = abs(cost);
float Rs = ((etat * cosi) - (etai * cost)) / ((etat * cosi) + (etai * cost));
float Rp = ((etai * cosi) - (etat * cost)) / ((etai * cosi) + (etat * cost));
obj.fresnelReflection = (Rs * Rs + Rp * Rp) / 2;
obj.fresnelRefraction = 1.0f - obj.fresnelReflection;
}
}
reflection:
Vec3 computeReflection(const Vec3& rayDirection, const Vec3& objectNormal){
Vec3 normal = objectNormal;
Vec3 incident = rayDirection;
Vec3 reflection = incident - normal * (normal.dot(rayDirection)) * 2;
reflection.normalize();
return reflection;
}
Any help in understanding and resolving these rendering issues would be greatly appreciated as no other posts or theory has helped resolve this on my own this past week. Thank you!
I have a mesh with arbitrary normals and I have calculated them using standard method
Method to calculate the tangents..
void calcTangent(uint32_t idx1, uint32_t idx2, uint32_t idx3)
{
vertex v1 = _vertex[idx1];
vertex v2 = _vertex[idx2];
vertex v3 = _vertex[idx3];
float du1 = v3.Text.x - v1.Text.x;
float dv1 = v3.Text.y - v1.Text.y;
float du2 = v2.Text.x - v1.Text.x;
float dv2 = v2.Text.y - v1.Text.y;
float tx1 = v3.Pos.x - v1.Pos.x;
float ty1 = v3.Pos.y - v1.Pos.z;
float tz1 = v3.Pos.z - v1.Pos.z;
float tx2 = v2.Pos.x - v1.Pos.x;
float ty2 = v2.Pos.y - v1.Pos.z;
float tz2 = v2.Pos.z - v1.Pos.z;
float r = 1.0f / (du1 * dv2 - dv1 * du2);
float e1x = (dv2 * tx1 - dv1 * tx2) * r;
float e1y = (dv2 * ty1 - dv1 * ty2) * r;
float e1z = (dv2 * tz1 - dv1 * tz2) * r;
//Binormals
float e2x = (du1 * tx2 - du2 * tx1) * r;
float e2y = (du1 * ty2 - du2 * ty1) * r;
float e2z = (du1 * tz2 - du2 * tz1) * r;
XMFLOAT3 ot1 = Math::gramSchmidthF({ v1.Norm.x, v1.Norm.y, v1.Norm.z }, { e1x, e1y, e1z });
XMFLOAT3 ot2 = Math::gramSchmidthF({ v2.Norm.x, v2.Norm.y, v2.Norm.z }, { e1x, e1y, e1z });
XMFLOAT3 ot3 = Math::gramSchmidthF({ v3.Norm.x, v3.Norm.y, v3.Norm.z }, { e1x, e1y, e1z });
_vertex[idx1].Tangent = ot1;
_vertex[idx2].Tangent = ot2;
_vertex[idx3].Tangent = ot3;
}
the bitangent will not be passed to the shader, and will be calculated in PS..
vertex shader and pixel shaders..
struct VS_INPUT
{
float4 Position : POSITION;
float3 Normal : NORMAL;
float2 Texture : TEXCOORD;
float3 Tangent : TANGENT;
};
struct PS_INPUT
{
float4 Position : SV_POSITION;
float3 Normal : NORMAL;
float3 Tangent : TANGENT;
float3 Binormal : BINORMAL;
float2 Texture : TEXCOORD0;
float3 ViewDirection : TEXCOORD1;
};
PS_INPUT vertex_shader(VS_INPUT input)
{
PS_INPUT output = (PS_INPUT)0;
input.Position.w = 1.0f;
//transformations
output.Position = mul(input.Position, World);
output.Position = mul(output.Position, View);
output.Position = mul(output.Position, Projection);
//
output.Normal = normalize(mul(float4(input.Normal, 0), World).xyz);
output.Texture = input.Texture;
float3 worldPosition = mul(input.Position, World).xyz;
output.ViewDirection = normalize(CAMERA_POSITION - worldPosition);
//add the tangent and binormal
output.Tangent = normalize(mul(float4(input.Tangent, 0), World).xyz);
output.Binormal = normalize(cross(output.Normal, output.Tangent));
return output;
}
float4 ps(PS_INPUT input) : SV_Target
{
float4 OUT = (float4)0;
//texture normal
float3 sampledNormal = (2 * normalMapTexture.Sample(normalMapSampler, input.Texture).xyz) - 1.0; // Map normal from [0..1] to [-1..1]
//creating matrix
// Tangent
// Binormal
// Normal
float3x3 tbn = float3x3(input.Tangent, input.Binormal, input.Normal);
//convert tangent space to world space
sampledNormal = mul(sampledNormal, tbn); // Transform normal from normal map to world space
float3 viewDirection = normalize(input.ViewDirection);
//texture color
float4 color = colorTexture.Sample(samLinear, input.Texture); //getting the color from texture without normals..
//ambient color
float3 ambient = getVectorColorContribution(AMBIENT_COLOR, color.rgb); //mult AMBIENT_COLOR(.rgb) * AMBIENT_COLOR Intensity (.a) * color
float3 diffuse = (float3)0;
float3 specular = (float3)0;
float3 lightDirection = normalize(-LIGHT_DIR.xyz);
float n_dot_l = dot(sampledNormal, lightDirection);
//calculating the diffuse value
diffuse = saturate(n_dot_l) * LIGHT_COLOR.rgb * LIGHT_COLOR.a;
}
//changing the return types will change the result to basic or specular..
OUT.rgb = diffuse * color;
OUT.a = 1.0f;
return OUT;
}
Here it is the result, only using diffuse, to avoid specular errors..
please anyone knows why is this?
After a lot of digging, in DirectXMesh there is a function to calculate tangents right(ComputeTangentFrame) it saves the result in XMFLOAT4 tangent[3]array , works perfectly, so my problem was calculating tangents. Hope it helps anyone else..
Little Example using it:
uint32_t idx[3];
idx[0] = 0;
idx[1] = 1;
idx[2] = 2;
XMFLOAT3 pos[3];
pos[0] = Pos1;
pos[1] = Pos2;
pos[2] = Pos3;
XMFLOAT3 normals[3];
normals[0] = Normal1;
normals[1] = Normal2;
normals[2] = Normal3;
XMFLOAT2 t[3];
t[0] = TextureCoord1;
t[1] = TextureCoord2;
t[2] = TextureCoord3;
XMFLOAT4 tangent[3];
ComputeTangentFrame(idx,1, pos, normals,t,3, tangent);
I did mouse picking with terrain for these lessons (but used c++)
https://www.youtube.com/watch?v=DLKN0jExRIM&index=29&listhLoLuZVfUksDP
http://antongerdelan.net/opengl/raycasting.html
The problem is that the position of the mouse does not correspond to the place where the ray intersects with the terrane:
There's a big blunder on the vertical and a little horizontal.
Do not look at the shadows, this is not a corrected map of normals.
What can be wrong? My code:
void MousePicker::update() {
view = cam->getViewMatrix();
currentRay = calculateMouseRay();
if (intersectionInRange(0, RAY_RANGE, currentRay)) {
currentTerrainPoint = binarySearch(0, 0, RAY_RANGE, currentRay);
}
else {
currentTerrainPoint = vec3();
}
}
vec3 MousePicker::calculateMouseRay() {
glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);
vec4 clipCoords = vec4(normalizedCoords.x, normalizedCoords.y, -1.0f, 1.0f);
vec4 eyeCoords = toEyeCoords(clipCoords);
vec3 worldRay = toWorldCoords(eyeCoords);
return worldRay;
}
vec2 MousePicker::getNormalizedCoords(double xPos, double yPos) {
GLint width, height;
glfwGetWindowSize(win, &width, &height);
//GLfloat x = (2.0 * xPos) / width - 1.0f;
GLfloat x = -((width - xPos) / width - 0.5f) * 2.0f;
//GLfloat y = 1.0f - (2.0f * yPos) / height;
GLfloat y = ((height - yPos) / height - 0.5f) * 2.0f;
//float z = 1.0f;
mouseInfo.normalizedCoords = vec2(x, y);
return vec2(x,y);
}
vec4 MousePicker::toEyeCoords(vec4 clipCoords) {
vec4 invertedProjection = inverse(projection) * clipCoords;
//vec4 eyeCoords = translate(invertedProjection, clipCoords);
mouseInfo.eyeCoords = vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
return vec4(invertedProjection.x, invertedProjection.y, -1.0f, 0.0f);
}
vec3 MousePicker::toWorldCoords(vec4 eyeCoords) {
vec3 rayWorld = vec3(inverse(view) * eyeCoords);
vec3 mouseRay = vec3(rayWorld.x, rayWorld.y, rayWorld.z);
rayWorld = normalize(rayWorld);
mouseInfo.worldRay = rayWorld;
return rayWorld;
}
//*********************************************************************************
vec3 MousePicker::getPointOnRay(vec3 ray, float distance) {
vec3 camPos = cam->getCameraPos();
vec3 start = vec3(camPos.x, camPos.y, camPos.z);
vec3 scaledRay = vec3(ray.x * distance, ray.y * distance, ray.z * distance);
return vec3(start + scaledRay);
}
vec3 MousePicker::binarySearch(int count, float start, float finish, vec3 ray) {
float half = start + ((finish - start) / 2.0f);
if (count >= RECURSION_COUNT) {
vec3 endPoint = getPointOnRay(ray, half);
//Terrain* ter = &getTerrain(endPoint.x, endPoint.z);
if (terrain != NULL) {
return endPoint;
}
else {
return vec3();
}
}
if (intersectionInRange(start, half, ray)) {
return binarySearch(count + 1, start, half, ray);
}
else {
return binarySearch(count + 1, half, finish, ray);
}
}
bool MousePicker::intersectionInRange(float start, float finish, vec3 ray) {
vec3 startPoint = getPointOnRay(ray, start);
vec3 endPoint = getPointOnRay(ray, finish);
if (!isUnderGround(startPoint) && isUnderGround(endPoint)) {
return true;
}
else {
return false;
}
}
bool MousePicker::isUnderGround(vec3 testPoint) {
//Terrain* ter = &getTerrain(testPoint.x, testPoint.z);
float height = 0;
if (terrain != NULL) {
height = terrain->getHeightPoint(testPoint.x, testPoint.z);
mouseInfo.height = height;
}
if (testPoint.y < height) {
return true;
}
else {
return false;
}
}
Terrain MousePicker::getTerrain(float worldX, float worldZ) {
return *terrain;
}
In perspective projection, a ray from the eye position through a point on the screen can defined by 2 points. The first point is the eye (camera) position which is (0, 0, 0) in view space. The second point has to be calculated by the position on the screen.
The screen position has to be converted to normalized device coordinates in range from (-1,-1) to (1,1).
w = with of the viewport
h = height of the viewport
x = X position of the mouse
y = Y position ot the mouse
GLfloat ndc_x = 2.0 * x/w - 1.0;
GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
To calculate a point on the ray, which goes through the camera position and through the point on the screen, the field of view and the aspect ratio of the perspective projection has to be known:
fov_y = vertical field of view angle in radians
aspect = w / h
GLfloat tanFov = tan( fov_y * 0.5 );
glm::vec3 ray_P = vec3( ndc_x * aspect * tanFov, ndc_y * tanFov, -1.0 ) );
A ray from the camera position through a point on the screen can be defined by the following position (P0) and normalized direction (dir), in world space:
view = view matrix
glm::mat4 invView = glm::inverse( view );
glm::vec3 P0 = invView * glm::vec3(0.0f, 0.0f, 0.0f);
// = glm::vec3( view[3][0], view[3][1], view[3][2] );
glm::vec3 dir = glm::normalize( invView * ray_P - P0 );
In this case, the answers to the following questions will be interesting too:
How to recover view space position given view space depth value and ndc xy
Is it possble get which surface of cube will be click in OpenGL?
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
GLSL spotlight projection volume
Applying to your code results in the following changes:
The Perspective Projection Matrix looks like this:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l) 0 0 0
0 2*n/(t-b) 0 0
(r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
it follows:
aspect = w / h
tanFov = tan( fov_y * 0.5 );
p[0][0] = 2*n/(r-l) = 1.0 / (tanFov * aspect)
p[1][1] = 2*n/(t-b) = 1.0 / tanFov
Convert from screen (mouse) coordinates to normalized device coordinates:
vec2 MousePicker::getNormalizedCoords(double x, double y) {
GLint w, h;
glfwGetWindowSize(win, &width, &height);
GLfloat ndc_x = 2.0 * x/w - 1.0;
GLfloat ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
mouseInfo.normalizedCoords = vec2(ndc_x, ndc_x);
return vec2(ndc_x, ndc_x);
}
Calculate A ray from the camera position through a point on the screen (mouse position) in world space:
vec3 MousePicker::calculateMouseRay( void ) {
glfwGetCursorPos(win, &mouseInfo.xPos, &mouseInfo.yPos);
vec2 normalizedCoords = getNormalizedCoords(mouseInfo.xPos, mouseInfo.yPos);
ray_Px = normalizedCoords.x / projection[0][0]; // projection[0][0] == 1.0 / (tanFov * aspect)
ray_Py = normalizedCoords.y / projection[1][1]; // projection[1][1] == 1.0 / tanFov
glm::vec3 ray_P = vec3( ray_Px, ray_Py, -1.0f ) );
vec3 camPos = cam->getCameraPos(); // == glm::vec3( view[3][0], view[3][1], view[3][2] );
glm::mat4 invView = glm::inverse( view );
glm::vec3 P0 = camPos;
glm::vec3 dir = glm::normalize( invView * ray_P - P0 );
return dir;
}
I'm currently learning about shaders and graphics pipelines and I was wondering if a pixel shader could be used to create, for example, a triangle or a more complex shape like a zigzag.
Could this be done without the use of a vertex shader?
Answer is yes! You can draw anything you want using pixel shader by implementing a ray Tracer. Here is a sample code:
uniform vec3 lightposition;
uniform vec3 cameraposition;
uniform float motion;
struct Ray
{
vec3 org;
vec3 dir;
};
struct Sphere
{
vec3 Center;
float Radius;
vec4 Color;
float MatID;
float id;
};
struct Intersection
{
float t;
vec3 normal;
vec3 hitpos;
vec4 color;
float objectid;
float materialID;
};
bool sphereIntersect(Ray eyeray, Sphere sp, inout Intersection intersection)
{
float t1=0.0;
eyeray.dir = normalize(eyeray.dir);
float B = 2.0 *( ( eyeray.dir.x * (eyeray.org.x - sp.Center.x ) )+ ( eyeray.dir.y *(eyeray.org.y - sp.Center.y )) + ( eyeray.dir.z * (eyeray.org.z - sp.Center.z ) ));
float C = pow((eyeray.org.x - sp.Center.x),2.0) + pow((eyeray.org.y - sp.Center.y),2.0) + pow((eyeray.org.z - sp.Center.z),2.0) - pow(sp.Radius,2.0);
float D = B*B - 4.0*C ;
if(D>=0.0)
{
t1= (-B - pow(D, .5)) / 2.0;
if (t1 < 0.0)
{
t1 = (-B + pow(D, .5)) / 2.0;
if( t1 < 0.0)
return false;
else
{
if (t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
{
if(t1 > 1e-2 && t1 < intersection.t)
{
intersection.t = t1;
intersection.materialID = sp.MatID;
intersection.hitpos = eyeray.org + t1 * eyeray.dir;
intersection.normal = normalize(intersection.hitpos - sp.Center);
intersection.color = sp.Color;
intersection.objectid = sp.id;
return true;
}
}
}
else
return false;
}
void findIntersection(Ray ray, inout Intersection intersection)
{
intersection.t = 1e10;
intersection.materialID = 0.0;
Sphere sp1 = Sphere(vec3(-2.0,0.0,-5.0),1.5,vec4(0.5, 0.1, 0.5, 1.0),1.0,1.0);
Sphere sp2 = Sphere(vec3( 2.0,0.0,-5.0),1.5,vec4(0.5,0.5,0.1,1.0),1.0,2.0);
Sphere sp3 = Sphere(vec3( 0.0,3.0,-5.0),1.5,vec4(0.1,0.5,0.5,1.0),1.0,3.0);
sphereIntersect(ray, sp1, intersection);
sphereIntersect(ray, sp2, intersection);
sphereIntersect(ray, sp3, intersection);
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal);
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection);
vec4 GetColor(Ray ray)
{
Ray currentRay = ray;
vec4 finalColor = vec4(0.0);
for(int bounce = 1 ; bounce < 4 ; bounce++)
{
Intersection intersection;
intersection.objectid = 0.0;
findIntersection(currentRay, intersection);
if (intersection.materialID == 0.0) // We could not find any object. We return the background color
return finalColor;
else if (intersection.materialID == 1.0)
{
vec3 lv = lightposition - intersection.hitpos;
vec3 nlv = normalize(lv);
Intersection shadowIntersection;
Ray shadowRay = Ray(intersection.hitpos, nlv);
shadowIntersection.objectid = intersection.objectid;
findIntersection(shadowRay, shadowIntersection);
if (shadowIntersection.t > length(lv) || shadowIntersection.t < 1)
{
finalColor = finalColor + float(1.0f/bounce) * CalculateColor(intersection.color, 100.0, intersection.hitpos, intersection.normal);;
}
else
{
finalColor = finalColor + float(1.0f/bounce) * intersection.color;
}
//currentRay = Ray(intersection.hitpos, reflect(ray.dir, intersection.normal));
currentRay = ReflectedRay(intersection.normal,ray,intersection.hitpos);
}
}
return finalColor;
}
Ray createRay(float ScreenWidth,float ScreenHeight)
{
Ray toret;
toret.org = cameraposition;
float left = -3.0;
float bottom = -3.0;
float screenZ = -3.0;
float su = -3.0 + gl_FragCoord.x/ScreenWidth * 6; //gl_FragCoord gives you the current x and y component of your current pixel
float sv = -3.0 + gl_FragCoord.y/ScreenHeight * 6;
float sz = screenZ - cameraposition.z;
toret.dir = normalize(vec3(su,sv,sz));
//vec2 p = (gl_FragCoord.xy/resolution) * 2 ;
//toret.dir = normalize(vec3(p, -1.0));
return toret;
}
Ray ReflectedRay(vec3 Normal,Ray EyeRay,vec3 intersection)
{
Ray reflection;
reflection.dir = EyeRay.dir - 2 * Normal * dot(EyeRay.dir,Normal);
reflection.org = intersection + reflection.dir * 0.01;
return reflection;
}
vec4 CalculateColor(vec4 ambient ,float shiness,vec3 intersection, vec3 normal)
{
//intensities
vec3 Idifuse = vec3(1, 1, 1);
vec3 Iambient = vec3(0.8, 0.8, 0.8);
vec3 Ispecular = vec3(1,1,1);
vec3 kDifuse = vec3(0.5,0.5,0.5); //for difuse
vec3 kSpecular = vec3(0.75, 0.6, 0.3); //for specular
vec3 kAmbient = vec3(0.1, 0.2, 0.3); //for ambient
//vec4 kSpecular = vec4(0.5,0.5,0.5,1.0);
//vec4 kDifuse = vec4(0.5,0.5,0.5,1.0);
float ColorDifuse = max(dot(normal,lightposition),0.0) * kDifuse;
//vector calculations
vec3 l = normalize(lightposition - intersection); //light vector
vec3 n = normalize(normal); // normalVector of point in the sea
vec3 v = normalize(cameraposition - intersection); // view Vector
vec3 h = normalize(v + l); // half Vector
vec3 difuse = kDifuse * Idifuse * max(0.0, dot(n, l));
vec3 specular = kSpecular * Ispecular * pow(max(0.0, dot(n, h)), shiness);
vec3 color = ambient.xyz + difuse + specular;
return vec4(color,1.0);
gl_FragColor = vec4(color,1.0);
}
void main()
{
if(lightposition == vec3(0.0,0.0,0.0))
gl_FragColor = vec4(0.0,1.0,0.0,1.0);
Ray eyeray = createRay(600.0,600.0);
gl_FragColor = GetColor(eyeray);
}
A useful technique is to use a fragment shader (I'm an OpenGL guy) with point sprites. Point sprites in OpenGL 3+ get rendered as squares of pixels, with the size of the square (gl_PointSize) set by the vertex shader.
In the fragment shader, gl_PointCoord has the x and y coords of this particular pixel within the square, from 0.0 to 1.0. So you can draw a circle by testing if gl_PointCoord.x and gl_PointCoord.y are both within the radius and discarding if not, a framed square by checking that .x and .y are with some distance of the edge, and so on. It's classic maths, define a function(x, y) which returns true for points within the shape you want, false if not.
The Orange book, OpenGL Shading Language 3rd edition, has some examples (which in turn come from RenderMan) of how to draw such shapes.
Hope this helps.
What you want is called procedural textures or procedural shading.
You can draw different shapes with a simple (and not so simple) math.
Take a look for some examples here:
http://glslsandbox.com/
More on google.