C++ and DirectX: Calculate tangent to mesh with arbitrary normals - c++

I have a mesh with arbitrary normals and I have calculated them using standard method
Method to calculate the tangents..
void calcTangent(uint32_t idx1, uint32_t idx2, uint32_t idx3)
{
vertex v1 = _vertex[idx1];
vertex v2 = _vertex[idx2];
vertex v3 = _vertex[idx3];
float du1 = v3.Text.x - v1.Text.x;
float dv1 = v3.Text.y - v1.Text.y;
float du2 = v2.Text.x - v1.Text.x;
float dv2 = v2.Text.y - v1.Text.y;
float tx1 = v3.Pos.x - v1.Pos.x;
float ty1 = v3.Pos.y - v1.Pos.z;
float tz1 = v3.Pos.z - v1.Pos.z;
float tx2 = v2.Pos.x - v1.Pos.x;
float ty2 = v2.Pos.y - v1.Pos.z;
float tz2 = v2.Pos.z - v1.Pos.z;
float r = 1.0f / (du1 * dv2 - dv1 * du2);
float e1x = (dv2 * tx1 - dv1 * tx2) * r;
float e1y = (dv2 * ty1 - dv1 * ty2) * r;
float e1z = (dv2 * tz1 - dv1 * tz2) * r;
//Binormals
float e2x = (du1 * tx2 - du2 * tx1) * r;
float e2y = (du1 * ty2 - du2 * ty1) * r;
float e2z = (du1 * tz2 - du2 * tz1) * r;
XMFLOAT3 ot1 = Math::gramSchmidthF({ v1.Norm.x, v1.Norm.y, v1.Norm.z }, { e1x, e1y, e1z });
XMFLOAT3 ot2 = Math::gramSchmidthF({ v2.Norm.x, v2.Norm.y, v2.Norm.z }, { e1x, e1y, e1z });
XMFLOAT3 ot3 = Math::gramSchmidthF({ v3.Norm.x, v3.Norm.y, v3.Norm.z }, { e1x, e1y, e1z });
_vertex[idx1].Tangent = ot1;
_vertex[idx2].Tangent = ot2;
_vertex[idx3].Tangent = ot3;
}
the bitangent will not be passed to the shader, and will be calculated in PS..
vertex shader and pixel shaders..
struct VS_INPUT
{
float4 Position : POSITION;
float3 Normal : NORMAL;
float2 Texture : TEXCOORD;
float3 Tangent : TANGENT;
};
struct PS_INPUT
{
float4 Position : SV_POSITION;
float3 Normal : NORMAL;
float3 Tangent : TANGENT;
float3 Binormal : BINORMAL;
float2 Texture : TEXCOORD0;
float3 ViewDirection : TEXCOORD1;
};
PS_INPUT vertex_shader(VS_INPUT input)
{
PS_INPUT output = (PS_INPUT)0;
input.Position.w = 1.0f;
//transformations
output.Position = mul(input.Position, World);
output.Position = mul(output.Position, View);
output.Position = mul(output.Position, Projection);
//
output.Normal = normalize(mul(float4(input.Normal, 0), World).xyz);
output.Texture = input.Texture;
float3 worldPosition = mul(input.Position, World).xyz;
output.ViewDirection = normalize(CAMERA_POSITION - worldPosition);
//add the tangent and binormal
output.Tangent = normalize(mul(float4(input.Tangent, 0), World).xyz);
output.Binormal = normalize(cross(output.Normal, output.Tangent));
return output;
}
float4 ps(PS_INPUT input) : SV_Target
{
float4 OUT = (float4)0;
//texture normal
float3 sampledNormal = (2 * normalMapTexture.Sample(normalMapSampler, input.Texture).xyz) - 1.0; // Map normal from [0..1] to [-1..1]
//creating matrix
// Tangent
// Binormal
// Normal
float3x3 tbn = float3x3(input.Tangent, input.Binormal, input.Normal);
//convert tangent space to world space
sampledNormal = mul(sampledNormal, tbn); // Transform normal from normal map to world space
float3 viewDirection = normalize(input.ViewDirection);
//texture color
float4 color = colorTexture.Sample(samLinear, input.Texture); //getting the color from texture without normals..
//ambient color
float3 ambient = getVectorColorContribution(AMBIENT_COLOR, color.rgb); //mult AMBIENT_COLOR(.rgb) * AMBIENT_COLOR Intensity (.a) * color
float3 diffuse = (float3)0;
float3 specular = (float3)0;
float3 lightDirection = normalize(-LIGHT_DIR.xyz);
float n_dot_l = dot(sampledNormal, lightDirection);
//calculating the diffuse value
diffuse = saturate(n_dot_l) * LIGHT_COLOR.rgb * LIGHT_COLOR.a;
}
//changing the return types will change the result to basic or specular..
OUT.rgb = diffuse * color;
OUT.a = 1.0f;
return OUT;
}
Here it is the result, only using diffuse, to avoid specular errors..
please anyone knows why is this?

After a lot of digging, in DirectXMesh there is a function to calculate tangents right(ComputeTangentFrame) it saves the result in XMFLOAT4 tangent[3]array , works perfectly, so my problem was calculating tangents. Hope it helps anyone else..
Little Example using it:
uint32_t idx[3];
idx[0] = 0;
idx[1] = 1;
idx[2] = 2;
XMFLOAT3 pos[3];
pos[0] = Pos1;
pos[1] = Pos2;
pos[2] = Pos3;
XMFLOAT3 normals[3];
normals[0] = Normal1;
normals[1] = Normal2;
normals[2] = Normal3;
XMFLOAT2 t[3];
t[0] = TextureCoord1;
t[1] = TextureCoord2;
t[2] = TextureCoord3;
XMFLOAT4 tangent[3];
ComputeTangentFrame(idx,1, pos, normals,t,3, tangent);

Related

Compute Normals After Vertex Deformation?

I am coding a vertex and a fragment shader trying to distort the surface of some water and then computing blinn-phong lighting on the surface. I am able to successfully compute the deformed matrices with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation I am stuck, could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
axis = normalize(axis);
float s = sin(angle);
float c = cos(angle);
float oc = 1.0 - c;
return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0, oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0, 0.0, 0.0, 0.0, 1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
mat4 R = rotationMatrix(vec3(0,0,1), deg);
return R * LV;
}
float random1(vec3 p) {
return fract(sin(dot(p, vec3(127.1, 311.7, 191.999)))*43758.5453);
}
vec3 random2( vec3 p ) {
return fract( sin( vec3(dot(p, vec3(127.1, 311.7, 58.24)), dot(p, vec3(269.5, 183.3, 657.3)), dot(p, vec3(420.69, 69.420, 469.20))) ) * 43758.5453);
}
void main()
{
fs_Col = vs_Col;
fs_UVs = vs_UV;
fs_Anim = vs_Anim;
fs_T2O = vs_T2O;
mat3 invTranspose = mat3(u_ModelInvTr);
fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
vec4 modelposition = u_Model * vs_Pos;
if (vs_Anim != 0) { // if we want to animate this surface
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
if (water) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.05) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
} else if (lava) {
// define an oscillating time so that model can transition back and forth
float t = (cos(u_Time * 0.01) + 1)/2; // u_Time increments by 1 every frame. Domain [0,1]
vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
temp = (temp - 0.5)/25; // [0, 1/scalar]
modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3*temp.y, t);
modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
}
}
fs_dimVal = random1(modelposition.xyz/100.f);
fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // Compute the direction in which the light source lies
fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
fs_Pos = modelposition;
gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is
// used to render the final positions of the geometry's vertices
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
return fract(sin(dot(p,vec3(127.1, 311.7, 191.999)))
*43758.5453);
}
float random1b(vec3 p) {
return fract(sin(dot(p,vec3(169.1, 355.7, 195.999)))
*95751.5453);
}
float mySmoothStep(float a, float b, float t) {
t = smoothstep(0, 1, t);
return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
vec3 pFract = fract(p);
float llb = random1(floor(p) + vec3(0,0,0));
float lrb = random1(floor(p) + vec3(1,0,0));
float ulb = random1(floor(p) + vec3(0,1,0));
float urb = random1(floor(p) + vec3(1,1,0));
float llf = random1(floor(p) + vec3(0,0,1));
float lrf = random1(floor(p) + vec3(1,0,1));
float ulf = random1(floor(p) + vec3(0,1,1));
float urf = random1(floor(p) + vec3(1,1,1));
float mixLoBack = mySmoothStep(llb, lrb, pFract.x);
float mixHiBack = mySmoothStep(ulb, urb, pFract.x);
float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
float amp = 0.5;
float freq = 4.0;
float sum = 0.0;
for(int i = 0; i < 8; i++) {
sum += cubicTriMix(p * freq) * amp;
amp *= 0.5;
freq *= 2.0;
}
return sum;
}
void main()
{
vec4 diffuseColor = texture(textureSampler, fs_UVs);
bool apply_lambert = true;
float specularIntensity = 0;
if (fs_Anim != 0) {
// check region in texture to decide which animatable type is drawn
bool lava = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
if (lava) {
// slowly gyrate texture and lighten and darken with random dimVal from vert shader
vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01*u_Time),
fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01*u_Time + 3.14159/2));
diffuseColor = texture(textureSampler, movingUVs);
vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65*sin(0.02*u_Time));
apply_lambert = false;
} else if (water) {
// blend between 3 different points in texture to create a wavy subtle change over time
vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
diffuseColor = texture(textureSampler, fs_UVs);
vec4 altColor = texture(textureSampler, offsetUVs);
altColor.x += fs_dimVal * pow(altColor.x+.15, 5);
altColor.y += fs_dimVal * pow(altColor.y+.15, 5);
altColor.z += 0.5 * fs_dimVal * pow(altColor.z+.15, 5);
diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35*sin(0.05*u_Time));
offsetUVs -= 0.25f/16.f;
vec4 newColor = texture(textureSampler, offsetUVs);
diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5*sin(0.025*u_Time)) + fs_dimVal * vec4(0.025);
diffuseColor.a = 0.7;
// ----------------------------------------------------
// Blinn-Phong Shading
// ----------------------------------------------------
vec4 lightDir = normalize(fs_LightVec - fs_Pos);
vec4 viewDir = normalize(fs_CamPos - fs_Pos);
vec4 halfVec = normalize(lightDir + viewDir);
float shininess = 400.f;
float specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
}
}
// Calculate the diffuse term for Lambert shading
float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
// Avoid negative lighting values
diffuseTerm = clamp(diffuseTerm, 0, 1);
float ambientTerm = 0.3;
float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier
//to simulate ambient lighting. This ensures that faces that are not
//lit by our point light are not completely black.
vec3 col = diffuseColor.rgb;
// Compute final shaded color
if (apply_lambert) {
col = col * lightIntensity + col * specularIntensity;
}
// & Check the rare, special case where we draw face between two diff transparent blocks as opaque
if (fs_T2O != 0) {
out_Col = vec4(col, 1.f);
} else {
out_Col = vec4(col, diffuseColor.a);
}
// distance fog!
vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing blinn-phong in the fragment shader, but I think it is wrong simple from the wrong normals. I think this can be done with some sort of tangent and cross product solution, but how can I know the tangent of the surface given we only know the vertex position?
I am not using unity, just bare c++ and most of the answers I am finding online are for java or unity which I do not understand.`

DX12) Trying to Implement Volumetric Scattering for multiple Spot Light, but It's not going well

(This Image is What I want to implement)
I am attempting Post Processing using Compute Shader to implement Light Shaft for multiple Spot Lights in the DX12 framework.
The first thing I tried was the method at the following link:https://gitlab.com/tomasoh/100_procent_more_volume/-/blob/master/shaders/volumetric.frag
It's a very complicated and hard-to-understand kind of shader, but it's basically built on the premise of using multiple lights, so it's a kind of example for the purpose.
However, since the game I'm making has 32 light source limitations, considering that excessive amount of Frame Drop will occur in the part of calculating Visibility by making Shadow Map for all light sources, I decided to implement Visibility as 1.0 Constant and did not get the desired result. (Of course it's a result.)
Down below is how I did this thing:
#include "lighting.hlsl"
Texture2D<float4> inputTexture : register(t0);
Texture2D<float> depthTexture : register(t1);
RWTexture2D<float4> outputTexture : register(u0);
#define PI 3.141592653589793238f
cbuffer VolumetricCB : register(b1)
{
float absorptionTau : packoffset(c0);
float3 absorptionColor : packoffset(c0.y);
int scatteringSamples : packoffset(c1.x);
float scatteringTau : packoffset(c1.y);
float scatteringZFar : packoffset(c1.z);
float3 scatteringColor : packoffset(c2);
matrix gInvProj : packoffset(c3);
matrix gInvView : packoffset(c7);
float3 gCameraPos : packoffset(c11);
Light gLights[NUM_LIGHTS] : packoffset(c12);
}
float random(float2 co)
{
return frac(sin(dot(co.xy, float2(12.9898, 78.233))) * 43758.5453123);
}
float3 PixelWorldPos(float depthValue, int2 pixel)
{
uint width, height;
inputTexture.GetDimensions(width, height);
float2 fPixel = float2(pixel.x, pixel.y);
float x = (fPixel.x / width * 2) - 1;
float y = (fPixel.y / height * (-2)) + 1;
float z = depthValue;
float4 ndcCoords = float4(x, y, z, 1.0f);
float4 p = mul(ndcCoords, gInvProj);
p /= p.w;
float4 worldCoords = mul(p, gInvView);
return worldCoords.xyz;
}
float3 absorptionTransmittance(float dist)
{
return absorptionColor * exp(-dist * (absorptionTau + scatteringTau));
}
float phaseFunction(float3 inDir, float3 outDir)
{
float cosAngle = dot(inDir, outDir) / (length(inDir) * length(outDir));
float x = (1.0 + cosAngle) / 2.0;
float x2 = x * x;
float x4 = x2 * x2;
float x8 = x4 * x4;
float x16 = x8 * x8;
float x32 = x16 * x16;
float nom = 0.5 + 16.5 * x32;
float factor = 1.0 / (4.0 * PI);
return nom * factor;
}
float3 volumetricScattering(float3 worldPosition, Light light)
{
float3 result = float3(0.0, 0.0, 0.0);
float3 camToFrag = worldPosition - gCameraPos;
if (length(camToFrag) > scatteringZFar)
{
camToFrag = normalize(camToFrag) * scatteringZFar;
}
float3 deltaStep = camToFrag / (scatteringSamples + 1);
float3 fragToCamNorm = normalize(gCameraPos - worldPosition);
float3 x = gCameraPos;
float rand = random(worldPosition.xy + worldPosition.z);
x += (deltaStep * rand);
for (int i = 0; i < scatteringSamples; ++i)
{
float visibility = 1.0;
float3 lightToX = x - light.Position;
float lightDist = length(lightToX);
float omega = 4 * PI * lightDist * lightDist;
float3 Lin = absorptionTransmittance(lightDist) * visibility * light.Diffuse * light.SpotPower / omega;
float3 Li = Lin * scatteringTau * scatteringColor * phaseFunction(normalize(lightToX), fragToCamNorm);
result += Li * absorptionTransmittance(distance(x, gCameraPos)) * length(deltaStep);
x += deltaStep;
}
return result;
}
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
int2 pixel = int2(dispatchID.x, dispatchID.y);
float4 volumetricColor = float4(0.0, 0.0, 0.0, 1.0);
float depthValue = depthTexture[pixel].r;
float3 worldPosition = PixelWorldPos(depthValue, pixel);
float fragCamDist = distance(worldPosition, gCameraPos);
for (int i = 0; i < NUM_LIGHTS; ++i)
{
if (gLights[i].Type == SPOT_LIGHT && gLights[i].FalloffEnd > length(gLights[i].Position - worldPosition))
volumetricColor += float4(volumetricScattering(worldPosition, gLights[i]), 0.0);
}
outputTexture[pixel] = volumetricColor + inputTexture[pixel];
}
(AbsorptionTau = -0.061f, ScatteringTau = 0.059f)
All these Codes for that Tiny Spot...
The second method was shown in Chapter 13 of GPU GEM3.
It was a method of drawing only Light Source on a separate Render Target, processing the Render Target using Post Processing Shder to create light scattering, and then merging it with a back buffer. (At least that's how I understand it.)
However, this method was designed only for one very strong light, and to fix it, I modified the code as below, but it didn't work well.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
uint2 pixel = dispatchID.xy;
uint width, height;
inputTexture.GetDimensions(width, height);
float4 result = inputTexture[pixel];
for (int i = 0; i < NUM_LIGHTS; ++i)
{
if(gLights[i].Type == SPOT_LIGHT)
{
float2 texCoord = float2(pixel.x / width, pixel.y / height);
float2 deltaTexCoord = (texCoord - mul(mul(float4(gLights[i].Position, 1.0f), gView), gProj).xy);
deltaTexCoord *= 1.0f / NUM_SAMPLES * Density;
float3 color = inputTexture[pixel].rgb;
float illuminationDecay = 1.0f;
for (int j = 0; j < NUM_SAMPLES; j++)
{
texCoord -= deltaTexCoord;
uint2 modifiedPixel = uint2(texCoord.x * width, texCoord.y * height);
float3 sample = inputTexture[modifiedPixel].rgb;
sample *= illuminationDecay * Weight;
color += sample;
illuminationDecay *= Decay;
}
result += float4(color * Exposure, 1);
}
}
outputTexture[pixel] = result;
}
this just 'Blur' these light source map, and surely it's not what I wanted.
Is there a similar kind of example to the implementation that I want, or is there a simpler way to do this? I've spent a week on this issue, but I haven't achieved much.
edit :
I did it! but there's some error about direction of light volume.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
float4 result = { 0.0f, 0.0f, 0.0f, 0.0f };
uint2 pixel = dispatchID.xy;
uint width, height;
inputTexture.GetDimensions(width, height);
float2 texCoord = (float2(pixel) + 0.5f) / float2(width, height);
float depth = depthTexture[pixel].r;
float3 screenPos = GetPositionVS(texCoord, depth);
float3 rayEnd = float3(0.0f, 0.0f, 0.0f);
const uint sampleCount = 16;
const float stepSize = length(screenPos - rayEnd) / sampleCount;
// Perform ray marching to integrate light volume along view ray:
[loop]
for (uint i = 0; i < NUM_LIGHTS; ++i)
{
[branch]
if (gLights[i].Type == SPOT_LIGHT)
{
float3 V = float3(0.0f, 0.0f, 0.0f) - screenPos;
float cameraDistance = length(V);
V /= cameraDistance;
float marchedDistance = 0;
float accumulation = 0;
float3 P = screenPos + V * stepSize * dither(pixel.xy);
for (uint j = 0; j < sampleCount; ++j)
{
float3 L = mul(float4(gLights[i].Position, 1.0f), gView).xyz - P;
const float dist2 = dot(L, L);
const float dist = sqrt(dist2);
L /= dist;
//float3 viewDir = mul(float4(gLights[i].Direction, 1.0f), gView).xyz;
float3 viewDir = gLights[i].Direction;
float SpotFactor = dot(L, normalize(-viewDir));
float spotCutOff = gLights[i].outerCosine;
[branch]
if (SpotFactor > spotCutOff)
{
float attenuation = DoAttenuation(dist, gLights[i].Range);
float conAtt = saturate((SpotFactor - gLights[i].outerCosine) / (gLights[i].innerCosine - gLights[i].outerCosine));
conAtt *= conAtt;
attenuation *= conAtt;
attenuation *= ExponentialFog(cameraDistance - marchedDistance);
accumulation += attenuation;
}
marchedDistance += stepSize;
P = P + V * stepSize;
}
accumulation /= sampleCount;
result += max(0, float4(accumulation * gLights[i].Color * gLights[i].VolumetricStrength, 1));
}
}
outputTexture[pixel] = inputTexture[pixel] + result;
}
this is my compute shader, but when I doesn't multiply view matrix to direction, it goes wrong like this :
as you can see, street lamp's volume direction is good, but vehicle's headlight's volume direction is different from it's spot light direction.
and when I multiply view matrix to direction :
head lights gone wrong AND street lamp goes wrong too.
I still finding where's wrong in my cpu codes, but I haven't find anything.
this might be helpful. here's my shader code about spot lighting.
float CalcAttenuation(float d, float falloffStart, float falloffEnd)
{
return saturate((falloffEnd - d) / (falloffEnd - falloffStart));
}
float3 BlinnPhongModelLighting(float3 lightDiff, float3 lightVec, float3 normal, float3 view, Material mat)
{
const float m = mat.Exponent;
const float f = ((mat.IOR - 1) * (mat.IOR - 1)) / ((mat.IOR + 1) * (mat.IOR + 1));
const float3 fresnel0 = float3(f, f, f);
float3 halfVec = normalize(view + lightVec);
float roughness = (m + 8.0f) * pow(saturate(dot(halfVec, normal)), m) / 8.0f;
float3 fresnel = CalcReflectPercent(fresnel0, halfVec, lightVec);
float3 specular = fresnel * roughness;
specular = specular / (specular + 1.0f);
return (mat.Diffuse.rgb + specular * mat.Specular) * lightDiff;
}
float3 ComputeSpotLight(Light light, Material mat, float3 pos, float3 normal, float3 view)
{
float3 result = float3(0.0f, 0.0f, 0.0f);
bool bCompute = true;
float3 lightVec = light.Position - pos;
float d = length(lightVec);
if (d > light.FalloffEnd)
bCompute = false;
if (bCompute)
{
lightVec /= d;
float ndotl = max(dot(lightVec, normal), 0.0f);
float3 lightDiffuse = light.Diffuse * ndotl;
float att = CalcAttenuation(d, light.FalloffStart, light.FalloffEnd);
lightDiffuse *= att;
float spotFactor = pow(max(dot(-lightVec, light.Direction), 0.0f), light.SpotPower);
lightDiffuse *= spotFactor;
result = BlinnPhongModelLighting(lightDiffuse, lightVec, normal, view, mat);
}
return result;
}

What's wrong with my Normal mapping? C++ Directx

Hi guys' I'm trying to implement Normal Mapping in Directx and I'm very close to creating it but I'm getting these weird black colors on some objects. This is how it looks like without the Normal Mapping:
And when I apply the Normal Mapping effect this is how it looks:
Where I get the black color on e.g the wall and some part of the cube which is odd.
My Pixel Shader file looks like this:
cbuffer PositionBuffer : register(b1)
{
float3 cameraPos;
float pad;
float3 lightPos;
float pad1;
};
cbuffer PhongBuffer : register(b2) {
float4 ambient;
float4 diffus;
float4 blank;
float shininess;
float3 padd;
};
Texture2D texDiffuse : register(t0);
Texture2D texNormal : register(t1);
SamplerState texSampler : register(s0);
struct PSIn
{
float4 Pos : SV_Position;
float3 Normal : NORMAL;
float2 TexCoord : TEX;
float4 PosWorld : VIEWPOSITION;
float4 Tangent : TANGENT; //Added this
float4 Binormal : BINORMAL; //Added this
};
//-----------------------------------------------------------------------------------------
// Pixel Shader
//-----------------------------------------------------------------------------------------
float4 PS_main(PSIn input) : SV_Target
{
float3 N;
float3 L;
float3 R;
float3 V;
float3 I;
float3 textureColor;
float4 bumpMap;
float3 bumpNormal;
// Sample the texture pixel at this location.
textureColor = texDiffuse.Sample(texSampler, input.TexCoord).xyz;
// Sample the pixel in the bump map.
bumpMap = texNormal.Sample(texSampler, input.TexCoord);
// Expand the range of the normal value from (0, +1) to (-1, +1).
bumpMap = (bumpMap * 2.0f) - 1.0f;
// Calculate the normal from the data in the bump map.
bumpNormal = (bumpMap.x * input.Tangent) + (bumpMap.y * input.Binormal) + (bumpMap.z * input.Normal);
// Normalize the resulting bump normal.
//bumpNormal = Normalize(bumpNormal;
L = lightPos - input.PosWorld;
//N = input.Normal;
N = bumpNormal;
V = cameraPos - input.PosWorld;
R = reflect(-L, N);
L = normalize(L);
N = normalize(N);
V = normalize(V);
R = normalize(R);
I = ambient.xyz + (textureColor * max(dot(L,N), 0) + blank.xyz * max(pow(dot(R,V), shininess), 0)); //Phong Shader Formula
if(texNormal.Sample(texSampler, input.TexCoord).z <= 0) { //Prevent the objects that don't have bump map from being given the Normal Mapping effect
return texDiffuse.Sample(texSampler, input.TexCoord);
}
if(dot(L, N) < 0) {
return texDiffuse.Sample(texSampler, input.TexCoord);
}
return float4(I, 1);
}
And my Vertex Shader looks like this:
cbuffer MatrixBuffer : register(b0)
{
matrix ModelToWorldMatrix;
matrix WorldToViewMatrix;
matrix ProjectionMatrix;
};
struct VSIn
{
float3 Pos : POSITION;
float3 Normal : NORMAL;
float3 Tangent : TANGENT;
float3 Binormal : BINORMAL;
float2 TexCoord : TEX;
};
struct PSIn
{
float4 Pos : SV_Position;
float3 Normal : NORMAL;
float2 TexCoord : TEX;
float4 PosWorld : VIEWPOSITION;
float3 Tangent : TANGENT; //Added this
float3 Binormal : BINORMAL; //Added this
};
//-----------------------------------------------------------------------------------------
// Vertex Shader
//-----------------------------------------------------------------------------------------
PSIn VS_main(VSIn input)
{
PSIn output = (PSIn)0;
// Model->View transformation
matrix MV = mul(WorldToViewMatrix, ModelToWorldMatrix);
// Model->View->Projection (clip space) transformation
// SV_Position expects the output position to be in clip space
matrix MVP = mul(ProjectionMatrix, MV);
// Perform transformations and send to output
output.Pos = mul(MVP, float4(input.Pos, 1));
output.Normal = normalize( mul(ModelToWorldMatrix, float4(input.Normal,0)).xyz ); //Convert the Normal for the vertex to World Space
output.TexCoord = input.TexCoord;
output.PosWorld = mul(ModelToWorldMatrix, float4(input.Pos, 1)); //Convert to World Space
output.Tangent = normalize( mul(ModelToWorldMatrix, float4(input.Tangent,0)).xyz ); //Convert the Tangent for the vertex to World Space
output.Binormal = normalize( mul(ModelToWorldMatrix, float4(input.Binormal,0)).xyz ); //Convert the Binormal for the vertex to World Space
return output;
}
The way I calculate the Binormal and Tangent for each vertex from the CPU side is using this method (taken from Rasterek Tutorial http://www.rastertek.com/dx11tut20.html):
void OBJModel_t::CalculateModelVectors(std::vector<vertex_t> & vertices) {
int faceCount, i, index;
vertex_t vertex1, vertex2, vertex3;
vec3f tangent, binormal, normal;
int m_vertexCount = vertices.size();
// Calculate the number of faces in the model.
faceCount = m_vertexCount / 3;
// Initialize the index to the model data.
index = 0;
// Go through all the faces and calculate the the tangent, binormal, and normal vectors.
for (i = 0; i<faceCount; i++)
{
// Get the three vertices for this face from the model.
vertex1.Pos.x = vertices[index].Pos.x;
vertex1.Pos.y = vertices[index].Pos.y;
vertex1.Pos.z = vertices[index].Pos.z;
vertex1.TexCoord.x = vertices[index].TexCoord.x;
vertex1.TexCoord.y = vertices[index].TexCoord.y;
vertex1.Normal.x = vertices[index].Normal.x;
vertex1.Normal.y = vertices[index].Normal.y;
vertex1.Normal.z = vertices[index].Normal.z;
index++;
vertex2.Pos.x = vertices[index].Pos.x;
vertex2.Pos.y = vertices[index].Pos.y;
vertex2.Pos.z = vertices[index].Pos.z;
vertex2.TexCoord.x = vertices[index].TexCoord.x;
vertex2.TexCoord.y = vertices[index].TexCoord.y;
vertex2.Normal.x = vertices[index].Normal.x;
vertex2.Normal.y = vertices[index].Normal.y;
vertex2.Normal.z = vertices[index].Normal.z;
index++;
vertex3.Pos.x = vertices[index].Pos.x;
vertex3.Pos.y = vertices[index].Pos.y;
vertex3.Pos.z = vertices[index].Pos.z;
vertex3.TexCoord.x = vertices[index].TexCoord.x;
vertex3.TexCoord.y = vertices[index].TexCoord.y;
vertex3.Normal.x = vertices[index].Normal.x;
vertex3.Normal.y = vertices[index].Normal.y;
vertex3.Normal.z = vertices[index].Normal.z;
index++;
// Calculate the tangent and binormal of that face.
CalculateTangentBinormal(vertex1, vertex2, vertex3, tangent, binormal);
// Calculate the new normal using the tangent and binormal.
CalculateNormal(tangent, binormal, normal);
// Store the normal, tangent, and binormal for this face back in the model structure.
vertices[index - 1].Normal.x = normal.x;
vertices[index - 1].Normal.y = normal.y;
vertices[index - 1].Normal.z = normal.z;
vertices[index - 1].Tangent.x = tangent.x;
vertices[index - 1].Tangent.y = tangent.y;
vertices[index - 1].Tangent.z = tangent.z;
vertices[index - 1].Binormal.x = binormal.x;
vertices[index - 1].Binormal.y = binormal.y;
vertices[index - 1].Binormal.z = binormal.z;
vertices[index - 2].Normal.x = normal.x;
vertices[index - 2].Normal.y = normal.y;
vertices[index - 2].Normal.z = normal.z;
vertices[index - 2].Tangent.x = tangent.x;
vertices[index - 2].Tangent.y = tangent.y;
vertices[index - 2].Tangent.z = tangent.z;
vertices[index - 2].Binormal.x = binormal.x;
vertices[index - 2].Binormal.y = binormal.y;
vertices[index - 2].Binormal.z = binormal.z;
vertices[index - 3].Normal.x = normal.x;
vertices[index - 3].Normal.y = normal.y;
vertices[index - 3].Normal.z = normal.z;
vertices[index - 3].Tangent.x = tangent.x;
vertices[index - 3].Tangent.y = tangent.y;
vertices[index - 3].Tangent.z = tangent.z;
vertices[index - 3].Binormal.x = binormal.x;
vertices[index - 3].Binormal.y = binormal.y;
vertices[index - 3].Binormal.z = binormal.z;
}
return;
}
void OBJModel_t::CalculateTangentBinormal(vertex_t vertex1, vertex_t vertex2, vertex_t vertex3, vec3f& tangent, vec3f& binormal) {
float vector1[3], vector2[3];
float tuVector[2], tvVector[2];
float den;
float length;
// Calculate the two vectors for this face.
vector1[0] = vertex2.Pos.x - vertex1.Pos.x;
vector1[1] = vertex2.Pos.y - vertex1.Pos.y;
vector1[2] = vertex2.Pos.z - vertex1.Pos.z;
vector2[0] = vertex3.Pos.x - vertex1.Pos.x;
vector2[1] = vertex3.Pos.y - vertex1.Pos.y;
vector2[2] = vertex3.Pos.z - vertex1.Pos.z;
// Calculate the tu and tv texture space vectors.
tuVector[0] = vertex2.TexCoord.x - vertex1.TexCoord.x;
tvVector[0] = vertex2.TexCoord.y - vertex1.TexCoord.y;
tuVector[1] = vertex3.TexCoord.x - vertex1.TexCoord.x;
tvVector[1] = vertex3.TexCoord.y - vertex1.TexCoord.y;
// Calculate the denominator of the tangent/binormal equation.
den = 1.0f / (tuVector[0] * tvVector[1] - tuVector[1] * tvVector[0]);
// Calculate the cross products and multiply by the coefficient to get the tangent and binormal.
tangent.x = (tvVector[1] * vector1[0] - tvVector[0] * vector2[0]) * den;
tangent.y = (tvVector[1] * vector1[1] - tvVector[0] * vector2[1]) * den;
tangent.z = (tvVector[1] * vector1[2] - tvVector[0] * vector2[2]) * den;
binormal.x = (tuVector[0] * vector2[0] - tuVector[1] * vector1[0]) * den;
binormal.y = (tuVector[0] * vector2[1] - tuVector[1] * vector1[1]) * den;
binormal.z = (tuVector[0] * vector2[2] - tuVector[1] * vector1[2]) * den;
// Calculate the length of this normal.
length = sqrt((tangent.x * tangent.x) + (tangent.y * tangent.y) + (tangent.z * tangent.z));
// Normalize the normal and then store it
tangent.x = tangent.x / length;
tangent.y = tangent.y / length;
tangent.z = tangent.z / length;
// Calculate the length of this normal.
length = sqrt((binormal.x * binormal.x) + (binormal.y * binormal.y) + (binormal.z * binormal.z));
// Normalize the normal and then store it
binormal.x = binormal.x / length;
binormal.y = binormal.y / length;
binormal.z = binormal.z / length;
return;
}
void OBJModel_t::CalculateNormal(vec3f tangent, vec3f binormal, vec3f& normal)
{
float length;
// Calculate the cross product of the tangent and binormal which will give the normal vector.
normal.x = (tangent.y * binormal.z) - (tangent.z * binormal.y);
normal.y = (tangent.z * binormal.x) - (tangent.x * binormal.z);
normal.z = (tangent.x * binormal.y) - (tangent.y * binormal.x);
// Calculate the length of the normal.
length = sqrt((normal.x * normal.x) + (normal.y * normal.y) + (normal.z * normal.z));
// Normalize the normal.
normal.x = normal.x / length;
normal.y = normal.y / length;
normal.z = normal.z / length;
return;
}

c++ DirectX lighting in pixel shader issue

I have a problem that I cant manage to figure out. I just added a point light to my project and it makes the textures go completely black. I have no idea why.
I think that it might be either the normal that is not updating correctly or it might be calculation of s.x, s.y and s.z.
I would be very happy if someone had time to take a look at it and help me. Thanks.
So. Here is my pixel shader :
Texture2D txDiffuse : register(t0);
SamplerState sampState;
cbuffer PointLight : register(b0)
{
float3 Pos;
float diff;
float amb;
float spec;
float range;
float intensity;
};
struct VS_IN
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
float4 Norm : NORMAL;
float4 Pos2 : POSITION;
};
float4 PS_main(VS_IN input) : SV_Target
{
float3 s = txDiffuse.Sample(sampState, input.Tex).xyz;
float3 lightPos = Pos;
float3 lightVector = lightPos - input.Pos2;
lightVector = normalize(lightVector);
float nDotL = dot(lightVector, input.Norm);
float diff1 = 0.8;
float amb1 = 0.1;
s.x = (s.x * diff * nDotL + s.x * amb);
s.y = (s.y * diff * nDotL + s.y * amb);
s.z = (s.z * diff * nDotL + s.z * amb);
return float4(s, 0.0);
};
Geometry shader :
cbuffer worldMatrix : register(b0)
{
matrix world;
}
cbuffer viewMatrix : register(b1)
{
matrix view;
}
cbuffer projectionMatrix : register(b2)
{
matrix projection;
}
struct VS_IN
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
};
struct VS_OUT
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
float4 Norm : NORMAL;
float4 Pos2 : POSITION;
};
[maxvertexcount(6)]
void main(triangle VS_IN input[3] : SV_POSITION, inout TriangleStream< VS_OUT > output2)
{
matrix wvp = mul(projection, mul(world, view));
matrix worldView = mul(world, view);
float4 normal = float4(cross(input[1].Pos - input[0].Pos, input[2].Pos - input[0].Pos), 0.0f);
normal = normalize(normal);
float4 rotNorm = mul(worldView, normal);
rotNorm = normalize(rotNorm);
VS_OUT output[3];
for (uint i = 0; i < 3; i++)
{
output[i].Pos = input[i].Pos;
output[i].Pos = mul(wvp, input[i].Pos);
output[i].Tex = input[i].Tex;
output[i].Norm = rotNorm;
output[i].Pos2 = mul(worldView, output[i].Pos);
output2.Append(output[i]);
}
output2.RestartStrip();
VS_OUT outputcopy[3];
for (uint i = 0; i < 3; i++)
{
outputcopy[i].Pos = input[i].Pos + (normal);
outputcopy[i].Pos = mul(wvp, outputcopy[i].Pos);
outputcopy[i].Tex = input[i].Tex;
outputcopy[i].Norm = rotNorm;
outputcopy[i].Pos2 = mul(worldView, outputcopy[i].Pos);
output2.Append(outputcopy[i]);
}
output2.RestartStrip();
}
Code to initializing the point light:
struct PointLight
{
Vector3 Pos;
float diff;
float amb;
float spec;
float range;
float intensity;
};
PointLight* pointLight = nullptr;
PointLight PL =
{
Vector3(0.0f, 0.0f, -3.0f),
0.8f,
0.2f,
0.0f,
100.0f,
1.0f
};
pointLight = &PL;
D3D11_BUFFER_DESC lightBufferDesc;
memset(&lightBufferDesc, 0, sizeof(lightBufferDesc));
lightBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lightBufferDesc.Usage = D3D11_USAGE_DEFAULT;
lightBufferDesc.StructureByteStride = 0;
lightBufferDesc.MiscFlags = 0;
lightBufferDesc.ByteWidth = sizeof(PointLight);
D3D11_SUBRESOURCE_DATA pointLightData;
memset(&pointLightData, 0, sizeof(pointLightData));
pointLightData.pSysMem = &PL;
gDevice->CreateBuffer(&lightBufferDesc, &pointLightData, &lightBuffer);
and in render() i run
gDeviceContext->PSSetConstantBuffers(0, 1, &lightBuffer);
Texture will be black if s.x, s.y, s.z equal to zero.
s.x = (s.x * diff * nDotL + s.x * amb);
s.y = (s.y * diff * nDotL + s.y * amb);
s.z = (s.z * diff * nDotL + s.z * amb);
Try to change diff and amb with a non-zero constant so that you can be sure that you set contant buffer correctly or not. If after you change them, it's still black then it must be nDotL and/or sampled texture that is zero. Then try with non-zero constant for texture sample. If they're still causing texture to look black then your light vector calculation is the culprit.

how to render sprites as true spheres?

I'm trying to render my fluid simulator with liquid effect,
here is my render result:
but I want to get this result
here is my geometry and pixel shader
[maxvertexcount(4)]
void mainGS(point GSPS_INPUT gInput[1],inout TriangleStream<GSPS_OUTPUT> TriStream)
{
float size = 0.065;
matrix mv = View;
float3 right = normalize(float3(mv._11,mv._21,mv._31));
float3 up = normalize(float3(mv._12,mv._22,mv._32));
//
float3 posEye = mul( float4(gInput[0].Pos.xyz, 1.0),world).xyz;
//
float halfWidth = size/length(posEye);
float halfHeight = size/length(posEye);
//
float4 v[4];
v[0] = float4(gInput[0].Pos + halfWidth*right - halfHeight*up, 1.0f);
v[1] = float4(gInput[0].Pos + halfWidth*right + halfHeight*up, 1.0f);
v[2] = float4(gInput[0].Pos - halfWidth*right - halfHeight*up, 1.0f);
v[3] = float4(gInput[0].Pos - halfWidth*right + halfHeight*up, 1.0f);
//
//
GSPS_OUTPUT output;
[unroll]
for(int i=0; i<4; ++i)
{
//
output.Pos = mul(v[i], View);
output.PosW = (output.Pos.xyz);
output.Pos = mul(output.Pos, Projection);
output.Tex = gQuadTexC[i];
TriStream.Append(output);
}
TriStream.RestartStrip();
}
//pixel shader
float4 PS(GSPS_OUTPUT input) : SV_TARGET
{
float3 N;
N.xy = input.Tex*float2(2.0f,-2.0f) + float2(-1.0f,1.0f);
float mag = dot(N.xy, N.xy);
if (mag > 1.0)
{
discard;
}
N.z = sqrt(1.0f-mag);
N = N * 0.5 + 0.5;
float4 pixelPos = float4(input.PosW + N*(1/32), 1.0);
float4 clipSpacePos = mul(pixelPos, Projection);
float depthval = (clipSpacePos.z / clipSpacePos.w);
I found here this, but it's not fully explanation
Link