I'm using DirectX 11. I'm trying to draw a cube mesh to the screen, but the bottom half is getting cut off. If I move the camera up or down, the bottom half is still cut off, which leads me to think it's not a viewport/rasterizer issue, but I'm not sure. The pictures show the cube while looking down and then looking up; you can see the cube is cut off regardless of the camera position. I think it might be an issue with my projection matrices.
I've attached the RenderDoc capture here, and you can see that the VS input is correct, but when viewing the VS output with solid shading, the same thing happens. https://drive.google.com/file/d/1sh7tj0hPYwD936BEQCL0wtH8ZzXMiEno/view?usp=sharing
This is how I'm calculating my matrices:
mat4 LookAtMatrix(float3 Position, float3 Target, float3 Up) {
    float3 Forward = Normalise(Target - Position);
    float3 Right = Cross(Normalise(Up), Forward);
    float3 UpV = Cross(Forward, Right);
    mat4 Out;
    Out.v[0] = float4(Right, 0);
    Out.v[1] = float4(UpV, 0);
    Out.v[2] = float4(Forward, 0);
    Out.v[3] = float4(Position, 1);
    return Out;
}
mat4 ProjectionMatrix(f32 FOV, f32 Aspect, f32 Near, f32 Far) {
    mat4 Out;
    f32 YScale = 1.0f / tan((FOV * Deg2Rad) / 2.0f);
    f32 XScale = YScale / Aspect;
    f32 NmF = Near - Far;
    Out.v[0] = float4(XScale, 0, 0, 0);
    Out.v[1] = float4(0, YScale, 0, 0);
    Out.v[2] = float4(0, 0, (Far + Near) / NmF, -1.0f);
    Out.v[3] = float4(0, 0, 2 * Far * Near / NmF, 0);
    return Out;
}
And this is how I'm calling these functions (the issue happens regardless of whether I use rotation or not):
D3D11_MAPPED_SUBRESOURCE Resource;
HRESULT Result = DeviceContext->Map(ConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &Resource);
if(FAILED(Result)) FatalError("DeviceContext->Map failed");
matrix_buffer *Buffer = (matrix_buffer *)Resource.pData;
static float yR = 0.0f;
yR += 50.0f * DeltaTime;
while(yR > 360.0f) yR -= 360.0f;
while(yR < 0.0f) yR += 360.0f;
quat R = QuatFromAngles(0.0f, yR, 0.0f);
const float Speed = 100.0f;
static float3 Position = float3(0, 0, -300);
if(WDown) Position.z += Speed * DeltaTime;
if(ADown) Position.x += Speed * DeltaTime;
if(SDown) Position.z -= Speed * DeltaTime;
if(DDown) Position.x -= Speed * DeltaTime;
if(QDown) Position.y -= Speed * DeltaTime;
if(EDown) Position.y += Speed * DeltaTime;
Buffer->WorldMatrix = RotationMatrix(R, float3(0, 0, 0));
Buffer->ViewMatrix = LookAtMatrix(Position, Position+float3(0, 0, 1), float3(0, 1, 0));
Buffer->ProjectionMatrix = ProjectionMatrix(45.0f, 1366/768, 0.1f, 1000.0f);
DeviceContext->Unmap(ConstantBuffer, 0);
And this is my vertex shader code:
struct vertex_data {
    float3 Position : POSITION;
    float2 UV : TEXCOORD;
    float4 Colour : COLOR;
    float3 Normal : NORMAL;
};
struct pixel_data {
    float4 Position : SV_POSITION;
    float2 UV : TEXCOORD;
    float4 Colour : COLOR;
    float3 Normal : NORMAL;
};
cbuffer MatrixBuffer {
    float4x4 WorldMatrix;
    float4x4 ViewMatrix;
    float4x4 ProjectionMatrix;
};
pixel_data VertexMain(vertex_data Input) {
    pixel_data Output;
    float4 V = float4(Input.Position, 1);
    Output.Position = mul(V, transpose(WorldMatrix));
    Output.Position = mul(Output.Position, transpose(ViewMatrix));
    Output.Position = mul(Output.Position, transpose(ProjectionMatrix));
    Output.UV = Input.UV;
    Output.Colour = Input.Colour;
    Output.Normal = Input.Normal;
    return Output;
}
Here is my code for setting up the viewport (Width/Height are 1366/768 - the size of the window):
D3D11_VIEWPORT Viewport;
Viewport.Width = (float)Width;
Viewport.Height = (float)Height;
Viewport.MinDepth = 0.0f;
Viewport.MaxDepth = 1.0f;
Viewport.TopLeftX = 0.0f;
Viewport.TopLeftY = 0.0f;
DeviceContext->RSSetViewports(1, &Viewport);
I've seen similar issues caused by:
Transposed matrices (are you using row-major or column-major matrices? Do you need a #pragma pack_matrix? It looks like you've fiddled with transposing quite a bit; avoid doing that, as you will make mistakes that are difficult to reason about. See the sketch after this list.)
Otherwise messed-up matrix multiplication order. If you bob the camera up/down/left/right or arcball it around while rotating the model, does it actually work? Make sure you exercise camera rotations together with camera translations and object rotations/translations; otherwise you might incorrectly conclude your code works. What if you zoom near or far?
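On that first point, one way to avoid scattered transpose() calls is to pick a convention and apply the transpose exactly once, on the CPU, when you fill the constant buffer. A minimal sketch, assuming DirectXMath and HLSL's default column_major packing (the struct and function names here are mine; your custom mat4 would need an equivalent Transpose helper):

#include <DirectXMath.h>
using namespace DirectX;

struct MatrixBuffer { XMFLOAT4X4 World, View, Projection; };

// Build matrices with row-vector math on the CPU, transpose once at
// upload time, and remove every transpose() from the vertex shader.
void UploadMatrices(MatrixBuffer *Mapped, FXMMATRIX World, CXMMATRIX View, CXMMATRIX Proj)
{
    // XMMatrixTranspose compensates for HLSL's column_major cbuffer
    // packing, so the shader sees the same logical matrix built here.
    XMStoreFloat4x4(&Mapped->World,      XMMatrixTranspose(World));
    XMStoreFloat4x4(&Mapped->View,       XMMatrixTranspose(View));
    XMStoreFloat4x4(&Mapped->Projection, XMMatrixTranspose(Proj));
}

Alternatively, declare the cbuffer matrices row_major (or use #pragma pack_matrix(row_major)) and upload them untransposed; either way the convention lives in exactly one place.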
When debugging these issues, I recommend first trying to run your shader transformations in CPU code:
Take a simple model-space coordinate (e.g. 0,0,0).
Pass it through your world matrix, and check if it looks right.
Pass it through your view matrix, verify it.
Then your proj matrix.
Even that simple test can be quite revealing. If you think your vertex shader is wrong, it is fortunately usually the easiest shader to validate in software. If this passes, try a few other vertices, like the vertices of your box. If that succeeds in software, then you know it somehow has to do with how you're passing vertex data to the GPU (e.g. row-major vs column-major). If not, then you've built a simple CPU-side repro, which is great.
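As a concrete starting point, here is a minimal sketch of that CPU-side check, using DirectXMath as a trusted reference to compare your LookAtMatrix/ProjectionMatrix output against. The eye position, FOV, and clip planes are taken from the question; the scaffolding is mine:

#include <DirectXMath.h>
#include <cstdio>
using namespace DirectX;

void CheckTransforms()
{
    // A simple model-space point; with an identity world matrix it is
    // also the world-space point.
    XMVECTOR P = XMVectorSet(0.0f, 0.0f, 0.0f, 1.0f);

    XMMATRIX View = XMMatrixLookAtLH(
        XMVectorSet(0.0f, 0.0f, -300.0f, 1.0f),  // eye
        XMVectorSet(0.0f, 0.0f, -299.0f, 1.0f),  // target = eye + forward
        XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f));    // up
    XMMATRIX Proj = XMMatrixPerspectiveFovLH(
        XMConvertToRadians(45.0f), 1366.0f / 768.0f, 0.1f, 1000.0f);

    XMVECTOR V = XMVector4Transform(P, View);   // view space
    XMVECTOR C = XMVector4Transform(V, Proj);   // clip space

    XMFLOAT4 R;
    XMStoreFloat4(&R, C);
    // For anything inside the D3D view volume, x/w and y/w should land
    // in [-1, 1] and z/w in [0, 1].
    printf("clip: %f %f %f %f  ndc: %f %f %f\n",
           R.x, R.y, R.z, R.w, R.x / R.w, R.y / R.w, R.z / R.w);
}

Run the same point through your own matrices stage by stage; the first stage where your numbers diverge from the reference is the one to dig into.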
(Also, I'm not sure what your pixel shader is, but to rule it out and isolate the vertex shader, consider making the pixel shader just return solid white.)
Related
I need to use a perspective transformation, but I can't understand how to define the model coordinates of a sprite. With an orthographic projection I can define each vertex coordinate as a number of pixels on screen, but with a perspective projection I can't.
Orthographic projection:
glm::ortho<GLfloat>(0.0f, screen_width, screen_height, 0.0f, 1.0f, -1.0f);
Perspective:
glm::perspective(glm::radians(45.f), (float)screen_width / (float)screen_height, 0.1f, 100.f);
Vertex shader:
#version 330 core
layout (std140) uniform Matrices
{
    mat4 ProjectionMatrix;
    mat4 ViewMatrix;
    mat4 ModelMatrix;
};

layout (location = 0) in vec2 position;
layout (location = 1) in vec2 inTexCoords;

out vec2 TextureCoords;

void main()
{
    TextureCoords = inTexCoords;
    gl_Position = ProjectionMatrix * ViewMatrix * ModelMatrix * vec4(position, 1.f, 1.0);
}
For example
vertices[1] = 0.f;
vertices[8] = 0.f;
vertices[12] = 0.f;
vertices[13] = 0.f;
for (GLuint i = 0; i < m_totSprites; ++i) {
    // Vertex pos
    vertices[0] = m_clips[i].w;
    vertices[4] = vertices[0];
    vertices[5] = m_clips[i].h;
    vertices[9] = vertices[5];
    // Texture pos
    vertices[2] = (m_clips[i].x + m_clips[i].w) / tw;
    vertices[3] = (m_clips[i].y + m_clips[i].h) / th;
    vertices[6] = (m_clips[i].x + m_clips[i].w) / tw;
    vertices[7] = m_clips[i].y / th;
    vertices[10] = m_clips[i].x / tw;
    vertices[11] = m_clips[i].y / th;
    vertices[14] = m_clips[i].x / tw;
    vertices[15] = (m_clips[i].y + m_clips[i].h) / th;
}
It works well with an orthographic projection. How can I define vertex coordinates for perspective?
What is the difference between model coordinates in an orthographic projection and in a perspective one? Why is it easy to set vertex coordinates in pixel units in the first case, while in all the perspective examples they are normalized between -0.5 and 0.5? Is that necessary?
Initially I misunderstood the difference between orthographic and perspective projections. As I understand it now, with a perspective projection the vertices are initially defined in NDC-like units and are then moved, scaled, and so on with the model matrix. Pixel-perfect rendering can only be achieved at some constant depth, or with an orthographic projection; it isn't practical for 3D with a perspective projection.
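For completeness, if you do want to specify sprite vertices in pixel units under a perspective projection, the usual trick is exactly that constant depth: at a camera distance of (screenHeight / 2) / tan(fovy / 2) the frustum is screenHeight world units tall, so one world unit spans one pixel vertically. A minimal GLM sketch (the function name and parameters are mine):

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

// Returns a combined projection * view for the distance at which one
// world unit maps to one pixel (in the z = 0 plane).
glm::mat4 PixelPerfectPerspective(float screenWidth, float screenHeight, float fovyDeg)
{
    float fovy = glm::radians(fovyDeg);
    // Distance at which the frustum is exactly screenHeight units tall.
    float dist = (screenHeight * 0.5f) / std::tan(fovy * 0.5f);
    glm::mat4 proj = glm::perspective(fovy, screenWidth / screenHeight,
                                      0.1f, dist * 10.0f); // far plane is arbitrary
    glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, dist),
                                 glm::vec3(0.0f),              // look at the origin
                                 glm::vec3(0.0f, 1.0f, 0.0f)); // y up
    return proj * view;
}

Note the differences from the ortho setup: the origin is the screen centre rather than the top-left corner, y points up, and the 1:1 mapping only holds in the z = 0 plane; anything nearer or farther scales with perspective.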
If you have a projection matrix you need a view matrix too; there's glm::lookAt(), for example. I usually use this combo:
glm::lookAt(glm::vec3(-1.2484, 0.483, 1.84384), glm::vec3(-0.3801, -0.4183, -3.15), glm::vec3(0., 0.2, 0.))
glm::perspective(45., 1., 1.2, 300.)
glm::mat4(1.)
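Putting that combo together, this is roughly how the three matrices combine and reach the shader. A sketch assuming GLM, a loaded GL function pointer set (glad here), and a single uMVP mat4 uniform (the names are mine):

#include <glad/glad.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

void UploadMVP(GLint mvpLoc, const glm::mat4 &model)
{
    glm::mat4 view = glm::lookAt(glm::vec3(-1.2484f, 0.483f, 1.84384f),
                                 glm::vec3(-0.3801f, -0.4183f, -3.15f),
                                 glm::vec3(0.0f, 0.2f, 0.0f));
    glm::mat4 proj = glm::perspective(glm::radians(45.0f), 1.0f, 1.2f, 300.0f);
    glm::mat4 mvp = proj * view * model;   // note the right-to-left order
    glUniformMatrix4fv(mvpLoc, 1, GL_FALSE, glm::value_ptr(mvp));
}

The point is simply that a real view matrix sits between the projection and model matrices, rather than the view being implicitly identity.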
I'm drawing a 2D tilemap using OpenGL, and I would like to be able to know where the mouse position corresponds to in my scene. This is what I currently have:
To draw this screen, this projection is used:
glm::mat4 projection = glm::perspective(
    glm::radians(45.0f),
    (float)screenWidth / (float)screenHeight,
    1.0f,
    100.0f
);
Then this camera is used to move and zoom the tilemap:
glm::vec3 camera(0.0f, 0.0f, -1.00f);
This then translates into a camera view:
glm::mat4 cameraView = glm::translate(state.projection, camera);
That finally gets passed through a uniform to the vertex shader:
#version 330 core
layout(location = 0) in vec2 aPosition;
uniform mat4 uCameraView;
void main() {
    gl_Position = uCameraView * vec4(aPosition.x, aPosition.y, 0.0f, 1.0f);
}
This shader receives normalized vertex positions, which means I never know how many pixels a tile occupies on screen.
Now I'm trying to calculate where the mouse would land inside my scene if it were projected like a ray into the tilemap. If I can get the position of that intersection, I will know which tile the mouse is hovering over.
What would be the best approach to find this coordinate?
In the end I found this solution to map the mouse pixel coordinates to the perspective:
glm::vec4 tile = glm::translate(projection, glm::vec3(0.0f, 0.0f, camera.z)) *
                 glm::vec4(size.tile.regular, size.tile.regular, camera.z, 1.0f);
glm::vec3 ndcTile =
    glm::vec3(tile.x / tile.w, tile.y / tile.w, tile.z / tile.w);
float pixelUnit = windowWidth * ndcTile.x;
float pixelCameraX = (camera.x / size.tile.regular) * pixelUnit;
float pixelCameraY = (camera.y / size.tile.regular) * pixelUnit;
float originX = (windowWidth / 2.0f) + pixelCameraX;
float originY = (windowHeight / 2.0f) - pixelCameraY;
float tileX = (state.input.pixelCursorX - originX) / pixelUnit;
float tileY = (state.input.pixelCursorY - originY) / pixelUnit;
selectedTileX = tileX > 0 ? tileX : tileX - 1;
selectedTileY = tileY > 0 ? tileY : tileY - 1;
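An alternative that generalizes to any projection is to unproject the cursor at the near and far planes and intersect the resulting ray with the map plane. A GLM sketch, assuming the tilemap lies in the z = 0 plane (function and parameter names are mine):

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::vec2 CursorToWorld(float mouseX, float mouseY,
                        const glm::mat4 &view, const glm::mat4 &proj,
                        float windowWidth, float windowHeight)
{
    glm::vec4 viewport(0.0f, 0.0f, windowWidth, windowHeight);
    // GL window coordinates are bottom-up; cursor coordinates are
    // usually top-down, so flip y first.
    glm::vec3 win(mouseX, windowHeight - mouseY, 0.0f);
    glm::vec3 nearP = glm::unProject(win, view, proj, viewport);
    win.z = 1.0f;
    glm::vec3 farP = glm::unProject(win, view, proj, viewport);
    // Ray/plane intersection with z = 0 (assumes the ray crosses it).
    float t = -nearP.z / (farP.z - nearP.z);
    glm::vec3 hit = nearP + t * (farP - nearP);
    return glm::vec2(hit.x, hit.y);
}

Dividing hit.x and hit.y by the tile size then gives the hovered tile, with the same negative-coordinate adjustment as above.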
I'm trying to calculate lighting in tangent space, but I just keep getting abnormal results. I was modifying the book's demo code, and I wonder if there may be something wrong with the transformation matrix I created.
I'm having trouble solving a problem in Introduction to 3D Game Programming with DirectX 11. I tried to use the TBN matrix
Tx, Ty, Tz,
Bx, By, Bz,
Nx, Ny, Nz
as the book provides, but I found that the light vector is wrongly transformed into tangent space, and now I have no clue how to debug this shader.
Here is my Pixel Shader:
float4 PS1(VertexOut pin,
           uniform int gLightCount,
           uniform bool gUseTexure,
           uniform bool gAlphaClip,
           uniform bool gFogEnabled,
           uniform bool gReflectionEnabled) : SV_Target
{
    // Interpolating normal can unnormalize it, so normalize it.
    pin.NormalW = normalize(pin.NormalW);
    pin.TangentW = normalize(pin.TangentW);

    // The toEye vector is used in lighting.
    float3 toEye = gEyePosW - pin.PosW;

    // Cache the distance to the eye from this surface point.
    float distToEye = length(toEye);

    // Calculate normalMapSample
    float3 normalMapSample =
        normalize(SampledNormal2Normal(gNormalMap.Sample(samLinear, pin.Tex).rgb));

    // normalize toEye
    toEye = normalize(toEye);

    // Default to multiplicative identity.
    float4 texColor = float4(1, 1, 1, 1);
    if (gUseTexure)
    {
        // Sample texture.
        texColor = gDiffuseMap.Sample(samLinear, pin.Tex);
        if (gAlphaClip)
        {
            // Discard pixel if texture alpha < 0.1. Note that we do this
            // test as soon as possible so that we can potentially exit the shader
            // early, thereby skipping the rest of the shader code.
            clip(texColor.a - 0.1f);
        }
    }

    //
    // Lighting.
    //
    float4 litColor = texColor;
    if (gLightCount > 0)
    {
        // Start with a sum of zero.
        float4 ambient = float4(0.0f, 0.0f, 0.0f, 0.0f);
        float4 diffuse = float4(0.0f, 0.0f, 0.0f, 0.0f);
        float4 spec = float4(0.0f, 0.0f, 0.0f, 0.0f);

        // Sum the light contribution from each light source.
        [unroll]
        for (int i = 0; i < gLightCount; ++i)
        {
            float4 A, D, S;
            ComputeDirectionalLightInTangent(gMaterial, gDirLights[i],
                normalMapSample, World2TangentSpace(pin.NormalW, pin.TangentW, gTexTransform), toEye,
                A, D, S);
            ambient += A;
            diffuse += D;
            spec += S;
        }
        litColor = texColor*(ambient + diffuse) + spec;

        if (gReflectionEnabled)
        {
            float3 incident = -toEye;
            float3 reflectionVector = reflect(incident, normalMapSample);
            float4 reflectionColor = gCubeMap.Sample(samLinear, reflectionVector);
            litColor += gMaterial.Reflect*reflectionColor;
        }
    }

    //
    // Fogging
    //
    if (gFogEnabled)
    {
        float fogLerp = saturate((distToEye - gFogStart) / gFogRange);
        // Blend the fog color and the lit color.
        litColor = lerp(litColor, gFogColor, fogLerp);
    }

    // Common to take alpha from diffuse material and texture.
    litColor.a = gMaterial.Diffuse.a * texColor.a;
    return litColor;
}
And here are the functions SampledNormal2Normal, World2TangentSpace, and ComputeDirectionalLightInTangent:
float3 SampledNormal2Normal(float3 sampledNormal)
{
    float3 normalT = 2.0f*sampledNormal - 1.0f;
    return normalT;
}

float3x3 World2TangentSpace(float3 unitNormalW, float3 tangentW, float4x4 texTransform)
{
    // Build orthonormal basis.
    float3 N = unitNormalW;
    float3 T = normalize(tangentW - dot(tangentW, N)*N);
    float3 B = cross(N, T);
    float3x3 TBN = float3x3(T, B, N);
    /*float3x3 invTBN = float3x3(T.x, T.y, T.z, B.x, B.y, B.z, N.x, N.y, N.z);
    return invTBN;*/

    float3 T_ = T - dot(N, T)*N;
    float3 B_ = B - dot(N, B)*N - (dot(T_, B)*T_) / dot(T_, T_);
    float3x3 invT_B_N = float3x3(T_.x, T_.y, T_.z, B_.x, B_.y, B_.z, N.x, N.y, N.z);
    return invT_B_N;
}
void ComputeDirectionalLightInTangent(Material mat, DirectionalLight L,
                                      float3 normalT, float3x3 toTS, float3 toEye,
                                      out float4 ambient,
                                      out float4 diffuse,
                                      out float4 spec)
{
    // Initialize outputs.
    ambient = float4(0.0f, 0.0f, 0.0f, 0.0f);
    diffuse = float4(0.0f, 0.0f, 0.0f, 0.0f);
    spec = float4(0.0f, 0.0f, 0.0f, 0.0f);

    // The light vector aims opposite the direction the light rays travel.
    float3 lightVec = -L.Direction;
    lightVec = mul(lightVec, toTS);
    lightVec = normalize(lightVec);

    // toEye to tangent space
    toEye = mul(toEye, toTS);
    toEye = normalize(toEye);

    // Add ambient term.
    ambient = mat.Ambient * L.Ambient;

    // Add diffuse and specular term, provided the surface is in
    // the line of sight of the light.
    float diffuseFactor = dot(lightVec, normalT);

    // Flatten to avoid dynamic branching.
    [flatten]
    if (diffuseFactor > 0.0f)
    {
        float3 v = reflect(-lightVec, normalT);
        float specFactor = pow(max(dot(v, toEye), 0.0f), mat.Specular.w);
        diffuse = diffuseFactor * mat.Diffuse * L.Diffuse;
        spec = specFactor * mat.Specular * L.Specular;
    }
}
The result I get seems much darker in most places and too bright in several highlight areas. I wonder if anyone can help me with my code or give me advice on how to debug an HLSL shader. Many thanks!
I'm trying to implement an object picking system, and so far it works as long as I don't move my camera.
If I don't move the camera, it shows the mouse position correctly in world coordinates. I can't capture the mouse cursor in my screenshot, but it's inside the white "circle", and the coordinates are in the top-left corner of the image.
But if I move the camera down a bit, it still shows (0, 0) at the center of the screen.
What I want and expect is (0, -5) or something like that.
Here is my code:
double mouse_x, mouse_y;
glfwGetCursorPos(window, &mouse_x, &mouse_y);
vec3 mouse_pos = vec3(float(mouse_x), float(mouse_y), 0.0f);
world_mouse_pos = unProject(mouse_pos, view, projection, vec4(0, 0, window_width, window_height));

mouse_offset.x = last_mouse_pos.x - world_mouse_pos.x;
mouse_offset.y = last_mouse_pos.y - world_mouse_pos.y;
last_mouse_pos.x = world_mouse_pos.x;
last_mouse_pos.y = world_mouse_pos.y;

m_state = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_MIDDLE);
if (m_state == GLFW_PRESS){
    view = translate(view, vec3(-mouse_offset, 0.0f));
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, &view.data[0][0]);
}
unProject function:
inline vec3 unProject(const vec3 &pos, const mat4 &modelview, const mat4 &proj, const vec4 &viewport){
    mat4 inv = inverse(proj * modelview);
    vec4 temp = vec4(pos, 1.0f);
    temp.x = ((temp.x - viewport.x) / viewport.z);
    temp.y = ((temp.y - viewport.y) / viewport.w);
    temp = temp * 2 - 1;
    temp.y = -temp.y;
    vec4 obj = inv * temp;
    return vec3(obj.x, obj.y, obj.z);
}
I am using an orthographic projection. Basically unProject is always returning the same values despite the camera being in another position.
What am I missing?
In case others have the same problem: I figured it out. I had forgotten that my matrices are row-major, so my unProject function was wrong. Here is the corrected function:
inline vec3 unProject(const vec3 &pos, const mat4 &modelview, const mat4 &proj, const vec4 &viewport){
    mat4 inv = inverse(modelview * proj);
    vec4 temp = vec4(pos, 1.0f);
    temp.x = ((temp.x - viewport.x) / viewport.z);
    temp.y = ((temp.y - viewport.y) / viewport.w);
    temp = temp * 2 - 1;
    temp.y = -temp.y;
    vec4 obj = transpose(inv) * temp;
    obj /= obj.w;
    return vec3(obj.x, obj.y, obj.z);
}
Basically I changed the order of the first multiplication and transposed the inverse matrix so it can be multiplied by a vec4. With row-major matrices a point transforms as v * M, so the combined matrix is modelview * proj rather than proj * modelview, and since transpose(M) * v equals v * M, transposing the inverse lets it left-multiply a column vector. I also divide the final vec4 obj by obj.w, to make the function more general-purpose and also work with a perspective projection; it makes no difference with an orthographic projection.
I also made small changes to the other code.
double mouse_x, mouse_y;
glfwGetCursorPos(window, &mouse_x, &mouse_y);
vec3 mouse_pos = vec3(float(mouse_x), float(mouse_y), 0.0f);
world_mouse_pos = unProject(mouse_pos, view, projection, vec4(0, 0, window_width, window_height));

mouse_offset.x = last_mouse_pos.x - world_mouse_pos.x;
mouse_offset.y = last_mouse_pos.y - world_mouse_pos.y;
last_mouse_pos.x = world_mouse_pos.x;
last_mouse_pos.y = world_mouse_pos.y;
mouse_offset = -mouse_offset;

m_state = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_MIDDLE);
if (m_state == GLFW_PRESS){
    view = translate(view, vec3(mouse_offset*0.5, 0.0f));
    glUniformMatrix4fv(view_loc, 1, GL_FALSE, &view.data[0][0]);
}
I multiply mouse_offset by -1 to get inverted controls, which is just a preference, and I also multiply it by 0.5, which effectively sets the speed of the camera pan.
I am working on a 3D project in DirectX11, and am currently implementing different lights using the Frank Luna 3D Game Programming with DirectX 11 book alongside my existing code.
Currently I am developing a spotlight, which should follow the camera's position and look in the same direction; however, the lit position moves oddly. When the camera position changes, the light's direction vector seems to drift in the (+x, +y, 0) direction. This is best explained with pictures.
Here the boxes look properly lit, and if the camera stays where it is, the spotlight can be moved around as you'd expect and tracks the camera direction.
In this image I've moved the camera closer to the boxes along the z axis. The light spot should simply get smaller on the nearest box, but instead it tracks upwards.
This is the code where the spotlight struct is set up to be passed into the constant buffer; these are all of the values in the struct, aside from a float used as padding at the end:
cb.spotLight = SpotLight();
cb.spotLight.ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
cb.spotLight.specular = XMFLOAT4(0.5, 0.5, 0.5, 10.0);
cb.spotLight.diffuse = XMFLOAT4(0.5, 0.5, 0.5, 1.0);
cb.spotLight.attenuation = XMFLOAT3(1, 1, 1);
cb.spotLight.range = 15;
XMVECTOR cameraP = XMLoadFloat3(&cameraPos);
XMVECTOR s = XMVectorReplicate(cb.spotLight.range);
XMVECTOR l = XMLoadFloat3(&camera.getForwards());
XMVECTOR lookat = XMVectorMultiplyAdd(s, l, cameraP);
XMStoreFloat3(&cb.spotLight.direction, XMVector3Normalize(lookat - XMVectorSet(cameraPos.x, cameraPos.y, cameraPos.z, 1.0f)));
cb.spotLight.position = cameraPos;
cb.spotLight.spot = 96;
Here is the function being used to calculate the ambient, diffuse and specular values of the spotlight in the shader:
void calculateSpotLight(Material mat, SpotLight light, float3 position, float3 normal, float3 toEye,
                        out float4 ambient, out float4 diffuse, out float4 specular)
{
    ambient = float4(0, 0, 0, 0);
    specular = float4(0, 0, 0, 0);
    diffuse = float4(0, 0, 0, 0);

    float3 lightV = light.position - position;
    float distance = length(lightV);
    if (distance > light.range)
    {
        return;
    }
    lightV /= distance;

    ambient = mat.ambient * light.ambient;

    float diffuseFact = dot(lightV, normal);
    [flatten]
    if (diffuseFact > 0.0f)
    {
        float3 vect = reflect(-lightV, normal);
        float specularFact = pow(max(dot(vect, toEye), 0.0f), mat.specular.w);
        diffuse = diffuseFact * mat.diffuse * light.diffuse;
        specular = specularFact * mat.specular * light.specular;
    }

    float spot = pow(max(dot(-lightV, float3(-light.direction.x, -light.direction.y, light.direction.z)), 0.0f), light.spot);
    float attenuation = spot / dot(light.attenuation, float3(1.0f, distance, distance*distance));

    ambient *= spot;
    diffuse *= attenuation;
    specular *= attenuation;
}
And, for completeness' sake, the vertex shader and the relevant section of the pixel shader.
VS_OUTPUT VS( float4 Pos : POSITION, float3 NormalL : NORMAL, float2 TexC : TEXCOORD )
{
    VS_OUTPUT output = (VS_OUTPUT)0;
    output.Pos = mul( Pos, World );

    //Get normalised vector to camera position in world coordinates
    output.PosW = normalize(eyePos - output.Pos.xyz);

    output.Pos = mul( output.Pos, View );
    output.Pos = mul( output.Pos, Projection );

    //Getting normalised surface normal
    float3 normalW = mul(float4(NormalL, 0.0f), World).xyz;
    normalW = normalize(normalW);
    output.Norm = normalW;

    output.TexC = TexC;
    return output;
}
float4 PS( VS_OUTPUT input ) : SV_Target
{
    input.Norm = normalize(input.Norm);

    Material newMat;
    newMat.ambient = material.ambient;
    newMat.diffuse = texCol;
    newMat.specular = specCol;

    float4 ambient = (0.0f, 0.0f, 0.0f, 0.0f);
    float4 specular = (0.0f, 0.0f, 0.0f, 0.0f);
    float4 diffuse = (0.0f, 0.0f, 0.0f, 0.0f);
    float4 amb, spec, diff;

    calculateSpotLight(newMat, spotLight, input.PosW, input.Norm, input.PosW, amb, diff, spec);
    ambient += amb;
    specular += spec;
    diffuse += diff;

    //Other light types

    float4 colour;
    colour = ambient + specular + diffuse;
    colour.a = material.diffuse.a;
    return colour;
}
Where did I go wrong?
The third argument, input.PosW, is incorrect here. You must pass the position in world space; input.PosW is a normalized direction vector, and it doesn't make sense to subtract a normalized vector from the light position.
You have
calculateSpotLight(newMat, spotLight, input.PosW, input.Norm, input.PosW, amb, diff, spec);
You need the world-space position instead (input.Pos in world space, not projection space; note that your vertex shader overwrites output.Pos with the view and projection transforms, so you will need to pass the world-space position to the pixel shader in its own interpolant):
calculateSpotLight(newMat, spotLight, input.Pos, input.Norm, input.PosW, amb, diff, spec);