HLSL: Cut a texture radially

I currently have this pixel shader:
float4 PixelShaderFunction(float2 coords : TEXCOORD0) : COLOR0
{
    float4 color = tex2D(Sampler0, coords);
    float dx = coords.x - 0.5f;
    float dy = coords.y - 0.5f;
    float tpos = dx * dx + dy * dy;
    if (tpos <= 0.25f && tpos > 0.25f - width)
        return color;
    else
        return float4(0.0f, 0.0f, 0.0f, 0.0f);
}
This way I am able to draw a circle. But how do I cut the circle, e.g. draw only a 30 degree arc? Or a 60 degree one?
Thanks.

I would suggest using the atan2 intrinsic (doc)(wiki) to compute the angle of your fragment relative to the circle center, and then clip on that angle the same way you already clip on distance.
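A minimal sketch of that idea, building on your shader. The slice is assumed to start at the +x axis and span angleSize radians, where angleSize would be a new shader constant you supply (e.g. radians(30) for a 30 degree slice):

float4 PixelShaderFunction(float2 coords : TEXCOORD0) : COLOR0
{
    float4 color = tex2D(Sampler0, coords);
    float dx = coords.x - 0.5f;
    float dy = coords.y - 0.5f;
    float tpos = dx * dx + dy * dy;

    // angle of this fragment around the centre; atan2 returns [-pi, pi],
    // so shift negative values up into [0, 2*pi)
    float angle = atan2(dy, dx);
    if (angle < 0.0f)
        angle += 6.2831853f;

    // keep the fragment only if it lies inside the ring AND inside the
    // requested angular range
    if (tpos <= 0.25f && tpos > 0.25f - width && angle <= angleSize)
        return color;

    return float4(0.0f, 0.0f, 0.0f, 0.0f);
}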

Related

Mesh getting cut off

I'm using DirectX 11. I'm trying to draw a Cube mesh to the screen but the bottom half is getting cut off. If I move the camera up/down the bottom half is still cut off, which leads me to think that it's not a viewport/rasterizer issue, but I'm not sure. The pictures are of the cube looking down and then looking up. You can see the cube is getting cut off regardless of the camera position. I think it might be an issue with my projection matrices.
I've attached the RenderDoc capture here, and you can see that the VS input is correct, but when viewing the VS output with solid shading, the same thing happens. https://drive.google.com/file/d/1sh7tj0hPYwD936BEQCL0wtH8ZzXMiEno/view?usp=sharing
This is how I'm calculating my matrices:
mat4 LookAtMatrix(float3 Position, float3 Target, float3 Up) {
float3 Forward = Normalise(Target - Position);
float3 Right = Cross(Normalise(Up), Forward);
float3 UpV = Cross(Forward, Right);
mat4 Out;
Out.v[0] = float4(Right, 0);
Out.v[1] = float4(UpV, 0);
Out.v[2] = float4(Forward, 0);
Out.v[3] = float4(Position, 1);
return Out;
}
mat4 ProjectionMatrix(f32 FOV, f32 Aspect, f32 Near, f32 Far) {
mat4 Out;
f32 YScale = 1.0f / tan((FOV * Deg2Rad) / 2.0f);
f32 XScale = YScale / Aspect;
f32 NmF = Near - Far;
Out.v[0] = float4(XScale, 0, 0, 0);
Out.v[1] = float4(0, YScale, 0, 0);
Out.v[2] = float4(0, 0, (Far + Near) / NmF, -1.0f);
Out.v[3] = float4(0, 0, 2 * Far * Near / NmF, 0);
return Out;
}
And this is how I'm calling these functions (the issue happens regardless of whether I use rotation or not):
D3D11_MAPPED_SUBRESOURCE Resource;
HRESULT Result = DeviceContext->Map(ConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &Resource);
if(FAILED(Result)) FatalError("DeviceContext->Map failed");
matrix_buffer *Buffer = (matrix_buffer *)Resource.pData;
static float yR = 0.0f;
yR += 50.0f * DeltaTime;
while(yR > 360.0f) yR -= 360.0f;
while(yR < 0.0f) yR += 360.0f;
quat R = QuatFromAngles(0.0f, yR, 0.0f);
const float Speed = 100.0f;
static float3 Position = float3(0, 0, -300);
if(WDown) Position.z += Speed * DeltaTime;
if(ADown) Position.x += Speed * DeltaTime;
if(SDown) Position.z -= Speed * DeltaTime;
if(DDown) Position.x -= Speed * DeltaTime;
if(QDown) Position.y -= Speed * DeltaTime;
if(EDown) Position.y += Speed * DeltaTime;
Buffer->WorldMatrix = RotationMatrix(R, float3(0, 0, 0));
Buffer->ViewMatrix = LookAtMatrix(Position, Position+float3(0, 0, 1), float3(0, 1, 0));
Buffer->ProjectionMatrix = ProjectionMatrix(45.0f, 1366/768, 0.1f, 1000.0f);
DeviceContext->Unmap(ConstantBuffer, 0);
And this is my vertex shader code:
struct vertex_data {
float3 Position : POSITION;
float2 UV : TEXCOORD;
float4 Colour : COLOR;
float3 Normal : NORMAL;
};
struct pixel_data {
float4 Position : SV_POSITION;
float2 UV : TEXCOORD;
float4 Colour : COLOR;
float3 Normal : NORMAL;
};
cbuffer MatrixBuffer {
float4x4 WorldMatrix;
float4x4 ViewMatrix;
float4x4 ProjectionMatrix;
};
pixel_data VertexMain(vertex_data Input) {
pixel_data Output;
float4 V = float4(Input.Position, 1);
Output.Position = mul(V, transpose(WorldMatrix));
Output.Position = mul(Output.Position, transpose(ViewMatrix));
Output.Position = mul(Output.Position, transpose(ProjectionMatrix));
Output.UV = Input.UV;
Output.Colour = Input.Colour;
Output.Normal = Input.Normal;
return Output;
}
Here is my code for setting up the viewport (Width/Height are 1366/768 - the size of the window):
D3D11_VIEWPORT Viewport;
Viewport.Width = (float)Width;
Viewport.Height = (float)Height;
Viewport.MinDepth = 0.0f;
Viewport.MaxDepth = 1.0f;
Viewport.TopLeftX = 0.0f;
Viewport.TopLeftY = 0.0f;
DeviceContext->RSSetViewports(1, &Viewport);
I've seen similar issues caused by:
Transposed matrices (are you using row-major or column-major matrices? Do you need a #pragma pack_matrix? It looks like you've fiddled with transposing quite a bit; avoid doing that, as you will make mistakes that are difficult to reason about. See the sketch after this list.)
Otherwise messed up matrix multiplication order. If you bob the camera up/down/left/right or arcball it around & rotate the model, does it actually work? Make sure you incorporate camera rotations with camera translations and object rotations / translations, otherwise you might incorrectly think your code works. What if you zoom near or far?
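To illustrate the first point, one way to take transposing out of the shader entirely is to declare the matrix packing explicitly. This is only a sketch and assumes your CPU-side math library stores matrices row-major:

#pragma pack_matrix(row_major)

cbuffer MatrixBuffer {
    float4x4 WorldMatrix;
    float4x4 ViewMatrix;
    float4x4 ProjectionMatrix;
};

// With the packing declared to match the CPU layout, the vertex
// transform needs no transpose() calls:
// Output.Position = mul(V, WorldMatrix);
// Output.Position = mul(Output.Position, ViewMatrix);
// Output.Position = mul(Output.Position, ProjectionMatrix);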
I recommend when debugging these issues that you first try running your shader transformations in CPU code:
Take a simple model-space coordinate (e.g. 0,0,0).
Pass it through your world matrix, and check if it looks right.
Pass it through your view matrix, verify it.
Then your proj matrix.
Even that simple test can be quite revealing. Basically, if you think your vertex shader is wrong, that's fortunately usually the easiest shader to validate in software! If this passes, try a few other vertices, like the vertices of your box. If that succeeds in software, then you know it somehow has to do with how you're passing vertex data to the GPU (e.g. row-major vs column-major). If not, then you've built a simple CPU-side repro, great.
(Also, I'm not sure what your pixel shader is, but to rule it out and isolate the vertex shader, consider making the pixel shader just return a solid white)
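A throwaway pixel shader for that test could be as simple as the following (PixelMain is a hypothetical name; pixel_data is the struct from your vertex shader above):

// Every fragment comes out solid white, so any geometry that is still
// cut off must be getting clipped before rasterization.
float4 PixelMain(pixel_data Input) : SV_Target
{
    return float4(1.0f, 1.0f, 1.0f, 1.0f);
}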

For mouse-click ray casting, why aren't my starting rays updating to my camera position after I move my camera?

When the camera is moved around, why are my starting rays still stuck at the origin (0, 0, 0) even though the camera position has been updated?
It works fine if I start the program with my camera at the default position (0, 0, 0). But once I move my camera, for instance panning to the right, and click some more, the lines are still coming from (0, 0, 0) when they should be starting from wherever the camera is. Am I doing something terribly wrong? I've checked to make sure they're being updated in the main loop. I've used the code snippet below, referenced from:
picking in 3D with ray-tracing using NinevehGL or OpenGL i-phone
// 1. Get mouse coordinates then normalize
float x = (2.0f * lastX) / width - 1.0f;
float y = 1.0f - (2.0f * lastY) / height;
// 2. Move from clip space to world space
glm::mat4 inverseWorldMatrix = glm::inverse(proj * view);
glm::vec4 near_vec = glm::vec4(x, y, -1.0f, 1.0f);
glm::vec4 far_vec = glm::vec4(x, y, 1.0f, 1.0f);
glm::vec4 startRay = inverseWorldMatrix * near_vec;
glm::vec4 endRay = inverseWorldMatrix * far_vec;
// perspective divide
startRay /= startRay.w;
endRay /= endRay.w;
glm::vec3 direction = glm::vec3(endRay - startRay);
// start the ray points from the camera position
glm::vec3 startPos = glm::vec3(camera.GetPosition());
glm::vec3 endPos = glm::vec3(startPos + direction * someLength);
In the first screenshot I click some rays; in the second I move my camera to the right and click some more, but the starting rays are still at (0, 0, 0). What I'm looking for is for the rays to come out from wherever the camera is, as in the third image, i.e. the red rays. Sorry for the confusion: the red lines are supposed to shoot out into the distance, not up.
// and these are my matrices
// projection
glm::mat4 proj = glm::perspective(glm::radians(camera.GetFov()), (float)width / height, 0.1f, 100.0f);
// view
glm::mat4 view = camera.GetViewMatrix(); // This returns glm::lookAt(this->Position, this->Position + this->Front, this->Up);
// model
glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, 0.0f));
It's hard to tell where in the code the problem lies, but I use this function for ray casting, adapted from code from scratch-a-pixel and learnopengl:
vec3 rayCast(double xpos, double ypos, mat4 projection, mat4 view) {
// converts a position from the 2d xpos, ypos to a normalized 3d direction
float x = (2.0f * xpos) / WIDTH - 1.0f;
float y = 1.0f - (2.0f * ypos) / HEIGHT;
float z = 1.0f;
vec3 ray_nds = vec3(x, y, z);
vec4 ray_clip = vec4(ray_nds.x, ray_nds.y, -1.0f, 1.0f);
// eye space to clip we would multiply by projection so
// clip space to eye space is the inverse projection
vec4 ray_eye = inverse(projection) * ray_clip;
// convert point to forwards
ray_eye = vec4(ray_eye.x, ray_eye.y, -1.0f, 0.0f);
// world space to eye space is usually multiply by view so
// eye space to world space is inverse view
vec4 inv_ray_wor = (inverse(view) * ray_eye);
vec3 ray_wor = vec3(inv_ray_wor.x, inv_ray_wor.y, inv_ray_wor.z);
ray_wor = normalize(ray_wor);
return ray_wor;
}
You can then draw your line with startPos = camera.Position and endPos = camera.Position + rayCast(...) * scalar_amount.

View space positions from depth in DirectX orthographic camera

I have a depth texture and I'm trying to output the view space positions on the screen.
I'm using an orthographic camera. Here is the result, and the shader code:
float3 position_in_view_space(float2 uv)
{
float z = depth_tex.SampleLevel(depth_sampler, uv, 0.0f).x;
// Get x/w and y/w from the viewport position
float x = uv.x * 2 - 1;
float y = (1 - uv.y) * 2 - 1;
float4 vProjectedPos = float4(x, y, z, 1.0f);
// Transform by the inverse projection matrix
float4 vPositionVS = mul(vProjectedPos, inv_proj);
// Divide by w to get the view-space position
return vPositionVS.xyz / vPositionVS.w;
}
float3 position_from = position_in_view_space(tex_coord.xy);
output.color = float4(position_from, 1.0);
return output;
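For reference, with an orthographic camera the same reconstruction can also be written out directly, since there is no perspective divide. A minimal sketch, assuming a standard left-handed D3D orthographic projection; ortho_width, ortho_height, near_z and far_z are assumed constants (not from the original post) matching the values the projection was built from:

float3 position_in_view_space_ortho(float2 uv)
{
    float z = depth_tex.SampleLevel(depth_sampler, uv, 0.0f).x;
    // NDC x/y map back to view space by a simple scale for an ortho camera
    float x = (uv.x * 2 - 1) * ortho_width * 0.5f;
    float y = ((1 - uv.y) * 2 - 1) * ortho_height * 0.5f;
    // depth is already linear in view space for an orthographic projection
    float viewZ = near_z + z * (far_z - near_z);
    return float3(x, y, viewZ);
}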

How can I map the mouse position in real pixels to a normalized one when using glm::perspective?

I'm drawing a 2D tilemap using OpenGL and I would like to know where the mouse position corresponds to in my scene. This is what I currently have:
To draw this screen, this projection is used:
glm::mat4 projection = glm::perspective(
glm::radians(45.0f),
(float)screenWidth / (float)screenHeight,
1.0f,
100.0f
);
Then this camera is used to move and zoom the tilemap
glm::vec3 camera(0.0f, 0.0f, -1.00f);
Which then translates into a camera view
glm::mat4 cameraView = glm::translate(state.projection, camera);
That finally gets passed through a uniform to the vertex shader
#version 330 core
layout(location = 0) in vec2 aPosition;
uniform mat4 uCameraView;
void main() {
gl_Position = uCameraView * vec4(aPosition.x, aPosition.y, 0.0f, 1.0f);
}
This shader receives normalized vertex positions, which means I never know how many pixels a tile occupies on my screen.
Now I'm trying to calculate where the mouse would be inside my scene if it were projected like a ray onto the tilemap. If I manage to get the position of that collision, I will be able to know which tile the mouse is hovering over.
What would be the best approach to find this coordinate?
In the end I found this solution to map the mouse pixel coordinates to the perspective:
glm::vec4 tile = glm::translate(projection, glm::vec3(0.0f, 0.0f, camera.z)) *
glm::vec4(size.tile.regular, size.tile.regular, camera.z, 1.0f);
glm::vec3 ndcTile =
glm::vec3(tile.x / tile.w, tile.y / tile.w, tile.z / tile.w);
float pixelUnit = windowWidth * ndcTile.x;
float pixelCameraX = (camera.x / size.tile.regular) * pixelUnit;
float pixelCameraY = (camera.y / size.tile.regular) * pixelUnit;
float originX = (windowWidth / 2.0f) + pixelCameraX;
float originY = (windowHeight / 2.0f) - pixelCameraY;
float tileX = (state.input.pixelCursorX - originX) / pixelUnit;
float tileY = (state.input.pixelCursorY - originY) / pixelUnit;
selectedTileX = tileX > 0 ? tileX : tileX - 1;
selectedTileY = tileY > 0 ? tileY : tileY - 1;

DirectX11 Shadow slope bias incorrect

I've recently implemented shadows in my terrain project using PCF filtering; however, the shadows on the slopes of the terrain appear incorrect, as you can see below:
How can I remove the artifacts caused on the slopes of the terrain? A quick search suggests implementing a slope-based bias, but how would I calculate it?
Here is my pixel shader:
Texture2D shadowMap : register(t0);
SamplerComparisonState SampleTypeCmp : register(s0);
cbuffer SPerLightCB : register(b0)
{
float4 ambientColour;
float4 diffuseColour;
};
cbuffer SPerLightPosCB : register(b1)
{
float3 lightPos;
float padding;
};
struct PixelIn
{
float4 Pos : SV_POSITION;
float4 WPos : TEXCOORD0;
float4 LPos : TEXCOORD1;
float3 Normal : NORMAL;
};
float2 texOffset(float u, float v)
{
return float2(u * 1.0f / 1024.0f, v * 1.0f / 1024.0f);
}
float4 main(PixelIn pin) : SV_Target
{
//re-homogenize position after interpolation
pin.LPos.xyz /= pin.LPos.w;
//if position is not visible to the light - don't illuminate it
//results in hard light frustum
if (pin.LPos.x < -1.0f || pin.LPos.x > 1.0f ||
pin.LPos.y < -1.0f || pin.LPos.y > 1.0f ||
pin.LPos.z < 0.0f || pin.LPos.z > 1.0f)
{
return ambientColour;
}
//transform clip space coords to texture space coords (-1:1 to 0:1)
pin.LPos.x = pin.LPos.x / 2 + 0.5;
pin.LPos.y = pin.LPos.y / -2 + 0.5;
pin.LPos.z -= 0.001f; //apply bias
//PCF Sampling
float sum = 0.0f;
float x, y;
//perform PCF filtering
for (y = -1.5; y <= 1.5; y += 1.0)
{
for (x = -1.5; x <= 1.5; x += 1.0)
{
sum += shadowMap.SampleCmpLevelZero(SampleTypeCmp, pin.LPos.xy + texOffset(x, y), pin.LPos.z);
}
}
float shadowFactor = sum / 16.0;
//calculate illumination
float3 L = normalize(lightPos - pin.WPos.xyz);
float ndotl = dot(normalize(pin.Normal), L);
return ambientColour + shadowFactor * diffuseColour * ndotl;
}
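To the slope-bias question: a common approach (a sketch only, not from the original post; the constants are placeholder values that need tuning per scene) is to scale the depth offset by how steeply the surface faces away from the light, reusing the normal and light direction already computed for the lighting term:

// Slope-scaled bias: surfaces nearly perpendicular to the light get a
// larger offset than surfaces facing it head-on.
float3 N = normalize(pin.Normal);
float3 L = normalize(lightPos - pin.WPos.xyz);
float cosTheta = saturate(dot(N, L));
float slopeBias = clamp(0.001f * tan(acos(cosTheta)), 0.0f, 0.01f);
pin.LPos.z -= slopeBias; // replaces the fixed 0.001f offset above

Alternatively, D3D11 can apply an equivalent offset for you when the shadow map itself is rendered, via the DepthBias and SlopeScaledDepthBias fields of D3D11_RASTERIZER_DESC.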