I am using Direct3D for the first time and I am looking for a way to represent bounding boxes (rectangles/cylinders/spheres). How do I outline a bounding box? Is this done with a shader, or do I use something else to create an outlined shape?
chasester
The simplest way is to use a line list to create a unit wireframe bounding volume.
Using a transform, you can scale, translate or rotate the volume around any object in your 3D world.
Accomplishing this requires three main parts:
A constant VB and IB with the line-list vertices set up (remember the volume needs to be centered on the origin and of unit size), plus a VB for per-instance data.
An input layout that feeds in the per-instance transform (in the shaders below, a translation and a size, plus a color); a sketch of a matching layout follows the buffer data below.
A vertex shader that applies the per-instance transform to each vertex of the cube (the pixel shader need only output the color as supplied).
(It should be noted that the same principle applies to spheres, hulls, etc. as well.)
The shaders I use look like this (g_mWorldViewProjection is the combined world-view-projection matrix, declared as a shader constant):
float4x4 g_mWorldViewProjection;
struct WIRE_VS_INPUT
{
float3 Position : POSITION;
float4 Color : COLOR0;
float3 Transform : TRANSFORM;
float3 Size : SIZE;
};
struct WIRE_VS_OUTPUT
{
float4 Position : SV_POSITION;
float4 Color : COLOR0;
};
WIRE_VS_OUTPUT WireBoxVS( WIRE_VS_INPUT Input )
{
WIRE_VS_OUTPUT Output;
Output.Position = mul(float4((Input.Position * Input.Size) + Input.Transform,1),g_mWorldViewProjection);
Output.Color = Input.Color;
return Output;
}
float4 WireBoxPS( WIRE_VS_OUTPUT Input ) : SV_TARGET
{
return Input.Color;
}
The cube VB & IB setup:
const DirectX::XMFLOAT3 kWireCube[] =
{
DirectX::XMFLOAT3(-1,-1,-1),
DirectX::XMFLOAT3(1,-1,-1),
DirectX::XMFLOAT3(1,1,-1),
DirectX::XMFLOAT3(-1,1,-1),
DirectX::XMFLOAT3(-1,-1,1),
DirectX::XMFLOAT3(1,-1,1),
DirectX::XMFLOAT3(1,1,1),
DirectX::XMFLOAT3(-1,1,1)
};
const WORD kWireCubeIndices[] =
{
0,1,
1,2,
2,3,
3,0,
4,5,
5,6,
6,7,
7,4,
0,4,
1,5,
2,6,
3,7
};
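Not shown in the original post, but to fill in the second of the three parts above: a sketch of a matching per-instance input layout and draw call, assuming D3D11, with the unit cube in vertex buffer slot 0 and the per-instance data (color, translation, size) in slot 1. The struct and semantic names simply mirror WIRE_VS_INPUT.
#include <d3d11.h>
#include <DirectXMath.h>

// CPU-side layout of one instance; matches the COLOR0/TRANSFORM/SIZE shader inputs
struct WireBoxInstance
{
    DirectX::XMFLOAT4 Color;     // COLOR0
    DirectX::XMFLOAT3 Transform; // TRANSFORM: per-box translation
    DirectX::XMFLOAT3 Size;      // SIZE: per-box scale (half-extents of the -1..1 cube)
};

const D3D11_INPUT_ELEMENT_DESC kWireBoxLayout[] =
{
    // per-vertex data from slot 0
    { "POSITION",  0, DXGI_FORMAT_R32G32B32_FLOAT,    0, 0,  D3D11_INPUT_PER_VERTEX_DATA,   0 },
    // per-instance data from slot 1, advancing once per instance
    { "COLOR",     0, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 0,  D3D11_INPUT_PER_INSTANCE_DATA, 1 },
    { "TRANSFORM", 0, DXGI_FORMAT_R32G32B32_FLOAT,    1, 16, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
    { "SIZE",      0, DXGI_FORMAT_R32G32B32_FLOAT,    1, 28, D3D11_INPUT_PER_INSTANCE_DATA, 1 },
};

// At draw time: bind the cube VB (slot 0), the instance VB (slot 1) and the IB, then
//   context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINELIST);
//   context->DrawIndexedInstanced(24, boxCount, 0, 0, 0); // 24 indices, boxCount boxes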
Related
I'm trying to optimize my application, so I've started to use instancing to save myself from multiple draw calls for the same object.
What I can't understand is how to position each instanced object in the right place. I've been learning from the Rastertek tutorials, and this is their shader code:
struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD0;
float3 instancePosition : TEXCOORD1;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
};
////////////////////////////////////////////////////////////////////////////////
// Vertex Shader
////////////////////////////////////////////////////////////////////////////////
PixelInputType TextureVertexShader(VertexInputType input)
{
PixelInputType output;
// Change the position vector to be 4 units for proper matrix calculations.
input.position.w = 1.0f;
// Update the position of the vertices based on the data for this particular instance.
input.position.x += input.instancePosition.x;
input.position.y += input.instancePosition.y;
input.position.z += input.instancePosition.z;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
// Store the texture coordinates for the pixel shader.
output.tex = input.tex;
return output;
}
I've been using XMMatrixTranslation from DirectXMath.h on my world matrix to put each object in place, but now it looks like the instanced objects have to get their coordinates directly in the shader, and as far as I'm aware this will be done for each vertex of each instance.
This really got me confused: what's the purpose of the world matrix if every instance of the object is already going to be placed in the world by instancePosition? If there is a way to keep using my world matrix to translate each instance of the same object into the world, it would be much better for me, but I don't know if that is possible. If anyone knows anything that could help me with this, it would be great.
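Not part of the question or the Rastertek tutorial, but as an illustration of one possible approach: instead of a float3 instancePosition, the instance buffer can carry a full world matrix per instance (for example one built with XMMatrixTranslation), passed to the vertex shader as four per-instance float4 rows. The TEXCOORD1-TEXCOORD4 semantics below are an assumption, and depending on your matrix conventions the rows may need to be transposed when the instance buffer is filled.
struct VertexInputType
{
    float4 position : POSITION;
    float2 tex : TEXCOORD0;
    float4 instanceWorld0 : TEXCOORD1; // row 0 of this instance's world matrix
    float4 instanceWorld1 : TEXCOORD2; // row 1
    float4 instanceWorld2 : TEXCOORD3; // row 2
    float4 instanceWorld3 : TEXCOORD4; // row 3
};

PixelInputType TextureVertexShader(VertexInputType input)
{
    PixelInputType output;
    input.position.w = 1.0f;
    // rebuild this instance's world matrix from its four rows
    float4x4 instanceWorld = float4x4(input.instanceWorld0,
                                      input.instanceWorld1,
                                      input.instanceWorld2,
                                      input.instanceWorld3);
    // the per-instance matrix takes the place of the shared world matrix
    output.position = mul(input.position, instanceWorld);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);
    output.tex = input.tex;
    return output;
}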
I have searched a lot on Google, and most of the examples achieve a gradient using texture coordinates. But I don't have texture coordinates. I am working on 3D text to which I want to apply a gradient color. Is it possible? If yes, how? Is it necessary to have texture coordinates to obtain a color gradient?
Here is the relevant part of my HLSL shader file:
struct VS_INPUT
{
float3 Pos : POSITION;
float3 Norm : NORMAL;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float3 WorldNorm : TEXCOORD0;
float3 CameraPos : TEXCOORD1;
float3 WorldPos : TEXCOORD2;
};
//--------------------------------------------------------------------------------------
// Vertex Shader
//--------------------------------------------------------------------------------------
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
float4 worldPos = mul( float4(input.Pos,1), World );
float4 cameraPos = mul( worldPos, View );
output.WorldPos = worldPos;
output.WorldNorm = normalize(mul( input.Norm, (float3x3)World ));
output.CameraPos = cameraPos;
output.Pos = mul( cameraPos, Projection );
return output;
}
//--------------------------------------------------------------------------------------
// Pixel Shader Without Light
//--------------------------------------------------------------------------------------
float4 PS( PS_INPUT input) : SV_Target
{
float4 finalColor = {1.0f, 0.0f, 0.0f, 1.0f};
return finalColor;
}
//--------------------------------------------------------------------------------------
technique10 Render
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0_level_9_1, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_4_0_level_9_1, PS() ) );
}
}
You don't need texture coordinates; they are only one possible way (the most flexible one) to store the information that describes how your gradient should look, such as its origin, direction and length.
To get a gradient from left to right across your 3D text, the shader needs to know where left and right are so it can pick the appropriate color. I assume your text changes dynamically, so you need to get this information into the shader, either written into the vertices as texture coordinates or passed in a constant buffer. The latter only works if you draw at most one text per draw call, because the gradient data stays the same for all triangles within a draw call.
If your situation is more specialized, for example if your text is axis-aligned, you could use that axis together with the world position in your pixel shader to determine the relative position within the gradient, but this approach makes many assumptions, and you still need the left and right extents of your text.
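Not part of the original shader, but a minimal sketch of that axis-aligned idea, assuming the text runs along the world X axis; GradientMinX, GradientMaxX, LeftColor and RightColor are assumed constants that the application would have to supply.
float GradientMinX;   // world-space X of the left edge of the text
float GradientMaxX;   // world-space X of the right edge of the text
float4 LeftColor;
float4 RightColor;

float4 PS_Gradient( PS_INPUT input ) : SV_Target
{
    // 0 at the left edge of the text, 1 at the right edge
    float t = saturate((input.WorldPos.x - GradientMinX) / (GradientMaxX - GradientMinX));
    return lerp(LeftColor, RightColor, t);
}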
The task is: the shader takes in a constant color, then generates pixel colors according to their positions by replacing two of the four color components (RGBA) with the texture coordinates.
With a hardcoded component set it looks like this:
float4 inputColor : register(c0);
float4 main(float2 uv : TEXCOORD) : COLOR
{
float4 color = 0;
color.a = inputColor.a;
color.r = inputColor.r;
color.g = uv.x;
color.b = uv.y;
return color;
}
Now I'd like to pass in a parameter(s) specifying which components should be replaced with uv.x and uv.y. Let's say inputColor has -1 and -2 in these components. Or there are uint xIndex and yIndex parameters specifying positions in vector4 to be replaced. HLSL does not allow "color[xIndex] = uv.x".
Currently I've done this in an ugly way with a bunch of if-else statements. But I feel like there is some cross-product or matrix-multiplication solution. Any ideas?
You could work with two additional vectors as channel masks. It works like indexing, but with vector operations.
float4 inputColor : register(c0);
float4 uvx_mask : register(c1); //e.g. (0,0,1,0)
float4 uvy_mask : register(c2); // e.g. (0,0,0,1)
float4 main(float2 uv : TEXCOORD) : COLOR
{
float4 color = 0;
// replacing uvx channel with uv.x
color = lerp(inputColor, uv.x * uvx_mask, uvx_mask);
// replacing uvy channel with uv.y
color = lerp(color , uv.y * uvy_mask, uvy_mask);
return color; //in this example (inputColor.r, inputColor.g, uv.x, uv.y)
}
If you need every last bit of performance, you could alternatively work with the preprocessor (#define, #ifdef) to build the right code on demand, along the lines of the sketch below.
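A rough sketch of that preprocessor approach; UVX_CHANNEL and UVY_CHANNEL are assumed to be defined at compile time (they are plain text substitutions, so .g and .b below could be any pair of components).
#define UVX_CHANNEL g
#define UVY_CHANNEL b

float4 inputColor : register(c0);

float4 main(float2 uv : TEXCOORD) : COLOR
{
    float4 color = inputColor;
    color.UVX_CHANNEL = uv.x; // expands to color.g = uv.x;
    color.UVY_CHANNEL = uv.y; // expands to color.b = uv.y;
    return color;
}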
I'm using HLSL and DirectX 9. I'm trying to recalculate the normals of a mesh so that HLSL receives updated normals as a result of transforming the mesh. What method is best for doing this? Also, D3DXComputeNormals will not work for me because I do not use FVF_NORMAL in my vertex declaration; I declare the vertex format like so:
const D3DVERTEXELEMENT9 dec[4] =
{
{0, 0, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION,0},
{0, 12, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_NORMAL, 0},
{0, 24, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD,0},
D3DDECL_END()
};
I know how to access the adjacency data and vertex buffers but I'm not sure what method to use in order to properly associate a vertex and its normal with a face...Any help would be greatly appreciated. Thanks!
It's not a good idea to update the normals on the CPU and send them to the GPU each frame; that would ruin performance. What you really should do is calculate the transformed normals in the vertex shader, just like you do with the positions. The HLSL code would look like this:
float4x4 mWorldView; // World * View
float4x4 mWorldViewProj; // World * View * Proj
struct VS_OUTPUT
{
float4 position : POSITION;
float2 tex : TEXCOORD0;
float3 normalVS : TEXCOORD1; // view-space normal
};
// Vertex Shader
VS_OUTPUT VS(float3 position : POSITION,
float3 normal : NORMAL,
float2 tex : TEXCOORD0)
{
VS_OUTPUT output; // "out" is a reserved keyword in HLSL, so use a different name
// transform the position
output.position = mul(float4(position, 1), mWorldViewProj);
// pass the texture coordinates to the pixel shader
output.tex = tex;
// calculate the transformed (view-space) normal
float3 n = mul(normal, (float3x3)mWorldView);
// and use it for vertex lighting
// ... some shading calculations ...
// or pass it to the pixel shader and perform per-pixel lighting
output.normalVS = n;
// output
return output;
}
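Not part of the original answer, but a minimal sketch of the per-pixel lighting path mentioned above, assuming a single directional light whose normalized view-space direction vLightDirVS and diffuse color vDiffuseColor are set by the application (both names are made up for the example).
float3 vLightDirVS;   // assumed: normalized view-space light direction
float4 vDiffuseColor; // assumed: material/light diffuse color

// Pixel Shader: simple Lambert shading with the interpolated view-space normal
float4 PS(float2 tex : TEXCOORD0, float3 normalVS : TEXCOORD1) : COLOR
{
    // renormalize, because interpolation shortens the normal
    float3 n = normalize(normalVS);
    float ndotl = saturate(dot(n, -vLightDirVS));
    return float4(vDiffuseColor.rgb * ndotl, vDiffuseColor.a);
}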
I'm (re)learning DirectX and have moved into HLSL coding. Prior to using my custom .fx file I created a skybox for a game with a vertex buffer of quads. Everything worked fine: it texture-mapped and wrapped beautifully. However, now that I have HLSL set up to manage the vertices, there are distinct seams where the quads meet. The textures all line up properly; I just can't get rid of this damn seam!
I tend to think the problem is with texCUBE, or rather with all the texturing information here. I'm texturing the quads in DX; it may just be that I still don't quite get the link between the two, I'm not sure. Anyway, thanks for the help in advance!
Here's the .fx file:
float4x4 World;
float4x4 View;
float4x4 Projection;
float3 CameraPosition;
Texture SkyBoxTexture;
samplerCUBE SkyBoxSampler = sampler_state
{
texture = <SkyBoxTexture>;
minfilter = ANISOTROPIC;
mipfilter = LINEAR;
AddressU = Wrap;
AddressV = Wrap;
AddressW = Wrap;
};
struct VertexShaderInput
{
float4 Position : POSITION0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float3 TextureCoordinate : TEXCOORD0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
float4 worldPosition = mul(input.Position, World);
float4 viewPosition = mul(worldPosition, View);
output.Position = mul(viewPosition, Projection);
float4 VertexPosition = mul(input.Position, World);
output.TextureCoordinate = VertexPosition - CameraPosition;
return output;
}
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
return texCUBE(SkyBoxSampler, normalize(input.TextureCoordinate));
}
technique Skybox
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
To avoid seams you need to draw your skybox in a single DrawIndexedPrimitive call, preferably using a triangle strip. Don't draw each face as a separate primitive transformed with an individual matrix or anything like that, or you will get seams. If for some reason you don't want to use a single DrawIndexedPrimitive call for the skybox parts, then you must ensure that all faces are drawn with the same matrices (the same world, view and projection matrices in every call) and the same coordinate values for the corner vertices; that is, the "top" face should use exactly the same position vectors for its corners as the "side" faces.
Another thing is how you store the skybox. You should use either:
a cubemap (looks like that's what you're doing): make just 8 vertices for the skybox and draw them as an indexed primitive (see the sketch after this list);
or an unwrapped "atlas" texture that has its unused areas filled with the border color;
or, if you're fine with shaders, you could "raytrace" the skybox in a shader.
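Not in the original answer, but a hedged sketch of the "8 vertices drawn as an indexed primitive" option: the 8 cube corners plus 36 triangle-list indices, where each vertex position doubles as the cubemap lookup direction. The box would be drawn with backface culling disabled (or with the winding flipped), since the camera sits inside it.
// the 8 corners of a unit skybox cube; the position is also the cubemap direction
const float kSkyboxVerts[8][3] =
{
    {-1,-1,-1}, { 1,-1,-1}, { 1, 1,-1}, {-1, 1,-1},
    {-1,-1, 1}, { 1,-1, 1}, { 1, 1, 1}, {-1, 1, 1}
};

// 12 triangles (2 per face) as a triangle list
const unsigned short kSkyboxIndices[36] =
{
    0,1,2, 0,2,3, // -Z face
    4,5,6, 4,6,7, // +Z face
    0,3,7, 0,7,4, // -X face
    1,2,6, 1,6,5, // +X face
    0,1,5, 0,5,4, // -Y face
    3,2,6, 3,6,7  // +Y face
};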
You need to clamp the texture coordinates via the sampler state (SetSamplerState, or the sampler_state block in your effect) to get rid of the seam. This Toymaker page explains it. Toymaker is a great site for learning Direct3D; you should check out the tutorials if you have any more trouble.
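For reference, this is what the sampler from the question looks like with clamped addressing; only the address modes change.
samplerCUBE SkyBoxSampler = sampler_state
{
    texture = <SkyBoxTexture>;
    minfilter = ANISOTROPIC;
    mipfilter = LINEAR;
    // Clamp keeps filtering from pulling texels across the edge of a face,
    // which is what produces the visible seam with Wrap
    AddressU = Clamp;
    AddressV = Clamp;
    AddressW = Clamp;
};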
You may like to draw the skybox using only one quad. Everything you need is the inverse of the World*View*Proj matrix, that is (World*View*Proj)^(-1).
The vertices of the quad should be: (1, 1, 1, 1), (1, -1, 1, 1), (-1, 1, 1, 1), (-1, -1, 1, 1).
Then you compute texture coordinates in VS:
float4 pos = mul(vPos, WorldViewProjMatrixInv);
float3 tex_coord = pos.xyz / pos.w;
And finally you sample the texture in PS:
float4 color = texCUBE(sampler, tex_coord);
No worry about any seams! :)