EDIT: IT seems like the only problem left now is that the light comes from the opposite direction if i use the calculation with a normal map. If i only use:
n = normalize(Input.NorView);
it seems to be fine.
I am starting to learn some HLSL Shading with DirectX10 and I have tried to use a normal map to calculate my phong lightning.
First off here is an example of how far I have come:
http://i.stack.imgur.com/IFAo4.jpg
I am not quite sure if this is what im looking to accomplish with this normal map:
http://i.stack.imgur.com/moQvf.jpg
I dont know..shouldn't this look more 3Dish? Maybe I have just an false understanding of the usage of a normal map, but in my mindset a normal map is used to make a model more detailed by adding shadows based on the normal map, so its looks more 3D ish.
And here is my shading code:
Vertex Shader:
T3dVertexPSIn MeshVS(T3dVertexVSIn Input) {
T3dVertexPSIn output = (T3dVertexPSIn) 0;
float4 tempVar;
output.Pos = mul(float4(Input.Pos.xyz, 1).xyzw, g_WorldViewProjection);
output.Tex = Input.Tex;
tempVar = mul(float4(Input.Pos.xyz, 1).xyzw, g_WorldView);
output.PosView = tempVar.xyz;
tempVar = mul(float4(Input.Nor.xyz, 0).xyzw, g_WorldViewNormals);
output.NorView = tempVar.xyz;
output.NorView = normalize(output.NorView);
tempVar = mul(float4(Input.Tan.xyz, 0).xyzw, g_WorldViewNormals);
output.TanView = tempVar.xyz;
output.TanView = normalize(output.TanView);
return output;
}
Pixel Shader:
float4 MeshPS(T3dVertexPSIn Input) : SV_Target0 {
float4 output = (float4)0; //output color
float3 N = normalize(Input.NorView);
float3 T = Input.TanView;
T = normalize(T - N * dot(N,T));
float3 B = cross(T,N);
column_major float3x3 mTBN= {T, B, N};
float4 matDiffuse = g_Diffuse.Sample(samAnisotropic, Input.Tex);
float4 matSpecular = g_Specular.Sample(samAnisotropic, Input.Tex);
float4 matGlow = g_Glow.Sample(samAnisotropic, Input.Tex);
float4 colLight = float4(1,1,1,1);
float4 colLightAmbient = float4(1,1,1,1);
float3 n = mul(g_Normal.Sample(samAnisotropic, Input.Tex).rgb * 2.0 - 1.0, mTBN);
//float3 n = mul(float3x3(T,B,N), nT);
//float3 n = normalize(Input.NorView);
float4 I = g_LightDirView;
float3 r = reflect(-I.xyz, n);
float3 v = normalize(-Input.PosView);
float cd = 0.5f, cs = 0.3f, ca = 0.1f, cg = 0.3f;
output = cd*matDiffuse*saturate(dot(n,I.xyz))*colLight
+cs*matSpecular*pow(saturate(dot(r.xyz,v)),10)*colLight
+ca*matDiffuse*colLightAmbient
+cg*matGlow;
return output;
}
I also have the feeling that the lightning is changing direction when im using the normal map.but I am not sure.
Maybe someone can explain this matter to me a little bit.
Thanks in advance for any help!
A Normal map does not create shadowing. It, simply, allows you to re-evaluate the lighting per texel.
Seems like i was calculating the normal incorrect and some textures also got the wrong normal.
Related
I'm trying to import many transitions from GL Transitions into my video sequencer by converting GLSL to HLSL.
For example, this simple cross fade:
vec4 transition (vec2 uv) {
return mix(
getFromColor(uv),
getToColor(uv),
progress
);
}
is correctly translated in my HLSL code:
#define D2D_INPUT_COUNT 2
#define D2D_INPUT0_SIMPLE
#define D2D_INPUT1_SIMPLE
#define D2D_REQUIRES_SCENE_POSITION // The pixel shader requires the SCENE_POSITION input.
#include "d2d1effecthelpers.hlsli"
cbuffer constants : register(b0)
{
float progress : packoffset(c0.x);
...
}
float4 crossfade(float4 v1,float4 v2)
{
return lerp(v1, v2, progress);
}
D2D_PS_ENTRY(main)
{
float4 v1 = D2DGetInput(0);
float4 v2 = D2DGetInput(1);
return crossfade(v1,v2);
}
The same doesn't work for Wind effect:
// Custom parameters
uniform float size; // = 0.2
float rand (vec2 co) {
return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
vec4 transition (vec2 uv) {
float r = rand(vec2(0, uv.y));
float m = smoothstep(0.0, -size, uv.x*(1.0-size) + size*r - (progress * (1.0 + size)));
return mix(
getFromColor(uv),
getToColor(uv),
m
);
}
This time HLSL is this:
float fract(float x)
{
return x - floor(x);
}
float rand(float2 co)
{
return fract(sin(dot(co.xy, float2(12.9898, 78.233))) * 43758.5453);
}
float4 wind(float4 v1, float4 v2,float2 uv)
{
float r = rand(float2(0, uv.y));
p1 = 0.2f;
progress = 0.5f; // hardcoded variables for testing, they will be taken from the buffer
float m = smoothstep(0.0f, -p1, uv.x*(1.0f-p1) + p1*r - (progress * (1.0f + p1)));
return lerp(v1, v2, m);
}
D2D_PS_ENTRY(main)
{
float4 v1 = D2DGetInput(0);
float4 v2 = D2DGetInput(1);
return wind(v1,v2,D2DGetScenePosition().xy);
}
Have I misunderstood the OpenGL's mix and fract and rand stuff? I only get the second image pixels in my HLSL version without mixing.
EDIT: I 've hardcoded size to 0.992 and multiplied progress by 4 in the HLSL. Now it seems to work, do I miss some bounds-related issues? Is the smoothstep function working as expected?
I found it.
It would need in main entry the usage of D2DGetInputCoordinate instead of D2DGetScenePosition
After doing that, the transitions run fine.
Here's a simple hull shader code that i made, to try to understand tessellation.
I just can't find anything wrong with this code, but the compile function always return false. Here's my code:
My input and output structures:
[domain("tri")] // indicates a triangle patch (3 verts)
[partitioning("fractional_odd")] // fractional avoids popping
// vertex ordering for the output triangles
[outputtopology("triangle_cw")]
[outputcontrolpoints(3)]
// name of the patch constant hull shader
[patchconstantfunc("ConstantsHS")]
//[maxtessfactor(7.0)]
cbuffer TessellationBuffer
{
float tessellationAmount;
float3 padding;
};
struct VS_CONTROL_POINT_OUTPUT
{
float3 vWorldPos : POSITION;
float2 vTexCoord : TEXCOORD0;
float3 vNormal : NORMAL0;
};
struct HS_CONTROL_POINT_OUTPUT
{
float3 vWorldPos : POSITION;
float2 vTexCoord : TEXCOORD0;
float3 vNormal : NORMAL0;
};
struct HS_CONSTANT_DATA_OUTPUT
{
float Edges[3] : SV_TessFactor;
float Inside : SV_InsideTessFactor;
};
My functions:
HS_CONTROL_POINT_OUTPUT HS(InputPatch<VS_CONTROL_POINT_OUTPUT, 3> inputPatch, uint uCPID : SV_OutputControlPointID, uint patchId : SV_PrimitiveID )
{
HS_CONTROL_POINT_OUTPUT Output;
Output.vWorldPos = inputPatch[uCPID].vWorldPos;
Output.vTexCoord = inputPatch[uCPID].vTexCoord;
Output.vNormal = inputPatch[uCPID].vNormal;
return Output;
};
HS_CONSTANT_DATA_OUTPUT ConstantsHS(InputPatch<VS_CONTROL_POINT_OUTPUT, 3> inputPatch, uint PatchID : SV_PrimitiveID )
{
HS_CONSTANT_DATA_OUTPUT Output;
Output.Edges[0] = tessellationAmount;
Output.Edges[1] = tessellationAmount;
Output.Edges[2] = tessellationAmount;
Output.Inside = tessellationAmount;
return Output;
};
Thanks for any help.
The attributes have to be set on the entry point like below, then your hull shader is valid :
[domain("tri")] // indicates a triangle patch (3 verts)
[partitioning("fractional_odd")] // fractional avoids popping
// vertex ordering for the output triangles
[outputtopology("triangle_cw")]
[outputcontrolpoints(3)]
// name of the patch constant hull shader
[patchconstantfunc("ConstantsHS")]
//[maxtessfactor(7.0)]
HS_CONTROL_POINT_OUTPUT HS(InputPatch<VS_CONTROL_POINT_OUTPUT, 3> inputPatch, uint uCPID : SV_OutputControlPointID, uint patchId : SV_PrimitiveID )
On a side note, The command line tool FXC.exe would have print an error message that would have put you in the right direction : error X3000: syntax error: unexpected token 'cbuffer'
And i am unsure of what function you are referring to, D3DCompile return a HRESULT, not a boolean, and it also output a blob for you with the error messages in case of failure.
I'm absolutely stumped as well as my instructors/lab-assistants.
For some reason, the following HLSL code is returning this in the output window:
error X8000 : D3D11 Internal Compiler error : Invalid Bytecode: Invalid operand type for operand #1 of opcode #86 (counts are 1-based).
Here's the function in the HLSL causing the issue:
// Projects a sphere diameter large in screen space to calculate desired tesselation factor
float SphereToScreenSpaceTessellation(float3 p0, float3 p1, float diameter)
{
float3 centerPoint = (p0 + p1) * 0.5f;
float4 point0 = mul( float4(centerPoint,1.0f) , gTileWorldView);
float4 point1 = point0;
point1.x += diameter;
float4 point0ClipSpace = mul(point0, gTileProj);
float4 point1ClipSpace = mul(point1, gTileProj);
point0ClipSpace /= point0ClipSpace.w;
point1ClipSpace /= point1ClipSpace.w;
point0ClipSpace.xy *= gScreenSize;
point1ClipSpace.xy *= gScreenSize;
float projSizeOfEdge = distance(point0ClipSpace, point1ClipSpace);
float result = projSizeOfEdge / gTessellatedTriWidth;
return clamp(result, 0, 64);
}
I've narrowed it down to the point where it may be the "mul" intrinsic. We've taken everything out of the code and tried to return out a temporary variable like this, and it works fine:
float SphereToScreenSpaceTessellation(float3 p0, float3 p1, float diameter)
{
float temp = 0;
float3 centerPoint = (p0 + p1) * 0.5f;
float4 point0 = mul( float4(centerPoint,1.0f) , gTileWorldView);
float4 point1 = point0;
point1.x += diameter;
float4 point0ClipSpace = mul(point0, gTileProj);
float4 point1ClipSpace = mul(point1, gTileProj);
point0ClipSpace /= point0ClipSpace.w;
point1ClipSpace /= point1ClipSpace.w;
point0ClipSpace.xy *= gScreenSize;
point1ClipSpace.xy *= gScreenSize;
float projSizeOfEdge = distance(point0ClipSpace, point1ClipSpace);
float result = projSizeOfEdge / gTessellatedTriWidth;
return temp;
//return clamp(result, 0, 64);
}
If anyone is wondering:
gTileWorldView, gTileProj are float4x4's in a .hlsli file
gScreenSize is a float2 in a .hlsli file.
gTessellatedTriWidth is a float in a .hlsli file.
The following function is as states in a 2011 NVidia shader at : http://dx11-xpr.googlecode.com/svn/trunk/XPR/Media/Effects/TerrainTessellation.fx
I tried to copy and paste their solution replacing their variables with the one above, and the same error listed happens.
I'm absolutely stumped and I need assistance in order to do this assignment, please help.
Check out this line:
point0ClipSpace.xy *= gScreenSize;
Is gScreenSize a float2? I do not believe you can scalar multiply a vec by any vec type.
I have a vtkPolyData filled with points and cells that I want to draw on the screen. My polydata represents brain fibers (list of lines in 3D). A cell is a fiber. It's working, but I need to add colors between all points. We decided to color the polydata using a shader because there will be a lot of coloring methods. My vertex shader is:
vtkShader2 *shader = vtkShader2::New();
shader->SetType(VTK_SHADER_TYPE_VERTEX);
shader->SetSourceCode(R"VertexShader(
#version 120
attribute vec3 next_point;
varying vec3 vColor; // Pass to fragment shader
void main() {
float r = gl_Vertex.x - next_point.x;
float g = gl_Vertex.y - next_point.y;
float b = gl_Vertex.z - next_point.z;
if (r < 0.0) { r *= -1.0; }
if (g < 0.0) { g *= -1.0; }
if (b < 0.0) { b *= -1.0; }
const float norm = 1.0 / sqrt(r*r + g*g + b*b);
vColor = vec3(r * norm, g * norm, b * norm);
gl_Position = ftransform();
}
)VertexShader");
shader->SetContext(shader_program->GetContext());
shader_program->GetShaders()->AddItem(shader);
The goal here is, for each point, get the next point to calculate the color of the line between them. The problem is that I can't find a way to set the value of "next_point". I'm pretty sure it's always filled with 0.0 because the output image is red, blue and green on the sides.
I tried using vtkProperty::AddShaderVariable() but I never saw any change and the method's documentation hints about a "uniform variable" so it's probably not the right way.
// Splitted in 3 because I'm not sure how to pass a vtkPoints object to AddShaderVariable
fibersActor->GetProperty()->AddShaderVariable("next_x", nb_points, next_x);
fibersActor->GetProperty()->AddShaderVariable("next_y", nb_points, next_y);
fibersActor->GetProperty()->AddShaderVariable("next_z", nb_points, next_z);
I also tried using a vtkFloatArray filled with my points, then setting it as a data array.
vtkFloatArray *next_point = vtkFloatArray::New();
next_point->SetName("next_point");
next_point->SetNumberOfComponents(3);
next_point->Resize(nb_points);
// Fill next_point ...
polydata->GetPointData()->AddArray(next_point);
// Tried the vtkAssignAttribute class. Did nothing.
tl;dr Can you please tell me how to pass a list of points into a GLSL attribute variable? Thanks for your time.
I'm trying to implement Sketchy Drawings. I'm at the part of the process which calls for the use of the noise texture to derive uncertainty values that will provide an offset into the edge map.
Here is a picture of my edge map for a torus:
And here is the noise texture I've gotten using the Perlin function as suggested:
I have these saved as textures in edgeTexture and noiseTexture respectively.
Now I'm stuck on the section where you have to offset the texture coordinates of the edge map by uncertainty values derived from the noise texture. This image is from the book:
offs = turbulence(s, t);
offt = turbulence(1 - s, 1 - t);
I'm ignoring the 2x2 matrix for the time being. Here is my current fragment shader attempt and the result it produces:
#version 330
out vec4 vFragColor;
uniform sampler2D edgeTexture;
uniform sampler2D noiseTexture;
smooth in vec2 vTexCoords;
float turbulence(float s, float t)
{
float sum = 0;
float scale = 1;
float s1 = 1;
vec2 coords = vec2(s,t);
for (int i=0; i < 10; i++)
{
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords);
sum += scale * noise.x;
scale = scale / 2;
s1 = s1 * 2;
}
return sum;
}
void main( void )
{
float off_s = turbulence(vTexCoords.s, vTexCoords.t);
float off_t = turbulence(1 - vTexCoords.s, 1 - vTexCoords.t);
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t));
}
Clearly my addition to the vTexCoords is way off, but I can't see why. I have tried several other turbulence function definitions but none were close to the desired output so I'm thinking my overall approach is flawed somewhere. Any help here is greatly appreciated, and please comment if I haven't been clear. The desired output for a torus would just look like a roughly drawn circle I would imagine.
Your turbulence function will return values in the range (0,1). Firstly you need to change this to get values centered on 0. This should be done inside the loop in the function or you'll end up with a strange distribution. So firstly, I think you should change the line:
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords);
to
vec4 noise = texture(noiseTexture, 0.25 * s1 * coords) * 2.0 - 1.0;
You then need to scale the offset so that you're not sampling the edge texture too far away from the fragment being drawn. Change:
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t));
to
vFragColor = texture(edgeTexture, vTexCoords + vec2(off_s, off_t) * off_scale);
where off_scale is some small value (perhaps around 0.05) chosen by experimentation.