I'm trying to write a practice D3D11 rendering system that loads and renders FBX files, but I have a problem transforming vertices in the vertex shader.
I can't figure out what is wrong: in the Visual Studio Graphics Debugger I can see that the mesh passed to the pipeline is OK in the Input Assembler stage, but after the vertex shader transformations everything breaks and the render goes wrong. If someone can tell me what's going wrong, I'd appreciate the info.
View of the Input Assembler Stage
View of the Vertex Shader Output
This is the vertex shader code:
cbuffer MatrixBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
struct VertexInputType
{
float4 position : POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
PixelInputType TextureVertexShader(VertexInputType input)
{
PixelInputType output;
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
output.normal = input.normal;
output.uv = input.uv;
return output;
}
And this is the matrix initialization code:
float lFieldOfView = 3.141592f * 0.4f;
float lScreenAspect = static_cast<float>(width_) / static_cast<float>(height_);
DirectX::XMMATRIX lProjectionMatrix = DirectX::XMMatrixPerspectiveFovLH(lFieldOfView, lScreenAspect, 1.0f, 10000.0f);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix, lProjectionMatrix);
DirectX::XMMATRIX lWorldMatrix = DirectX::XMMatrixIdentity();
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix, lWorldMatrix);
DirectX::XMFLOAT3 lookAtPos(0.0f, 0.0f, 0.0f);
DirectX::XMFLOAT3 eyePos(0.0f, 0.0f, -50.0f);
DirectX::XMFLOAT3 upDir(0.0f, 1.0f, 0.0f);
DirectX::FXMVECTOR lLookAtPos = DirectX::XMLoadFloat3(&lookAtPos);
DirectX::FXMVECTOR lEyePos = DirectX::XMLoadFloat3(&eyePos);
DirectX::FXMVECTOR lUpDir = DirectX::XMLoadFloat3(&upDir);
DirectX::XMMATRIX lViewMatrix = DirectX::XMMatrixLookAtLH(lEyePos, lLookAtPos, lUpDir);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix, lViewMatrix);
D3D11_BUFFER_DESC lBufferDesc = { 0 };
lBufferDesc.ByteWidth = sizeof(MatrixBufferType);
lBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
lBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
D3D11_SUBRESOURCE_DATA lMatrixBufferData;
lMatrixBufferData.pSysMem = &lMatrixBuffer;
hResult = D3DDevice_->CreateBuffer(&lBufferDesc, &lMatrixBufferData, &D3DMatrixBuffer_);
From the comments, it looks like the issue is actually a matrix row-major/column-major mismatch: the original HLSL code would probably work if the matrices were transposed on the CPU side before being written to the constant buffer.
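A minimal sketch of that fix, reusing the variables from the question (HLSL constant buffers default to column-major packing, while XMMATRIX is row-major):
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix, DirectX::XMMatrixTranspose(lWorldMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix, DirectX::XMMatrixTranspose(lViewMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix, DirectX::XMMatrixTranspose(lProjectionMatrix));
Alternatively, declaring the matrices with the row_major keyword inside the cbuffer has the same effect without touching the CPU code.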
Related
I'm trying to use constant buffers in D3D11, but it turns out that when the shader multiplies the position vector with the matrix it doesn't return anything and I get a black screen. I have tried everything that I found in tutorials, but with no luck. Here is the code:
D3D11_BUFFER_DESC udesc;
ZeroMemory(&udesc,sizeof(D3D11_BUFFER_DESC));
udesc.Usage = D3D11_USAGE_DEFAULT;
udesc.CPUAccessFlags = 0;
udesc.ByteWidth = sizeof(uniformsperobj);
udesc.MiscFlags = 0;
udesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
hr = dispositivo->CreateBuffer(&udesc,NULL,&uniformbuffer);
//draw frame function//
model = XMMatrixIdentity();
campos = XMVectorSet(0.0f, 0.0f, -0.5f, 0.0f);
camtarget = XMVectorSet(0.0f, 0.0f, 0.0f, 0.0f);
camup = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
view = XMMatrixLookAtLH(campos, camtarget, camup);
proyection = XMMatrixPerspectiveFovLH(0.4f * 3.14f, (float)WINDOW_WIDTH /WINDOW_HEIGHT, 1.0, 1000.0f);
XMMATRIX MVP = model*view*proyection;
upo.MVP = XMMatrixTranspose(MVP);
context->UpdateSubresource(uniformbuffer,0,NULL,&upo,0,0);
context->VSSetConstantBuffers(0, 1, &uniformbuffer);
/*
********************************shaders.fx***********************************************
*/
cbuffer uniformesporobjeto
{
float4x4 MVP;
};
struct vertex
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
vertex vshader(float4 position : POSITION, float4 color : COLOR)
{
vertex output;
output.Pos = mul(position, MVP);
output.Color = color;
return output;
}
float4 pshader(vertex input) : SV_TARGET
{
return input.Color;
}
Sorry if I posted too much code, but I don't really know where the error is.
Images with examples of the problem: http://imgur.com/gallery/vmMyk
Hi,
I need some help with rendering 2D objects in a 3D scene with a 3D camera. I think I managed to solve the 2D coordinates with LH world coordinates. However, my rendered 2D objects are in the correct place only when the camera is at [0.0f, 0.0f, 0.0f]. In every other position, the location of the 2D objects in the scene is wrong. I think my matrices are screwed up, but I don't know where to look next. I'd appreciate any good ideas; please comment if something's missing for you and I'll edit the main post to provide more information.
I'm using a simple 3D color HLSL shader (VS and PS version 4.0) with alpha blending for the bigger triangle:
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
PS_INPUT VS ( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
input.Pos.w = 1.0f;
output.Pos = mul ( input.Pos, World );
output.Pos = mul ( output.Pos, View );
output.Pos = mul ( output.Pos, Projection );
output.Color = input.Color;
return output;
}
float4 PS ( PS_INPUT input ) : SV_Target
{
return input.Color;
}
That's my Vertex data struct:
struct Vertex
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT4 color;
Vertex() {};
Vertex(DirectX::XMFLOAT3 aPosition, DirectX::XMFLOAT4 aColor)
: position(aPosition)
, color(aColor)
{};
};
Render call for object:
bool PrimitiveMesh::Draw()
{
unsigned int stride = sizeof(Vertex);
unsigned int offset = 0;
D3DSystem::GetD3DDeviceContext()->IASetVertexBuffers(0, 1, &iVertexBuffer, &stride, &offset);
D3DSystem::GetD3DDeviceContext()->IASetIndexBuffer(iIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
D3DSystem::GetD3DDeviceContext()->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return true;
}
Draw call with initialization:
static PrimitiveMesh* mesh;
if (mesh == 0)
{
std::vector<PrimitiveMesh::Vertex> vertices;
mesh = new PrimitiveMesh();
DirectX::XMFLOAT4 color = { 186 / 256.0f, 186 / 256.0f, 186 / 256.0f, 0.8f };
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 0.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 600.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(800.0f, 600.0f, 0.0f), color });
mesh->SetVerticesAndIndices(vertices);
}
// Getting clean matrices here:
D3D::Matrices(world, view, projection, ortho);
iGI->TurnZBufferOff();
iGI->TurnOnAlphaBlending();
mesh->Draw();
XMMATRIX view2D = Camera::View2D();
iColorShader->Render(iGI->GetContext(), 3, &world, &view2D, &ortho);
iGI->TurnZBufferOn();
These are my 2D calculations for camera:
up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
lookAt = DirectX::XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
rotationMatrix = DirectX::XMMatrixRotationRollPitchYaw(0.0f, 0.0f, 0.0f); // (pitch, yaw, roll);
up = DirectX::XMVector3TransformCoord(up, rotationMatrix);
lookAt = DirectX::XMVector3TransformCoord(lookAt, rotationMatrix) + position;
view2D = DirectX::XMMatrixLookAtLH(position, lookAt, up);
I'll appreciate any help.
Kind regards.
With shaders, you are not forced to use matrices; you have the flexibility to simplify the problem.
Let's say you render 2D objects using coordinates in pixels: the only requirement is to scale and offset them back into the normalized projective space.
A vertex shader could be as short as this:
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
PS_INPUT VS ( VS_INPUT input ) {
PS_INPUT output;
output.Pos.xy = input.Pos.xy * rcpDim * 2; // from pixel to [0..2]
output.Pos.xy -= 1; // to [-1..1]
output.Pos.y *= -1; // because top left in texture space is bottom left in projective space
output.Pos.zw = float2(0,1);
output.Color = input.Color;
return output;
}
You can of course build a set of matrices achieving the same result with your original shader: just set World and View to identity, and Projection to an orthographic projection with XMMatrixOrthographicOffCenterLH(0, width, 0, height, 0, 1). But as you are beginning with 3D programming, you will soon have to learn to deal with multiple shaders anyway, so take it as an exercise.
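A minimal sketch of that matrix-based setup (assuming width and height hold the render-target size, and that the matrices are transposed before upload as usual):
DirectX::XMMATRIX world = DirectX::XMMatrixIdentity();
DirectX::XMMATRIX view = DirectX::XMMatrixIdentity();
DirectX::XMMATRIX projection = DirectX::XMMatrixOrthographicOffCenterLH(
    0.0f, static_cast<float>(width),    // left, right in pixels
    0.0f, static_cast<float>(height),   // bottom, top in pixels
    0.0f, 1.0f);                        // near, far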
Well, I fixed my problem. For some weird reason, DirectXMath was generating a wrong XMMATRIX: my XMMatrixOrthographicLH() was completely incorrect for good parameters. I solved my problem with the classic definition of the orthographic matrix, found in this article (definition in Fig. 10):
auto orthoMatrix = DirectX::XMMatrixIdentity();
orthoMatrix.r[0].m128_f32[0] = 2.0f / Engine::VideoSettings::Current()->WindowWidth();
orthoMatrix.r[1].m128_f32[1] = 2.0f / Engine::VideoSettings::Current()->WindowHeight();
orthoMatrix.r[2].m128_f32[2] = -(2.0f / (screenDepth - screenNear));
orthoMatrix.r[2].m128_f32[3] = -(screenDepth + screenNear) / (screenDepth - screenNear);
galop1n gives a good solution, but on my system the constant buffer
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
NEEDS to have a size that is a multiple of 16, which can be done like this:
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
DirectX::XMFLOAT2 rcpDim2;
};
// Supply the vertex shader constant data.
VS_CONSTANT_BUFFER VsConstData;
VsConstData.rcpDim = { 2.0f / w,2.0f / h};
// Fill in a buffer description.
D3D11_BUFFER_DESC cbDesc;
ZeroMemory(&cbDesc, sizeof(cbDesc));
cbDesc.ByteWidth = sizeof(VS_CONSTANT_BUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
// Fill in the subresource data.
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = &VsConstData;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
// Create the buffer.
HRESULT hr = pDevice->CreateBuffer(&cbDesc, &InitData,
&pConstantBuffer11);
or, with explicit alignment:
__declspec(align(16))
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
};
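A compile-time check can catch a wrong size early; a small sketch assuming C++11:
// Fails to compile if the struct doesn't round up to a multiple of 16 bytes.
static_assert(sizeof(VS_CONSTANT_BUFFER) % 16 == 0,
    "Constant buffer size must be a multiple of 16 bytes");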
I'm trying to create a cube map in DirectX 9, but for some reason it's not working. I've used DirectX's Texture Utility to create a .dds texture file for the cube, but when I draw it, it only renders a solid color. Here's the code:
SkyBox.h
#pragma once
#include<D3DX9Mesh.h>
#include"DirectX.h"
class SkyBox{
public:
SkyBox(LPCSTR textureFile);
~SkyBox();
void Draw();
protected:
IDirect3DCubeTexture9* texture;
LPD3DXMESH mesh;
};
SkyBox.cpp
#include"SkyBox.h"
SkyBox::SkyBox(LPCSTR textureFile)
{
D3DXCreateBox(DirectX::device, 1.0f, 1.0f, 1.0f, &mesh, NULL);
D3DXCreateCubeTextureFromFile(DirectX::device, textureFile, &texture);
}
SkyBox::~SkyBox()
{
mesh->Release();
texture->Release();
}
void SkyBox::Draw()
{
D3DXHANDLE textureHandle = DirectX::currentShaderEffect->GetParameterByName(0, "tex0");
DirectX::currentShaderEffect->SetTexture(textureHandle, texture);
DirectX::currentShaderEffect->CommitChanges();
UINT passNum = 5;
DirectX::currentShaderEffect->Begin(&passNum, 0);
DirectX::currentShaderEffect->BeginPass(5);
mesh->DrawSubset(0);
DirectX::currentShaderEffect->EndPass();
DirectX::currentShaderEffect->End();
}
And this is my shader for the Cube Map:
uniform extern float4x4 mvp;
uniform extern texture tex0;
struct SkyboxVS
{
float4 pos : POSITION0;
float3 uv0 : TEXCOORD0;
};
sampler SkyBoxTex = sampler_state
{
Texture = <tex0>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
AddressU = WRAP;
AddressV = WRAP;
};
SkyboxVS VertexSkybox(float3 position : POSITION0, float3 texCoord : TEXCOORD0)
{
SkyboxVS skyVS = (SkyboxVS)0;
skyVS.pos = mul(float4(position, 1.0f), mvp);
skyVS.uv0 = texCoord;
return skyVS;
}
float4 PixelSkybox(float3 texCoord: TEXCOORD0) : COLOR
{
float4 color = texCUBE(SkyBoxTex, texCoord);
return color;
}
technique TransformTech
{
pass P5
{
vertexShader = compile vs_2_0 VertexSkybox();
pixelShader = compile ps_2_0 PixelSkybox();
ZFunc = Always;
StencilEnable = true;
StencilFunc = Always;
StencilPass = Replace;
StencilRef = 0;
}
}
Here's some sample code:
Sky::Sky(const std::string& envmapFilename, float skyRadius)
: mRadius(skyRadius)
{
HR(D3DXCreateSphere(gd3dDevice, skyRadius, 30, 30, &mSphere, 0));
HR(D3DXCreateCubeTextureFromFile(gd3dDevice, envmapFilename.c_str(), &mEnvMap));
ID3DXBuffer* errors = 0;
HR(D3DXCreateEffectFromFile(gd3dDevice, "sky.fx", 0, 0, 0,
0, &mFX, &errors));
if( errors )
MessageBox(0, (char*)errors->GetBufferPointer(), 0, 0);
mhTech = mFX->GetTechniqueByName("SkyTech");
mhWVP = mFX->GetParameterByName(0, "gWVP");
mhEnvMap = mFX->GetParameterByName(0, "gEnvMap");
// Set effect parameters that do not vary.
HR(mFX->SetTechnique(mhTech));
HR(mFX->SetTexture(mhEnvMap, mEnvMap));
}
void Sky::draw()
{
// Sky always centered about camera's position.
D3DXMATRIX W;
D3DXVECTOR3 p = gCamera->pos();
D3DXMatrixTranslation(&W, p.x, p.y, p.z);
HR(mFX->SetMatrix(mhWVP, &(W*gCamera->viewProj())));
UINT numPasses = 0;
HR(mFX->Begin(&numPasses, 0));
HR(mFX->BeginPass(0));
HR(mSphere->DrawSubset(0));
HR(mFX->EndPass());
HR(mFX->End());
}
And shader code:
OutputVS EnvMapVS(float3 posL : POSITION0, float3 normalL : NORMAL0, float2 tex0: TEXCOORD0)
{
// Zero out our output.
OutputVS outVS = (OutputVS)0;
// Transform normal to world space.
outVS.normalW = mul(float4(normalL, 0.0f), gWorldInvTrans).xyz;
// Transform vertex position to world space.
float3 posW = mul(float4(posL, 1.0f), gWorld).xyz;
// Compute the unit vector from the vertex to the eye.
outVS.toEyeW = gEyePosW - posW;
// Transform to homogeneous clip space.
outVS.posH = mul(float4(posL, 1.0f), gWVP);
// Pass on texture coordinates to be interpolated in rasterization.
outVS.tex0 = tex0;
// Done--return the output.
return outVS;
}
float4 EnvMapPS(float3 normalW : TEXCOORD0,
float3 toEyeW : TEXCOORD1,
float2 tex0 : TEXCOORD2) : COLOR
{
// Interpolated normals can become unnormal--so normalize.
normalW = normalize(normalW);
toEyeW = normalize(toEyeW);
// Light vector is opposite the direction of the light.
float3 lightVecW = -gLight.dirW;
// Compute the reflection vector.
float3 r = reflect(-lightVecW, normalW);
// Determine how much (if any) specular light makes it into the eye.
float t = pow(max(dot(r, toEyeW), 0.0f), gMtrl.specPower);
// Determine the diffuse light intensity that strikes the vertex.
float s = max(dot(lightVecW, normalW), 0.0f);
// Get the texture color.
float4 texColor = tex2D(TexS, tex0);
// Get the reflected color.
float3 envMapTex = reflect(-toEyeW, normalW);
float3 reflectedColor = texCUBE(EnvMapS, envMapTex);
// Weighted average between the reflected color, and usual
// diffuse/ambient material color modulated with the texture color.
float3 ambientMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.ambient*texColor);
float3 diffuseMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.diffuse*texColor);
// Compute the ambient, diffuse and specular terms separately.
float3 spec = t*(gMtrl.spec*gLight.spec).rgb;
float3 diffuse = s*(diffuseMtrl*gLight.diffuse.rgb);
float3 ambient = ambientMtrl*gLight.ambient;
float3 final = ambient + diffuse + spec;
// Output the color and the alpha.
return float4(final, gMtrl.diffuse.a*texColor.a);
}
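The sky.fx file itself is not shown above; a minimal sketch of a skybox shader in the same style (the parameter names gWVP/gEnvMap and technique name SkyTech are assumed to match the C++ sample) uses the local vertex position as the cube-map lookup vector and pins the geometry to the far plane:
uniform extern float4x4 gWVP;
uniform extern texture gEnvMap;
sampler EnvMapS = sampler_state
{
    Texture = <gEnvMap>;
    MinFilter = LINEAR;
    MagFilter = LINEAR;
    MipFilter = LINEAR;
};
struct SkyOutputVS
{
    float4 posH : POSITION0;
    float3 envTex : TEXCOORD0;
};
SkyOutputVS SkyVS(float3 posL : POSITION0)
{
    SkyOutputVS outVS = (SkyOutputVS)0;
    // Set z = w so that z/w = 1: the sky always sits on the far plane.
    outVS.posH = mul(float4(posL, 1.0f), gWVP).xyww;
    // The local vertex position doubles as the cube-map lookup vector.
    outVS.envTex = posL;
    return outVS;
}
float4 SkyPS(float3 envTex : TEXCOORD0) : COLOR
{
    return texCUBE(EnvMapS, envTex);
}
technique SkyTech
{
    pass P0
    {
        vertexShader = compile vs_2_0 SkyVS();
        pixelShader = compile ps_2_0 SkyPS();
        CullMode = None;       // camera is inside the sphere
        ZFunc = LessEqual;     // pass the depth test at z/w == 1
    }
}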
I am implementing the Pre-Pass Lighting algorithm in OpenGL for my master's dissertation project, after having implemented a Deferred renderer as well. The Deferred renderer works perfectly, and I based the PPL implementation on it. I get a very weird artifact after the lighting pass of the algorithm: the data contained in the L-buffer, where I accumulate the contributions of the lights in the scene, is correct, but it is slightly offset with respect to the geometry, so when I apply it to the scene in the material pass the artifact is clearly visible. (I can't post the image here, but here is a link to it: http://postimage.org/image/kxhlbnl9v/)
It looks like the light-map cube is somehow computed with an offset (different on each axis) from the geometry. I have checked the shaders and the C++ code many times and I do not understand where this problem comes from; I am running out of ideas. Below is the code for the three passes of the algorithm, which are called in sequence. The code is experimental for now, so I know it's not well designed at this stage. I also include the shaders I use in every stage to write to the G-buffer, the L-buffer, and the framebuffer, in that order.
C++ CODE:
// Draw geometry to g buffer
void GLPrePassLightingRendererV2::GeometryStage()
{
// Set GL states
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glDepthFunc(GL_LEQUAL);
glDisable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
// Bind G-Buffer for geometry pass
mGBuffer->BindForWriting();
// Bind geometry stage shaders
mTargetRenderSystem->BindShader(mGeometryStageVS);
mTargetRenderSystem->BindShader(mGeometryStageFS);
// Clear the framebuffer
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR | FBT_DEPTH);
// Iterate over all the Renderables in the previously built RenderQueue
RenderableList* visibles = mSceneManager->GetRenderQueue()->GetRenderables();
// Set shader params here
//[...]
// Get the transformation info from the node the renderable is attached to
for (RenderableList::iterator it = visibles->begin(); it != visibles->end(); ++it)
{
Renderable* renderable = *it;
Material* mat = renderable->GetMaterial();
mGeometryStageVS->Update();
mGeometryStageFS->Update();
// Render the object
RenderOperation rop;
renderable->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
// Only the geometry pass will write to the depth buffer
glDepthMask(GL_FALSE);
glDisable(GL_DEPTH_TEST);
}
// Accumulate lights contribs in L-buffer using G-buffer
void GLPrePassLightingRendererV2::LightingStage()
{
// Enable additive blending for lights
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
//glCullFace(GL_FRONT);
// Bind shader for light stage
mTargetRenderSystem->BindShader(mLightStageVS);
mTargetRenderSystem->BindShader(mLightStageFS);
// Bind G-Buffer for reading and L-Buffer for writing for lighting pass
mGBuffer->BindForReading();
mLBuffer->BindForWriting();
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR);
// Set shader params
// [...]
// Get all the lights in frustum, not by renderable
const LightList& lights = mSceneManager->GetLightsInFrustum();
// For each light in the frustum
LightList::const_iterator front_light_it;
for (LightList::const_iterator lit = lights.begin(); lit != lights.end(); ++lit)
{
// Send per light parameters to the shader
Light* l = (*lit);
SetLight(*l);
// Calculate bounding sphere for the light and scale according to intensity
float lightSphereScale = GetPointLightSphereScale(l->GetColor(), l->GetDiffuseIntensity());
// TODO: Render a sphere for each point light, a full screen quad for each directional
worldMtx.Identity();
worldMtx.SetScale(lightSphereScale, lightSphereScale, lightSphereScale);
worldMtx.SetTranslation(l->GetPosition());
mLightStageVS->SetParameterValue("gWorldMtx", (float*)&worldMtx);
mLightStageVS->Update();
mLightStageFS->Update();
static MeshInstance* sphere = mSceneManager->CreateMeshInstance("LightSphere", MBT_LIGHT_SPHERE);
RenderOperation rop;
sphere->GetSubMeshInstance(0)->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
// Disable additive blending
glDisable(GL_BLEND);
}
// Combine L-buffer and material information per object
void GLPrePassLightingRendererV2::MaterialStage()
{
// Set some GL states
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
//glCullFace(GL_BACK);
// Bind material stage shaders (TODO: actually every object will bind its own material; if not, a default one is used)
mTargetRenderSystem->BindShader(mMaterialStageVS);
mTargetRenderSystem->BindShader(mMaterialStageFS);
// Bind L-Buffer for reading
mLBuffer->BindForReading();
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR | FBT_DEPTH, Math::ColourValue::WHITE);
// Iterate over all the Renderables in the previously built RenderQueue
RenderableList* visibles = mSceneManager->GetRenderQueue()->GetRenderables();
// Set shader params here
// [...]
// Get the transformation info from the node the renderable is attached to
for (RenderableList::iterator it = visibles->begin(); it != visibles->end(); ++it)
{
Renderable* renderable = *it;
Material* mat = renderable->GetMaterial();
// Set texture units
if (mat)
{
for (unsigned short i = 0; i < mat->GetTextureUnitCount(); ++i)
{
const TextureUnit* unit = mat->GetTextureUnit(i);
GLTexture* t = static_cast<GLTexture*>(unit->GetTexture());
glActiveTexture(GL_TEXTURE1); // This is needed because the first texture map slot is held by the L-buffer!
glBindTexture(GL_TEXTURE_2D, t->GetGLId());
}
}
mMaterialStageVS->Update();
mMaterialStageFS->Update();
// Render the object
RenderOperation rop;
renderable->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
}
NVIDIA CG Shaders:
// Vertex shader for Pre-pass Lighting geometry stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 texCoord : TEXCOORD0;
};
struct v2f
{
float4 position : POSITION;
float3 normal : TEXCOORD0;
float3 wPosition : TEXCOORD1;
float2 texCoord : TEXCOORD2;
};
v2f PPL_geometry_stage_vs(a2v IN)
{
v2f OUT;
// Transform to world space
OUT.wPosition = mul(gWorldMtx, float4(IN.position, 1.0f)).xyz;
OUT.normal = mul(gWorldMtx, float4(IN.normal, 0.0f)).xyz;
// Transform to homogeneous clip space
OUT.position = mul(gViewMtx, float4(OUT.wPosition, 1.0f));
OUT.position = mul(gProjectionMtx, OUT.position);
OUT.texCoord = IN.texCoord;
return OUT;
}
// Fragment shader for Pre-pass Lighting geometry stage.
struct f2a
{
float4 position : COLOR0;
float4 normal : COLOR1;
};
f2a PPL_geometry_stage_fs(v2f IN)
{
f2a OUT;
OUT.position = float4(IN.wPosition, 1.0f);
OUT.normal = float4(normalize(IN.normal), 1.0f);
return OUT;
}
// Vertex shader for Pre-pass Lighting light stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
};
struct v2f
{
float4 position : POSITION;
float4 lightPos : TEXCOORD0;
};
v2f PPL_light_stage_vs(a2v IN)
{
v2f OUT;
float4x4 wv = mul(gWorldMtx, gViewMtx);
float4x4 wvp = mul(gViewMtx, gProjectionMtx);
wvp = mul(wvp, gWorldMtx);
// Transform position to homogeneous clip space
OUT.position = mul(wvp, float4(IN.position, 1.0f));
// Copy light position to calculate fragment coordinate
OUT.lightPos = OUT.position;
return OUT;
}
// Fragment shader for Pre-pass Lighting light stage.
// Light structures
struct BaseLight
{
float3 color;
float ambientIntensity;
float diffuseIntensity;
};
struct DirectionalLight
{
struct BaseLight base;
float3 direction;
};
struct Attenuation
{
float constant;
float linearr;
float quadratic;
};
struct PointLight
{
struct BaseLight base;
float3 position;
Attenuation atten;
};
struct SpotLight
{
struct PointLight base;
float3 direction;
float cutoff;
};
// G-Buffer textures
sampler2D gPositionMap : TEXUNIT0;
sampler2D gNormalMap : TEXUNIT1;
// Light variables
float3 gEyePosition;
DirectionalLight gDirectionalLight;
PointLight gPointLight;
SpotLight gSpotLight;
int gLightType;
float gSpecularPower;
float4 PPL_light_stage_point_light_fs(v2f IN) : COLOR0
{
// Get fragment coordinate, from NDC space [-1, 1] to [0, 1].
float2 fragcoord = ((IN.lightPos.xy / IN.lightPos.w) + 1.0f) / 2.0f;
// Calculate lighting with G-Buffer textures
float3 position = tex2D(gPositionMap, fragcoord).xyz;
float3 normal = tex2D(gNormalMap, fragcoord).xyz;
normal = normalize(normal);
// Attenuation
float3 lightDirection = position - gPointLight.position;
float dist = length(lightDirection);
float att = gPointLight.atten.constant + gPointLight.atten.linearr * dist + gPointLight.atten.quadratic * dist * dist;
// NL
lightDirection = normalize(lightDirection);
float NL = dot(normal, -lightDirection);
// Specular (Blinn-Phong)
float specular = 0.0f;
//if (NL > 0)
//{
// float3 vertexToEye = normalize(gEyePosition - position);
// float3 lightReflect = normalize(reflect(lightDirection, normal));
// specular = pow(saturate(dot(vertexToEye, lightReflect)), gSpecularPower);
//}
// Apply attenuation to NL
NL = NL / min(1.0, att);
float3 lightColor = gPointLight.base.color * gPointLight.base.diffuseIntensity;
return float4(lightColor.r, lightColor.g, lightColor.b, 1.0f) * NL;
}
// Vertex shader for Pre-pass Lighting material stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 position : POSITION;
float2 texcoord : TEXCOORD0;
float3 normal : TEXCOORD1;
float4 projPos : TEXCOORD2;
};
v2f PPL_material_stage_vs(a2v IN)
{
v2f OUT;
float4x4 wv = mul(gWorldMtx, gViewMtx);
float4x4 wvp = mul(gViewMtx, gProjectionMtx);
wvp = mul(wvp, gWorldMtx);
// Transform position to homogeneous clip space
OUT.position = mul(wvp, float4(IN.position, 1.0f));
// Normal (it's not necessary, but I have to see if it influences the execution)
OUT.normal = mul(gWorldMtx, float4(IN.normal, 0.0f)).xyz;
// Copy texture coordinates
OUT.texcoord = IN.texcoord;
// Copy projected position to get the fragment coordinate
OUT.projPos = OUT.position;
return OUT;
}
// Fragment shader for Pre-pass Lighting material stage.
// L-buffer texture
sampler2D gLightMap : TEXUNIT0;
// Object's material specific textures
sampler2D gColorMap : TEXUNIT1;
float4 PPL_material_stage_fs(v2f IN) : COLOR0
{
float2 fragcoord = ((IN.projPos.xy / IN.projPos.w) + 1.0f) / 2.0f;
// Get all light contributions for this pixel
float4 light = tex2D(gLightMap, fragcoord);
float3 combined = saturate(light.rgb);// + light.aaa);
// Get material albedo from texture map
float4 diffuse = tex2D(gColorMap, IN.texcoord);
return float4(combined, 1.0f) * diffuse;
}
Any suggestions?
You may want to use the WPOS register (VPOS in HLSL) instead of calculating the screen locations.
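A minimal sketch of that change for the point-light fragment shader (gScreenSize is an assumed new uniform holding the render-target size in pixels; depending on the framebuffer origin, the y coordinate may need flipping):
float2 gScreenSize; // assumed uniform: render-target width/height in pixels
float4 PPL_light_stage_point_light_fs(float4 wpos : WPOS) : COLOR0
{
    // WPOS supplies the fragment's window position in pixels;
    // normalizing it gives the [0, 1] G-buffer coordinate directly.
    float2 fragcoord = wpos.xy / gScreenSize;
    float3 position = tex2D(gPositionMap, fragcoord).xyz;
    float3 normal = normalize(tex2D(gNormalMap, fragcoord).xyz);
    // Lighting as before.
    float3 lightDirection = position - gPointLight.position;
    float dist = length(lightDirection);
    float att = gPointLight.atten.constant + gPointLight.atten.linearr * dist + gPointLight.atten.quadratic * dist * dist;
    lightDirection = normalize(lightDirection);
    float NL = dot(normal, -lightDirection) / min(1.0, att);
    float3 lightColor = gPointLight.base.color * gPointLight.base.diffuseIntensity;
    return float4(lightColor, 1.0f) * NL;
}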
I'm currently having a problem with lighting in DirectX 11; actually, it's with the ambient lighting. Here is the code:
cbuffer ConstantBuffer
{
float4x4 final;
float4x4 rotation; // the rotation matrix
float4 lightvec; // the light's vector
float4 lightcol; // the light's color
float4 ambientcol; // the ambient light's color
}
struct VOut
{
float4 color : COLOR;
float4 position : SV_POSITION;
};
VOut VShader(float4 position : POSITION, float4 normal : NORMAL)
{
VOut output;
output.position = mul(final, position);
// set the ambient light
output.color = ambientcol;
// calculate the diffuse light and add it to the ambient light
float4 norm = normalize(mul(rotation, normal));
float diffusebrightness = saturate(dot(norm, lightvec));
output.color += lightcol * diffusebrightness;
return output;
}
float4 PShader(float4 color : COLOR) : SV_TARGET
{
return color;
}
Then I send the values to the shader:
ambLight.LightVector = D3DXVECTOR4(1.0f, 1.0f, 1.0f, 0.0f);
ambLight.LightColor = D3DXCOLOR(0.5f, 0.5f, 0.5f, 1.0f);
ambLight.AmbientColor = D3DXCOLOR(0.2f, 0.2f, 0.2f, 1.0f);
ShaderManager.UpdateSubresourceDiffuseShader(devcon);
And then I get the following:
Why?
I tried your shader and it seems to work, so maybe some variables are not being passed correctly.
You could try directly assigning one of the variables to the color output:
output.color = lightvec;
and
output.color = lightcol;
as a start, so you can double-check that the values are passed properly.
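For reference, a CPU-side struct mirroring that cbuffer might look like the sketch below (assuming the D3DX types used in the question); the field order and sizes must match the HLSL packing exactly:
struct ConstantBufferData
{
    D3DXMATRIX final;     // float4x4, 64 bytes
    D3DXMATRIX rotation;  // float4x4, 64 bytes
    D3DXVECTOR4 lightvec; // float4, 16 bytes
    D3DXCOLOR lightcol;   // float4, 16 bytes
    D3DXCOLOR ambientcol; // float4, 16 bytes
};
// 176 bytes total, already a multiple of 16, so no extra padding is needed.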