Alpha channel is acting up and outputting green in DirectX 11 and C++

I am new to DirectX 11. I wanted to add point-light behaviour to my program, so I defined a struct like so:
struct PointLights
{
XMFLOAT3 Pos; //Light Position in (X,Y,Z)
float Range; //the range after which objects will not be affected by this light
XMFLOAT4 Color; //Light Color
XMFLOAT3 att; // Attenuation Parameters
float Intensity;
};
And added it to the constant buffer like this:
struct ConstantBuffer
{
XMMATRIX World;
XMMATRIX WorldInv;
XMMATRIX View;
XMMATRIX Project;
XMFLOAT3 LightDirs[2];
XMFLOAT4 LightColor[2];
float SpecularIntensity;
XMFLOAT3 CameraPos;
PointLights pointLight;
};
This function is used to create the buffer:
void InitConstantBuffer(void)
{
D3D11_BUFFER_DESC desc;
SecureZeroMemory(&desc,sizeof(desc));
desc.BindFlags= D3D11_BIND_CONSTANT_BUFFER;
desc.ByteWidth = sizeof(ConstantBuffer);
desc.CPUAccessFlags = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
g_ConstantBuffer = nullptr; // zero the pointer itself, not sizeof(ID3D11Buffer) bytes
HRESULT HR= dev->CreateBuffer(&desc,0,&g_ConstantBuffer);
if(FAILED(HR))
MessageBox(_hwnd,L"Couldnt Create constant buffer",L"ERROR",0);
}
Now, when I update the point light's color to black but set its alpha to 1 (0,0,0,1),
the output is green; the output is black if the alpha is 0 (0,0,0,0):
PointLights pointLight;
SecureZeroMemory(&pointLight,sizeof(PointLights));
pointLight.att = XMFLOAT3(0,0,1);
pointLight.Intensity = 5;
pointLight.Pos = XMFLOAT3(Lppos[0],Lppos[1],Lppos[2]);
pointLight.Range = 200;
pointLight.Color = XMFLOAT4(0,0,0,1); // black
ConstantBuffer cb;
cb.World = XMMatrixTranspose(g_World);
cb.View = XMMatrixTranspose(g_View);
cb.Project = XMMatrixTranspose(g_Projection);
cb.WorldInv = InverseTranspose(g_World);
cb.LightDirs[0] = lights[0].Dir;
cb.LightDirs[1] = lights[1].Dir;
cb.LightColor[0] = lights[0].Color;
cb.LightColor[1] = lights[1].Color;
cb.SpecularIntensity = 10;
cb.CameraPos = XMFLOAT3(0,0,-10);
cb.pointLight = pointLight;
devcon->UpdateSubresource(g_ConstantBuffer,0,0,&cb,0,0);
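The buffer then has to be bound to every stage that reads it; since the cbuffer below is read in the pixel shader at register b0, a minimal binding sketch (same devcon, slot 0) would be:
devcon->VSSetConstantBuffers(0, 1, &g_ConstantBuffer);
devcon->PSSetConstantBuffers(0, 1, &g_ConstantBuffer);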
The output is green; I can't upload the screenshot because Stack Overflow won't let me.
Here is the pixel shader:
struct PointLights{
float3 Position;
float Range;
float4 Color;
float3 Atten;
float Intensity;
};
cbuffer ConstantBuffer:register(b0)
{
matrix world;
matrix worldInv;
matrix view;
matrix project;
float3 LightDir[2];
float4 LightColor[2];
float SpecularIntensity;
float3 CameraPos;
PointLights pointLight;
};
float4 Ps(Vout vo):SV_TARGET
{
float4 col = (float4)0;
col = pointLight.Color;
return col;
}
Please do help me out. I tried rearranging the members of the struct, but in vain.
Thanks.

It's quite likely that the data alignment between the C++ constant buffer structures and the HLSL cbuffer declarations does not quite match. Try using XMFLOAT4 instead of XMFLOAT3 to ensure they line up the same way in both.
You are also using XMMATRIX in struct ConstantBuffer. This might or might not work, depending on exactly how you are using it: it makes ConstantBuffer require 16-byte alignment in memory. If you are relying on that alignment, use XMVECTOR rather than XMFLOAT3 or XMFLOAT4. Otherwise, change XMMATRIX to XMFLOAT4X4.
If you find the DirectXMath alignment thing confusing, consider using the SimpleMath wrapper from DirectX Tool Kit.
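For illustration, here is a minimal sketch of one padded layout that satisfies HLSL's 16-byte packing rules for the structs in the question. The XMFLOAT4X4 matrices and the XMFLOAT4 light-direction array follow the suggestions above; the byte comments and the static_assert are additions for clarity:

struct PointLights
{
    DirectX::XMFLOAT3 Pos;   // 12 bytes
    float Range;             //  4 bytes -> completes one 16-byte register
    DirectX::XMFLOAT4 Color; // 16 bytes
    DirectX::XMFLOAT3 att;   // 12 bytes
    float Intensity;         //  4 bytes -> completes one 16-byte register
};

struct ConstantBuffer
{
    DirectX::XMFLOAT4X4 World;       // store via XMStoreFloat4x4(..., XMMatrixTranspose(m))
    DirectX::XMFLOAT4X4 WorldInv;
    DirectX::XMFLOAT4X4 View;
    DirectX::XMFLOAT4X4 Project;
    DirectX::XMFLOAT4 LightDirs[2];  // float3 padded to float4: HLSL aligns each array
                                     // element to a 16-byte register anyway
    DirectX::XMFLOAT4 LightColor[2];
    float SpecularIntensity;         // packs together with CameraPos into one register
    DirectX::XMFLOAT3 CameraPos;
    PointLights pointLight;          // a struct starts on a fresh 16-byte boundary
};
static_assert(sizeof(ConstantBuffer) % 16 == 0, "cbuffer sizes must be multiples of 16 bytes");

With this layout sizeof(ConstantBuffer) is 384 bytes, matching what the HLSL compiler computes for the cbuffer, and the struct no longer needs 16-byte alignment of its own.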

Related

Direct3D11 wrong vertex transformations in vertex shader

I'm trying to write a practice D3D11 rendering system that loads and renders FBX files, but I have a problem transforming vertices in the vertex shader.
I can't tell what is wrong: in the Visual Studio Graphics Debugger the mesh passed to the pipeline looks OK at the input assembler stage, but after the vertex shader transformations everything breaks and the render goes wrong. If someone can tell me what's wrong, I'd appreciate the info.
View of the Input Assembler Stage
View of the Vertex Shader Output
This is the vertex shader code:
cbuffer MatrixBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
struct VertexInputType
{
float4 position : POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
PixelInputType TextureVertexShader(VertexInputType input)
{
PixelInputType output;
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
output.normal = input.normal;
output.uv = input.uv;
return output;
}
And this is the matrix initialization code:
float lFieldOfView = 3.141592f * 0.4f;
float lScreenAspect = static_cast<float>(width_) / static_cast<float>(height_);
DirectX::XMMATRIX lProjectionMatrix = DirectX::XMMatrixPerspectiveFovLH(lFieldOfView, lScreenAspect, 1.0f, 10000.0f);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix, lProjectionMatrix);
DirectX::XMMATRIX lWorldMatrix = DirectX::XMMatrixIdentity();
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix, lWorldMatrix);
DirectX::XMFLOAT3 lookAtPos(0.0f, 0.0f, 0.0f);
DirectX::XMFLOAT3 eyePos(0.0f, 0.0f, -50.0f);
DirectX::XMFLOAT3 upDir(0.0f, 1.0f, 0.0f);
DirectX::XMVECTOR lLookAtPos = DirectX::XMLoadFloat3(&lookAtPos);
DirectX::XMVECTOR lEyePos = DirectX::XMLoadFloat3(&eyePos);
DirectX::XMVECTOR lUpDir = DirectX::XMLoadFloat3(&upDir);
DirectX::XMMATRIX lViewMatrix = DirectX::XMMatrixLookAtLH(lEyePos, lLookAtPos, lUpDir);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix, lViewMatrix);
D3D11_BUFFER_DESC lBufferDesc = { 0 };
lBufferDesc.ByteWidth = sizeof(MatrixBufferType);
lBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
lBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
D3D11_SUBRESOURCE_DATA lMatrixBufferData;
lMatrixBufferData.pSysMem = &lMatrixBuffer;
hResult = D3DDevice_->CreateBuffer(&lBufferDesc, &lMatrixBufferData, &D3DMatrixBuffer_);
From the comment, it looks like the issue might actually be a matrix row-major/column-major difference. The original HLSL code would probably work if the matrices were transposed on the CPU side.
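A minimal sketch of that fix, reusing the names from the snippet above (all three matrices would be stored this way before the buffer is created):

// Transpose on the CPU so that mul(vector, matrix) in the HLSL above sees the expected layout.
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix, DirectX::XMMatrixTranspose(lWorldMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix, DirectX::XMMatrixTranspose(lViewMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix, DirectX::XMMatrixTranspose(lProjectionMatrix));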

Direct3D11: Rendering 2D in 3D scene: how to make 2D objects not move on Viewport when camera changes position?

Images with examples of the problem: http://imgur.com/gallery/vmMyk
Hi,
I need some help with rendering 2D objects in a 3D scene with a 3D camera. I think I managed to get the 2D coordinates right in LH world coordinates. However, my rendered 2D objects are in the correct place only when the camera is at [0.0f, 0.0f, 0.0f]. In every other position, the location of the 2D objects in the scene is malformed. I think my matrices are screwed up, but I don't know where to look further. I'd appreciate good ideas; please comment if something's missing for you, and I'll edit the main post to provide more information.
I'm using a simple 3D color HLSL shader (VS and PS version 4.0) with alpha blending for the bigger triangle:
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
PS_INPUT VS ( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
input.Pos.w = 1.0f;
output.Pos = mul ( input.Pos, World );
output.Pos = mul ( output.Pos, View );
output.Pos = mul ( output.Pos, Projection );
output.Color = input.Color;
return output;
}
float4 PS ( PS_INPUT input ) : SV_Target
{
return input.Color;
}
That's my Vertex data struct:
struct Vertex
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT4 color;
Vertex() {};
Vertex(DirectX::XMFLOAT3 aPosition, DirectX::XMFLOAT4 aColor)
: position(aPosition)
, color(aColor)
{};
};
Render call for object:
bool PrimitiveMesh::Draw()
{
unsigned int stride = sizeof(Vertex);
unsigned int offset = 0;
D3DSystem::GetD3DDeviceContext()->IASetVertexBuffers(0, 1, &iVertexBuffer, &stride, &offset);
D3DSystem::GetD3DDeviceContext()->IASetIndexBuffer(iIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
D3DSystem::GetD3DDeviceContext()->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return true;
}
Draw call with initialization:
static PrimitiveMesh* mesh;
if (mesh == 0)
{
std::vector<PrimitiveMesh::Vertex> vertices;
mesh = new PrimitiveMesh();
DirectX::XMFLOAT4 color = { 186 / 256.0f, 186 / 256.0f, 186 / 256.0f, 0.8f };
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 0.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 600.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(800.0f, 600.0f, 0.0f), color });
mesh->SetVerticesAndIndices(vertices);
}
// Getting clean matrices here:
D3D::Matrices(world, view, projection, ortho);
iGI->TurnZBufferOff();
iGI->TurnOnAlphaBlending();
mesh->Draw();
XMMATRIX view2D = Camera::View2D();
iColorShader->Render(iGI->GetContext(), 3, &world, &view2D, &ortho);
iGI->TurnZBufferOn();
These are my 2D calculations for the camera:
up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
lookAt = DirectX::XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
rotationMatrix = DirectX::XMMatrixRotationRollPitchYaw(0.0f, 0.0f, 0.0f); // (pitch, yaw, roll);
up = DirectX::XMVector3TransformCoord(up, rotationMatrix);
lookAt = DirectX::XMVector3TransformCoord(lookAt, rotationMatrix) + position;
view2D = DirectX::XMMatrixLookAtLH(position, lookAt, up);
I'd appreciate any help.
Kind regards.
With shaders, you are not forced to use matrices; you have the flexibility to simplify the problem.
Let's say you render 2D objects using coordinates in pixels: the only requirement is to scale and offset them back into the normalized projective space.
A vertex shader could be as short as that :
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
PS_INPUT VS ( VS_INPUT input ) {
PS_INPUT output;
output.Pos.xy = input.Pos.xy * rcpDim * 2; // from pixel to [0..2]
output.Pos.xy -= 1; // to [-1..1]
output.Pos.y *= -1; // because top left in texture space is bottom left in projective space
output.Pos.zw = float2(0,1);
output.Color = input.Color;
return output;
}
You can of course build a set of matrices achieving the same result with your original shader: just set World and View to identity and Projection to an orthographic projection with XMMatrixOrthographicOffCenterLH(0,width,0,height,0,1). But as you are beginning with 3D programming, you will soon have to learn to deal with multiple shaders anyway, so take it as an exercise.
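A minimal sketch of that matrix route, assuming width and height hold the render-target size in pixels:

using namespace DirectX;
XMMATRIX world2D = XMMatrixIdentity();
XMMATRIX view2D = XMMatrixIdentity();
XMMATRIX ortho2D = XMMatrixOrthographicOffCenterLH(
    0.0f, (float)width,   // left, right
    0.0f, (float)height,  // bottom, top
    0.0f, 1.0f);          // near, far
// Feed these to the original shader in place of the 3D camera's matrices
// (transposed first, if the rest of the code transposes before upload).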
Well, I fixed my problem. For some weird reason, DirectXMath was generating a wrong XMMATRIX: my XMMatrixOrthographicLH() was completely incorrect for good parameters. I solved the problem with the classic definition of the orthographic matrix found in this article (definition in Fig. 10):
auto orthoMatrix = DirectX::XMMatrixIdentity();
orthoMatrix.r[0].m128_f32[0] = 2.0f / Engine::VideoSettings::Current()->WindowWidth();
orthoMatrix.r[1].m128_f32[1] = 2.0f / Engine::VideoSettings::Current()->WindowHeight();
orthoMatrix.r[2].m128_f32[2] = -(2.0f / (screenDepth - screenNear));
orthoMatrix.r[2].m128_f32[3] = -(screenDepth + screenNear) / (screenDepth - screenNear);
galop1n gives a good solution, but on my system
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
NEEDS to be a multiple of 16 bytes in size, which can be done like this:
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
DirectX::XMFLOAT2 rcpDim2;
};
// Supply the vertex shader constant data.
VS_CONSTANT_BUFFER VsConstData;
VsConstData.rcpDim = { 2.0f / w,2.0f / h};
// Fill in a buffer description.
D3D11_BUFFER_DESC cbDesc;
ZeroMemory(&cbDesc, sizeof(cbDesc));
cbDesc.ByteWidth = sizeof(VS_CONSTANT_BUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
// Fill in the subresource data.
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = &VsConstData;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
// Create the buffer.
HRESULT hr = pDevice->CreateBuffer(&cbDesc, &InitData, &pConstantBuffer11);
Or with the structure aligned explicitly:
__declspec(align(16))
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
};
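Since the buffer is created with D3D11_USAGE_DYNAMIC and D3D11_CPU_ACCESS_WRITE, later per-frame updates have to go through Map/Unmap (UpdateSubresource is not valid for dynamic resources). A minimal sketch, with pContext standing in for the immediate context:

D3D11_MAPPED_SUBRESOURCE mapped;
if (SUCCEEDED(pContext->Map(pConstantBuffer11, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped)))
{
    memcpy(mapped.pData, &VsConstData, sizeof(VsConstData));
    pContext->Unmap(pConstantBuffer11, 0);
}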

Background face is visible over foreground face in same mesh while using a diffuse shader in DirectX

I am trying to create a simple diffuse shader to paint primitive objects in DirectX 9 and have run into the following problem: when I use a DirectX primitive object like a torus or teapot, some faces in the foreground of the mesh are invisible. I don't think this is ordinary back-face culling, as I cannot reproduce the behavior with primitives like a sphere or box, where no two quads have the same normal. Following are some screenshots in fill and wire-frame modes.
(Screenshot: torus in fill mode.)
Following is my vertex declaration code.
// vertex position...
D3DVERTEXELEMENT9 element;
element.Stream = 0;
element.Offset = 0;
element.Type = D3DDECLTYPE_FLOAT3;
element.Method = D3DDECLMETHOD_DEFAULT;
element.Usage = D3DDECLUSAGE_POSITION;
element.UsageIndex = 0;
m_vertexElement.push_back(element);
// vertex normal
element.Stream = 0;
element.Offset = 12; //3 floats * 4 bytes per float
element.Type = D3DDECLTYPE_FLOAT3;
element.Method = D3DDECLMETHOD_DEFAULT;
element.Usage = D3DDECLUSAGE_NORMAL;
element.UsageIndex = 0;
m_vertexElement.push_back(element);
And the shader code in development:
float4x4 MatWorld : register(c0);
float4x4 MatViewProj : register(c4);
float4 matColor : register(c0);
struct VS_INPUT
{
float4 Position : POSITION;
float3 Normal : NORMAL;
};
struct VS_OUTPUT
{
float4 Position : POSITION;
float3 Normal : TEXCOORD0;
};
struct PS_OUTPUT
{
float4 Color : COLOR0;
};
VS_OUTPUT vsmain(in VS_INPUT In)
{
VS_OUTPUT Out;
float4 wpos = mul(In.Position, MatWorld);
Out.Position = mul(wpos, MatViewProj);
Out.Normal = normalize(mul(In.Normal, MatWorld));
return Out;
};
PS_OUTPUT psmain(in VS_OUTPUT In)
{
PS_OUTPUT Out;
float4 ambient = {0.1, 0.0, 0.0, 1.0};
float3 light = {1, 0, 0};
Out.Color = ambient + matColor * saturate(dot(light, In.Normal));
return Out;
};
I have also tried setting different render states for Depth-Stencil but wasn't successful.
I figured it out! This is a depth-buffer (Z-buffer) issue; you can enable the Z-buffer in your code, either through the fixed pipeline or in the shader.
To enable the Z-buffer in the fixed pipeline:
First, add the following code when creating the D3D device:
d3dpp.EnableAutoDepthStencil = TRUE ;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16 ;
Then enable the Z-buffer before drawing:
device->SetRenderState(D3DRS_ZENABLE, TRUE) ;
Finally, clear the Z-buffer in the render function:
device->Clear( 0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0,0,0), 1.0f, 0 );
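The in-shader route mentioned above can be sketched as render states inside a D3DX effect pass; this is a hypothetical example reusing the question's vsmain/psmain entry points (the technique name and shader profiles are assumptions):

technique DiffuseTech
{
    pass P0
    {
        ZEnable = TRUE;      // turn on depth testing
        ZWriteEnable = TRUE; // write depth so nearer faces occlude farther ones
        ZFunc = LESSEQUAL;   // standard depth comparison
        VertexShader = compile vs_2_0 vsmain();
        PixelShader = compile ps_2_0 psmain();
    }
}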

How to create Cubemap in DirectX 9

I'm trying to create a cube map in DirectX 9, but for some reason it's not working. I've used DirectX's Texture Utility to create a .dds texture file for the cube, but when I draw it, it only renders a solid color. Here's the code I have:
SkyBox.h
#pragma once
#include<D3DX9Mesh.h>
#include"DirectX.h"
class SkyBox{
public:
SkyBox(LPCSTR textureFile);
~SkyBox();
void Draw();
protected:
IDirect3DCubeTexture9* texture;
LPD3DXMESH mesh;
};
SkyBox.cpp
#include"SkyBox.h"
SkyBox::SkyBox(LPCSTR textureFile)
{
D3DXCreateBox(DirectX::device, 1.0f, 1.0f, 1.0f, &mesh, NULL);
D3DXCreateCubeTextureFromFile(DirectX::device, textureFile, &texture);
}
SkyBox::~SkyBox()
{
mesh->Release();
texture->Release();
}
void SkyBox::Draw()
{
D3DXHANDLE textureHandle = DirectX::currentShaderEffect->GetParameterByName(0, "tex0");
DirectX::currentShaderEffect->SetTexture(textureHandle, texture);
DirectX::currentShaderEffect->CommitChanges();
UINT passNum = 5;
DirectX::currentShaderEffect->Begin(&passNum, 0);
DirectX::currentShaderEffect->BeginPass(5);
mesh->DrawSubset(0);
DirectX::currentShaderEffect->EndPass();
DirectX::currentShaderEffect->End();
}
And this is my shader for the Cube Map:
uniform extern float4x4 mvp;
uniform extern texture tex0;
struct SkyboxVS
{
float4 pos : POSITION0;
float3 uv0 : TEXCOORD0;
};
sampler SkyBoxTex = sampler_state
{
Texture = <tex0>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
AddressU = WRAP;
AddressV = WRAP;
};
SkyboxVS VertexSkybox(float3 position : POSITION0, float3 texCoord : TEXCOORD0)
{
SkyboxVS skyVS = (SkyboxVS)0;
skyVS.pos = mul(float4(position, 1.0f), mvp);
skyVS.uv0 = texCoord;
return skyVS;
}
float4 PixelSkybox(float3 texCoord: TEXCOORD0) : COLOR
{
float4 color = texCUBE(SkyBoxTex, texCoord);
return color;
}
technique TransformTech
{
pass P5
{
vertexShader = compile vs_2_0 VertexSkybox();
pixelShader = compile ps_2_0 PixelSkybox();
ZFunc = Always;
StencilEnable = true;
StencilFunc = Always;
StencilPass = Replace;
StencilRef = 0;
}
}
Here's some sample code:
Sky::Sky(const std::string& envmapFilename, float skyRadius)
: mRadius(skyRadius)
{
HR(D3DXCreateSphere(gd3dDevice, skyRadius, 30, 30, &mSphere, 0));
HR(D3DXCreateCubeTextureFromFile(gd3dDevice, envmapFilename.c_str(), &mEnvMap));
ID3DXBuffer* errors = 0;
HR(D3DXCreateEffectFromFile(gd3dDevice, "sky.fx", 0, 0, 0,
0, &mFX, &errors));
if( errors )
MessageBox(0, (char*)errors->GetBufferPointer(), 0, 0);
mhTech = mFX->GetTechniqueByName("SkyTech");
mhWVP = mFX->GetParameterByName(0, "gWVP");
mhEnvMap = mFX->GetParameterByName(0, "gEnvMap");
// Set effect parameters that do not vary.
HR(mFX->SetTechnique(mhTech));
HR(mFX->SetTexture(mhEnvMap, mEnvMap));
}
void Sky::draw()
{
// Sky always centered about camera's position.
D3DXMATRIX W;
D3DXVECTOR3 p = gCamera->pos();
D3DXMatrixTranslation(&W, p.x, p.y, p.z);
HR(mFX->SetMatrix(mhWVP, &(W*gCamera->viewProj())));
UINT numPasses = 0;
HR(mFX->Begin(&numPasses, 0));
HR(mFX->BeginPass(0));
HR(mSphere->DrawSubset(0));
HR(mFX->EndPass());
HR(mFX->End());
}
And shader code:
OutputVS EnvMapVS(float3 posL : POSITION0, float3 normalL : NORMAL0, float2 tex0: TEXCOORD0)
{
// Zero out our output.
OutputVS outVS = (OutputVS)0;
// Transform normal to world space.
outVS.normalW = mul(float4(normalL, 0.0f), gWorldInvTrans).xyz;
// Transform vertex position to world space.
float3 posW = mul(float4(posL, 1.0f), gWorld).xyz;
// Compute the unit vector from the vertex to the eye.
outVS.toEyeW = gEyePosW - posW;
// Transform to homogeneous clip space.
outVS.posH = mul(float4(posL, 1.0f), gWVP);
// Pass on texture coordinates to be interpolated in rasterization.
outVS.tex0 = tex0;
// Done--return the output.
return outVS;
}
float4 EnvMapPS(float3 normalW : TEXCOORD0,
float3 toEyeW : TEXCOORD1,
float2 tex0 : TEXCOORD2) : COLOR
{
// Interpolated normals can become unnormal--so normalize.
normalW = normalize(normalW);
toEyeW = normalize(toEyeW);
// Light vector is opposite the direction of the light.
float3 lightVecW = -gLight.dirW;
// Compute the reflection vector.
float3 r = reflect(-lightVecW, normalW);
// Determine how much (if any) specular light makes it into the eye.
float t = pow(max(dot(r, toEyeW), 0.0f), gMtrl.specPower);
// Determine the diffuse light intensity that strikes the vertex.
float s = max(dot(lightVecW, normalW), 0.0f);
// Get the texture color.
float4 texColor = tex2D(TexS, tex0);
// Get the reflected color.
float3 envMapTex = reflect(-toEyeW, normalW);
float3 reflectedColor = texCUBE(EnvMapS, envMapTex);
// Weighted average between the reflected color, and usual
// diffuse/ambient material color modulated with the texture color.
float3 ambientMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.ambient*texColor);
float3 diffuseMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.diffuse*texColor);
// Compute the ambient, diffuse and specular terms separately.
float3 spec = t*(gMtrl.spec*gLight.spec).rgb;
float3 diffuse = s*(diffuseMtrl*gLight.diffuse.rgb);
float3 ambient = ambientMtrl*gLight.ambient;
float3 final = ambient + diffuse + spec;
// Output the color and the alpha.
return float4(final, gMtrl.diffuse.a*texColor.a);
}

Pre-Pass Lighting OpenGL implementation artifact

I am implementing the Pre-Pass Lighting (PPL) algorithm in OpenGL for my master's dissertation project, after having implemented a Deferred renderer as well. The Deferred renderer works perfectly, and I based the PPL implementation on it. I get a very weird artifact after the lighting pass of the algorithm: the data contained in the L-buffer, where I accumulate the contributions of the lights in the scene, is correct, but it ends up slightly offset with respect to the geometry, so the error is clearly visible when I apply it to the scene in the material pass! (I can't post the image here, but here is a link to see it: http://postimage.org/image/kxhlbnl9v/)
It looks like the light-map cube is somehow computed with an offset (different on each axis) from the geometry. I have checked the shaders and the C++ code many times and I do not understand where this problem comes from; I am running out of ideas. Below is the code for the three passes of the algorithm, which are called in sequence. The code is experimental for now, so I know it's not well designed at this stage. I also include the shaders I use in every stage to write to the G-buffer, L-buffer, and framebuffer, in that order.
C++ CODE:
// Draw geometry to g buffer
void GLPrePassLightingRendererV2::GeometryStage()
{
// Set GL states
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glDepthFunc(GL_LEQUAL);
glDisable(GL_BLEND);
glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
// Bind G-Buffer for geometry pass
mGBuffer->BindForWriting();
// Bind geometry stage shaders
mTargetRenderSystem->BindShader(mGeometryStageVS);
mTargetRenderSystem->BindShader(mGeometryStageFS);
// Clear the framebuffer
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR | FBT_DEPTH);
// Iterate over all the Renderables in the previously built RenderQueue
RenderableList* visibles = mSceneManager->GetRenderQueue()->GetRenderables();
// Set shader params here
//[...]
// Get the transformation info from the node the renderable is attached to
for (RenderableList::iterator it = visibles->begin(); it != visibles->end(); ++it)
{
Renderable* renderable = *it;
Material* mat = renderable->GetMaterial();
mGeometryStageVS->Update();
mGeometryStageFS->Update();
// Render the object
RenderOperation rop;
renderable->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
// Only the geometry pass will write to the depth buffer
glDepthMask(GL_FALSE);
glDisable(GL_DEPTH_TEST);
}
// Accumulate lights contribs in L-buffer using G-buffer
void GLPrePassLightingRendererV2::LightingStage()
{
// Enable additive blending for lights
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
//glCullFace(GL_FRONT);
// Bind shader for light stage
mTargetRenderSystem->BindShader(mLightStageVS);
mTargetRenderSystem->BindShader(mLightStageFS);
// Bind G-Buffer for reading and L-Buffer for writing for lighting pass
mGBuffer->BindForReading();
mLBuffer->BindForWriting();
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR);
// Set shader params
// [...]
// Get all the lights in frustum, not by renderable
const LightList& lights = mSceneManager->GetLightsInFrustum();
// For each light in the frustum
LightList::const_iterator front_light_it;
for (LightList::const_iterator lit = lights.begin(); lit != lights.end(); ++lit)
{
// Send per light parameters to the shader
Light* l = (*lit);
SetLight(*l);
// Calculate bounding sphere for light and scale accordingly to instensity
float lightSphereScale = GetPointLightSphereScale(l->GetColor(), l->GetDiffuseIntensity());
// TODO: Render a sphere for each point light, a full screen quad for each directional
worldMtx.Identity();
worldMtx.SetScale(lightSphereScale, lightSphereScale, lightSphereScale);
worldMtx.SetTranslation(l->GetPosition());
mLightStageVS->SetParameterValue("gWorldMtx", (float*)&worldMtx);
mLightStageVS->Update();
mLightStageFS->Update();
static MeshInstance* sphere = mSceneManager->CreateMeshInstance("LightSphere", MBT_LIGHT_SPHERE);
RenderOperation rop;
sphere->GetSubMeshInstance(0)->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
// Disable additive blending
glDisable(GL_BLEND);
}
// Combine L-buffer and material information per object
void GLPrePassLightingRendererV2::MaterialStage()
{
// Set some GL states
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
//glCullFace(GL_BACK);
// Bind material stage shaders (TODO: actually every object will bind its own matarial, if not a default one is used)
mTargetRenderSystem->BindShader(mMaterialStageVS);
mTargetRenderSystem->BindShader(mMaterialStageFS);
// Bind L-Buffer for reading
mLBuffer->BindForReading();
mTargetRenderSystem->ClearFrameBuffer(FBT_COLOUR | FBT_DEPTH, Math::ColourValue::WHITE);
// Iterate over all the Renderables in the previously built RenderQueue
RenderableList* visibles = mSceneManager->GetRenderQueue()->GetRenderables();
// Set shader params here
// [...]
// Get the transformation info from the node the renderable is attached to
for (RenderableList::iterator it = visibles->begin(); it != visibles->end(); ++it)
{
Renderable* renderable = *it;
Material* mat = renderable->GetMaterial();
// Set texture units
if (mat)
{
for (unsigned short i = 0; i < mat->GetTextureUnitCount(); ++i)
{
const TextureUnit* unit = mat->GetTextureUnit(i);
GLTexture* t = static_cast<GLTexture*>(unit->GetTexture());
glActiveTexture(GL_TEXTURE1); // This is needed because the first texture map slot is held by the LBuffer!
glBindTexture(GL_TEXTURE_2D, t->GetGLId());
}
}
mMaterialStageVS->Update();
mMaterialStageFS->Update();
// Render the object
RenderOperation rop;
renderable->GetRenderOperation(rop);
mTargetRenderSystem->Render(rop);
}
}
NVIDIA CG Shaders:
// Vertex shader for Deferred Rendering geometry stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 texCoord : TEXCOORD0;
};
struct v2f
{
float4 position : POSITION;
float3 normal : TEXCOORD0;
float3 wPosition : TEXCOORD1;
float2 texCoord : TEXCOORD2;
};
v2f PPL_geometry_stage_vs(a2v IN)
{
v2f OUT;
// Transform to world space
OUT.wPosition = mul(gWorldMtx, float4(IN.position, 1.0f)).xyz;
OUT.normal = mul(gWorldMtx, float4(IN.normal, 0.0f)).xyz;
// Transform to homogeneous clip space
OUT.position = mul(gViewMtx, float4(OUT.wPosition, 1.0f));
OUT.position = mul(gProjectionMtx, OUT.position);
OUT.texCoord = IN.texCoord;
return OUT;
}
// Fragment shader for Pre-pass Lighting geometry stage.
struct f2a
{
float4 position : COLOR0;
float4 normal : COLOR1;
};
f2a PPL_geometry_stage_fs(v2f IN)
{
f2a OUT;
OUT.position = float4(IN.wPosition, 1.0f);
OUT.normal = float4(normalize(IN.normal), 1.0f);
return OUT;
}
// Vertex shader for Pre-pass lighting light stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
};
struct v2f
{
float4 position : POSITION;
float4 lightPos : TEXCOORD0;
};
v2f PPL_light_stage_vs(a2v IN)
{
v2f OUT;
float4x4 wv = mul(gWorldMtx, gViewMtx);
float4x4 wvp = mul(gViewMtx, gProjectionMtx);
wvp = mul(wvp, gWorldMtx);
// Transform position to homogeneous clip space
OUT.position = mul(wvp, float4(IN.position, 1.0f));
// Copy light position to calculate fragment coordinate
OUT.lightPos = OUT.position;
return OUT;
}
// Fragment shader for Pre-pass lighting light stage.
// Light structures
struct BaseLight
{
float3 color;
float ambientIntensity;
float diffuseIntensity;
};
struct DirectionalLight
{
struct BaseLight base;
float3 direction;
};
struct Attenuation
{
float constant;
float linearr;
float quadratic;
};
struct PointLight
{
struct BaseLight base;
float3 position;
Attenuation atten;
};
struct SpotLight
{
struct PointLight base;
float3 direction;
float cutoff;
};
// G-Buffer textures
sampler2D gPositionMap : TEXUNIT0;
sampler2D gNormalMap : TEXUNIT1;
// Light variables
float3 gEyePosition;
DirectionalLight gDirectionalLight;
PointLight gPointLight;
SpotLight gSpotLight;
int gLightType;
float gSpecularPower;
float4 PPL_light_stage_point_light_fs(v2f IN) : COLOR0
{
// Get fragment coordinate, from NDC space [-1, 1] to [0, 1].
float2 fragcoord = ((IN.lightPos.xy / IN.lightPos.w) + 1.0f) / 2.0f;
// Calculate lighting with G-Buffer textures
float3 position = tex2D(gPositionMap, fragcoord).xyz;
float3 normal = tex2D(gNormalMap, fragcoord).xyz;
normal = normalize(normal);
// Attenuation
float3 lightDirection = position - gPointLight.position;
float dist = length(lightDirection);
float att = gPointLight.atten.constant + gPointLight.atten.linearr * dist + gPointLight.atten.quadratic * dist * dist;
// NL
lightDirection = normalize(lightDirection);
float NL = dot(normal, -lightDirection);
// Specular (Blinn-Phong)
float specular = 0.0f;
//if (NL > 0)
//{
// float3 vertexToEye = normalize(gEyePosition - position);
// float3 lightReflect = normalize(reflect(lightDirection, normal));
// specular = pow(saturate(dot(vertexToEye, lightReflect)), gSpecularPower);
//}
// Apply attenuation to NL
NL = NL / min(1.0, att);
float3 lightColor = gPointLight.base.color * gPointLight.base.diffuseIntensity;
return float4(lightColor.r, lightColor.g, lightColor.b, 1.0f) * NL;
}
// Vertex shader for Pre-pass lighting material stage.
float4x4 gWorldMtx;
float4x4 gViewMtx;
float4x4 gProjectionMtx;
struct a2v
{
float3 position : POSITION;
float3 normal : NORMAL;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 position : POSITION;
float2 texcoord : TEXCOORD0;
float3 normal : TEXCOORD1;
float4 projPos : TEXCOORD2;
};
v2f PPL_material_stage_vs(a2v IN)
{
v2f OUT;
float4x4 wv = mul(gWorldMtx, gViewMtx);
float4x4 wvp = mul(gViewMtx, gProjectionMtx);
wvp = mul(wvp, gWorldMtx);
// Transform position to homogeneous clip space
OUT.position = mul(wvp, float4(IN.position, 1.0f));
// Normal (it's not necessary, but I want to see if it influences the execution)
OUT.normal = mul(gWorldMtx, float4(IN.normal, 0.0f)).xyz;
// Copy texture coordinates
OUT.texcoord = IN.texcoord;
// Copy projected position to get the fragment coordinate
OUT.projPos = OUT.position;
return OUT;
}
// Fragment shader for Pre-pass lighting material stage.
// L-buffer texture
sampler2D gLightMap : TEXUNIT0;
// Object's material specific textures
sampler2D gColorMap : TEXUNIT1;
float4 PPL_material_stage_fs(v2f IN) : COLOR0
{
float2 fragcoord = ((IN.projPos.xy / IN.projPos.w) + 1.0f) / 2.0f;
// Get all light contributions for this pixel
float4 light = tex2D(gLightMap, fragcoord);
float3 combined = saturate(light.rgb);// + light.aaa);
// Get material albedo from texture map
float4 diffuse = tex2D(gColorMap, IN.texcoord);
return float4(combined, 1.0f) * diffuse;
}
Any suggestions?
You may want to use the WPOS register (VPOS in HLSL) instead of calculating the screen locations.
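For illustration, a sketch of the point-light fragment shader reworked to use WPOS; gScreenSize is an assumed uniform holding the render-target size in pixels (in OpenGL the window origin and the texture origin are both bottom-left, so no flip is needed):

float2 gScreenSize;

float4 PPL_light_stage_point_light_fs(v2f IN, float4 wpos : WPOS) : COLOR0
{
    // Pixel coordinates -> [0,1] texture coordinates; no perspective divide needed.
    float2 fragcoord = wpos.xy / gScreenSize;
    float3 position = tex2D(gPositionMap, fragcoord).xyz;
    float3 normal = normalize(tex2D(gNormalMap, fragcoord).xyz);
    // Attenuation and N.L exactly as in the original shader.
    float3 lightDirection = position - gPointLight.position;
    float dist = length(lightDirection);
    float att = gPointLight.atten.constant + gPointLight.atten.linearr * dist + gPointLight.atten.quadratic * dist * dist;
    float NL = dot(normal, -normalize(lightDirection)) / min(1.0, att);
    float3 lightColor = gPointLight.base.color * gPointLight.base.diffuseIntensity;
    return float4(lightColor, 1.0f) * NL;
}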